Diffstat (limited to 'poky/meta/classes')
-rw-r--r-- poky/meta/classes/autotools.bbclass | 14
-rw-r--r-- poky/meta/classes/baremetal-image.bbclass | 1
-rw-r--r-- poky/meta/classes/base.bbclass | 35
-rw-r--r-- poky/meta/classes/buildhistory.bbclass | 2
-rw-r--r-- poky/meta/classes/buildstats.bbclass | 40
-rw-r--r-- poky/meta/classes/ccache.bbclass | 5
-rw-r--r-- poky/meta/classes/cmake.bbclass | 1
-rw-r--r-- poky/meta/classes/cve-check.bbclass | 35
-rw-r--r-- poky/meta/classes/devicetree.bbclass | 2
-rw-r--r-- poky/meta/classes/distutils3-base.bbclass | 3
-rw-r--r-- poky/meta/classes/externalsrc.bbclass | 17
-rw-r--r-- poky/meta/classes/go.bbclass | 8
-rw-r--r-- poky/meta/classes/goarch.bbclass | 9
-rw-r--r-- poky/meta/classes/gtk-doc.bbclass | 1
-rw-r--r-- poky/meta/classes/image-live.bbclass | 2
-rw-r--r-- poky/meta/classes/image.bbclass | 7
-rw-r--r-- poky/meta/classes/image_types.bbclass | 10
-rw-r--r-- poky/meta/classes/image_types_wic.bbclass | 10
-rw-r--r-- poky/meta/classes/insane.bbclass | 33
-rw-r--r-- poky/meta/classes/kernel-devicetree.bbclass | 7
-rw-r--r-- poky/meta/classes/kernel-fitimage.bbclass | 202
-rw-r--r-- poky/meta/classes/kernel.bbclass | 18
-rw-r--r-- poky/meta/classes/license.bbclass | 17
-rw-r--r-- poky/meta/classes/license_image.bbclass | 7
-rw-r--r-- poky/meta/classes/linuxloader.bbclass | 4
-rw-r--r-- poky/meta/classes/meson.bbclass | 2
-rw-r--r-- poky/meta/classes/native.bbclass | 23
-rw-r--r-- poky/meta/classes/npm.bbclass | 39
-rw-r--r-- poky/meta/classes/package.bbclass | 9
-rw-r--r-- poky/meta/classes/package_deb.bbclass | 1
-rw-r--r-- poky/meta/classes/package_ipk.bbclass | 2
-rw-r--r-- poky/meta/classes/package_rpm.bbclass | 43
-rw-r--r-- poky/meta/classes/patch.bbclass | 1
-rw-r--r-- poky/meta/classes/populate_sdk_base.bbclass | 2
-rw-r--r-- poky/meta/classes/report-error.bbclass | 4
-rw-r--r-- poky/meta/classes/reproducible_build.bbclass | 15
-rw-r--r-- poky/meta/classes/rootfs_deb.bbclass | 4
-rw-r--r-- poky/meta/classes/rootfs_ipk.bbclass | 4
-rw-r--r-- poky/meta/classes/sanity.bbclass | 22
-rw-r--r-- poky/meta/classes/sstate.bbclass | 6
-rw-r--r-- poky/meta/classes/staging.bbclass | 6
-rw-r--r-- poky/meta/classes/systemd.bbclass | 3
42 files changed, 485 insertions(+), 191 deletions(-)
diff --git a/poky/meta/classes/autotools.bbclass b/poky/meta/classes/autotools.bbclass
index 2ceb790b5..9dc8ebdaa 100644
--- a/poky/meta/classes/autotools.bbclass
+++ b/poky/meta/classes/autotools.bbclass
@@ -17,7 +17,7 @@ def autotools_dep_prepend(d):
and not d.getVar('INHIBIT_DEFAULT_DEPS'):
deps += 'libtool-cross '
- return deps + 'gnu-config-native '
+ return deps
DEPENDS_prepend = "${@autotools_dep_prepend(d)} "
@@ -30,7 +30,7 @@ inherit siteinfo
export CONFIG_SITE
acpaths ?= "default"
-EXTRA_AUTORECONF = "--exclude=autopoint"
+EXTRA_AUTORECONF = "--exclude=autopoint --exclude=gtkdocize"
export lt_cv_sys_lib_dlsearch_path_spec = "${libdir} ${base_libdir}"
@@ -215,21 +215,13 @@ autotools_do_configure() {
PRUNE_M4="$PRUNE_M4 gettext.m4 iconv.m4 lib-ld.m4 lib-link.m4 lib-prefix.m4 nls.m4 po.m4 progtest.m4"
fi
mkdir -p m4
- if grep -q "^[[:space:]]*[AI][CT]_PROG_INTLTOOL" $CONFIGURE_AC; then
- if ! echo "${DEPENDS}" | grep -q intltool-native; then
- bbwarn "Missing DEPENDS on intltool-native"
- fi
- PRUNE_M4="$PRUNE_M4 intltool.m4"
- bbnote Executing intltoolize --copy --force --automake
- intltoolize --copy --force --automake
- fi
for i in $PRUNE_M4; do
find ${S} -ignore_readdir_race -name $i -delete
done
bbnote Executing ACLOCAL=\"$ACLOCAL\" autoreconf -Wcross --verbose --install --force ${EXTRA_AUTORECONF} $acpaths
- ACLOCAL="$ACLOCAL" autoreconf -Wcross --verbose --install --force ${EXTRA_AUTORECONF} $acpaths || die "autoreconf execution failed."
+ ACLOCAL="$ACLOCAL" autoreconf -Wcross -Wno-obsolete --verbose --install --force ${EXTRA_AUTORECONF} $acpaths || die "autoreconf execution failed."
cd $olddir
fi
if [ -e ${CONFIGURE_SCRIPT} ]; then
diff --git a/poky/meta/classes/baremetal-image.bbclass b/poky/meta/classes/baremetal-image.bbclass
index 90d58f261..b0f5e885b 100644
--- a/poky/meta/classes/baremetal-image.bbclass
+++ b/poky/meta/classes/baremetal-image.bbclass
@@ -56,7 +56,6 @@ python do_rootfs(){
# Assure binaries, manifest and qemubootconf are populated on DEPLOY_DIR_IMAGE
do_image_complete[dirs] = "${TOPDIR}"
-do_image_complete[umask] = "022"
SSTATETASKS += "do_image_complete"
SSTATE_SKIP_CREATION_task-image-complete = '1'
do_image_complete[sstate-inputdirs] = "${IMGDEPLOYDIR}"
diff --git a/poky/meta/classes/base.bbclass b/poky/meta/classes/base.bbclass
index 78ae28bb0..b4160402f 100644
--- a/poky/meta/classes/base.bbclass
+++ b/poky/meta/classes/base.bbclass
@@ -506,15 +506,10 @@ python () {
# in order to capture permissions, owners, groups and special files
if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('cross', d):
d.appendVarFlag('do_prepare_recipe_sysroot', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
- d.setVarFlag('do_unpack', 'umask', '022')
- d.setVarFlag('do_configure', 'umask', '022')
- d.setVarFlag('do_compile', 'umask', '022')
d.appendVarFlag('do_install', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
d.setVarFlag('do_install', 'fakeroot', '1')
- d.setVarFlag('do_install', 'umask', '022')
d.appendVarFlag('do_package', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
d.setVarFlag('do_package', 'fakeroot', '1')
- d.setVarFlag('do_package', 'umask', '022')
d.setVarFlag('do_package_setscene', 'fakeroot', '1')
d.appendVarFlag('do_package_setscene', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
d.setVarFlag('do_devshell', 'fakeroot', '1')
@@ -596,62 +591,62 @@ python () {
needsrcrev = False
srcuri = d.getVar('SRC_URI')
- for uri in srcuri.split():
- (scheme, _ , path) = bb.fetch.decodeurl(uri)[:3]
+ for uri_string in srcuri.split():
+ uri = bb.fetch.URI(uri_string)
# HTTP/FTP use the wget fetcher
- if scheme in ("http", "https", "ftp"):
+ if uri.scheme in ("http", "https", "ftp"):
d.appendVarFlag('do_fetch', 'depends', ' wget-native:do_populate_sysroot')
# Svn packages should DEPEND on subversion-native
- if scheme == "svn":
+ if uri.scheme == "svn":
needsrcrev = True
d.appendVarFlag('do_fetch', 'depends', ' subversion-native:do_populate_sysroot')
# Git packages should DEPEND on git-native
- elif scheme in ("git", "gitsm"):
+ elif uri.scheme in ("git", "gitsm"):
needsrcrev = True
d.appendVarFlag('do_fetch', 'depends', ' git-native:do_populate_sysroot')
# Mercurial packages should DEPEND on mercurial-native
- elif scheme == "hg":
+ elif uri.scheme == "hg":
needsrcrev = True
d.appendVar("EXTRANATIVEPATH", ' python3-native ')
d.appendVarFlag('do_fetch', 'depends', ' mercurial-native:do_populate_sysroot')
# Perforce packages support SRCREV = "${AUTOREV}"
- elif scheme == "p4":
+ elif uri.scheme == "p4":
needsrcrev = True
# OSC packages should DEPEND on osc-native
- elif scheme == "osc":
+ elif uri.scheme == "osc":
d.appendVarFlag('do_fetch', 'depends', ' osc-native:do_populate_sysroot')
- elif scheme == "npm":
+ elif uri.scheme == "npm":
d.appendVarFlag('do_fetch', 'depends', ' nodejs-native:do_populate_sysroot')
# *.lz4 should DEPEND on lz4-native for unpacking
- if path.endswith('.lz4'):
+ if uri.path.endswith('.lz4'):
d.appendVarFlag('do_unpack', 'depends', ' lz4-native:do_populate_sysroot')
# *.lz should DEPEND on lzip-native for unpacking
- elif path.endswith('.lz'):
+ elif uri.path.endswith('.lz'):
d.appendVarFlag('do_unpack', 'depends', ' lzip-native:do_populate_sysroot')
# *.xz should DEPEND on xz-native for unpacking
- elif path.endswith('.xz') or path.endswith('.txz'):
+ elif uri.path.endswith('.xz') or uri.path.endswith('.txz'):
d.appendVarFlag('do_unpack', 'depends', ' xz-native:do_populate_sysroot')
# .zip should DEPEND on unzip-native for unpacking
- elif path.endswith('.zip') or path.endswith('.jar'):
+ elif uri.path.endswith('.zip') or uri.path.endswith('.jar'):
d.appendVarFlag('do_unpack', 'depends', ' unzip-native:do_populate_sysroot')
# Some rpm files may be compressed internally using xz (for example, rpms from Fedora)
- elif path.endswith('.rpm'):
+ elif uri.path.endswith('.rpm'):
d.appendVarFlag('do_unpack', 'depends', ' xz-native:do_populate_sysroot')
# *.deb should DEPEND on xz-native for unpacking
- elif path.endswith('.deb'):
+ elif uri.path.endswith('.deb'):
d.appendVarFlag('do_unpack', 'depends', ' xz-native:do_populate_sysroot')
if needsrcrev:
diff --git a/poky/meta/classes/buildhistory.bbclass b/poky/meta/classes/buildhistory.bbclass
index daae05614..117a44eaf 100644
--- a/poky/meta/classes/buildhistory.bbclass
+++ b/poky/meta/classes/buildhistory.bbclass
@@ -859,7 +859,7 @@ END
}
python buildhistory_eventhandler() {
- if e.data.getVar('BUILDHISTORY_FEATURES').strip():
+ if (e.data.getVar('BUILDHISTORY_FEATURES') or "").strip():
reset = e.data.getVar("BUILDHISTORY_RESET")
olddir = e.data.getVar("BUILDHISTORY_OLD_DIR")
if isinstance(e, bb.event.BuildStarted):
diff --git a/poky/meta/classes/buildstats.bbclass b/poky/meta/classes/buildstats.bbclass
index 6f8718723..a8ee6e69a 100644
--- a/poky/meta/classes/buildstats.bbclass
+++ b/poky/meta/classes/buildstats.bbclass
@@ -104,14 +104,46 @@ def write_task_data(status, logfile, e, d):
f.write("Status: FAILED \n")
f.write("Ended: %0.2f \n" % e.time)
+def write_host_data(logfile, e, d):
+ import subprocess, os, datetime
+ cmds = d.getVar('BB_LOG_HOST_STAT_CMDS')
+ if cmds is None:
+ d.setVar("BB_LOG_HOST_STAT_ON_INTERVAL", "0")
+ d.setVar("BB_LOG_HOST_STAT_ON_FAILURE", "0")
+ bb.warn("buildstats: Collecting host data failed. Set BB_LOG_HOST_STAT_CMDS=\"command1 ; command2 ; ... \" in conf\/local.conf\n")
+ return
+ path = d.getVar("PATH")
+ opath = d.getVar("BB_ORIGENV", False).getVar("PATH")
+ ospath = os.environ['PATH']
+ os.environ['PATH'] = path + ":" + opath + ":" + ospath
+ with open(logfile, "a") as f:
+ f.write("Event Time: %f\nDate: %s\n" % (e.time, datetime.datetime.now()))
+ for cmd in cmds.split(";"):
+ if len(cmd) == 0:
+ continue
+ try:
+ output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT, timeout=1).decode('utf-8')
+ except (subprocess.CalledProcessError, subprocess.TimeoutExpired, FileNotFoundError) as err:
+ output = "Error running command: %s\n%s\n" % (cmd, err)
+ f.write("%s\n%s\n" % (cmd, output))
+ os.environ['PATH'] = ospath
+
python run_buildstats () {
import bb.build
import bb.event
import time, subprocess, platform
bn = d.getVar('BUILDNAME')
- bsdir = os.path.join(d.getVar('BUILDSTATS_BASE'), bn)
- taskdir = os.path.join(bsdir, d.getVar('PF'))
+ ########################################################################
+ # bitbake fires HeartbeatEvent even before a build has been
+ # triggered, causing BUILDNAME to be None
+ ########################################################################
+ if bn is not None:
+ bsdir = os.path.join(d.getVar('BUILDSTATS_BASE'), bn)
+ taskdir = os.path.join(bsdir, d.getVar('PF'))
+ if isinstance(e, bb.event.HeartbeatEvent) and bb.utils.to_boolean(d.getVar("BB_LOG_HOST_STAT_ON_INTERVAL")):
+ bb.utils.mkdirhier(bsdir)
+ write_host_data(os.path.join(bsdir, "host_stats"), e, d)
if isinstance(e, bb.event.BuildStarted):
########################################################################
@@ -186,10 +218,12 @@ python run_buildstats () {
build_status = os.path.join(bsdir, "build_stats")
with open(build_status, "a") as f:
f.write(d.expand("Failed at: ${PF} at task: %s \n" % e.task))
+ if bb.utils.to_boolean(d.getVar("BB_LOG_HOST_STAT_ON_FAILURE")):
+ write_host_data(build_status, e, d)
}
addhandler run_buildstats
-run_buildstats[eventmask] = "bb.event.BuildStarted bb.event.BuildCompleted bb.build.TaskStarted bb.build.TaskSucceeded bb.build.TaskFailed"
+run_buildstats[eventmask] = "bb.event.BuildStarted bb.event.BuildCompleted bb.event.HeartbeatEvent bb.build.TaskStarted bb.build.TaskSucceeded bb.build.TaskFailed"
python runqueue_stats () {
import buildstats
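A minimal usage sketch for the new host-statistics hooks above, assuming they are driven from conf/local.conf; the command list is only illustrative:
# conf/local.conf (illustrative)
INHERIT += "buildstats"
BB_LOG_HOST_STAT_CMDS = "uptime; free -m; df -h"
BB_LOG_HOST_STAT_ON_INTERVAL = "1"
BB_LOG_HOST_STAT_ON_FAILURE = "1"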
diff --git a/poky/meta/classes/ccache.bbclass b/poky/meta/classes/ccache.bbclass
index f00fafc29..4532894c5 100644
--- a/poky/meta/classes/ccache.bbclass
+++ b/poky/meta/classes/ccache.bbclass
@@ -1,7 +1,5 @@
#
# Usage:
-# - Install ccache package on the host distribution and set up a build directory
-#
# - Enable ccache
# Add the following line to a conffile such as conf/local.conf:
# INHERIT += "ccache"
@@ -51,8 +49,9 @@ python() {
"""
pn = d.getVar('PN')
# quilt-native doesn't need ccache since no c files
- if not (pn in ('ccache-native', 'quilt-native') or
+ if not (bb.data.inherits_class("native", d) or
bb.utils.to_boolean(d.getVar('CCACHE_DISABLE'))):
+ d.appendVar('DEPENDS', ' ccache-native')
d.setVar('CCACHE', 'ccache ')
}
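A minimal usage sketch, assuming ccache is enabled globally; with the hunk above, ccache-native lands in DEPENDS automatically for non-native recipes, and the recipe name below is hypothetical:
# conf/local.conf (illustrative)
INHERIT += "ccache"
# opt a single recipe out if its build misbehaves with ccache
CCACHE_DISABLE_pn-some-recipe = "1"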
diff --git a/poky/meta/classes/cmake.bbclass b/poky/meta/classes/cmake.bbclass
index 7c055e8a3..4af22268b 100644
--- a/poky/meta/classes/cmake.bbclass
+++ b/poky/meta/classes/cmake.bbclass
@@ -187,6 +187,7 @@ cmake_do_configure() {
-DCMAKE_INSTALL_SO_NO_EXE=0 \
-DCMAKE_TOOLCHAIN_FILE=${WORKDIR}/toolchain.cmake \
-DCMAKE_NO_SYSTEM_FROM_IMPORTED=1 \
+ -DCMAKE_EXPORT_NO_PACKAGE_REGISTRY=ON \
${EXTRA_OECMAKE} \
-Wno-dev
}
diff --git a/poky/meta/classes/cve-check.bbclass b/poky/meta/classes/cve-check.bbclass
index d843e7c4a..112ee3379 100644
--- a/poky/meta/classes/cve-check.bbclass
+++ b/poky/meta/classes/cve-check.bbclass
@@ -53,6 +53,16 @@ CVE_CHECK_PN_WHITELIST ?= ""
#
CVE_CHECK_WHITELIST ?= ""
+# Layers to be excluded
+CVE_CHECK_LAYER_EXCLUDELIST ??= ""
+
+# Layers to be included
+CVE_CHECK_LAYER_INCLUDELIST ??= ""
+
+
+# set to "alphabetical" for versions using a single alphabetical character as the increment release
+CVE_VERSION_SUFFIX ??= ""
+
python cve_save_summary_handler () {
import shutil
import datetime
@@ -206,10 +216,11 @@ def check_cves(d, patched_cves):
"""
Connect to the NVD database and find unpatched cves.
"""
- from distutils.version import LooseVersion
+ from oe.cve_check import Version
pn = d.getVar("PN")
real_pv = d.getVar("PV")
+ suffix = d.getVar("CVE_VERSION_SUFFIX")
cves_unpatched = []
# CVE_PRODUCT can contain more than one product (eg. curl/libcurl)
@@ -263,8 +274,8 @@ def check_cves(d, patched_cves):
else:
if operator_start:
try:
- vulnerable_start = (operator_start == '>=' and LooseVersion(pv) >= LooseVersion(version_start))
- vulnerable_start |= (operator_start == '>' and LooseVersion(pv) > LooseVersion(version_start))
+ vulnerable_start = (operator_start == '>=' and Version(pv,suffix) >= Version(version_start,suffix))
+ vulnerable_start |= (operator_start == '>' and Version(pv,suffix) > Version(version_start,suffix))
except:
bb.warn("%s: Failed to compare %s %s %s for %s" %
(product, pv, operator_start, version_start, cve))
@@ -274,8 +285,8 @@ def check_cves(d, patched_cves):
if operator_end:
try:
- vulnerable_end = (operator_end == '<=' and LooseVersion(pv) <= LooseVersion(version_end))
- vulnerable_end |= (operator_end == '<' and LooseVersion(pv) < LooseVersion(version_end))
+ vulnerable_end = (operator_end == '<=' and Version(pv,suffix) <= Version(version_end,suffix) )
+ vulnerable_end |= (operator_end == '<' and Version(pv,suffix) < Version(version_end,suffix) )
except:
bb.warn("%s: Failed to compare %s %s %s for %s" %
(product, pv, operator_end, version_end, cve))
@@ -330,7 +341,20 @@ def cve_write_data(d, patched, unpatched, whitelisted, cve_data):
CVE manifest if enabled.
"""
+
cve_file = d.getVar("CVE_CHECK_LOG")
+ fdir_name = d.getVar("FILE_DIRNAME")
+ layer = fdir_name.split("/")[-3]
+
+ include_layers = d.getVar("CVE_CHECK_LAYER_INCLUDELIST").split()
+ exclude_layers = d.getVar("CVE_CHECK_LAYER_EXCLUDELIST").split()
+
+ if exclude_layers and layer in exclude_layers:
+ return
+
+ if include_layers and layer not in include_layers:
+ return
+
nvd_link = "https://web.nvd.nist.gov/view/vuln/detail?vulnId="
write_string = ""
unpatched_cves = []
@@ -340,6 +364,7 @@ def cve_write_data(d, patched, unpatched, whitelisted, cve_data):
is_patched = cve in patched
if is_patched and (d.getVar("CVE_CHECK_REPORT_PATCHED") != "1"):
continue
+ write_string += "LAYER: %s\n" % layer
write_string += "PACKAGE NAME: %s\n" % d.getVar("PN")
write_string += "PACKAGE VERSION: %s%s\n" % (d.getVar("EXTENDPE"), d.getVar("PV"))
write_string += "CVE: %s\n" % cve
diff --git a/poky/meta/classes/devicetree.bbclass b/poky/meta/classes/devicetree.bbclass
index c772ab2ab..ece883acc 100644
--- a/poky/meta/classes/devicetree.bbclass
+++ b/poky/meta/classes/devicetree.bbclass
@@ -18,7 +18,7 @@ SECTION ?= "bsp"
# device trees built with them are at least GPLv2 (and in some cases dual
# licensed). Default to GPLv2 if the recipe does not specify a license.
LICENSE ?= "GPLv2"
-LIC_FILES_CHKSUM ?= "file://${COMMON_LICENSE_DIR}/GPL-2.0;md5=801f80980d171dd6425610833a22dbe6"
+LIC_FILES_CHKSUM ?= "file://${COMMON_LICENSE_DIR}/GPL-2.0-only;md5=801f80980d171dd6425610833a22dbe6"
INHIBIT_DEFAULT_DEPS = "1"
DEPENDS += "dtc-native"
diff --git a/poky/meta/classes/distutils3-base.bbclass b/poky/meta/classes/distutils3-base.bbclass
index a277d1c7b..302ee8c82 100644
--- a/poky/meta/classes/distutils3-base.bbclass
+++ b/poky/meta/classes/distutils3-base.bbclass
@@ -1,4 +1,5 @@
-DEPENDS += "${@["${PYTHON_PN}-native ${PYTHON_PN}", ""][(d.getVar('PACKAGES') == '')]}"
+DEPENDS_append_class-target = " ${PYTHON_PN}-native ${PYTHON_PN}"
+DEPENDS_append_class-nativesdk = " ${PYTHON_PN}-native ${PYTHON_PN}"
RDEPENDS_${PN} += "${@['', '${PYTHON_PN}-core']['${CLASSOVERRIDE}' == 'class-target']}"
inherit distutils-common-base python3native python3targetconfig
diff --git a/poky/meta/classes/externalsrc.bbclass b/poky/meta/classes/externalsrc.bbclass
index dd0939578..c7b2bf2f4 100644
--- a/poky/meta/classes/externalsrc.bbclass
+++ b/poky/meta/classes/externalsrc.bbclass
@@ -68,6 +68,7 @@ python () {
url_data = fetch.ud[url]
parm = url_data.parm
if (url_data.type == 'file' or
+ url_data.type == 'npmsw' or
'type' in parm and parm['type'] == 'kmeta'):
local_srcuri.append(url)
@@ -190,6 +191,7 @@ def srctree_hash_files(d, srcdir=None):
import shutil
import subprocess
import tempfile
+ import hashlib
s_dir = srcdir or d.getVar('EXTERNALSRC')
git_dir = None
@@ -197,6 +199,10 @@ def srctree_hash_files(d, srcdir=None):
try:
git_dir = os.path.join(s_dir,
subprocess.check_output(['git', '-C', s_dir, 'rev-parse', '--git-dir'], stderr=subprocess.DEVNULL).decode("utf-8").rstrip())
+ top_git_dir = os.path.join(s_dir, subprocess.check_output(['git', '-C', d.getVar("TOPDIR"), 'rev-parse', '--git-dir'],
+ stderr=subprocess.DEVNULL).decode("utf-8").rstrip())
+ if git_dir == top_git_dir:
+ git_dir = None
except subprocess.CalledProcessError:
pass
@@ -210,7 +216,16 @@ def srctree_hash_files(d, srcdir=None):
env = os.environ.copy()
env['GIT_INDEX_FILE'] = tmp_index.name
subprocess.check_output(['git', 'add', '-A', '.'], cwd=s_dir, env=env)
- sha1 = subprocess.check_output(['git', 'write-tree'], cwd=s_dir, env=env).decode("utf-8")
+ git_sha1 = subprocess.check_output(['git', 'write-tree'], cwd=s_dir, env=env).decode("utf-8")
+ submodule_helper = subprocess.check_output(['git', 'submodule--helper', 'list'], cwd=s_dir, env=env).decode("utf-8")
+ for line in submodule_helper.splitlines():
+ module_dir = os.path.join(s_dir, line.rsplit(maxsplit=1)[1])
+ proc = subprocess.Popen(['git', 'add', '-A', '.'], cwd=module_dir, env=env, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
+ proc.communicate()
+ proc = subprocess.Popen(['git', 'write-tree'], cwd=module_dir, env=env, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
+ stdout, _ = proc.communicate()
+ git_sha1 += stdout.decode("utf-8")
+ sha1 = hashlib.sha1(git_sha1.encode("utf-8")).hexdigest()
with open(oe_hash_file, 'w') as fobj:
fobj.write(sha1)
ret = oe_hash_file + ':True'
diff --git a/poky/meta/classes/go.bbclass b/poky/meta/classes/go.bbclass
index 5b26378a4..77ec98dd5 100644
--- a/poky/meta/classes/go.bbclass
+++ b/poky/meta/classes/go.bbclass
@@ -54,7 +54,6 @@ GOTOOLDIR_class-native = "${STAGING_LIBDIR_NATIVE}/go/pkg/tool/${BUILD_GOTUPLE}"
export GOTOOLDIR
export CGO_ENABLED ?= "1"
-export CGO_ENABLED_riscv64 = "0"
export CGO_CFLAGS ?= "${CFLAGS}"
export CGO_CPPFLAGS ?= "${CPPFLAGS}"
export CGO_CXXFLAGS ?= "${CXXFLAGS}"
@@ -116,7 +115,8 @@ go_do_install() {
install -d ${D}${libdir}/go/src/${GO_IMPORT}
tar -C ${S}/src/${GO_IMPORT} -cf - --exclude-vcs --exclude '*.test' --exclude 'testdata' . | \
tar -C ${D}${libdir}/go/src/${GO_IMPORT} --no-same-owner -xf -
- tar -C ${B} -cf - --exclude-vcs pkg | tar -C ${D}${libdir}/go --no-same-owner -xf -
+ tar -C ${B} -cf - --exclude-vcs --exclude '*.test' --exclude 'testdata' pkg | \
+ tar -C ${D}${libdir}/go --no-same-owner -xf -
if [ -n "`ls ${B}/${GO_BUILD_BINDIR}/`" ]; then
install -d ${D}${bindir}
@@ -146,10 +146,10 @@ FILES_${PN}-staticdev = "${libdir}/go/pkg"
INSANE_SKIP_${PN} += "ldflags"
# Add -buildmode=pie to GOBUILDFLAGS to satisfy "textrel" QA checking, but mips
-# doesn't support -buildmode=pie, so skip the QA checking for mips and its
+# doesn't support -buildmode=pie, so skip the QA checking for mips/rv32 and its
# variants.
python() {
- if 'mips' in d.getVar('TARGET_ARCH') or 'riscv' in d.getVar('TARGET_ARCH'):
+ if 'mips' in d.getVar('TARGET_ARCH') or 'riscv32' in d.getVar('TARGET_ARCH'):
d.appendVar('INSANE_SKIP_%s' % d.getVar('PN'), " textrel")
else:
d.appendVar('GOBUILDFLAGS', ' -buildmode=pie')
diff --git a/poky/meta/classes/goarch.bbclass b/poky/meta/classes/goarch.bbclass
index 1099b9576..e4e0ca37b 100644
--- a/poky/meta/classes/goarch.bbclass
+++ b/poky/meta/classes/goarch.bbclass
@@ -47,7 +47,6 @@ COMPATIBLE_HOST_linux-gnux32 = "null"
COMPATIBLE_HOST_linux-muslx32 = "null"
COMPATIBLE_HOST_powerpc = "null"
COMPATIBLE_HOST_powerpc64 = "null"
-COMPATIBLE_HOST_powerpc64le = "null"
COMPATIBLE_HOST_mipsarchn32 = "null"
ARM_INSTRUCTION_SET_armv4 = "arm"
@@ -79,10 +78,10 @@ def go_map_arch(a, d):
return 'mips'
elif a == 'mipsel':
return 'mipsle'
+ elif re.match('p(pc|owerpc)(64le)', a):
+ return 'ppc64le'
elif re.match('p(pc|owerpc)(64)', a):
return 'ppc64'
- elif re.match('p(pc|owerpc)(64el)', a):
- return 'ppc64le'
elif a == 'riscv64':
return 'riscv64'
else:
@@ -99,7 +98,7 @@ def go_map_386(a, f, d):
if ('core2' in f) or ('corei7' in f):
return 'sse2'
else:
- return '387'
+ return 'softfloat'
return ''
def go_map_mips(a, f, d):
@@ -115,5 +114,3 @@ def go_map_os(o, d):
if o.startswith('linux'):
return 'linux'
return o
-
-
diff --git a/poky/meta/classes/gtk-doc.bbclass b/poky/meta/classes/gtk-doc.bbclass
index 7dd662bf8..ef99e63fa 100644
--- a/poky/meta/classes/gtk-doc.bbclass
+++ b/poky/meta/classes/gtk-doc.bbclass
@@ -7,6 +7,7 @@
#
# It should be used in recipes to determine whether gtk-doc based documentation should be built,
# so that qemu use can be avoided when necessary.
+GTKDOC_ENABLED_class-native = "False"
GTKDOC_ENABLED ?= "${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', \
bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'True', 'False', d), 'False', d)}"
diff --git a/poky/meta/classes/image-live.bbclass b/poky/meta/classes/image-live.bbclass
index 9ea5ddc31..1b2183ead 100644
--- a/poky/meta/classes/image-live.bbclass
+++ b/poky/meta/classes/image-live.bbclass
@@ -234,7 +234,7 @@ build_hddimg() {
bberror "${HDDDIR}/rootfs.img rootfs size is greather than or equal to 4GB,"
bberror "and this doesn't work on a FAT filesystem. You can either:"
bberror "1) Reduce the size of rootfs.img, or,"
- bbfatal "2) Use wic, vmdk or vdi instead of hddimg\n"
+ bbfatal "2) Use wic, vmdk,vhd, vhdx or vdi instead of hddimg\n"
fi
fi
diff --git a/poky/meta/classes/image.bbclass b/poky/meta/classes/image.bbclass
index 045f4494c..41fc32917 100644
--- a/poky/meta/classes/image.bbclass
+++ b/poky/meta/classes/image.bbclass
@@ -33,7 +33,7 @@ INHIBIT_DEFAULT_DEPS = "1"
# IMAGE_FEATURES may contain any available package group
IMAGE_FEATURES ?= ""
IMAGE_FEATURES[type] = "list"
-IMAGE_FEATURES[validitems] += "debug-tweaks read-only-rootfs stateless-rootfs empty-root-password allow-empty-password allow-root-login post-install-logging"
+IMAGE_FEATURES[validitems] += "debug-tweaks read-only-rootfs read-only-rootfs-delayed-postinsts stateless-rootfs empty-root-password allow-empty-password allow-root-login post-install-logging"
# Generate companion debugfs?
IMAGE_GEN_DEBUGFS ?= "0"
@@ -180,6 +180,8 @@ IMAGE_LOCALES_ARCHIVE ?= '1'
# aren't yet available.
PSEUDO_PASSWD = "${IMAGE_ROOTFS}:${STAGING_DIR_NATIVE}"
+PSEUDO_IGNORE_PATHS .= ",${WORKDIR}/intercept_scripts,${WORKDIR}/oe-rootfs-repo,${WORKDIR}/sstate-build-image_complete"
+
PACKAGE_EXCLUDE ??= ""
PACKAGE_EXCLUDE[type] = "list"
@@ -248,7 +250,6 @@ fakeroot python do_rootfs () {
}
do_rootfs[dirs] = "${TOPDIR}"
do_rootfs[cleandirs] += "${S} ${IMGDEPLOYDIR}"
-do_rootfs[umask] = "022"
do_rootfs[file-checksums] += "${POSTINST_INTERCEPT_CHECKSUMS}"
addtask rootfs after do_prepare_recipe_sysroot
@@ -261,7 +262,6 @@ fakeroot python do_image () {
execute_pre_post_process(d, pre_process_cmds)
}
do_image[dirs] = "${TOPDIR}"
-do_image[umask] = "022"
addtask do_image after do_rootfs
fakeroot python do_image_complete () {
@@ -272,7 +272,6 @@ fakeroot python do_image_complete () {
execute_pre_post_process(d, post_process_cmds)
}
do_image_complete[dirs] = "${TOPDIR}"
-do_image_complete[umask] = "022"
SSTATETASKS += "do_image_complete"
SSTATE_SKIP_CREATION_task-image-complete = '1'
do_image_complete[sstate-inputdirs] = "${IMGDEPLOYDIR}"
diff --git a/poky/meta/classes/image_types.bbclass b/poky/meta/classes/image_types.bbclass
index 286009057..802869140 100644
--- a/poky/meta/classes/image_types.bbclass
+++ b/poky/meta/classes/image_types.bbclass
@@ -110,7 +110,7 @@ IMAGE_CMD_squashfs-lz4 = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAM
IMAGE_CMD_TAR ?= "tar"
# ignore return code 1 "file changed as we read it" as other tasks(e.g. do_image_wic) may be hardlinking rootfs
-IMAGE_CMD_tar = "${IMAGE_CMD_TAR} --sort=name --numeric-owner -cf ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.tar -C ${IMAGE_ROOTFS} . || [ $? -eq 1 ]"
+IMAGE_CMD_tar = "${IMAGE_CMD_TAR} --sort=name --format=posix --numeric-owner -cf ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.tar -C ${IMAGE_ROOTFS} . || [ $? -eq 1 ]"
do_image_cpio[cleandirs] += "${WORKDIR}/cpio_append"
IMAGE_CMD_cpio () {
@@ -269,7 +269,7 @@ IMAGE_TYPES = " \
# CONVERSION_CMD/DEPENDS.
COMPRESSIONTYPES ?= ""
-CONVERSIONTYPES = "gz bz2 lzma xz lz4 lzo zip zst sum md5sum sha1sum sha224sum sha256sum sha384sum sha512sum bmap u-boot vmdk vdi qcow2 base64 ${COMPRESSIONTYPES}"
+CONVERSIONTYPES = "gz bz2 lzma xz lz4 lzo zip zst sum md5sum sha1sum sha224sum sha256sum sha384sum sha512sum bmap u-boot vmdk vhd vhdx vdi qcow2 base64 ${COMPRESSIONTYPES}"
CONVERSION_CMD_lzma = "lzma -k -f -7 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
CONVERSION_CMD_gz = "gzip -f -9 -n -c --rsyncable ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.gz"
CONVERSION_CMD_bz2 = "pbzip2 -f -k ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
@@ -288,6 +288,8 @@ CONVERSION_CMD_sha512sum = "sha512sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}
CONVERSION_CMD_bmap = "bmaptool create ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} -o ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.bmap"
CONVERSION_CMD_u-boot = "mkimage -A ${UBOOT_ARCH} -O linux -T ramdisk -C none -n ${IMAGE_NAME} -d ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.u-boot"
CONVERSION_CMD_vmdk = "qemu-img convert -O vmdk ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.vmdk"
+CONVERSION_CMD_vhdx = "qemu-img convert -O vhdx -o subformat=dynamic ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.vhdx"
+CONVERSION_CMD_vhd = "qemu-img convert -O vpc -o subformat=fixed ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.vhd"
CONVERSION_CMD_vdi = "qemu-img convert -O vdi ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.vdi"
CONVERSION_CMD_qcow2 = "qemu-img convert -O qcow2 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.qcow2"
CONVERSION_CMD_base64 = "base64 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.base64"
@@ -306,6 +308,8 @@ CONVERSION_DEPENDS_vmdk = "qemu-system-native"
CONVERSION_DEPENDS_vdi = "qemu-system-native"
CONVERSION_DEPENDS_qcow2 = "qemu-system-native"
CONVERSION_DEPENDS_base64 = "coreutils-native"
+CONVERSION_DEPENDS_vhdx = "qemu-system-native"
+CONVERSION_DEPENDS_vhd = "qemu-system-native"
RUNNABLE_IMAGE_TYPES ?= "ext2 ext3 ext4"
RUNNABLE_MACHINE_PATTERNS ?= "qemu"
@@ -313,7 +317,7 @@ RUNNABLE_MACHINE_PATTERNS ?= "qemu"
DEPLOYABLE_IMAGE_TYPES ?= "hddimg iso"
# The IMAGE_TYPES_MASKED variable will be used to mask out from the IMAGE_FSTYPES,
-# images that will not be built at do_rootfs time: vmdk, vdi, qcow2, hddimg, iso, etc.
+# images that will not be built at do_rootfs time: vmdk, vhd, vhdx, vdi, qcow2, hddimg, iso, etc.
IMAGE_TYPES_MASKED ?= ""
# bmap requires python3 to be in the PATH
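A minimal usage sketch, assuming the new conversions are chained onto an existing image type through IMAGE_FSTYPES:
# conf/local.conf (illustrative)
IMAGE_FSTYPES += "wic.vhdx ext4.vhd"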
diff --git a/poky/meta/classes/image_types_wic.bbclass b/poky/meta/classes/image_types_wic.bbclass
index 286e0f5d5..49be1da77 100644
--- a/poky/meta/classes/image_types_wic.bbclass
+++ b/poky/meta/classes/image_types_wic.bbclass
@@ -3,7 +3,7 @@
WICVARS ?= "\
BBLAYERS IMGDEPLOYDIR DEPLOY_DIR_IMAGE FAKEROOTCMD IMAGE_BASENAME IMAGE_EFI_BOOT_FILES IMAGE_BOOT_FILES \
IMAGE_LINK_NAME IMAGE_ROOTFS INITRAMFS_FSTYPES INITRD INITRD_LIVE ISODIR RECIPE_SYSROOT_NATIVE \
- ROOTFS_SIZE STAGING_DATADIR STAGING_DIR STAGING_LIBDIR TARGET_SYS \
+ ROOTFS_SIZE STAGING_DATADIR STAGING_DIR STAGING_LIBDIR TARGET_SYS HOSTTOOLS_DIR \
KERNEL_IMAGETYPE MACHINE INITRAMFS_IMAGE INITRAMFS_IMAGE_BUNDLE INITRAMFS_LINK_NAME APPEND \
ASSUME_PROVIDED PSEUDO_IGNORE_PATHS"
@@ -29,11 +29,17 @@ WIC_CREATE_EXTRA_ARGS ?= ""
IMAGE_CMD_wic () {
out="${IMGDEPLOYDIR}/${IMAGE_NAME}"
build_wic="${WORKDIR}/build-wic"
+ tmp_wic="${WORKDIR}/tmp-wic"
wks="${WKS_FULL_PATH}"
+ if [ -e "$tmp_wic" ]; then
+ # Ensure we don't have any junk leftover from a previously interrupted
+ # do_image_wic execution
+ rm -rf "$tmp_wic"
+ fi
if [ -z "$wks" ]; then
bbfatal "No kickstart files from WKS_FILES were found: ${WKS_FILES}. Please set WKS_FILE or WKS_FILES appropriately."
fi
- BUILDDIR="${TOPDIR}" PSEUDO_UNLOAD=1 wic create "$wks" --vars "${STAGING_DIR}/${MACHINE}/imgdata/" -e "${IMAGE_BASENAME}" -o "$build_wic/" ${WIC_CREATE_EXTRA_ARGS}
+ BUILDDIR="${TOPDIR}" PSEUDO_UNLOAD=1 wic create "$wks" --vars "${STAGING_DIR}/${MACHINE}/imgdata/" -e "${IMAGE_BASENAME}" -o "$build_wic/" -w "$tmp_wic" ${WIC_CREATE_EXTRA_ARGS}
mv "$build_wic/$(basename "${wks%.wks}")"*.direct "$out${IMAGE_NAME_SUFFIX}.wic"
}
IMAGE_CMD_wic[vardepsexclude] = "WKS_FULL_PATH WKS_FILES TOPDIR"
diff --git a/poky/meta/classes/insane.bbclass b/poky/meta/classes/insane.bbclass
index 105d2a5ce..53230fc66 100644
--- a/poky/meta/classes/insane.bbclass
+++ b/poky/meta/classes/insane.bbclass
@@ -27,7 +27,7 @@ WARN_QA ?= " libdir xorg-driver-abi \
infodir build-deps src-uri-bad symlink-to-sysroot multilib \
invalid-packageconfig host-user-contaminated uppercase-pn patch-fuzz \
mime mime-xdg unlisted-pkg-lics unhandled-features-check \
- missing-update-alternatives \
+ missing-update-alternatives native-last \
"
ERROR_QA ?= "dev-so debug-deps dev-deps debug-files arch pkgconfig la \
perms dep-cmp pkgvarcheck perm-config perm-line perm-link \
@@ -1366,6 +1366,37 @@ python () {
d.setVarFlag('do_package_qa', 'rdeptask', '')
for i in issues:
package_qa_handle_error("pkgvarcheck", "%s: Variable %s is set as not being package specific, please fix this." % (d.getVar("FILE"), i), d)
+
+ if 'native-last' not in (d.getVar('INSANE_SKIP') or "").split():
+ for native_class in ['native', 'nativesdk']:
+ if bb.data.inherits_class(native_class, d):
+
+ inherited_classes = d.getVar('__inherit_cache', False) or []
+ needle = os.path.join('classes', native_class)
+
+ bbclassextend = (d.getVar('BBCLASSEXTEND') or '').split()
+ # BBCLASSEXTEND items are always added in the end
+ skip_classes = bbclassextend
+ if bb.data.inherits_class('native', d) or 'native' in bbclassextend:
+ # native also inherits nopackages and relocatable bbclasses
+ skip_classes.extend(['nopackages', 'relocatable'])
+
+ broken_order = []
+ for class_item in reversed(inherited_classes):
+ if needle not in class_item:
+ for extend_item in skip_classes:
+ if os.path.join('classes', '%s.bbclass' % extend_item) in class_item:
+ break
+ else:
+ pn = d.getVar('PN')
+ broken_order.append(os.path.basename(class_item))
+ else:
+ break
+ if broken_order:
+ package_qa_handle_error("native-last", "%s: native/nativesdk class is not inherited last, this can result in unexpected behaviour. "
+ "Classes inherited after native/nativesdk: %s" % (pn, " ".join(broken_order)), d)
+
+
qa_sane = d.getVar("QA_SANE")
if not qa_sane:
bb.fatal("Fatal QA errors found, failing task.")
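A minimal usage sketch, assuming a recipe deliberately inherits another class after native/nativesdk and accepts the consequences; the new check can then be silenced per recipe:
# in the affected recipe (illustrative)
INSANE_SKIP += "native-last"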
diff --git a/poky/meta/classes/kernel-devicetree.bbclass b/poky/meta/classes/kernel-devicetree.bbclass
index 81dda8003..d4f886420 100644
--- a/poky/meta/classes/kernel-devicetree.bbclass
+++ b/poky/meta/classes/kernel-devicetree.bbclass
@@ -9,6 +9,9 @@ FILES_${KERNEL_PACKAGE_NAME}-image-zimage-bundle = "/${KERNEL_IMAGEDEST}/zImage-
# Generate kernel+devicetree bundle
KERNEL_DEVICETREE_BUNDLE ?= "0"
+# dtc flags passed via DTC_FLAGS env variable
+KERNEL_DTC_FLAGS ?= ""
+
normalize_dtb () {
dtb="$1"
if echo $dtb | grep -q '/dts/'; then
@@ -50,6 +53,10 @@ do_configure_append() {
}
do_compile_append() {
+ if [ -n "${KERNEL_DTC_FLAGS}" ]; then
+ export DTC_FLAGS="${KERNEL_DTC_FLAGS}"
+ fi
+
for dtbf in ${KERNEL_DEVICETREE}; do
dtb=`normalize_dtb "$dtbf"`
oe_runmake $dtb CC="${KERNEL_CC} $cc_extra " LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS}
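A minimal usage sketch, assuming a machine configuration wants dtc to emit overlay symbols through the new variable:
# machine configuration (illustrative)
KERNEL_DTC_FLAGS = "-@"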
diff --git a/poky/meta/classes/kernel-fitimage.bbclass b/poky/meta/classes/kernel-fitimage.bbclass
index 9fa302a5c..f5082c93d 100644
--- a/poky/meta/classes/kernel-fitimage.bbclass
+++ b/poky/meta/classes/kernel-fitimage.bbclass
@@ -1,5 +1,7 @@
inherit kernel-uboot kernel-artifact-names uboot-sign
+KERNEL_IMAGETYPE_REPLACEMENT = ""
+
python __anonymous () {
kerneltypes = d.getVar('KERNEL_IMAGETYPES') or ""
if 'fitImage' in kerneltypes.split():
@@ -21,6 +23,8 @@ python __anonymous () {
else:
replacementtype = "zImage"
+ d.setVar("KERNEL_IMAGETYPE_REPLACEMENT", replacementtype)
+
# Override KERNEL_IMAGETYPE_FOR_MAKE variable, which is internal
# to kernel.bbclass . We have to override it, since we pack zImage
# (at least for now) into the fitImage .
@@ -45,6 +49,8 @@ python __anonymous () {
if d.getVar('UBOOT_SIGN_ENABLE') == "1" and d.getVar('UBOOT_DTB_BINARY'):
uboot_pn = d.getVar('PREFERRED_PROVIDER_u-boot') or 'u-boot'
d.appendVarFlag('do_assemble_fitimage', 'depends', ' %s:do_populate_sysroot' % uboot_pn)
+ if d.getVar('INITRAMFS_IMAGE_BUNDLE') == "1":
+ d.appendVarFlag('do_assemble_fitimage_initramfs', 'depends', ' %s:do_populate_sysroot' % uboot_pn)
}
# Options for the device tree compiler passed to mkimage '-D' feature:
@@ -155,7 +161,7 @@ fitimage_emit_section_kernel() {
fi
cat << EOF >> ${1}
- kernel@${2} {
+ kernel-${2} {
description = "Linux kernel";
data = /incbin/("${3}");
type = "kernel";
@@ -164,7 +170,7 @@ fitimage_emit_section_kernel() {
compression = "${4}";
load = <${UBOOT_LOADADDRESS}>;
entry = <${ENTRYPOINT}>;
- hash@1 {
+ hash-1 {
algo = "${kernel_csum}";
};
};
@@ -173,7 +179,7 @@ EOF
if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${FIT_SIGN_INDIVIDUAL}" = "1" -a -n "${kernel_sign_keyname}" ] ; then
sed -i '$ d' ${1}
cat << EOF >> ${1}
- signature@1 {
+ signature-1 {
algo = "${kernel_csum},${kernel_sign_algo}";
key-name-hint = "${kernel_sign_keyname}";
};
@@ -204,14 +210,14 @@ fitimage_emit_section_dtb() {
dtb_loadline="load = <${UBOOT_DTB_LOADADDRESS}>;"
fi
cat << EOF >> ${1}
- fdt@${2} {
+ fdt-${2} {
description = "Flattened Device Tree blob";
data = /incbin/("${3}");
type = "flat_dt";
arch = "${UBOOT_ARCH}";
compression = "none";
${dtb_loadline}
- hash@1 {
+ hash-1 {
algo = "${dtb_csum}";
};
};
@@ -220,7 +226,7 @@ EOF
if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${FIT_SIGN_INDIVIDUAL}" = "1" -a -n "${dtb_sign_keyname}" ] ; then
sed -i '$ d' ${1}
cat << EOF >> ${1}
- signature@1 {
+ signature-1 {
algo = "${dtb_csum},${dtb_sign_algo}";
key-name-hint = "${dtb_sign_keyname}";
};
@@ -230,6 +236,43 @@ EOF
}
#
+# Emit the fitImage ITS u-boot script section
+#
+# $1 ... .its filename
+# $2 ... Image counter
+# $3 ... Path to boot script image
+fitimage_emit_section_boot_script() {
+
+ bootscr_csum="${FIT_HASH_ALG}"
+ bootscr_sign_algo="${FIT_SIGN_ALG}"
+ bootscr_sign_keyname="${UBOOT_SIGN_KEYNAME}"
+
+ cat << EOF >> ${1}
+ bootscr@${2} {
+ description = "U-boot script";
+ data = /incbin/("${3}");
+ type = "script";
+ arch = "${UBOOT_ARCH}";
+ compression = "none";
+ hash@1 {
+ algo = "${bootscr_csum}";
+ };
+ };
+EOF
+
+ if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${FIT_SIGN_INDIVIDUAL}" = "1" -a -n "${bootscr_sign_keyname}" ] ; then
+ sed -i '$ d' ${1}
+ cat << EOF >> ${1}
+ signature@1 {
+ algo = "${bootscr_csum},${bootscr_sign_algo}";
+ key-name-hint = "${bootscr_sign_keyname}";
+ };
+ };
+EOF
+ fi
+}
+
+#
# Emit the fitImage ITS setup section
#
# $1 ... .its filename
@@ -240,7 +283,7 @@ fitimage_emit_section_setup() {
setup_csum="${FIT_HASH_ALG}"
cat << EOF >> ${1}
- setup@${2} {
+ setup-${2} {
description = "Linux setup.bin";
data = /incbin/("${3}");
type = "x86_setup";
@@ -249,7 +292,7 @@ fitimage_emit_section_setup() {
compression = "none";
load = <0x00090000>;
entry = <0x00090000>;
- hash@1 {
+ hash-1 {
algo = "${setup_csum}";
};
};
@@ -278,7 +321,7 @@ fitimage_emit_section_ramdisk() {
fi
cat << EOF >> ${1}
- ramdisk@${2} {
+ ramdisk-${2} {
description = "${INITRAMFS_IMAGE}";
data = /incbin/("${3}");
type = "ramdisk";
@@ -287,7 +330,7 @@ fitimage_emit_section_ramdisk() {
compression = "none";
${ramdisk_loadline}
${ramdisk_entryline}
- hash@1 {
+ hash-1 {
algo = "${ramdisk_csum}";
};
};
@@ -296,7 +339,7 @@ EOF
if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${FIT_SIGN_INDIVIDUAL}" = "1" -a -n "${ramdisk_sign_keyname}" ] ; then
sed -i '$ d' ${1}
cat << EOF >> ${1}
- signature@1 {
+ signature-1 {
algo = "${ramdisk_csum},${ramdisk_sign_algo}";
key-name-hint = "${ramdisk_sign_keyname}";
};
@@ -312,8 +355,9 @@ EOF
# $2 ... Linux kernel ID
# $3 ... DTB image name
# $4 ... ramdisk ID
-# $5 ... config ID
-# $6 ... default flag
+# $5 ... u-boot script ID
+# $6 ... config ID
+# $7 ... default flag
fitimage_emit_section_config() {
conf_csum="${FIT_HASH_ALG}"
@@ -326,16 +370,18 @@ fitimage_emit_section_config() {
kernel_id="${2}"
dtb_image="${3}"
ramdisk_id="${4}"
- config_id="${5}"
- default_flag="${6}"
+ bootscr_id="${5}"
+ config_id="${6}"
+ default_flag="${7}"
# Test if we have any DTBs at all
sep=""
conf_desc=""
- conf_node="conf@"
+ conf_node="conf-"
kernel_line=""
fdt_line=""
ramdisk_line=""
+ bootscr_line=""
setup_line=""
default_line=""
@@ -350,33 +396,39 @@ fitimage_emit_section_config() {
if [ -n "${kernel_id}" ]; then
conf_desc="Linux kernel"
sep=", "
- kernel_line="kernel = \"kernel@${kernel_id}\";"
+ kernel_line="kernel = \"kernel-${kernel_id}\";"
fi
if [ -n "${dtb_image}" ]; then
conf_desc="${conf_desc}${sep}FDT blob"
sep=", "
- fdt_line="fdt = \"fdt@${dtb_image}\";"
+ fdt_line="fdt = \"fdt-${dtb_image}\";"
fi
if [ -n "${ramdisk_id}" ]; then
conf_desc="${conf_desc}${sep}ramdisk"
sep=", "
- ramdisk_line="ramdisk = \"ramdisk@${ramdisk_id}\";"
+ ramdisk_line="ramdisk = \"ramdisk-${ramdisk_id}\";"
+ fi
+
+ if [ -n "${bootscr_id}" ]; then
+ conf_desc="${conf_desc}${sep}u-boot script"
+ sep=", "
+ bootscr_line="bootscr = \"bootscr@${bootscr_id}\";"
fi
if [ -n "${config_id}" ]; then
conf_desc="${conf_desc}${sep}setup"
- setup_line="setup = \"setup@${config_id}\";"
+ setup_line="setup = \"setup-${config_id}\";"
fi
if [ "${default_flag}" = "1" ]; then
# default node is selected based on dtb ID if it is present,
# otherwise its selected based on kernel ID
if [ -n "${dtb_image}" ]; then
- default_line="default = \"conf@${dtb_image}\";"
+ default_line="default = \"conf-${dtb_image}\";"
else
- default_line="default = \"conf@${kernel_id}\";"
+ default_line="default = \"conf-${kernel_id}\";"
fi
fi
@@ -387,8 +439,9 @@ fitimage_emit_section_config() {
${kernel_line}
${fdt_line}
${ramdisk_line}
+ ${bootscr_line}
${setup_line}
- hash@1 {
+ hash-1 {
algo = "${conf_csum}";
};
EOF
@@ -413,6 +466,11 @@ EOF
sep=", "
fi
+ if [ -n "${bootscr_id}" ]; then
+ sign_line="${sign_line}${sep}\"bootscr\""
+ sep=", "
+ fi
+
if [ -n "${config_id}" ]; then
sign_line="${sign_line}${sep}\"setup\""
fi
@@ -420,7 +478,7 @@ EOF
sign_line="${sign_line};"
cat << EOF >> ${its_file}
- signature@1 {
+ signature-1 {
algo = "${conf_csum},${conf_sign_algo}";
key-name-hint = "${conf_sign_keyname}";
${sign_line}
@@ -445,6 +503,7 @@ fitimage_assemble() {
DTBS=""
ramdiskcount=${3}
setupcount=""
+ bootscr_id=""
rm -f ${1} arch/${ARCH}/boot/${2}
fitimage_emit_fit_header ${1}
@@ -455,7 +514,22 @@ fitimage_assemble() {
fitimage_emit_section_maint ${1} imagestart
uboot_prep_kimage
- fitimage_emit_section_kernel ${1} "${kernelcount}" linux.bin "${linux_comp}"
+
+ if [ "${INITRAMFS_IMAGE_BUNDLE}" = "1" ]; then
+ initramfs_bundle_path="arch/"${UBOOT_ARCH}"/boot/"${KERNEL_IMAGETYPE_REPLACEMENT}".initramfs"
+ if [ -e "${initramfs_bundle_path}" ]; then
+
+ #
+ # Include the kernel/rootfs bundle.
+ #
+
+ fitimage_emit_section_kernel ${1} "${kernelcount}" "${initramfs_bundle_path}" "${linux_comp}"
+ else
+ bbwarn "${initramfs_bundle_path} not found."
+ fi
+ else
+ fitimage_emit_section_kernel ${1} "${kernelcount}" linux.bin "${linux_comp}"
+ fi
#
# Step 2: Prepare a DTB image section
@@ -489,7 +563,21 @@ fitimage_assemble() {
fi
#
- # Step 3: Prepare a setup section. (For x86)
+ # Step 3: Prepare a u-boot script section
+ #
+
+ if [ -n "${UBOOT_ENV}" ] && [ -d "${STAGING_DIR_HOST}/boot" ]; then
+ if [ -e "${STAGING_DIR_HOST}/boot/${UBOOT_ENV_BINARY}" ]; then
+ cp ${STAGING_DIR_HOST}/boot/${UBOOT_ENV_BINARY} ${B}
+ bootscr_id="${UBOOT_ENV_BINARY}"
+ fitimage_emit_section_boot_script ${1} "${bootscr_id}" ${UBOOT_ENV_BINARY}
+ else
+ bbwarn "${STAGING_DIR_HOST}/boot/${UBOOT_ENV_BINARY} not found."
+ fi
+ fi
+
+ #
+ # Step 4: Prepare a setup section. (For x86)
#
if [ -e arch/${ARCH}/boot/setup.bin ]; then
setupcount=1
@@ -497,9 +585,9 @@ fitimage_assemble() {
fi
#
- # Step 4: Prepare a ramdisk section.
+ # Step 5: Prepare a ramdisk section.
#
- if [ "x${ramdiskcount}" = "x1" ] ; then
+ if [ "x${ramdiskcount}" = "x1" ] && [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ]; then
# Find and use the first initramfs image archive type we find
for img in cpio.lz4 cpio.lzo cpio.lzma cpio.xz cpio.gz ext2.gz cpio; do
initramfs_path="${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.${img}"
@@ -520,13 +608,15 @@ fitimage_assemble() {
fi
#
- # Step 5: Prepare a configurations section
+ # Step 6: Prepare a configurations section
#
fitimage_emit_section_maint ${1} confstart
# kernel-fitimage.bbclass currently only supports a single kernel (no less or
# more) to be added to the FIT image along with 0 or more device trees and
# 0 or 1 ramdisk.
+ # It is also possible to include an initramfs bundle (kernel and rootfs in one binary)
+ # When the initramfs bundle is used ramdisk is disabled.
# If a device tree is to be part of the FIT image, then select
# the default configuration to be used is based on the dtbcount. If there is
# no dtb present then select the default configuration to be based on
@@ -536,15 +626,15 @@ fitimage_assemble() {
for DTB in ${DTBS}; do
dtb_ext=${DTB##*.}
if [ "${dtb_ext}" = "dtbo" ]; then
- fitimage_emit_section_config ${1} "" "${DTB}" "" "" "`expr ${i} = ${dtbcount}`"
+ fitimage_emit_section_config ${1} "" "${DTB}" "" "${bootscr_id}" "" "`expr ${i} = ${dtbcount}`"
else
- fitimage_emit_section_config ${1} "${kernelcount}" "${DTB}" "${ramdiskcount}" "${setupcount}" "`expr ${i} = ${dtbcount}`"
+ fitimage_emit_section_config ${1} "${kernelcount}" "${DTB}" "${ramdiskcount}" "${bootscr_id}" "${setupcount}" "`expr ${i} = ${dtbcount}`"
fi
i=`expr ${i} + 1`
done
else
defaultconfigcount=1
- fitimage_emit_section_config ${1} "${kernelcount}" "" "${ramdiskcount}" "${setupcount}" "${defaultconfigcount}"
+ fitimage_emit_section_config ${1} "${kernelcount}" "" "${ramdiskcount}" "${bootscr_id}" "${setupcount}" "${defaultconfigcount}"
fi
fitimage_emit_section_maint ${1} sectend
@@ -552,7 +642,7 @@ fitimage_assemble() {
fitimage_emit_section_maint ${1} fitend
#
- # Step 6: Assemble the image
+ # Step 7: Assemble the image
#
${UBOOT_MKIMAGE} \
${@'-D "${UBOOT_MKIMAGE_DTCOPTS}"' if len('${UBOOT_MKIMAGE_DTCOPTS}') else ''} \
@@ -560,7 +650,7 @@ fitimage_assemble() {
arch/${ARCH}/boot/${2}
#
- # Step 7: Sign the image and add public key to U-Boot dtb
+ # Step 8: Sign the image and add public key to U-Boot dtb
#
if [ "x${UBOOT_SIGN_ENABLE}" = "x1" ] ; then
add_key_to_u_boot=""
@@ -592,7 +682,11 @@ do_assemble_fitimage_initramfs() {
if echo ${KERNEL_IMAGETYPES} | grep -wq "fitImage" && \
test -n "${INITRAMFS_IMAGE}" ; then
cd ${B}
- fitimage_assemble fit-image-${INITRAMFS_IMAGE}.its fitImage-${INITRAMFS_IMAGE} 1
+ if [ "${INITRAMFS_IMAGE_BUNDLE}" = "1" ]; then
+ fitimage_assemble fit-image-${INITRAMFS_IMAGE}.its fitImage ""
+ else
+ fitimage_assemble fit-image-${INITRAMFS_IMAGE}.its fitImage-${INITRAMFS_IMAGE} 1
+ fi
fi
}
@@ -631,22 +725,27 @@ kernel_do_deploy[vardepsexclude] = "DATETIME"
kernel_do_deploy_append() {
# Update deploy directory
if echo ${KERNEL_IMAGETYPES} | grep -wq "fitImage"; then
- echo "Copying fit-image.its source file..."
- install -m 0644 ${B}/fit-image.its "$deployDir/fitImage-its-${KERNEL_FIT_NAME}.its"
- ln -snf fitImage-its-${KERNEL_FIT_NAME}.its "$deployDir/fitImage-its-${KERNEL_FIT_LINK_NAME}"
- echo "Copying linux.bin file..."
- install -m 0644 ${B}/linux.bin $deployDir/fitImage-linux.bin-${KERNEL_FIT_NAME}.bin
- ln -snf fitImage-linux.bin-${KERNEL_FIT_NAME}.bin "$deployDir/fitImage-linux.bin-${KERNEL_FIT_LINK_NAME}"
+ if [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ]; then
+ echo "Copying fit-image.its source file..."
+ install -m 0644 ${B}/fit-image.its "$deployDir/fitImage-its-${KERNEL_FIT_NAME}.its"
+ ln -snf fitImage-its-${KERNEL_FIT_NAME}.its "$deployDir/fitImage-its-${KERNEL_FIT_LINK_NAME}"
+
+ echo "Copying linux.bin file..."
+ install -m 0644 ${B}/linux.bin $deployDir/fitImage-linux.bin-${KERNEL_FIT_NAME}.bin
+ ln -snf fitImage-linux.bin-${KERNEL_FIT_NAME}.bin "$deployDir/fitImage-linux.bin-${KERNEL_FIT_LINK_NAME}"
+ fi
if [ -n "${INITRAMFS_IMAGE}" ]; then
echo "Copying fit-image-${INITRAMFS_IMAGE}.its source file..."
install -m 0644 ${B}/fit-image-${INITRAMFS_IMAGE}.its "$deployDir/fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.its"
ln -snf fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.its "$deployDir/fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_LINK_NAME}"
- echo "Copying fitImage-${INITRAMFS_IMAGE} file..."
- install -m 0644 ${B}/arch/${ARCH}/boot/fitImage-${INITRAMFS_IMAGE} "$deployDir/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.bin"
- ln -snf fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.bin "$deployDir/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_LINK_NAME}"
+ if [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ]; then
+ echo "Copying fitImage-${INITRAMFS_IMAGE} file..."
+ install -m 0644 ${B}/arch/${ARCH}/boot/fitImage-${INITRAMFS_IMAGE} "$deployDir/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.bin"
+ ln -snf fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.bin "$deployDir/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_LINK_NAME}"
+ fi
fi
if [ "${UBOOT_SIGN_ENABLE}" = "1" -a -n "${UBOOT_DTB_BINARY}" ] ; then
# UBOOT_DTB_IMAGE is a realfile, but we can't use
@@ -656,3 +755,18 @@ kernel_do_deploy_append() {
fi
fi
}
+
+# The function below performs the following in case of initramfs bundles:
+# - Removes do_assemble_fitimage. FIT generation is done through
+# do_assemble_fitimage_initramfs. do_assemble_fitimage is not needed
+# and should not be part of the tasks to be executed.
+# - Since do_generate_rsa_keys is inserted by default
+# between do_compile and do_assemble_fitimage, this is
+# not suitable in case of initramfs bundles. do_generate_rsa_keys
+# should be between do_bundle_initramfs and do_assemble_fitimage_initramfs.
+python () {
+ if d.getVar('INITRAMFS_IMAGE_BUNDLE') == "1":
+ bb.build.deltask('do_assemble_fitimage', d)
+ bb.build.deltask('generate_rsa_keys', d)
+ bb.build.addtask('generate_rsa_keys', 'do_assemble_fitimage_initramfs', 'do_bundle_initramfs', d)
+}
\ No newline at end of file
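A minimal usage sketch for the initramfs-bundle path added above, assuming a machine or local configuration along these lines; the initramfs image name is an assumption:
# machine or local configuration (illustrative)
KERNEL_IMAGETYPE = "fitImage"
KERNEL_CLASSES = " kernel-fitimage "
# build the kernel and the initramfs rootfs into a single fitImage
INITRAMFS_IMAGE = "core-image-minimal-initramfs"
INITRAMFS_IMAGE_BUNDLE = "1"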
diff --git a/poky/meta/classes/kernel.bbclass b/poky/meta/classes/kernel.bbclass
index 2f1006ddf..8693ab86b 100644
--- a/poky/meta/classes/kernel.bbclass
+++ b/poky/meta/classes/kernel.bbclass
@@ -415,9 +415,23 @@ kernel_do_install() {
#
install -d ${D}/${KERNEL_IMAGEDEST}
install -d ${D}/boot
+
+ #
+ # When including an initramfs bundle inside a FIT image, the fitImage is created after the install task
+ # by do_assemble_fitimage_initramfs.
+ # This happens after the generation of the initramfs bundle (done by do_bundle_initramfs).
+ # So, at the level of the install task we should not try to install the fitImage. fitImage is still not
+ # generated yet.
+ # After the generation of the fitImage, the deploy task copies the fitImage from the build directory to
+ # the deploy folder.
+ #
+
for imageType in ${KERNEL_IMAGETYPES} ; do
- install -m 0644 ${KERNEL_OUTPUT_DIR}/$imageType ${D}/${KERNEL_IMAGEDEST}/$imageType-${KERNEL_VERSION}
+ if [ $imageType != "fitImage" ] || [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ] ; then
+ install -m 0644 ${KERNEL_OUTPUT_DIR}/$imageType ${D}/${KERNEL_IMAGEDEST}/$imageType-${KERNEL_VERSION}
+ fi
done
+
install -m 0644 System.map ${D}/boot/System.map-${KERNEL_VERSION}
install -m 0644 .config ${D}/boot/config-${KERNEL_VERSION}
install -m 0644 vmlinux ${D}/boot/vmlinux-${KERNEL_VERSION}
@@ -745,7 +759,7 @@ kernel_do_deploy() {
fi
if [ ! -z "${INITRAMFS_IMAGE}" -a x"${INITRAMFS_IMAGE_BUNDLE}" = x1 ]; then
- for imageType in ${KERNEL_IMAGETYPES} ; do
+ for imageType in ${KERNEL_IMAGETYPE_FOR_MAKE} ; do
if [ "$imageType" = "fitImage" ] ; then
continue
fi
diff --git a/poky/meta/classes/license.bbclass b/poky/meta/classes/license.bbclass
index dc9111834..bcea0b3cb 100644
--- a/poky/meta/classes/license.bbclass
+++ b/poky/meta/classes/license.bbclass
@@ -252,16 +252,9 @@ def return_spdx(d, license):
def canonical_license(d, license):
"""
Return the canonical (SPDX) form of the license if available (so GPLv3
- becomes GPL-3.0), for the license named 'X+', return canonical form of
- 'X' if available and the tailing '+' (so GPLv3+ becomes GPL-3.0+),
- or the passed license if there is no canonical form.
+ becomes GPL-3.0) or the passed license if there is no canonical form.
"""
- lic = d.getVarFlag('SPDXLICENSEMAP', license) or ""
- if not lic and license.endswith('+'):
- lic = d.getVarFlag('SPDXLICENSEMAP', license.rstrip('+'))
- if lic:
- lic += '+'
- return lic or license
+ return d.getVarFlag('SPDXLICENSEMAP', license) or license
def available_licenses(d):
"""
@@ -288,6 +281,12 @@ def expand_wildcard_licenses(d, wildcard_licenses):
wildcards from SPDXLICENSEMAP flags and AVAILABLE_LICENSES.
"""
import fnmatch
+
+ # Assume if we're passed "GPLv3" or "*GPLv3" it means -or-later as well
+ for lic in wildcard_licenses[:]:
+ if not lic.endswith(("-or-later", "-only", "*")):
+ wildcard_licenses.append(lic + "+")
+
licenses = wildcard_licenses[:]
spdxmapkeys = d.getVarFlags('SPDXLICENSEMAP').keys()
for wld_lic in wildcard_licenses:
diff --git a/poky/meta/classes/license_image.bbclass b/poky/meta/classes/license_image.bbclass
index 119c8dfc8..c96b032eb 100644
--- a/poky/meta/classes/license_image.bbclass
+++ b/poky/meta/classes/license_image.bbclass
@@ -40,7 +40,6 @@ def write_license_files(d, license_manifest, pkg_dic, rootfs=True):
import stat
bad_licenses = (d.getVar("INCOMPATIBLE_LICENSE") or "").split()
- bad_licenses = [canonical_license(d, l) for l in bad_licenses]
bad_licenses = expand_wildcard_licenses(d, bad_licenses)
whitelist = []
@@ -210,7 +209,8 @@ def license_deployed_manifest(d):
os.unlink(lic_manifest_symlink_dir)
# create the image dir symlink
- os.symlink(lic_manifest_dir, lic_manifest_symlink_dir)
+ if lic_manifest_dir != lic_manifest_symlink_dir:
+ os.symlink(lic_manifest_dir, lic_manifest_symlink_dir)
def get_deployed_dependencies(d):
"""
@@ -220,9 +220,10 @@ def get_deployed_dependencies(d):
deploy = {}
# Get all the dependencies for the current task (rootfs).
taskdata = d.getVar("BB_TASKDEPDATA", False)
+ pn = d.getVar("PN", True)
depends = list(set([dep[0] for dep
in list(taskdata.values())
- if not dep[0].endswith("-native")]))
+ if not dep[0].endswith("-native") and not dep[0] == pn]))
# To verify what was deployed it checks the rootfs dependencies against
# the SSTATE_MANIFESTS for "deploy" task.
diff --git a/poky/meta/classes/linuxloader.bbclass b/poky/meta/classes/linuxloader.bbclass
index b161c51a5..30925ac87 100644
--- a/poky/meta/classes/linuxloader.bbclass
+++ b/poky/meta/classes/linuxloader.bbclass
@@ -9,8 +9,8 @@ def get_musl_loader_arch(d):
ldso_arch = "mips${ABIEXTENSION}${MIPSPKGSFX_BYTE}${MIPSPKGSFX_R6}${MIPSPKGSFX_ENDIAN}${@['', '-sf'][d.getVar('TARGET_FPU') == 'soft']}"
elif targetarch == "powerpc":
ldso_arch = "powerpc${@['', '-sf'][d.getVar('TARGET_FPU') == 'soft']}"
- elif targetarch == "powerpc64":
- ldso_arch = "powerpc64"
+ elif targetarch.startswith("powerpc64"):
+ ldso_arch = "powerpc64${@bb.utils.contains('TUNE_FEATURES', 'bigendian', '', 'le', d)}"
elif targetarch == "x86_64":
ldso_arch = "x86_64"
elif re.search("i.86", targetarch):
diff --git a/poky/meta/classes/meson.bbclass b/poky/meta/classes/meson.bbclass
index 83aa854b7..a7644e70c 100644
--- a/poky/meta/classes/meson.bbclass
+++ b/poky/meta/classes/meson.bbclass
@@ -99,6 +99,8 @@ readelf = ${@meson_array('READELF', d)}
pkgconfig = 'pkg-config'
llvm-config = 'llvm-config${LLVMVERSION}'
cups-config = 'cups-config'
+g-ir-scanner = '${STAGING_BINDIR}/g-ir-scanner-wrapper'
+g-ir-compiler = '${STAGING_BINDIR}/g-ir-compiler-wrapper'
[properties]
needs_exe_wrapper = true
diff --git a/poky/meta/classes/native.bbclass b/poky/meta/classes/native.bbclass
index 08106e345..a0838e41b 100644
--- a/poky/meta/classes/native.bbclass
+++ b/poky/meta/classes/native.bbclass
@@ -5,20 +5,12 @@ inherit relocatable
# no need for them to be a direct target of 'world'
EXCLUDE_FROM_WORLD = "1"
-PACKAGES = ""
-PACKAGES_class-native = ""
-PACKAGES_DYNAMIC = ""
-PACKAGES_DYNAMIC_class-native = ""
PACKAGE_ARCH = "${BUILD_ARCH}"
# used by cmake class
OECMAKE_RPATH = "${libdir}"
OECMAKE_RPATH_class-native = "${libdir}"
-# When this class has packaging enabled, setting
-# RPROVIDES becomes unnecessary.
-RPROVIDES = "${PN}"
-
TARGET_ARCH = "${BUILD_ARCH}"
TARGET_OS = "${BUILD_OS}"
TARGET_VENDOR = "${BUILD_VENDOR}"
@@ -138,7 +130,7 @@ python native_virtclass_handler () {
if "native" not in classextend:
return
- def map_dependencies(varname, d, suffix = ""):
+ def map_dependencies(varname, d, suffix = "", selfref=True):
if suffix:
varname = varname + "_" + suffix
deps = d.getVar(varname)
@@ -148,22 +140,25 @@ python native_virtclass_handler () {
newdeps = []
for dep in deps:
if dep == pn:
- continue
+ if not selfref:
+ continue
+ newdeps.append(dep)
elif "-cross-" in dep:
newdeps.append(dep.replace("-cross", "-native"))
elif not dep.endswith("-native"):
- newdeps.append(dep + "-native")
+ newdeps.append(dep.replace("-native", "") + "-native")
else:
newdeps.append(dep)
- d.setVar(varname, " ".join(newdeps))
+ d.setVar(varname, " ".join(newdeps), parsing=True)
- map_dependencies("DEPENDS", e.data)
- for pkg in [e.data.getVar("PN"), "", "${PN}"]:
+ map_dependencies("DEPENDS", e.data, selfref=False)
+ for pkg in e.data.getVar("PACKAGES", False).split():
map_dependencies("RDEPENDS", e.data, pkg)
map_dependencies("RRECOMMENDS", e.data, pkg)
map_dependencies("RSUGGESTS", e.data, pkg)
map_dependencies("RPROVIDES", e.data, pkg)
map_dependencies("RREPLACES", e.data, pkg)
+ map_dependencies("PACKAGES", e.data)
provides = e.data.getVar("PROVIDES")
nprovides = []
diff --git a/poky/meta/classes/npm.bbclass b/poky/meta/classes/npm.bbclass
index 068032a1e..55a6985fb 100644
--- a/poky/meta/classes/npm.bbclass
+++ b/poky/meta/classes/npm.bbclass
@@ -17,8 +17,10 @@
# NPM_INSTALL_DEV:
# Set to 1 to also install devDependencies.
+inherit python3native
+
DEPENDS_prepend = "nodejs-native "
-RDEPENDS_${PN}_prepend = "nodejs "
+RDEPENDS_${PN}_append_class-target = " nodejs"
NPM_INSTALL_DEV ?= "0"
@@ -130,11 +132,17 @@ python npm_do_configure() {
cached_manifest.pop("dependencies", None)
cached_manifest.pop("devDependencies", None)
- with open(orig_shrinkwrap_file, "r") as f:
- orig_shrinkwrap = json.load(f)
+ has_shrinkwrap_file = True
+
+ try:
+ with open(orig_shrinkwrap_file, "r") as f:
+ orig_shrinkwrap = json.load(f)
+ except IOError:
+ has_shrinkwrap_file = False
- cached_shrinkwrap = copy.deepcopy(orig_shrinkwrap)
- cached_shrinkwrap.pop("dependencies", None)
+ if has_shrinkwrap_file:
+ cached_shrinkwrap = copy.deepcopy(orig_shrinkwrap)
+ cached_shrinkwrap.pop("dependencies", None)
# Manage the dependencies
progress = OutOfProgressHandler(d, r"^(\d+)/(\d+)$")
@@ -165,8 +173,10 @@ python npm_do_configure() {
progress.write("%d/%d" % (progress_done, progress_total))
dev = bb.utils.to_boolean(d.getVar("NPM_INSTALL_DEV"), False)
- foreach_dependencies(orig_shrinkwrap, _count_dependency, dev)
- foreach_dependencies(orig_shrinkwrap, _cache_dependency, dev)
+
+ if has_shrinkwrap_file:
+ foreach_dependencies(orig_shrinkwrap, _count_dependency, dev)
+ foreach_dependencies(orig_shrinkwrap, _cache_dependency, dev)
# Configure the main package
with tempfile.TemporaryDirectory() as tmpdir:
@@ -181,16 +191,19 @@ python npm_do_configure() {
cached_manifest[depkey] = {}
cached_manifest[depkey][name] = version
- _update_manifest("dependencies")
+ if has_shrinkwrap_file:
+ _update_manifest("dependencies")
if dev:
- _update_manifest("devDependencies")
+ if has_shrinkwrap_file:
+ _update_manifest("devDependencies")
with open(cached_manifest_file, "w") as f:
json.dump(cached_manifest, f, indent=2)
- with open(cached_shrinkwrap_file, "w") as f:
- json.dump(cached_shrinkwrap, f, indent=2)
+ if has_shrinkwrap_file:
+ with open(cached_shrinkwrap_file, "w") as f:
+ json.dump(cached_shrinkwrap, f, indent=2)
}
python npm_do_compile() {
@@ -237,9 +250,7 @@ python npm_do_compile() {
sysroot = d.getVar("RECIPE_SYSROOT_NATIVE")
nodedir = os.path.join(sysroot, d.getVar("prefix_native").strip("/"))
configs.append(("nodedir", nodedir))
- bindir = os.path.join(sysroot, d.getVar("bindir_native").strip("/"))
- pythondir = os.path.join(bindir, "python-native", "python")
- configs.append(("python", pythondir))
+ configs.append(("python", d.getVar("PYTHON")))
# Add node-pre-gyp configuration
args.append(("target_arch", d.getVar("NPM_ARCH")))
diff --git a/poky/meta/classes/package.bbclass b/poky/meta/classes/package.bbclass
index 247bdc7bb..e3f0a7060 100644
--- a/poky/meta/classes/package.bbclass
+++ b/poky/meta/classes/package.bbclass
@@ -807,10 +807,16 @@ python package_do_split_locales() {
python perform_packagecopy () {
import subprocess
+ import shutil
dest = d.getVar('D')
dvar = d.getVar('PKGD')
+ # Remove ${D}/sysroot-only if present
+ sysroot_only = os.path.join(dest, 'sysroot-only')
+ if cpath.exists(sysroot_only) and cpath.isdir(sysroot_only):
+ shutil.rmtree(sysroot_only)
+
# Start by package population by taking a copy of the installed
# files to operate on
# Preserve sparse files and hard links
@@ -1704,7 +1710,7 @@ if [ x"$D" = "x" ]; then
fi
}
-RPMDEPS = "${STAGING_LIBDIR_NATIVE}/rpm/rpmdeps --alldeps"
+RPMDEPS = "${STAGING_LIBDIR_NATIVE}/rpm/rpmdeps --alldeps --define '__font_provides %{nil}'"
# Collect perfile run-time dependency metadata
# Output:
@@ -2446,6 +2452,7 @@ python do_packagedata () {
bb.build.exec_func("packagedata_translate_pr_autoinc", d)
}
+do_packagedata[cleandirs] += "${WORKDIR}/pkgdata-pdata-input"
# Translate the EXTENDPRAUTO and AUTOINC to the final values
packagedata_translate_pr_autoinc() {
diff --git a/poky/meta/classes/package_deb.bbclass b/poky/meta/classes/package_deb.bbclass
index cb723fc1d..b3d8ce330 100644
--- a/poky/meta/classes/package_deb.bbclass
+++ b/poky/meta/classes/package_deb.bbclass
@@ -314,7 +314,6 @@ python do_package_write_deb () {
}
do_package_write_deb[dirs] = "${PKGWRITEDIRDEB}"
do_package_write_deb[cleandirs] = "${PKGWRITEDIRDEB}"
-do_package_write_deb[umask] = "022"
do_package_write_deb[depends] += "${@oe.utils.build_depends_string(d.getVar('PACKAGE_WRITE_DEPS'), 'do_populate_sysroot')}"
addtask package_write_deb after do_packagedata do_package
diff --git a/poky/meta/classes/package_ipk.bbclass b/poky/meta/classes/package_ipk.bbclass
index 79cb36c51..600b3ac90 100644
--- a/poky/meta/classes/package_ipk.bbclass
+++ b/poky/meta/classes/package_ipk.bbclass
@@ -4,6 +4,7 @@ IMAGE_PKGTYPE ?= "ipk"
IPKGCONF_TARGET = "${WORKDIR}/opkg.conf"
IPKGCONF_SDK = "${WORKDIR}/opkg-sdk.conf"
+IPKGCONF_SDK_TARGET = "${WORKDIR}/opkg-sdk-target.conf"
PKGWRITEDIRIPK = "${WORKDIR}/deploy-ipks"
@@ -272,7 +273,6 @@ python do_package_write_ipk () {
}
do_package_write_ipk[dirs] = "${PKGWRITEDIRIPK}"
do_package_write_ipk[cleandirs] = "${PKGWRITEDIRIPK}"
-do_package_write_ipk[umask] = "022"
do_package_write_ipk[depends] += "${@oe.utils.build_depends_string(d.getVar('PACKAGE_WRITE_DEPS'), 'do_populate_sysroot')}"
addtask package_write_ipk after do_packagedata do_package
diff --git a/poky/meta/classes/package_rpm.bbclass b/poky/meta/classes/package_rpm.bbclass
index 53b4700cd..84a9a6dd1 100644
--- a/poky/meta/classes/package_rpm.bbclass
+++ b/poky/meta/classes/package_rpm.bbclass
@@ -300,13 +300,13 @@ python write_specfile () {
srccustomtagschunk = get_package_additional_metadata("rpm", localdata)
srcdepends = d.getVar('DEPENDS')
- srcrdepends = []
- srcrrecommends = []
- srcrsuggests = []
- srcrprovides = []
- srcrreplaces = []
- srcrconflicts = []
- srcrobsoletes = []
+ srcrdepends = ""
+ srcrrecommends = ""
+ srcrsuggests = ""
+ srcrprovides = ""
+ srcrreplaces = ""
+ srcrconflicts = ""
+ srcrobsoletes = ""
srcrpreinst = []
srcrpostinst = []
@@ -365,13 +365,13 @@ python write_specfile () {
# Map the dependencies into their final form
mapping_rename_hook(localdata)
- splitrdepends = localdata.getVar('RDEPENDS')
- splitrrecommends = localdata.getVar('RRECOMMENDS')
- splitrsuggests = localdata.getVar('RSUGGESTS')
- splitrprovides = localdata.getVar('RPROVIDES')
- splitrreplaces = localdata.getVar('RREPLACES')
- splitrconflicts = localdata.getVar('RCONFLICTS')
- splitrobsoletes = []
+ splitrdepends = localdata.getVar('RDEPENDS') or ""
+ splitrrecommends = localdata.getVar('RRECOMMENDS') or ""
+ splitrsuggests = localdata.getVar('RSUGGESTS') or ""
+ splitrprovides = localdata.getVar('RPROVIDES') or ""
+ splitrreplaces = localdata.getVar('RREPLACES') or ""
+ splitrconflicts = localdata.getVar('RCONFLICTS') or ""
+ splitrobsoletes = ""
splitrpreinst = localdata.getVar('pkg_preinst')
splitrpostinst = localdata.getVar('pkg_postinst')
@@ -439,9 +439,9 @@ python write_specfile () {
spec_preamble_bottom.append(splitcustomtagschunk)
# Replaces == Obsoletes && Provides
- robsoletes = bb.utils.explode_dep_versions2(splitrobsoletes or "")
- rprovides = bb.utils.explode_dep_versions2(splitrprovides or "")
- rreplaces = bb.utils.explode_dep_versions2(splitrreplaces or "")
+ robsoletes = bb.utils.explode_dep_versions2(splitrobsoletes)
+ rprovides = bb.utils.explode_dep_versions2(splitrprovides)
+ rreplaces = bb.utils.explode_dep_versions2(splitrreplaces)
for dep in rreplaces:
if not dep in robsoletes:
robsoletes[dep] = rreplaces[dep]
@@ -533,9 +533,9 @@ python write_specfile () {
tail_source(d)
# Replaces == Obsoletes && Provides
- robsoletes = bb.utils.explode_dep_versions2(srcrobsoletes or "")
- rprovides = bb.utils.explode_dep_versions2(srcrprovides or "")
- rreplaces = bb.utils.explode_dep_versions2(srcrreplaces or "")
+ robsoletes = bb.utils.explode_dep_versions2(srcrobsoletes)
+ rprovides = bb.utils.explode_dep_versions2(srcrprovides)
+ rreplaces = bb.utils.explode_dep_versions2(srcrreplaces)
for dep in rreplaces:
if not dep in robsoletes:
robsoletes[dep] = rreplaces[dep]
@@ -687,7 +687,9 @@ python do_package_rpm () {
cmd = cmd + " --define '_binary_payload w6T.xzdio'"
cmd = cmd + " --define '_source_payload w6T.xzdio'"
cmd = cmd + " --define 'clamp_mtime_to_source_date_epoch 1'"
+ cmd = cmd + " --define 'use_source_date_epoch_as_buildtime 1'"
cmd = cmd + " --define '_buildhost reproducible'"
+ cmd = cmd + " --define '__font_provides %{nil}'"
if perfiledeps:
cmd = cmd + " --define '__find_requires " + outdepends + "'"
cmd = cmd + " --define '__find_provides " + outprovides + "'"
@@ -745,7 +747,6 @@ python do_package_write_rpm () {
do_package_write_rpm[dirs] = "${PKGWRITEDIRRPM}"
do_package_write_rpm[cleandirs] = "${PKGWRITEDIRRPM}"
-do_package_write_rpm[umask] = "022"
do_package_write_rpm[depends] += "${@oe.utils.build_depends_string(d.getVar('PACKAGE_WRITE_DEPS'), 'do_populate_sysroot')}"
addtask package_write_rpm after do_packagedata do_package
diff --git a/poky/meta/classes/patch.bbclass b/poky/meta/classes/patch.bbclass
index 25ec089ae..cd491a563 100644
--- a/poky/meta/classes/patch.bbclass
+++ b/poky/meta/classes/patch.bbclass
@@ -160,7 +160,6 @@ python patch_do_patch() {
patch_do_patch[vardepsexclude] = "PATCHRESOLVE"
addtask patch after do_unpack
-do_patch[umask] = "022"
do_patch[dirs] = "${WORKDIR}"
do_patch[depends] = "${PATCHDEPENDENCY}"
diff --git a/poky/meta/classes/populate_sdk_base.bbclass b/poky/meta/classes/populate_sdk_base.bbclass
index 4db0511db..c8a7084d3 100644
--- a/poky/meta/classes/populate_sdk_base.bbclass
+++ b/poky/meta/classes/populate_sdk_base.bbclass
@@ -178,7 +178,7 @@ do_populate_sdk[sstate-inputdirs] = "${SDKDEPLOYDIR}"
do_populate_sdk[sstate-outputdirs] = "${SDK_DEPLOY}"
do_populate_sdk[stamp-extra-info] = "${MACHINE_ARCH}${SDKMACHINE}"
-PSEUDO_IGNORE_PATHS .= ",${SDKDEPLOYDIR}"
+PSEUDO_IGNORE_PATHS .= ",${SDKDEPLOYDIR},${WORKDIR}/oe-sdk-repo,${WORKDIR}/sstate-build-populate_sdk"
fakeroot create_sdk_files() {
cp ${COREBASE}/scripts/relocate_sdk.py ${SDK_OUTPUT}/${SDKPATH}/
diff --git a/poky/meta/classes/report-error.bbclass b/poky/meta/classes/report-error.bbclass
index 1a12db120..9cb6b0bd3 100644
--- a/poky/meta/classes/report-error.bbclass
+++ b/poky/meta/classes/report-error.bbclass
@@ -6,6 +6,8 @@
#
# Licensed under the MIT license, see COPYING.MIT for details
+inherit base
+
ERR_REPORT_DIR ?= "${LOG_DIR}/error-report"
def errorreport_getdata(e):
@@ -64,6 +66,8 @@ python errorreport_handler () {
data['failures'] = []
data['component'] = " ".join(e.getPkgs())
data['branch_commit'] = str(base_detect_branch(e.data)) + ": " + str(base_detect_revision(e.data))
+ data['bitbake_version'] = e.data.getVar("BB_VERSION")
+ data['layer_version'] = get_layers_branch_rev(e.data)
data['local_conf'] = get_conf_data(e, 'local.conf')
data['auto_conf'] = get_conf_data(e, 'auto.conf')
lock = bb.utils.lockfile(datafile + '.lock')
diff --git a/poky/meta/classes/reproducible_build.bbclass b/poky/meta/classes/reproducible_build.bbclass
index 2f3bd90b0..f06e00d70 100644
--- a/poky/meta/classes/reproducible_build.bbclass
+++ b/poky/meta/classes/reproducible_build.bbclass
@@ -37,10 +37,13 @@
BUILD_REPRODUCIBLE_BINARIES ??= '1'
inherit ${@oe.utils.ifelse(d.getVar('BUILD_REPRODUCIBLE_BINARIES') == '1', 'reproducible_build_simple', '')}
-SDE_DIR ="${WORKDIR}/source-date-epoch"
+SDE_DIR = "${WORKDIR}/source-date-epoch"
SDE_FILE = "${SDE_DIR}/__source_date_epoch.txt"
SDE_DEPLOYDIR = "${WORKDIR}/deploy-source-date-epoch"
+# A SOURCE_DATE_EPOCH of '0' might be misinterpreted as no SDE
+export SOURCE_DATE_EPOCH_FALLBACK ??= "1302044400"
+
SSTATETASKS += "do_deploy_source_date_epoch"
do_deploy_source_date_epoch () {
@@ -93,15 +96,19 @@ def get_source_date_epoch_value(d):
return cached
epochfile = d.getVar('SDE_FILE')
- source_date_epoch = 0
+ source_date_epoch = int(d.getVar('SOURCE_DATE_EPOCH_FALLBACK'))
if os.path.isfile(epochfile):
with open(epochfile, 'r') as f:
s = f.read()
try:
source_date_epoch = int(s)
+ # workaround for old sstate with SDE_FILE content being 0 - use SOURCE_DATE_EPOCH_FALLBACK
+ if source_date_epoch == 0:
+ source_date_epoch = int(d.getVar('SOURCE_DATE_EPOCH_FALLBACK'))
+ bb.warn("SOURCE_DATE_EPOCH value from sstate '%s' is deprecated/invalid. Reverting to SOURCE_DATE_EPOCH_FALLBACK '%s'" % (s, source_date_epoch))
except ValueError:
- bb.warn("SOURCE_DATE_EPOCH value '%s' is invalid. Reverting to 0" % s)
- source_date_epoch = 0
+ bb.warn("SOURCE_DATE_EPOCH value '%s' is invalid. Reverting to SOURCE_DATE_EPOCH_FALLBACK" % s)
+ source_date_epoch = int(d.getVar('SOURCE_DATE_EPOCH_FALLBACK'))
bb.debug(1, "SOURCE_DATE_EPOCH: %d" % source_date_epoch)
else:
bb.debug(1, "Cannot find %s. SOURCE_DATE_EPOCH will default to %d" % (epochfile, source_date_epoch))
diff --git a/poky/meta/classes/rootfs_deb.bbclass b/poky/meta/classes/rootfs_deb.bbclass
index ef616da22..0469ba705 100644
--- a/poky/meta/classes/rootfs_deb.bbclass
+++ b/poky/meta/classes/rootfs_deb.bbclass
@@ -32,4 +32,8 @@ python () {
d.setVar('DEB_SDK_ARCH', 'amd64')
elif darch == "arm":
d.setVar('DEB_SDK_ARCH', 'armel')
+ elif darch == "aarch64":
+ d.setVar('DEB_SDK_ARCH', 'arm64')
+ else:
+ bb.fatal("Unhandled SDK_ARCH %s" % darch)
}
diff --git a/poky/meta/classes/rootfs_ipk.bbclass b/poky/meta/classes/rootfs_ipk.bbclass
index f1e021973..245c256a6 100644
--- a/poky/meta/classes/rootfs_ipk.bbclass
+++ b/poky/meta/classes/rootfs_ipk.bbclass
@@ -14,8 +14,8 @@ do_rootfs[recrdeptask] += "do_package_write_ipk do_package_qa"
do_rootfs[vardeps] += "PACKAGE_FEED_URIS PACKAGE_FEED_BASE_PATHS PACKAGE_FEED_ARCHS"
do_rootfs[lockfiles] += "${WORKDIR}/ipk.lock"
-do_populate_sdk[lockfiles] += "${WORKDIR}/ipk.lock"
-do_populate_sdk_ext[lockfiles] += "${WORKDIR}/ipk.lock"
+do_populate_sdk[lockfiles] += "${WORKDIR}/sdk-ipk.lock"
+do_populate_sdk_ext[lockfiles] += "${WORKDIR}/sdk-ipk.lock"
OPKG_PREPROCESS_COMMANDS = ""
diff --git a/poky/meta/classes/sanity.bbclass b/poky/meta/classes/sanity.bbclass
index d134b40a8..485173ab4 100644
--- a/poky/meta/classes/sanity.bbclass
+++ b/poky/meta/classes/sanity.bbclass
@@ -703,6 +703,23 @@ def check_sanity_version_change(status, d):
if (tmpdirmode & stat.S_ISUID):
status.addresult("TMPDIR is setuid, please don't build in a setuid directory")
+ # Check that a user isn't building in a path in PSEUDO_IGNORE_PATHS
+ pseudoignorepaths = d.getVar('PSEUDO_IGNORE_PATHS', expand=True).split(",")
+ workdir = d.getVar('WORKDIR', expand=True)
+ for i in pseudoignorepaths:
+ if i and workdir.startswith(i):
+ status.addresult("You are building in a path included in PSEUDO_IGNORE_PATHS " + str(i) + " please locate the build outside this path.\n")
+
+ # Check if PSEUDO_IGNORE_PATHS and paths under pseudo control overlap
+ pseudoignorepaths = d.getVar('PSEUDO_IGNORE_PATHS', expand=True).split(",")
+ pseudo_control_dir = "${D},${PKGD},${PKGDEST},${IMAGEROOTFS},${SDK_OUTPUT}"
+ pseudocontroldir = d.expand(pseudo_control_dir).split(",")
+ for i in pseudoignorepaths:
+ for j in pseudocontroldir:
+ if i and j:
+ if j.startswith(i):
+ status.addresult("A path included in PSEUDO_IGNORE_PATHS " + str(i) + " and the path " + str(j) + " overlap and this will break pseudo permission and ownership tracking. Please set the path " + str(j) + " to a different directory which does not overlap with pseudo controlled directories. \n")
+
# Some third-party software apparently relies on chmod etc. being suid root (!!)
import stat
suid_check_bins = "chown chmod mknod".split()
@@ -787,6 +804,11 @@ def check_sanity_everybuild(status, d):
if "." in paths or "./" in paths or "" in paths:
status.addresult("PATH contains '.', './' or '' (empty element), which will break the build, please remove this.\nParsed PATH is " + str(paths) + "\n")
+ #Check if bitbake is present in PATH environment variable
+ bb_check = bb.utils.which(d.getVar('PATH'), 'bitbake')
+ if not bb_check:
+ bb.warn("bitbake binary is not found in PATH, did you source the script?")
+
# Check whether 'inherit' directive is found (used for a class to inherit)
# in conf file it's supposed to be uppercase INHERIT
inherit = d.getVar('inherit')
diff --git a/poky/meta/classes/sstate.bbclass b/poky/meta/classes/sstate.bbclass
index d08d950e7..f57916816 100644
--- a/poky/meta/classes/sstate.bbclass
+++ b/poky/meta/classes/sstate.bbclass
@@ -862,6 +862,8 @@ BB_HASHCHECK_FUNCTION = "sstate_checkhashes"
def sstate_checkhashes(sq_data, d, siginfo=False, currentcount=0, summary=True, **kwargs):
found = set()
+ foundLocal = set()
+ foundNet = set()
missed = set()
def gethash(task):
@@ -894,6 +896,7 @@ def sstate_checkhashes(sq_data, d, siginfo=False, currentcount=0, summary=True,
if os.path.exists(sstatefile):
bb.debug(2, "SState: Found valid sstate file %s" % sstatefile)
found.add(tid)
+ foundLocal.add(tid)
continue
else:
missed.add(tid)
@@ -939,6 +942,7 @@ def sstate_checkhashes(sq_data, d, siginfo=False, currentcount=0, summary=True,
fetcher.checkstatus()
bb.debug(2, "SState: Successful fetch test for %s" % srcuri)
found.add(tid)
+ foundNet.add(tid)
if tid in missed:
missed.remove(tid)
except:
@@ -1000,7 +1004,7 @@ def sstate_checkhashes(sq_data, d, siginfo=False, currentcount=0, summary=True,
match = 0
if total:
match = len(found) / total * 100
- bb.plain("Sstate summary: Wanted %d Found %d Missed %d Current %d (%d%% match, %d%% complete)" % (total, len(found), len(missed), currentcount, match, complete))
+ bb.plain("Sstate summary: Wanted %d Local %d Network %d Missed %d Current %d (%d%% match, %d%% complete)" % (total, len(foundLocal), len(foundNet),len(missed), currentcount, match, complete))
if hasattr(bb.parse.siggen, "checkhashes"):
bb.parse.siggen.checkhashes(sq_data, missed, found, d)
diff --git a/poky/meta/classes/staging.bbclass b/poky/meta/classes/staging.bbclass
index f0a619b35..806a85773 100644
--- a/poky/meta/classes/staging.bbclass
+++ b/poky/meta/classes/staging.bbclass
@@ -5,6 +5,7 @@ SYSROOT_DIRS = " \
${base_libdir} \
${nonarch_base_libdir} \
${datadir} \
+ /sysroot-only \
"
# These directories are also staged in the sysroot when they contain files that
@@ -27,11 +28,15 @@ SYSROOT_DIRS_BLACKLIST = " \
${mandir} \
${docdir} \
${infodir} \
+ ${datadir}/X11/locale \
${datadir}/applications \
+ ${datadir}/bash-completion \
${datadir}/fonts \
${datadir}/gtk-doc/html \
+ ${datadir}/installed-tests \
${datadir}/locale \
${datadir}/pixmaps \
+ ${datadir}/terminfo \
${libdir}/${BPN}/ptest \
"
@@ -85,7 +90,6 @@ python sysroot_strip () {
}
do_populate_sysroot[dirs] = "${SYSROOT_DESTDIR}"
-do_populate_sysroot[umask] = "022"
addtask populate_sysroot after do_install
diff --git a/poky/meta/classes/systemd.bbclass b/poky/meta/classes/systemd.bbclass
index 9ec465c75..db5d10954 100644
--- a/poky/meta/classes/systemd.bbclass
+++ b/poky/meta/classes/systemd.bbclass
@@ -174,7 +174,8 @@ python systemd_populate_packages() {
if path_found != '':
systemd_add_files_and_parse(pkg_systemd, path_found, service, keys)
else:
- bb.fatal("SYSTEMD_SERVICE_%s value %s does not exist" % (pkg_systemd, service))
+ bb.fatal("Didn't find service unit '{0}', specified in SYSTEMD_SERVICE_{1}. {2}".format(
+ service, pkg_systemd, "Also looked for service unit '{0}'.".format(base) if base is not None else ""))
def systemd_create_presets(pkg, action):
presetf = oe.path.join(d.getVar("PKGD"), d.getVar("systemd_unitdir"), "system-preset/98-%s.preset" % pkg)