Diffstat (limited to 'poky/meta/classes')
-rw-r--r--  poky/meta/classes/base.bbclass                 |  4
-rw-r--r--  poky/meta/classes/cargo_common.bbclass         |  2
-rw-r--r--  poky/meta/classes/cve-check.bbclass            | 73
-rw-r--r--  poky/meta/classes/externalsrc.bbclass          |  2
-rw-r--r--  poky/meta/classes/image.bbclass                |  6
-rw-r--r--  poky/meta/classes/image_types.bbclass          | 33
-rw-r--r--  poky/meta/classes/kernel-yocto.bbclass         |  2
-rw-r--r--  poky/meta/classes/kernel.bbclass               |  3
-rw-r--r--  poky/meta/classes/npm.bbclass                  |  6
-rw-r--r--  poky/meta/classes/overlayfs.bbclass            | 18
-rw-r--r--  poky/meta/classes/package.bbclass              |  3
-rw-r--r--  poky/meta/classes/pypi.bbclass                 |  2
-rw-r--r--  poky/meta/classes/rootfs-postcommands.bbclass  | 24
-rw-r--r--  poky/meta/classes/rust-common.bbclass          |  3
-rw-r--r--  poky/meta/classes/sanity.bbclass               |  2
-rw-r--r--  poky/meta/classes/sstate.bbclass               |  2
-rw-r--r--  poky/meta/classes/staging.bbclass              | 24
17 files changed, 146 insertions, 63 deletions
diff --git a/poky/meta/classes/base.bbclass b/poky/meta/classes/base.bbclass
index 3515720bf9..bdb3ac33c6 100644
--- a/poky/meta/classes/base.bbclass
+++ b/poky/meta/classes/base.bbclass
@@ -115,6 +115,10 @@ def setup_hosttools_dir(dest, toolsvar, d, fatal=True):
tools = d.getVar(toolsvar).split()
origbbenv = d.getVar("BB_ORIGENV", False)
path = origbbenv.getVar("PATH")
+ # Need to ignore our own scripts directories to avoid circular links
+ for p in path.split(":"):
+ if p.endswith("/scripts"):
+ path = path.replace(p, "/ignoreme")
bb.utils.mkdirhier(dest)
notfound = []
for tool in tools:
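The base.bbclass hunk above keeps setup_hosttools_dir() from resolving tools out of the build's own scripts directories, which would otherwise create circular symlinks in the hosttools directory. A minimal standalone sketch of the same filtering step (pure Python; the example PATH value is made up):

    def filter_out_scripts_dirs(path):
        # Replace any PATH entry ending in "/scripts" with a harmless
        # placeholder so no tool is ever picked up from there.
        for p in path.split(":"):
            if p.endswith("/scripts"):
                path = path.replace(p, "/ignoreme")
        return path

    print(filter_out_scripts_dirs("/usr/bin:/home/builder/poky/scripts:/bin"))
    # -> /usr/bin:/ignoreme:/bin
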
diff --git a/poky/meta/classes/cargo_common.bbclass b/poky/meta/classes/cargo_common.bbclass
index 90fad75415..39f32829fd 100644
--- a/poky/meta/classes/cargo_common.bbclass
+++ b/poky/meta/classes/cargo_common.bbclass
@@ -45,7 +45,7 @@ cargo_common_do_configure () {
directory = "${CARGO_VENDORING_DIRECTORY}"
EOF
- if [ -z "${EXTERNALSRC}" ] && [ ${CARGO_DISABLE_BITBAKE_VENDORING} = "0" ]; then
+ if [ ${CARGO_DISABLE_BITBAKE_VENDORING} = "0" ]; then
cat <<- EOF >> ${CARGO_HOME}/config
[source.crates-io]
diff --git a/poky/meta/classes/cve-check.bbclass b/poky/meta/classes/cve-check.bbclass
index 7cf206299b..3729d9cba8 100644
--- a/poky/meta/classes/cve-check.bbclass
+++ b/poky/meta/classes/cve-check.bbclass
@@ -79,6 +79,30 @@ CVE_CHECK_LAYER_INCLUDELIST ??= ""
# set to "alphabetical" for version using single alphabetical character as increment release
CVE_VERSION_SUFFIX ??= ""
+def generate_json_report(d, out_path, link_path):
+ if os.path.exists(d.getVar("CVE_CHECK_SUMMARY_INDEX_PATH")):
+ import json
+ from oe.cve_check import cve_check_merge_jsons
+
+ bb.note("Generating JSON CVE summary")
+ index_file = d.getVar("CVE_CHECK_SUMMARY_INDEX_PATH")
+ summary = {"version":"1", "package": []}
+ with open(index_file) as f:
+ filename = f.readline()
+ while filename:
+ with open(filename.rstrip()) as j:
+ data = json.load(j)
+ cve_check_merge_jsons(summary, data)
+ filename = f.readline()
+
+ with open(out_path, "w") as f:
+ json.dump(summary, f, indent=2)
+
+ if link_path != out_path:
+ if os.path.exists(os.path.realpath(link_path)):
+ os.remove(link_path)
+ os.symlink(os.path.basename(out_path), link_path)
+
python cve_save_summary_handler () {
import shutil
import datetime
@@ -97,10 +121,17 @@ python cve_save_summary_handler () {
if cve_summary_file and os.path.exists(cve_summary_file):
cvefile_link = os.path.join(cvelogpath, cve_summary_name)
+ # if the paths are the same don't create the link
+ if cvefile_link != cve_summary_file:
+ if os.path.exists(os.path.realpath(cvefile_link)):
+ os.remove(cvefile_link)
+ os.symlink(os.path.basename(cve_summary_file), cvefile_link)
- if os.path.exists(os.path.realpath(cvefile_link)):
- os.remove(cvefile_link)
- os.symlink(os.path.basename(cve_summary_file), cvefile_link)
+ if d.getVar("CVE_CHECK_FORMAT_JSON") == "1":
+ json_summary_link_name = os.path.join(cvelogpath, d.getVar("CVE_CHECK_SUMMARY_FILE_NAME_JSON"))
+ json_summary_name = os.path.join(cvelogpath, "%s-%s.json" % (cve_summary_name, timestamp))
+ generate_json_report(d, json_summary_name, json_summary_link_name)
+ bb.plain("Complete CVE JSON report summary created at: %s" % json_summary_link_name)
}
addhandler cve_save_summary_handler
@@ -169,35 +200,25 @@ python cve_check_write_rootfs_manifest () {
if manifest_name and os.path.exists(manifest_name):
manifest_link = os.path.join(deploy_dir, "%s.cve" % link_name)
- # If we already have another manifest, update symlinks
- if os.path.exists(os.path.realpath(manifest_link)):
- os.remove(manifest_link)
- os.symlink(os.path.basename(manifest_name), manifest_link)
+ # if they are the same don't create the link
+ if manifest_link != manifest_name:
+ # If we already have another manifest, update symlinks
+ if os.path.exists(os.path.realpath(manifest_link)):
+ os.remove(manifest_link)
+ os.symlink(os.path.basename(manifest_name), manifest_link)
bb.plain("Image CVE report stored in: %s" % manifest_name)
- if os.path.exists(d.getVar("CVE_CHECK_SUMMARY_INDEX_PATH")):
- import json
- bb.note("Generating JSON CVE manifest")
- deploy_dir = d.getVar("DEPLOY_DIR_IMAGE")
- link_name = d.getVar("IMAGE_LINK_NAME")
- manifest_name = d.getVar("CVE_CHECK_MANIFEST_JSON")
- index_file = d.getVar("CVE_CHECK_SUMMARY_INDEX_PATH")
- manifest = {"version":"1", "package": []}
- with open(index_file) as f:
- filename = f.readline()
- while filename:
- with open(filename.rstrip()) as j:
- data = json.load(j)
- cve_check_merge_jsons(manifest, data)
- filename = f.readline()
-
- with open(manifest_name, "w") as f:
- json.dump(manifest, f, indent=2)
- bb.plain("Image CVE report stored in: %s" % manifest_name)
+ if d.getVar("CVE_CHECK_FORMAT_JSON") == "1":
+ link_path = os.path.join(deploy_dir, "%s.json" % link_name)
+ manifest_path = d.getVar("CVE_CHECK_MANIFEST_JSON")
+ bb.note("Generating JSON CVE manifest")
+ generate_json_report(d, manifest_path, link_path)
+ bb.plain("Image CVE JSON report stored in: %s" % link_path)
}
ROOTFS_POSTPROCESS_COMMAND:prepend = "${@'cve_check_write_rootfs_manifest; ' if d.getVar('CVE_CHECK_CREATE_MANIFEST') == '1' else ''}"
do_rootfs[recrdeptask] += "${@'do_cve_check' if d.getVar('CVE_CHECK_CREATE_MANIFEST') == '1' else ''}"
+do_populate_sdk[recrdeptask] += "${@'do_cve_check' if d.getVar('CVE_CHECK_CREATE_MANIFEST') == '1' else ''}"
def check_cves(d, patched_cves):
"""
diff --git a/poky/meta/classes/externalsrc.bbclass b/poky/meta/classes/externalsrc.bbclass
index b2f216f361..90792a737b 100644
--- a/poky/meta/classes/externalsrc.bbclass
+++ b/poky/meta/classes/externalsrc.bbclass
@@ -68,7 +68,7 @@ python () {
url_data = fetch.ud[url]
parm = url_data.parm
if (url_data.type == 'file' or
- url_data.type == 'npmsw' or
+ url_data.type == 'npmsw' or url_data.type == 'crate' or
'type' in parm and parm['type'] == 'kmeta'):
local_srcuri.append(url)
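The externalsrc change adds crate:// to the URL schemes that stay in the local SRC_URI, so cargo crate downloads are still fetched when the main source tree is external. A rough sketch of the test being extended (pure Python, detached from the fetcher objects):

    def is_local_srcuri(scheme, parm):
        # file://, npmsw:// and now crate:// entries stay local, as do
        # kmeta-typed entries used by linux-yocto metadata.
        return (scheme in ("file", "npmsw", "crate")
                or parm.get("type") == "kmeta")

    print(is_local_srcuri("crate", {}))   # -> True
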
diff --git a/poky/meta/classes/image.bbclass b/poky/meta/classes/image.bbclass
index 47776db2b0..2139a7e576 100644
--- a/poky/meta/classes/image.bbclass
+++ b/poky/meta/classes/image.bbclass
@@ -133,11 +133,11 @@ def rootfs_variables(d):
do_rootfs[vardeps] += "${@rootfs_variables(d)}"
# This is needed to have kernel image in DEPLOY_DIR.
-# This follow many common usecases and user expectations.
+# This follows many common usecases and user expectations.
# But if you are building an image which doesn't need the kernel image at all,
# you can unset this variable manually.
-KERNELDEPLOYDEPEND ?= "virtual/kernel:do_deploy"
-do_build[depends] += "${KERNELDEPMODDEPEND}"
+KERNEL_DEPLOY_DEPEND ?= "virtual/kernel:do_deploy"
+do_build[depends] += "${KERNEL_DEPLOY_DEPEND}"
python () {
diff --git a/poky/meta/classes/image_types.bbclass b/poky/meta/classes/image_types.bbclass
index 960dab1a60..0ffea91195 100644
--- a/poky/meta/classes/image_types.bbclass
+++ b/poky/meta/classes/image_types.bbclass
@@ -81,7 +81,7 @@ oe_mkext234fs () {
bbdebug 1 Executing "dd if=/dev/zero of=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype seek=$ROOTFS_SIZE count=$COUNT bs=1024"
dd if=/dev/zero of=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype seek=$ROOTFS_SIZE count=$COUNT bs=1024
bbdebug 1 "Actual Rootfs size: `du -s ${IMAGE_ROOTFS}`"
- bbdebug 1 "Actual Partion size: `stat -c '%s' ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype`"
+ bbdebug 1 "Actual Partition size: `stat -c '%s' ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype`"
bbdebug 1 Executing "mkfs.$fstype -F $extra_imagecmd ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype -d ${IMAGE_ROOTFS}"
mkfs.$fstype -F $extra_imagecmd ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype -d ${IMAGE_ROOTFS}
# Error codes 0-3 indicate successfull operation of fsck (no errors or errors corrected)
@@ -142,6 +142,24 @@ UBI_VOLNAME ?= "${MACHINE}-rootfs"
UBI_VOLTYPE ?= "dynamic"
UBI_IMGTYPE ?= "ubifs"
+write_ubi_config() {
+ if [ -z "$1" ]; then
+ local vname=""
+ else
+ local vname="_$1"
+ fi
+
+ cat <<EOF > ubinize${vname}-${IMAGE_NAME}.cfg
+[ubifs]
+mode=ubi
+image=${IMGDEPLOYDIR}/${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.${UBI_IMGTYPE}
+vol_id=0
+vol_type=${UBI_VOLTYPE}
+vol_name=${UBI_VOLNAME}
+vol_flags=autoresize
+EOF
+}
+
multiubi_mkfs() {
local mkubifs_args="$1"
local ubinize_args="$2"
@@ -151,19 +169,8 @@ multiubi_mkfs() {
bbfatal "MKUBIFS_ARGS and UBINIZE_ARGS have to be set, see http://www.linux-mtd.infradead.org/faq/ubifs.html for details"
fi
- if [ -z "$3" ]; then
- local vname=""
- else
- local vname="_$3"
- fi
+ write_ubi_config "$3"
- echo \[ubifs\] > ubinize${vname}-${IMAGE_NAME}.cfg
- echo mode=ubi >> ubinize${vname}-${IMAGE_NAME}.cfg
- echo image=${IMGDEPLOYDIR}/${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.${UBI_IMGTYPE} >> ubinize${vname}-${IMAGE_NAME}.cfg
- echo vol_id=0 >> ubinize${vname}-${IMAGE_NAME}.cfg
- echo vol_type=${UBI_VOLTYPE} >> ubinize${vname}-${IMAGE_NAME}.cfg
- echo vol_name=${UBI_VOLNAME} >> ubinize${vname}-${IMAGE_NAME}.cfg
- echo vol_flags=autoresize >> ubinize${vname}-${IMAGE_NAME}.cfg
if [ -n "$vname" ]; then
mkfs.ubifs -r ${IMAGE_ROOTFS} -o ${IMGDEPLOYDIR}/${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubifs ${mkubifs_args}
fi
diff --git a/poky/meta/classes/kernel-yocto.bbclass b/poky/meta/classes/kernel-yocto.bbclass
index fb30c7cc05..b276ded775 100644
--- a/poky/meta/classes/kernel-yocto.bbclass
+++ b/poky/meta/classes/kernel-yocto.bbclass
@@ -192,7 +192,7 @@ do_kernel_metadata() {
if [ -n "$in_tree_defconfig" ]; then
sccs_defconfig=$in_tree_defconfig
if [ -n "$src_uri_defconfig" ]; then
- bbwarn "[NOTE]: defconfig was supplied both via KBUILD_DEFCONFIG and SRC_URI. Dropping SRC_URI defconfig"
+ bbwarn "[NOTE]: defconfig was supplied both via KBUILD_DEFCONFIG and SRC_URI. Dropping SRC_URI entry $src_uri_defconfig"
fi
else
# if we didn't have an in-tree one, make our defconfig the one
diff --git a/poky/meta/classes/kernel.bbclass b/poky/meta/classes/kernel.bbclass
index 8299b394a7..5d2f17c3be 100644
--- a/poky/meta/classes/kernel.bbclass
+++ b/poky/meta/classes/kernel.bbclass
@@ -110,7 +110,7 @@ python __anonymous () {
d.setVar('PKG:%s-image-%s' % (kname,typelower), '%s-image-%s-${KERNEL_VERSION_PKG_NAME}' % (kname, typelower))
d.setVar('ALLOW_EMPTY:%s-image-%s' % (kname, typelower), '1')
- d.setVar('pkg_postinst:%s-image-%s' % (kname,typelower), """set +e
+ d.prependVar('pkg_postinst:%s-image-%s' % (kname,typelower), """set +e
if [ -n "$D" ]; then
ln -sf %s-${KERNEL_VERSION} $D/${KERNEL_IMAGEDEST}/%s > /dev/null 2>&1
else
@@ -649,6 +649,7 @@ FILES:${KERNEL_PACKAGE_NAME}-image = ""
FILES:${KERNEL_PACKAGE_NAME}-dev = "/${KERNEL_IMAGEDEST}/System.map* /${KERNEL_IMAGEDEST}/Module.symvers* /${KERNEL_IMAGEDEST}/config* ${KERNEL_SRC_PATH} ${nonarch_base_libdir}/modules/${KERNEL_VERSION}/build"
FILES:${KERNEL_PACKAGE_NAME}-vmlinux = "/${KERNEL_IMAGEDEST}/vmlinux-${KERNEL_VERSION_NAME}"
FILES:${KERNEL_PACKAGE_NAME}-modules = ""
+FILES:${KERNEL_PACKAGE_NAME}-dbg = "/usr/lib/debug /usr/src/debug"
RDEPENDS:${KERNEL_PACKAGE_NAME} = "${KERNEL_PACKAGE_NAME}-base (= ${EXTENDPKGV})"
# Allow machines to override this dependency if kernel image files are
# not wanted in images as standard
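Switching from d.setVar() to d.prependVar() for the image package's pkg_postinst means a postinst already defined by the kernel recipe is kept rather than overwritten; the class-provided symlink handling is simply placed in front of it. A tiny mock-datastore sketch of the difference (the MockDataStore class and the postinst strings are illustrative only):

    class MockDataStore:
        # Minimal stand-in for the BitBake datastore, just enough to show
        # setVar() replacing a value while prependVar() preserves it.
        def __init__(self):
            self._vars = {}
        def setVar(self, name, value):
            self._vars[name] = value
        def prependVar(self, name, value):
            self._vars[name] = value + self._vars.get(name, "")
        def getVar(self, name):
            return self._vars.get(name)

    d = MockDataStore()
    d.setVar("pkg_postinst:kernel-image-zimage", "echo recipe-specific step\n")
    d.prependVar("pkg_postinst:kernel-image-zimage", "set +e\n# symlink handling...\n")
    print(d.getVar("pkg_postinst:kernel-image-zimage"))
    # The recipe's own step is still present, after the class-provided part.
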
diff --git a/poky/meta/classes/npm.bbclass b/poky/meta/classes/npm.bbclass
index ba50fcac20..dbfc2e728e 100644
--- a/poky/meta/classes/npm.bbclass
+++ b/poky/meta/classes/npm.bbclass
@@ -81,6 +81,7 @@ python npm_do_configure() {
import json
import re
import shlex
+ import stat
import tempfile
from bb.fetch2.npm import NpmEnvironment
from bb.fetch2.npm import npm_unpack
@@ -202,6 +203,7 @@ python npm_do_configure() {
if has_shrinkwrap_file:
_update_manifest("devDependencies")
+ os.chmod(cached_manifest_file, os.stat(cached_manifest_file).st_mode | stat.S_IWUSR)
with open(cached_manifest_file, "w") as f:
json.dump(cached_manifest, f, indent=2)
@@ -305,10 +307,6 @@ npm_do_install() {
# Remove the shrinkwrap file which does not need to be packed
rm -f ${D}/${nonarch_libdir}/node_modules/*/npm-shrinkwrap.json
rm -f ${D}/${nonarch_libdir}/node_modules/@*/*/npm-shrinkwrap.json
-
- # node(1) is using /usr/lib/node as default include directory and npm(1) is
- # using /usr/lib/node_modules as install directory. Let's make both happy.
- ln -fs node_modules ${D}/${nonarch_libdir}/node
}
FILES:${PN} += " \
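The added chmod makes the cached package.json writable before it is rewritten, presumably because files restored from the download cache can be read-only. A small standalone sketch of the pattern (the scratch file name is hypothetical):

    import os
    import stat

    path = "manifest-example.json"          # hypothetical scratch file
    with open(path, "w") as f:              # create something to work on
        f.write("{}\n")
    os.chmod(path, 0o444)                   # simulate a read-only cached copy

    # Grant the owner write permission again before rewriting, as the class does.
    os.chmod(path, os.stat(path).st_mode | stat.S_IWUSR)
    with open(path, "w") as f:
        f.write('{"devDependencies": {}}\n')
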
diff --git a/poky/meta/classes/overlayfs.bbclass b/poky/meta/classes/overlayfs.bbclass
index 29fced2ca7..f7069edd41 100644
--- a/poky/meta/classes/overlayfs.bbclass
+++ b/poky/meta/classes/overlayfs.bbclass
@@ -16,10 +16,18 @@
#
# OVERLAYFS_MOUNT_POINT[data] ?= "/data"
#
-# The class assumes you have a data.mount systemd unit defined in your
-# systemd-machine-units recipe and installed to the image.
+# Per default the class assumes you have a corresponding fstab entry or systemd
+# mount unit (data.mount in this case) for this mount point installed on the
+# image, for instance via a wks script or the systemd-machine-units recipe.
#
-# Then you can specify writable directories on a recipe base
+# If the mount point is handled somewhere else, e.g. custom boot or preinit
+# scripts or in a initramfs, then this QA check can be skipped by adding
+# mount-configured to the related OVERLAYFS_QA_SKIP flag:
+#
+# OVERLAYFS_QA_SKIP[data] = "mount-configured"
+#
+# To use the overlayfs, you just have to specify writable directories inside
+# their recipe:
#
# OVERLAYFS_WRITABLE_PATHS[data] = "/usr/share/my-custom-application"
#
@@ -30,6 +38,10 @@
# OVERLAYFS_MOUNT_POINT[mnt-overlay] = "/mnt/overlay"
# OVERLAYFS_WRITABLE_PATHS[mnt-overlay] = "/usr/share/another-application"
#
+# If your recipe deploys a systemd service, then it should require and be
+# started after the ${PN}-overlays.service to make sure that all overlays are
+# mounted beforehand.
+#
# Note: the class does not support /etc directory itself, because systemd depends on it
# For /etc directory use overlayfs-etc class
diff --git a/poky/meta/classes/package.bbclass b/poky/meta/classes/package.bbclass
index 44fbc32df6..62050a18b8 100644
--- a/poky/meta/classes/package.bbclass
+++ b/poky/meta/classes/package.bbclass
@@ -662,7 +662,10 @@ def runtime_mapping_rename (varname, pkg, d):
# Used by do_packagedata (and possibly other routines post do_package)
#
+PRSERV_ACTIVE = "${@bool(d.getVar("PRSERV_HOST"))}"
+PRSERV_ACTIVE[vardepvalue] = "${PRSERV_ACTIVE}"
package_get_auto_pr[vardepsexclude] = "BB_TASKDEPDATA"
+package_get_auto_pr[vardeps] += "PRSERV_ACTIVE"
python package_get_auto_pr() {
import oe.prservice
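PRSERV_ACTIVE reduces the PR service configuration to a boolean and pins that value into the signature of package_get_auto_pr via vardepvalue, so the task appears to be re-triggered only when a PR service is enabled or disabled, not on every change to the PRSERV_HOST string. The underlying reduction is plain Python truthiness (example values are made up):

    # Any non-empty PRSERV_HOST value collapses to True, an unset or empty
    # one to False.
    for prserv_host in (None, "", "localhost:0", "prserv.example.com:8585"):
        print(repr(prserv_host), "->", bool(prserv_host))
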
diff --git a/poky/meta/classes/pypi.bbclass b/poky/meta/classes/pypi.bbclass
index 9405d58601..5fa7b8a6ae 100644
--- a/poky/meta/classes/pypi.bbclass
+++ b/poky/meta/classes/pypi.bbclass
@@ -24,3 +24,5 @@ S = "${WORKDIR}/${PYPI_PACKAGE}-${PV}"
UPSTREAM_CHECK_URI ?= "https://pypi.org/project/${PYPI_PACKAGE}/"
UPSTREAM_CHECK_REGEX ?= "/${PYPI_PACKAGE}/(?P<pver>(\d+[\.\-_]*)+)/"
+
+CVE_PRODUCT ?= "python:${PYPI_PACKAGE}"
diff --git a/poky/meta/classes/rootfs-postcommands.bbclass b/poky/meta/classes/rootfs-postcommands.bbclass
index 7b92df69c5..d302c23cf4 100644
--- a/poky/meta/classes/rootfs-postcommands.bbclass
+++ b/poky/meta/classes/rootfs-postcommands.bbclass
@@ -267,9 +267,10 @@ python write_image_manifest () {
if os.path.exists(manifest_name) and link_name:
manifest_link = deploy_dir + "/" + link_name + ".manifest"
- if os.path.lexists(manifest_link):
- os.remove(manifest_link)
- os.symlink(os.path.basename(manifest_name), manifest_link)
+ if manifest_link != manifest_name:
+ if os.path.lexists(manifest_link):
+ os.remove(manifest_link)
+ os.symlink(os.path.basename(manifest_name), manifest_link)
}
# Can be used to create /etc/timestamp during image construction to give a reasonably
@@ -339,9 +340,10 @@ python write_image_test_data() {
if os.path.exists(testdata_name) and link_name:
testdata_link = os.path.join(deploy_dir, "%s.testdata.json" % link_name)
- if os.path.lexists(testdata_link):
- os.remove(testdata_link)
- os.symlink(os.path.basename(testdata_name), testdata_link)
+ if testdata_link != testdata_name:
+ if os.path.lexists(testdata_link):
+ os.remove(testdata_link)
+ os.symlink(os.path.basename(testdata_name), testdata_link)
}
write_image_test_data[vardepsexclude] += "TOPDIR"
@@ -398,6 +400,10 @@ python overlayfs_qa_check() {
allUnitExist = True;
for mountPoint in overlayMountPoints:
+ qaSkip = (d.getVarFlag("OVERLAYFS_QA_SKIP", mountPoint) or "").split()
+ if "mount-configured" in qaSkip:
+ continue
+
mountPath = d.getVarFlag('OVERLAYFS_MOUNT_POINT', mountPoint)
if mountPath in fstabDevices:
continue
@@ -407,8 +413,10 @@ python overlayfs_qa_check() {
for dirpath in searchpaths):
continue
- bb.warn('Mount path %s not found in fstat and unit %s not found '
- 'in systemd unit directories' % (mountPath, mountUnit))
+ bb.warn(f'Mount path {mountPath} not found in fstab and unit '
+ f'{mountUnit} not found in systemd unit directories.')
+ bb.warn(f'Skip this check by setting OVERLAYFS_QA_SKIP[{mountPoint}] = '
+ '"mount-configured"')
allUnitExist = False;
if not allUnitExist:
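Several hunks in this change repeat the same guard: only (re)create the convenience symlink when the link path is not the file itself. A consolidated sketch of that pattern, following the lexists() variant used in this class (the function name is illustrative):

    import os

    def update_deploy_symlink(target, link):
        # Skip when the "link" would point at itself (paths identical),
        # otherwise replace any existing link and point it at the target.
        if link == target:
            return
        if os.path.lexists(link):
            os.remove(link)
        os.symlink(os.path.basename(target), link)
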
diff --git a/poky/meta/classes/rust-common.bbclass b/poky/meta/classes/rust-common.bbclass
index 02a538258a..cb811ac5da 100644
--- a/poky/meta/classes/rust-common.bbclass
+++ b/poky/meta/classes/rust-common.bbclass
@@ -117,8 +117,11 @@ RUST_BUILD_ARCH = "${@oe.rust.arch_to_rust_arch(d.getVar('BUILD_ARCH'))}"
# its likely best to not use the triple suffix due to potential confusion.
RUST_BUILD_SYS = "${@rust_base_triple(d, 'BUILD')}"
+RUST_BUILD_SYS[vardepvalue] = "${RUST_BUILD_SYS}"
RUST_HOST_SYS = "${@rust_base_triple(d, 'HOST')}"
+RUST_HOST_SYS[vardepvalue] = "${RUST_HOST_SYS}"
RUST_TARGET_SYS = "${@rust_base_triple(d, 'TARGET')}"
+RUST_TARGET_SYS[vardepvalue] = "${RUST_TARGET_SYS}"
# wrappers to get around the fact that Rust needs a single
# binary but Yocto's compiler and linker commands have
diff --git a/poky/meta/classes/sanity.bbclass b/poky/meta/classes/sanity.bbclass
index c72a7b3ed3..b416918013 100644
--- a/poky/meta/classes/sanity.bbclass
+++ b/poky/meta/classes/sanity.bbclass
@@ -470,7 +470,7 @@ def check_make_version(sanity_data):
if bb.utils.vercmp_string_op(version, "4.2.1", "=="):
distro = oe.lsb.distro_identifier()
- if "ubuntu" in distro or "debian" in distro:
+ if "ubuntu" in distro or "debian" in distro or "linuxmint" in distro:
return None
return "make version 4.2.1 is known to have issues on Centos/OpenSUSE and other non-Ubuntu systems. Please use a buildtools-make-tarball or a newer version of make.\n"
return None
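The sanity change adds Linux Mint to the distributions where the known-bad make 4.2.1 is tolerated, presumably because Mint ships the same patched package as Ubuntu/Debian. The allow-list test reduces to a substring check over the distro identifier:

    # Sketch of the allow-list test; the distro strings are examples of what
    # oe.lsb.distro_identifier() might return.
    def make_421_tolerated(distro):
        return any(name in distro for name in ("ubuntu", "debian", "linuxmint"))

    print(make_421_tolerated("linuxmint-21"))    # -> True
    print(make_421_tolerated("centos-8"))        # -> False
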
diff --git a/poky/meta/classes/sstate.bbclass b/poky/meta/classes/sstate.bbclass
index 1c0cae4893..3513269bca 100644
--- a/poky/meta/classes/sstate.bbclass
+++ b/poky/meta/classes/sstate.bbclass
@@ -1,4 +1,4 @@
-SSTATE_VERSION = "8"
+SSTATE_VERSION = "10"
SSTATE_ZSTD_CLEVEL ??= "8"
diff --git a/poky/meta/classes/staging.bbclass b/poky/meta/classes/staging.bbclass
index 9fc8f4f283..8372a4574a 100644
--- a/poky/meta/classes/staging.bbclass
+++ b/poky/meta/classes/staging.bbclass
@@ -404,7 +404,9 @@ python extend_recipe_sysroot() {
# All files that we're going to be installing, to find conflicts.
fileset = {}
+ invalidate_tasks = set()
for f in os.listdir(depdir):
+ removed = []
if not f.endswith(".complete"):
continue
f = depdir + "/" + f
@@ -414,6 +416,28 @@ python extend_recipe_sysroot() {
sstate_clean_manifest(depdir + "/" + lnk, d, canrace=True, prefix=workdir)
os.unlink(f)
os.unlink(f.replace(".complete", ""))
+ removed.append(os.path.basename(f.replace(".complete", "")))
+
+ # If we've removed files from the sysroot above, the task that installed them may still
+ # have a stamp file present for the task. This is probably invalid right now but may become
+ # valid again if the user were to change configuration back for example. Since we've removed
+ # the files a task might need, remove the stamp file too to force it to rerun.
+ # YOCTO #14790
+ if removed:
+ for i in glob.glob(depdir + "/index.*"):
+ if i.endswith("." + mytaskname):
+ continue
+ with open(i, "r") as f:
+ for l in f:
+ if l.startswith("TaskDeps:"):
+ continue
+ l = l.strip()
+ if l in removed:
+ invalidate_tasks.add(i.rsplit(".", 1)[1])
+ break
+ for t in invalidate_tasks:
+ bb.note("Invalidating stamps for task %s" % t)
+ bb.build.clean_stamp(t, d)
installed = []
for dep in configuredeps:
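
The staging change records which sysroot manifests were just cleaned and then scans the per-task index files, so any other task that had installed one of them gets its stamp invalidated and reruns. A standalone sketch of that scan (the directory layout and names mirror the hunk; treat it as an illustration, not the exact implementation):

    import glob
    import os

    def tasks_to_invalidate(depdir, mytaskname, removed):
        # Each "index.<taskname>" file lists the manifests that task
        # installed into the recipe sysroot; if any of those manifests was
        # just removed, the task's stamp is stale and should be cleaned.
        invalidate = set()
        for i in glob.glob(os.path.join(depdir, "index.*")):
            if i.endswith("." + mytaskname):
                continue
            with open(i) as f:
                for l in f:
                    if l.startswith("TaskDeps:"):
                        continue
                    if l.strip() in removed:
                        invalidate.add(i.rsplit(".", 1)[1])
                        break
        return invalidate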