Diffstat (limited to 'poky/meta/classes')
-rw-r--r--  poky/meta/classes/archiver.bbclass              |  15
-rw-r--r--  poky/meta/classes/autotools.bbclass             |   2
-rw-r--r--  poky/meta/classes/baremetal-image.bbclass       |  99
-rw-r--r--  poky/meta/classes/buildhistory.bbclass          |  65
-rw-r--r--  poky/meta/classes/buildstats.bbclass            |   4
-rw-r--r--  poky/meta/classes/cmake.bbclass                 |  55
-rw-r--r--  poky/meta/classes/cml1.bbclass                  |  28
-rw-r--r--  poky/meta/classes/cve-check.bbclass             |  76
-rw-r--r--  poky/meta/classes/deploy.bbclass                |   1
-rw-r--r--  poky/meta/classes/features_check.bbclass        | 125
-rw-r--r--  poky/meta/classes/go-mod.bbclass                |   2
-rw-r--r--  poky/meta/classes/gtk-icon-cache.bbclass        |  24
-rw-r--r--  poky/meta/classes/gtk-immodules-cache.bbclass   |   1
-rw-r--r--  poky/meta/classes/image-artifact-names.bbclass  |  15
-rw-r--r--  poky/meta/classes/image-live.bbclass            |   2
-rw-r--r--  poky/meta/classes/image.bbclass                 |   6
-rw-r--r--  poky/meta/classes/image_types.bbclass           |   9
-rw-r--r--  poky/meta/classes/image_types_wic.bbclass       |   5
-rw-r--r--  poky/meta/classes/insane.bbclass                |  64
-rw-r--r--  poky/meta/classes/kernel-artifact-names.bbclass |   8
-rw-r--r--  poky/meta/classes/kernel-devicetree.bbclass     |   2
-rw-r--r--  poky/meta/classes/kernel-fitimage.bbclass       |  29
-rw-r--r--  poky/meta/classes/kernel-yocto.bbclass          | 315
-rw-r--r--  poky/meta/classes/kernel.bbclass                |  65
-rw-r--r--  poky/meta/classes/license_image.bbclass         |  11
-rw-r--r--  poky/meta/classes/linuxloader.bbclass           |   4
-rw-r--r--  poky/meta/classes/meson.bbclass                 |   4
-rw-r--r--  poky/meta/classes/mime.bbclass                  |  13
-rw-r--r--  poky/meta/classes/nativesdk.bbclass             |   3
-rw-r--r--  poky/meta/classes/nopackages.bbclass            |   1
-rw-r--r--  poky/meta/classes/package.bbclass               |  72
-rw-r--r--  poky/meta/classes/package_rpm.bbclass           |   2
-rw-r--r--  poky/meta/classes/package_tar.bbclass           |   6
-rw-r--r--  poky/meta/classes/packagefeed-stability.bbclass | 252
-rw-r--r--  poky/meta/classes/populate_sdk_ext.bbclass      |  16
-rw-r--r--  poky/meta/classes/ptest.bbclass                 |   2
-rw-r--r--  poky/meta/classes/qemuboot.bbclass              |  10
-rw-r--r--  poky/meta/classes/relocatable.bbclass           |  20
-rw-r--r--  poky/meta/classes/reproducible_build.bbclass    |  90
-rw-r--r--  poky/meta/classes/rootfs-postcommands.bbclass   |  20
-rw-r--r--  poky/meta/classes/rootfs_rpm.bbclass            |   2
-rw-r--r--  poky/meta/classes/rootfsdebugfiles.bbclass      |   2
-rw-r--r--  poky/meta/classes/sign_rpm.bbclass              |   1
-rw-r--r--  poky/meta/classes/spdx.bbclass                  | 360
-rw-r--r--  poky/meta/classes/sstate.bbclass                |  10
-rw-r--r--  poky/meta/classes/staging.bbclass               |   2
-rw-r--r--  poky/meta/classes/testexport.bbclass            |   8
-rw-r--r--  poky/meta/classes/testimage.bbclass             |   3
-rw-r--r--  poky/meta/classes/uboot-sign.bbclass            |  11
-rw-r--r--  poky/meta/classes/uninative.bbclass             |  13
50 files changed, 884 insertions, 1071 deletions
diff --git a/poky/meta/classes/archiver.bbclass b/poky/meta/classes/archiver.bbclass
index 780c562b6..aff1f9dbb 100644
--- a/poky/meta/classes/archiver.bbclass
+++ b/poky/meta/classes/archiver.bbclass
@@ -345,13 +345,12 @@ python do_ar_mirror() {
fetcher = bb.fetch2.Fetch(src_uri, d)
- for url in fetcher.urls:
- if is_excluded(url):
- bb.note('Skipping excluded url: %s' % (url))
+ for ud in fetcher.expanded_urldata():
+ if is_excluded(ud.url):
+ bb.note('Skipping excluded url: %s' % (ud.url))
continue
- bb.note('Archiving url: %s' % (url))
- ud = fetcher.ud[url]
+ bb.note('Archiving url: %s' % (ud.url))
ud.setup_localpath(d)
localpath = None
@@ -367,7 +366,7 @@ python do_ar_mirror() {
if len(ud.mirrortarballs) and not localpath:
bb.warn('Mirror tarballs are listed for a source but none are present. ' \
'Falling back to original download.\n' \
- 'SRC_URI = %s' % (url))
+ 'SRC_URI = %s' % (ud.url))
# Check original download
if not localpath:
@@ -376,7 +375,7 @@ python do_ar_mirror() {
if not localpath or not os.path.exists(localpath):
bb.fatal('Original download is missing for a source.\n' \
- 'SRC_URI = %s' % (url))
+ 'SRC_URI = %s' % (ud.url))
# We now have an appropriate localpath
bb.note('Copying source mirror')
@@ -583,7 +582,7 @@ do_deploy_archives[sstate-outputdirs] = "${DEPLOY_DIR_SRC}"
addtask do_deploy_archives_setscene
addtask do_ar_original after do_unpack
-addtask do_unpack_and_patch after do_patch
+addtask do_unpack_and_patch after do_patch do_preconfigure
addtask do_ar_patched after do_unpack_and_patch
addtask do_ar_configured after do_unpack_and_patch
addtask do_ar_mirror after do_fetch
diff --git a/poky/meta/classes/autotools.bbclass b/poky/meta/classes/autotools.bbclass
index 6c2a33ac7..1f3c771c6 100644
--- a/poky/meta/classes/autotools.bbclass
+++ b/poky/meta/classes/autotools.bbclass
@@ -5,7 +5,7 @@ def autotools_dep_prepend(d):
pn = d.getVar('PN')
deps = ''
- if pn in ['autoconf-native', 'automake-native', 'help2man-native']:
+ if pn in ['autoconf-native', 'automake-native']:
return deps
deps += 'autoconf-native automake-native '
diff --git a/poky/meta/classes/baremetal-image.bbclass b/poky/meta/classes/baremetal-image.bbclass
new file mode 100644
index 000000000..90d58f261
--- /dev/null
+++ b/poky/meta/classes/baremetal-image.bbclass
@@ -0,0 +1,99 @@
+# Baremetal image class
+#
+# This class is meant to be inherited by recipes for baremetal/RTOS applications.
+# It contains code that would be used by all of them; every recipe just needs to
+# override certain variables.
+#
+# For scalability purposes, code within this class focuses on the "image" wiring
+# to satisfy the OpenEmbedded image creation and testing infrastructure.
+#
+# See meta-skeleton for a working example.
+
+
+# Toolchain should be baremetal or newlib based.
+# TCLIBC="baremetal" or TCLIBC="newlib"
+COMPATIBLE_HOST_libc-musl_class-target = "null"
+COMPATIBLE_HOST_libc-glibc_class-target = "null"
+
+
+inherit rootfs-postcommands
+
+# Set some defaults, but these should be overridden by each recipe if required
+IMGDEPLOYDIR ?= "${WORKDIR}/deploy-${PN}-image-complete"
+BAREMETAL_BINNAME ?= "hello_baremetal_${MACHINE}"
+IMAGE_LINK_NAME ?= "baremetal-helloworld-image-${MACHINE}"
+IMAGE_NAME_SUFFIX ?= ""
+
+do_rootfs[dirs] = "${IMGDEPLOYDIR} ${DEPLOY_DIR_IMAGE}"
+
+do_image(){
+ install ${D}/${base_libdir}/firmware/${BAREMETAL_BINNAME}.bin ${IMGDEPLOYDIR}/${IMAGE_LINK_NAME}.bin
+ install ${D}/${base_libdir}/firmware/${BAREMETAL_BINNAME}.elf ${IMGDEPLOYDIR}/${IMAGE_LINK_NAME}.elf
+}
+
+do_image_complete(){
+ :
+}
+
+python do_rootfs(){
+ from oe.utils import execute_pre_post_process
+ from pathlib import Path
+
+ # Write empty manifest file to satisfy test infrastructure
+ deploy_dir = d.getVar('IMGDEPLOYDIR')
+ link_name = d.getVar('IMAGE_LINK_NAME')
+ manifest_name = d.getVar('IMAGE_MANIFEST')
+
+ Path(manifest_name).touch()
+ if os.path.exists(manifest_name) and link_name:
+ manifest_link = deploy_dir + "/" + link_name + ".manifest"
+ if os.path.lexists(manifest_link):
+ os.remove(manifest_link)
+ os.symlink(os.path.basename(manifest_name), manifest_link)
+ execute_pre_post_process(d, d.getVar('ROOTFS_POSTPROCESS_COMMAND'))
+}
+
+
+# Ensure binaries, manifest and qemuboot.conf are populated in DEPLOY_DIR_IMAGE
+do_image_complete[dirs] = "${TOPDIR}"
+do_image_complete[umask] = "022"
+SSTATETASKS += "do_image_complete"
+SSTATE_SKIP_CREATION_task-image-complete = '1'
+do_image_complete[sstate-inputdirs] = "${IMGDEPLOYDIR}"
+do_image_complete[sstate-outputdirs] = "${DEPLOY_DIR_IMAGE}"
+do_image_complete[stamp-extra-info] = "${MACHINE_ARCH}"
+addtask do_image_complete after do_image before do_build
+
+python do_image_complete_setscene () {
+ sstate_setscene(d)
+}
+addtask do_image_complete_setscene
+
+# QEMU generic Baremetal/RTOS parameters
+QB_DEFAULT_KERNEL ?= "${IMAGE_LINK_NAME}.bin"
+QB_MEM ?= "-m 256"
+QB_DEFAULT_FSTYPE ?= "bin"
+QB_DTB ?= ""
+QB_OPT_APPEND = "-nographic"
+
+# This next part is necessary to trick the build system into thinking
+# it's building an image recipe so it generates the qemuboot.conf
+addtask do_rootfs before do_image after do_install
+addtask do_image after do_rootfs before do_image_complete
+addtask do_image_complete after do_image before do_build
+inherit qemuboot
+
+# Based on image.bbclass to make sure we build qemu
+python(){
+ # do_addto_recipe_sysroot doesn't exist for all recipes, but we need it to have
+ # /usr/bin on recipe-sysroot (qemu) populated
+ def extraimage_getdepends(task):
+ deps = ""
+ for dep in (d.getVar('EXTRA_IMAGEDEPENDS') or "").split():
+ # Make sure we only add it for qemu
+ if 'qemu' in dep:
+ deps += " %s:%s" % (dep, task)
+ return deps
+ d.appendVarFlag('do_image', 'depends', extraimage_getdepends('do_addto_recipe_sysroot'))
+ d.appendVarFlag('do_image', 'depends', extraimage_getdepends('do_populate_sysroot'))
+}
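
For reference, a recipe using this class only needs to inherit it and override
the variables above. A minimal, hypothetical sketch, modelled on the
meta-skeleton example mentioned in the header (recipe and binary names are
invented):

    # my-rtos-app_0.1.bb (hypothetical)
    inherit baremetal-image

    # do_install is expected to place the binaries at
    # ${D}/${base_libdir}/firmware/${BAREMETAL_BINNAME}.bin / .elf
    BAREMETAL_BINNAME = "my-rtos-app-${MACHINE}"
    IMAGE_LINK_NAME   = "my-rtos-app-image-${MACHINE}"

do_image then copies the .bin/.elf into IMGDEPLOYDIR under IMAGE_LINK_NAME.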
diff --git a/poky/meta/classes/buildhistory.bbclass b/poky/meta/classes/buildhistory.bbclass
index eb7295570..0f26c3c07 100644
--- a/poky/meta/classes/buildhistory.bbclass
+++ b/poky/meta/classes/buildhistory.bbclass
@@ -7,6 +7,8 @@
# Copyright (C) 2007-2011 Koen Kooi <koen@openembedded.org>
#
+inherit image-artifact-names
+
BUILDHISTORY_FEATURES ?= "image package sdk"
BUILDHISTORY_DIR ?= "${TOPDIR}/buildhistory"
BUILDHISTORY_DIR_IMAGE = "${BUILDHISTORY_DIR}/images/${MACHINE_ARCH}/${TCLIBC}/${IMAGE_BASENAME}"
@@ -113,6 +115,7 @@ python buildhistory_emit_pkghistory() {
self.packages = ""
self.srcrev = ""
self.layer = ""
+ self.config = ""
class PackageInfo:
@@ -254,22 +257,18 @@ python buildhistory_emit_pkghistory() {
rcpinfo.depends = sortlist(oe.utils.squashspaces(d.getVar('DEPENDS') or ""))
rcpinfo.packages = packages
rcpinfo.layer = layer
+ rcpinfo.config = sortlist(oe.utils.squashspaces(d.getVar('PACKAGECONFIG') or ""))
write_recipehistory(rcpinfo, d)
- pkgdest = d.getVar('PKGDEST')
+ bb.build.exec_func("read_subpackage_metadata", d)
+
for pkg in packagelist:
- pkgdata = {}
- with open(os.path.join(pkgdata_dir, 'runtime', pkg)) as f:
- for line in f.readlines():
- item = line.rstrip('\n').split(': ', 1)
- key = item[0]
- if key.endswith('_' + pkg):
- key = key[:-len(pkg)-1]
- pkgdata[key] = item[1].encode('latin-1').decode('unicode_escape')
-
- pkge = pkgdata.get('PKGE', '0')
- pkgv = pkgdata['PKGV']
- pkgr = pkgdata['PKGR']
+ localdata = d.createCopy()
+ localdata.setVar('OVERRIDES', d.getVar("OVERRIDES", False) + ":" + pkg)
+
+ pkge = localdata.getVar("PKGE") or '0'
+ pkgv = localdata.getVar("PKGV")
+ pkgr = localdata.getVar("PKGR")
#
# Find out what the last version was
# Make sure the version did not decrease
@@ -286,31 +285,31 @@ python buildhistory_emit_pkghistory() {
pkginfo = PackageInfo(pkg)
# Apparently the version can be different on a per-package basis (see Python)
- pkginfo.pe = pkgdata.get('PE', '0')
- pkginfo.pv = pkgdata['PV']
- pkginfo.pr = pkgdata['PR']
- pkginfo.pkg = pkgdata['PKG']
+ pkginfo.pe = localdata.getVar("PE") or '0'
+ pkginfo.pv = localdata.getVar("PV")
+ pkginfo.pr = localdata.getVar("PR")
+ pkginfo.pkg = localdata.getVar("PKG")
pkginfo.pkge = pkge
pkginfo.pkgv = pkgv
pkginfo.pkgr = pkgr
- pkginfo.rprovides = sortpkglist(oe.utils.squashspaces(pkgdata.get('RPROVIDES', "")))
- pkginfo.rdepends = sortpkglist(oe.utils.squashspaces(pkgdata.get('RDEPENDS', "")))
- pkginfo.rrecommends = sortpkglist(oe.utils.squashspaces(pkgdata.get('RRECOMMENDS', "")))
- pkginfo.rsuggests = sortpkglist(oe.utils.squashspaces(pkgdata.get('RSUGGESTS', "")))
- pkginfo.rreplaces = sortpkglist(oe.utils.squashspaces(pkgdata.get('RREPLACES', "")))
- pkginfo.rconflicts = sortpkglist(oe.utils.squashspaces(pkgdata.get('RCONFLICTS', "")))
- pkginfo.files = oe.utils.squashspaces(pkgdata.get('FILES', ""))
+ pkginfo.rprovides = sortpkglist(oe.utils.squashspaces(localdata.getVar("RPROVIDES") or ""))
+ pkginfo.rdepends = sortpkglist(oe.utils.squashspaces(localdata.getVar("RDEPENDS") or ""))
+ pkginfo.rrecommends = sortpkglist(oe.utils.squashspaces(localdata.getVar("RRECOMMENDS") or ""))
+ pkginfo.rsuggests = sortpkglist(oe.utils.squashspaces(localdata.getVar("RSUGGESTS") or ""))
+ pkginfo.replaces = sortpkglist(oe.utils.squashspaces(localdata.getVar("RREPLACES") or ""))
+ pkginfo.rconflicts = sortpkglist(oe.utils.squashspaces(localdata.getVar("RCONFLICTS") or ""))
+ pkginfo.files = oe.utils.squashspaces(localdata.getVar("FILES") or "")
for filevar in pkginfo.filevars:
- pkginfo.filevars[filevar] = pkgdata.get(filevar, "")
+ pkginfo.filevars[filevar] = localdata.getVar(filevar) or ""
# Gather information about packaged files
- val = pkgdata.get('FILES_INFO', '')
+ val = localdata.getVar('FILES_INFO') or ''
dictval = json.loads(val)
filelist = list(dictval.keys())
filelist.sort()
pkginfo.filelist = " ".join([shlex.quote(x) for x in filelist])
- pkginfo.size = int(pkgdata['PKGSIZE'])
+ pkginfo.size = int(localdata.getVar('PKGSIZE') or '0')
write_pkghistory(pkginfo, d)
@@ -368,6 +367,7 @@ def write_recipehistory(rcpinfo, d):
f.write(u"DEPENDS = %s\n" % rcpinfo.depends)
f.write(u"PACKAGES = %s\n" % rcpinfo.packages)
f.write(u"LAYER = %s\n" % rcpinfo.layer)
+ f.write(u"CONFIG = %s\n" % rcpinfo.config)
write_latest_srcrev(d, pkghistdir)
@@ -426,8 +426,8 @@ def buildhistory_list_installed(d, rootfs_type="image"):
from oe.sdk import sdk_list_installed_packages
from oe.utils import format_pkg_list
- process_list = [('file', 'bh_installed_pkgs.txt'),\
- ('deps', 'bh_installed_pkgs_deps.txt')]
+ process_list = [('file', 'bh_installed_pkgs_%s.txt' % os.getpid()),\
+ ('deps', 'bh_installed_pkgs_deps_%s.txt' % os.getpid())]
if rootfs_type == "image":
pkgs = image_list_installed_packages(d)
@@ -457,9 +457,10 @@ buildhistory_get_installed() {
# Get list of installed packages
pkgcache="$1/installed-packages.tmp"
- cat ${WORKDIR}/bh_installed_pkgs.txt | sort > $pkgcache && rm ${WORKDIR}/bh_installed_pkgs.txt
+ cat ${WORKDIR}/bh_installed_pkgs_${PID}.txt | sort > $pkgcache && rm ${WORKDIR}/bh_installed_pkgs_${PID}.txt
cat $pkgcache | awk '{ print $1 }' > $1/installed-package-names.txt
+
if [ -s $pkgcache ] ; then
cat $pkgcache | awk '{ print $2 }' | xargs -n1 basename > $1/installed-packages.txt
else
@@ -468,8 +469,8 @@ buildhistory_get_installed() {
# Produce dependency graph
# First, quote each name to handle characters that cause issues for dot
- sed 's:\([^| ]*\):"\1":g' ${WORKDIR}/bh_installed_pkgs_deps.txt > $1/depends.tmp &&
- rm ${WORKDIR}/bh_installed_pkgs_deps.txt
+ sed 's:\([^| ]*\):"\1":g' ${WORKDIR}/bh_installed_pkgs_deps_${PID}.txt > $1/depends.tmp &&
+ rm ${WORKDIR}/bh_installed_pkgs_deps_${PID}.txt
# Remove lines with rpmlib(...) and config(...) dependencies, change the
# delimiter from pipe to "->", set the style for recommend lines and
# turn versioned dependencies into edge labels.
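
The per-package loop above relies on BitBake's override mechanics instead of
parsing pkgdata files by hand: once read_subpackage_metadata has loaded the
package metadata into the datastore, appending the package name to OVERRIDES
makes plain getVar() calls resolve the package-specific values. A minimal
sketch of the idiom (d is the datastore, pkg a package name):

    localdata = d.createCopy()
    localdata.setVar('OVERRIDES', d.getVar('OVERRIDES', False) + ':' + pkg)
    pkgv = localdata.getVar('PKGV')   # PKGV_<pkg> if defined, else PKGV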
diff --git a/poky/meta/classes/buildstats.bbclass b/poky/meta/classes/buildstats.bbclass
index 2590c60c6..6f8718723 100644
--- a/poky/meta/classes/buildstats.bbclass
+++ b/poky/meta/classes/buildstats.bbclass
@@ -80,8 +80,6 @@ def get_buildtimedata(var, d):
return timediff, cpuperc
def write_task_data(status, logfile, e, d):
- bn = d.getVar('BUILDNAME')
- bsdir = os.path.join(d.getVar('BUILDSTATS_BASE'), bn)
with open(os.path.join(logfile), "a") as f:
elapsedtime = get_timedata("__timedata_task", d, e.time)
if elapsedtime:
@@ -138,7 +136,7 @@ python run_buildstats () {
if x:
f.write(x + " ")
f.write("\n")
- f.write("Build Started: %0.2f \n" % time.time())
+ f.write("Build Started: %0.2f \n" % d.getVar('__timedata_build', False)[0])
elif isinstance(e, bb.event.BuildCompleted):
build_time = os.path.join(bsdir, "build_stats")
diff --git a/poky/meta/classes/cmake.bbclass b/poky/meta/classes/cmake.bbclass
index 94ed8061b..7c055e8a3 100644
--- a/poky/meta/classes/cmake.bbclass
+++ b/poky/meta/classes/cmake.bbclass
@@ -21,23 +21,6 @@ python() {
d.setVarFlag("do_compile", "progress", r"outof:^\[(\d+)/(\d+)\]\s+")
else:
bb.fatal("Unknown CMake Generator %s" % generator)
-
- # C/C++ Compiler (without cpu arch/tune arguments)
- if not d.getVar('OECMAKE_C_COMPILER'):
- cc_list = d.getVar('CC').split()
- if cc_list[0] == 'ccache':
- d.setVar('OECMAKE_C_COMPILER_LAUNCHER', cc_list[0])
- d.setVar('OECMAKE_C_COMPILER', cc_list[1])
- else:
- d.setVar('OECMAKE_C_COMPILER', cc_list[0])
-
- if not d.getVar('OECMAKE_CXX_COMPILER'):
- cxx_list = d.getVar('CXX').split()
- if cxx_list[0] == 'ccache':
- d.setVar('OECMAKE_CXX_COMPILER_LAUNCHER', cxx_list[0])
- d.setVar('OECMAKE_CXX_COMPILER', cxx_list[1])
- else:
- d.setVar('OECMAKE_CXX_COMPILER', cxx_list[0])
}
OECMAKE_AR ?= "${AR}"
@@ -51,8 +34,23 @@ OECMAKE_CXX_LINK_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CXXFLAGS} ${LD
CXXFLAGS += "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS}"
CFLAGS += "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS}"
-OECMAKE_C_COMPILER_LAUNCHER ?= ""
-OECMAKE_CXX_COMPILER_LAUNCHER ?= ""
+def oecmake_map_compiler(compiler, d):
+ args = d.getVar(compiler).split()
+ if args[0] == "ccache":
+ return args[1], args[0]
+ return args[0], ""
+
+# C/C++ Compiler (without cpu arch/tune arguments)
+OECMAKE_C_COMPILER ?= "${@oecmake_map_compiler('CC', d)[0]}"
+OECMAKE_C_COMPILER_LAUNCHER ?= "${@oecmake_map_compiler('CC', d)[1]}"
+OECMAKE_CXX_COMPILER ?= "${@oecmake_map_compiler('CXX', d)[0]}"
+OECMAKE_CXX_COMPILER_LAUNCHER ?= "${@oecmake_map_compiler('CXX', d)[1]}"
+
+# clear compiler vars for allarch to avoid sig hash difference
+OECMAKE_C_COMPILER_allarch = ""
+OECMAKE_C_COMPILER_LAUNCHER_allarch = ""
+OECMAKE_CXX_COMPILER_allarch = ""
+OECMAKE_CXX_COMPILER_LAUNCHER_allarch = ""
OECMAKE_RPATH ?= ""
OECMAKE_PERLNATIVE_DIR ??= ""
@@ -70,15 +68,22 @@ CMAKE_BUILD_PARALLEL_LEVEL_task-install = "${@oe.utils.parallel_make(d, True)}"
OECMAKE_TARGET_COMPILE ?= "all"
OECMAKE_TARGET_INSTALL ?= "install"
+def map_host_os_to_system_name(host_os):
+ if host_os.startswith('mingw'):
+ return 'Windows'
+ if host_os.startswith('linux'):
+ return 'Linux'
+ return host_os
+
# CMake expects target architectures in the format of uname(2),
# which do not always match TARGET_ARCH, so all the necessary
# conversions should happen here.
-def map_target_arch_to_uname_arch(target_arch):
- if target_arch == "powerpc":
+def map_host_arch_to_uname_arch(host_arch):
+ if host_arch == "powerpc":
return "ppc"
- if target_arch == "powerpc64":
+ if host_arch == "powerpc64":
return "ppc64"
- return target_arch
+ return host_arch
cmake_do_generate_toolchain_file() {
if [ "${BUILD_SYS}" = "${HOST_SYS}" ]; then
@@ -88,8 +93,8 @@ cmake_do_generate_toolchain_file() {
# CMake system name must be something like "Linux".
# This is important for cross-compiling.
$cmake_crosscompiling
-set( CMAKE_SYSTEM_NAME `echo ${TARGET_OS} | sed -e 's/^./\u&/' -e 's/^\(Linux\).*/\1/'` )
-set( CMAKE_SYSTEM_PROCESSOR ${@map_target_arch_to_uname_arch(d.getVar('TARGET_ARCH'))} )
+set( CMAKE_SYSTEM_NAME ${@map_host_os_to_system_name(d.getVar('HOST_OS'))} )
+set( CMAKE_SYSTEM_PROCESSOR ${@map_host_arch_to_uname_arch(d.getVar('HOST_ARCH'))} )
set( CMAKE_C_COMPILER ${OECMAKE_C_COMPILER} )
set( CMAKE_CXX_COMPILER ${OECMAKE_CXX_COMPILER} )
set( CMAKE_C_COMPILER_LAUNCHER ${OECMAKE_C_COMPILER_LAUNCHER} )
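
To illustrate oecmake_map_compiler: when ccache is enabled, the first word of
CC becomes the launcher and the second the compiler (values below are
illustrative):

    CC = "ccache x86_64-poky-linux-gcc"
    # oecmake_map_compiler('CC', d) -> ('x86_64-poky-linux-gcc', 'ccache')
    # so the generated toolchain file contains:
    #   set( CMAKE_C_COMPILER x86_64-poky-linux-gcc )
    #   set( CMAKE_C_COMPILER_LAUNCHER ccache )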
diff --git a/poky/meta/classes/cml1.bbclass b/poky/meta/classes/cml1.bbclass
index c7f6723cb..9b9866f4c 100644
--- a/poky/meta/classes/cml1.bbclass
+++ b/poky/meta/classes/cml1.bbclass
@@ -1,3 +1,13 @@
+# returns all the elements from the src uri that are .cfg files
+def find_cfgs(d):
+ sources=src_patches(d, True)
+ sources_list=[]
+ for s in sources:
+ if s.endswith('.cfg'):
+ sources_list.append(s)
+
+ return sources_list
+
cml1_do_configure() {
set -e
unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
@@ -17,12 +27,16 @@ CROSS_CURSES_INC = '-DCURSES_LOC="<curses.h>"'
TERMINFO = "${STAGING_DATADIR_NATIVE}/terminfo"
KCONFIG_CONFIG_COMMAND ??= "menuconfig"
+KCONFIG_CONFIG_ROOTDIR ??= "${B}"
python do_menuconfig() {
import shutil
+ config = os.path.join(d.getVar('KCONFIG_CONFIG_ROOTDIR'), ".config")
+ configorig = os.path.join(d.getVar('KCONFIG_CONFIG_ROOTDIR'), ".config.orig")
+
try:
- mtime = os.path.getmtime(".config")
- shutil.copy(".config", ".config.orig")
+ mtime = os.path.getmtime(config)
+ shutil.copy(config, configorig)
except OSError:
mtime = 0
@@ -32,7 +46,7 @@ python do_menuconfig() {
# FIXME this check can be removed when the minimum bitbake version has been bumped
if hasattr(bb.build, 'write_taint'):
try:
- newmtime = os.path.getmtime(".config")
+ newmtime = os.path.getmtime(config)
except OSError:
newmtime = 0
@@ -42,7 +56,7 @@ python do_menuconfig() {
}
do_menuconfig[depends] += "ncurses-native:do_populate_sysroot"
do_menuconfig[nostamp] = "1"
-do_menuconfig[dirs] = "${B}"
+do_menuconfig[dirs] = "${KCONFIG_CONFIG_ROOTDIR}"
addtask menuconfig after do_configure
python do_diffconfig() {
@@ -51,8 +65,8 @@ python do_diffconfig() {
workdir = d.getVar('WORKDIR')
fragment = workdir + '/fragment.cfg'
- configorig = '.config.orig'
- config = '.config'
+ configorig = os.path.join(d.getVar('KCONFIG_CONFIG_ROOTDIR'), ".config.orig")
+ config = os.path.join(d.getVar('KCONFIG_CONFIG_ROOTDIR'), ".config")
try:
md5newconfig = bb.utils.md5_file(configorig)
@@ -75,5 +89,5 @@ python do_diffconfig() {
}
do_diffconfig[nostamp] = "1"
-do_diffconfig[dirs] = "${B}"
+do_diffconfig[dirs] = "${KCONFIG_CONFIG_ROOTDIR}"
addtask diffconfig
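
KCONFIG_CONFIG_ROOTDIR defaults to ${B}, preserving the old behaviour; a
recipe whose .config lives elsewhere can now redirect the menuconfig and
diffconfig tasks, e.g. (hypothetical recipe fragment):

    KCONFIG_CONFIG_ROOTDIR = "${S}"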
diff --git a/poky/meta/classes/cve-check.bbclass b/poky/meta/classes/cve-check.bbclass
index 556ac6e67..25cefda92 100644
--- a/poky/meta/classes/cve-check.bbclass
+++ b/poky/meta/classes/cve-check.bbclass
@@ -27,25 +27,59 @@ CVE_VERSION ??= "${PV}"
CVE_CHECK_DB_DIR ?= "${DL_DIR}/CVE_CHECK"
CVE_CHECK_DB_FILE ?= "${CVE_CHECK_DB_DIR}/nvdcve_1.1.db"
+CVE_CHECK_DB_FILE_LOCK ?= "${CVE_CHECK_DB_FILE}.lock"
CVE_CHECK_LOG ?= "${T}/cve.log"
CVE_CHECK_TMP_FILE ?= "${TMPDIR}/cve_check"
+CVE_CHECK_SUMMARY_DIR ?= "${LOG_DIR}/cve"
+CVE_CHECK_SUMMARY_FILE_NAME ?= "cve-summary"
+CVE_CHECK_SUMMARY_FILE ?= "${CVE_CHECK_SUMMARY_DIR}/${CVE_CHECK_SUMMARY_FILE_NAME}"
CVE_CHECK_DIR ??= "${DEPLOY_DIR}/cve"
+CVE_CHECK_RECIPE_FILE ?= "${CVE_CHECK_DIR}/${PN}"
CVE_CHECK_MANIFEST ?= "${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cve"
CVE_CHECK_COPY_FILES ??= "1"
CVE_CHECK_CREATE_MANIFEST ??= "1"
+CVE_CHECK_REPORT_PATCHED ??= "1"
+
# Whitelist for packages (PN)
CVE_CHECK_PN_WHITELIST ?= ""
# Whitelist for CVE. If a CVE is found, then it is considered patched.
# The value is a string containing space separated CVE values:
-#
+#
# CVE_CHECK_WHITELIST = 'CVE-2014-2524 CVE-2018-1234'
-#
+#
CVE_CHECK_WHITELIST ?= ""
+python cve_save_summary_handler () {
+ import shutil
+ import datetime
+
+ cve_tmp_file = d.getVar("CVE_CHECK_TMP_FILE")
+
+ cve_summary_name = d.getVar("CVE_CHECK_SUMMARY_FILE_NAME")
+ cvelogpath = d.getVar("CVE_CHECK_SUMMARY_DIR")
+ bb.utils.mkdirhier(cvelogpath)
+
+ timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
+ cve_summary_file = os.path.join(cvelogpath, "%s-%s.txt" % (cve_summary_name, timestamp))
+
+ if os.path.exists(cve_tmp_file):
+ shutil.copyfile(cve_tmp_file, cve_summary_file)
+
+ if cve_summary_file and os.path.exists(cve_summary_file):
+ cvefile_link = os.path.join(cvelogpath, cve_summary_name)
+
+ if os.path.exists(os.path.realpath(cvefile_link)):
+ os.remove(cvefile_link)
+ os.symlink(os.path.basename(cve_summary_file), cvefile_link)
+}
+
+addhandler cve_save_summary_handler
+cve_save_summary_handler[eventmask] = "bb.event.BuildCompleted"
+
python do_cve_check () {
"""
Check recipe for patched and unpatched CVEs
@@ -66,7 +100,7 @@ python do_cve_check () {
}
addtask cve_check before do_build after do_fetch
-do_cve_check[depends] = "cve-update-db-native:do_populate_cve_db"
+do_cve_check[depends] = "cve-update-db-native:do_fetch"
do_cve_check[nostamp] = "1"
python cve_check_cleanup () {
@@ -87,7 +121,7 @@ python cve_check_write_rootfs_manifest () {
import shutil
if d.getVar("CVE_CHECK_COPY_FILES") == "1":
- deploy_file = os.path.join(d.getVar("CVE_CHECK_DIR"), d.getVar("PN"))
+ deploy_file = d.getVar("CVE_CHECK_RECIPE_FILE")
if os.path.exists(deploy_file):
bb.utils.remove(deploy_file)
@@ -300,12 +334,15 @@ def cve_write_data(d, patched, unpatched, whitelisted, cve_data):
bb.utils.mkdirhier(os.path.dirname(cve_file))
for cve in sorted(cve_data):
+ is_patched = cve in patched
+ if is_patched and (d.getVar("CVE_CHECK_REPORT_PATCHED") != "1"):
+ continue
write_string += "PACKAGE NAME: %s\n" % d.getVar("PN")
- write_string += "PACKAGE VERSION: %s\n" % d.getVar("PV")
+ write_string += "PACKAGE VERSION: %s%s\n" % (d.getVar("EXTENDPE"), d.getVar("PV"))
write_string += "CVE: %s\n" % cve
if cve in whitelisted:
write_string += "CVE STATUS: Whitelisted\n"
- elif cve in patched:
+ elif is_patched:
write_string += "CVE STATUS: Patched\n"
else:
unpatched_cves.append(cve)
@@ -319,17 +356,20 @@ def cve_write_data(d, patched, unpatched, whitelisted, cve_data):
if unpatched_cves:
bb.warn("Found unpatched CVE (%s), for more information check %s" % (" ".join(unpatched_cves),cve_file))
- with open(cve_file, "w") as f:
- bb.note("Writing file %s with CVE information" % cve_file)
- f.write(write_string)
-
- if d.getVar("CVE_CHECK_COPY_FILES") == "1":
- cve_dir = d.getVar("CVE_CHECK_DIR")
- bb.utils.mkdirhier(cve_dir)
- deploy_file = os.path.join(cve_dir, d.getVar("PN"))
- with open(deploy_file, "w") as f:
+ if write_string:
+ with open(cve_file, "w") as f:
+ bb.note("Writing file %s with CVE information" % cve_file)
f.write(write_string)
- if d.getVar("CVE_CHECK_CREATE_MANIFEST") == "1":
- with open(d.getVar("CVE_CHECK_TMP_FILE"), "a") as f:
- f.write("%s" % write_string)
+ if d.getVar("CVE_CHECK_COPY_FILES") == "1":
+ deploy_file = d.getVar("CVE_CHECK_RECIPE_FILE")
+ bb.utils.mkdirhier(os.path.dirname(deploy_file))
+ with open(deploy_file, "w") as f:
+ f.write(write_string)
+
+ if d.getVar("CVE_CHECK_CREATE_MANIFEST") == "1":
+ cvelogpath = d.getVar("CVE_CHECK_SUMMARY_DIR")
+ bb.utils.mkdirhier(cvelogpath)
+
+ with open(d.getVar("CVE_CHECK_TMP_FILE"), "a") as f:
+ f.write("%s" % write_string)
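
A local.conf sketch exercising the new knobs (the summary directory shown is
the default from above; the BuildCompleted handler writes a timestamped
cve-summary-<timestamp>.txt there and refreshes the cve-summary symlink):

    CVE_CHECK_REPORT_PATCHED = "0"            # omit already-patched CVEs
    CVE_CHECK_SUMMARY_DIR    = "${LOG_DIR}/cve"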
diff --git a/poky/meta/classes/deploy.bbclass b/poky/meta/classes/deploy.bbclass
index 6d5290878..737c26122 100644
--- a/poky/meta/classes/deploy.bbclass
+++ b/poky/meta/classes/deploy.bbclass
@@ -8,4 +8,5 @@ python do_deploy_setscene () {
}
addtask do_deploy_setscene
do_deploy[dirs] = "${DEPLOYDIR} ${B}"
+do_deploy[cleandirs] = "${DEPLOYDIR}"
do_deploy[stamp-extra-info] = "${MACHINE_ARCH}"
diff --git a/poky/meta/classes/features_check.bbclass b/poky/meta/classes/features_check.bbclass
index 876d32e31..b3c804786 100644
--- a/poky/meta/classes/features_check.bbclass
+++ b/poky/meta/classes/features_check.bbclass
@@ -1,23 +1,13 @@
-# Allow checking of required and conflicting DISTRO_FEATURES
+# Allow checking of required and conflicting features
#
-# ANY_OF_DISTRO_FEATURES: ensure at least one item on this list is included
-# in DISTRO_FEATURES.
-# REQUIRED_DISTRO_FEATURES: ensure every item on this list is included
-# in DISTRO_FEATURES.
-# CONFLICT_DISTRO_FEATURES: ensure no item in this list is included in
-# DISTRO_FEATURES.
-# ANY_OF_MACHINE_FEATURES: ensure at least one item on this list is included
-# in MACHINE_FEATURES.
-# REQUIRED_MACHINE_FEATURES: ensure every item on this list is included
-# in MACHINE_FEATURES.
-# CONFLICT_MACHINE_FEATURES: ensure no item in this list is included in
-# MACHINE_FEATURES.
-# ANY_OF_COMBINED_FEATURES: ensure at least one item on this list is included
-# in COMBINED_FEATURES.
-# REQUIRED_COMBINED_FEATURES: ensure every item on this list is included
-# in COMBINED_FEATURES.
-# CONFLICT_COMBINED_FEATURES: ensure no item in this list is included in
-# COMBINED_FEATURES.
+# xxx = [DISTRO,MACHINE,COMBINED]
+#
+# ANY_OF_xxx_FEATURES: ensure at least one item on this list is included
+# in xxx_FEATURES.
+# REQUIRED_xxx_FEATURES: ensure every item on this list is included
+# in xxx_FEATURES.
+# CONFLICT_xxx_FEATURES: ensure no item in this list is included in
+# xxx_FEATURES.
#
# Copyright 2019 (C) Texas Instruments Inc.
# Copyright 2013 (C) O.S. Systems Software LTDA.
@@ -26,63 +16,42 @@ python () {
if d.getVar('PARSE_ALL_RECIPES', False):
return
- # Assume at least one var is set.
- distro_features = set((d.getVar('DISTRO_FEATURES') or '').split())
-
- any_of_distro_features = set((d.getVar('ANY_OF_DISTRO_FEATURES') or '').split())
- if any_of_distro_features:
- if set.isdisjoint(any_of_distro_features, distro_features):
- raise bb.parse.SkipRecipe("one of '%s' needs to be in DISTRO_FEATURES" % ' '.join(any_of_distro_features))
-
- required_distro_features = set((d.getVar('REQUIRED_DISTRO_FEATURES') or '').split())
- if required_distro_features:
- missing = set.difference(required_distro_features, distro_features)
- if missing:
- raise bb.parse.SkipRecipe("missing required distro feature%s '%s' (not in DISTRO_FEATURES)" % ('s' if len(missing) > 1 else '', ' '.join(missing)))
-
- conflict_distro_features = set((d.getVar('CONFLICT_DISTRO_FEATURES') or '').split())
- if conflict_distro_features:
- conflicts = set.intersection(conflict_distro_features, distro_features)
- if conflicts:
- raise bb.parse.SkipRecipe("conflicting distro feature%s '%s' (in DISTRO_FEATURES)" % ('s' if len(conflicts) > 1 else '', ' '.join(conflicts)))
-
- # Assume at least one var is set.
- machine_features = set((d.getVar('MACHINE_FEATURES') or '').split())
-
- any_of_machine_features = set((d.getVar('ANY_OF_MACHINE_FEATURES') or '').split())
- if any_of_machine_features:
- if set.isdisjoint(any_of_machine_features, machine_features):
- raise bb.parse.SkipRecipe("one of '%s' needs to be in MACHINE_FEATURES" % ' '.join(any_of_machine_features))
-
- required_machine_features = set((d.getVar('REQUIRED_MACHINE_FEATURES') or '').split())
- if required_machine_features:
- missing = set.difference(required_machine_features, machine_features)
- if missing:
- raise bb.parse.SkipRecipe("missing required machine feature%s '%s' (not in MACHINE_FEATURES)" % ('s' if len(missing) > 1 else '', ' '.join(missing)))
-
- conflict_machine_features = set((d.getVar('CONFLICT_MACHINE_FEATURES') or '').split())
- if conflict_machine_features:
- conflicts = set.intersection(conflict_machine_features, machine_features)
- if conflicts:
- raise bb.parse.SkipRecipe("conflicting machine feature%s '%s' (in MACHINE_FEATURES)" % ('s' if len(conflicts) > 1 else '', ' '.join(conflicts)))
-
- # Assume at least one var is set.
- combined_features = set((d.getVar('COMBINED_FEATURES') or '').split())
-
- any_of_combined_features = set((d.getVar('ANY_OF_COMBINED_FEATURES') or '').split())
- if any_of_combined_features:
- if set.isdisjoint(any_of_combined_features, combined_features):
- raise bb.parse.SkipRecipe("one of '%s' needs to be in COMBINED_FEATURES" % ' '.join(any_of_combined_features))
-
- required_combined_features = set((d.getVar('REQUIRED_COMBINED_FEATURES') or '').split())
- if required_combined_features:
- missing = set.difference(required_combined_features, combined_features)
- if missing:
- raise bb.parse.SkipRecipe("missing required machine feature%s '%s' (not in COMBINED_FEATURES)" % ('s' if len(missing) > 1 else '', ' '.join(missing)))
-
- conflict_combined_features = set((d.getVar('CONFLICT_COMBINED_FEATURES') or '').split())
- if conflict_combined_features:
- conflicts = set.intersection(conflict_combined_features, combined_features)
- if conflicts:
- raise bb.parse.SkipRecipe("conflicting machine feature%s '%s' (in COMBINED_FEATURES)" % ('s' if len(conflicts) > 1 else '', ' '.join(conflicts)))
+ unused = True
+
+ for kind in ['DISTRO', 'MACHINE', 'COMBINED']:
+ if d.getVar('ANY_OF_' + kind + '_FEATURES') is None and \
+ d.overridedata.get('ANY_OF_' + kind + '_FEATURES') is None and \
+ d.getVar('REQUIRED_' + kind + '_FEATURES') is None and \
+ d.overridedata.get('REQUIRED_' + kind + '_FEATURES') is None and \
+ d.getVar('CONFLICT_' + kind + '_FEATURES') is None and \
+ d.overridedata.get('CONFLICT_' + kind + '_FEATURES') is None:
+ continue
+
+ unused = False
+
+ # Assume at least one var is set.
+ features = set((d.getVar(kind + '_FEATURES') or '').split())
+
+ any_of_features = set((d.getVar('ANY_OF_' + kind + '_FEATURES') or '').split())
+ if any_of_features:
+ if set.isdisjoint(any_of_features, features):
+ raise bb.parse.SkipRecipe("one of '%s' needs to be in %s_FEATURES"
+ % (' '.join(any_of_features), kind))
+
+ required_features = set((d.getVar('REQUIRED_' + kind + '_FEATURES') or '').split())
+ if required_features:
+ missing = set.difference(required_features, features)
+ if missing:
+ raise bb.parse.SkipRecipe("missing required %s feature%s '%s' (not in %s_FEATURES)"
+ % (kind.lower(), 's' if len(missing) > 1 else '', ' '.join(missing), kind))
+
+ conflict_features = set((d.getVar('CONFLICT_' + kind + '_FEATURES') or '').split())
+ if conflict_features:
+ conflicts = set.intersection(conflict_features, features)
+ if conflicts:
+ raise bb.parse.SkipRecipe("conflicting %s feature%s '%s' (in %s_FEATURES)"
+ % (kind.lower(), 's' if len(conflicts) > 1 else '', ' '.join(conflicts), kind))
+
+ if unused:
+ bb.warn("Recipe inherits features_check but doesn't use it")
}
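
With the generalised loop, all three namespaces use the same variable pattern;
a hypothetical recipe fragment:

    inherit features_check
    REQUIRED_DISTRO_FEATURES  = "wayland"
    CONFLICT_MACHINE_FEATURES = "x86"
    ANY_OF_COMBINED_FEATURES  = "alsa pulseaudio"

If none of the ANY_OF_*/REQUIRED_*/CONFLICT_* variables is set for any
namespace, the class now warns that it is inherited but unused.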
diff --git a/poky/meta/classes/go-mod.bbclass b/poky/meta/classes/go-mod.bbclass
index 5871d0250..cabb04d0e 100644
--- a/poky/meta/classes/go-mod.bbclass
+++ b/poky/meta/classes/go-mod.bbclass
@@ -12,7 +12,7 @@
# The '-modcacherw' option ensures we have write access to the cached objects so
# we avoid errors during clean task as well as when removing the TMPDIR.
-export GOBUILDFLAGS ?= "-v ${GO_LDFLAGS} -modcacherw"
+GOBUILDFLAGS_append = " -modcacherw"
inherit go
diff --git a/poky/meta/classes/gtk-icon-cache.bbclass b/poky/meta/classes/gtk-icon-cache.bbclass
index 91cb4ad40..340a28385 100644
--- a/poky/meta/classes/gtk-icon-cache.bbclass
+++ b/poky/meta/classes/gtk-icon-cache.bbclass
@@ -1,6 +1,15 @@
FILES_${PN} += "${datadir}/icons/hicolor"
-DEPENDS +=" ${@['hicolor-icon-theme', '']['${BPN}' == 'hicolor-icon-theme']} gtk+3-native"
+# gtk+3 requires GTK3DISTROFEATURES; depending on it makes all the
+# recipes that inherit this class require GTK3DISTROFEATURES
+inherit features_check
+ANY_OF_DISTRO_FEATURES = "${GTK3DISTROFEATURES}"
+
+DEPENDS +=" ${@['hicolor-icon-theme', '']['${BPN}' == 'hicolor-icon-theme']} \
+ ${@['gdk-pixbuf', '']['${BPN}' == 'gdk-pixbuf']} \
+ ${@['gtk+3', '']['${BPN}' == 'gtk+3']} \
+ gtk+3-native \
+"
PACKAGE_WRITE_DEPS += "gtk+3-native gdk-pixbuf-native"
@@ -48,9 +57,18 @@ python populate_packages_append () {
bb.note("adding hicolor-icon-theme dependency to %s" % pkg)
rdepends = ' ' + d.getVar('MLPREFIX', False) + "hicolor-icon-theme"
d.appendVar('RDEPENDS_%s' % pkg, rdepends)
-
+
+ # gtk_icon_cache_postinst depends on gdk-pixbuf and gtk+3
+ bb.note("adding gdk-pixbuf dependency to %s" % pkg)
+ rdepends = ' ' + d.getVar('MLPREFIX', False) + "gdk-pixbuf"
+ d.appendVar('RDEPENDS_%s' % pkg, rdepends)
+
+ bb.note("adding gtk+3 dependency to %s" % pkg)
+ rdepends = ' ' + d.getVar('MLPREFIX', False) + "gtk+3"
+ d.appendVar('RDEPENDS_%s' % pkg, rdepends)
+
bb.note("adding gtk-icon-cache postinst and postrm scripts to %s" % pkg)
-
+
postinst = d.getVar('pkg_postinst_%s' % pkg)
if not postinst:
postinst = '#!/bin/sh\n'
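
The DEPENDS additions above use the inline-Python list-indexing idiom:
['x', ''][cond] yields 'x' when cond is False (index 0) and '' when True, so
hicolor-icon-theme, gdk-pixbuf and gtk+3 never end up depending on themselves.
An equivalent, more explicit spelling (illustrative only):

    DEPENDS += "${@'' if d.getVar('BPN') == 'gdk-pixbuf' else 'gdk-pixbuf'}"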
diff --git a/poky/meta/classes/gtk-immodules-cache.bbclass b/poky/meta/classes/gtk-immodules-cache.bbclass
index 9bb0af8b2..8e783fb49 100644
--- a/poky/meta/classes/gtk-immodules-cache.bbclass
+++ b/poky/meta/classes/gtk-immodules-cache.bbclass
@@ -22,6 +22,7 @@ else
gtk-query-immodules-2.0 > ${libdir}/gtk-2.0/2.10.0/immodules.cache
fi
if [ ! -z `which gtk-query-immodules-3.0` ]; then
+ mkdir -p ${libdir}/gtk-3.0/3.0.0
gtk-query-immodules-3.0 > ${libdir}/gtk-3.0/3.0.0/immodules.cache
fi
fi
diff --git a/poky/meta/classes/image-artifact-names.bbclass b/poky/meta/classes/image-artifact-names.bbclass
new file mode 100644
index 000000000..3ac8dd731
--- /dev/null
+++ b/poky/meta/classes/image-artifact-names.bbclass
@@ -0,0 +1,15 @@
+##################################################################
+# Specific image creation and rootfs population info.
+##################################################################
+
+IMAGE_BASENAME ?= "${PN}"
+IMAGE_VERSION_SUFFIX ?= "-${DATETIME}"
+IMAGE_VERSION_SUFFIX[vardepsexclude] += "DATETIME"
+IMAGE_NAME ?= "${IMAGE_BASENAME}-${MACHINE}${IMAGE_VERSION_SUFFIX}"
+IMAGE_LINK_NAME ?= "${IMAGE_BASENAME}-${MACHINE}"
+
+# IMAGE_NAME is the base name for everything produced when building images.
+# The actual image that contains the rootfs has an additional suffix (.rootfs
+# by default) followed by additional suffixes which describe the format (.ext4,
+# .ext4.xz, etc.).
+IMAGE_NAME_SUFFIX ??= ".rootfs"
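
Putting the defaults together, a build of core-image-minimal for
MACHINE = "qemux86-64" yields names like (timestamp illustrative):

    IMAGE_NAME      -> core-image-minimal-qemux86-64-20200401120000
    IMAGE_LINK_NAME -> core-image-minimal-qemux86-64
    rootfs artifact -> core-image-minimal-qemux86-64-20200401120000.rootfs.ext4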
diff --git a/poky/meta/classes/image-live.bbclass b/poky/meta/classes/image-live.bbclass
index 54058b350..9ea5ddc31 100644
--- a/poky/meta/classes/image-live.bbclass
+++ b/poky/meta/classes/image-live.bbclass
@@ -22,7 +22,7 @@
# ${HDDIMG_ID} - FAT image volume-id
# ${ROOTFS} - indicates a filesystem image to include as the root filesystem (optional)
-inherit live-vm-common
+inherit live-vm-common image-artifact-names
do_bootimg[depends] += "dosfstools-native:do_populate_sysroot \
mtools-native:do_populate_sysroot \
diff --git a/poky/meta/classes/image.bbclass b/poky/meta/classes/image.bbclass
index 694b58fc9..730c843c1 100644
--- a/poky/meta/classes/image.bbclass
+++ b/poky/meta/classes/image.bbclass
@@ -548,14 +548,14 @@ def get_rootfs_size(d):
if rootfs_maxsize:
rootfs_maxsize_int = int(rootfs_maxsize)
if base_size > rootfs_maxsize_int:
- bb.fatal("The rootfs size %d(K) overrides IMAGE_ROOTFS_MAXSIZE: %d(K)" % \
+ bb.fatal("The rootfs size %d(K) exceeds IMAGE_ROOTFS_MAXSIZE: %d(K)" % \
(base_size, rootfs_maxsize_int))
# Check the initramfs size against INITRAMFS_MAXSIZE (if set)
if image_fstypes == initramfs_fstypes != '' and initramfs_maxsize:
initramfs_maxsize_int = int(initramfs_maxsize)
if base_size > initramfs_maxsize_int:
- bb.error("The initramfs size %d(K) overrides INITRAMFS_MAXSIZE: %d(K)" % \
+ bb.error("The initramfs size %d(K) exceeds INITRAMFS_MAXSIZE: %d(K)" % \
(base_size, initramfs_maxsize_int))
bb.error("You can set INITRAMFS_MAXSIZE a larger value. Usually, it should")
bb.fatal("be less than 1/2 of ram size, or you may fail to boot it.\n")
@@ -651,7 +651,7 @@ reproducible_final_image_task () {
if [ "${BUILD_REPRODUCIBLE_BINARIES}" = "1" ]; then
if [ "$REPRODUCIBLE_TIMESTAMP_ROOTFS" = "" ]; then
REPRODUCIBLE_TIMESTAMP_ROOTFS=`git -C "${COREBASE}" log -1 --pretty=%ct 2>/dev/null` || true
- if [ "${REPRODUCIBLE_TIMESTAMP_ROOTFS}" = "" ]; then
+ if [ "$REPRODUCIBLE_TIMESTAMP_ROOTFS" = "" ]; then
REPRODUCIBLE_TIMESTAMP_ROOTFS=`stat -c%Y ${@bb.utils.which(d.getVar("BBPATH"), "conf/bitbake.conf")}`
fi
fi
diff --git a/poky/meta/classes/image_types.bbclass b/poky/meta/classes/image_types.bbclass
index ab05cc90f..66884af8e 100644
--- a/poky/meta/classes/image_types.bbclass
+++ b/poky/meta/classes/image_types.bbclass
@@ -1,9 +1,3 @@
-# IMAGE_NAME is the base name for everything produced when building images.
-# The actual image that contains the rootfs has an additional suffix (.rootfs
-# by default) followed by additional suffices which describe the format (.ext4,
-# .ext4.xz, etc.).
-IMAGE_NAME_SUFFIX ??= ".rootfs"
-
# The default alignment of the size of the rootfs is set to 1KiB. In case
# you're using the SD card emulation of a QEMU system simulator you may
# set this value to 2048 (2MiB alignment).
@@ -231,7 +225,8 @@ IMAGE_CMD_f2fs () {
EXTRA_IMAGECMD = ""
-inherit siteinfo kernel-arch
+inherit siteinfo kernel-arch image-artifact-names
+
JFFS2_ENDIANNESS ?= "${@oe.utils.conditional('SITEINFO_ENDIANNESS', 'le', '-l', '-b', d)}"
JFFS2_ERASEBLOCK ?= "0x40000"
EXTRA_IMAGECMD_jffs2 ?= "--pad ${JFFS2_ENDIANNESS} --eraseblock=${JFFS2_ERASEBLOCK} --no-cleanmarkers"
diff --git a/poky/meta/classes/image_types_wic.bbclass b/poky/meta/classes/image_types_wic.bbclass
index 7b1db50a2..4f888ef6e 100644
--- a/poky/meta/classes/image_types_wic.bbclass
+++ b/poky/meta/classes/image_types_wic.bbclass
@@ -1,10 +1,11 @@
# The WICVARS variable is used to define list of bitbake variables used in wic code
# variables from this list is written to <image>.env file
WICVARS ?= "\
- BBLAYERS IMGDEPLOYDIR DEPLOY_DIR_IMAGE FAKEROOTCMD IMAGE_BASENAME IMAGE_BOOT_FILES \
+ BBLAYERS IMGDEPLOYDIR DEPLOY_DIR_IMAGE FAKEROOTCMD IMAGE_BASENAME IMAGE_EFI_BOOT_FILES IMAGE_BOOT_FILES \
IMAGE_LINK_NAME IMAGE_ROOTFS INITRAMFS_FSTYPES INITRD INITRD_LIVE ISODIR RECIPE_SYSROOT_NATIVE \
ROOTFS_SIZE STAGING_DATADIR STAGING_DIR STAGING_LIBDIR TARGET_SYS \
- KERNEL_IMAGETYPE MACHINE INITRAMFS_IMAGE INITRAMFS_IMAGE_BUNDLE INITRAMFS_LINK_NAME APPEND"
+ KERNEL_IMAGETYPE MACHINE INITRAMFS_IMAGE INITRAMFS_IMAGE_BUNDLE INITRAMFS_LINK_NAME APPEND \
+ ASSUME_PROVIDED"
inherit ${@bb.utils.contains('INITRAMFS_IMAGE_BUNDLE', '1', 'kernel-artifact-names', '', d)}
diff --git a/poky/meta/classes/insane.bbclass b/poky/meta/classes/insane.bbclass
index b7c613880..c6dff9659 100644
--- a/poky/meta/classes/insane.bbclass
+++ b/poky/meta/classes/insane.bbclass
@@ -26,7 +26,8 @@ WARN_QA ?= " libdir xorg-driver-abi \
textrel incompatible-license files-invalid \
infodir build-deps src-uri-bad symlink-to-sysroot multilib \
invalid-packageconfig host-user-contaminated uppercase-pn patch-fuzz \
- mime mime-xdg unlisted-pkg-lics \
+ mime mime-xdg unlisted-pkg-lics unhandled-features-check \
+ missing-update-alternatives \
"
ERROR_QA ?= "dev-so debug-deps dev-deps debug-files arch pkgconfig la \
perms dep-cmp pkgvarcheck perm-config perm-line perm-link \
@@ -364,14 +365,14 @@ def package_qa_check_arch(path,name,d, elf, messages):
target_os == "linux-gnu_ilp32" or re.match(r'mips64.*32', d.getVar('DEFAULTTUNE')))
is_bpf = (oe.qa.elf_machine_to_string(elf.machine()) == "BPF")
if not ((machine == elf.machine()) or is_32 or is_bpf):
- package_qa_add_message(messages, "arch", "Architecture did not match (%s, expected %s) on %s" % \
- (oe.qa.elf_machine_to_string(elf.machine()), oe.qa.elf_machine_to_string(machine), package_qa_clean_path(path,d)))
+ package_qa_add_message(messages, "arch", "Architecture did not match (%s, expected %s) in %s" % \
+ (oe.qa.elf_machine_to_string(elf.machine()), oe.qa.elf_machine_to_string(machine), package_qa_clean_path(path, d, name)))
elif not ((bits == elf.abiSize()) or is_32 or is_bpf):
- package_qa_add_message(messages, "arch", "Bit size did not match (%d to %d) %s on %s" % \
- (bits, elf.abiSize(), bpn, package_qa_clean_path(path,d)))
+ package_qa_add_message(messages, "arch", "Bit size did not match (%d, expected %d) in %s" % \
+ (elf.abiSize(), bits, package_qa_clean_path(path, d, name)))
elif not ((littleendian == elf.isLittleEndian()) or is_bpf):
- package_qa_add_message(messages, "arch", "Endiannes did not match (%d to %d) on %s" % \
- (littleendian, elf.isLittleEndian(), package_qa_clean_path(path,d)))
+ package_qa_add_message(messages, "arch", "Endiannes did not match (%d, expected %d) in %s" % \
+ (elf.isLittleEndian(), littleendian, package_qa_clean_path(path,d, name)))
QAPATHTEST[desktop] = "package_qa_check_desktop"
def package_qa_check_desktop(path, name, d, elf, messages):
@@ -438,12 +439,13 @@ def package_qa_hash_style(path, name, d, elf, messages):
for line in phdrs.split("\n"):
if "SYMTAB" in line:
has_syms = True
- if "GNU_HASH" or "DT_MIPS_XHASH" in line:
+ if "GNU_HASH" in line or "DT_MIPS_XHASH" in line:
sane = True
if ("[mips32]" in line or "[mips64]" in line) and d.getVar('TCLIBC') == "musl":
sane = True
if has_syms and not sane:
- package_qa_add_message(messages, "ldflags", "No GNU_HASH in the ELF binary %s, didn't pass LDFLAGS?" % path)
+ path = package_qa_clean_path(path, d, name)
+ package_qa_add_message(messages, "ldflags", "File %s in package %s doesn't have GNU_HASH (didn't pass LDFLAGS?)" % (path, name))
QAPATHTEST[buildpaths] = "package_qa_check_buildpaths"
@@ -459,10 +461,6 @@ def package_qa_check_buildpaths(path, name, d, elf, messages):
if os.path.islink(path):
return
- # Ignore ipk and deb's CONTROL dir
- if path.find(name + "/CONTROL/") != -1 or path.find(name + "/DEBIAN/") != -1:
- return
-
tmpdir = bytes(d.getVar('TMPDIR'), encoding="utf-8")
with open(path, 'rb') as f:
file_content = f.read()
@@ -712,12 +710,13 @@ def package_qa_walk(warnfuncs, errorfuncs, package, d):
warnings = {}
errors = {}
for path in pkgfiles[package]:
- elf = oe.qa.ELFFile(path)
- try:
- elf.open()
- except (IOError, oe.qa.NotELFFileError):
- # IOError can happen if the packaging control files disappear,
- elf = None
+ elf = None
+ if os.path.isfile(path):
+ elf = oe.qa.ELFFile(path)
+ try:
+ elf.open()
+ except oe.qa.NotELFFileError:
+ elf = None
for func in warnfuncs:
func(path, package, d, elf, warnings)
for func in errorfuncs:
@@ -980,6 +979,24 @@ def package_qa_check_src_uri(pn, d, messages):
if re.search(r"github\.com/.+/.+/archive/.+", url):
package_qa_handle_error("src-uri-bad", "%s: SRC_URI uses unstable GitHub archives" % pn, d)
+QARECIPETEST[unhandled-features-check] = "package_qa_check_unhandled_features_check"
+def package_qa_check_unhandled_features_check(pn, d, messages):
+ if not bb.data.inherits_class('features_check', d):
+ var_set = False
+ for kind in ['DISTRO', 'MACHINE', 'COMBINED']:
+ for var in ['ANY_OF_' + kind + '_FEATURES', 'REQUIRED_' + kind + '_FEATURES', 'CONFLICT_' + kind + '_FEATURES']:
+ if d.getVar(var) is not None or d.overridedata.get(var) is not None:
+ var_set = True
+ if var_set:
+ package_qa_handle_error("unhandled-features-check", "%s: recipe doesn't inherit features_check" % pn, d)
+
+QARECIPETEST[missing-update-alternatives] = "package_qa_check_missing_update_alternatives"
+def package_qa_check_missing_update_alternatives(pn, d, messages):
+ # Look at all packages and find out if any of them sets the ALTERNATIVE
+ # variable without inheriting the update-alternatives class
+ for pkg in (d.getVar('PACKAGES') or '').split():
+ if d.getVar('ALTERNATIVE_%s' % pkg) and not bb.data.inherits_class('update-alternatives', d):
+ package_qa_handle_error("missing-update-alternatives", "%s: recipe defines ALTERNATIVE_%s but doesn't inherit update-alternatives. This might fail during do_rootfs later!" % (pn, pkg), d)
# The PACKAGE FUNC to scan each package
python do_package_qa () {
@@ -1024,7 +1041,14 @@ python do_package_qa () {
pkgfiles = {}
for pkg in packages:
pkgfiles[pkg] = []
- for walkroot, dirs, files in os.walk(os.path.join(pkgdest, pkg)):
+ pkgdir = os.path.join(pkgdest, pkg)
+ for walkroot, dirs, files in os.walk(pkgdir):
+ # Don't walk into top-level CONTROL or DEBIAN directories as these
+ # are temporary directories created by do_package.
+ if walkroot == pkgdir:
+ for control in ("CONTROL", "DEBIAN"):
+ if control in dirs:
+ dirs.remove(control)
for file in files:
pkgfiles[pkg].append(os.path.join(walkroot, file))
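
Hypothetical recipe fragments that would trigger the two new recipe-wide
checks:

    # unhandled-features-check: variable set without 'inherit features_check'
    REQUIRED_DISTRO_FEATURES = "x11"

    # missing-update-alternatives: ALTERNATIVE_<pkg> set without
    # 'inherit update-alternatives'
    ALTERNATIVE_${PN} = "vi"

Both default to warnings; if undesired they can be dropped globally with
WARN_QA_remove = "unhandled-features-check missing-update-alternatives".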
diff --git a/poky/meta/classes/kernel-artifact-names.bbclass b/poky/meta/classes/kernel-artifact-names.bbclass
index bbeecba7b..a65cdddb3 100644
--- a/poky/meta/classes/kernel-artifact-names.bbclass
+++ b/poky/meta/classes/kernel-artifact-names.bbclass
@@ -1,3 +1,11 @@
+##################################################################
+# Specific kernel creation info
+# for recipes/bbclasses which need to reuse some of the kernel
+# artifacts, but aren't kernel recipes themselves
+##################################################################
+
+inherit image-artifact-names
+
KERNEL_ARTIFACT_NAME ?= "${PKGE}-${PKGV}-${PKGR}-${MACHINE}${IMAGE_VERSION_SUFFIX}"
KERNEL_ARTIFACT_LINK_NAME ?= "${MACHINE}"
diff --git a/poky/meta/classes/kernel-devicetree.bbclass b/poky/meta/classes/kernel-devicetree.bbclass
index 522c46575..81dda8003 100644
--- a/poky/meta/classes/kernel-devicetree.bbclass
+++ b/poky/meta/classes/kernel-devicetree.bbclass
@@ -52,7 +52,7 @@ do_configure_append() {
do_compile_append() {
for dtbf in ${KERNEL_DEVICETREE}; do
dtb=`normalize_dtb "$dtbf"`
- oe_runmake $dtb
+ oe_runmake $dtb CC="${KERNEL_CC} $cc_extra " LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS}
done
}
diff --git a/poky/meta/classes/kernel-fitimage.bbclass b/poky/meta/classes/kernel-fitimage.bbclass
index 72b05ff8d..fa4ea6fee 100644
--- a/poky/meta/classes/kernel-fitimage.bbclass
+++ b/poky/meta/classes/kernel-fitimage.bbclass
@@ -257,12 +257,21 @@ fitimage_emit_section_config() {
# Test if we have any DTBs at all
sep=""
conf_desc=""
+ conf_node="conf@"
kernel_line=""
fdt_line=""
ramdisk_line=""
setup_line=""
default_line=""
+ # conf node name is selected based on dtb ID if it is present,
+ # otherwise it's selected based on kernel ID
+ if [ -n "${3}" ]; then
+ conf_node=$conf_node${3}
+ else
+ conf_node=$conf_node${2}
+ fi
+
if [ -n "${2}" ]; then
conf_desc="Linux kernel"
sep=", "
@@ -287,12 +296,18 @@ fitimage_emit_section_config() {
fi
if [ "${6}" = "1" ]; then
- default_line="default = \"conf@${3}\";"
+ # default node is selected based on dtb ID if it is present,
+ # otherwise it's selected based on kernel ID
+ if [ -n "${3}" ]; then
+ default_line="default = \"conf@${3}\";"
+ else
+ default_line="default = \"conf@${2}\";"
+ fi
fi
cat << EOF >> ${1}
${default_line}
- conf@${3} {
+ $conf_node {
description = "${6} ${conf_desc}";
${kernel_line}
${fdt_line}
@@ -434,6 +449,13 @@ fitimage_assemble() {
#
fitimage_emit_section_maint ${1} confstart
+ # kernel-fitimage.bbclass currently supports exactly one kernel to be added
+ # to the FIT image, along with 0 or more device trees and 0 or 1 ramdisk.
+ # If a device tree is to be part of the FIT image, the default configuration
+ # is selected based on the dtb count; if no dtb is present, the default
+ # configuration is selected based on the kernel count.
if [ -n "${DTBS}" ]; then
i=1
for DTB in ${DTBS}; do
@@ -445,6 +467,9 @@ fitimage_assemble() {
fi
i=`expr ${i} + 1`
done
+ else
+ defaultconfigcount=1
+ fitimage_emit_section_config ${1} "${kernelcount}" "" "${ramdiskcount}" "${setupcount}" "${defaultconfigcount}"
fi
fitimage_emit_section_maint ${1} sectend
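
Before this change, assembling a FIT image with no device tree produced a
malformed configuration node name ("conf@" with an empty ID). A sketch of the
ITS output now emitted in the kernel-only case (IDs illustrative):

    configurations {
            default = "conf@1";
            conf@1 {
                    kernel = "kernel@1";
                    ...
            };
    };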
diff --git a/poky/meta/classes/kernel-yocto.bbclass b/poky/meta/classes/kernel-yocto.bbclass
index 5bc627066..35587dd56 100644
--- a/poky/meta/classes/kernel-yocto.bbclass
+++ b/poky/meta/classes/kernel-yocto.bbclass
@@ -18,6 +18,7 @@ SRCREV_FORMAT ?= "meta_machine"
KCONF_AUDIT_LEVEL ?= "1"
KCONF_BSP_AUDIT_LEVEL ?= "0"
KMETA_AUDIT ?= "yes"
+KMETA_AUDIT_WERROR ?= ""
# returns local (absolute) path names for all valid patches in the
# src_uri
@@ -85,8 +86,30 @@ def get_machine_branch(d, default):
return default
+# returns a space-separated list of "path:exists" entries for every directory
+# on FILESEXTRAPATHS (and hence available to the build); used as file-checksums
+# so do_kernel_metadata re-runs when fragment (.scc/.cfg) directories change
+def get_dirs_with_fragments(d):
+ extrapaths = []
+ extrafiles = []
+ extrapathsvalue = (d.getVar("FILESEXTRAPATHS") or "")
+ # Remove default flag which was used for checking
+ extrapathsvalue = extrapathsvalue.replace("__default:", "")
+ extrapaths = extrapathsvalue.split(":")
+ for path in extrapaths:
+ if path + ":True" not in extrafiles:
+ extrafiles.append(path + ":" + str(os.path.exists(path)))
+
+ return " ".join(extrafiles)
+
do_kernel_metadata() {
set +e
+
+ if [ -n "$1" ]; then
+ mode="$1"
+ else
+ mode="patch"
+ fi
+
cd ${S}
export KMETA=${KMETA}
@@ -120,47 +143,59 @@ do_kernel_metadata() {
if [ -n "${KBUILD_DEFCONFIG}" ]; then
if [ -f "${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG}" ]; then
if [ -f "${WORKDIR}/defconfig" ]; then
- # If the two defconfig's are different, warn that we didn't overwrite the
- # one already placed in WORKDIR by the fetcher.
+ # If the two defconfigs are different, warn that we overwrote the
+ # one already placed in WORKDIR.
cmp "${WORKDIR}/defconfig" "${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG}"
if [ $? -ne 0 ]; then
- bbwarn "defconfig detected in WORKDIR. ${KBUILD_DEFCONFIG} skipped"
- else
- cp -f ${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG} ${WORKDIR}/defconfig
+ bbdebug 1 "detected SRC_URI or unpatched defconfig in WORKDIR. ${KBUILD_DEFCONFIG} copied over it"
fi
+ cp -f ${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG} ${WORKDIR}/defconfig
else
cp -f ${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG} ${WORKDIR}/defconfig
fi
- sccs="${WORKDIR}/defconfig"
+ in_tree_defconfig="${WORKDIR}/defconfig"
else
- bbfatal "A KBUILD_DEFCONFIG '${KBUILD_DEFCONFIG}' was specified, but not present in the source tree"
+ bbfatal "A KBUILD_DEFCONFIG '${KBUILD_DEFCONFIG}' was specified, but not present in the source tree (${S}/arch/${ARCH}/configs/)"
fi
fi
- # was anyone trying to patch the kernel meta data ?, we need to do
- # this here, since the scc commands migrate the .cfg fragments to the
- # kernel source tree, where they'll be used later.
- check_git_config
- patches="${@" ".join(find_patches(d,'kernel-meta'))}"
- for p in $patches; do
- (
- cd ${WORKDIR}/kernel-meta
- git am -s $p
- )
- done
+ if [ "$mode" = "patch" ]; then
+ # was anyone trying to patch the kernel meta data? We need to do
+ # this here, since the scc commands migrate the .cfg fragments to the
+ # kernel source tree, where they'll be used later.
+ check_git_config
+ patches="${@" ".join(find_patches(d,'kernel-meta'))}"
+ for p in $patches; do
+ (
+ cd ${WORKDIR}/kernel-meta
+ git am -s $p
+ )
+ done
+ fi
sccs_from_src_uri="${@" ".join(find_sccs(d))}"
patches="${@" ".join(find_patches(d,''))}"
feat_dirs="${@" ".join(find_kernel_feature_dirs(d))}"
- # a quick check to make sure we don't have duplicate defconfigs
- # If there's a defconfig in the SRC_URI, did we also have one from
- # the KBUILD_DEFCONFIG processing above ?
- if [ -n "$sccs" ]; then
- # we did have a defconfig from above. remove any that might be in the src_uri
- sccs_from_src_uri=$(echo $sccs_from_src_uri | awk '{ if ($0!="defconfig") { print $0 } }' RS=' ')
+ # a quick check to make sure we don't have duplicate defconfigs. If
+ # there's a defconfig in the SRC_URI, did we also have one from the
+ # KBUILD_DEFCONFIG processing above?
+ src_uri_defconfig=$(echo $sccs_from_src_uri | awk '(match($0, "defconfig") != 0) { print $0 }' RS=' ')
+ # drop any defconfigs from the src_uri variable; we captured it just above if it existed
+ sccs_from_src_uri=$(echo $sccs_from_src_uri | awk '(match($0, "defconfig") == 0) { print $0 }' RS=' ')
+
+ if [ -n "$in_tree_defconfig" ]; then
+ sccs_defconfig=$in_tree_defconfig
+ if [ -n "$src_uri_defconfig" ]; then
+ bbwarn "[NOTE]: defconfig was supplied both via KBUILD_DEFCONFIG and SRC_URI. Dropping SRC_URI defconfig"
+ fi
+ else
+ # if we didn't have an in-tree one, make our defconfig the one
+ # from the src_uri. Note: there may not have been one from the
+ # src_uri, so this can be an empty variable.
+ sccs_defconfig=$src_uri_defconfig
fi
- sccs="$sccs $sccs_from_src_uri"
+ sccs="$sccs_from_src_uri"
# check for feature directories/repos/branches that were part of the
# SRC_URI. If they were supplied, we convert them into include directives
@@ -187,11 +222,10 @@ do_kernel_metadata() {
# expand kernel features into their full path equivalents
bsp_definition=$(spp ${includes} --find -DKMACHINE=${KMACHINE} -DKTYPE=${LINUX_KERNEL_TYPE})
if [ -z "$bsp_definition" ]; then
- echo "$sccs" | grep -q defconfig
- if [ $? -ne 0 ]; then
+ if [ -z "$sccs_defconfig" ]; then
bbfatal_log "Could not locate BSP definition for ${KMACHINE}/${LINUX_KERNEL_TYPE} and no defconfig was provided"
fi
-
+ else
# if the bsp definition has "define KMETA_EXTERNAL_BSP t",
# then we need to set a flag that will instruct the next
# steps to use the BSP as both configuration and patches.
@@ -202,13 +236,40 @@ do_kernel_metadata() {
fi
meta_dir=$(kgit --meta)
- # run1: pull all the configuration fragments, no matter where they come from
- elements="`echo -n ${bsp_definition} ${sccs} ${patches} ${KERNEL_FEATURES}`"
- if [ -n "${elements}" ]; then
- echo "${bsp_definition}" > ${S}/${meta_dir}/bsp_definition
- scc --force -o ${S}/${meta_dir}:cfg,merge,meta ${includes} ${bsp_definition} ${sccs} ${patches} ${KERNEL_FEATURES}
- if [ $? -ne 0 ]; then
- bbfatal_log "Could not generate configuration queue for ${KMACHINE}."
+ KERNEL_FEATURES_FINAL=""
+ if [ -n "${KERNEL_FEATURES}" ]; then
+ for feature in ${KERNEL_FEATURES}; do
+ feature_found=f
+ for d in $includes; do
+ path_to_check=$(echo $d | sed 's/^-I//')
+ if [ "$feature_found" = "f" ] && [ -e "$path_to_check/$feature" ]; then
+ feature_found=t
+ fi
+ done
+ if [ "$feature_found" = "f" ]; then
+ if [ -n "${KERNEL_DANGLING_FEATURES_WARN_ONLY}" ]; then
+ bbwarn "Feature '$feature' not found, but KERNEL_DANGLING_FEATURES_WARN_ONLY is set"
+ bbwarn "This may cause runtime issues, dropping feature and allowing configuration to continue"
+ else
+ bberror "Feature '$feature' not found, this will cause configuration failures."
+ bberror "Check the SRC_URI for meta-data repositories or directories that may be missing"
+ bbfatal_log "Set KERNEL_DANGLING_FEATURES_WARN_ONLY to ignore this issue"
+ fi
+ else
+ KERNEL_FEATURES_FINAL="$KERNEL_FEATURES_FINAL $feature"
+ fi
+ done
+ fi
+
+ if [ "$mode" = "config" ]; then
+ # run1: pull all the configuration fragments, no matter where they come from
+ elements="`echo -n ${bsp_definition} $sccs_defconfig ${sccs} ${patches} $KERNEL_FEATURES_FINAL`"
+ if [ -n "${elements}" ]; then
+ echo "${bsp_definition}" > ${S}/${meta_dir}/bsp_definition
+ scc --force -o ${S}/${meta_dir}:cfg,merge,meta ${includes} $sccs_defconfig $bsp_definition $sccs $patches $KERNEL_FEATURES_FINAL
+ if [ $? -ne 0 ]; then
+ bbfatal_log "Could not generate configuration queue for ${KMACHINE}."
+ fi
fi
fi
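The KERNEL_FEATURES_FINAL loop above is the new dangling-feature guard: every KERNEL_FEATURES entry must resolve somewhere on the include path assembled from the SRC_URI metadata, otherwise the build stops (or merely warns and drops the feature when KERNEL_DANGLING_FEATURES_WARN_ONLY is set). A hedged Python sketch of that filter, with hypothetical names:

import os

def filter_kernel_features(features, include_dirs, warn_only=False):
    """Keep only features that exist under one of the include dirs."""
    kept = []
    for feature in features:
        found = any(os.path.exists(os.path.join(p, feature))
                    for p in include_dirs)
        if found:
            kept.append(feature)
        elif warn_only:
            print("WARNING: feature %r not found, dropping it" % feature)
        else:
            raise RuntimeError("Feature %r not found; set "
                               "KERNEL_DANGLING_FEATURES_WARN_ONLY to "
                               "demote this to a warning" % feature)
    return kept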
@@ -219,12 +280,14 @@ do_kernel_metadata() {
sccs="${bsp_definition} ${sccs}"
fi
- # run2: only generate patches for elements that have been passed on the SRC_URI
- elements="`echo -n ${sccs} ${patches} ${KERNEL_FEATURES}`"
- if [ -n "${elements}" ]; then
- scc --force -o ${S}/${meta_dir}:patch --cmds patch ${includes} ${sccs} ${patches} ${KERNEL_FEATURES}
- if [ $? -ne 0 ]; then
- bbfatal_log "Could not generate configuration queue for ${KMACHINE}."
+ if [ "$mode" = "patch" ]; then
+ # run2: only generate patches for elements that have been passed on the SRC_URI
+ elements="`echo -n ${sccs} ${patches} $KERNEL_FEATURES_FINAL`"
+ if [ -n "${elements}" ]; then
+ scc --force -o ${S}/${meta_dir}:patch --cmds patch ${includes} ${sccs} ${patches} $KERNEL_FEATURES_FINAL
+ if [ $? -ne 0 ]; then
+ bbfatal_log "Could not generate configuration queue for ${KMACHINE}."
+ fi
fi
fi
}
@@ -320,6 +383,7 @@ do_kernel_checkout[dirs] = "${S}"
addtask kernel_checkout before do_kernel_metadata after do_symlink_kernsrc
addtask kernel_metadata after do_validate_branches do_unpack before do_patch
do_kernel_metadata[depends] = "kern-tools-native:do_populate_sysroot"
+do_kernel_metadata[file-checksums] = " ${@get_dirs_with_fragments(d)}"
do_validate_branches[depends] = "kern-tools-native:do_populate_sysroot"
do_kernel_configme[depends] += "virtual/${TARGET_PREFIX}binutils:do_populate_sysroot"
@@ -328,6 +392,8 @@ do_kernel_configme[depends] += "bc-native:do_populate_sysroot bison-native:do_po
do_kernel_configme[depends] += "kern-tools-native:do_populate_sysroot"
do_kernel_configme[dirs] += "${S} ${B}"
do_kernel_configme() {
+ do_kernel_metadata config
+
# translate the kconfig_mode into something that merge_config.sh
# understands
case ${KCONFIG_MODE} in
@@ -370,6 +436,67 @@ do_kernel_configme() {
}
addtask kernel_configme before do_configure after do_patch
+addtask config_analysis
+
+do_config_analysis[depends] = "virtual/kernel:do_configure"
+do_config_analysis[depends] += "kern-tools-native:do_populate_sysroot"
+
+CONFIG_AUDIT_FILE ?= "${WORKDIR}/config-audit.txt"
+CONFIG_ANALYSIS_FILE ?= "${WORKDIR}/config-analysis.txt"
+
+python do_config_analysis() {
+ import re, string, sys, subprocess
+
+ s = d.getVar('S')
+
+ env = os.environ.copy()
+ env['PATH'] = "%s:%s%s" % (d.getVar('PATH'), s, "/scripts/util/")
+ env['LD'] = d.getVar('KERNEL_LD')
+ env['CC'] = d.getVar('KERNEL_CC')
+ env['ARCH'] = d.getVar('ARCH')
+ env['srctree'] = s
+
+ # read specific symbols from the kernel recipe or from local.conf
+ # i.e.: CONFIG_ANALYSIS_pn-linux-yocto-dev = 'NF_CONNTRACK LOCALVERSION'
+ config = d.getVar( 'CONFIG_ANALYSIS' )
+ if not config:
+ config = [ "" ]
+ else:
+ config = config.split()
+
+ for c in config:
+ for action in ["analysis","audit"]:
+ if action == "analysis":
+ try:
+ analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--blame', c], cwd=s, env=env ).decode('utf-8')
+ except subprocess.CalledProcessError as e:
+ bb.fatal( "config analysis failed: %s" % e.output.decode('utf-8'))
+
+ outfile = d.getVar( 'CONFIG_ANALYSIS_FILE' )
+
+ if action == "audit":
+ try:
+ analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--summary', '--extended', '--sanity', c], cwd=s, env=env ).decode('utf-8')
+ except subprocess.CalledProcessError as e:
+ bb.fatal( "config analysis failed: %s" % e.output.decode('utf-8'))
+
+ outfile = d.getVar( 'CONFIG_AUDIT_FILE' )
+
+ if c:
+ outdir = os.path.dirname( outfile )
+ outname = os.path.basename( outfile )
+ outfile = outdir + '/'+ c + '-' + outname
+
+ if config and os.path.isfile(outfile):
+ os.remove(outfile)
+
+ with open(outfile, 'w+') as f:
+ f.write( analysis )
+
+ bb.warn( "Configuration {} executed, see: {} for details".format(action,outfile ))
+ if c:
+ bb.warn( analysis )
+}
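do_config_analysis drives symbol_why.py from kern-tools once per requested symbol and per action, and per-symbol reports get the symbol name prefixed to the output file name. A condensed sketch of a single invocation, reusing only the flags visible in the task above (everything else here is illustrative, not the tool's documented CLI):

import os
import subprocess

def analyse_symbol(dotconfig, symbol, outfile, srctree, action="analysis"):
    """Run symbol_why.py as do_config_analysis does and save its report."""
    if action == "analysis":
        args = ["--blame", symbol]
    else:  # "audit"
        args = ["--summary", "--extended", "--sanity", symbol]
    report = subprocess.check_output(
        ["symbol_why.py", "--dotconfig", dotconfig] + args,
        cwd=srctree).decode("utf-8")
    if symbol:
        # per-symbol reports become <dir>/<SYMBOL>-<basename>, as above
        head, tail = os.path.split(outfile)
        outfile = os.path.join(head, symbol + "-" + tail)
    with open(outfile, "w") as f:
        f.write(report)
    return outfile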
python do_kernel_configcheck() {
import re, string, sys, subprocess
@@ -379,59 +506,99 @@ python do_kernel_configcheck() {
# meta-series for processing
kmeta = d.getVar("KMETA") or "meta"
if not os.path.exists(kmeta):
- kmeta = "." + kmeta
+ kmeta = subprocess.check_output(['kgit', '--meta'], cwd=d.getVar('S')).decode('utf-8').rstrip()
s = d.getVar('S')
env = os.environ.copy()
env['PATH'] = "%s:%s%s" % (d.getVar('PATH'), s, "/scripts/util/")
- env['LD'] = "${KERNEL_LD}"
+ env['LD'] = d.getVar('KERNEL_LD')
+ env['CC'] = d.getVar('KERNEL_CC')
+ env['ARCH'] = d.getVar('ARCH')
+ env['srctree'] = s
try:
configs = subprocess.check_output(['scc', '--configs', '-o', s + '/.kernel-meta'], env=env).decode('utf-8')
except subprocess.CalledProcessError as e:
bb.fatal( "Cannot gather config fragments for audit: %s" % e.output.decode("utf-8") )
- try:
- subprocess.check_call(['kconf_check', '--report', '-o',
- '%s/%s/cfg' % (s, kmeta), d.getVar('B') + '/.config', s, configs], cwd=s, env=env)
- except subprocess.CalledProcessError:
- # The configuration gathering can return different exit codes, but
- # we interpret them based on the KCONF_AUDIT_LEVEL variable, so we catch
- # everything here, and let the run continue.
- pass
-
config_check_visibility = int(d.getVar("KCONF_AUDIT_LEVEL") or 0)
bsp_check_visibility = int(d.getVar("KCONF_BSP_AUDIT_LEVEL") or 0)
+ kmeta_audit_werror = d.getVar("KMETA_AUDIT_WERROR") or ""
+ warnings_detected = False
+
+ # if config check visibility is "1", that's the lowest level of audit. So
+ # we add the --classify option to the run, since classification will
+ # streamline the output to only report options that could be boot issues,
+ # or are otherwise required for proper operation.
+ extra_params = ""
+ if config_check_visibility == 1:
+ extra_params = "--classify"
+
+ # category #1: mismatches
+ try:
+ analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--mismatches', extra_params], cwd=s, env=env ).decode('utf-8')
+ except subprocess.CalledProcessError as e:
+ bb.fatal( "config analysis failed: %s" % e.output.decode('utf-8'))
- # if config check visibility is non-zero, report dropped configuration values
- mismatch_file = d.expand("${S}/%s/cfg/mismatch.txt" % kmeta)
- if os.path.exists(mismatch_file):
- if config_check_visibility:
- with open (mismatch_file, "r") as myfile:
+ if analysis:
+ outfile = "{}/{}/cfg/mismatch.txt".format( s, kmeta )
+ if os.path.isfile(outfile):
+ os.remove(outfile)
+ with open(outfile, 'w+') as f:
+ f.write( analysis )
+
+ if config_check_visibility and os.stat(outfile).st_size > 0:
+ with open (outfile, "r") as myfile:
results = myfile.read()
bb.warn( "[kernel config]: specified values did not make it into the kernel's final configuration:\n\n%s" % results)
+ warnings_detected = True
+
+ # category #2: invalid fragment elements
+ extra_params = ""
+ if bsp_check_visibility > 1:
+ extra_params = "--strict"
+ try:
+ analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--invalid', extra_params], cwd=s, env=env ).decode('utf-8')
+ except subprocess.CalledProcessError as e:
+ bb.fatal( "config analysis failed: %s" % e.output.decode('utf-8'))
+
+ if analysis:
+ outfile = "{}/{}/cfg/invalid.txt".format(s,kmeta)
+ if os.path.isfile(outfile):
+ os.remove(outfile)
+ with open(outfile, 'w+') as f:
+ f.write( analysis )
- if bsp_check_visibility:
- invalid_file = d.expand("${S}/%s/cfg/invalid.cfg" % kmeta)
- if os.path.exists(invalid_file) and os.stat(invalid_file).st_size > 0:
- with open (invalid_file, "r") as myfile:
+ if bsp_check_visibility and os.stat(outfile).st_size > 0:
+ with open (outfile, "r") as myfile:
results = myfile.read()
- bb.warn( "[kernel config]: This BSP sets config options that are not offered anywhere within this kernel:\n\n%s" % results)
- errors_file = d.expand("${S}/%s/cfg/fragment_errors.txt" % kmeta)
- if os.path.exists(errors_file) and os.stat(errors_file).st_size > 0:
- with open (errors_file, "r") as myfile:
- results = myfile.read()
- bb.warn( "[kernel config]: This BSP contains fragments with errors:\n\n%s" % results)
-
- # if the audit level is greater than two, we report if a fragment has overriden
- # a value from a base fragment. This is really only used for new kernel introduction
- if bsp_check_visibility > 2:
- redefinition_file = d.expand("${S}/%s/cfg/redefinition.txt" % kmeta)
- if os.path.exists(redefinition_file) and os.stat(redefinition_file).st_size > 0:
- with open (redefinition_file, "r") as myfile:
+ bb.warn( "[kernel config]: This BSP contains fragments with warnings:\n\n%s" % results)
+ warnings_detected = True
+
+ # category #3: redefined options (this is pretty verbose and is debug only)
+ try:
+ analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--sanity'], cwd=s, env=env ).decode('utf-8')
+ except subprocess.CalledProcessError as e:
+ bb.fatal( "config analysis failed: %s" % e.output.decode('utf-8'))
+
+ if analysis:
+ outfile = "{}/{}/cfg/redefinition.txt".format(s,kmeta)
+ if os.path.isfile(outfile):
+ os.remove(outfile)
+ with open(outfile, 'w+') as f:
+ f.write( analysis )
+
+ # if the audit level is greater than two, we report if a fragment has overridden
+ # a value from a base fragment. This is really only used for new kernel introduction
+ if bsp_check_visibility > 2 and os.stat(outfile).st_size > 0:
+ with open (outfile, "r") as myfile:
results = myfile.read()
bb.warn( "[kernel config]: This BSP has configuration options defined in more than one config, with differing values:\n\n%s" % results)
+ warnings_detected = True
+
+ if warnings_detected and kmeta_audit_werror:
+ bb.fatal( "configuration warnings detected, werror is set, promoting to fatal" )
}
# Ensure that the branches (BSP and meta) are on the locations specified by
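The reworked do_kernel_configcheck follows one pattern for all three audit categories: run symbol_why.py, persist the report under the meta directory, warn at the configured visibility level, and only at the very end promote the accumulated warnings to a fatal error when KMETA_AUDIT_WERROR is set. The pattern in isolation, as a sketch:

def run_audits(categories, werror=False):
    """Each category check returns a report string (empty when clean);
    warnings are collected first and promoted to an error only at the end."""
    warnings_detected = False
    for name, check in categories:
        report = check()
        if report:
            print("[kernel config] %s:\n\n%s" % (name, report))
            warnings_detected = True
    if warnings_detected and werror:
        raise RuntimeError(
            "configuration warnings detected, werror is set, promoting to fatal")

# run_audits([("mismatches", lambda: ""), ("invalid", lambda: "CONFIG_FOO")])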
diff --git a/poky/meta/classes/kernel.bbclass b/poky/meta/classes/kernel.bbclass
index 20a0135fc..78def5bbc 100644
--- a/poky/meta/classes/kernel.bbclass
+++ b/poky/meta/classes/kernel.bbclass
@@ -4,10 +4,12 @@ KERNEL_PACKAGE_NAME ??= "kernel"
KERNEL_DEPLOYSUBDIR ??= "${@ "" if (d.getVar("KERNEL_PACKAGE_NAME") == "kernel") else d.getVar("KERNEL_PACKAGE_NAME") }"
PROVIDES += "${@ "virtual/kernel" if (d.getVar("KERNEL_PACKAGE_NAME") == "kernel") else "" }"
-DEPENDS += "virtual/${TARGET_PREFIX}binutils virtual/${TARGET_PREFIX}gcc kmod-native bc-native lzop-native bison-native"
+DEPENDS += "virtual/${TARGET_PREFIX}binutils virtual/${TARGET_PREFIX}gcc kmod-native bc-native bison-native"
+DEPENDS += "${@bb.utils.contains("INITRAMFS_FSTYPES", "cpio.lzo", "lzop-native", "", d)}"
+DEPENDS += "${@bb.utils.contains("INITRAMFS_FSTYPES", "cpio.lz4", "lz4-native", "", d)}"
PACKAGE_WRITE_DEPS += "depmodwrapper-cross"
-do_deploy[depends] += "depmodwrapper-cross:do_populate_sysroot"
+do_deploy[depends] += "depmodwrapper-cross:do_populate_sysroot gzip-native:do_populate_sysroot"
do_clean[depends] += "make-mod-scripts:do_clean"
CVE_PRODUCT ?= "linux_kernel"
@@ -94,6 +96,25 @@ python __anonymous () {
d.appendVar('RDEPENDS_%s-image' % kname, ' %s-image-%s' % (kname, typelower))
d.setVar('PKG_%s-image-%s' % (kname,typelower), '%s-image-%s-${KERNEL_VERSION_PKG_NAME}' % (kname, typelower))
d.setVar('ALLOW_EMPTY_%s-image-%s' % (kname, typelower), '1')
+ d.setVar('pkg_postinst_%s-image-%s' % (kname,typelower), """set +e
+if [ -n "$D" ]; then
+ ln -sf %s-${KERNEL_VERSION} $D/${KERNEL_IMAGEDEST}/%s > /dev/null 2>&1
+else
+ ln -sf %s-${KERNEL_VERSION} ${KERNEL_IMAGEDEST}/%s > /dev/null 2>&1
+ if [ $? -ne 0 ]; then
+ echo "Filesystem on ${KERNEL_IMAGEDEST}/ doesn't support symlinks, falling back to copied image (%s)."
+ install -m 0644 ${KERNEL_IMAGEDEST}/%s-${KERNEL_VERSION} ${KERNEL_IMAGEDEST}/%s
+ fi
+fi
+set -e
+""" % (type, type, type, type, type, type, type))
+ d.setVar('pkg_postrm_%s-image-%s' % (kname,typelower), """set +e
+if [ -f "${KERNEL_IMAGEDEST}/%s" -o -L "${KERNEL_IMAGEDEST}/%s" ]; then
+ rm -f ${KERNEL_IMAGEDEST}/%s > /dev/null 2>&1
+fi
+set -e
+""" % (type, type, type))
+
image = d.getVar('INITRAMFS_IMAGE')
# If the INITRAMFS_IMAGE is set but the INITRAMFS_IMAGE_BUNDLE is set to 0,
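The generated postinst above prefers a versioned symlink for each image type and, at first boot, falls back to copying the image when the target filesystem (a FAT /boot, for instance) cannot hold symlinks. The same fallback expressed in Python, for illustration only:

import os
import shutil

def install_image_link(imagedest, image_type, version):
    """Symlink <type> -> <type>-<version>, copying when symlinks fail."""
    target = "%s-%s" % (image_type, version)
    link = os.path.join(imagedest, image_type)
    if os.path.lexists(link):
        os.remove(link)  # mirror ln -sf
    try:
        os.symlink(target, link)
    except OSError:
        print("Filesystem on %s doesn't support symlinks, "
              "falling back to copied image (%s)." % (imagedest, image_type))
        shutil.copyfile(os.path.join(imagedest, target), link)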
@@ -151,7 +172,10 @@ python do_symlink_kernsrc () {
shutil.move(s, kernsrc)
os.symlink(kernsrc, s)
}
-addtask symlink_kernsrc before do_configure after do_unpack
+# do_patch is normally ordered before do_configure, but
+# externalsrc.bbclass deletes do_patch, breaking the dependency of
+# do_configure on do_symlink_kernsrc.
+addtask symlink_kernsrc before do_patch do_configure after do_unpack
inherit kernel-arch deploy
@@ -191,6 +215,8 @@ UBOOT_LOADADDRESS ?= "${UBOOT_ENTRYPOINT}"
KERNEL_EXTRA_ARGS ?= ""
EXTRA_OEMAKE = " HOSTCC="${BUILD_CC} ${BUILD_CFLAGS} ${BUILD_LDFLAGS}" HOSTCPP="${BUILD_CPP}""
+EXTRA_OEMAKE += " HOSTCXX="${BUILD_CXX} ${BUILD_CXXFLAGS} ${BUILD_LDFLAGS}""
+
KERNEL_ALT_IMAGETYPE ??= ""
copy_initramfs() {
@@ -210,7 +236,7 @@ copy_initramfs() {
;;
*lz4)
echo "lz4 decompressing image"
- lz4 -df ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img
+ lz4 -df ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img ${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio
break
;;
*lzo)
@@ -384,10 +410,7 @@ kernel_do_install() {
install -d ${D}/${KERNEL_IMAGEDEST}
install -d ${D}/boot
for imageType in ${KERNEL_IMAGETYPES} ; do
- install -m 0644 ${KERNEL_OUTPUT_DIR}/${imageType} ${D}/${KERNEL_IMAGEDEST}/${imageType}-${KERNEL_VERSION}
- if [ "${KERNEL_PACKAGE_NAME}" = "kernel" ]; then
- ln -sf ${imageType}-${KERNEL_VERSION} ${D}/${KERNEL_IMAGEDEST}/${imageType}
- fi
+ install -m 0644 ${KERNEL_OUTPUT_DIR}/$imageType ${D}/${KERNEL_IMAGEDEST}/$imageType-${KERNEL_VERSION}
done
install -m 0644 System.map ${D}/boot/System.map-${KERNEL_VERSION}
install -m 0644 .config ${D}/boot/config-${KERNEL_VERSION}
@@ -396,7 +419,6 @@ kernel_do_install() {
install -d ${D}${sysconfdir}/modules-load.d
install -d ${D}${sysconfdir}/modprobe.d
}
-do_install[prefuncs] += "package_get_auto_pr"
# Must be run no earlier than do_kernel_checkout, or else the Makefile won't be at ${S}/Makefile
do_kernel_version_sanity_check() {
@@ -463,7 +485,7 @@ do_shared_workdir () {
# Copy files required for module builds
cp System.map $kerneldir/System.map-${KERNEL_VERSION}
- cp Module.symvers $kerneldir/
+ [ -e Module.symvers ] && cp Module.symvers $kerneldir/
cp .config $kerneldir/
mkdir -p $kerneldir/include/config
cp include/config/kernel.release $kerneldir/include/config/kernel.release
@@ -697,11 +719,10 @@ kernel_do_deploy() {
fi
for imageType in ${KERNEL_IMAGETYPES} ; do
- base_name=${imageType}-${KERNEL_IMAGE_NAME}
- install -m 0644 ${KERNEL_OUTPUT_DIR}/${imageType} $deployDir/${base_name}.bin
- symlink_name=${imageType}-${KERNEL_IMAGE_LINK_NAME}
- ln -sf ${base_name}.bin $deployDir/${symlink_name}.bin
- ln -sf ${base_name}.bin $deployDir/${imageType}
+ baseName=$imageType-${KERNEL_IMAGE_NAME}
+ install -m 0644 ${KERNEL_OUTPUT_DIR}/$imageType $deployDir/$baseName.bin
+ ln -sf $baseName.bin $deployDir/$imageType-${KERNEL_IMAGE_LINK_NAME}.bin
+ ln -sf $baseName.bin $deployDir/$imageType
done
if [ ${MODULE_TARBALL_DEPLOY} = "1" ] && (grep -q -i -e '^CONFIG_MODULES=y$' .config); then
@@ -722,16 +743,16 @@ kernel_do_deploy() {
if [ "$imageType" = "fitImage" ] ; then
continue
fi
- initramfs_base_name=${imageType}-${INITRAMFS_NAME}
- initramfs_symlink_name=${imageType}-${INITRAMFS_LINK_NAME}
- install -m 0644 ${KERNEL_OUTPUT_DIR}/${imageType}.initramfs $deployDir/${initramfs_base_name}.bin
- ln -sf ${initramfs_base_name}.bin $deployDir/${initramfs_symlink_name}.bin
+ initramfsBaseName=$imageType-${INITRAMFS_NAME}
+ install -m 0644 ${KERNEL_OUTPUT_DIR}/$imageType.initramfs $deployDir/$initramfsBaseName.bin
+ ln -sf $initramfsBaseName.bin $deployDir/$imageType-${INITRAMFS_LINK_NAME}.bin
done
fi
}
-do_deploy[cleandirs] = "${DEPLOYDIR}"
-do_deploy[dirs] = "${DEPLOYDIR} ${B}"
-do_deploy[prefuncs] += "package_get_auto_pr"
+
+# We deploy to filenames that include PKGV and PKGR; read the saved data to
+# ensure we get the right values for both
+do_deploy[prefuncs] += "read_subpackage_metadata"
addtask deploy after do_populate_sysroot do_packagedata
diff --git a/poky/meta/classes/license_image.bbclass b/poky/meta/classes/license_image.bbclass
index a8c72da3c..702e9f9c5 100644
--- a/poky/meta/classes/license_image.bbclass
+++ b/poky/meta/classes/license_image.bbclass
@@ -200,6 +200,17 @@ def license_deployed_manifest(d):
image_license_manifest = os.path.join(lic_manifest_dir, 'image_license.manifest')
write_license_files(d, image_license_manifest, man_dic, rootfs=False)
+ link_name = d.getVar('IMAGE_LINK_NAME')
+ if link_name:
+ lic_manifest_symlink_dir = os.path.join(d.getVar('LICENSE_DIRECTORY'),
+ link_name)
+ # remove old symlink
+ if os.path.islink(lic_manifest_symlink_dir):
+ os.unlink(lic_manifest_symlink_dir)
+
+ # create the image dir symlink
+ os.symlink(lic_manifest_dir, lic_manifest_symlink_dir)
+
def get_deployed_dependencies(d):
"""
Get all the deployed dependencies of an image
diff --git a/poky/meta/classes/linuxloader.bbclass b/poky/meta/classes/linuxloader.bbclass
index ec0e0556d..720e5dfad 100644
--- a/poky/meta/classes/linuxloader.bbclass
+++ b/poky/meta/classes/linuxloader.bbclass
@@ -4,7 +4,7 @@ def get_musl_loader_arch(d):
targetarch = d.getVar("TARGET_ARCH")
if targetarch.startswith("microblaze"):
- ldso_arch = "microblaze${@bb.utils.contains('TUNE_FEATURES', 'bigendian', '', 'el' ,d)}"
+ ldso_arch = "microblaze${@bb.utils.contains('TUNE_FEATURES', 'bigendian', '', 'el', d)}"
elif targetarch.startswith("mips"):
ldso_arch = "mips${ABIEXTENSION}${MIPSPKGSFX_BYTE}${MIPSPKGSFX_R6}${MIPSPKGSFX_ENDIAN}${@['', '-sf'][d.getVar('TARGET_FPU') == 'soft']}"
elif targetarch == "powerpc":
@@ -21,6 +21,8 @@ def get_musl_loader_arch(d):
ldso_arch = "aarch64${ARMPKGSFX_ENDIAN_64}"
elif targetarch.startswith("riscv64"):
ldso_arch = "riscv64${@['', '-sf'][d.getVar('TARGET_FPU') == 'soft']}"
+ elif targetarch.startswith("riscv32"):
+ ldso_arch = "riscv32${@['', '-sf'][d.getVar('TARGET_FPU') == 'soft']}"
return ldso_arch
def get_musl_loader(d):
diff --git a/poky/meta/classes/meson.bbclass b/poky/meta/classes/meson.bbclass
index e9628033c..83aa854b7 100644
--- a/poky/meta/classes/meson.bbclass
+++ b/poky/meta/classes/meson.bbclass
@@ -68,6 +68,9 @@ def meson_operating_system(var, d):
os = d.getVar(var)
if "mingw" in os:
return "windows"
+ # avoid e.g. 'linux-gnueabi'
+ elif "linux" in os:
+ return "linux"
else:
return os
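The new branch matters because TARGET_OS values such as 'linux-gnueabi' are OS/ABI strings, while Meson's cross files expect the canonical operating system name. A standalone rendering of the helper with its expected behaviour:

def meson_operating_system(value):
    """Map an OE *_OS value to a Meson operating system name."""
    if "mingw" in value:
        return "windows"
    # avoid e.g. 'linux-gnueabi' leaking through as the OS name
    elif "linux" in value:
        return "linux"
    return value

assert meson_operating_system("linux-gnueabi") == "linux"
assert meson_operating_system("mingw32") == "windows"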
@@ -95,6 +98,7 @@ strip = ${@meson_array('STRIP', d)}
readelf = ${@meson_array('READELF', d)}
pkgconfig = 'pkg-config'
llvm-config = 'llvm-config${LLVMVERSION}'
+cups-config = 'cups-config'
[properties]
needs_exe_wrapper = true
diff --git a/poky/meta/classes/mime.bbclass b/poky/meta/classes/mime.bbclass
index c9072adf3..bb99bc35c 100644
--- a/poky/meta/classes/mime.bbclass
+++ b/poky/meta/classes/mime.bbclass
@@ -24,7 +24,18 @@ if [ "x$D" != "x" ]; then
mimedir=${MIMEDIR}
else
echo "Updating MIME database... this may take a while."
- update-mime-database $D${MIMEDIR}
+ # $D${MIMEDIR}/packages belongs to the shared-mime-info-data package,
+ # and packages like libfm-mime depend on shared-mime-info-data.
+ # After shared-mime-info-data is uninstalled, $D${MIMEDIR}/packages
+ # is removed, but update-mime-database needs this dir to update the
+ # database; as a workaround, create it and remove it afterwards
+ if [ ! -d $D${MIMEDIR}/packages ]; then
+ mkdir -p $D${MIMEDIR}/packages
+ update-mime-database $D${MIMEDIR}
+ rmdir --ignore-fail-on-non-empty $D${MIMEDIR}/packages
+ else
+ update-mime-database $D${MIMEDIR}
+ fi
fi
}
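The workaround above boils down to: recreate the packages directory if a package removal already took it away, run the updater, then remove the directory again only if it stayed empty. A Python sketch of that create-if-missing pattern (the real hook is the shell postinst above):

import os
import subprocess

def update_mime_database(mimedir):
    """Run update-mime-database, temporarily restoring <mimedir>/packages."""
    packages = os.path.join(mimedir, "packages")
    created = not os.path.isdir(packages)
    if created:
        os.makedirs(packages)
    subprocess.check_call(["update-mime-database", mimedir])
    # mirror rmdir --ignore-fail-on-non-empty: only remove if still empty
    if created and not os.listdir(packages):
        os.rmdir(packages)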
diff --git a/poky/meta/classes/nativesdk.bbclass b/poky/meta/classes/nativesdk.bbclass
index 7b7571072..7f2692c51 100644
--- a/poky/meta/classes/nativesdk.bbclass
+++ b/poky/meta/classes/nativesdk.bbclass
@@ -9,6 +9,7 @@ NATIVESDKLIBC ?= "libc-glibc"
LIBCOVERRIDE = ":${NATIVESDKLIBC}"
CLASSOVERRIDE = "class-nativesdk"
MACHINEOVERRIDES = ""
+MACHINE_FEATURES = ""
MULTILIBS = ""
@@ -57,7 +58,7 @@ EXTRA_OECONF_GCC_FLOAT = ""
CPPFLAGS = "${BUILDSDK_CPPFLAGS}"
CFLAGS = "${BUILDSDK_CFLAGS}"
-CXXFLAGS = "${BUILDSDK_CFLAGS}"
+CXXFLAGS = "${BUILDSDK_CXXFLAGS}"
LDFLAGS = "${BUILDSDK_LDFLAGS}"
# Change to place files in SDKPATH
diff --git a/poky/meta/classes/nopackages.bbclass b/poky/meta/classes/nopackages.bbclass
index 559f5078b..7a4f632d7 100644
--- a/poky/meta/classes/nopackages.bbclass
+++ b/poky/meta/classes/nopackages.bbclass
@@ -2,6 +2,7 @@ deltask do_package
deltask do_package_write_rpm
deltask do_package_write_ipk
deltask do_package_write_deb
+deltask do_package_write_tar
deltask do_package_qa
deltask do_packagedata
deltask do_package_setscene
diff --git a/poky/meta/classes/package.bbclass b/poky/meta/classes/package.bbclass
index 0af5f6673..e6236c0bb 100644
--- a/poky/meta/classes/package.bbclass
+++ b/poky/meta/classes/package.bbclass
@@ -7,7 +7,7 @@
#
# There are the following default steps but PACKAGEFUNCS can be extended:
#
-# a) package_get_auto_pr - get PRAUTO from remote PR service
+# a) package_convert_pr_autoinc - convert AUTOINC in PKGV to ${PRSERV_PV_AUTOINC}
#
# b) perform_packagecopy - Copy D into PKGD
#
@@ -664,12 +664,20 @@ def runtime_mapping_rename (varname, pkg, d):
#bb.note("%s after: %s" % (varname, d.getVar(varname)))
#
-# Package functions suitable for inclusion in PACKAGEFUNCS
+# Used by do_packagedata (and possibly other routines post do_package)
#
+package_get_auto_pr[vardepsexclude] = "BB_TASKDEPDATA"
python package_get_auto_pr() {
import oe.prservice
- import re
+
+ def get_do_package_hash(pn):
+ if d.getVar("BB_RUNTASK") != "do_package":
+ taskdepdata = d.getVar("BB_TASKDEPDATA", False)
+ for dep in taskdepdata:
+ if taskdepdata[dep][1] == "do_package" and taskdepdata[dep][0] == pn:
+ return taskdepdata[dep][6]
+ return None
# Support per recipe PRSERV_HOST
pn = d.getVar('PN')
@@ -681,15 +689,22 @@ python package_get_auto_pr() {
# PR Server not active, handle AUTOINC
if not d.getVar('PRSERV_HOST'):
- if 'AUTOINC' in pkgv:
- d.setVar("PKGV", pkgv.replace("AUTOINC", "0"))
+ d.setVar("PRSERV_PV_AUTOINC", "0")
return
auto_pr = None
pv = d.getVar("PV")
version = d.getVar("PRAUTOINX")
pkgarch = d.getVar("PACKAGE_ARCH")
- checksum = d.getVar("BB_TASKHASH")
+ checksum = get_do_package_hash(pn)
+
+ # If do_package isn't in the dependencies, we can't get the checksum...
+ if not checksum:
+ bb.warn('Task %s requested do_package unihash, but it was not available.' % d.getVar('BB_RUNTASK'))
+ #taskdepdata = d.getVar("BB_TASKDEPDATA", False)
+ #for dep in taskdepdata:
+ # bb.warn('%s:%s = %s' % (taskdepdata[dep][0], taskdepdata[dep][1], taskdepdata[dep][6]))
+ return
if d.getVar('PRSERV_LOCKDOWN'):
auto_pr = d.getVar('PRAUTO_' + version + '_' + pkgarch) or d.getVar('PRAUTO_' + version) or None
@@ -707,7 +722,7 @@ python package_get_auto_pr() {
srcpv = bb.fetch2.get_srcrev(d)
base_ver = "AUTOINC-%s" % version[:version.find(srcpv)]
value = conn.getPR(base_ver, pkgarch, srcpv)
- d.setVar("PKGV", pkgv.replace("AUTOINC", str(value)))
+ d.setVar("PRSERV_PV_AUTOINC", str(value))
auto_pr = conn.getPR(version, pkgarch, checksum)
except Exception as e:
@@ -717,6 +732,22 @@ python package_get_auto_pr() {
d.setVar('PRAUTO',str(auto_pr))
}
+#
+# Package functions suitable for inclusion in PACKAGEFUNCS
+#
+
+python package_convert_pr_autoinc() {
+ pkgv = d.getVar("PKGV")
+
+ # Adjust pkgv as necessary...
+ if 'AUTOINC' in pkgv:
+ d.setVar("PKGV", pkgv.replace("AUTOINC", "${PRSERV_PV_AUTOINC}"))
+
+ # Change PRSERV_PV_AUTOINC and EXTENDPRAUTO usage to special values
+ d.setVar('PRSERV_PV_AUTOINC', '@PRSERV_PV_AUTOINC@')
+ d.setVar('EXTENDPRAUTO', '@EXTENDPRAUTO@')
+}
+
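The net effect of this split: do_package replaces AUTOINC with a stable placeholder so its output hash no longer depends on the PR service, and do_packagedata later resolves the placeholder (via the sed in packagedata_translate_pr_autoinc further down) once package_get_auto_pr has queried the real value. Both halves, reduced to plain Python as a sketch:

def convert_pr_autoinc(pkgv):
    """do_package half: stable placeholder instead of a live PR lookup."""
    return pkgv.replace("AUTOINC", "@PRSERV_PV_AUTOINC@")

def translate_pr_autoinc(text, autoinc_value):
    """do_packagedata half: substitute the value the PR service returned."""
    return text.replace("@PRSERV_PV_AUTOINC@", str(autoinc_value))

pkgv = convert_pr_autoinc("1.0+gitAUTOINC+1234abcd")
print(translate_pr_autoinc(pkgv, 7))  # -> 1.0+git7+1234abcd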
LOCALEBASEPN ??= "${PN}"
python package_do_split_locales() {
@@ -1039,7 +1070,7 @@ python split_and_strip_files () {
dvar = d.getVar('PKGD')
pn = d.getVar('PN')
- targetos = d.getVar('TARGET_OS')
+ hostos = d.getVar('HOST_OS')
oldcwd = os.getcwd()
os.chdir(dvar)
@@ -1194,7 +1225,7 @@ python split_and_strip_files () {
if (d.getVar('INHIBIT_PACKAGE_DEBUG_SPLIT') != '1'):
results = oe.utils.multiprocess_launch(splitdebuginfo, list(elffiles), d, extraargs=(dvar, debugdir, debuglibdir, debugappend, debugsrcdir, d))
- if debugsrcdir and not targetos.startswith("mingw"):
+ if debugsrcdir and not hostos.startswith("mingw"):
if (d.getVar('PACKAGE_DEBUG_STATIC_SPLIT') == '1'):
results = oe.utils.multiprocess_launch(splitstaticdebuginfo, staticlibs, d, extraargs=(dvar, debugstaticdir, debugstaticlibdir, debugstaticappend, debugsrcdir, d))
else:
@@ -1638,7 +1669,7 @@ fi
# Symlinks needed for rprovides lookup
rprov = d.getVar('RPROVIDES_%s' % pkg) or d.getVar('RPROVIDES')
if rprov:
- for p in rprov.strip().split():
+ for p in bb.utils.explode_deps(rprov):
subdata_sym = pkgdatadir + "/runtime-rprovides/%s/%s" % (p, pkg)
bb.utils.mkdirhier(os.path.dirname(subdata_sym))
oe.path.symlink("../../runtime/%s" % pkg, subdata_sym, True)
@@ -1761,7 +1792,7 @@ python package_do_shlibs() {
else:
shlib_pkgs = packages.split()
- targetos = d.getVar('TARGET_OS')
+ hostos = d.getVar('HOST_OS')
workdir = d.getVar('WORKDIR')
@@ -1912,9 +1943,9 @@ python package_do_shlibs() {
soname = None
if cpath.islink(file):
continue
- if targetos == "darwin" or targetos == "darwin8":
+ if hostos == "darwin" or hostos == "darwin8":
darwin_so(file, needed, sonames, renames, pkgver)
- elif targetos.startswith("mingw"):
+ elif hostos.startswith("mingw"):
mingw_dll(file, needed, sonames, renames, pkgver)
elif os.access(file, os.X_OK) or lib_re.match(file):
linuxlist.append(file)
@@ -1936,7 +1967,7 @@ python package_do_shlibs() {
shlibs_file = os.path.join(shlibswork_dir, pkg + ".list")
if len(sonames):
with open(shlibs_file, 'w') as fd:
- for s in sonames:
+ for s in sorted(sonames):
if s[0] in shlib_provider and s[1] in shlib_provider[s[0]]:
(old_pkg, old_pkgver) = shlib_provider[s[0]][s[1]]
if old_pkg != pkg:
@@ -2335,7 +2366,7 @@ python do_package () {
package_qa_handle_error("var-undefined", msg, d)
return
- bb.build.exec_func("package_get_auto_pr", d)
+ bb.build.exec_func("package_convert_pr_autoinc", d)
###########################################################################
# Optimisations
@@ -2407,9 +2438,20 @@ addtask do_package_setscene
# Copy from PKGDESTWORK to tempdirectory as tempdirectory can be cleaned at both
# do_package_setscene and do_packagedata_setscene leading to races
python do_packagedata () {
+ bb.build.exec_func("package_get_auto_pr", d)
+
src = d.expand("${PKGDESTWORK}")
dest = d.expand("${WORKDIR}/pkgdata-pdata-input")
oe.path.copyhardlinktree(src, dest)
+
+ bb.build.exec_func("packagedata_translate_pr_autoinc", d)
+}
+
+# Translate the EXTENDPRAUTO and AUTOINC to the final values
+packagedata_translate_pr_autoinc() {
+ find ${WORKDIR}/pkgdata-pdata-input -type f | xargs --no-run-if-empty \
+ sed -e 's,@PRSERV_PV_AUTOINC@,${PRSERV_PV_AUTOINC},g' \
+ -e 's,@EXTENDPRAUTO@,${EXTENDPRAUTO},g' -i
}
addtask packagedata before do_build after do_package
diff --git a/poky/meta/classes/package_rpm.bbclass b/poky/meta/classes/package_rpm.bbclass
index 519c22be4..53b4700cd 100644
--- a/poky/meta/classes/package_rpm.bbclass
+++ b/poky/meta/classes/package_rpm.bbclass
@@ -557,7 +557,7 @@ python write_specfile () {
print_deps(srcrrecommends, "Recommends", spec_preamble_top, d)
print_deps(srcrsuggests, "Suggests", spec_preamble_top, d)
- print_deps(srcrprovides + (" /bin/sh" if srcname.startswith("nativesdk-") else ""), "Provides", spec_preamble_top, d)
+ print_deps(srcrprovides, "Provides", spec_preamble_top, d)
print_deps(srcrobsoletes, "Obsoletes", spec_preamble_top, d)
print_deps(srcrconflicts, "Conflicts", spec_preamble_top, d)
diff --git a/poky/meta/classes/package_tar.bbclass b/poky/meta/classes/package_tar.bbclass
index ce3ab4c8e..d6c1b306f 100644
--- a/poky/meta/classes/package_tar.bbclass
+++ b/poky/meta/classes/package_tar.bbclass
@@ -57,10 +57,8 @@ python do_package_tar () {
python () {
if d.getVar('PACKAGES') != '':
- deps = (d.getVarFlag('do_package_write_tar', 'depends') or "").split()
- deps.append('tar-native:do_populate_sysroot')
- deps.append('virtual/fakeroot-native:do_populate_sysroot')
- d.setVarFlag('do_package_write_tar', 'depends', " ".join(deps))
+ deps = ' tar-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot'
+ d.appendVarFlag('do_package_write_tar', 'depends', deps)
d.setVarFlag('do_package_write_tar', 'fakeroot', "1")
}
diff --git a/poky/meta/classes/packagefeed-stability.bbclass b/poky/meta/classes/packagefeed-stability.bbclass
deleted file mode 100644
index 564860256..000000000
--- a/poky/meta/classes/packagefeed-stability.bbclass
+++ /dev/null
@@ -1,252 +0,0 @@
-# Class to avoid copying packages into the feed if they haven't materially changed
-#
-# Copyright (C) 2015 Intel Corporation
-# Released under the MIT license (see COPYING.MIT for details)
-#
-# This class effectively intercepts packages as they are written out by
-# do_package_write_*, causing them to be written into a different
-# directory where we can compare them to whatever older packages might
-# be in the "real" package feed directory, and avoid copying the new
-# package to the feed if it has not materially changed. The idea is to
-# avoid unnecessary churn in the packages when dependencies trigger task
-# reexecution (and thus repackaging). Enabling the class is simple:
-#
-# INHERIT += "packagefeed-stability"
-#
-# Caveats:
-# 1) Latest PR values in the build system may not match those in packages
-# seen on the target (naturally)
-# 2) If you rebuild from sstate without the existing package feed present,
-# you will lose the "state" of the package feed i.e. the preserved old
-# package versions. Not the end of the world, but would negate the
-# entire purpose of this class.
-#
-# Note that running -c cleanall on a recipe will purposely delete the old
-# package files so they will definitely be copied the next time.
-
-python() {
- if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross', d):
- return
- # Package backend agnostic intercept
- # This assumes that the package_write task is called package_write_<pkgtype>
- # and that the directory in which packages should be written is
- # pointed to by the variable DEPLOY_DIR_<PKGTYPE>
- for pkgclass in (d.getVar('PACKAGE_CLASSES') or '').split():
- if pkgclass.startswith('package_'):
- pkgtype = pkgclass.split('_', 1)[1]
- pkgwritefunc = 'do_package_write_%s' % pkgtype
- sstate_outputdirs = d.getVarFlag(pkgwritefunc, 'sstate-outputdirs', False)
- deploydirvar = 'DEPLOY_DIR_%s' % pkgtype.upper()
- deploydirvarref = '${' + deploydirvar + '}'
- pkgcomparefunc = 'do_package_compare_%s' % pkgtype
-
- if bb.data.inherits_class('image', d):
- d.appendVarFlag('do_rootfs', 'recrdeptask', ' ' + pkgcomparefunc)
-
- if bb.data.inherits_class('populate_sdk_base', d):
- d.appendVarFlag('do_populate_sdk', 'recrdeptask', ' ' + pkgcomparefunc)
-
- if bb.data.inherits_class('populate_sdk_ext', d):
- d.appendVarFlag('do_populate_sdk_ext', 'recrdeptask', ' ' + pkgcomparefunc)
-
- d.appendVarFlag('do_build', 'recrdeptask', ' ' + pkgcomparefunc)
-
- if d.getVarFlag(pkgwritefunc, 'noexec') or not d.getVarFlag(pkgwritefunc, 'task'):
- # Packaging is disabled for this recipe, we shouldn't do anything
- continue
-
- if deploydirvarref in sstate_outputdirs:
- deplor_dir_pkgtype = d.expand(deploydirvarref + '-prediff')
- # Set intermediate output directory
- d.setVarFlag(pkgwritefunc, 'sstate-outputdirs', sstate_outputdirs.replace(deploydirvarref, deplor_dir_pkgtype))
- # Update SSTATE_DUPWHITELIST to avoid shared location conflicted error
- d.appendVar('SSTATE_DUPWHITELIST', ' %s' % deplor_dir_pkgtype)
-
- d.setVar(pkgcomparefunc, d.getVar('do_package_compare', False))
- d.setVarFlags(pkgcomparefunc, d.getVarFlags('do_package_compare', False))
- d.appendVarFlag(pkgcomparefunc, 'depends', ' build-compare-native:do_populate_sysroot')
- bb.build.addtask(pkgcomparefunc, 'do_build', 'do_packagedata ' + pkgwritefunc, d)
-}
-
-# This isn't the real task function - it's a template that we use in the
-# anonymous python code above
-fakeroot python do_package_compare () {
- currenttask = d.getVar('BB_CURRENTTASK')
- pkgtype = currenttask.rsplit('_', 1)[1]
- package_compare_impl(pkgtype, d)
-}
-
-def package_compare_impl(pkgtype, d):
- import errno
- import fnmatch
- import glob
- import subprocess
- import oe.sstatesig
-
- pn = d.getVar('PN')
- deploydir = d.getVar('DEPLOY_DIR_%s' % pkgtype.upper())
- prepath = deploydir + '-prediff/'
-
- # Find out PKGR values are
- pkgdatadir = d.getVar('PKGDATA_DIR')
- packages = []
- try:
- with open(os.path.join(pkgdatadir, pn), 'r') as f:
- for line in f:
- if line.startswith('PACKAGES:'):
- packages = line.split(':', 1)[1].split()
- break
- except IOError as e:
- if e.errno == errno.ENOENT:
- pass
-
- if not packages:
- bb.debug(2, '%s: no packages, nothing to do' % pn)
- return
-
- pkgrvalues = {}
- rpkgnames = {}
- rdepends = {}
- pkgvvalues = {}
- for pkg in packages:
- with open(os.path.join(pkgdatadir, 'runtime', pkg), 'r') as f:
- for line in f:
- if line.startswith('PKGR:'):
- pkgrvalues[pkg] = line.split(':', 1)[1].strip()
- if line.startswith('PKGV:'):
- pkgvvalues[pkg] = line.split(':', 1)[1].strip()
- elif line.startswith('PKG_%s:' % pkg):
- rpkgnames[pkg] = line.split(':', 1)[1].strip()
- elif line.startswith('RDEPENDS_%s:' % pkg):
- rdepends[pkg] = line.split(':', 1)[1].strip()
-
- # Prepare a list of the runtime package names for packages that were
- # actually produced
- rpkglist = []
- for pkg, rpkg in rpkgnames.items():
- if os.path.exists(os.path.join(pkgdatadir, 'runtime', pkg + '.packaged')):
- rpkglist.append((rpkg, pkg))
- rpkglist.sort(key=lambda x: len(x[0]), reverse=True)
-
- pvu = d.getVar('PV', False)
- if '$' + '{SRCPV}' in pvu:
- pvprefix = pvu.split('$' + '{SRCPV}', 1)[0]
- else:
- pvprefix = None
-
- pkgwritetask = 'package_write_%s' % pkgtype
- files = []
- docopy = False
- manifest, _ = oe.sstatesig.sstate_get_manifest_filename(pkgwritetask, d)
- mlprefix = d.getVar('MLPREFIX')
- # Copy recipe's all packages if one of the packages are different to make
- # they have the same PR.
- with open(manifest, 'r') as f:
- for line in f:
- if line.startswith(prepath):
- srcpath = line.rstrip()
- if os.path.isfile(srcpath):
- destpath = os.path.join(deploydir, os.path.relpath(srcpath, prepath))
-
- # This is crude but should work assuming the output
- # package file name starts with the package name
- # and rpkglist is sorted by length (descending)
- pkgbasename = os.path.basename(destpath)
- pkgname = None
- for rpkg, pkg in rpkglist:
- if mlprefix and pkgtype == 'rpm' and rpkg.startswith(mlprefix):
- rpkg = rpkg[len(mlprefix):]
- if pkgbasename.startswith(rpkg):
- pkgr = pkgrvalues[pkg]
- destpathspec = destpath.replace(pkgr, '*')
- if pvprefix:
- pkgv = pkgvvalues[pkg]
- if pkgv.startswith(pvprefix):
- pkgvsuffix = pkgv[len(pvprefix):]
- if '+' in pkgvsuffix:
- newpkgv = pvprefix + '*+' + pkgvsuffix.split('+', 1)[1]
- destpathspec = destpathspec.replace(pkgv, newpkgv)
- pkgname = pkg
- break
- else:
- bb.warn('Unable to map %s back to package' % pkgbasename)
- destpathspec = destpath
-
- oldfile = None
- if not docopy:
- oldfiles = glob.glob(destpathspec)
- if oldfiles:
- oldfile = oldfiles[-1]
- result = subprocess.call(['pkg-diff.sh', oldfile, srcpath])
- if result != 0:
- docopy = True
- bb.note("%s and %s are different, will copy packages" % (oldfile, srcpath))
- else:
- docopy = True
- bb.note("No old packages found for %s, will copy packages" % pkgname)
-
- files.append((pkgname, pkgbasename, srcpath, destpath))
-
- # Remove all the old files and copy again if docopy
- if docopy:
- bb.note('Copying packages for recipe %s' % pn)
- pcmanifest = os.path.join(prepath, d.expand('pkg-compare-manifest-${MULTIMACH_TARGET_SYS}-${PN}'))
- try:
- with open(pcmanifest, 'r') as f:
- for line in f:
- fn = line.rstrip()
- if fn:
- try:
- os.remove(fn)
- bb.note('Removed old package %s' % fn)
- except OSError as e:
- if e.errno == errno.ENOENT:
- pass
- except IOError as e:
- if e.errno == errno.ENOENT:
- pass
-
- # Create new manifest
- with open(pcmanifest, 'w') as f:
- for pkgname, pkgbasename, srcpath, destpath in files:
- destdir = os.path.dirname(destpath)
- bb.utils.mkdirhier(destdir)
- # Remove allarch rpm pkg if it is already existed (for
- # multilib), they're identical in theory, but sstate.bbclass
- # copies it again, so keep align with that.
- if os.path.exists(destpath) and pkgtype == 'rpm' \
- and d.getVar('PACKAGE_ARCH') == 'all':
- os.unlink(destpath)
- if (os.stat(srcpath).st_dev == os.stat(destdir).st_dev):
- # Use a hard link to save space
- os.link(srcpath, destpath)
- else:
- shutil.copyfile(srcpath, destpath)
- f.write('%s\n' % destpath)
- else:
- bb.note('Not copying packages for recipe %s' % pn)
-
-do_cleansstate[postfuncs] += "pfs_cleanpkgs"
-python pfs_cleanpkgs () {
- import errno
- for pkgclass in (d.getVar('PACKAGE_CLASSES') or '').split():
- if pkgclass.startswith('package_'):
- pkgtype = pkgclass.split('_', 1)[1]
- deploydir = d.getVar('DEPLOY_DIR_%s' % pkgtype.upper())
- prepath = deploydir + '-prediff'
- pcmanifest = os.path.join(prepath, d.expand('pkg-compare-manifest-${MULTIMACH_TARGET_SYS}-${PN}'))
- try:
- with open(pcmanifest, 'r') as f:
- for line in f:
- fn = line.rstrip()
- if fn:
- try:
- os.remove(fn)
- except OSError as e:
- if e.errno == errno.ENOENT:
- pass
- os.remove(pcmanifest)
- except IOError as e:
- if e.errno == errno.ENOENT:
- pass
-}
diff --git a/poky/meta/classes/populate_sdk_ext.bbclass b/poky/meta/classes/populate_sdk_ext.bbclass
index 9f26cfc13..6f35b612c 100644
--- a/poky/meta/classes/populate_sdk_ext.bbclass
+++ b/poky/meta/classes/populate_sdk_ext.bbclass
@@ -310,8 +310,9 @@ python copy_buildsystem () {
if os.path.exists(builddir + '/conf/auto.conf'):
with open(builddir + '/conf/auto.conf', 'r') as f:
oldlines += f.readlines()
- with open(builddir + '/conf/local.conf', 'r') as f:
- oldlines += f.readlines()
+ if os.path.exists(builddir + '/conf/local.conf'):
+ with open(builddir + '/conf/local.conf', 'r') as f:
+ oldlines += f.readlines()
(updated, newlines) = bb.utils.edit_metadata(oldlines, varlist, handle_var)
with open(baseoutpath + '/conf/local.conf', 'w') as f:
@@ -521,12 +522,18 @@ python copy_buildsystem () {
# sdk_ext_postinst() below) thus the checksum we take here would always
# be different.
manifest_file_list = ['conf/*']
+ esdk_manifest_excludes = (d.getVar('ESDK_MANIFEST_EXCLUDES') or '').split()
+ esdk_manifest_excludes_list = []
+ for exclude_item in esdk_manifest_excludes:
+ esdk_manifest_excludes_list += glob.glob(os.path.join(baseoutpath, exclude_item))
manifest_file = os.path.join(baseoutpath, 'conf', 'sdk-conf-manifest')
with open(manifest_file, 'w') as f:
for item in manifest_file_list:
for fn in glob.glob(os.path.join(baseoutpath, item)):
if fn == manifest_file:
continue
+ if fn in esdk_manifest_excludes_list:
+ continue
chksum = bb.utils.sha256_file(fn)
f.write('%s\t%s\n' % (chksum, os.path.relpath(fn, baseoutpath)))
}
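ESDK_MANIFEST_EXCLUDES entries are shell-style globs expanded relative to the SDK output tree; any file they match is simply skipped while the sdk-conf-manifest checksums are written. A self-contained sketch of that manifest pass:

import glob
import hashlib
import os

def write_manifest(baseoutpath, patterns, excludes, manifest_path):
    """Checksum files matching patterns, minus the exclude globs."""
    excluded = set()
    for pat in excludes:
        excluded.update(glob.glob(os.path.join(baseoutpath, pat)))
    with open(manifest_path, "w") as f:
        for pat in patterns:
            for fn in glob.glob(os.path.join(baseoutpath, pat)):
                if fn == manifest_path or fn in excluded:
                    continue
                if not os.path.isfile(fn):
                    continue
                with open(fn, "rb") as fh:
                    chksum = hashlib.sha256(fh.read()).hexdigest()
                f.write("%s\t%s\n" % (chksum,
                                      os.path.relpath(fn, baseoutpath)))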
@@ -653,7 +660,10 @@ sdk_ext_postinst() {
# Make sure when the user sets up the environment, they also get
# the buildtools-tarball tools in their path.
+ echo "# Save and reset OECORE_NATIVE_SYSROOT as buildtools may change it" >> $env_setup_script
+ echo "SAVED=\"\$OECORE_NATIVE_SYSROOT\"" >> $env_setup_script
echo ". $target_sdk_dir/buildtools/environment-setup*" >> $env_setup_script
+ echo "OECORE_NATIVE_SYSROOT=\"\$SAVED\"" >> $env_setup_script
fi
# Allow bitbake environment setup to be ran as part of this sdk.
@@ -676,7 +686,7 @@ sdk_ext_postinst() {
# current working directory when first run, nor will it set $1 when
# sourcing a script. That is why this has to look so ugly.
LOGFILE="$target_sdk_dir/preparing_build_system.log"
- sh -c ". buildtools/environment-setup* > $LOGFILE && cd $target_sdk_dir/`dirname ${oe_init_build_env_path}` && set $target_sdk_dir && . $target_sdk_dir/${oe_init_build_env_path} $target_sdk_dir >> $LOGFILE && python $target_sdk_dir/ext-sdk-prepare.py $LOGFILE '${SDK_INSTALL_TARGETS}'" || { echo "printf 'ERROR: this SDK was not fully installed and needs reinstalling\n'" >> $env_setup_script ; exit 1 ; }
+ sh -c ". buildtools/environment-setup* > $LOGFILE && cd $target_sdk_dir/`dirname ${oe_init_build_env_path}` && set $target_sdk_dir && . $target_sdk_dir/${oe_init_build_env_path} $target_sdk_dir >> $LOGFILE && python3 $target_sdk_dir/ext-sdk-prepare.py $LOGFILE '${SDK_INSTALL_TARGETS}'" || { echo "printf 'ERROR: this SDK was not fully installed and needs reinstalling\n'" >> $env_setup_script ; exit 1 ; }
fi
if [ -e $target_sdk_dir/ext-sdk-prepare.py ]; then
rm $target_sdk_dir/ext-sdk-prepare.py
diff --git a/poky/meta/classes/ptest.bbclass b/poky/meta/classes/ptest.bbclass
index fa4c36ec7..47611edea 100644
--- a/poky/meta/classes/ptest.bbclass
+++ b/poky/meta/classes/ptest.bbclass
@@ -6,7 +6,7 @@ PTEST_PATH ?= "${libdir}/${BPN}/ptest"
PTEST_BUILD_HOST_FILES ?= "Makefile"
PTEST_BUILD_HOST_PATTERN ?= ""
-FILES_${PN}-ptest = "${PTEST_PATH}"
+FILES_${PN}-ptest += "${PTEST_PATH}"
SECTION_${PN}-ptest = "devel"
ALLOW_EMPTY_${PN}-ptest = "1"
PTEST_ENABLED = "${@bb.utils.contains('DISTRO_FEATURES', 'ptest', '1', '0', d)}"
diff --git a/poky/meta/classes/qemuboot.bbclass b/poky/meta/classes/qemuboot.bbclass
index 3162e7a8e..824676216 100644
--- a/poky/meta/classes/qemuboot.bbclass
+++ b/poky/meta/classes/qemuboot.bbclass
@@ -4,7 +4,7 @@
#
# QB_SYSTEM_NAME: qemu name, e.g., "qemu-system-i386"
#
-# QB_OPT_APPEND: options to append to qemu, e.g., "-show-cursor"
+# QB_OPT_APPEND: options to append to qemu, e.g., "-device usb-mouse"
#
# QB_DEFAULT_KERNEL: default kernel to boot, e.g., "bzImage"
#
@@ -29,6 +29,9 @@
# QB_AUDIO_OPT: qemu audio option, e.g., "-soundhw ac97,es1370", used
# when QB_AUDIO_DRV is set.
#
+# QB_RNG: pass-through for the host random number generator; it can speed up
+# boot in system mode when the system is experiencing entropy starvation
+#
# QB_KERNEL_ROOT: kernel's root, e.g., /dev/vda
#
# QB_NETWORK_DEVICE: network device, e.g., "-device virtio-net-pci,netdev=net0,mac=@MAC@",
@@ -77,7 +80,8 @@ QB_MEM ?= "-m 256"
QB_SERIAL_OPT ?= "-serial mon:stdio -serial null"
QB_DEFAULT_KERNEL ?= "${KERNEL_IMAGETYPE}"
QB_DEFAULT_FSTYPE ?= "ext4"
-QB_OPT_APPEND ?= "-show-cursor"
+QB_RNG ?= "-object rng-random,filename=/dev/urandom,id=rng0 -device virtio-rng-pci,rng=rng0"
+QB_OPT_APPEND ?= ""
QB_NETWORK_DEVICE ?= "-device virtio-net-pci,netdev=net0,mac=@MAC@"
QB_CMDLINE_IP_SLIRP ?= "ip=dhcp"
QB_CMDLINE_IP_TAP ?= "ip=192.168.7.@CLIENT@::192.168.7.@GATEWAY@:255.255.255.0"
@@ -86,6 +90,8 @@ QB_ROOTFS_EXTRA_OPT ?= ""
# This should be kept aligned with ROOT_VM
QB_DRIVE_TYPE ?= "/dev/sd"
+inherit image-artifact-names
+
# Create qemuboot.conf
addtask do_write_qemuboot_conf after do_rootfs before do_image
diff --git a/poky/meta/classes/relocatable.bbclass b/poky/meta/classes/relocatable.bbclass
index 582812c1c..af04be5cc 100644
--- a/poky/meta/classes/relocatable.bbclass
+++ b/poky/meta/classes/relocatable.bbclass
@@ -6,13 +6,15 @@ python relocatable_binaries_preprocess() {
rpath_replace(d.expand('${SYSROOT_DESTDIR}'), d)
}
-relocatable_native_pcfiles () {
- if [ -d ${SYSROOT_DESTDIR}${libdir}/pkgconfig ]; then
- rel=${@os.path.relpath(d.getVar('base_prefix'), d.getVar('libdir') + "/pkgconfig")}
- sed -i -e "s:${base_prefix}:\${pcfiledir}/$rel:g" ${SYSROOT_DESTDIR}${libdir}/pkgconfig/*.pc
- fi
- if [ -d ${SYSROOT_DESTDIR}${datadir}/pkgconfig ]; then
- rel=${@os.path.relpath(d.getVar('base_prefix'), d.getVar('datadir') + "/pkgconfig")}
- sed -i -e "s:${base_prefix}:\${pcfiledir}/$rel:g" ${SYSROOT_DESTDIR}${datadir}/pkgconfig/*.pc
- fi
+relocatable_native_pcfiles() {
+ for dir in ${libdir}/pkgconfig ${datadir}/pkgconfig; do
+ files_template=${SYSROOT_DESTDIR}$dir/*.pc
+ # Expand to any files matching $files_template
+ files=$(echo $files_template)
+ # $files_template and $files will differ if any files were found
+ if [ "$files_template" != "$files" ]; then
+ rel=$(realpath -m --relative-to=$dir ${base_prefix})
+ sed -i -e "s:${base_prefix}:\${pcfiledir}/$rel:g" $files
+ fi
+ done
}
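The rewritten function leans on a shell idiom: an unmatched glob stays literal, so comparing the pattern against its expansion detects "no .pc files here" without ls or find. The equivalent check is more direct in Python; a sketch:

import glob
import os

def relocate_pcfiles(sysroot_destdir, pkgconfig_dir, base_prefix):
    """Point .pc prefixes at ${pcfiledir}-relative paths."""
    pcfiles = glob.glob(os.path.join(sysroot_destdir + pkgconfig_dir, "*.pc"))
    if not pcfiles:  # nothing matched, nothing to rewrite
        return
    # relative path from the pkgconfig dir back to the prefix
    rel = os.path.relpath(base_prefix, pkgconfig_dir)
    for pc in pcfiles:
        with open(pc) as f:
            data = f.read()
        with open(pc, "w") as f:
            f.write(data.replace(base_prefix, "${pcfiledir}/" + rel))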
diff --git a/poky/meta/classes/reproducible_build.bbclass b/poky/meta/classes/reproducible_build.bbclass
index 8da40f656..2f3bd90b0 100644
--- a/poky/meta/classes/reproducible_build.bbclass
+++ b/poky/meta/classes/reproducible_build.bbclass
@@ -70,100 +70,16 @@ do_deploy_source_date_epoch[sstate-plaindirs] = "${SDE_DEPLOYDIR}"
addtask do_deploy_source_date_epoch_setscene
addtask do_deploy_source_date_epoch before do_configure after do_patch
-def get_source_date_epoch_from_known_files(d, sourcedir):
- source_date_epoch = None
- newest_file = None
- known_files = set(["NEWS", "ChangeLog", "Changelog", "CHANGES"])
- for file in known_files:
- filepath = os.path.join(sourcedir, file)
- if os.path.isfile(filepath):
- mtime = int(os.lstat(filepath).st_mtime)
- # There may be more than one "known_file" present, if so, use the youngest one
- if not source_date_epoch or mtime > source_date_epoch:
- source_date_epoch = mtime
- newest_file = filepath
- if newest_file:
- bb.debug(1, "SOURCE_DATE_EPOCH taken from: %s" % newest_file)
- return source_date_epoch
-
-def find_git_folder(d, sourcedir):
- # First guess: WORKDIR/git
- # This is the default git fetcher unpack path
- workdir = d.getVar('WORKDIR')
- gitpath = os.path.join(workdir, "git/.git")
- if os.path.isdir(gitpath):
- return gitpath
-
- # Second guess: ${S}
- gitpath = os.path.join(sourcedir, ".git")
- if os.path.isdir(gitpath):
- return gitpath
-
- # Perhaps there was a subpath or destsuffix specified.
- # Go looking in the WORKDIR
- exclude = set(["build", "image", "license-destdir", "patches", "pseudo",
- "recipe-sysroot", "recipe-sysroot-native", "sysroot-destdir", "temp"])
- for root, dirs, files in os.walk(workdir, topdown=True):
- dirs[:] = [d for d in dirs if d not in exclude]
- if '.git' in dirs:
- return root
-
- bb.warn("Failed to find a git repository in WORKDIR: %s" % workdir)
- return None
-
-def get_source_date_epoch_from_git(d, sourcedir):
- source_date_epoch = None
- if "git://" in d.getVar('SRC_URI'):
- gitpath = find_git_folder(d, sourcedir)
- if gitpath:
- import subprocess
- source_date_epoch = int(subprocess.check_output(['git','log','-1','--pretty=%ct'], cwd=gitpath))
- bb.debug(1, "git repository: %s" % gitpath)
- return source_date_epoch
-
-def get_source_date_epoch_from_youngest_file(d, sourcedir):
- if sourcedir == d.getVar('WORKDIR'):
- # These sources are almost certainly not from a tarball
- return None
-
- # Do it the hard way: check all files and find the youngest one...
- source_date_epoch = None
- newest_file = None
- for root, dirs, files in os.walk(sourcedir, topdown=True):
- files = [f for f in files if not f[0] == '.']
-
- for fname in files:
- filename = os.path.join(root, fname)
- try:
- mtime = int(os.lstat(filename).st_mtime)
- except ValueError:
- mtime = 0
- if not source_date_epoch or mtime > source_date_epoch:
- source_date_epoch = mtime
- newest_file = filename
-
- if newest_file:
- bb.debug(1, "Newest file found: %s" % newest_file)
- return source_date_epoch
-
-def fixed_source_date_epoch():
- bb.debug(1, "No tarball or git repo found to determine SOURCE_DATE_EPOCH")
- return 0
-
python create_source_date_epoch_stamp() {
+ import oe.reproducible
+
epochfile = d.getVar('SDE_FILE')
# If it exists we need to regenerate as the sources may have changed
if os.path.isfile(epochfile):
bb.debug(1, "Deleting existing SOURCE_DATE_EPOCH from: %s" % epochfile)
os.remove(epochfile)
- sourcedir = d.getVar('S')
- source_date_epoch = (
- get_source_date_epoch_from_git(d, sourcedir) or
- get_source_date_epoch_from_known_files(d, sourcedir) or
- get_source_date_epoch_from_youngest_file(d, sourcedir) or
- fixed_source_date_epoch() # Last resort
- )
+ source_date_epoch = oe.reproducible.get_source_date_epoch(d, d.getVar('S'))
bb.debug(1, "SOURCE_DATE_EPOCH: %d" % source_date_epoch)
bb.utils.mkdirhier(d.getVar('SDE_DIR'))
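The removed helpers were folded into the oe.reproducible library rather than deleted outright; the fallback chain itself is unchanged. A condensed, self-contained sketch of two of the heuristics and the chain (the last resort is the fixed epoch 0):

import os
import subprocess

def epoch_from_git(sourcedir):
    try:
        out = subprocess.check_output(
            ["git", "log", "-1", "--pretty=%ct"], cwd=sourcedir)
        return int(out.decode("utf-8").strip())
    except (OSError, ValueError, subprocess.CalledProcessError):
        return None

def epoch_from_known_files(sourcedir):
    newest = None
    for name in ("NEWS", "ChangeLog", "Changelog", "CHANGES"):
        path = os.path.join(sourcedir, name)
        if os.path.isfile(path):
            mtime = int(os.lstat(path).st_mtime)
            if newest is None or mtime > newest:
                newest = mtime
    return newest

def get_source_date_epoch(sourcedir):
    # git history first, then well-known changelog files, then 0
    return (epoch_from_git(sourcedir) or
            epoch_from_known_files(sourcedir) or
            0)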
diff --git a/poky/meta/classes/rootfs-postcommands.bbclass b/poky/meta/classes/rootfs-postcommands.bbclass
index 2f171836f..1f27a3d07 100644
--- a/poky/meta/classes/rootfs-postcommands.bbclass
+++ b/poky/meta/classes/rootfs-postcommands.bbclass
@@ -1,6 +1,6 @@
# Zap the root password if debug-tweaks feature is not enabled
-ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'empty-root-password' ], "", "zap_empty_root_password ; ",d)}'
+ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'empty-root-password' ], "", "zap_empty_root_password; ",d)}'
# Allow dropbear/openssh to accept logins from accounts with an empty password string if debug-tweaks or allow-empty-password is enabled
ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'allow-empty-password' ], "ssh_allow_empty_password; ", "",d)}'
@@ -12,7 +12,7 @@ ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'deb
ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'post-install-logging' ], "postinst_enable_logging; ", "",d)}'
# Create /etc/timestamp during image construction to give a reasonably sane default time setting
-ROOTFS_POSTPROCESS_COMMAND += "rootfs_update_timestamp ; "
+ROOTFS_POSTPROCESS_COMMAND += "rootfs_update_timestamp; "
# Tweak the mount options for rootfs in /etc/fstab if read-only-rootfs is enabled
ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", "read_only_rootfs_hook; ", "",d)}'
@@ -26,7 +26,7 @@ ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "read-only
APPEND_append = '${@bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", " ro", "", d)}'
# Generates test data file with data store variables expanded in json format
-ROOTFS_POSTPROCESS_COMMAND += "write_image_test_data ; "
+ROOTFS_POSTPROCESS_COMMAND += "write_image_test_data; "
# Write manifest
IMAGE_MANIFEST = "${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.manifest"
@@ -39,6 +39,8 @@ ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("DISTRO_FEATURES", "systemd"
ROOTFS_POSTPROCESS_COMMAND += 'empty_var_volatile;'
+inherit image-artifact-names
+
# Sort the user and group entries in /etc by ID in order to make the content
# deterministic. Package installs are not deterministic, causing the ordering
# of entries to change between builds. In case that this isn't desired,
@@ -308,12 +310,16 @@ rootfs_check_host_user_contaminated () {
HOST_USER_UID="$(PSEUDO_UNLOAD=1 id -u)"
HOST_USER_GID="$(PSEUDO_UNLOAD=1 id -g)"
- find "${IMAGE_ROOTFS}" -wholename "${IMAGE_ROOTFS}/home" -prune \
- -user "$HOST_USER_UID" -o -group "$HOST_USER_GID" >"$contaminated"
+ find "${IMAGE_ROOTFS}" -path "${IMAGE_ROOTFS}/home" -prune -o \
+ -user "$HOST_USER_UID" -print -o -group "$HOST_USER_GID" -print >"$contaminated"
+
+ sed -e "s,${IMAGE_ROOTFS},," $contaminated | while read line; do
+ bbwarn "Path in the rootfs is owned by the same user or group as the user running bitbake:" $line `ls -lan ${IMAGE_ROOTFS}/$line`
+ done
if [ -s "$contaminated" ]; then
- echo "WARNING: Paths in the rootfs are owned by the same user or group as the user running bitbake. See the logfile for the specific paths."
- cat "$contaminated" | sed "s,^, ,"
+ bbwarn "/etc/passwd:" `cat ${IMAGE_ROOTFS}/etc/passwd`
+ bbwarn "/etc/group:" `cat ${IMAGE_ROOTFS}/etc/group`
fi
}
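The corrected find expression prunes ${IMAGE_ROOTFS}/home and then prints every remaining path owned by the build user's uid or gid; each hit is now also surfaced through bbwarn rather than buried in the log. The same walk in Python, for reference only:

import os

def find_contaminated(rootfs, host_uid, host_gid):
    """Paths under rootfs (minus /home) owned by the build user/group."""
    home = os.path.join(rootfs, "home")
    hits = []
    for root, dirs, files in os.walk(rootfs):
        # prune /home, as the find -prune above does
        dirs[:] = [dn for dn in dirs if os.path.join(root, dn) != home]
        for name in dirs + files:
            path = os.path.join(root, name)
            st = os.lstat(path)
            if st.st_uid == host_uid or st.st_gid == host_gid:
                hits.append(os.path.relpath(path, rootfs))
    return hits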
diff --git a/poky/meta/classes/rootfs_rpm.bbclass b/poky/meta/classes/rootfs_rpm.bbclass
index 51f89ea99..82584f386 100644
--- a/poky/meta/classes/rootfs_rpm.bbclass
+++ b/poky/meta/classes/rootfs_rpm.bbclass
@@ -9,7 +9,7 @@ export STAGING_INCDIR
export STAGING_LIBDIR
# Add 100Meg of extra space for dnf
-IMAGE_ROOTFS_EXTRA_SPACE_append = "${@bb.utils.contains("PACKAGE_INSTALL", "dnf", " + 102400", "" ,d)}"
+IMAGE_ROOTFS_EXTRA_SPACE_append = "${@bb.utils.contains("PACKAGE_INSTALL", "dnf", " + 102400", "", d)}"
# Dnf is python based, so be sure python3-native is available to us.
EXTRANATIVEPATH += "python3-native"
diff --git a/poky/meta/classes/rootfsdebugfiles.bbclass b/poky/meta/classes/rootfsdebugfiles.bbclass
index e2ba4e364..85c7ec743 100644
--- a/poky/meta/classes/rootfsdebugfiles.bbclass
+++ b/poky/meta/classes/rootfsdebugfiles.bbclass
@@ -28,7 +28,7 @@
ROOTFS_DEBUG_FILES ?= ""
ROOTFS_DEBUG_FILES[doc] = "Lists additional files or directories to be installed with 'cp -a' in the format 'source1 target1;source2 target2;...'"
-ROOTFS_POSTPROCESS_COMMAND += "rootfs_debug_files ;"
+ROOTFS_POSTPROCESS_COMMAND += "rootfs_debug_files;"
rootfs_debug_files () {
#!/bin/sh -e
echo "${ROOTFS_DEBUG_FILES}" | sed -e 's/;/\n/g' | while read source target mode; do
diff --git a/poky/meta/classes/sign_rpm.bbclass b/poky/meta/classes/sign_rpm.bbclass
index 64ae7ce30..73a55a512 100644
--- a/poky/meta/classes/sign_rpm.bbclass
+++ b/poky/meta/classes/sign_rpm.bbclass
@@ -64,6 +64,7 @@ python sign_rpm () {
d.getVar('RPM_FSK_PATH'),
d.getVar('RPM_FSK_PASSWORD'))
}
+sign_rpm[vardepsexclude] += "RPM_GPG_SIGN_CHUNK"
do_package_index[depends] += "signing-keys:do_deploy"
do_rootfs[depends] += "signing-keys:do_populate_sysroot"
diff --git a/poky/meta/classes/spdx.bbclass b/poky/meta/classes/spdx.bbclass
deleted file mode 100644
index fb78e274a..000000000
--- a/poky/meta/classes/spdx.bbclass
+++ /dev/null
@@ -1,360 +0,0 @@
-# This class integrates real-time license scanning, generation of SPDX standard
-# output and verifiying license info during the building process.
-# It is a combination of efforts from the OE-Core, SPDX and Fossology projects.
-#
-# For more information on FOSSology:
-# http://www.fossology.org
-#
-# For more information on FOSSologySPDX commandline:
-# https://github.com/spdx-tools/fossology-spdx/wiki/Fossology-SPDX-Web-API
-#
-# For more information on SPDX:
-# http://www.spdx.org
-#
-
-# SPDX file will be output to the path which is defined as[SPDX_MANIFEST_DIR]
-# in ./meta/conf/licenses.conf.
-
-SPDXSSTATEDIR = "${WORKDIR}/spdx_sstate_dir"
-
-# If ${S} isn't actually the top-level source directory, set SPDX_S to point at
-# the real top-level directory.
-SPDX_S ?= "${S}"
-
-python do_spdx () {
- import os, sys
- import json, shutil
-
- info = {}
- info['workdir'] = d.getVar('WORKDIR')
- info['sourcedir'] = d.getVar('SPDX_S')
- info['pn'] = d.getVar('PN')
- info['pv'] = d.getVar('PV')
- info['spdx_version'] = d.getVar('SPDX_VERSION')
- info['data_license'] = d.getVar('DATA_LICENSE')
-
- sstatedir = d.getVar('SPDXSSTATEDIR')
- sstatefile = os.path.join(sstatedir, info['pn'] + info['pv'] + ".spdx")
-
- manifest_dir = d.getVar('SPDX_MANIFEST_DIR')
- info['outfile'] = os.path.join(manifest_dir, info['pn'] + ".spdx" )
-
- info['spdx_temp_dir'] = d.getVar('SPDX_TEMP_DIR')
- info['tar_file'] = os.path.join(info['workdir'], info['pn'] + ".tar.gz" )
-
- # Make sure important dirs exist
- try:
- bb.utils.mkdirhier(manifest_dir)
- bb.utils.mkdirhier(sstatedir)
- bb.utils.mkdirhier(info['spdx_temp_dir'])
- except OSError as e:
- bb.error("SPDX: Could not set up required directories: " + str(e))
- return
-
- ## get everything from cache. use it to decide if
- ## something needs to be rerun
- cur_ver_code = get_ver_code(info['sourcedir'])
- cache_cur = False
- if os.path.exists(sstatefile):
- ## cache for this package exists. read it in
- cached_spdx = get_cached_spdx(sstatefile)
-
- if cached_spdx['PackageVerificationCode'] == cur_ver_code:
- bb.warn("SPDX: Verification code for " + info['pn']
- + "is same as cache's. do nothing")
- cache_cur = True
- else:
- local_file_info = setup_foss_scan(info, True, cached_spdx['Files'])
- else:
- local_file_info = setup_foss_scan(info, False, None)
-
- if cache_cur:
- spdx_file_info = cached_spdx['Files']
- foss_package_info = cached_spdx['Package']
- foss_license_info = cached_spdx['Licenses']
- else:
- ## setup fossology command
- foss_server = d.getVar('FOSS_SERVER')
- foss_flags = d.getVar('FOSS_WGET_FLAGS')
- foss_full_spdx = d.getVar('FOSS_FULL_SPDX') == "true" or False
- foss_command = "wget %s --post-file=%s %s"\
- % (foss_flags, info['tar_file'], foss_server)
-
- foss_result = run_fossology(foss_command, foss_full_spdx)
- if foss_result is not None:
- (foss_package_info, foss_file_info, foss_license_info) = foss_result
- spdx_file_info = create_spdx_doc(local_file_info, foss_file_info)
- ## write to cache
- write_cached_spdx(sstatefile, cur_ver_code, foss_package_info,
- spdx_file_info, foss_license_info)
- else:
- bb.error("SPDX: Could not communicate with FOSSology server. Command was: " + foss_command)
- return
-
- ## Get document and package level information
- spdx_header_info = get_header_info(info, cur_ver_code, foss_package_info)
-
- ## CREATE MANIFEST
- create_manifest(info, spdx_header_info, spdx_file_info, foss_license_info)
-
- ## clean up the temp stuff
- shutil.rmtree(info['spdx_temp_dir'], ignore_errors=True)
- if os.path.exists(info['tar_file']):
- remove_file(info['tar_file'])
-}
-addtask spdx after do_patch before do_configure
-
-def create_manifest(info, header, files, licenses):
- import codecs
- with codecs.open(info['outfile'], mode='w', encoding='utf-8') as f:
- # Write header
- f.write(header + '\n')
-
- # Write file data
-        for chksum, block in files.items():
-            f.write("FileName: " + block['FileName'] + '\n')
-            for key, value in block.items():
-                if key != 'FileName':
-                    f.write(key + ": " + value + '\n')
-            f.write('\n')
-
-        # Write license data
-        for license_id, block in licenses.items():
-            f.write("LicenseID: " + license_id + '\n')
-            for key, value in block.items():
-                f.write(key + ": " + value + '\n')
-            f.write('\n')
-
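For reference, the file and license blocks written above are plain SPDX tag-value text; with invented values, one file entry followed by one license entry looks roughly like:

    FileName: src/main.c
    FileType: SOURCE
    FileChecksum: SHA1: 2ef7bde608ce5404e97d5f042f95f89f1c232871
    LicenseConcluded: MIT
    LicenseInfoInFile: MIT
    FileCopyrightText: <text>NOASSERTION</text>

    LicenseID: MIT
    LicenseName: MIT License
    ExtractedText: <text>Permission is hereby granted, free of charge</text>
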
-def get_cached_spdx(sstatefile):
- import json
- import codecs
- cached_spdx_info = {}
- with codecs.open(sstatefile, mode='r', encoding='utf-8') as f:
- try:
- cached_spdx_info = json.load(f)
-        except ValueError:
-            cached_spdx_info = None
- return cached_spdx_info
-
-def write_cached_spdx(sstatefile, ver_code, package_info, files, license_info):
- import json
- import codecs
-    spdx_doc = {}
-    spdx_doc['PackageVerificationCode'] = ver_code
-    spdx_doc['Files'] = files
-    spdx_doc['Package'] = package_info
-    spdx_doc['Licenses'] = license_info
- with codecs.open(sstatefile, mode='w', encoding='utf-8') as f:
- f.write(json.dumps(spdx_doc))
-
-def setup_foss_scan(info, cache, cached_files):
- import errno, shutil
- import tarfile
- file_info = {}
- cache_dict = {}
-
- for f_dir, f in list_files(info['sourcedir']):
- full_path = os.path.join(f_dir, f)
- abs_path = os.path.join(info['sourcedir'], full_path)
- dest_dir = os.path.join(info['spdx_temp_dir'], f_dir)
- dest_path = os.path.join(info['spdx_temp_dir'], full_path)
-
- checksum = hash_file(abs_path)
-        if checksum is not None:
- file_info[checksum] = {}
- ## retain cache information if it exists
- if cache and checksum in cached_files:
- file_info[checksum] = cached_files[checksum]
- ## have the file included in what's sent to the FOSSology server
- else:
- file_info[checksum]['FileName'] = full_path
- try:
- bb.utils.mkdirhier(dest_dir)
- shutil.copyfile(abs_path, dest_path)
- except OSError as e:
- bb.warn("SPDX: mkdirhier failed: " + str(e))
- except shutil.Error as e:
- bb.warn("SPDX: copyfile failed: " + str(e))
- except IOError as e:
- bb.warn("SPDX: copyfile failed: " + str(e))
- else:
- bb.warn("SPDX: Could not get checksum for file: " + f)
-
- with tarfile.open(info['tar_file'], "w:gz") as tar:
- tar.add(info['spdx_temp_dir'], arcname=os.path.basename(info['spdx_temp_dir']))
-
- return file_info
-
-def remove_file(file_name):
-    try:
-        os.remove(file_name)
-    except OSError:
-        pass
-
-def list_files(basedir):
-    for root, dirs, files in os.walk(basedir):
-        rel_root = os.path.relpath(root, basedir)
-        for f in files:
-            yield rel_root, f
-
-def hash_file(file_name):
- from bb.utils import sha1_file
- return sha1_file(file_name)
-
-def hash_string(data):
- import hashlib
- sha1 = hashlib.sha1()
- sha1.update(data.encode('utf-8'))
- return sha1.hexdigest()
-
-def run_fossology(foss_command, full_spdx):
-    import re
- import subprocess
-
- try:
- foss_output = subprocess.check_output(foss_command.split(),
- stderr=subprocess.STDOUT).decode('utf-8')
-    except subprocess.CalledProcessError:
-        return None
-
- foss_output = foss_output.replace('\r', '')
-
- # Package info
- package_info = {}
- if full_spdx:
- # All mandatory, only one occurrence
- package_info['PackageCopyrightText'] = re.findall('PackageCopyrightText: (.*?</text>)', foss_output, re.S)[0]
- package_info['PackageLicenseDeclared'] = re.findall('PackageLicenseDeclared: (.*)', foss_output)[0]
- package_info['PackageLicenseConcluded'] = re.findall('PackageLicenseConcluded: (.*)', foss_output)[0]
- # These may be more than one
- package_info['PackageLicenseInfoFromFiles'] = re.findall('PackageLicenseInfoFromFiles: (.*)', foss_output)
- else:
- DEFAULT = "NOASSERTION"
- package_info['PackageCopyrightText'] = "<text>" + DEFAULT + "</text>"
- package_info['PackageLicenseDeclared'] = DEFAULT
- package_info['PackageLicenseConcluded'] = DEFAULT
- package_info['PackageLicenseInfoFromFiles'] = []
-
- # File info
- file_info = {}
-    # FileName is also in PackageFileName, so we match on FileType as well.
-    records = re.findall('FileName:.*?FileType:.*?</text>', foss_output, re.S)
- for rec in records:
- chksum = re.findall('FileChecksum: SHA1: (.*)\n', rec)[0]
- file_info[chksum] = {}
- file_info[chksum]['FileCopyrightText'] = re.findall('FileCopyrightText: '
- + '(.*?</text>)', rec, re.S )[0]
- fields = ['FileName', 'FileType', 'LicenseConcluded', 'LicenseInfoInFile']
- for field in fields:
- file_info[chksum][field] = re.findall(field + ': (.*)', rec)[0]
-
- # Licenses
- license_info = {}
-    licenses = re.findall('LicenseID:.*?LicenseName:.*?\n', foss_output, re.S)
- for lic in licenses:
- license_id = re.findall('LicenseID: (.*)\n', lic)[0]
- license_info[license_id] = {}
- license_info[license_id]['ExtractedText'] = re.findall('ExtractedText: (.*?</text>)', lic, re.S)[0]
- license_info[license_id]['LicenseName'] = re.findall('LicenseName: (.*)', lic)[0]
-
- return (package_info, file_info, license_info)
-
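To make the record splitting above concrete, here is a small self-contained sketch (the FOSSology output text is invented) showing that anchoring on FileType isolates exactly one per-file record:

    import re

    sample = ("FileName: ./a.c\n"
              "FileType: SOURCE\n"
              "FileChecksum: SHA1: da39a3ee5e6b4b0d3255bfef95601890afd80709\n"
              "LicenseConcluded: MIT\n"
              "LicenseInfoInFile: MIT\n"
              "FileCopyrightText: <text>NOASSERTION</text>")
    records = re.findall('FileName:.*?FileType:.*?</text>', sample, re.S)
    print(len(records))  # -> 1
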
-def create_spdx_doc(file_info, scanned_files):
- import json
- ## push foss changes back into cache
-    for chksum, lic_info in scanned_files.items():
- if chksum in file_info:
- file_info[chksum]['FileType'] = lic_info['FileType']
- file_info[chksum]['FileChecksum: SHA1'] = chksum
- file_info[chksum]['LicenseInfoInFile'] = lic_info['LicenseInfoInFile']
- file_info[chksum]['LicenseConcluded'] = lic_info['LicenseConcluded']
- file_info[chksum]['FileCopyrightText'] = lic_info['FileCopyrightText']
- else:
- bb.warn("SPDX: " + lic_info['FileName'] + " : " + chksum
- + " : is not in the local file info: "
- + json.dumps(lic_info, indent=1))
- return file_info
-
-def get_ver_code(dirname):
-    chksums = []
-    for f_dir, f in list_files(dirname):
-        path = os.path.join(dirname, f_dir, f)
-        checksum = hash_file(path)
-        if checksum is not None:
-            chksums.append(checksum)
-        else:
-            bb.warn("SPDX: Could not hash file: " + path)
-    ## SPDX mandates sorting the file checksums before concatenation
-    ver_code_string = ''.join(sorted(chksums)).lower()
-    ver_code = hash_string(ver_code_string)
-    return ver_code
-
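As a worked example of the scheme in get_ver_code() (digests invented), the verification code is simply the SHA-1 of the sorted, concatenated per-file SHA-1 digests:

    import hashlib

    digests = ["da39a3ee5e6b4b0d3255bfef95601890afd80709",
               "2ef7bde608ce5404e97d5f042f95f89f1c232871"]
    ver_code_string = ''.join(sorted(digests)).lower()
    print(hashlib.sha1(ver_code_string.encode('utf-8')).hexdigest())
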
-def get_header_info(info, spdx_verification_code, package_info):
- """
- Put together the header SPDX information.
- Eventually this needs to become a lot less
- of a hardcoded thing.
- """
- from datetime import datetime
- import os
- head = []
- DEFAULT = "NOASSERTION"
-
- package_checksum = hash_file(info['tar_file'])
- if package_checksum is None:
- package_checksum = DEFAULT
-
- ## document level information
- head.append("## SPDX Document Information")
- head.append("SPDXVersion: " + info['spdx_version'])
- head.append("DataLicense: " + info['data_license'])
- head.append("DocumentComment: <text>SPDX for "
- + info['pn'] + " version " + info['pv'] + "</text>")
- head.append("")
-
- ## Creator information
-    ## Use UTC so the trailing 'Z' in the timestamp is accurate.
-    now = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
- head.append("## Creation Information")
- ## Tools are supposed to have a version, but FOSSology+SPDX provides none.
- head.append("Creator: Tool: FOSSology+SPDX")
- head.append("Created: " + now)
- head.append("CreatorComment: <text>UNO</text>")
- head.append("")
-
- ## package level information
- head.append("## Package Information")
- head.append("PackageName: " + info['pn'])
- head.append("PackageVersion: " + info['pv'])
- head.append("PackageFileName: " + os.path.basename(info['tar_file']))
- head.append("PackageSupplier: Person:" + DEFAULT)
- head.append("PackageDownloadLocation: " + DEFAULT)
- head.append("PackageSummary: <text></text>")
- head.append("PackageOriginator: Person:" + DEFAULT)
- head.append("PackageChecksum: SHA1: " + package_checksum)
- head.append("PackageVerificationCode: " + spdx_verification_code)
- head.append("PackageDescription: <text>" + info['pn']
- + " version " + info['pv'] + "</text>")
- head.append("")
- head.append("PackageCopyrightText: "
- + package_info['PackageCopyrightText'])
- head.append("")
- head.append("PackageLicenseDeclared: "
- + package_info['PackageLicenseDeclared'])
- head.append("PackageLicenseConcluded: "
- + package_info['PackageLicenseConcluded'])
-
- for licref in package_info['PackageLicenseInfoFromFiles']:
- head.append("PackageLicenseInfoFromFiles: " + licref)
- head.append("")
-
- ## header for file level
- head.append("## File Information")
- head.append("")
-
- return '\n'.join(head)
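Assembled, the header built above serializes to tag-value text of roughly this shape (all values invented, abridged to the document and package sections):

    ## SPDX Document Information
    SPDXVersion: SPDX-1.2
    DataLicense: CC0-1.0
    DocumentComment: <text>SPDX for zlib version 1.2.11</text>

    ## Creation Information
    Creator: Tool: FOSSology+SPDX
    Created: 2020-08-20T12:00:00Z

    ## Package Information
    PackageName: zlib
    PackageVersion: 1.2.11
    PackageFileName: zlib.tar.gz
    PackageVerificationCode: 5af06e1ca48034c8b9e53b53b9a0ebbb06e1bd26
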
diff --git a/poky/meta/classes/sstate.bbclass b/poky/meta/classes/sstate.bbclass
index aa9c30b4e..a8ae75101 100644
--- a/poky/meta/classes/sstate.bbclass
+++ b/poky/meta/classes/sstate.bbclass
@@ -355,6 +355,9 @@ def sstate_installpkg(ss, d):
d.setVar('SSTATE_INSTDIR', sstateinst)
if bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG"), False):
+ if not os.path.isfile(sstatepkg + '.sig'):
+ bb.warn("No signature file for sstate package %s, skipping acceleration..." % sstatepkg)
+ return False
signer = get_signer(d, 'local')
if not signer.verify(sstatepkg + '.sig'):
bb.warn("Cannot verify signature on sstate package %s, skipping acceleration..." % sstatepkg)
@@ -733,10 +736,11 @@ def pstaging_fetch(sstatefetch, d):
localdata.setVar('SRC_URI', srcuri)
try:
fetcher = bb.fetch2.Fetch([srcuri], localdata, cache=False)
+ fetcher.checkstatus()
fetcher.download()
except bb.fetch2.BBFetchException:
- break
+ pass
def sstate_setscene(d):
shared_state = sstate_state_fromvars(d)
@@ -783,7 +787,7 @@ sstate_create_package () {
return
fi
- mkdir -p `dirname ${SSTATE_PKG}`
+ mkdir --mode=0775 -p `dirname ${SSTATE_PKG}`
TFILE=`mktemp ${SSTATE_PKG}.XXXXXXXX`
# Use pigz if available
@@ -843,7 +847,7 @@ python sstate_report_unihash() {
sstate_unpack_package () {
tar -xvzf ${SSTATE_PKG}
# update .siginfo atime on local/NFS mirror
- [ -w ${SSTATE_PKG}.siginfo ] && [ -h ${SSTATE_PKG}.siginfo ] && touch -a ${SSTATE_PKG}.siginfo
+ [ -O ${SSTATE_PKG}.siginfo ] && [ -w ${SSTATE_PKG}.siginfo ] && [ -h ${SSTATE_PKG}.siginfo ] && touch -a ${SSTATE_PKG}.siginfo
# Use "! -w ||" to return true for read only files
[ ! -w ${SSTATE_PKG} ] || touch --no-dereference ${SSTATE_PKG}
[ ! -w ${SSTATE_PKG}.sig ] || [ ! -e ${SSTATE_PKG}.sig ] || touch --no-dereference ${SSTATE_PKG}.sig
diff --git a/poky/meta/classes/staging.bbclass b/poky/meta/classes/staging.bbclass
index de3a19815..f0a619b35 100644
--- a/poky/meta/classes/staging.bbclass
+++ b/poky/meta/classes/staging.bbclass
@@ -614,7 +614,7 @@ python staging_taskhandler() {
bbtasks = e.tasklist
for task in bbtasks:
deps = d.getVarFlag(task, "depends")
- if deps and "populate_sysroot" in deps:
+ if task == "do_configure" or (deps and "populate_sysroot" in deps):
d.appendVarFlag(task, "prefuncs", " extend_recipe_sysroot")
}
staging_taskhandler[eventmask] = "bb.event.RecipeTaskPreProcess"
diff --git a/poky/meta/classes/testexport.bbclass b/poky/meta/classes/testexport.bbclass
index 59cbaefbf..1b0fb44a4 100644
--- a/poky/meta/classes/testexport.bbclass
+++ b/poky/meta/classes/testexport.bbclass
@@ -137,7 +137,7 @@ def copy_needed_files(d, tc):
shutil.rmtree(os.path.join(subdir, dir))
# Create tar file for common parts of testexport
- create_tarball(d, "testexport.tar.gz", d.getVar("TEST_EXPORT_DIR"))
+ testexport_create_tarball(d, "testexport.tar.gz", d.getVar("TEST_EXPORT_DIR"))
# Copy packages needed for runtime testing
package_extraction(d, tc.suites)
@@ -146,7 +146,7 @@ def copy_needed_files(d, tc):
export_pkg_dir = os.path.join(d.getVar("TEST_EXPORT_DIR"), "packages")
oe.path.copytree(test_pkg_dir, export_pkg_dir)
# Create tar file for packages needed by the DUT
- create_tarball(d, "testexport_packages_%s.tar.gz" % d.getVar("MACHINE"), export_pkg_dir)
+ testexport_create_tarball(d, "testexport_packages_%s.tar.gz" % d.getVar("MACHINE"), export_pkg_dir)
# Copy SDK
if d.getVar("TEST_EXPORT_SDK_ENABLED") == "1":
@@ -159,11 +159,11 @@ def copy_needed_files(d, tc):
shutil.copy2(tarball_path, export_sdk_dir)
# Create tar file for the sdk
- create_tarball(d, "testexport_sdk_%s.tar.gz" % d.getVar("SDK_ARCH"), export_sdk_dir)
+ testexport_create_tarball(d, "testexport_sdk_%s.tar.gz" % d.getVar("SDK_ARCH"), export_sdk_dir)
bb.plain("Exported tests to: %s" % export_path)
-def create_tarball(d, tar_name, src_dir):
+def testexport_create_tarball(d, tar_name, src_dir):
import tarfile
diff --git a/poky/meta/classes/testimage.bbclass b/poky/meta/classes/testimage.bbclass
index 00f0c2983..e3feef02f 100644
--- a/poky/meta/classes/testimage.bbclass
+++ b/poky/meta/classes/testimage.bbclass
@@ -3,6 +3,8 @@
# Released under the MIT license (see COPYING.MIT)
inherit metadata_scm
+inherit image-artifact-names
+
# testimage.bbclass enables testing of qemu images using python unittests.
# Most of the tests are commands run on target image over ssh.
# To use it add testimage to global inherit and call your target image with -c testimage
@@ -317,6 +319,7 @@ def testimage_main(d):
target_kwargs['powercontrol_extra_args'] = d.getVar("TEST_POWERCONTROL_EXTRA_ARGS") or ""
target_kwargs['serialcontrol_cmd'] = d.getVar("TEST_SERIALCONTROL_CMD") or None
target_kwargs['serialcontrol_extra_args'] = d.getVar("TEST_SERIALCONTROL_EXTRA_ARGS") or ""
+ target_kwargs['testimage_dump_target'] = d.getVar("testimage_dump_target") or ""
def export_ssh_agent(d):
import os
diff --git a/poky/meta/classes/uboot-sign.bbclass b/poky/meta/classes/uboot-sign.bbclass
index 982ed46d0..713196df4 100644
--- a/poky/meta/classes/uboot-sign.bbclass
+++ b/poky/meta/classes/uboot-sign.bbclass
@@ -117,15 +117,16 @@ do_install_append() {
fi
}
+do_deploy_prepend_pn-${UBOOT_PN}() {
+ if [ "${UBOOT_SIGN_ENABLE}" = "1" -a -n "${UBOOT_DTB_BINARY}" ]; then
+ concat_dtb
+ fi
+}
+
python () {
if d.getVar('UBOOT_SIGN_ENABLE') == '1' and d.getVar('PN') == d.getVar('UBOOT_PN') and d.getVar('UBOOT_DTB_BINARY'):
kernel_pn = d.getVar('PREFERRED_PROVIDER_virtual/kernel')
        # Make "bitbake u-boot -cdeploy" deploy the signed u-boot.dtb
d.appendVarFlag('do_deploy', 'depends', ' %s:do_deploy' % kernel_pn)
-
-        # kernel's do_deploy is a little special, so we can't use
-        # do_deploy_append, otherwise it would override
-        # kernel_do_deploy.
- d.appendVarFlag('do_deploy', 'prefuncs', ' concat_dtb')
}
diff --git a/poky/meta/classes/uninative.bbclass b/poky/meta/classes/uninative.bbclass
index 70799bbf6..316c0f061 100644
--- a/poky/meta/classes/uninative.bbclass
+++ b/poky/meta/classes/uninative.bbclass
@@ -56,12 +56,17 @@ python uninative_event_fetchloader() {
# Our games with path manipulation of DL_DIR mean standard PREMIRRORS don't work
# and we can't easily put 'chksum' into the url path from a url parameter with
# the current fetcher url handling
- ownmirror = d.getVar('SOURCE_MIRROR_URL')
- if ownmirror:
- localdata.appendVar("PREMIRRORS", " ${UNINATIVE_URL}${UNINATIVE_TARBALL} ${SOURCE_MIRROR_URL}/uninative/%s/${UNINATIVE_TARBALL}" % chksum)
+ premirrors = bb.fetch2.mirror_from_string(localdata.getVar("PREMIRRORS"))
+ for line in premirrors:
+ try:
+ (find, replace) = line
+ except ValueError:
+ continue
+ if find.startswith("http"):
+ localdata.appendVar("PREMIRRORS", " ${UNINATIVE_URL}${UNINATIVE_TARBALL} %s/uninative/%s/${UNINATIVE_TARBALL}" % (replace, chksum))
srcuri = d.expand("${UNINATIVE_URL}${UNINATIVE_TARBALL};sha256sum=%s" % chksum)
- bb.note("Fetching uninative binary shim from %s" % srcuri)
+ bb.note("Fetching uninative binary shim %s (will check PREMIRRORS first)" % srcuri)
fetcher = bb.fetch2.Fetch([srcuri], localdata, cache=False)
fetcher.download()
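The hunk above leans on bb.fetch2.mirror_from_string() to split the PREMIRRORS blob into (find, replace) pairs; a rough standalone approximation of that parsing step (a sketch, not the fetcher's exact implementation):

    def mirror_pairs(premirrors):
        # PREMIRRORS is a whitespace-separated sequence of
        # "<match-regex> <replacement>" pairs, conventionally one pair per line
        fields = (premirrors or "").replace('\\n', ' ').split()
        return list(zip(fields[::2], fields[1::2]))

    pairs = mirror_pairs("https://.*/.* http://mirror.example.com/sources/ \\n")
    # -> [('https://.*/.*', 'http://mirror.example.com/sources/')]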