path: root/import-layers/meta-virtualization
author     Brad Bishop <bradleyb@fuzziesquirrel.com>   2018-02-26 06:55:05 +0300
committer  Brad Bishop <bradleyb@fuzziesquirrel.com>   2018-03-15 17:22:49 +0300
commit     d7bf8c17eca8f8c89898a7794462c773c449e983 (patch)
tree       d18618fca85ca5f0c077032cc7b009344b60f663 /import-layers/meta-virtualization
parent     e2b5abdc9f28cdf8578e5b9be803c8e697443c20 (diff)
download   openbmc-d7bf8c17eca8f8c89898a7794462c773c449e983.tar.xz
Yocto 2.4
Move OpenBMC to Yocto 2.4 (rocko)

Tested: Built and verified Witherspoon and Palmetto images

Change-Id: I12057b18610d6fb0e6903c60213690301e9b0c67
Signed-off-by: Brad Bishop <bradleyb@fuzziesquirrel.com>
Diffstat (limited to 'import-layers/meta-virtualization')
-rw-r--r--  import-layers/meta-virtualization/README | 22
-rw-r--r--  import-layers/meta-virtualization/classes/sanity-meta-virt.bbclass | 10
-rw-r--r--  import-layers/meta-virtualization/conf/layer.conf | 4
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/containerd/containerd-docker_git.bb | 14
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/containerd/containerd.inc | 31
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/containerd/files/containerd.service | 11
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/cri-o/cri-o_git.bb | 130
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/cri-o/files/0001-Makefile-force-symlinks.patch | 26
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/cri-o/files/crio.conf | 147
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/criu/criu_git.bb | 14
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/criu/files/0001-criu-Change-libraries-install-directory.patch | 25
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/criu/files/0001-criu-Fix-toolchain-hardcode.patch | 57
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/criu/files/disable-selinux.patch | 39
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/criu/files/lib-Makefile-overwrite-install-lib-to-allow-multiarc.patch | 16
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/docker-compose/files/0001-Allow-newer-versions-of-requests.patch | 32
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/docker-compose/python3-docker-compose_1.16.1.bb | 32
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/docker-distribution/docker-distribution_git.bb | 10
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/docker/docker_git.bb | 43
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/docker/files/context-use-golang.org-x-net-pkg-until-we-move-to-go.patch | 77
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/docker/files/docker.init | 5
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/kubernetes/kubernetes_git.bb | 98
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/lxc/files/Generate-lxc-restore-net-properly.patch | 121
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/lxc/files/Use-AC_HEADER_MAJOR-to-detect-major-minor-makedev.patch | 119
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/lxc/files/automake-ensure-VPATH-builds-correctly.patch | 2
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/lxc/files/cgroups-work-around-issue-in-gcc-7.patch | 34
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/lxc/files/runtest.patch | 2
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/lxc/lxc_2.0.8.bb (renamed from import-layers/meta-virtualization/recipes-containers/lxc/lxc_2.0.0.bb) | 38
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/oci-image-tools/files/0001-image-manifest-Recursively-remove-pre-existing-entri.patch | 78
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/oci-image-tools/files/0002-image-manifest-Split-unpackLayerEntry-into-its-own-f.patch | 242
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/oci-image-tools/oci-image-tools_git.bb | 28
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/oci-runtime-spec/oci-runtime-spec_git.bb | 6
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/oci-runtime-tools/files/0001-Revert-implement-add-set-function-for-hooks-items.patch | 202
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/oci-runtime-tools/oci-runtime-tools_git.bb | 31
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/oci-systemd-hook/oci-systemd-hook/0001-selinux-drop-selinux-support.patch | 35
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/oci-systemd-hook/oci-systemd-hook_git.bb | 2
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/riddler/riddler_git.bb | 14
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/runc/runc-docker/0001-Disable-building-recvtty.patch | 26
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/runc/runc-docker/0001-Update-to-runtime-spec-198f23f827eea397d4331d7eb048d.patch | 89
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/runc/runc-docker/0001-Use-correct-go-cross-compiler.patch | 85
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/runc/runc-docker/0001-runc-Add-console-socket-dev-null.patch | 33
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/runc/runc-docker/0002-Remove-Platform-as-no-longer-in-OCI-spec.patch | 75
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/runc/runc-docker/0003-Update-memory-specs-to-use-int64-not-uint64.patch | 194
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/runc/runc-docker_git.bb | 14
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/runc/runc-opencontainers/0001-Use-correct-go-cross-compiler.patch | 85
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/runc/runc-opencontainers_git.bb | 19
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/runc/runc.inc | 24
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/runc/runc/0001-nsexec-fix-build-against-musl-libc.patch | 48
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/singularity/README | 46
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/singularity/singularity_git.bb | 35
-rw-r--r--  import-layers/meta-virtualization/recipes-core/runv/runv_git.bb | 82
-rw-r--r--  import-layers/meta-virtualization/recipes-devtools/go/go-systemd_git.bb | 4
-rw-r--r--  import-layers/meta-virtualization/recipes-devtools/protobuf/files/disable_tests.patch | 19
-rw-r--r--  import-layers/meta-virtualization/recipes-devtools/protobuf/files/protobuf-allow-running-python-scripts-from-anywhere.patch | 38
-rwxr-xr-x  import-layers/meta-virtualization/recipes-devtools/protobuf/files/run-ptest | 32
-rw-r--r--  import-layers/meta-virtualization/recipes-devtools/protobuf/protobuf-c_1.2.1.bb | 28
-rw-r--r--  import-layers/meta-virtualization/recipes-devtools/protobuf/protobuf-native_3.1.0.bb | 19
-rw-r--r--  import-layers/meta-virtualization/recipes-devtools/protobuf/protobuf_3.1.0.bb | 98
-rw-r--r--  import-layers/meta-virtualization/recipes-devtools/python/python-docopt.inc | 9
-rw-r--r--  import-layers/meta-virtualization/recipes-devtools/python/python-gevent_1.0.1.bb | 2
-rw-r--r--  import-layers/meta-virtualization/recipes-devtools/python/python-itsdangerous_0.24.bb | 22
-rw-r--r--  import-layers/meta-virtualization/recipes-devtools/python/python-m2crypto_0.22.3.bb | 2
-rw-r--r--  import-layers/meta-virtualization/recipes-devtools/python/python-pyyaml_3.11.bb | 2
-rw-r--r--  import-layers/meta-virtualization/recipes-devtools/python/python-requests_2.8.1.bb | 28
-rw-r--r--  import-layers/meta-virtualization/recipes-devtools/python/python-simplejson_3.7.3.bb | 31
-rw-r--r--  import-layers/meta-virtualization/recipes-devtools/python/python-six_1.10.0.bb | 19
-rw-r--r--  import-layers/meta-virtualization/recipes-devtools/python/python-sphinx_1.4.1.bb | 2
-rw-r--r--  import-layers/meta-virtualization/recipes-devtools/python/python-twisted.inc | 244
-rw-r--r--  import-layers/meta-virtualization/recipes-devtools/python/python-webob_1.6.0.bb | 2
-rw-r--r--  import-layers/meta-virtualization/recipes-devtools/python/python3-cached-property_1.3.0.bb | 9
-rw-r--r--  import-layers/meta-virtualization/recipes-devtools/python/python3-colorama_0.3.9.bb | 9
-rw-r--r--  import-layers/meta-virtualization/recipes-devtools/python/python3-docker-pycreds_0.2.1.bb | 9
-rw-r--r--  import-layers/meta-virtualization/recipes-devtools/python/python3-docker_2.5.1.bb | 17
-rw-r--r--  import-layers/meta-virtualization/recipes-devtools/python/python3-dockerpty_0.4.1.bb | 9
-rw-r--r--  import-layers/meta-virtualization/recipes-devtools/python/python3-docopt_0.6.2.bb | 2
-rw-r--r--  import-layers/meta-virtualization/recipes-devtools/python/python3-texttable_0.9.1.bb | 9
-rw-r--r--  import-layers/meta-virtualization/recipes-devtools/python/python3-twisted_13.2.0.bb | 67
-rw-r--r--  import-layers/meta-virtualization/recipes-devtools/python/python3-websocket-client_0.44.0.bb | 11
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/diod/diod_1.0.24.bb | 32
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/diod/files/0001-build-allow-builds-to-work-with-separate-build-dir.patch | 126
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/diod/files/0002-auto.diod.in-remove-bashisms.patch | 47
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/diod/files/diod | 75
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/diod/files/diod.conf | 15
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/images/cloud-image-controller.bb | 4
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/images/cloud-image-guest.bb | 4
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/images/xen-bootimg.inc | 35
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/images/xen-guest-image-minimal.bb | 11
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/images/xen-image-minimal.bb | 23
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/ipxe/ipxe_git.bb | 2
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/kvmtool/files/0001-kvmtool-9p-fixed-compilation-error.patch | 27
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/kvmtool/files/0002-kvmtool-add-EXTRA_CFLAGS-variable.patch | 29
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/kvmtool/kvmtool.bb | 7
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/libvirt/libvirt_1.3.5.bb | 2
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/seabios/seabios_1.9.1.bb | 2
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/vgabios/biossums_0.7a.bb | 37
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/vgabios/vgabios_0.7a.bb | 6
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/xen/files/fix-libxc-xc_dom_arm-missing-initialization.patch | 36
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/xen/xen.inc | 3
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/xen/xen_4.8.0.bb | 10
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/xen/xen_4.9.0.bb | 12
-rw-r--r--  import-layers/meta-virtualization/recipes-graphics/xorg-xserver/xserver-xorg_%.bbappend | 13
-rw-r--r--  import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto/docker.cfg | 12
-rw-r--r--  import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto/docker.scc | 4
-rw-r--r--  import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto/lxc.cfg | 2
-rw-r--r--  import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto_4.1.bbappend | 19
-rw-r--r--  import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto_4.10.bbappend | 20
-rw-r--r--  import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto_4.12.bbappend | 1
-rw-r--r--  import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto_4.4.bbappend | 20
-rw-r--r--  import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto_4.9.bbappend | 20
-rw-r--r--  import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto_virtualization.inc | 20
-rw-r--r--  import-layers/meta-virtualization/recipes-networking/cni/cni_git.bb | 95
-rw-r--r--  import-layers/meta-virtualization/recipes-networking/netns/files/0001-Use-correct-go-cross-compiler.patch | 77
-rw-r--r--  import-layers/meta-virtualization/recipes-networking/netns/netns_git.bb | 27
-rw-r--r--  import-layers/meta-virtualization/recipes-networking/openvswitch/files/configure-Only-link-against-libpcap-on-FreeBSD.patch | 70
-rw-r--r--  import-layers/meta-virtualization/recipes-networking/openvswitch/files/python-make-remaining-scripts-use-usr-bin-env.patch | 18
-rw-r--r--  import-layers/meta-virtualization/recipes-networking/openvswitch/files/python-switch-remaining-scripts-to-use-python3.patch | 113
-rw-r--r--  import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch-git/0001-Python3-compatibility-Convert-print-statements.patch | 1264
-rw-r--r--  import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch-git/0002-Python3-compatibility-exception-cleanup.patch | 79
-rw-r--r--  import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch-git/0003-Python3-compatibility-execfile-to-exec.patch | 33
-rw-r--r--  import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch-git/0004-Python3-compatibility-iteritems-to-items.patch | 102
-rw-r--r--  import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch-git/0005-Python3-compatibility-fix-integer-problems.patch | 51
-rw-r--r--  import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch-git/0006-Python3-compatibility-math-error-compatibility.patch | 56
-rw-r--r--  import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch-git/0007-Python3-compatibility-unicode-to-str.patch | 51
-rw-r--r--  import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch-git/0008-AUTHORS-Add-Jason-Wessel.patch | 28
-rw-r--r--  import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch-git/CVE-2017-9263.patch | 29
-rw-r--r--  import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch-git/openvswitch-add-ptest-b29cb89e9e9fe3119b2e5dd5d4fb79141635b7cc.patch (renamed from import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch-git/openvswitch-add-ptest-c298ef781c2d35d939fe163cbc2f41ea7b1cb8d1.patch) | 0
-rw-r--r--  import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch.inc | 16
-rw-r--r--  import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch_git.bb | 36
127 files changed, 5038 insertions, 1178 deletions
diff --git a/import-layers/meta-virtualization/README b/import-layers/meta-virtualization/README
index 2578f9049..7d842329f 100644
--- a/import-layers/meta-virtualization/README
+++ b/import-layers/meta-virtualization/README
@@ -4,6 +4,28 @@ meta-virtualization
This layer provides support for building Xen, KVM, Libvirt, and associated
packages necessary for constructing OE-based virtualized solutions.
+The bbappend files for some recipes (e.g. linux-yocto) in this layer only take
+effect if 'virtualization' is in DISTRO_FEATURES. To enable them, add the
+following line to your configuration file:
+
+ DISTRO_FEATURES_append = " virtualization"
+
+If meta-virtualization is included but virtualization is not enabled as a
+distro feature, a warning is printed at parse time:
+
+ You have included the meta-virtualization layer, but
+ 'virtualization' has not been enabled in your DISTRO_FEATURES. Some bbappend files
+ may not take effect. See the meta-virtualization README for details on enabling
+ virtualization support.
+
+If you know what you are doing, this warning can be disabled by setting the following
+variable in your configuration:
+
+ SKIP_META_VIRT_SANITY_CHECK = 1
+
+Also note that there are kvm- and xen-specific distro flags/features that you
+may want to enable, depending on your use case.
+
Dependencies
------------
This layer depends on:
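For reference, a minimal local.conf sketch that satisfies the check the README
addition above describes. The kvm/xen entries are assumptions standing in for
the optional flags the README alludes to; check the layer's bbappends for the
exact feature names.

    # local.conf (sketch)
    DISTRO_FEATURES_append = " virtualization"
    # optionally, the kvm/xen specific features mentioned above:
    # DISTRO_FEATURES_append = " kvm xen"
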
diff --git a/import-layers/meta-virtualization/classes/sanity-meta-virt.bbclass b/import-layers/meta-virtualization/classes/sanity-meta-virt.bbclass
new file mode 100644
index 000000000..b4ed0825b
--- /dev/null
+++ b/import-layers/meta-virtualization/classes/sanity-meta-virt.bbclass
@@ -0,0 +1,10 @@
+addhandler virt_bbappend_distrocheck
+virt_bbappend_distrocheck[eventmask] = "bb.event.SanityCheck"
+python virt_bbappend_distrocheck() {
+ skip_check = e.data.getVar('SKIP_META_VIRT_SANITY_CHECK') == "1"
+ if 'virtualization' not in e.data.getVar('DISTRO_FEATURES').split() and not skip_check:
+ bb.warn("You have included the meta-virtualization layer, but \
+'virtualization' has not been enabled in your DISTRO_FEATURES. Some bbappend files \
+may not take effect. See the meta-virtualization README for details on enabling \
+virtualization support.")
+}
diff --git a/import-layers/meta-virtualization/conf/layer.conf b/import-layers/meta-virtualization/conf/layer.conf
index be08a985f..f71c117d6 100644
--- a/import-layers/meta-virtualization/conf/layer.conf
+++ b/import-layers/meta-virtualization/conf/layer.conf
@@ -22,3 +22,7 @@ require conf/distro/include/virt_security_flags.inc
PREFERRED_PROVIDER_virtual/runc ?= "runc-docker"
PREFERRED_PROVIDER_virtual/containerd ?= "containerd-docker"
+
+# Sanity check for meta-virtualization layer.
+# Setting SKIP_META_VIRT_SANITY_CHECK to "1" would skip the bbappend files check.
+INHERIT += "sanity-meta-virt"
diff --git a/import-layers/meta-virtualization/recipes-containers/containerd/containerd-docker_git.bb b/import-layers/meta-virtualization/recipes-containers/containerd/containerd-docker_git.bb
index f6dcaeca9..b18a9bb29 100644
--- a/import-layers/meta-virtualization/recipes-containers/containerd/containerd-docker_git.bb
+++ b/import-layers/meta-virtualization/recipes-containers/containerd/containerd-docker_git.bb
@@ -1,10 +1,14 @@
-include containerd.inc
-
-SRCREV = "03e5862ec0d8d3b3f750e19fca3ee367e13c090e"
+SRCREV = "3addd840653146c90a254301d6c3a663c7fd6429"
SRC_URI = "\
- git://github.com/docker/containerd.git;branch=docker-1.13.x \
+ git://github.com/docker/containerd.git;branch=v0.2.x;destsuffix=git/src/github.com/containerd/containerd \
"
-CONTAINERD_VERSION = "0.2.3"
+
+include containerd.inc
+
+CONTAINERD_VERSION = "v0.2.x"
+S = "${WORKDIR}/git/src/github.com/containerd/containerd"
PROVIDES += "virtual/containerd"
RPROVIDES_${PN} = "virtual/containerd"
+
+DEPENDS += "btrfs-tools"
diff --git a/import-layers/meta-virtualization/recipes-containers/containerd/containerd.inc b/import-layers/meta-virtualization/recipes-containers/containerd/containerd.inc
index b14397957..e7a371926 100644
--- a/import-layers/meta-virtualization/recipes-containers/containerd/containerd.inc
+++ b/import-layers/meta-virtualization/recipes-containers/containerd/containerd.inc
@@ -8,30 +8,23 @@ DESCRIPTION = "containerd is a daemon to control runC, built for performance and
LICENSE = "Apache-2.0"
LIC_FILES_CHKSUM = "file://LICENSE.code;md5=aadc30f9c14d876ded7bedc0afd2d3d7"
+SRC_URI += "file://containerd.service"
+
S = "${WORKDIR}/git"
PV = "${CONTAINERD_VERSION}+git${SRCREV}"
inherit go
+inherit goarch
RRECOMMENDS_${PN} = "lxc docker"
-CONTAINERD_PKG="github.com/docker/containerd"
+CONTAINERD_PKG="github.com/containerd/containerd"
+
+INSANE_SKIP_${PN} += "ldflags"
do_configure[noexec] = "1"
do_compile() {
- export GOARCH="${TARGET_ARCH}"
- # supported amd64, 386, arm arm64
- if [ "${TARGET_ARCH}" = "x86_64" ]; then
- export GOARCH="amd64"
- fi
- if [ "${TARGET_ARCH}" = "aarch64" ]; then
- export GOARCH="arm64"
- fi
- if [ "${TARGET_ARCH}" = "i586" ]; then
- export GOARCH="386"
- fi
-
# Set GOPATH. See 'PACKAGERS.md'. Don't rely on
# docker to download its dependencies but rather
# use dependencies packaged independently.
@@ -39,17 +32,19 @@ do_compile() {
rm -rf .gopath
mkdir -p .gopath/src/"$(dirname "${CONTAINERD_PKG}")"
ln -sf ../../../.. .gopath/src/"${CONTAINERD_PKG}"
- export GOPATH="${S}/.gopath:${S}/vendor:${STAGING_DIR_TARGET}/${prefix}/local/go"
- export GOROOT="${STAGING_DIR_NATIVE}/${nonarch_libdir}/${HOST_SYS}/go"
- cd -
+
+ export GOPATH="${WORKDIR}/git/"
+ export GOROOT="${STAGING_LIBDIR_NATIVE}/${TARGET_SYS}/go"
# Pass the needed cflags/ldflags so that cgo
# can find the needed headers files and libraries
+ export GOARCH="${TARGET_GOARCH}"
export CGO_ENABLED="1"
export CFLAGS=""
export LDFLAGS=""
export CGO_CFLAGS="${BUILDSDK_CFLAGS} --sysroot=${STAGING_DIR_TARGET}"
export CGO_LDFLAGS="${BUILDSDK_LDFLAGS} --sysroot=${STAGING_DIR_TARGET}"
+ export GO_GCFLAGS=""
export CC_FOR_TARGET="${TARGET_PREFIX}gcc ${TARGET_CC_ARCH} --sysroot=${STAGING_DIR_TARGET}"
export CXX_FOR_TARGET="${TARGET_PREFIX}g++ ${TARGET_CC_ARCH} --sysroot=${STAGING_DIR_TARGET}"
@@ -74,9 +69,9 @@ do_install() {
if ${@bb.utils.contains('DISTRO_FEATURES','systemd','true','false',d)}; then
install -d ${D}${systemd_unitdir}/system
- install -m 644 ${S}/hack/containerd.service ${D}/${systemd_unitdir}/system
+ install -m 644 ${WORKDIR}/containerd.service ${D}/${systemd_unitdir}/system
# adjust from /usr/local/bin to /usr/bin/
- sed -e "s:/usr/local/bin/containerd:${bindir}/docker-containerd -l \"unix\:///var/run/docker/libcontainerd/docker-containerd.sock\":g" -i ${D}/${systemd_unitdir}/system/containerd.service
+ sed -e "s:/usr/local/bin/containerd:${bindir}/docker-containerd:g" -i ${D}/${systemd_unitdir}/system/containerd.service
fi
}
diff --git a/import-layers/meta-virtualization/recipes-containers/containerd/files/containerd.service b/import-layers/meta-virtualization/recipes-containers/containerd/files/containerd.service
new file mode 100644
index 000000000..23633b02b
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/containerd/files/containerd.service
@@ -0,0 +1,11 @@
+[Unit]
+Description=containerd
+Documentation=https://containerd.tools
+After=network.target
+
+[Service]
+ExecStart=/usr/local/bin/containerd
+Delegate=yes
+
+[Install]
+WantedBy=multi-user.target
diff --git a/import-layers/meta-virtualization/recipes-containers/cri-o/cri-o_git.bb b/import-layers/meta-virtualization/recipes-containers/cri-o/cri-o_git.bb
new file mode 100644
index 000000000..c14d54dc4
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/cri-o/cri-o_git.bb
@@ -0,0 +1,130 @@
+HOMEPAGE = "https://github.com/kubernetes-incubator/cri-o"
+SUMMARY = "Open Container Initiative-based implementation of Kubernetes Container Runtime Interface"
+DESCRIPTION = "cri-o is meant to provide an integration path between OCI conformant \
+runtimes and the kubelet. Specifically, it implements the Kubelet Container Runtime \
+Interface (CRI) using OCI conformant runtimes. The scope of cri-o is tied to the scope of the CRI. \
+. \
+At a high level, we expect the scope of cri-o to be restricted to the following functionalities: \
+. \
+ - Support multiple image formats including the existing Docker image format \
+ - Support for multiple means to download images including trust & image verification \
+ - Container image management (managing image layers, overlay filesystems, etc) \
+ - Container process lifecycle management \
+ - Monitoring and logging required to satisfy the CRI \
+ - Resource isolation as required by the CRI \
+ "
+
+SRCREV_cri-o = "65faae67828fb3eb3eac05b582aae9f9d1dea51c"
+SRC_URI = "\
+ git://github.com/kubernetes-incubator/cri-o.git;nobranch=1;name=cri-o \
+ file://0001-Makefile-force-symlinks.patch \
+ file://crio.conf \
+ "
+
+# Apache-2.0 for docker
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://src/import/LICENSE;md5=e3fc50a88d0a364313df4b21ef20c29e"
+
+GO_IMPORT = "import"
+
+PV = "1.0.0-rc3-dev+git${SRCREV_cri-o}"
+
+DEPENDS = " \
+ glib-2.0 \
+ btrfs-tools \
+ gpgme \
+ ostree \
+ libdevmapper \
+ "
+RDEPENDS_${PN} = " \
+ cni \
+ "
+
+PACKAGES =+ "${PN}-config"
+
+RDEPENDS_${PN} += " virtual/containerd virtual/runc"
+RDEPENDS_${PN} += " e2fsprogs-mke2fs"
+
+inherit systemd
+inherit go
+inherit goarch
+inherit pkgconfig
+
+EXTRA_OEMAKE="BUILDTAGS=''"
+
+do_compile() {
+ export GOARCH="${TARGET_GOARCH}"
+ export GOROOT="${STAGING_LIBDIR_NATIVE}/${TARGET_SYS}/go"
+ export GOPATH="${S}/src/import:${S}/src/import/vendor"
+
+ # Pass the needed cflags/ldflags so that cgo
+ # can find the needed headers files and libraries
+ export CGO_ENABLED="1"
+ export CFLAGS=""
+ export LDFLAGS=""
+ export CGO_CFLAGS="${BUILDSDK_CFLAGS} --sysroot=${STAGING_DIR_TARGET}"
+ export CGO_LDFLAGS="${BUILDSDK_LDFLAGS} --sysroot=${STAGING_DIR_TARGET}"
+
+ # link fixups for compilation
+ rm -f ${S}/src/import/vendor/src
+ ln -sf ./ ${S}/src/import/vendor/src
+
+ mkdir -p ${S}/src/import/vendor/github.com/kubernetes-incubator/cri-o
+ ln -sf ../../../../cmd ${S}/src/import/vendor/github.com/kubernetes-incubator/cri-o/cmd
+ ln -sf ../../../../test ${S}/src/import/vendor/github.com/kubernetes-incubator/cri-o/test
+ ln -sf ../../../../oci ${S}/src/import/vendor/github.com/kubernetes-incubator/cri-o/oci
+ ln -sf ../../../../server ${S}/src/import/vendor/github.com/kubernetes-incubator/cri-o/server
+ ln -sf ../../../../pkg ${S}/src/import/vendor/github.com/kubernetes-incubator/cri-o/pkg
+ ln -sf ../../../../libpod ${S}/src/import/vendor/github.com/kubernetes-incubator/cri-o/libpod
+ ln -sf ../../../../libkpod ${S}/src/import/vendor/github.com/kubernetes-incubator/cri-o/libkpod
+ ln -sf ../../../../utils ${S}/src/import/vendor/github.com/kubernetes-incubator/cri-o/utils
+
+ export GOPATH="${S}/src/import/.gopath:${S}/src/import/vendor:${STAGING_DIR_TARGET}/${prefix}/local/go"
+ export GOROOT="${STAGING_DIR_NATIVE}/${nonarch_libdir}/${HOST_SYS}/go"
+
+ # Pass the needed cflags/ldflags so that cgo
+ # can find the needed headers files and libraries
+ export CGO_ENABLED="1"
+ export CGO_CFLAGS="${CFLAGS} --sysroot=${STAGING_DIR_TARGET}"
+ export CGO_LDFLAGS="${LDFLAGS} --sysroot=${STAGING_DIR_TARGET}"
+
+ cd ${S}/src/import
+
+ oe_runmake binaries
+}
+
+SYSTEMD_PACKAGES = "${@bb.utils.contains('DISTRO_FEATURES','systemd','${PN}','',d)}"
+SYSTEMD_SERVICE_${PN} = "${@bb.utils.contains('DISTRO_FEATURES','systemd','crio.service','',d)}"
+SYSTEMD_AUTO_ENABLE_${PN} = "enable"
+
+do_install() {
+ localbindir="/usr/local/bin"
+
+ install -d ${D}${localbindir}
+ install -d ${D}/${libexecdir}/crio
+ install -d ${D}/${sysconfdir}/crio
+ install -d ${D}${systemd_unitdir}/system/
+
+ install ${WORKDIR}/crio.conf ${D}/${sysconfdir}/crio/crio.conf
+
+ # sample config files, they'll go in the ${PN}-config below
+ install -d ${D}/${sysconfdir}/crio/config/
+ install -m 755 -D ${S}/src/import/test/testdata/* ${D}/${sysconfdir}/crio/config/
+
+ install ${S}/src/import/crio ${D}/${localbindir}
+ install ${S}/src/import/crioctl ${D}/${localbindir}
+ install ${S}/src/import/kpod ${D}/${localbindir}
+
+ install ${S}/src/import/conmon/conmon ${D}/${libexecdir}/crio
+ install ${S}/src/import/pause/pause ${D}/${libexecdir}/crio
+
+ install -m 0644 ${S}/src/import/contrib/systemd/crio.service ${D}${systemd_unitdir}/system/
+ install -m 0644 ${S}/src/import/contrib/systemd/crio-shutdown.service ${D}${systemd_unitdir}/system/
+}
+
+FILES_${PN}-config = "${sysconfdir}/crio/config/*"
+FILES_${PN} += "${systemd_unitdir}/system/*"
+FILES_${PN} += "/usr/local/bin/*"
+
+INHIBIT_PACKAGE_STRIP = "1"
+INSANE_SKIP_${PN} += "ldflags already-stripped"
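To illustrate the packaging split above (a sketch, not part of the commit): the
sample configuration files land in the cri-o-config package, while the main
package pulls in its runtime dependencies (virtual/containerd, virtual/runc,
cni) through RDEPENDS, so an image only needs to request the two packages:

    # local.conf or image recipe (sketch)
    IMAGE_INSTALL_append = " cri-o cri-o-config"
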
diff --git a/import-layers/meta-virtualization/recipes-containers/cri-o/files/0001-Makefile-force-symlinks.patch b/import-layers/meta-virtualization/recipes-containers/cri-o/files/0001-Makefile-force-symlinks.patch
new file mode 100644
index 000000000..320eac86a
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/cri-o/files/0001-Makefile-force-symlinks.patch
@@ -0,0 +1,26 @@
+From a4433978bf324525b4c260b0e9615ae27271fe55 Mon Sep 17 00:00:00 2001
+From: Bruce Ashfield <bruce.ashfield@windriver.com>
+Date: Wed, 20 Sep 2017 12:05:40 -0400
+Subject: [PATCH] Makefile: force symlinks
+
+Signed-off-by: Bruce Ashfield <bruce.ashfield@windriver.com>
+---
+ Makefile | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/src/import/Makefile b/src/import/Makefile
+index e3e5050bb7f6..4ad3fb7ff0a9 100644
+--- a/src/import/Makefile
++++ b/src/import/Makefile
+@@ -53,7 +53,7 @@ help:
+ .gopathok:
+ ifeq ("$(wildcard $(GOPKGDIR))","")
+ mkdir -p "$(GOPKGBASEDIR)"
+- ln -s "$(CURDIR)" "$(GOPKGBASEDIR)"
++ ln -sf "$(CURDIR)" "$(GOPKGBASEDIR)"
+ endif
+ touch "$(GOPATH)/.gopathok"
+
+--
+2.4.0.53.g8440f74
+
diff --git a/import-layers/meta-virtualization/recipes-containers/cri-o/files/crio.conf b/import-layers/meta-virtualization/recipes-containers/cri-o/files/crio.conf
new file mode 100644
index 000000000..51d7f404f
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/cri-o/files/crio.conf
@@ -0,0 +1,147 @@
+# generated via: crio --config="" config --default
+
+# The "crio" table contains all of the server options.
+[crio]
+
+# root is a path to the "root directory". CRIO stores all of its data,
+# including container images, in this directory.
+root = "/var/lib/containers/storage"
+
+# run is a path to the "run directory". CRIO stores all of its state
+# in this directory.
+runroot = "/var/run/containers/storage"
+
+# storage_driver select which storage driver is used to manage storage
+# of images and containers.
+storage_driver = ""
+
+# storage_option is used to pass an option to the storage driver.
+storage_option = [
+]
+
+# The "crio.api" table contains settings for the kubelet/gRPC
+# interface (which is also used by crioctl).
+[crio.api]
+
+# listen is the path to the AF_LOCAL socket on which crio will listen.
+listen = "/var/run/crio.sock"
+
+# stream_address is the IP address on which the stream server will listen
+stream_address = ""
+
+# stream_port is the port on which the stream server will listen
+stream_port = "10010"
+
+# file_locking is whether file-based locking will be used instead of
+# in-memory locking
+file_locking = true
+
+# The "crio.runtime" table contains settings pertaining to the OCI
+# runtime used and options for how to set up and manage the OCI runtime.
+[crio.runtime]
+
+# runtime is the OCI compatible runtime used for trusted container workloads.
+# This is a mandatory setting as this runtime will be the default one
+# and will also be used for untrusted container workloads if
+# runtime_untrusted_workload is not set.
+runtime = "/usr/bin/runc"
+
+# runtime_untrusted_workload is the OCI compatible runtime used for untrusted
+# container workloads. This is an optional setting, except if
+# default_container_trust is set to "untrusted".
+runtime_untrusted_workload = ""
+
+# default_workload_trust is the default level of trust crio puts in container
+# workloads. It can either be "trusted" or "untrusted", and the default
+# is "trusted".
+# Containers can be run through different container runtimes, depending on
+# the trust hints we receive from kubelet:
+# - If kubelet tags a container workload as untrusted, crio will try first to
+# run it through the untrusted container workload runtime. If it is not set,
+# crio will use the trusted runtime.
+# - If kubelet does not provide any information about the container workload trust
+# level, the selected runtime will depend on the default_container_trust setting.
+# If it is set to "untrusted", then all containers except for the host privileged
+# ones, will be run by the runtime_untrusted_workload runtime. Host privileged
+# containers are by definition trusted and will always use the trusted container
+# runtime. If default_container_trust is set to "trusted", crio will use the trusted
+# container runtime for all containers.
+default_workload_trust = "trusted"
+
+# conmon is the path to conmon binary, used for managing the runtime.
+conmon = "/usr/libexec/crio/conmon"
+
+# conmon_env is the environment variable list for conmon process,
+# used for passing necessary environment variable to conmon or runtime.
+conmon_env = [
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+]
+
+# selinux indicates whether or not SELinux will be used for pod
+# separation on the host. If you enable this flag, SELinux must be running
+# on the host.
+selinux = false
+
+# seccomp_profile is the seccomp json profile path which is used as the
+# default for the runtime.
+seccomp_profile = "/etc/crio/seccomp.json"
+
+# apparmor_profile is the apparmor profile name which is used as the
+# default for the runtime.
+apparmor_profile = "crio-default"
+
+# cgroup_manager is the cgroup management implementation to be used
+# for the runtime.
+cgroup_manager = "cgroupfs"
+
+# hooks_dir_path is the oci hooks directory for automatically executed hooks
+hooks_dir_path = "/usr/share/containers/oci/hooks.d"
+
+# pids_limit is the number of processes allowed in a container
+pids_limit = 1024
+
+# The "crio.image" table contains settings pertaining to the
+# management of OCI images.
+[crio.image]
+
+# default_transport is the prefix we try prepending to an image name if the
+# image name as we receive it can't be parsed as a valid source reference
+default_transport = "docker://"
+
+# pause_image is the image which we use to instantiate infra containers.
+pause_image = "kubernetes/pause"
+
+# pause_command is the command to run in a pause_image to have a container just
+# sit there. If the image contains the necessary information, this value need
+# not be specified.
+pause_command = "/pause"
+
+# signature_policy is the name of the file which decides what sort of policy we
+# use when deciding whether or not to trust an image that we've pulled.
+# Outside of testing situations, it is strongly advised that this be left
+# unspecified so that the default system-wide policy will be used.
+signature_policy = ""
+
+# image_volumes controls how image volumes are handled.
+# The valid values are mkdir and ignore.
+image_volumes = "mkdir"
+
+# insecure_registries is used to skip TLS verification when pulling images.
+insecure_registries = [
+]
+
+# registries is used to specify a comma separated list of registries to be used
+# when pulling an unqualified image (e.g. fedora:rawhide).
+registries = [
+]
+
+# The "crio.network" table contains settings pertaining to the
+# management of CNI plugins.
+[crio.network]
+
+# network_dir is where CNI network configuration
+# files are stored.
+network_dir = "/etc/cni/net.d/"
+
+# plugin_dir is where CNI plugin binaries are stored.
+plugin_dir = "/opt/cni/bin/"
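Since do_install copies ${WORKDIR}/crio.conf into /etc/crio verbatim, a
site-specific configuration can be supplied from a bbappend instead of patching
the recipe. A sketch, assuming the override lives in your own layer:

    # cri-o_%.bbappend (sketch)
    FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
    # a modified crio.conf placed under files/ next to this bbappend now
    # takes precedence over the copy shipped in meta-virtualization
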
diff --git a/import-layers/meta-virtualization/recipes-containers/criu/criu_git.bb b/import-layers/meta-virtualization/recipes-containers/criu/criu_git.bb
index 21dee2b11..3a021031a 100644
--- a/import-layers/meta-virtualization/recipes-containers/criu/criu_git.bb
+++ b/import-layers/meta-virtualization/recipes-containers/criu/criu_git.bb
@@ -13,21 +13,20 @@ EXCLUDE_FROM_WORLD = "1"
LIC_FILES_CHKSUM = "file://COPYING;md5=412de458544c1cb6a2b512cd399286e2"
-SRCREV = "c031417255f6a5c4409d15ff0b36af5f6e90c559"
-PR = "r0"
-PV = "2.5+git${SRCPV}"
+SRCREV = "a31c1854e10580a09621e539c3ec052b875a8e06"
+PV = "3.4+git${SRCPV}"
SRC_URI = "git://github.com/xemul/criu.git;protocol=git \
file://0001-criu-Fix-toolchain-hardcode.patch \
file://0002-criu-Skip-documentation-install.patch \
file://0001-criu-Change-libraries-install-directory.patch \
- ${@bb.utils.contains('PACKAGECONFIG', 'selinux', '', 'file://disable-selinux.patch', d)} \
file://lib-Makefile-overwrite-install-lib-to-allow-multiarc.patch \
"
COMPATIBLE_HOST = "(x86_64|arm|aarch64).*-linux"
-DEPENDS += "libnl libcap protobuf-c-native protobuf-c util-linux-native"
+DEPENDS += "libnl libcap protobuf-c-native protobuf-c util-linux-native libbsd libnet"
+RDEPENDS_${PN} = "bash"
S = "${WORKDIR}/git"
@@ -77,3 +76,8 @@ FILES_${PN} += "${systemd_unitdir}/ \
${libdir}/pycriu/ \
${libdir}/crit-0.0.1-py2.7.egg-info \
"
+
+FILES_${PN}-staticdev += " \
+ ${libexecdir}/compel/std.lib.a \
+ ${libexecdir}/compel/fds.lib.a \
+ "
diff --git a/import-layers/meta-virtualization/recipes-containers/criu/files/0001-criu-Change-libraries-install-directory.patch b/import-layers/meta-virtualization/recipes-containers/criu/files/0001-criu-Change-libraries-install-directory.patch
index a72140500..4908e474a 100644
--- a/import-layers/meta-virtualization/recipes-containers/criu/files/0001-criu-Change-libraries-install-directory.patch
+++ b/import-layers/meta-virtualization/recipes-containers/criu/files/0001-criu-Change-libraries-install-directory.patch
@@ -1,26 +1,26 @@
-From 7ebde06e00b591a88397dad74a1aa47fd562eb50 Mon Sep 17 00:00:00 2001
-From: Jianchuan Wang <jianchuan.wang@windriver.com>
-Date: Tue, 16 Aug 2016 09:48:08 +0800
-Subject: [PATCH 1/2] criu: Change libraries install directory
+From 78390305829316633acee2ca5607331b0e37a104 Mon Sep 17 00:00:00 2001
+From: Mark Asselstine <mark.asselstine@windriver.com>
+Date: Fri, 8 Sep 2017 15:11:31 -0400
+Subject: [PATCH] criu: Change libraries install directory
Install the libraries into /usr/lib(or /usr/lib64)
Signed-off-by: Jianchuan Wang <jianchuan.wang@windriver.com>
+Signed-off-by: Mark Asselstine <mark.asselstine@windriver.com>
---
Makefile.install | 13 -------------
1 file changed, 13 deletions(-)
diff --git a/Makefile.install b/Makefile.install
-index dbc22e1..a30dc96 100644
+index 3987bcc..73d98a4 100644
--- a/Makefile.install
+++ b/Makefile.install
-@@ -11,19 +11,6 @@ LIBDIR ?= $(PREFIX)/lib
- INCLUDEDIR ?= $(PREFIX)/include/criu
- LIBEXECDIR ?= $(PREFIX)/libexec
+@@ -9,19 +9,6 @@ LIBEXECDIR ?= $(PREFIX)/libexec
+ RUNDIR ?= /run
--#
+ #
-# For recent Debian/Ubuntu with multiarch support.
--DEB_HOST_MULTIARCH ?= $(shell dpkg-architecture -qDEB_HOST_MULTIARCH 2>/dev/null)
+-DEB_HOST_MULTIARCH := $(shell dpkg-architecture -qDEB_HOST_MULTIARCH 2>/dev/null)
-ifneq "$(DEB_HOST_MULTIARCH)" ""
- LIBDIR ?= $(PREFIX)/lib/$(DEB_HOST_MULTIARCH)
-else
@@ -31,8 +31,9 @@ index dbc22e1..a30dc96 100644
- endif
-endif
-
- export BINDIR SBINDIR MANDIR SYSTEMDUNITDIR LOGROTATEDIR
- export INCLUDEDIR LIBDIR DESTDIR PREFIX LIBEXECDIR
+-#
+ # LIBDIR falls back to the standard path.
+ LIBDIR ?= $(PREFIX)/lib
--
2.7.4
diff --git a/import-layers/meta-virtualization/recipes-containers/criu/files/0001-criu-Fix-toolchain-hardcode.patch b/import-layers/meta-virtualization/recipes-containers/criu/files/0001-criu-Fix-toolchain-hardcode.patch
index d30f2ac2c..dc5b89774 100644
--- a/import-layers/meta-virtualization/recipes-containers/criu/files/0001-criu-Fix-toolchain-hardcode.patch
+++ b/import-layers/meta-virtualization/recipes-containers/criu/files/0001-criu-Fix-toolchain-hardcode.patch
@@ -1,6 +1,6 @@
-From 057d30f15e81dcc4162d6fbee06f126564596397 Mon Sep 17 00:00:00 2001
-From: Jianchuan Wang <jianchuan.wang@windriver.com>
-Date: Wed, 7 Sep 2016 23:55:15 -0400
+From af679853a45fe63f680c99e70416c8ac620d23b8 Mon Sep 17 00:00:00 2001
+From: Mark Asselstine <mark.asselstine@windriver.com>
+Date: Fri, 8 Sep 2017 15:02:14 -0400
Subject: [PATCH] criu: Fix toolchain hardcode
Replace ":=" to "?=" so that the toolchain used by bitbake build system will
@@ -8,45 +8,32 @@ be taken.
Signed-off-by: Yang Shi <yang.shi@windriver.com>
Signed-off-by: Jianchuan Wang <jianchuan.wang@windriver.com>
+Signed-off-by: Mark Asselstine <mark.asselstine@windriver.com>
---
Makefile | 2 +-
- criu/pie/Makefile | 2 +-
scripts/nmk/scripts/include.mk | 2 +-
scripts/nmk/scripts/tools.mk | 40 ++++++++++++++++++++--------------------
- 4 files changed, 23 insertions(+), 23 deletions(-)
+ 3 files changed, 22 insertions(+), 22 deletions(-)
diff --git a/Makefile b/Makefile
-index 52cbd6a..f66279b 100644
+index f2583a2..d7f51e5 100644
--- a/Makefile
+++ b/Makefile
-@@ -60,7 +60,7 @@ LDARCH ?= $(SRCARCH)
-
- export SRCARCH LDARCH VDSO
+@@ -17,7 +17,7 @@ ifeq ($(origin HOSTCFLAGS), undefined)
+ HOSTCFLAGS := $(CFLAGS) $(USERCFLAGS)
+ endif
-UNAME-M := $(shell uname -m)
+UNAME-M ?= $(shell uname -m)
- export UNAME-M
- ifeq ($(ARCH),arm)
-diff --git a/criu/pie/Makefile b/criu/pie/Makefile
-index 125b02f..9975871 100644
---- a/criu/pie/Makefile
-+++ b/criu/pie/Makefile
-@@ -17,7 +17,7 @@ restorer-obj-e += ./$(ARCH_DIR)/syscalls.built-in.o
#
- CFLAGS := $(filter-out -pg $(CFLAGS-GCOV),$(CFLAGS))
- CFLAGS += -iquote $(SRC_DIR)/criu/pie/piegen
--CFLAGS += -iquote $(SRC_DIR)/criu/arch/$(ARCH)/include
-+CFLAGS += -iquote $(SRC_DIR)/criu/arch/$(SRCARCH)/include
- CFLAGS += -iquote $(SRC_DIR)/criu/include
- CFLAGS += -iquote $(SRC_DIR)
-
+ # Supported Architectures
diff --git a/scripts/nmk/scripts/include.mk b/scripts/nmk/scripts/include.mk
-index 4c496f7..a7250cd 100644
+index 04ccb3a..0d63bc7 100644
--- a/scripts/nmk/scripts/include.mk
+++ b/scripts/nmk/scripts/include.mk
@@ -20,7 +20,7 @@ SUBARCH := $(shell uname -m | sed \
- -e s/aarch64.*/arm64/)
+ -e s/aarch64.*/aarch64/)
ARCH ?= $(SUBARCH)
-SRCARCH := $(ARCH)
@@ -55,16 +42,20 @@ index 4c496f7..a7250cd 100644
export SUBARCH ARCH SRCARCH
diff --git a/scripts/nmk/scripts/tools.mk b/scripts/nmk/scripts/tools.mk
-index 0538dde..e4af068 100644
+index 56dba84..1698821 100644
--- a/scripts/nmk/scripts/tools.mk
+++ b/scripts/nmk/scripts/tools.mk
-@@ -2,28 +2,28 @@ ifndef ____nmk_defined__tools
+@@ -2,30 +2,30 @@ ifndef ____nmk_defined__tools
#
# System tools shorthands
-RM := rm -f
--LD := $(CROSS_COMPILE)ld
--CC := $(CROSS_COMPILE)gcc
++RM ?= rm -f
+ HOSTLD ?= ld
+-LD := $(CROSS_COMPILE)$(HOSTLD)
++LD ?= $(CROSS_COMPILE)$(HOSTLD)
+ HOSTCC ?= gcc
+-CC := $(CROSS_COMPILE)$(HOSTCC)
-CPP := $(CC) -E
-AS := $(CROSS_COMPILE)as
-AR := $(CROSS_COMPILE)ar
@@ -79,9 +70,7 @@ index 0538dde..e4af068 100644
-PYTHON := python
-FIND := find
-SH := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
-+RM ?= rm -f
-+LD ?= $(CROSS_COMPILE)ld
-+CC ?= $(CROSS_COMPILE)gcc
++CC ?= $(CROSS_COMPILE)$(HOSTCC)
+CPP ?= $(CC) -E
+AS ?= $(CROSS_COMPILE)as
+AR ?= $(CROSS_COMPILE)ar
@@ -105,8 +94,8 @@ index 0538dde..e4af068 100644
+ETAGS ?= etags
+CTAGS ?= ctags
- export RM LD CC CPP AS AR STRIP OBJCOPY OBJDUMP
+ export RM HOSTLD LD HOSTCC CC CPP AS AR STRIP OBJCOPY OBJDUMP
export NM SH MAKE MKDIR AWK PERL PYTHON SH CSCOPE
--
-2.8.1
+2.7.4
diff --git a/import-layers/meta-virtualization/recipes-containers/criu/files/disable-selinux.patch b/import-layers/meta-virtualization/recipes-containers/criu/files/disable-selinux.patch
deleted file mode 100644
index 5d5d03526..000000000
--- a/import-layers/meta-virtualization/recipes-containers/criu/files/disable-selinux.patch
+++ /dev/null
@@ -1,39 +0,0 @@
-From bd2eeaddfc1f12f87184d870cc9a1adde1cf0b10 Mon Sep 17 00:00:00 2001
-From: Mark Asselstine <mark.asselstine@windriver.com>
-Date: Mon, 24 Apr 2017 13:08:48 -0400
-Subject: [PATCH] criu/Makefile.config: explicitly remove selinux support
-
-Upstream-Status: Inappropriate [disable feature]
-
-It shows warning when build crius if libselinux has been built already:
-
- WARNING: QA Issue: criu rdepends on libselinux, but it isn't a build dependency? [build-deps]
-
-Apply this patch to disable selinux support when 'selinux' is not in PACKAGECONF.
-
-Signed-off-by: Kai Kang <kai.kang@windriver.com>
-[MA: Context updated to apply against criu v2.5]
-Signed-off-by: Mark Asselstine <mark.asselstine@windriver.com>
----
- criu/Makefile.config | 5 -----
- 1 file changed, 5 deletions(-)
-
-diff --git a/criu/Makefile.config b/criu/Makefile.config
-index f531b3b..37216f8 100644
---- a/criu/Makefile.config
-+++ b/criu/Makefile.config
-@@ -7,11 +7,6 @@ ifeq ($(call try-cc,$(FEATURE_TEST_LIBBSD_DEV),-lbsd),true)
- FEATURE_DEFINES += -DCONFIG_HAS_LIBBSD
- endif
-
--ifeq ($(call pkg-config-check,libselinux),y)
-- LIBS += -lselinux
-- FEATURE_DEFINES += -DCONFIG_HAS_SELINUX
--endif
--
- export DEFINES += $(FEATURE_DEFINES)
- export CFLAGS += $(FEATURE_DEFINES)
-
---
-2.7.4
-
diff --git a/import-layers/meta-virtualization/recipes-containers/criu/files/lib-Makefile-overwrite-install-lib-to-allow-multiarc.patch b/import-layers/meta-virtualization/recipes-containers/criu/files/lib-Makefile-overwrite-install-lib-to-allow-multiarc.patch
index 8bda8c426..c2512a0ea 100644
--- a/import-layers/meta-virtualization/recipes-containers/criu/files/lib-Makefile-overwrite-install-lib-to-allow-multiarc.patch
+++ b/import-layers/meta-virtualization/recipes-containers/criu/files/lib-Makefile-overwrite-install-lib-to-allow-multiarc.patch
@@ -1,6 +1,6 @@
-From 2e0c5c66786016f6443da2c1ff15ad77f018ec9b Mon Sep 17 00:00:00 2001
+From 89f9b87904bd312b817ffaa7d83abfd5e84d723d Mon Sep 17 00:00:00 2001
From: Mark Asselstine <mark.asselstine@windriver.com>
-Date: Mon, 24 Apr 2017 16:12:05 -0400
+Date: Fri, 8 Sep 2017 15:40:49 -0400
Subject: [PATCH] lib/Makefile: overwrite install-lib, to allow multiarch
I am not sure why Yocto installs python modules in arch specific
@@ -13,15 +13,15 @@ Signed-off-by: Mark Asselstine <mark.asselstine@windriver.com>
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/Makefile b/lib/Makefile
-index f1c0821..c714d12 100644
+index b1bb057..06f5c5d 100644
--- a/lib/Makefile
+++ b/lib/Makefile
-@@ -56,7 +56,7 @@ install: lib-c lib-py ../crit/crit c/criu.pc.in
- $(Q) sed -e 's,@version@,$(CRIU_VERSION),' -e 's,@libdir@,$(LIBDIR),' -e 's,@includedir@,$(dir $(INCLUDEDIR)),' c/criu.pc.in > c/criu.pc
- $(Q) install -m 644 c/criu.pc $(DESTDIR)$(LIBDIR)/pkgconfig
+@@ -56,7 +56,7 @@ install: lib-c lib-py crit/crit lib/c/criu.pc.in
+ $(Q) sed -e 's,@version@,$(CRIU_VERSION),' -e 's,@libdir@,$(LIBDIR),' -e 's,@includedir@,$(dir $(INCLUDEDIR)/criu/),' lib/c/criu.pc.in > lib/c/criu.pc
+ $(Q) install -m 644 lib/c/criu.pc $(DESTDIR)$(LIBDIR)/pkgconfig
$(E) " INSTALL " crit
-- $(Q) python ../scripts/crit-setup.py install --root=$(DESTDIR) --prefix=$(PREFIX) --record $(CRIT_SETUP_FILES)
-+ $(Q) python ../scripts/crit-setup.py install --root=$(DESTDIR) --prefix=$(PREFIX) --record $(CRIT_SETUP_FILES) --install-lib=$(INSTALL_LIB)
+- $(Q) $(PYTHON_BIN) scripts/crit-setup.py install --prefix=$(DESTDIR)$(PREFIX) --record $(CRIT_SETUP_FILES)
++ $(Q) $(PYTHON_BIN) scripts/crit-setup.py install --prefix=$(DESTDIR)$(PREFIX) --record $(CRIT_SETUP_FILES) --install-lib=$(DESTDIR)$(INSTALL_LIB)
.PHONY: install
uninstall:
diff --git a/import-layers/meta-virtualization/recipes-containers/docker-compose/files/0001-Allow-newer-versions-of-requests.patch b/import-layers/meta-virtualization/recipes-containers/docker-compose/files/0001-Allow-newer-versions-of-requests.patch
new file mode 100644
index 000000000..6fc7bb4ce
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/docker-compose/files/0001-Allow-newer-versions-of-requests.patch
@@ -0,0 +1,32 @@
+From 15cf1a31f5af8f09531bb837b92bd6ea49bd1744 Mon Sep 17 00:00:00 2001
+From: Pascal Bach <pascal.bach@siemens.com>
+Date: Wed, 13 Sep 2017 08:41:21 +0200
+Subject: [PATCH] Allow newer versions of requests
+
+docker compose has strict requirements to use requests < 2.12
+
+However it works without issues with newer versions, so this patch removes the check.
+
+Upstream-Status: Pending
+
+Signed-off-by: Pascal Bach <pascal.bach@siemens.com>
+---
+ setup.py | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/setup.py b/setup.py
+index 192a0f6..f444757 100644
+--- a/setup.py
++++ b/setup.py
+@@ -33,7 +33,7 @@ install_requires = [
+ 'cached-property >= 1.2.0, < 2',
+ 'docopt >= 0.6.1, < 0.7',
+ 'PyYAML >= 3.10, < 4',
+- 'requests >= 2.6.1, != 2.11.0, < 2.12',
++ 'requests >= 2.6.1, != 2.11.0',
+ 'texttable >= 0.9.0, < 0.10',
+ 'websocket-client >= 0.32.0, < 1.0',
+ 'docker >= 2.5.1, < 3.0',
+--
+2.1.4
+
diff --git a/import-layers/meta-virtualization/recipes-containers/docker-compose/python3-docker-compose_1.16.1.bb b/import-layers/meta-virtualization/recipes-containers/docker-compose/python3-docker-compose_1.16.1.bb
new file mode 100644
index 000000000..4e761d0b1
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/docker-compose/python3-docker-compose_1.16.1.bb
@@ -0,0 +1,32 @@
+SUMMARY = "Multi-container orchestration for Docker"
+HOMEPAGE = "https://www.docker.com/"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=435b266b3899aa8a959f17d41c56def8"
+
+SRC_URI += "file://0001-Allow-newer-versions-of-requests.patch"
+
+inherit pypi setuptools3
+
+SRC_URI[md5sum] = "8dcadf09143600fcb573b43f446c8f9a"
+SRC_URI[sha256sum] = "fb46a6a2c4d193a3ff1e4d7208eea920b629c81dc92257c87f3f93095cfb0bdf"
+
+RDEPENDS_${PN} = "\
+ ${PYTHON_PN}-cached-property \
+ ${PYTHON_PN}-certifi \
+ ${PYTHON_PN}-chardet \
+ ${PYTHON_PN}-colorama \
+ ${PYTHON_PN}-docker \
+ ${PYTHON_PN}-docker-pycreds \
+ ${PYTHON_PN}-dockerpty \
+ ${PYTHON_PN}-docopt \
+ ${PYTHON_PN}-enum \
+ ${PYTHON_PN}-idna \
+ ${PYTHON_PN}-jsonschema \
+ ${PYTHON_PN}-pyyaml \
+ ${PYTHON_PN}-requests \
+ ${PYTHON_PN}-six \
+ ${PYTHON_PN}-terminal \
+ ${PYTHON_PN}-texttable \
+ ${PYTHON_PN}-urllib3 \
+ ${PYTHON_PN}-websocket-client \
+ "
diff --git a/import-layers/meta-virtualization/recipes-containers/docker-distribution/docker-distribution_git.bb b/import-layers/meta-virtualization/recipes-containers/docker-distribution/docker-distribution_git.bb
index 08b6d70be..add5ce1d5 100644
--- a/import-layers/meta-virtualization/recipes-containers/docker-distribution/docker-distribution_git.bb
+++ b/import-layers/meta-virtualization/recipes-containers/docker-distribution/docker-distribution_git.bb
@@ -3,16 +3,18 @@ SUMMARY = "The Docker toolset to pack, ship, store, and deliver content"
LICENSE = "Apache-2.0"
LIC_FILES_CHKSUM = "file://LICENSE;md5=d2794c0df5b907fdace235a619d80314"
-SRCREV_distribution="0810eba2adf048b77621472991211924d9ec31c5"
-SRC_URI = "git://github.com/docker/distribution.git;branch=master;name=distribution;destsuffix=git/src/github.com/docker/distribution \
+SRCREV_distribution="48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89"
+SRC_URI = "git://github.com/docker/distribution.git;branch=release/2.6;name=distribution;destsuffix=git/src/github.com/docker/distribution \
file://docker-registry.service \
"
PACKAGES =+ "docker-registry"
-PV = "v2.6.0-rc+git${SRCPV}"
+PV = "v2.6.2"
S = "${WORKDIR}/git/src/github.com/docker/distribution"
+GO_IMPORT = "import"
+
inherit goarch
inherit go
@@ -33,6 +35,8 @@ do_compile() {
export GO_GCFLAGS=""
export CGO_LDFLAGS="${BUILDSDK_LDFLAGS} --sysroot=${STAGING_DIR_TARGET}"
+ cd ${S}
+
oe_runmake binaries
}
diff --git a/import-layers/meta-virtualization/recipes-containers/docker/docker_git.bb b/import-layers/meta-virtualization/recipes-containers/docker/docker_git.bb
index 74170cbfa..4ba5e817b 100644
--- a/import-layers/meta-virtualization/recipes-containers/docker/docker_git.bb
+++ b/import-layers/meta-virtualization/recipes-containers/docker/docker_git.bb
@@ -18,11 +18,13 @@ DESCRIPTION = "Linux container runtime \
subtle and/or glaring issues. \
"
-SRCREV_docker = "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178"
-SRCREV_libnetwork="0f534354b813003a754606689722fe253101bc4e"
+SRCREV_docker = "e639a70fbe999d96354a5bcf560231b7b8aa935c"
+SRCREV_libnetwork = "26addf43a5d925ff79d262dbbdb5344bc2b6e198"
+SRCREV_cli = "a765218f1988e85b68aa3977f34893ec7b059a60"
SRC_URI = "\
- git://github.com/docker/docker.git;nobranch=1;name=docker \
+ git://github.com/moby/moby.git;nobranch=1;name=docker \
git://github.com/docker/libnetwork.git;branch=master;name=libnetwork;destsuffix=libnetwork \
+ git://github.com/docker/cli;branch=master;name=cli;destsuffix=cli \
file://docker.init \
file://hi.Dockerfile \
file://context-use-golang.org-x-net-pkg-until-we-move-to-go.patch \
@@ -30,11 +32,13 @@ SRC_URI = "\
# Apache-2.0 for docker
LICENSE = "Apache-2.0"
-LIC_FILES_CHKSUM = "file://LICENSE;md5=aadc30f9c14d876ded7bedc0afd2d3d7"
+LIC_FILES_CHKSUM = "file://src/import/LICENSE;md5=9740d093a080530b5c5c6573df9af45a"
+
+GO_IMPORT = "import"
S = "${WORKDIR}/git"
-DOCKER_VERSION = "1.13.0"
+DOCKER_VERSION = "17.06.0"
PV = "${DOCKER_VERSION}+git${SRCREV_docker}"
DEPENDS = " \
@@ -43,7 +47,6 @@ DEPENDS = " \
go-context \
go-mux \
go-patricia \
- go-libtrust \
go-logrus \
go-fsnotify \
go-dbus \
@@ -80,20 +83,21 @@ do_compile() {
# Set GOPATH. See 'PACKAGERS.md'. Don't rely on
# docker to download its dependencies but rather
# use dependencies packaged independently.
- cd ${S}
+ cd ${S}/src/import
rm -rf .gopath
mkdir -p .gopath/src/"$(dirname "${DOCKER_PKG}")"
ln -sf ../../../.. .gopath/src/"${DOCKER_PKG}"
mkdir -p .gopath/src/github.com/docker
- ln -sf ../../../../../libnetwork .gopath/src/github.com/docker/libnetwork
+ ln -sf ${WORKDIR}/libnetwork .gopath/src/github.com/docker/libnetwork
+ ln -sf ${WORKDIR}/cli .gopath/src/github.com/docker/cli
- export GOPATH="${S}/.gopath:${S}/vendor:${STAGING_DIR_TARGET}/${prefix}/local/go"
+ export GOPATH="${S}/src/import/.gopath:${S}/src/import/vendor:${STAGING_DIR_TARGET}/${prefix}/local/go"
export GOROOT="${STAGING_DIR_NATIVE}/${nonarch_libdir}/${HOST_SYS}/go"
- cd -
# Pass the needed cflags/ldflags so that cgo
# can find the needed headers files and libraries
+ export GOARCH=${TARGET_GOARCH}
export CGO_ENABLED="1"
export CGO_CFLAGS="${CFLAGS} --sysroot=${STAGING_DIR_TARGET}"
export CGO_LDFLAGS="${LDFLAGS} --sysroot=${STAGING_DIR_TARGET}"
@@ -107,7 +111,10 @@ do_compile() {
./hack/make.sh dynbinary
# build the proxy
- go build -o ${S}/docker-proxy github.com/docker/libnetwork/cmd/proxy
+ go build -o ${S}/src/import/docker-proxy github.com/docker/libnetwork/cmd/proxy
+
+ # build the cli
+ go build -o ${S}/src/import/bundles/latest/dynbinary-client/docker github.com/docker/cli/cmd/docker
}
SYSTEMD_PACKAGES = "${@bb.utils.contains('DISTRO_FEATURES','systemd','${PN}','',d)}"
@@ -117,19 +124,19 @@ SYSTEMD_AUTO_ENABLE_${PN} = "enable"
INITSCRIPT_PACKAGES += "${@bb.utils.contains('DISTRO_FEATURES','sysvinit','${PN}','',d)}"
INITSCRIPT_NAME_${PN} = "${@bb.utils.contains('DISTRO_FEATURES','sysvinit','docker.init','',d)}"
-INITSCRIPT_PARAMS_${PN} = "${OS_DEFAULT_INITSCRIPT_PARAMS}"
+INITSCRIPT_PARAMS_${PN} = "defaults"
do_install() {
mkdir -p ${D}/${bindir}
- cp ${S}/bundles/latest/dynbinary-client/docker ${D}/${bindir}/docker
- cp ${S}/bundles/latest/dynbinary-daemon/dockerd ${D}/${bindir}/dockerd
- cp ${S}/docker-proxy ${D}/${bindir}/docker-proxy
+ cp ${S}/src/import/bundles/latest/dynbinary-client/docker ${D}/${bindir}/docker
+ cp ${S}/src/import/bundles/latest/dynbinary-daemon/dockerd ${D}/${bindir}/dockerd
+ cp ${S}/src/import/docker-proxy ${D}/${bindir}/docker-proxy
if ${@bb.utils.contains('DISTRO_FEATURES','systemd','true','false',d)}; then
install -d ${D}${systemd_unitdir}/system
- install -m 644 ${S}/contrib/init/systemd/docker.* ${D}/${systemd_unitdir}/system
+ install -m 644 ${S}/src/import/contrib/init/systemd/docker.* ${D}/${systemd_unitdir}/system
# replaces one copied from above with one that uses the local registry for a mirror
- install -m 644 ${S}/contrib/init/systemd/docker.service ${D}/${systemd_unitdir}/system
+ install -m 644 ${S}/src/import/contrib/init/systemd/docker.service ${D}/${systemd_unitdir}/system
else
install -d ${D}${sysconfdir}/init.d
install -m 0755 ${WORKDIR}/docker.init ${D}${sysconfdir}/init.d/docker.init
@@ -137,7 +144,7 @@ do_install() {
mkdir -p ${D}${datadir}/docker/
cp ${WORKDIR}/hi.Dockerfile ${D}${datadir}/docker/
- install -m 0755 ${S}/contrib/check-config.sh ${D}${datadir}/docker/
+ install -m 0755 ${S}/src/import/contrib/check-config.sh ${D}${datadir}/docker/
}
inherit useradd
diff --git a/import-layers/meta-virtualization/recipes-containers/docker/files/context-use-golang.org-x-net-pkg-until-we-move-to-go.patch b/import-layers/meta-virtualization/recipes-containers/docker/files/context-use-golang.org-x-net-pkg-until-we-move-to-go.patch
index 240b74418..7ed606ff5 100644
--- a/import-layers/meta-virtualization/recipes-containers/docker/files/context-use-golang.org-x-net-pkg-until-we-move-to-go.patch
+++ b/import-layers/meta-virtualization/recipes-containers/docker/files/context-use-golang.org-x-net-pkg-until-we-move-to-go.patch
@@ -11,20 +11,16 @@ walwrap.go:4:2: cannot find package "context" in any of:
Signed-off-by: Mark Asselstine <mark.asselstine@windriver.com>
---
- client/README.md | 2 +-
- client/client.go | 2 +-
- daemon/info_unix.go | 2 +-
- integration-cli/docker_api_attach_test.go | 2 +-
- integration-cli/docker_cli_save_load_unix_test.go | 2 +-
- vendor/github.com/docker/swarmkit/manager/logbroker/subscription.go | 2 +-
- vendor/github.com/docker/swarmkit/manager/state/raft/storage/walwrap.go | 2 +-
- 7 files changed, 7 insertions(+), 7 deletions(-)
+ client/README.md | 2 +-
+ client/client.go | 2 +-
+ daemon/info_unix.go | 2 +-
+ integration-cli/docker_api_attach_test.go | 2 +-
+ integration-cli/docker_cli_save_load_unix_test.go | 2 +-
+ 5 files changed, 5 insertions(+), 5 deletions(-)
-diff --git a/client/README.md b/client/README.md
-index 059dfb3..9de54aa 100644
---- a/client/README.md
-+++ b/client/README.md
-@@ -8,7 +8,7 @@ For example, to list running containers (the equivalent of `docker ps`):
+--- a/src/import/client/README.md
++++ b/src/import/client/README.md
+@@ -8,7 +8,7 @@ For example, to list running containers
package main
import (
@@ -33,11 +29,9 @@ index 059dfb3..9de54aa 100644
"fmt"
"github.com/docker/docker/api/types"
-diff --git a/client/client.go b/client/client.go
-index a9bdab6..95933af 100644
---- a/client/client.go
-+++ b/client/client.go
-@@ -19,7 +19,7 @@ For example, to list running containers (the equivalent of "docker ps"):
+--- a/src/import/client/client.go
++++ b/src/import/client/client.go
+@@ -19,7 +19,7 @@ For example, to list running containers
package main
import (
@@ -46,10 +40,8 @@ index a9bdab6..95933af 100644
"fmt"
"github.com/docker/docker/api/types"
-diff --git a/daemon/info_unix.go b/daemon/info_unix.go
-index 9c41c0e..57f8a7b 100644
---- a/daemon/info_unix.go
-+++ b/daemon/info_unix.go
+--- a/src/import/daemon/info_unix.go
++++ b/src/import/daemon/info_unix.go
@@ -3,7 +3,7 @@
package daemon
@@ -59,10 +51,8 @@ index 9c41c0e..57f8a7b 100644
"os/exec"
"strings"
-diff --git a/integration-cli/docker_api_attach_test.go b/integration-cli/docker_api_attach_test.go
-index d43bf3a..e5802a7 100644
---- a/integration-cli/docker_api_attach_test.go
-+++ b/integration-cli/docker_api_attach_test.go
+--- a/src/import/integration-cli/docker_api_attach_test.go
++++ b/src/import/integration-cli/docker_api_attach_test.go
@@ -3,7 +3,7 @@ package main
import (
"bufio"
@@ -72,10 +62,8 @@ index d43bf3a..e5802a7 100644
"io"
"net"
"net/http"
-diff --git a/integration-cli/docker_cli_save_load_unix_test.go b/integration-cli/docker_cli_save_load_unix_test.go
-index 22445e5..d0afc8c 100644
---- a/integration-cli/docker_cli_save_load_unix_test.go
-+++ b/integration-cli/docker_cli_save_load_unix_test.go
+--- a/src/import/integration-cli/docker_cli_save_load_unix_test.go
++++ b/src/import/integration-cli/docker_cli_save_load_unix_test.go
@@ -3,7 +3,7 @@
package main
@@ -85,32 +73,3 @@ index 22445e5..d0afc8c 100644
"fmt"
"io/ioutil"
"os"
-diff --git a/vendor/github.com/docker/swarmkit/manager/logbroker/subscription.go b/vendor/github.com/docker/swarmkit/manager/logbroker/subscription.go
-index 6b3295a..cbfcf7e 100644
---- a/vendor/github.com/docker/swarmkit/manager/logbroker/subscription.go
-+++ b/vendor/github.com/docker/swarmkit/manager/logbroker/subscription.go
-@@ -1,7 +1,7 @@
- package logbroker
-
- import (
-- "context"
-+ "golang.org/x/net/context"
- "fmt"
- "strings"
- "sync"
-diff --git a/vendor/github.com/docker/swarmkit/manager/state/raft/storage/walwrap.go b/vendor/github.com/docker/swarmkit/manager/state/raft/storage/walwrap.go
-index 5a6c71a..efe5921 100644
---- a/vendor/github.com/docker/swarmkit/manager/state/raft/storage/walwrap.go
-+++ b/vendor/github.com/docker/swarmkit/manager/state/raft/storage/walwrap.go
-@@ -1,7 +1,7 @@
- package storage
-
- import (
-- "context"
-+ "golang.org/x/net/context"
- "io"
- "io/ioutil"
- "os"
---
-2.7.4
-
diff --git a/import-layers/meta-virtualization/recipes-containers/docker/files/docker.init b/import-layers/meta-virtualization/recipes-containers/docker/files/docker.init
index 9c01c7581..2e8eb9e40 100644
--- a/import-layers/meta-virtualization/recipes-containers/docker/files/docker.init
+++ b/import-layers/meta-virtualization/recipes-containers/docker/files/docker.init
@@ -28,6 +28,7 @@ exec="/usr/bin/$prog"
pidfile="/var/run/$prog.pid"
lockfile="/var/lock/subsys/$prog"
logfile="/var/log/$prog"
+other_args="--registry-mirror=http://localhost:5000 --insecure-registry=http://localhost:5000 --raw-logs"
[ -e /etc/sysconfig/$prog ] && . /etc/sysconfig/$prog
@@ -38,8 +39,8 @@ start() {
if ! [ -f $pidfile ]; then
printf "Starting $prog:\t"
- echo "\n$(date)\n" >> $logfile
- "$unshare" -m -- $exec -d $other_args &>> $logfile &
+ echo -e "\n$(date)\n" >> $logfile
+ "$unshare" -m -- $exec daemon $other_args &>> $logfile &
pid=$!
touch $lockfile
# wait up to 10 seconds for the pidfile to exist. see
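The init script change above points the daemon at an image mirror on http://localhost:5000, marks that address as an insecure (plain-HTTP) registry, and switches the daemon invocation from the old '-d' flag to the 'daemon' subcommand. The mirror settings only do something if a registry is actually listening on that port; as an illustrative sketch that is not part of this commit, the stock registry image is one way to provide it on a development host:

    # Run a local Docker registry on port 5000 so the
    # --registry-mirror/--insecure-registry options above have a target.
    docker run -d -p 5000:5000 --restart=always --name registry registry:2

    # Optional sanity check: a v2 registry answers the ping endpoint.
    curl http://localhost:5000/v2/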
diff --git a/import-layers/meta-virtualization/recipes-containers/kubernetes/kubernetes_git.bb b/import-layers/meta-virtualization/recipes-containers/kubernetes/kubernetes_git.bb
new file mode 100644
index 000000000..2c7161ec0
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/kubernetes/kubernetes_git.bb
@@ -0,0 +1,98 @@
+HOMEPAGE = "git://github.com/kubernetes/kubernetes"
+SUMMARY = "Production-Grade Container Scheduling and Management"
+DESCRIPTION = "Kubernetes is an open source system for managing containerized \
+applications across multiple hosts, providing basic mechanisms for deployment, \
+maintenance, and scaling of applications. \
+"
+
+SRCREV_kubernetes = "4b839465f84e7faf876c51703aaf49b37fd10d9c"
+SRC_URI = "git://github.com/kubernetes/kubernetes.git;nobranch=1;name=kubernetes \
+ "
+
+DEPENDS += "rsync-native \
+ coreutils-native \
+ "
+
+PACKAGES =+ "kubeadm"
+PACKAGES =+ "kubectl"
+PACKAGES =+ "kubelet"
+
+ALLOW_EMPTY_${PN} = "1"
+
+# Note: we are explicitly *not* adding docker to the rdepends, since we allow
+# backends like cri-o to be used.
+RDEPENDS_${PN} += "kubeadm \
+ kubectl \
+ kubelet \
+ cni"
+
+RDEPENDS_kubeadm = "kubelet kubectl"
+RDEPENDS_kubelet = "iptables socat util-linux ethtool iproute2 ebtables"
+
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://src/import/LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57"
+
+GO_IMPORT = "import"
+
+PV = "1.9.0-alpha.1+git${SRCREV_kubernetes}"
+
+inherit systemd
+inherit go
+inherit goarch
+
+do_compile() {
+ export GOARCH="${TARGET_GOARCH}"
+ export GOROOT="${STAGING_LIBDIR_NATIVE}/${TARGET_SYS}/go"
+ export GOPATH="${S}/src/import:${S}/src/import/vendor"
+
+ # Pass the needed cflags/ldflags so that cgo
+	# can find the needed header files and libraries
+ export CGO_ENABLED="1"
+ export CFLAGS=""
+ export LDFLAGS=""
+ export CGO_CFLAGS="${BUILDSDK_CFLAGS} --sysroot=${STAGING_DIR_TARGET}"
+ export CGO_LDFLAGS="${BUILDSDK_LDFLAGS} --sysroot=${STAGING_DIR_TARGET}"
+
+ # link fixups for compilation
+ rm -f ${S}/src/import/vendor/src
+ ln -sf ./ ${S}/src/import/vendor/src
+
+ export GOPATH="${S}/src/import/.gopath:${S}/src/import/vendor:${STAGING_DIR_TARGET}/${prefix}/local/go"
+ export GOROOT="${STAGING_DIR_NATIVE}/${nonarch_libdir}/${HOST_SYS}/go"
+
+ # Pass the needed cflags/ldflags so that cgo
+	# can find the needed header files and libraries
+ export CGO_ENABLED="1"
+ export CGO_CFLAGS="${CFLAGS} --sysroot=${STAGING_DIR_TARGET}"
+ export CGO_LDFLAGS="${LDFLAGS} --sysroot=${STAGING_DIR_TARGET}"
+
+ cd ${S}/src/import
+ # to limit what is built, use 'WHAT', i.e. make WHAT=cmd/kubelet
+ make all
+}
+
+do_install() {
+ install -d ${D}${bindir}
+ install -d ${D}${systemd_unitdir}/system/
+ install -d ${D}${systemd_unitdir}/system/kubelet.service.d/
+
+ install -d ${D}${sysconfdir}/kubernetes/manifests/
+
+ install -m 755 -D ${S}/src/import/_output/bin/kube* ${D}/${bindir}
+
+ install -m 0644 ${S}/src/import/build/debs/kubelet.service ${D}${systemd_unitdir}/system/
+ install -m 0644 ${S}/src/import/build/debs/10-kubeadm.conf ${D}${systemd_unitdir}/system/kubelet.service.d/
+}
+
+SYSTEMD_PACKAGES = "${@bb.utils.contains('DISTRO_FEATURES','systemd','kubelet','',d)}"
+SYSTEMD_SERVICE_kubelet = "${@bb.utils.contains('DISTRO_FEATURES','systemd','kubelet.service','',d)}"
+SYSTEMD_AUTO_ENABLE_kubelet = "enable"
+
+FILES_kubeadm = "${bindir}/kubeadm ${systemd_unitdir}/system/kubelet.service.d/*"
+FILES_kubectl = "${bindir}/kubectl"
+FILES_kubelet = "${bindir}/kubelet ${systemd_unitdir}/system/kubelet.service ${sysconfdir}/kubernetes/manifests/"
+
+INHIBIT_PACKAGE_STRIP = "1"
+INSANE_SKIP_${PN} += "ldflags already-stripped"
+
+deltask compile_ptest_base
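The recipe above compiles the full Kubernetes tree ('make all') and only splits the result into kubeadm/kubectl/kubelet packages at install time; the comment in do_compile() notes that upstream's WHAT variable can restrict what gets built. A minimal sketch of using that, in a hypothetical kubernetes_git.bbappend that reuses the recipe's cross-compile environment (illustrative only, not part of this commit):

    # Hypothetical kubernetes_git.bbappend -- build only kubelet and kubectl.
    do_compile() {
        export GOARCH="${TARGET_GOARCH}"
        export GOROOT="${STAGING_LIBDIR_NATIVE}/${TARGET_SYS}/go"
        export GOPATH="${S}/src/import:${S}/src/import/vendor"

        # cgo needs the target sysroot for headers and libraries
        export CGO_ENABLED="1"
        export CGO_CFLAGS="${BUILDSDK_CFLAGS} --sysroot=${STAGING_DIR_TARGET}"
        export CGO_LDFLAGS="${BUILDSDK_LDFLAGS} --sysroot=${STAGING_DIR_TARGET}"

        # same vendor/src link fixup as the recipe's do_compile()
        rm -f ${S}/src/import/vendor/src
        ln -sf ./ ${S}/src/import/vendor/src

        cd ${S}/src/import
        # WHAT= limits the upstream build to the listed commands
        make WHAT="cmd/kubelet cmd/kubectl"
    }

Dropping kubeadm this way would also mean trimming it from PACKAGES, RDEPENDS_${PN} and FILES_kubeadm, since its binary would no longer be produced.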
diff --git a/import-layers/meta-virtualization/recipes-containers/lxc/files/Generate-lxc-restore-net-properly.patch b/import-layers/meta-virtualization/recipes-containers/lxc/files/Generate-lxc-restore-net-properly.patch
deleted file mode 100644
index 5adb730c8..000000000
--- a/import-layers/meta-virtualization/recipes-containers/lxc/files/Generate-lxc-restore-net-properly.patch
+++ /dev/null
@@ -1,121 +0,0 @@
-From e08f3573b3561f1f0490624f7ca95b7ccd8157cb Mon Sep 17 00:00:00 2001
-Message-Id: <e08f3573b3561f1f0490624f7ca95b7ccd8157cb.1435177418.git.Jim.Somerville@windriver.com>
-From: Jim Somerville <Jim.Somerville@windriver.com>
-Date: Wed, 24 Jun 2015 16:16:38 -0400
-Subject: [PATCH 1/1] Generate lxc-restore-net properly
-
-It's a script that should be run through the configure
-mechanism the same as the others. We simply rename it
-to have a .in extension and add it to configure.ac .
-
-Also, by generating the script from a .in file, it gets
-placed into the build directory. This plays nice with
-build systems that keep the src separate from the build
-directory. Without this change, the install step won't
-find the lxc-restore-net script as it still just resides
-in the src directory and not in the build directory.
-
-Upstream-Status: Not applicable. This script has already
-been rearchitected out of existence by
-cba98d127bf490b018a016b792ae05fd2d29c5ee:
-"c/r: use criu option instead of lxc-restore-net
-
-As of criu 1.5, the --veth-pair argument supports an additional parameter that
-is the bridge name to attach to. This enables us to get rid of the goofy
-action-script hack that passed bridge names as environment variables.
-
-This patch is on top of the systemd/lxcfs mount rework patch, as we probably
-want to wait to use 1.5 options until it has been out for a while and is in
-distros.
-
-Signed-off-by: Tycho Andersen <tycho.andersen@canonical.com>
-Acked-by: Serge E. Hallyn <serge.hallyn@ubuntu.com>"
-
-Signed-off-by: Jim Somerville <Jim.Somerville@windriver.com>
----
- configure.ac | 1 +
- src/lxc/lxc-restore-net | 26 --------------------------
- src/lxc/lxc-restore-net.in | 26 ++++++++++++++++++++++++++
- 3 files changed, 27 insertions(+), 26 deletions(-)
- delete mode 100755 src/lxc/lxc-restore-net
- create mode 100755 src/lxc/lxc-restore-net.in
-
-diff --git a/configure.ac b/configure.ac
-index 574b2cd..4972803 100644
---- a/configure.ac
-+++ b/configure.ac
-@@ -768,6 +768,7 @@ AC_CONFIG_FILES([
- src/lxc/legacy/lxc-ls
- src/lxc/lxc.functions
- src/lxc/version.h
-+ src/lxc/lxc-restore-net
- src/python-lxc/Makefile
- src/python-lxc/setup.py
-
-diff --git a/src/lxc/lxc-restore-net b/src/lxc/lxc-restore-net
-deleted file mode 100755
-index 6ae3c19..0000000
---- a/src/lxc/lxc-restore-net
-+++ /dev/null
-@@ -1,26 +0,0 @@
--#!/bin/sh
--
--set -e
--
--i=0
--while true; do
-- eval "bridge=\$LXC_CRIU_BRIDGE$i"
-- eval "veth=\$LXC_CRIU_VETH$i"
--
-- if [ -z "$bridge" ] || [ -z "$veth" ]; then
-- exit 0
-- fi
--
-- if [ "$CRTOOLS_SCRIPT_ACTION" = "network-lock" ]; then
-- brctl delif $bridge $veth
-- fi
--
-- if [ "$CRTOOLS_SCRIPT_ACTION" = "network-unlock" ]; then
-- brctl addif $bridge $veth
-- ip link set dev $veth up
-- fi
--
-- i=$((i+1))
--done
--
--exit 1
-diff --git a/src/lxc/lxc-restore-net.in b/src/lxc/lxc-restore-net.in
-new file mode 100755
-index 0000000..6ae3c19
---- /dev/null
-+++ b/src/lxc/lxc-restore-net.in
-@@ -0,0 +1,26 @@
-+#!/bin/sh
-+
-+set -e
-+
-+i=0
-+while true; do
-+ eval "bridge=\$LXC_CRIU_BRIDGE$i"
-+ eval "veth=\$LXC_CRIU_VETH$i"
-+
-+ if [ -z "$bridge" ] || [ -z "$veth" ]; then
-+ exit 0
-+ fi
-+
-+ if [ "$CRTOOLS_SCRIPT_ACTION" = "network-lock" ]; then
-+ brctl delif $bridge $veth
-+ fi
-+
-+ if [ "$CRTOOLS_SCRIPT_ACTION" = "network-unlock" ]; then
-+ brctl addif $bridge $veth
-+ ip link set dev $veth up
-+ fi
-+
-+ i=$((i+1))
-+done
-+
-+exit 1
---
-1.8.3.2
-
diff --git a/import-layers/meta-virtualization/recipes-containers/lxc/files/Use-AC_HEADER_MAJOR-to-detect-major-minor-makedev.patch b/import-layers/meta-virtualization/recipes-containers/lxc/files/Use-AC_HEADER_MAJOR-to-detect-major-minor-makedev.patch
deleted file mode 100644
index f9cecc075..000000000
--- a/import-layers/meta-virtualization/recipes-containers/lxc/files/Use-AC_HEADER_MAJOR-to-detect-major-minor-makedev.patch
+++ /dev/null
@@ -1,119 +0,0 @@
-From 5c957671a511441b112b137b88bf0b1f31adac20 Mon Sep 17 00:00:00 2001
-From: Sergei Trofimovich <siarheit@google.com>
-Date: Sat, 21 Jan 2017 11:57:13 +0000
-Subject: [PATCH] Use AC_HEADER_MAJOR to detect major()/minor()/makedev()
-
-commit af6824fce9c9536fbcabef8d5547f6c486f55fdf from
-git://github.com/lxc/lxc.git
-
-Before the change build failed on Gentoo as:
-
- bdev/lxclvm.c: In function 'lvm_detect':
- bdev/lxclvm.c:140:4: error: implicit declaration of function 'major' [-Werror=implicit-function-declaration]
- major(statbuf.st_rdev), minor(statbuf.st_rdev));
- ^~~~~
- bdev/lxclvm.c:140:28: error: implicit declaration of function 'minor' [-Werror=implicit-function-declaration]
- major(statbuf.st_rdev), minor(statbuf.st_rdev));
- ^~~~~
-
-glibc plans to remove <sys/sysmacros.h> from glibc's <sys/types.h>:
- https://sourceware.org/ml/libc-alpha/2015-11/msg00253.html
-
-Gentoo already applied glibc patch to experimental glibc-2.24
-to start preparingfor the change.
-
-Autoconf has AC_HEADER_MAJOR to find out which header defines
-reqiured macros:
- https://www.gnu.org/software/autoconf/manual/autoconf-2.69/html_node/Particular-Headers.html
-
-This change should also increase portability across other libcs.
-
-Bug: https://bugs.gentoo.org/604360
-Signed-off-by: Sergei Trofimovich <siarheit@google.com>
-Signed-off-by: Mark Asselstine <mark.asselstine@windriver.com>
----
- configure.ac | 3 +++
- src/lxc/bdev/lxclvm.c | 9 +++++++++
- src/lxc/conf.c | 8 ++++++++
- src/lxc/lxccontainer.c | 8 ++++++++
- 4 files changed, 28 insertions(+)
-
-diff --git a/configure.ac b/configure.ac
-index 8f31c29..924baa1 100644
---- a/configure.ac
-+++ b/configure.ac
-@@ -601,6 +601,9 @@ AC_CHECK_DECLS([PR_CAPBSET_DROP], [], [], [#include <sys/prctl.h>])
- # Check for some headers
- AC_CHECK_HEADERS([sys/signalfd.h pty.h ifaddrs.h sys/capability.h sys/personality.h utmpx.h sys/timerfd.h])
-
-+# lookup major()/minor()/makedev()
-+AC_HEADER_MAJOR
-+
- # Check for some syscalls functions
- AC_CHECK_FUNCS([setns pivot_root sethostname unshare rand_r confstr faccessat])
-
-diff --git a/src/lxc/bdev/lxclvm.c b/src/lxc/bdev/lxclvm.c
-index 3d41b10..419d1c2 100644
---- a/src/lxc/bdev/lxclvm.c
-+++ b/src/lxc/bdev/lxclvm.c
-@@ -32,10 +32,19 @@
- #include <sys/wait.h>
-
- #include "bdev.h"
-+#include "config.h"
- #include "log.h"
- #include "lxclvm.h"
- #include "utils.h"
-
-+/* major()/minor() */
-+#ifdef MAJOR_IN_MKDEV
-+# include <sys/mkdev.h>
-+#endif
-+#ifdef MAJOR_IN_SYSMACROS
-+# include <sys/sysmacros.h>
-+#endif
-+
- lxc_log_define(lxclvm, lxc);
-
- extern char *dir_new_path(char *src, const char *oldname, const char *name,
-diff --git a/src/lxc/conf.c b/src/lxc/conf.c
-index 3b023ef..53406ca 100644
---- a/src/lxc/conf.c
-+++ b/src/lxc/conf.c
-@@ -39,6 +39,14 @@
- #include <grp.h>
- #include <time.h>
-
-+/* makedev() */
-+#ifdef MAJOR_IN_MKDEV
-+# include <sys/mkdev.h>
-+#endif
-+#ifdef MAJOR_IN_SYSMACROS
-+# include <sys/sysmacros.h>
-+#endif
-+
- #ifdef HAVE_STATVFS
- #include <sys/statvfs.h>
- #endif
-diff --git a/src/lxc/lxccontainer.c b/src/lxc/lxccontainer.c
-index 9f12ca2..aa02833 100644
---- a/src/lxc/lxccontainer.c
-+++ b/src/lxc/lxccontainer.c
-@@ -61,6 +61,14 @@
- #include "utils.h"
- #include "version.h"
-
-+/* major()/minor() */
-+#ifdef MAJOR_IN_MKDEV
-+# include <sys/mkdev.h>
-+#endif
-+#ifdef MAJOR_IN_SYSMACROS
-+# include <sys/sysmacros.h>
-+#endif
-+
- #if HAVE_IFADDRS_H
- #include <ifaddrs.h>
- #else
---
-2.7.4
-
diff --git a/import-layers/meta-virtualization/recipes-containers/lxc/files/automake-ensure-VPATH-builds-correctly.patch b/import-layers/meta-virtualization/recipes-containers/lxc/files/automake-ensure-VPATH-builds-correctly.patch
index 2b5c853c2..61c0e2930 100644
--- a/import-layers/meta-virtualization/recipes-containers/lxc/files/automake-ensure-VPATH-builds-correctly.patch
+++ b/import-layers/meta-virtualization/recipes-containers/lxc/files/automake-ensure-VPATH-builds-correctly.patch
@@ -15,7 +15,7 @@ index d74c10d..6225f78 100644
@@ -66,7 +66,7 @@ buildtest-TESTS: $(TESTS)
install-ptest:
install -d $(TEST_DIR)
- install -D ../lxc/liblxc.so $(TEST_DIR)/../lxc/liblxc.so
+ install -D ../lxc/.libs/liblxc.so $(TEST_DIR)/../lxc/liblxc.so
- install -D ../../config/test-driver $(TEST_DIR)/../../config/test-driver
+ install -D $(top_srcdir)/config/test-driver $(TEST_DIR)/../../config/test-driver
cp Makefile $(TEST_DIR)
diff --git a/import-layers/meta-virtualization/recipes-containers/lxc/files/cgroups-work-around-issue-in-gcc-7.patch b/import-layers/meta-virtualization/recipes-containers/lxc/files/cgroups-work-around-issue-in-gcc-7.patch
new file mode 100644
index 000000000..90740fb32
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/lxc/files/cgroups-work-around-issue-in-gcc-7.patch
@@ -0,0 +1,34 @@
+From 58a2d817a82100d287c60c63315d81445cdba3f9 Mon Sep 17 00:00:00 2001
+From: Mark Asselstine <mark.asselstine@windriver.com>
+Date: Thu, 15 Jun 2017 15:12:08 -0400
+Subject: [PATCH] cgroups: work around issue in gcc 7
+
+This works around
+https://bugzilla.yoctoproject.org/show_bug.cgi?id=11672
+https://gcc.gnu.org/bugzilla/show_bug.cgi?id=78969
+
+By removing a single cgroup entry. For the majority of use cases this
+loss of a single entry should not be an issue and once gcc 7 is fixed
+we can revert this.
+
+Signed-off-by: Mark Asselstine <mark.asselstine@windriver.com>
+---
+ src/lxc/cgroups/cgfsng.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/src/lxc/cgroups/cgfsng.c b/src/lxc/cgroups/cgfsng.c
+index ebd548b..c520abd 100644
+--- a/src/lxc/cgroups/cgfsng.c
++++ b/src/lxc/cgroups/cgfsng.c
+@@ -1373,7 +1373,7 @@ static inline bool cgfsng_create(void *hdata)
+ offset = cgname + len - 5;
+
+ again:
+- if (idx == 1000) {
++ if (idx == 999) {
+ ERROR("Too many conflicting cgroup names");
+ goto out_free;
+ }
+--
+2.7.4
+
diff --git a/import-layers/meta-virtualization/recipes-containers/lxc/files/runtest.patch b/import-layers/meta-virtualization/recipes-containers/lxc/files/runtest.patch
index e4e034b2e..6572265f2 100644
--- a/import-layers/meta-virtualization/recipes-containers/lxc/files/runtest.patch
+++ b/import-layers/meta-virtualization/recipes-containers/lxc/files/runtest.patch
@@ -19,7 +19,7 @@ diff -uNr a/src/tests/Makefile.am b/src/tests/Makefile.am
+
+install-ptest:
+ install -d $(TEST_DIR)
-+ install -D ../lxc/liblxc.so $(TEST_DIR)/../lxc/liblxc.so
++ install -D ../lxc/.libs/liblxc.so $(TEST_DIR)/../lxc/liblxc.so
+ install -D ../../config/test-driver $(TEST_DIR)/../../config/test-driver
+ cp Makefile $(TEST_DIR)
+ @(for file in $(TESTS); do install $$file $(TEST_DIR); done;)
diff --git a/import-layers/meta-virtualization/recipes-containers/lxc/lxc_2.0.0.bb b/import-layers/meta-virtualization/recipes-containers/lxc/lxc_2.0.8.bb
index c98d1a7f6..93d5a1067 100644
--- a/import-layers/meta-virtualization/recipes-containers/lxc/lxc_2.0.0.bb
+++ b/import-layers/meta-virtualization/recipes-containers/lxc/lxc_2.0.8.bb
@@ -2,7 +2,6 @@ DESCRIPTION = "lxc aims to use these new functionnalities to provide an userspac
SECTION = "console/utils"
LICENSE = "GPLv2"
LIC_FILES_CHKSUM = "file://COPYING;md5=4fbd65380cdd255951079008b364516c"
-PRIORITY = "optional"
DEPENDS = "libxml2 libcap"
RDEPENDS_${PN} = " \
rsync \
@@ -10,7 +9,6 @@ RDEPENDS_${PN} = " \
libcap-bin \
bridge-utils \
dnsmasq \
- initscripts \
perl-module-strict \
perl-module-getopt-long \
perl-module-vars \
@@ -19,12 +17,12 @@ RDEPENDS_${PN} = " \
perl-module-constant \
perl-module-overload \
perl-module-exporter-heavy \
+ gmp \
+ libidn \
+ gnutls \
+ nettle \
"
-RDEPENDS_${PN}_append_libc-glibc = "\
- glibc-utils \
-"
-
-RDEPENDS_${PN}-ptest += "file make"
+RDEPENDS_${PN}-ptest += "file make gmp nettle gnutls bash"
SRC_URI = "http://linuxcontainers.org/downloads/${BPN}-${PV}.tar.gz \
file://lxc-1.0.0-disable-udhcp-from-busybox-template.patch \
@@ -34,11 +32,11 @@ SRC_URI = "http://linuxcontainers.org/downloads/${BPN}-${PV}.tar.gz \
file://lxc-fix-B-S.patch \
file://lxc-doc-upgrade-to-use-docbook-3.1-DTD.patch \
file://logs-optionally-use-base-filenames-to-report-src-fil.patch \
- file://Use-AC_HEADER_MAJOR-to-detect-major-minor-makedev.patch \
+ file://cgroups-work-around-issue-in-gcc-7.patch \
"
-SRC_URI[md5sum] = "04a7245a614cd3296b0ae9ceeeb83fbb"
-SRC_URI[sha256sum] = "5b737e114d8ef1feb193fba936d77a5697a7c8a10199a068cdd90d1bd27c10e4"
+SRC_URI[md5sum] = "7bfd95280522d7936c0979dfea92cdb5"
+SRC_URI[sha256sum] = "0d8e34b302cfe4c40c6c9ae5097096aa5cc2c1dfceea3f0f22e3e16c4a4e8494"
S = "${WORKDIR}/${BPN}-${PV}"
@@ -81,25 +79,27 @@ SYSTEMD_AUTO_ENABLE_${PN}-setup = "disable"
INITSCRIPT_PACKAGES = "${PN}-setup"
INITSCRIPT_NAME_{PN}-setup = "lxc"
-INITSCRIPT_PARAMS_${PN}-setup = "${OS_DEFAULT_INITSCRIPT_PARAMS}"
+INITSCRIPT_PARAMS_${PN}-setup = "defaults"
FILES_${PN}-doc = "${mandir} ${infodir}"
# For LXC the docdir only contains example configuration files and should be included in the lxc package
FILES_${PN} += "${docdir}"
FILES_${PN} += "${libdir}/python3*"
-FILES_${PN} += "${datadir}/lua/*"
-FILES_${PN} += "${libdir}/lua/lxc/*"
-FILES_${PN}-dbg += "${libdir}/lua/lxc/.debug"
-FILES_${PN}-dbg += "${libexecdir}/lxc/.debug ${libexecdir}/lxc/hooks/.debug"
-PACKAGES =+ "${PN}-templates ${PN}-setup ${PN}-networking"
+FILES_${PN} += "${datadir}/bash-completion"
+FILES_${PN}-dbg += "${libexecdir}/lxc/.debug"
+FILES_${PN}-dbg += "${libexecdir}/lxc/hooks/.debug"
+PACKAGES =+ "${PN}-templates ${PN}-setup ${PN}-networking ${PN}-lua"
+FILES_lua-${PN} = "${datadir}/lua ${libdir}/lua"
+FILES_lua-${PN}-dbg += "${libdir}/lua/lxc/.debug"
FILES_${PN}-templates += "${datadir}/lxc/templates"
RDEPENDS_${PN}-templates += "bash"
ALLOW_EMPTY_${PN}-networking = "1"
-FILES_${PN}-setup += "${sysconfdir}/tmpfiles.d"
-FILES_${PN}-setup += "${systemd_system_unitdir}"
-FILES_${PN}-setup += "${sysconfdir}/init.d"
+FILES_${PN}-setup += "/etc/tmpfiles.d"
+FILES_${PN}-setup += "/lib/systemd/system"
+FILES_${PN}-setup += "/usr/lib/systemd/system"
+FILES_${PN}-setup += "/etc/init.d"
PRIVATE_LIBS_${PN}-ptest = "liblxc.so.1"
diff --git a/import-layers/meta-virtualization/recipes-containers/oci-image-tools/files/0001-image-manifest-Recursively-remove-pre-existing-entri.patch b/import-layers/meta-virtualization/recipes-containers/oci-image-tools/files/0001-image-manifest-Recursively-remove-pre-existing-entri.patch
new file mode 100644
index 000000000..5594f9762
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/oci-image-tools/files/0001-image-manifest-Recursively-remove-pre-existing-entri.patch
@@ -0,0 +1,78 @@
+From 1f205c0aec5ea9e983d61a64e7ce871ae416bebd Mon Sep 17 00:00:00 2001
+From: "W. Trevor King" <wking@tremily.us>
+Date: Tue, 18 Oct 2016 02:16:46 -0700
+Subject: [PATCH 1/2] image/manifest: Recursively remove pre-existing entries
+ when unpacking
+
+Implementing the logic that is in-flight with [1], but using recursive
+removal [2]. GNU tar has a --recursive-unlink option that's not
+enabled by default, with the motivation being something like "folks
+would be mad if we blew away a full tree and replaced it with a broken
+symlink" [3]. That makes sense for working filesystems, but we're
+building the rootfs from scratch here so losing information is not a
+concern. This commit always uses recursive removal to get that old
+thing off the filesystem (whatever it takes ;).
+
+The exception to the removal is if both the tar entry and existing
+path occupant are directories. In this case we want to use GNU tar's
+default --overwrite-dir behavior, but unpackLayer's metadata handling
+is currently very weak so I've left it at "don't delete the old
+directory".
+
+The reworked directory case also fixes a minor bug from 44210d05
+(cmd/oci-image-tool: fix unpacking..., 2016-07-22, #177) where the:
+
+ if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) {
+
+block would not error out if the Lstat failed for a reason besides the
+acceptable IsNotExist. Instead, it would attempt to call MkdirAll,
+which would probably fail for the same reason that Lstat failed
+(e.g. ENOTDIR). But it's better to handle the Lstat errors directly.
+
+[1]: https://github.com/opencontainers/image-spec/pull/317
+[2]: https://github.com/opencontainers/image-spec/pull/317/files#r79214718
+[3]: https://www.gnu.org/software/tar/manual/html_node/Dealing-with-Old-Files.html
+
+Signed-off-by: W. Trevor King <wking@tremily.us>
+---
+ image/manifest.go | 22 +++++++++++++++++++---
+ 1 file changed, 19 insertions(+), 3 deletions(-)
+
+diff --git a/image/manifest.go b/image/manifest.go
+index 8834c1e5f2f0..144bd4f62219 100644
+--- a/src/import/image/manifest.go
++++ b/src/import/image/manifest.go
+@@ -253,11 +253,27 @@ loop:
+ continue loop
+ }
+
++ if hdr.Typeflag != tar.TypeDir {
++ err = os.RemoveAll(path)
++ if err != nil && !os.IsNotExist(err) {
++ return err
++ }
++ }
++
+ switch hdr.Typeflag {
+ case tar.TypeDir:
+- if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) {
+- if err2 := os.MkdirAll(path, info.Mode()); err2 != nil {
+- return errors.Wrap(err2, "error creating directory")
++ fi, err := os.Lstat(path)
++ if err != nil && !os.IsNotExist(err) {
++ return err
++ }
++ if os.IsNotExist(err) || !fi.IsDir() {
++ err = os.RemoveAll(path)
++ if err != nil && !os.IsNotExist(err) {
++ return err
++ }
++ err = os.MkdirAll(path, info.Mode())
++ if err != nil {
++ return err
+ }
+ }
+
+--
+2.4.0.53.g8440f74
+
diff --git a/import-layers/meta-virtualization/recipes-containers/oci-image-tools/files/0002-image-manifest-Split-unpackLayerEntry-into-its-own-f.patch b/import-layers/meta-virtualization/recipes-containers/oci-image-tools/files/0002-image-manifest-Split-unpackLayerEntry-into-its-own-f.patch
new file mode 100644
index 000000000..69bdcdb50
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/oci-image-tools/files/0002-image-manifest-Split-unpackLayerEntry-into-its-own-f.patch
@@ -0,0 +1,242 @@
+From 1e55f2a83b1f644803b640b72171b4ae0d95217b Mon Sep 17 00:00:00 2001
+From: "W. Trevor King" <wking@tremily.us>
+Date: Thu, 20 Oct 2016 23:30:22 -0700
+Subject: [PATCH 2/2] image/manifest: Split unpackLayerEntry into its own
+ function
+
+To help address:
+
+ $ make lint
+ checking lint
+ image/manifest.go:140::warning: cyclomatic complexity 39 of function unpackLayer() is high (> 35) (gocyclo)
+ ...
+
+Signed-off-by: W. Trevor King <wking@tremily.us>
+---
+ image/manifest.go | 185 +++++++++++++++++++++++++++++-------------------------
+ 1 file changed, 100 insertions(+), 85 deletions(-)
+
+diff --git a/image/manifest.go b/image/manifest.go
+index 144bd4f62219..dfd5a83f70e4 100644
+--- a/src/import/image/manifest.go
++++ b/src/import/image/manifest.go
+@@ -218,116 +218,131 @@ loop:
+ return errors.Wrapf(err, "error advancing tar stream")
+ }
+
+- hdr.Name = filepath.Clean(hdr.Name)
+- if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
+- // Not the root directory, ensure that the parent directory exists
+- parent := filepath.Dir(hdr.Name)
+- parentPath := filepath.Join(dest, parent)
+- if _, err2 := os.Lstat(parentPath); err2 != nil && os.IsNotExist(err2) {
+- if err3 := os.MkdirAll(parentPath, 0755); err3 != nil {
+- return err3
+- }
+- }
+- }
+- path := filepath.Join(dest, hdr.Name)
+- if entries[path] {
+- return fmt.Errorf("duplicate entry for %s", path)
+- }
+- entries[path] = true
+- rel, err := filepath.Rel(dest, path)
++ var whiteout bool
++ whiteout, err = unpackLayerEntry(dest, hdr, tr, &entries)
+ if err != nil {
+ return err
+ }
+- info := hdr.FileInfo()
+- if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
+- return fmt.Errorf("%q is outside of %q", hdr.Name, dest)
++ if whiteout {
++ continue loop
+ }
+
+- if strings.HasPrefix(info.Name(), ".wh.") {
+- path = strings.Replace(path, ".wh.", "", 1)
++ // Directory mtimes must be handled at the end to avoid further
++ // file creation in them to modify the directory mtime
++ if hdr.Typeflag == tar.TypeDir {
++ dirs = append(dirs, hdr)
++ }
++ }
++ for _, hdr := range dirs {
++ path := filepath.Join(dest, hdr.Name)
+
+- if err := os.RemoveAll(path); err != nil {
+- return errors.Wrap(err, "unable to delete whiteout path")
++ finfo := hdr.FileInfo()
++ // I believe the old version was using time.Now().UTC() to overcome an
++ // invalid error from chtimes.....but here we lose hdr.AccessTime like this...
++ if err := os.Chtimes(path, time.Now().UTC(), finfo.ModTime()); err != nil {
++ return errors.Wrap(err, "error changing time")
++ }
++ }
++ return nil
++}
++
++// unpackLayerEntry unpacks a single entry from a layer.
++func unpackLayerEntry(dest string, header *tar.Header, reader io.Reader, entries *map[string]bool) (whiteout bool, err error) {
++ header.Name = filepath.Clean(header.Name)
++ if !strings.HasSuffix(header.Name, string(os.PathSeparator)) {
++ // Not the root directory, ensure that the parent directory exists
++ parent := filepath.Dir(header.Name)
++ parentPath := filepath.Join(dest, parent)
++ if _, err2 := os.Lstat(parentPath); err2 != nil && os.IsNotExist(err2) {
++ if err3 := os.MkdirAll(parentPath, 0755); err3 != nil {
++ return false, err3
+ }
++ }
++ }
++ path := filepath.Join(dest, header.Name)
++ if (*entries)[path] {
++ return false, fmt.Errorf("duplicate entry for %s", path)
++ }
++ (*entries)[path] = true
++ rel, err := filepath.Rel(dest, path)
++ if err != nil {
++ return false, err
++ }
++ info := header.FileInfo()
++ if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
++ return false, fmt.Errorf("%q is outside of %q", header.Name, dest)
++ }
+
+- continue loop
++ if strings.HasPrefix(info.Name(), ".wh.") {
++ path = strings.Replace(path, ".wh.", "", 1)
++
++ if err = os.RemoveAll(path); err != nil {
++ return true, errors.Wrap(err, "unable to delete whiteout path")
+ }
+
+- if hdr.Typeflag != tar.TypeDir {
+- err = os.RemoveAll(path)
+- if err != nil && !os.IsNotExist(err) {
+- return err
+- }
++ return true, nil
++ }
++
++ if header.Typeflag != tar.TypeDir {
++ err = os.RemoveAll(path)
++ if err != nil && !os.IsNotExist(err) {
++ return false, err
+ }
++ }
+
+- switch hdr.Typeflag {
+- case tar.TypeDir:
+- fi, err := os.Lstat(path)
++ switch header.Typeflag {
++ case tar.TypeDir:
++ fi, err := os.Lstat(path)
++ if err != nil && !os.IsNotExist(err) {
++ return false, err
++ }
++ if os.IsNotExist(err) || !fi.IsDir() {
++ err = os.RemoveAll(path)
+ if err != nil && !os.IsNotExist(err) {
+- return err
+- }
+- if os.IsNotExist(err) || !fi.IsDir() {
+- err = os.RemoveAll(path)
+- if err != nil && !os.IsNotExist(err) {
+- return err
+- }
+- err = os.MkdirAll(path, info.Mode())
+- if err != nil {
+- return err
+- }
++ return false, err
+ }
+-
+- case tar.TypeReg, tar.TypeRegA:
+- f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, info.Mode())
++ err = os.MkdirAll(path, info.Mode())
+ if err != nil {
+- return errors.Wrap(err, "unable to open file")
++ return false, err
+ }
++ }
+
+- if _, err := io.Copy(f, tr); err != nil {
+- f.Close()
+- return errors.Wrap(err, "unable to copy")
+- }
+- f.Close()
++ case tar.TypeReg, tar.TypeRegA:
++ f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, info.Mode())
++ if err != nil {
++ return false, errors.Wrap(err, "unable to open file")
++ }
+
+- case tar.TypeLink:
+- target := filepath.Join(dest, hdr.Linkname)
++ if _, err := io.Copy(f, reader); err != nil {
++ f.Close()
++ return false, errors.Wrap(err, "unable to copy")
++ }
++ f.Close()
+
+- if !strings.HasPrefix(target, dest) {
+- return fmt.Errorf("invalid hardlink %q -> %q", target, hdr.Linkname)
+- }
++ case tar.TypeLink:
++ target := filepath.Join(dest, header.Linkname)
+
+- if err := os.Link(target, path); err != nil {
+- return err
+- }
++ if !strings.HasPrefix(target, dest) {
++ return false, fmt.Errorf("invalid hardlink %q -> %q", target, header.Linkname)
++ }
+
+- case tar.TypeSymlink:
+- target := filepath.Join(filepath.Dir(path), hdr.Linkname)
++ if err := os.Link(target, path); err != nil {
++ return false, err
++ }
+
+- if !strings.HasPrefix(target, dest) {
+- return fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)
+- }
++ case tar.TypeSymlink:
++ target := filepath.Join(filepath.Dir(path), header.Linkname)
+
+- if err := os.Symlink(hdr.Linkname, path); err != nil {
+- return err
+- }
+- case tar.TypeXGlobalHeader:
+- return nil
++ if !strings.HasPrefix(target, dest) {
++ return false, fmt.Errorf("invalid symlink %q -> %q", path, header.Linkname)
+ }
+- // Directory mtimes must be handled at the end to avoid further
+- // file creation in them to modify the directory mtime
+- if hdr.Typeflag == tar.TypeDir {
+- dirs = append(dirs, hdr)
+- }
+- }
+- for _, hdr := range dirs {
+- path := filepath.Join(dest, hdr.Name)
+
+- finfo := hdr.FileInfo()
+- // I believe the old version was using time.Now().UTC() to overcome an
+- // invalid error from chtimes.....but here we lose hdr.AccessTime like this...
+- if err := os.Chtimes(path, time.Now().UTC(), finfo.ModTime()); err != nil {
+- return errors.Wrap(err, "error changing time")
++ if err := os.Symlink(header.Linkname, path); err != nil {
++ return false, err
+ }
++ case tar.TypeXGlobalHeader:
++ return false, nil
+ }
+- return nil
++
++ return false, nil
+ }
+--
+2.4.0.53.g8440f74
+
diff --git a/import-layers/meta-virtualization/recipes-containers/oci-image-tools/oci-image-tools_git.bb b/import-layers/meta-virtualization/recipes-containers/oci-image-tools/oci-image-tools_git.bb
index 29a892664..68d73c37e 100644
--- a/import-layers/meta-virtualization/recipes-containers/oci-image-tools/oci-image-tools_git.bb
+++ b/import-layers/meta-virtualization/recipes-containers/oci-image-tools/oci-image-tools_git.bb
@@ -1,7 +1,7 @@
HOMEPAGE = "https://github.com/opencontainers/image-tools"
SUMMARY = "A collection of tools for working with the OCI image format specification"
LICENSE = "Apache-2"
-LIC_FILES_CHKSUM = "file://LICENSE;md5=e3fc50a88d0a364313df4b21ef20c29e"
+LIC_FILES_CHKSUM = "file://src/import/LICENSE;md5=e3fc50a88d0a364313df4b21ef20c29e"
DEPENDS = "\
oci-image-spec \
@@ -12,11 +12,13 @@ DEPENDS = "\
spf13-pflag \
"
-SRC_URI = "git://github.com/opencontainers/image-tools.git"
-SRCREV = "a358e03fde4e3628bf9fb7656bf643b63f975636"
-PV = "0.1.0+git${SRCPV}"
+SRC_URI = "git://github.com/opencontainers/image-tools.git \
+ file://0001-image-manifest-Recursively-remove-pre-existing-entri.patch \
+ file://0002-image-manifest-Split-unpackLayerEntry-into-its-own-f.patch"
-S = "${WORKDIR}/git"
+SRCREV = "4abe1a166f9be97e8e71b1bb4d7599cc29323011"
+PV = "0.2.0-dev+git${SRCPV}"
+GO_IMPORT = "import"
inherit goarch
inherit go
@@ -35,10 +37,11 @@ do_compile() {
#
# We also need to link in the ipallocator directory as that is not under
# a src directory.
- ln -sfn . "${S}/vendor/src"
- mkdir -p "${S}/vendor/src/github.com/opencontainers/image-tools/"
- ln -sfn "${S}/image" "${S}/vendor/src/github.com/opencontainers/image-tools/image"
- export GOPATH="${S}/vendor"
+ ln -sfn . "${S}/src/import/vendor/src"
+ mkdir -p "${S}/src/import/vendor/src/github.com/opencontainers/image-tools/"
+ ln -sfn "${S}/src/import/image" "${S}/src/import/vendor/src/github.com/opencontainers/image-tools/image"
+ ln -sfn "${S}/src/import/version" "${S}/src/import/vendor/src/github.com/opencontainers/image-tools/version"
+ export GOPATH="${S}/src/import/vendor"
# Pass the needed cflags/ldflags so that cgo
# can find the needed headers files and libraries
@@ -47,15 +50,14 @@ do_compile() {
export LDFLAGS=""
export CGO_CFLAGS="${BUILDSDK_CFLAGS} --sysroot=${STAGING_DIR_TARGET}"
export CGO_LDFLAGS="${BUILDSDK_LDFLAGS} --sysroot=${STAGING_DIR_TARGET}"
+ cd ${S}/src/import
- oe_runmake tools
+ oe_runmake tool
}
do_install() {
install -d ${D}/${sbindir}
- install ${S}/oci-create-runtime-bundle ${D}/${sbindir}/
- install ${S}/oci-image-validate ${D}/${sbindir}/
- install ${S}/oci-unpack ${D}/${sbindir}/
+ install ${S}/src/import/oci-image-tool ${D}/${sbindir}/
}
INSANE_SKIP_${PN} += "ldflags"
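The do_compile() fix-ups above follow the vendoring pattern this commit applies to several of the Go recipes (riddler, oci-runtime-tools and kubernetes get the same treatment): the fetched tree sits under ${S}/src/import, GOPATH is pointed at its vendor directory, and a 'vendor/src -> .' symlink plus per-project symlinks let both vendored and in-tree packages resolve as $GOPATH/src/<import-path>. A stripped-down sketch of the idea, with placeholder names (example.com/proj and pkg are illustrative, not taken from any recipe in this commit):

    # Make vendor/ double as $GOPATH/src so vendored packages resolve,
    # then expose the project's own package under its import path.
    cd ${S}/src/import
    ln -sfn . vendor/src
    mkdir -p vendor/src/example.com/proj
    ln -sfn ${S}/src/import/pkg vendor/src/example.com/proj/pkg
    export GOPATH="${S}/src/import/vendor"
    # After this, 'import "example.com/proj/pkg"' and anything under
    # vendor/github.com/... both resolve through $GOPATH/src.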
diff --git a/import-layers/meta-virtualization/recipes-containers/oci-runtime-spec/oci-runtime-spec_git.bb b/import-layers/meta-virtualization/recipes-containers/oci-runtime-spec/oci-runtime-spec_git.bb
index 82f852f97..deba7b3e1 100644
--- a/import-layers/meta-virtualization/recipes-containers/oci-runtime-spec/oci-runtime-spec_git.bb
+++ b/import-layers/meta-virtualization/recipes-containers/oci-runtime-spec/oci-runtime-spec_git.bb
@@ -9,8 +9,8 @@ SRCNAME = "runtime-spec"
PKG_NAME = "github.com/opencontainers/${SRCNAME}"
SRC_URI = "git://${PKG_NAME}.git;destsuffix=git/src/${PKG_NAME}"
-SRCREV = "4af0c72f92aacf1b43618d7986197d8209fadf0b"
-PV = "v1.0.0-rc4+git${SRCPV}"
+SRCREV = "a39b1cd4fdf7743ab721cc9da58abbee2f8624d1"
+PV = "v1.0.0-rc6+git${SRCPV}"
S = "${WORKDIR}/git"
@@ -38,4 +38,4 @@ runtime_spec_file_sysroot_preprocess () {
FILES_${PN} += "${prefix}/local/go/src/${PKG_NAME}/*"
-CLEANBROKEN = "1" \ No newline at end of file
+CLEANBROKEN = "1"
diff --git a/import-layers/meta-virtualization/recipes-containers/oci-runtime-tools/files/0001-Revert-implement-add-set-function-for-hooks-items.patch b/import-layers/meta-virtualization/recipes-containers/oci-runtime-tools/files/0001-Revert-implement-add-set-function-for-hooks-items.patch
new file mode 100644
index 000000000..99a9310b9
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/oci-runtime-tools/files/0001-Revert-implement-add-set-function-for-hooks-items.patch
@@ -0,0 +1,202 @@
+From 2911eaabab92ec2cdea2b173c3429db4a52bee2f Mon Sep 17 00:00:00 2001
+From: Bruce Ashfield <bruce.ashfield@windriver.com>
+Date: Wed, 20 Sep 2017 23:28:52 -0400
+Subject: [PATCH] Revert "implement add/set function for hooks items"
+
+This reverts commit df3a46feb971386f922c7c2c2822b88301f87cb0.
+---
+ cmd/oci-runtime-tool/generate.go | 12 ++++++------
+ generate/generate.go | 42 ++++++----------------------------------
+ 2 files changed, 12 insertions(+), 42 deletions(-)
+
+diff --git a/src/import/cmd/oci-runtime-tool/generate.go b/src/import/cmd/oci-runtime-tool/generate.go
+index ed11fe8f3729..7121ce5fe07e 100644
+--- a/src/import/cmd/oci-runtime-tool/generate.go
++++ b/src/import/cmd/oci-runtime-tool/generate.go
+@@ -354,7 +354,7 @@ func setupSpec(g *generate.Generator, context *cli.Context) error {
+ for _, postStartEnv := range postStartEnvs {
+ path, env, err := parseHookEnv(postStartEnv)
+ if err != nil {
+- return err
++ return nil
+ }
+ g.AddPostStartHookEnv(path, env)
+ }
+@@ -387,7 +387,7 @@ func setupSpec(g *generate.Generator, context *cli.Context) error {
+ for _, postStopEnv := range postStopEnvs {
+ path, env, err := parseHookEnv(postStopEnv)
+ if err != nil {
+- return err
++ return nil
+ }
+ g.AddPostStopHookEnv(path, env)
+ }
+@@ -398,7 +398,7 @@ func setupSpec(g *generate.Generator, context *cli.Context) error {
+ for _, postStopTimeout := range postStopTimeouts {
+ path, timeout, err := parseHookTimeout(postStopTimeout)
+ if err != nil {
+- return err
++ return nil
+ }
+ g.AddPostStopHookTimeout(path, timeout)
+ }
+@@ -409,7 +409,7 @@ func setupSpec(g *generate.Generator, context *cli.Context) error {
+ for _, hook := range preStartHooks {
+ path, args, err := parseHook(hook)
+ if err != nil {
+- return err
++ return nil
+ }
+ g.AddPreStartHook(path, args)
+ }
+@@ -420,7 +420,7 @@ func setupSpec(g *generate.Generator, context *cli.Context) error {
+ for _, preStartEnv := range preStartEnvs {
+ path, env, err := parseHookEnv(preStartEnv)
+ if err != nil {
+- return err
++ return nil
+ }
+ g.AddPreStartHookEnv(path, env)
+ }
+@@ -431,7 +431,7 @@ func setupSpec(g *generate.Generator, context *cli.Context) error {
+ for _, preStartTimeout := range preStartTimeouts {
+ path, timeout, err := parseHookTimeout(preStartTimeout)
+ if err != nil {
+- return err
++ return nil
+ }
+ g.AddPreStartHookTimeout(path, timeout)
+ }
+diff --git a/src/import/generate/generate.go b/src/import/generate/generate.go
+index 84762c3cbd05..ef5d2cc95b3c 100644
+--- a/src/import/generate/generate.go
++++ b/src/import/generate/generate.go
+@@ -744,39 +744,29 @@ func (g *Generator) ClearPreStartHooks() {
+ func (g *Generator) AddPreStartHook(path string, args []string) {
+ g.initSpecHooks()
+ hook := rspec.Hook{Path: path, Args: args}
+- for i, hook := range g.spec.Hooks.Prestart {
+- if hook.Path == path {
+- g.spec.Hooks.Prestart[i] = hook
+- return
+- }
+- }
+ g.spec.Hooks.Prestart = append(g.spec.Hooks.Prestart, hook)
+ }
+
+ // AddPreStartHookEnv adds envs of a prestart hook into g.spec.Hooks.Prestart.
+ func (g *Generator) AddPreStartHookEnv(path string, envs []string) {
+- g.initSpecHooks()
++ g.initSpec()
+ for i, hook := range g.spec.Hooks.Prestart {
+ if hook.Path == path {
+ g.spec.Hooks.Prestart[i].Env = envs
+ return
+ }
+ }
+- hook := rspec.Hook{Path: path, Env: envs}
+- g.spec.Hooks.Prestart = append(g.spec.Hooks.Prestart, hook)
+ }
+
+ // AddPreStartHookTimeout adds timeout of a prestart hook into g.spec.Hooks.Prestart.
+ func (g *Generator) AddPreStartHookTimeout(path string, timeout int) {
+- g.initSpecHooks()
++ g.initSpec()
+ for i, hook := range g.spec.Hooks.Prestart {
+ if hook.Path == path {
+ g.spec.Hooks.Prestart[i].Timeout = &timeout
+ return
+ }
+ }
+- hook := rspec.Hook{Path: path, Timeout: &timeout}
+- g.spec.Hooks.Prestart = append(g.spec.Hooks.Prestart, hook)
+ }
+
+ // ClearPostStopHooks clear g.spec.Hooks.Poststop.
+@@ -794,39 +784,29 @@ func (g *Generator) ClearPostStopHooks() {
+ func (g *Generator) AddPostStopHook(path string, args []string) {
+ g.initSpecHooks()
+ hook := rspec.Hook{Path: path, Args: args}
+- for i, hook := range g.spec.Hooks.Poststop {
+- if hook.Path == path {
+- g.spec.Hooks.Poststop[i] = hook
+- return
+- }
+- }
+ g.spec.Hooks.Poststop = append(g.spec.Hooks.Poststop, hook)
+ }
+
+ // AddPostStopHookEnv adds envs of a poststop hook into g.spec.Hooks.Poststop.
+ func (g *Generator) AddPostStopHookEnv(path string, envs []string) {
+- g.initSpecHooks()
++ g.initSpec()
+ for i, hook := range g.spec.Hooks.Poststop {
+ if hook.Path == path {
+ g.spec.Hooks.Poststop[i].Env = envs
+ return
+ }
+ }
+- hook := rspec.Hook{Path: path, Env: envs}
+- g.spec.Hooks.Poststop = append(g.spec.Hooks.Poststop, hook)
+ }
+
+ // AddPostStopHookTimeout adds timeout of a poststop hook into g.spec.Hooks.Poststop.
+ func (g *Generator) AddPostStopHookTimeout(path string, timeout int) {
+- g.initSpecHooks()
++ g.initSpec()
+ for i, hook := range g.spec.Hooks.Poststop {
+ if hook.Path == path {
+ g.spec.Hooks.Poststop[i].Timeout = &timeout
+ return
+ }
+ }
+- hook := rspec.Hook{Path: path, Timeout: &timeout}
+- g.spec.Hooks.Poststop = append(g.spec.Hooks.Poststop, hook)
+ }
+
+ // ClearPostStartHooks clear g.spec.Hooks.Poststart.
+@@ -844,39 +824,29 @@ func (g *Generator) ClearPostStartHooks() {
+ func (g *Generator) AddPostStartHook(path string, args []string) {
+ g.initSpecHooks()
+ hook := rspec.Hook{Path: path, Args: args}
+- for i, hook := range g.spec.Hooks.Poststart {
+- if hook.Path == path {
+- g.spec.Hooks.Poststart[i] = hook
+- return
+- }
+- }
+ g.spec.Hooks.Poststart = append(g.spec.Hooks.Poststart, hook)
+ }
+
+ // AddPostStartHookEnv adds envs of a poststart hook into g.spec.Hooks.Poststart.
+ func (g *Generator) AddPostStartHookEnv(path string, envs []string) {
+- g.initSpecHooks()
++ g.initSpec()
+ for i, hook := range g.spec.Hooks.Poststart {
+ if hook.Path == path {
+ g.spec.Hooks.Poststart[i].Env = envs
+ return
+ }
+ }
+- hook := rspec.Hook{Path: path, Env: envs}
+- g.spec.Hooks.Poststart = append(g.spec.Hooks.Poststart, hook)
+ }
+
+ // AddPostStartHookTimeout adds timeout of a poststart hook into g.spec.Hooks.Poststart.
+ func (g *Generator) AddPostStartHookTimeout(path string, timeout int) {
+- g.initSpecHooks()
++ g.initSpec()
+ for i, hook := range g.spec.Hooks.Poststart {
+ if hook.Path == path {
+ g.spec.Hooks.Poststart[i].Timeout = &timeout
+ return
+ }
+ }
+- hook := rspec.Hook{Path: path, Timeout: &timeout}
+- g.spec.Hooks.Poststart = append(g.spec.Hooks.Poststart, hook)
+ }
+
+ // AddTmpfsMount adds a tmpfs mount into g.spec.Mounts.
+--
+2.4.0.53.g8440f74
+
diff --git a/import-layers/meta-virtualization/recipes-containers/oci-runtime-tools/oci-runtime-tools_git.bb b/import-layers/meta-virtualization/recipes-containers/oci-runtime-tools/oci-runtime-tools_git.bb
index 4f77dced2..61b68b5ff 100644
--- a/import-layers/meta-virtualization/recipes-containers/oci-runtime-tools/oci-runtime-tools_git.bb
+++ b/import-layers/meta-virtualization/recipes-containers/oci-runtime-tools/oci-runtime-tools_git.bb
@@ -1,14 +1,17 @@
HOMEPAGE = "https://github.com/opencontainers/runtime-tools"
SUMMARY = "oci-runtime-tool is a collection of tools for working with the OCI runtime specification"
LICENSE = "GPLv2"
-LIC_FILES_CHKSUM = "file://LICENSE;md5=b355a61a394a504dacde901c958f662c"
+LIC_FILES_CHKSUM = "file://src/import/LICENSE;md5=b355a61a394a504dacde901c958f662c"
-SRC_URI = "git://github.com/opencontainers/runtime-tools.git"
+SRC_URI = "git://github.com/opencontainers/runtime-tools.git \
+ file://0001-Revert-implement-add-set-function-for-hooks-items.patch \
+ "
-SRCREV = "038b0c99b82f3c08de31f6b09e693eb24644affd"
-PV = "0.0.1+git${SRCPV}"
+SRCREV = "6e7da8148f4de2c9e9c9d3b345576898d4f412cb"
+PV = "0.1.0+git${SRCPV}"
+GO_IMPORT = "import"
-S = "${WORKDIR}/git"
+INSANE_SKIP_${PN} += "ldflags"
inherit goarch
inherit go
@@ -16,7 +19,7 @@ inherit go
do_compile() {
export GOARCH="${TARGET_GOARCH}"
export GOROOT="${STAGING_LIBDIR_NATIVE}/${TARGET_SYS}/go"
- export GOPATH="${S}"
+ export GOPATH="${S}/src/import:${S}/src/import/vendor"
# Pass the needed cflags/ldflags so that cgo
# can find the needed headers files and libraries
@@ -26,10 +29,24 @@ do_compile() {
export CGO_CFLAGS="${BUILDSDK_CFLAGS} --sysroot=${STAGING_DIR_TARGET}"
export CGO_LDFLAGS="${BUILDSDK_LDFLAGS} --sysroot=${STAGING_DIR_TARGET}"
+ # link fixups for compilation
+ rm -f ${S}/src/import/vendor/src
+ ln -sf ./ ${S}/src/import/vendor/src
+ mkdir -p ${S}/src/import/vendor/github.com/opencontainers/runtime-tools
+ ln -sf ../../../../generate ${S}/src/import/vendor/github.com/opencontainers/runtime-tools/generate
+ ln -sf ../../../../validate ${S}/src/import/vendor/github.com/opencontainers/runtime-tools/validate
+ ln -sf ../../../../cmd ${S}/src/import/vendor/github.com/opencontainers/runtime-tools/cmd
+ ln -sf ../../../../error ${S}/src/import/vendor/github.com/opencontainers/runtime-tools/error
+ ln -sf ../../../../specerror ${S}/src/import/vendor/github.com/opencontainers/runtime-tools/specerror
+ cd ${S}/src/import
+
oe_runmake
}
do_install() {
install -d ${D}/${sbindir}
- install ${S}/oci-runtime-tool ${D}/${sbindir}/oci-runtime-tool
+ install ${S}/src/import/oci-runtime-tool ${D}/${sbindir}/oci-runtime-tool
}
+
+deltask compile_ptest_base
+
diff --git a/import-layers/meta-virtualization/recipes-containers/oci-systemd-hook/oci-systemd-hook/0001-selinux-drop-selinux-support.patch b/import-layers/meta-virtualization/recipes-containers/oci-systemd-hook/oci-systemd-hook/0001-selinux-drop-selinux-support.patch
index 507cd08e6..5016f6e75 100644
--- a/import-layers/meta-virtualization/recipes-containers/oci-systemd-hook/oci-systemd-hook/0001-selinux-drop-selinux-support.patch
+++ b/import-layers/meta-virtualization/recipes-containers/oci-systemd-hook/oci-systemd-hook/0001-selinux-drop-selinux-support.patch
@@ -5,25 +5,34 @@ Subject: [PATCH] selinux: drop selinux support
Signed-off-by: Bruce Ashfield <bruce.ashfield@windriver.com>
---
- src/systemdhook.c | 9 ---------
- 1 file changed, 9 deletions(-)
+ src/systemdhook.c | 12 ------------
+ 1 file changed, 12 deletions(-)
-diff --git a/src/systemdhook.c b/src/systemdhook.c
-index 274e5b910699..b9e8f1be376a 100644
--- a/src/systemdhook.c
+++ b/src/systemdhook.c
-@@ -14,7 +14,6 @@
+@@ -16,7 +16,6 @@
#include <errno.h>
#include <inttypes.h>
#include <linux/limits.h>
-#include <selinux/selinux.h>
#include <yajl/yajl_tree.h>
+ #include <stdbool.h>
- #include "config.h"
-@@ -538,14 +537,6 @@ static int prestart(const char *rootfs,
+@@ -129,9 +128,6 @@ static int chperm(const char *path, cons
+ closedir(dir);
+ return -1;
}
- }
+- if (setfilecon (full_path, label) < 0) {
+- pr_perror("Failed to set context %s on %s", label, full_path);
+- }
+ if (doChown) {
+ /* Change uid and gid to something the container can handle */
+@@ -496,14 +492,6 @@ static int prestart(const char *rootfs,
+ return -1;
+ }
+ }
+-
- if (strcmp("", mount_label)) {
- rc = setfilecon(journal_dir, (security_context_t)mount_label);
- if (rc < 0) {
@@ -31,10 +40,6 @@ index 274e5b910699..b9e8f1be376a 100644
- return -1;
- }
- }
--
- if (makepath(cont_journal_dir, 0755) == -1) {
- if (errno != EEXIST) {
- pr_perror("Failed to mkdir container journal dir: %s", cont_journal_dir);
---
-2.4.0.53.g8440f74
-
+
+ /* Attempt to creare /var/log/journal inside of rootfs,
+ if successful, or directory exists, mount tmpfs on top of
diff --git a/import-layers/meta-virtualization/recipes-containers/oci-systemd-hook/oci-systemd-hook_git.bb b/import-layers/meta-virtualization/recipes-containers/oci-systemd-hook/oci-systemd-hook_git.bb
index 872872a11..fc8890533 100644
--- a/import-layers/meta-virtualization/recipes-containers/oci-systemd-hook/oci-systemd-hook_git.bb
+++ b/import-layers/meta-virtualization/recipes-containers/oci-systemd-hook/oci-systemd-hook_git.bb
@@ -6,7 +6,7 @@ PRIORITY = "optional"
DEPENDS = "yajl util-linux"
-SRCREV = "ca515c1f399bd0b16e94b7c34aa1ef20498beca6"
+SRCREV = "1ac958a4197a9ea52174812fc7d7d036af8140d3"
SRC_URI = "git://github.com/projectatomic/oci-systemd-hook \
file://0001-selinux-drop-selinux-support.patch \
file://0001-configure-drop-selinux-support.patch \
diff --git a/import-layers/meta-virtualization/recipes-containers/riddler/riddler_git.bb b/import-layers/meta-virtualization/recipes-containers/riddler/riddler_git.bb
index ae6c5ec96..9f7fe6b4b 100644
--- a/import-layers/meta-virtualization/recipes-containers/riddler/riddler_git.bb
+++ b/import-layers/meta-virtualization/recipes-containers/riddler/riddler_git.bb
@@ -1,11 +1,12 @@
HOMEPAGE = "https://github.com/jfrazelle/riddler"
SUMMARY = "Convert `docker inspect` to opencontainers (OCI compatible) runc spec."
LICENSE = "MIT"
-LIC_FILES_CHKSUM = "file://LICENSE;md5=20ce4c6a4f32d6ee4a68e3a7506db3f1"
+LIC_FILES_CHKSUM = "file://src/import/LICENSE;md5=20ce4c6a4f32d6ee4a68e3a7506db3f1"
SRC_URI = "git://github.com/jfrazelle/riddler;branch=master"
SRCREV = "23befa0b232877b5b502b828e24161d801bd67f6"
PV = "0.1.0+git${SRCPV}"
+GO_IMPORT = "import"
S = "${WORKDIR}/git"
@@ -26,10 +27,10 @@ do_compile() {
#
# We also need to link in the ipallocator directory as that is not under
# a src directory.
- ln -sfn . "${S}/vendor/src"
- mkdir -p "${S}/vendor/src/github.com/jessfraz/riddler"
- ln -sfn "${S}/parse" "${S}/vendor/src/github.com/jessfraz/riddler/parse"
- export GOPATH="${S}/vendor"
+ ln -sfn . "${S}/src/import/vendor/src"
+ mkdir -p "${S}/src/import/vendor/src/github.com/jessfraz/riddler"
+ ln -sfn "${S}/src/import/parse" "${S}/src/import/vendor/src/github.com/jessfraz/riddler/parse"
+ export GOPATH="${S}/src/import/vendor"
# Pass the needed cflags/ldflags so that cgo
# can find the needed headers files and libraries
@@ -38,11 +39,12 @@ do_compile() {
export LDFLAGS=""
export CGO_CFLAGS="${BUILDSDK_CFLAGS} --sysroot=${STAGING_DIR_TARGET}"
export CGO_LDFLAGS="${BUILDSDK_LDFLAGS} --sysroot=${STAGING_DIR_TARGET}"
+ cd ${S}/src/import
oe_runmake static
}
do_install() {
install -d ${D}/${sbindir}
- install ${S}/riddler ${D}/${sbindir}/riddler
+ install ${S}/src/import/riddler ${D}/${sbindir}/riddler
}
diff --git a/import-layers/meta-virtualization/recipes-containers/runc/runc-docker/0001-Disable-building-recvtty.patch b/import-layers/meta-virtualization/recipes-containers/runc/runc-docker/0001-Disable-building-recvtty.patch
new file mode 100644
index 000000000..fa1f695b2
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/runc/runc-docker/0001-Disable-building-recvtty.patch
@@ -0,0 +1,26 @@
+From aa2fc7b0eacba61175f083cc8d8adc233bcd0575 Mon Sep 17 00:00:00 2001
+From: Paul Barker <pbarker@toganlabs.com>
+Date: Thu, 12 Oct 2017 11:34:24 +0000
+Subject: [PATCH] Disable building recvtty
+
+Signed-off-by: Paul Barker <pbarker@toganlabs.com>
+Upstream-status: Inappropriate
+---
+ Makefile | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/Makefile b/Makefile
+index 0fcf508..24f47dc 100644
+--- a/src/import/Makefile
++++ b/src/import/Makefile
+@@ -38,7 +38,6 @@ contrib/cmd/recvtty/recvtty: $(SOURCES)
+
+ static: $(SOURCES)
+ CGO_ENABLED=1 $(GO) build -i $(EXTRA_FLAGS) -tags "$(BUILDTAGS) cgo static_build" -ldflags "-w -extldflags -static -X main.gitCommit=${COMMIT} -X main.version=${VERSION} $(EXTRA_LDFLAGS)" -o runc .
+- CGO_ENABLED=1 $(GO) build -i $(EXTRA_FLAGS) -tags "$(BUILDTAGS) cgo static_build" -ldflags "-w -extldflags -static -X main.gitCommit=${COMMIT} -X main.version=${VERSION} $(EXTRA_LDFLAGS)" -o contrib/cmd/recvtty/recvtty ./contrib/cmd/recvtty
+
+ release:
+ @flag_list=(seccomp selinux apparmor static); \
+--
+2.7.4
+
diff --git a/import-layers/meta-virtualization/recipes-containers/runc/runc-docker/0001-Update-to-runtime-spec-198f23f827eea397d4331d7eb048d.patch b/import-layers/meta-virtualization/recipes-containers/runc/runc-docker/0001-Update-to-runtime-spec-198f23f827eea397d4331d7eb048d.patch
new file mode 100644
index 000000000..bcc76fc4e
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/runc/runc-docker/0001-Update-to-runtime-spec-198f23f827eea397d4331d7eb048d.patch
@@ -0,0 +1,89 @@
+From e8ef6025a4f48620baf91737cd37eb5e6a40f48c Mon Sep 17 00:00:00 2001
+From: Justin Cormack <justin.cormack@docker.com>
+Date: Fri, 23 Jun 2017 17:14:59 -0700
+Subject: [PATCH 1/3] Update to runtime spec
+ 198f23f827eea397d4331d7eb048d9d4c7ff7bee
+
+Updates memory limits to be int64, and removes Platform from spec.
+
+Signed-off-by: Justin Cormack <justin.cormack@docker.com>
+---
+ vendor.conf | 2 +-
+ .../opencontainers/runtime-spec/specs-go/config.go | 23 ++++++----------------
+ 2 files changed, 7 insertions(+), 18 deletions(-)
+
+diff --git a/vendor.conf b/vendor.conf
+index e23e7ea7..09a8a924 100644
+--- a/src/import/vendor.conf
++++ b/src/import/vendor.conf
+@@ -1,7 +1,7 @@
+ # OCI runtime-spec. When updating this, make sure you use a version tag rather
+ # than a commit ID so it's much more obvious what version of the spec we are
+ # using.
+-github.com/opencontainers/runtime-spec 239c4e44f2a612ed85f6db9c66247aa33f437e91
++github.com/opencontainers/runtime-spec 198f23f827eea397d4331d7eb048d9d4c7ff7bee
+ # Core libcontainer functionality.
+ github.com/mrunalp/fileutils ed869b029674c0e9ce4c0dfa781405c2d9946d08
+ github.com/opencontainers/selinux v1.0.0-rc1
+diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go
+index 8bf8d924..68ab112e 100644
+--- a/src/import/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go
++++ b/src/import/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go
+@@ -6,8 +6,6 @@ import "os"
+ type Spec struct {
+ // Version of the Open Container Runtime Specification with which the bundle complies.
+ Version string `json:"ociVersion"`
+- // Platform specifies the configuration's target platform.
+- Platform Platform `json:"platform"`
+ // Process configures the container process.
+ Process *Process `json:"process,omitempty"`
+ // Root configures the container's root filesystem.
+@@ -101,15 +99,6 @@ type Root struct {
+ Readonly bool `json:"readonly,omitempty"`
+ }
+
+-// Platform specifies OS and arch information for the host system that the container
+-// is created for.
+-type Platform struct {
+- // OS is the operating system.
+- OS string `json:"os"`
+- // Arch is the architecture
+- Arch string `json:"arch"`
+-}
+-
+ // Mount specifies a mount for a container.
+ type Mount struct {
+ // Destination is the absolute path where the mount will be placed in the container.
+@@ -284,15 +273,15 @@ type LinuxBlockIO struct {
+ // LinuxMemory for Linux cgroup 'memory' resource management
+ type LinuxMemory struct {
+ // Memory limit (in bytes).
+- Limit *uint64 `json:"limit,omitempty"`
++ Limit *int64 `json:"limit,omitempty"`
+ // Memory reservation or soft_limit (in bytes).
+- Reservation *uint64 `json:"reservation,omitempty"`
++ Reservation *int64 `json:"reservation,omitempty"`
+ // Total memory limit (memory + swap).
+- Swap *uint64 `json:"swap,omitempty"`
++ Swap *int64 `json:"swap,omitempty"`
+ // Kernel memory limit (in bytes).
+- Kernel *uint64 `json:"kernel,omitempty"`
++ Kernel *int64 `json:"kernel,omitempty"`
+ // Kernel memory limit for tcp (in bytes)
+- KernelTCP *uint64 `json:"kernelTCP,omitempty"`
++ KernelTCP *int64 `json:"kernelTCP,omitempty"`
+ // How aggressive the kernel will swap memory pages.
+ Swappiness *uint64 `json:"swappiness,omitempty"`
+ }
+@@ -486,7 +475,7 @@ type WindowsNetwork struct {
+ EndpointList []string `json:"endpointList,omitempty"`
+ // Specifies if unqualified DNS name resolution is allowed.
+ AllowUnqualifiedDNSQuery bool `json:"allowUnqualifiedDNSQuery,omitempty"`
+- // Comma seperated list of DNS suffixes to use for name resolution.
++ // Comma separated list of DNS suffixes to use for name resolution.
+ DNSSearchList []string `json:"DNSSearchList,omitempty"`
+ // Name (ID) of the container that we will share with the network stack.
+ NetworkSharedContainerName string `json:"networkSharedContainerName,omitempty"`
+--
+2.11.0
+
diff --git a/import-layers/meta-virtualization/recipes-containers/runc/runc-docker/0001-Use-correct-go-cross-compiler.patch b/import-layers/meta-virtualization/recipes-containers/runc/runc-docker/0001-Use-correct-go-cross-compiler.patch
new file mode 100644
index 000000000..8f5171ab1
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/runc/runc-docker/0001-Use-correct-go-cross-compiler.patch
@@ -0,0 +1,85 @@
+From 037c20b3b3ef5e9ead0282aa64f9b88c0c18934d Mon Sep 17 00:00:00 2001
+From: Paul Barker <pbarker@toganlabs.com>
+Date: Thu, 5 Oct 2017 13:14:40 +0000
+Subject: [PATCH] Use correct go cross-compiler
+
+We need to use '${GO}' as set by OpenEmbedded instead of just 'go'. Just using
+'go' will invoke go-native.
+
+Signed-off-by: Paul Barker <pbarker@toganlabs.com>
+Upstream-status: Inappropriate
+---
+ Makefile | 20 ++++++++++----------
+ 1 file changed, 10 insertions(+), 10 deletions(-)
+
+diff --git a/Makefile b/Makefile
+index 8117892..0fcf508 100644
+--- a/src/import/Makefile
++++ b/src/import/Makefile
+@@ -27,18 +27,18 @@ SHELL := $(shell command -v bash 2>/dev/null)
+ .DEFAULT: runc
+
+ runc: $(SOURCES)
+- go build -i $(EXTRA_FLAGS) -ldflags "-X main.gitCommit=${COMMIT} -X main.version=${VERSION} $(EXTRA_LDFLAGS)" -tags "$(BUILDTAGS)" -o runc .
++ $(GO) build -i $(EXTRA_FLAGS) -ldflags "-X main.gitCommit=${COMMIT} -X main.version=${VERSION} $(EXTRA_LDFLAGS)" -tags "$(BUILDTAGS)" -o runc .
+
+ all: runc recvtty
+
+ recvtty: contrib/cmd/recvtty/recvtty
+
+ contrib/cmd/recvtty/recvtty: $(SOURCES)
+- go build -i $(EXTRA_FLAGS) -ldflags "-X main.gitCommit=${COMMIT} -X main.version=${VERSION} $(EXTRA_LDFLAGS)" -tags "$(BUILDTAGS)" -o contrib/cmd/recvtty/recvtty ./contrib/cmd/recvtty
++ $(GO) build -i $(EXTRA_FLAGS) -ldflags "-X main.gitCommit=${COMMIT} -X main.version=${VERSION} $(EXTRA_LDFLAGS)" -tags "$(BUILDTAGS)" -o contrib/cmd/recvtty/recvtty ./contrib/cmd/recvtty
+
+ static: $(SOURCES)
+- CGO_ENABLED=1 go build -i $(EXTRA_FLAGS) -tags "$(BUILDTAGS) cgo static_build" -ldflags "-w -extldflags -static -X main.gitCommit=${COMMIT} -X main.version=${VERSION} $(EXTRA_LDFLAGS)" -o runc .
+- CGO_ENABLED=1 go build -i $(EXTRA_FLAGS) -tags "$(BUILDTAGS) cgo static_build" -ldflags "-w -extldflags -static -X main.gitCommit=${COMMIT} -X main.version=${VERSION} $(EXTRA_LDFLAGS)" -o contrib/cmd/recvtty/recvtty ./contrib/cmd/recvtty
++ CGO_ENABLED=1 $(GO) build -i $(EXTRA_FLAGS) -tags "$(BUILDTAGS) cgo static_build" -ldflags "-w -extldflags -static -X main.gitCommit=${COMMIT} -X main.version=${VERSION} $(EXTRA_LDFLAGS)" -o runc .
++ CGO_ENABLED=1 $(GO) build -i $(EXTRA_FLAGS) -tags "$(BUILDTAGS) cgo static_build" -ldflags "-w -extldflags -static -X main.gitCommit=${COMMIT} -X main.version=${VERSION} $(EXTRA_LDFLAGS)" -o contrib/cmd/recvtty/recvtty ./contrib/cmd/recvtty
+
+ release:
+ @flag_list=(seccomp selinux apparmor static); \
+@@ -62,15 +62,15 @@ release:
+ CGO_ENABLED=1; \
+ }; \
+ echo "Building target: $$output"; \
+- go build -i $(EXTRA_FLAGS) -ldflags "$$ldflags $(EXTRA_LDFLAGS)" -tags "$$tags" -o "$$output" .; \
++ $(GO) build -i $(EXTRA_FLAGS) -ldflags "$$ldflags $(EXTRA_LDFLAGS)" -tags "$$tags" -o "$$output" .; \
+ done
+
+ dbuild: runcimage
+ docker run --rm -v $(CURDIR):/go/src/$(PROJECT) --privileged $(RUNC_IMAGE) make clean all
+
+ lint:
+- go vet $(allpackages)
+- go fmt $(allpackages)
++ $(GO) vet $(allpackages)
++ $(GO) fmt $(allpackages)
+
+ man:
+ man/md2man-all.sh
+@@ -88,7 +88,7 @@ unittest: runcimage
+ docker run -e TESTFLAGS -t --privileged --rm -v $(CURDIR):/go/src/$(PROJECT) $(RUNC_IMAGE) make localunittest
+
+ localunittest: all
+- go test -timeout 3m -tags "$(BUILDTAGS)" ${TESTFLAGS} -v $(allpackages)
++ $(GO) test -timeout 3m -tags "$(BUILDTAGS)" ${TESTFLAGS} -v $(allpackages)
+
+ integration: runcimage
+ docker run -e TESTFLAGS -t --privileged --rm -v $(CURDIR):/go/src/$(PROJECT) $(RUNC_IMAGE) make localintegration
+@@ -134,10 +134,10 @@ clean:
+ validate:
+ script/validate-gofmt
+ script/validate-shfmt
+- go vet $(allpackages)
++ $(GO) vet $(allpackages)
+
+ ci: validate localtest
+
+ # memoize allpackages, so that it's executed only once and only if used
+-_allpackages = $(shell go list ./... | grep -v vendor)
++_allpackages = $(shell $(GO) list ./... | grep -v vendor)
+ allpackages = $(if $(__allpackages),,$(eval __allpackages := $$(_allpackages)))$(__allpackages)
+--
+2.7.4
+
diff --git a/import-layers/meta-virtualization/recipes-containers/runc/runc-docker/0001-runc-Add-console-socket-dev-null.patch b/import-layers/meta-virtualization/recipes-containers/runc/runc-docker/0001-runc-Add-console-socket-dev-null.patch
new file mode 100644
index 000000000..48c1250d4
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/runc/runc-docker/0001-runc-Add-console-socket-dev-null.patch
@@ -0,0 +1,33 @@
+From 3fff2a3505fba1d1ff0074edff15708a77f6cfa9 Mon Sep 17 00:00:00 2001
+From: Jason Wessel <jason.wessel@windriver.com>
+Date: Wed, 12 Jul 2017 13:35:03 -0700
+Subject: [PATCH] runc: Add --console-socket=/dev/null
+
+This allows for setting up a detached session where you do not want to
+set the terminal to false in the config.json. More or less this is a
+runtime override.
+
+Signed-off-by: Jason Wessel <jason.wessel@windriver.com>
+---
+ utils_linux.go | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/utils_linux.go b/utils_linux.go
+index 8085f7fe..e6d31b35 100644
+--- a/src/import/utils_linux.go
++++ b/src/import/utils_linux.go
+@@ -227,6 +227,11 @@ type runner struct {
+ }
+
+ func (r *runner) run(config *specs.Process) (int, error) {
++ if (r.consoleSocket == "/dev/null") {
++ r.detach = false
++ r.consoleSocket = ""
++ config.Terminal = false
++ }
+ if err := r.checkTerminal(config); err != nil {
+ r.destroy()
+ return -1, err
+--
+2.11.0
+
diff --git a/import-layers/meta-virtualization/recipes-containers/runc/runc-docker/0002-Remove-Platform-as-no-longer-in-OCI-spec.patch b/import-layers/meta-virtualization/recipes-containers/runc/runc-docker/0002-Remove-Platform-as-no-longer-in-OCI-spec.patch
new file mode 100644
index 000000000..7970dec39
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/runc/runc-docker/0002-Remove-Platform-as-no-longer-in-OCI-spec.patch
@@ -0,0 +1,75 @@
+From e1146182a8cebb5a6133a9e298a5e4acf99652e9 Mon Sep 17 00:00:00 2001
+From: Justin Cormack <justin.cormack@docker.com>
+Date: Fri, 23 Jun 2017 17:16:08 -0700
+Subject: [PATCH 2/3] Remove Platform as no longer in OCI spec
+
+This was never used, just validated, so was removed from spec.
+
+Signed-off-by: Justin Cormack <justin.cormack@docker.com>
+---
+ libcontainer/specconv/example.go | 5 -----
+ spec.go | 14 --------------
+ 2 files changed, 19 deletions(-)
+
+diff --git a/libcontainer/specconv/example.go b/libcontainer/specconv/example.go
+index 33134116..d6621194 100644
+--- a/src/import/libcontainer/specconv/example.go
++++ b/src/import/libcontainer/specconv/example.go
+@@ -2,7 +2,6 @@ package specconv
+
+ import (
+ "os"
+- "runtime"
+ "strings"
+
+ "github.com/opencontainers/runtime-spec/specs-go"
+@@ -15,10 +14,6 @@ func sPtr(s string) *string { return &s }
+ func Example() *specs.Spec {
+ return &specs.Spec{
+ Version: specs.Version,
+- Platform: specs.Platform{
+- OS: runtime.GOOS,
+- Arch: runtime.GOARCH,
+- },
+ Root: specs.Root{
+ Path: "rootfs",
+ Readonly: true,
+diff --git a/spec.go b/spec.go
+index 92d38f57..876937d2 100644
+--- a/src/import/spec.go
++++ b/src/import/spec.go
+@@ -7,7 +7,6 @@ import (
+ "fmt"
+ "io/ioutil"
+ "os"
+- "runtime"
+
+ "github.com/opencontainers/runc/libcontainer/configs"
+ "github.com/opencontainers/runc/libcontainer/specconv"
+@@ -131,9 +130,6 @@ func loadSpec(cPath string) (spec *specs.Spec, err error) {
+ if err = json.NewDecoder(cf).Decode(&spec); err != nil {
+ return nil, err
+ }
+- if err = validatePlatform(&spec.Platform); err != nil {
+- return nil, err
+- }
+ return spec, validateProcessSpec(spec.Process)
+ }
+
+@@ -148,13 +144,3 @@ func createLibContainerRlimit(rlimit specs.LinuxRlimit) (configs.Rlimit, error)
+ Soft: rlimit.Soft,
+ }, nil
+ }
+-
+-func validatePlatform(platform *specs.Platform) error {
+- if platform.OS != runtime.GOOS {
+- return fmt.Errorf("target os %s mismatch with current os %s", platform.OS, runtime.GOOS)
+- }
+- if platform.Arch != runtime.GOARCH {
+- return fmt.Errorf("target arch %s mismatch with current arch %s", platform.Arch, runtime.GOARCH)
+- }
+- return nil
+-}
+--
+2.11.0
+
diff --git a/import-layers/meta-virtualization/recipes-containers/runc/runc-docker/0003-Update-memory-specs-to-use-int64-not-uint64.patch b/import-layers/meta-virtualization/recipes-containers/runc/runc-docker/0003-Update-memory-specs-to-use-int64-not-uint64.patch
new file mode 100644
index 000000000..50a9b7fc7
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/runc/runc-docker/0003-Update-memory-specs-to-use-int64-not-uint64.patch
@@ -0,0 +1,194 @@
+From 3d9074ead33a5c27dc20bb49457c69c6d2ae6b57 Mon Sep 17 00:00:00 2001
+From: Justin Cormack <justin.cormack@docker.com>
+Date: Fri, 23 Jun 2017 17:17:00 -0700
+Subject: [PATCH 3/3] Update memory specs to use int64 not uint64
+
+replace #1492 #1494
+fix #1422
+
+Since https://github.com/opencontainers/runtime-spec/pull/876 the memory
+specifications are now `int64`, as that better matches the visible interface where
+`-1` is a valid value. Otherwise finding the correct value was difficult as it
+was kernel dependent.
+
+Signed-off-by: Justin Cormack <justin.cormack@docker.com>
+---
+ libcontainer/cgroups/fs/memory.go | 36 +++++++++++++++++-------------------
+ libcontainer/configs/cgroup_linux.go | 10 +++++-----
+ update.go | 14 +++++++-------
+ 3 files changed, 29 insertions(+), 31 deletions(-)
+
+diff --git a/libcontainer/cgroups/fs/memory.go b/libcontainer/cgroups/fs/memory.go
+index da2cc9f8..b739c631 100644
+--- a/src/import/libcontainer/cgroups/fs/memory.go
++++ b/src/import/libcontainer/cgroups/fs/memory.go
+@@ -73,14 +73,14 @@ func EnableKernelMemoryAccounting(path string) error {
+ // until a limit is set on the cgroup and limit cannot be set once the
+ // cgroup has children, or if there are already tasks in the cgroup.
+ for _, i := range []int64{1, -1} {
+- if err := setKernelMemory(path, uint64(i)); err != nil {
++ if err := setKernelMemory(path, i); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+
+-func setKernelMemory(path string, kernelMemoryLimit uint64) error {
++func setKernelMemory(path string, kernelMemoryLimit int64) error {
+ if path == "" {
+ return fmt.Errorf("no such directory for %s", cgroupKernelMemoryLimit)
+ }
+@@ -88,7 +88,7 @@ func setKernelMemory(path string, kernelMemoryLimit uint64) error {
+ // kernel memory is not enabled on the system so we should do nothing
+ return nil
+ }
+- if err := ioutil.WriteFile(filepath.Join(path, cgroupKernelMemoryLimit), []byte(strconv.FormatUint(kernelMemoryLimit, 10)), 0700); err != nil {
++ if err := ioutil.WriteFile(filepath.Join(path, cgroupKernelMemoryLimit), []byte(strconv.FormatInt(kernelMemoryLimit, 10)), 0700); err != nil {
+ // Check if the error number returned by the syscall is "EBUSY"
+ // The EBUSY signal is returned on attempts to write to the
+ // memory.kmem.limit_in_bytes file if the cgroup has children or
+@@ -106,14 +106,12 @@ func setKernelMemory(path string, kernelMemoryLimit uint64) error {
+ }
+
+ func setMemoryAndSwap(path string, cgroup *configs.Cgroup) error {
+- ulimited := -1
+-
+- // If the memory update is set to uint64(-1) we should also
+- // set swap to uint64(-1), it means unlimited memory.
+- if cgroup.Resources.Memory == uint64(ulimited) {
+- // Only set swap if it's enbled in kernel
++ // If the memory update is set to -1 we should also
++ // set swap to -1, it means unlimited memory.
++ if cgroup.Resources.Memory == -1 {
++ // Only set swap if it's enabled in kernel
+ if cgroups.PathExists(filepath.Join(path, cgroupMemorySwapLimit)) {
+- cgroup.Resources.MemorySwap = uint64(ulimited)
++ cgroup.Resources.MemorySwap = -1
+ }
+ }
+
+@@ -128,29 +126,29 @@ func setMemoryAndSwap(path string, cgroup *configs.Cgroup) error {
+ // When update memory limit, we should adapt the write sequence
+ // for memory and swap memory, so it won't fail because the new
+ // value and the old value don't fit kernel's validation.
+- if cgroup.Resources.MemorySwap == uint64(ulimited) || memoryUsage.Limit < cgroup.Resources.MemorySwap {
+- if err := writeFile(path, cgroupMemorySwapLimit, strconv.FormatUint(cgroup.Resources.MemorySwap, 10)); err != nil {
++ if cgroup.Resources.MemorySwap == -1 || memoryUsage.Limit < uint64(cgroup.Resources.MemorySwap) {
++ if err := writeFile(path, cgroupMemorySwapLimit, strconv.FormatInt(cgroup.Resources.MemorySwap, 10)); err != nil {
+ return err
+ }
+- if err := writeFile(path, cgroupMemoryLimit, strconv.FormatUint(cgroup.Resources.Memory, 10)); err != nil {
++ if err := writeFile(path, cgroupMemoryLimit, strconv.FormatInt(cgroup.Resources.Memory, 10)); err != nil {
+ return err
+ }
+ } else {
+- if err := writeFile(path, cgroupMemoryLimit, strconv.FormatUint(cgroup.Resources.Memory, 10)); err != nil {
++ if err := writeFile(path, cgroupMemoryLimit, strconv.FormatInt(cgroup.Resources.Memory, 10)); err != nil {
+ return err
+ }
+- if err := writeFile(path, cgroupMemorySwapLimit, strconv.FormatUint(cgroup.Resources.MemorySwap, 10)); err != nil {
++ if err := writeFile(path, cgroupMemorySwapLimit, strconv.FormatInt(cgroup.Resources.MemorySwap, 10)); err != nil {
+ return err
+ }
+ }
+ } else {
+ if cgroup.Resources.Memory != 0 {
+- if err := writeFile(path, cgroupMemoryLimit, strconv.FormatUint(cgroup.Resources.Memory, 10)); err != nil {
++ if err := writeFile(path, cgroupMemoryLimit, strconv.FormatInt(cgroup.Resources.Memory, 10)); err != nil {
+ return err
+ }
+ }
+ if cgroup.Resources.MemorySwap != 0 {
+- if err := writeFile(path, cgroupMemorySwapLimit, strconv.FormatUint(cgroup.Resources.MemorySwap, 10)); err != nil {
++ if err := writeFile(path, cgroupMemorySwapLimit, strconv.FormatInt(cgroup.Resources.MemorySwap, 10)); err != nil {
+ return err
+ }
+ }
+@@ -171,13 +169,13 @@ func (s *MemoryGroup) Set(path string, cgroup *configs.Cgroup) error {
+ }
+
+ if cgroup.Resources.MemoryReservation != 0 {
+- if err := writeFile(path, "memory.soft_limit_in_bytes", strconv.FormatUint(cgroup.Resources.MemoryReservation, 10)); err != nil {
++ if err := writeFile(path, "memory.soft_limit_in_bytes", strconv.FormatInt(cgroup.Resources.MemoryReservation, 10)); err != nil {
+ return err
+ }
+ }
+
+ if cgroup.Resources.KernelMemoryTCP != 0 {
+- if err := writeFile(path, "memory.kmem.tcp.limit_in_bytes", strconv.FormatUint(cgroup.Resources.KernelMemoryTCP, 10)); err != nil {
++ if err := writeFile(path, "memory.kmem.tcp.limit_in_bytes", strconv.FormatInt(cgroup.Resources.KernelMemoryTCP, 10)); err != nil {
+ return err
+ }
+ }
+diff --git a/libcontainer/configs/cgroup_linux.go b/libcontainer/configs/cgroup_linux.go
+index 3e0509de..e15a662f 100644
+--- a/src/import/libcontainer/configs/cgroup_linux.go
++++ b/src/import/libcontainer/configs/cgroup_linux.go
+@@ -43,19 +43,19 @@ type Resources struct {
+ Devices []*Device `json:"devices"`
+
+ // Memory limit (in bytes)
+- Memory uint64 `json:"memory"`
++ Memory int64 `json:"memory"`
+
+ // Memory reservation or soft_limit (in bytes)
+- MemoryReservation uint64 `json:"memory_reservation"`
++ MemoryReservation int64 `json:"memory_reservation"`
+
+ // Total memory usage (memory + swap); set `-1` to enable unlimited swap
+- MemorySwap uint64 `json:"memory_swap"`
++ MemorySwap int64 `json:"memory_swap"`
+
+ // Kernel memory limit (in bytes)
+- KernelMemory uint64 `json:"kernel_memory"`
++ KernelMemory int64 `json:"kernel_memory"`
+
+ // Kernel memory limit for TCP use (in bytes)
+- KernelMemoryTCP uint64 `json:"kernel_memory_tcp"`
++ KernelMemoryTCP int64 `json:"kernel_memory_tcp"`
+
+ // CPU shares (relative weight vs. other containers)
+ CpuShares uint64 `json:"cpu_shares"`
+diff --git a/update.go b/update.go
+index 0ea90d60..133be999 100644
+--- a/src/import/update.go
++++ b/src/import/update.go
+@@ -124,11 +124,11 @@ other options are ignored.
+
+ r := specs.LinuxResources{
+ Memory: &specs.LinuxMemory{
+- Limit: u64Ptr(0),
+- Reservation: u64Ptr(0),
+- Swap: u64Ptr(0),
+- Kernel: u64Ptr(0),
+- KernelTCP: u64Ptr(0),
++ Limit: i64Ptr(0),
++ Reservation: i64Ptr(0),
++ Swap: i64Ptr(0),
++ Kernel: i64Ptr(0),
++ KernelTCP: i64Ptr(0),
+ },
+ CPU: &specs.LinuxCPU{
+ Shares: u64Ptr(0),
+@@ -213,7 +213,7 @@ other options are ignored.
+ }
+ for _, pair := range []struct {
+ opt string
+- dest *uint64
++ dest *int64
+ }{
+ {"memory", r.Memory.Limit},
+ {"memory-swap", r.Memory.Swap},
+@@ -232,7 +232,7 @@ other options are ignored.
+ } else {
+ v = -1
+ }
+- *pair.dest = uint64(v)
++ *pair.dest = v
+ }
+ }
+ r.Pids.Limit = int64(context.Int("pids-limit"))
+--
+2.11.0
+
diff --git a/import-layers/meta-virtualization/recipes-containers/runc/runc-docker_git.bb b/import-layers/meta-virtualization/recipes-containers/runc/runc-docker_git.bb
index 96d48cae5..9db48ee6f 100644
--- a/import-layers/meta-virtualization/recipes-containers/runc/runc-docker_git.bb
+++ b/import-layers/meta-virtualization/recipes-containers/runc/runc-docker_git.bb
@@ -2,10 +2,14 @@ include runc.inc
# Note: this rev is before the required protocol field, update when all components
# have been updated to match.
-SRCREV = "2f7393a47307a16f8cee44a37b262e8b81021e3e"
-SRC_URI = "git://github.com/docker/runc.git;nobranch=1 \
+SRCREV_runc-docker = "9d6821d1b53908e249487741eccd567249ca1d99"
+SRC_URI = "git://github.com/docker/runc.git;nobranch=1;name=runc-docker \
+ file://0001-Update-to-runtime-spec-198f23f827eea397d4331d7eb048d.patch \
+ file://0002-Remove-Platform-as-no-longer-in-OCI-spec.patch \
+ file://0003-Update-memory-specs-to-use-int64-not-uint64.patch \
+ file://0001-runc-Add-console-socket-dev-null.patch \
+ file://0001-Use-correct-go-cross-compiler.patch \
+ file://0001-Disable-building-recvtty.patch \
"
-RUNC_VERSION = "1.0.0-rc2"
-PROVIDES += "virtual/runc"
-RPROVIDES_${PN} = "virtual/runc"
+RUNC_VERSION = "1.0.0-rc3"
diff --git a/import-layers/meta-virtualization/recipes-containers/runc/runc-opencontainers/0001-Use-correct-go-cross-compiler.patch b/import-layers/meta-virtualization/recipes-containers/runc/runc-opencontainers/0001-Use-correct-go-cross-compiler.patch
new file mode 100644
index 000000000..67d701481
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/runc/runc-opencontainers/0001-Use-correct-go-cross-compiler.patch
@@ -0,0 +1,85 @@
+From 621e5e9a196daaaf5eb430a413fe51218cf42c89 Mon Sep 17 00:00:00 2001
+From: Paul Barker <pbarker@toganlabs.com>
+Date: Wed, 4 Oct 2017 15:45:27 +0000
+Subject: [PATCH] Use correct go cross-compiler
+
+We need to use '${GO}' as set by OpenEmbedded instead of just 'go'. Just using
+'go' will invoke go-native.
+
+Signed-off-by: Paul Barker <pbarker@toganlabs.com>
+Upstream-status: Inappropriate
+---
+ Makefile | 20 ++++++++++----------
+ 1 file changed, 10 insertions(+), 10 deletions(-)
+
+diff --git a/Makefile b/Makefile
+index 6781ac7..74e551d 100644
+--- a/src/import/Makefile
++++ b/src/import/Makefile
+@@ -27,18 +27,18 @@ SHELL := $(shell command -v bash 2>/dev/null)
+ .DEFAULT: runc
+
+ runc: $(SOURCES)
+- go build -i $(EXTRA_FLAGS) -ldflags "-X main.gitCommit=${COMMIT} -X main.version=${VERSION} $(EXTRA_LDFLAGS)" -tags "$(BUILDTAGS)" -o runc .
++ $(GO) build -i $(EXTRA_FLAGS) -ldflags "-X main.gitCommit=${COMMIT} -X main.version=${VERSION} $(EXTRA_LDFLAGS)" -tags "$(BUILDTAGS)" -o runc .
+
+ all: runc recvtty
+
+ recvtty: contrib/cmd/recvtty/recvtty
+
+ contrib/cmd/recvtty/recvtty: $(SOURCES)
+- go build -i $(EXTRA_FLAGS) -ldflags "-X main.gitCommit=${COMMIT} -X main.version=${VERSION} $(EXTRA_LDFLAGS)" -tags "$(BUILDTAGS)" -o contrib/cmd/recvtty/recvtty ./contrib/cmd/recvtty
++ $(GO) build -i $(EXTRA_FLAGS) -ldflags "-X main.gitCommit=${COMMIT} -X main.version=${VERSION} $(EXTRA_LDFLAGS)" -tags "$(BUILDTAGS)" -o contrib/cmd/recvtty/recvtty ./contrib/cmd/recvtty
+
+ static: $(SOURCES)
+- CGO_ENABLED=1 go build -i $(EXTRA_FLAGS) -tags "$(BUILDTAGS) cgo static_build" -ldflags "-w -extldflags -static -X main.gitCommit=${COMMIT} -X main.version=${VERSION} $(EXTRA_LDFLAGS)" -o runc .
+- CGO_ENABLED=1 go build -i $(EXTRA_FLAGS) -tags "$(BUILDTAGS) cgo static_build" -ldflags "-w -extldflags -static -X main.gitCommit=${COMMIT} -X main.version=${VERSION} $(EXTRA_LDFLAGS)" -o contrib/cmd/recvtty/recvtty ./contrib/cmd/recvtty
++ CGO_ENABLED=1 $(GO) build -i $(EXTRA_FLAGS) -tags "$(BUILDTAGS) cgo static_build" -ldflags "-w -extldflags -static -X main.gitCommit=${COMMIT} -X main.version=${VERSION} $(EXTRA_LDFLAGS)" -o runc .
++ CGO_ENABLED=1 $(GO) build -i $(EXTRA_FLAGS) -tags "$(BUILDTAGS) cgo static_build" -ldflags "-w -extldflags -static -X main.gitCommit=${COMMIT} -X main.version=${VERSION} $(EXTRA_LDFLAGS)" -o contrib/cmd/recvtty/recvtty ./contrib/cmd/recvtty
+
+ release:
+ @flag_list=(seccomp selinux apparmor static); \
+@@ -62,15 +62,15 @@ release:
+ CGO_ENABLED=1; \
+ }; \
+ echo "Building target: $$output"; \
+- go build -i $(EXTRA_FLAGS) -ldflags "$$ldflags $(EXTRA_LDFLAGS)" -tags "$$tags" -o "$$output" .; \
++ $(GO) build -i $(EXTRA_FLAGS) -ldflags "$$ldflags $(EXTRA_LDFLAGS)" -tags "$$tags" -o "$$output" .; \
+ done
+
+ dbuild: runcimage
+ docker run --rm -v $(CURDIR):/go/src/$(PROJECT) --privileged $(RUNC_IMAGE) make clean all
+
+ lint:
+- go vet $(allpackages)
+- go fmt $(allpackages)
++ $(GO) vet $(allpackages)
++ $(GO) fmt $(allpackages)
+
+ man:
+ man/md2man-all.sh
+@@ -88,7 +88,7 @@ unittest: runcimage
+ docker run -e TESTFLAGS -t --privileged --rm -v $(CURDIR):/go/src/$(PROJECT) $(RUNC_IMAGE) make localunittest
+
+ localunittest: all
+- go test -timeout 3m -tags "$(BUILDTAGS)" ${TESTFLAGS} -v $(allpackages)
++ $(GO) test -timeout 3m -tags "$(BUILDTAGS)" ${TESTFLAGS} -v $(allpackages)
+
+ integration: runcimage
+ docker run -e TESTFLAGS -t --privileged --rm -v $(CURDIR):/go/src/$(PROJECT) $(RUNC_IMAGE) make localintegration
+@@ -133,10 +133,10 @@ clean:
+
+ validate:
+ script/validate-gofmt
+- go vet $(allpackages)
++ $(GO) vet $(allpackages)
+
+ ci: validate localtest
+
+ # memoize allpackages, so that it's executed only once and only if used
+-_allpackages = $(shell go list ./... | grep -v vendor)
++_allpackages = $(shell $(GO) list ./... | grep -v vendor)
+ allpackages = $(if $(__allpackages),,$(eval __allpackages := $$(_allpackages)))$(__allpackages)
+--
+2.7.4
+
diff --git a/import-layers/meta-virtualization/recipes-containers/runc/runc-opencontainers_git.bb b/import-layers/meta-virtualization/recipes-containers/runc/runc-opencontainers_git.bb
index 4a6e8cd16..a97676b60 100644
--- a/import-layers/meta-virtualization/recipes-containers/runc/runc-opencontainers_git.bb
+++ b/import-layers/meta-virtualization/recipes-containers/runc/runc-opencontainers_git.bb
@@ -1,15 +1,8 @@
include runc.inc
-SRCREV = "75f8da7c889acc4509a0cf6f0d3a8f9584778375"
-SRC_URI = "git://github.com/opencontainers/runc;branch=master \
- "
-RUNC_VERSION = "1.0.0-rc3"
-PROVIDES += "virtual/runc"
-RPROVIDES_${PN} = "virtual/runc"
-
-do_compile_prepend() {
- # Go looks in a src directory under any directory in GOPATH but
- # runc-opencontainers uses 'vendor' instead of 'vendor/src'. We can fix
- # this with a symlink.
- ln -sfn . "${S}/vendor/src"
-}
+SRCREV = "2e7cfe036e2c6dc51ccca6eb7fa3ee6b63976dcd"
+SRC_URI = " \
+ git://github.com/opencontainers/runc;branch=master \
+ file://0001-Use-correct-go-cross-compiler.patch \
+ "
+RUNC_VERSION = "1.0.0-rc4"
diff --git a/import-layers/meta-virtualization/recipes-containers/runc/runc.inc b/import-layers/meta-virtualization/recipes-containers/runc/runc.inc
index 4808547b1..017910309 100644
--- a/import-layers/meta-virtualization/recipes-containers/runc/runc.inc
+++ b/import-layers/meta-virtualization/recipes-containers/runc/runc.inc
@@ -4,38 +4,40 @@ DESCRIPTION = "runc is a CLI tool for spawning and running containers according
# Apache-2.0 for containerd
LICENSE = "Apache-2.0"
-LIC_FILES_CHKSUM = "file://LICENSE;md5=435b266b3899aa8a959f17d41c56def8"
+LIC_FILES_CHKSUM = "file://src/import/LICENSE;md5=435b266b3899aa8a959f17d41c56def8"
S = "${WORKDIR}/git"
-PV = "${RUNC_VERSION}+git${SRCREV}"
+PV = "${RUNC_VERSION}+git${SRCPV}"
inherit go
RRECOMMENDS_${PN} = "lxc docker"
+PROVIDES += "virtual/runc"
+RPROVIDES_${PN} = "virtual/runc"
+
+GO_IMPORT = "import"
LIBCONTAINER_PACKAGE="github.com/opencontainers/runc/libcontainer"
do_configure[noexec] = "1"
EXTRA_OEMAKE="BUILDTAGS=''"
-inherit goarch
-
do_compile() {
- export GOARCH="${TARGET_GOARCH}"
- export GOROOT="${STAGING_LIBDIR_NATIVE}/${TARGET_SYS}/go"
# Set GOPATH. See 'PACKAGERS.md'. Don't rely on
# docker to download its dependencies but rather
# use dependencies packaged independently.
- cd ${S}
+ cd ${S}/src/import
rm -rf .gopath
dname=`dirname "${LIBCONTAINER_PACKAGE}"`
bname=`basename "${LIBCONTAINER_PACKAGE}"`
mkdir -p .gopath/src/${dname}
(cd .gopath/src/${dname}; ln -sf ../../../../../${bname} ${bname})
- export GOPATH="${S}/.gopath:${S}/vendor:${STAGING_DIR_TARGET}/${prefix}/local/go"
- export GOROOT="${STAGING_DIR_NATIVE}/${nonarch_libdir}/${HOST_SYS}/go"
- cd -
+ export GOPATH="${S}/src/import/.gopath:${S}/src/import/vendor:${STAGING_DIR_TARGET}/${prefix}/local/go"
+
+ # Fix up symlink for go-cross compiler
+ rm -f ${S}/src/import/vendor/src
+ ln -sf ./ ${S}/src/import/vendor/src
# Pass the needed cflags/ldflags so that cgo
# can find the needed headers files and libraries
@@ -51,7 +53,7 @@ do_compile() {
do_install() {
mkdir -p ${D}/${bindir}
- cp ${S}/runc ${D}/${bindir}/runc
+ cp ${S}/src/import/runc ${D}/${bindir}/runc
ln -sf runc ${D}/${bindir}/docker-runc
}
diff --git a/import-layers/meta-virtualization/recipes-containers/runc/runc/0001-nsexec-fix-build-against-musl-libc.patch b/import-layers/meta-virtualization/recipes-containers/runc/runc/0001-nsexec-fix-build-against-musl-libc.patch
deleted file mode 100644
index aa57636f7..000000000
--- a/import-layers/meta-virtualization/recipes-containers/runc/runc/0001-nsexec-fix-build-against-musl-libc.patch
+++ /dev/null
@@ -1,48 +0,0 @@
-From ac6bd953192fa6752a07be7501f69f7cffe33e8e Mon Sep 17 00:00:00 2001
-From: Natanael Copa <natanael.copa@docker.com>
-Date: Tue, 19 Apr 2016 10:43:00 +0200
-Subject: [PATCH] nsexec: fix build against musl libc
-
-Remove a wrongly added include which was added in commit 3c2e77ee (Add a
-compatibility header for CentOS/RHEL 6, 2016-01-29) apparently to
-fix this compile error on centos 6:
-
-> In file included from
-> Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/nsenter/nsexec.c:20:
-> /usr/include/linux/netlink.h:35: error: expected specifier-qualifier-list before 'sa_family_t'
-
-The glibc bits/sockaddr.h says that this header should never be included
-directly[1]. Instead, sys/socket.h should be used.
-
-The problem was correctly fixed later, in commit 394fb55 (Fix build
-error on centos6, 2016-03-02) so the incorrect bits/sockaddr.h can
-safely be removed.
-
-This is needed to build musl libc.
-
-Fixes #761
-
-[1]: https://github.molgen.mpg.de/git-mirror/glibc/blob/20003c49884422da7ffbc459cdeee768a6fee07b/bits/sockaddr.h#L20
-
-Signed-off-by: Natanael Copa <natanael.copa@docker.com>
-Signed-off-by: Paul Barker <paul@paulbarker.me.uk>
-Upstream-status: Backport
----
- libcontainer/nsenter/nsexec.c | 1 -
- 1 file changed, 1 deletion(-)
-
-diff --git a/libcontainer/nsenter/nsexec.c b/libcontainer/nsenter/nsexec.c
-index 8f37d6c..40a8f89 100644
---- a/libcontainer/nsenter/nsexec.c
-+++ b/libcontainer/nsenter/nsexec.c
-@@ -18,7 +18,6 @@
- #include <unistd.h>
- #include <grp.h>
-
--#include <bits/sockaddr.h>
- #include <linux/types.h>
-
- // All arguments should be above the stack because it grows down
---
-2.1.4
-
diff --git a/import-layers/meta-virtualization/recipes-containers/singularity/README b/import-layers/meta-virtualization/recipes-containers/singularity/README
new file mode 100644
index 000000000..582480f82
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/singularity/README
@@ -0,0 +1,46 @@
+Singularity is a container platform based on the principle of mobility of
+compute, and it is designed for use within HPC clusters. For more info see
+singularity.lbl.gov.
+
+To test whether the software functions correctly, you can use `singularity
+selftest`. This is what you would expect to see:
+
+~# singularity selftest
+ + sh -c test -f /etc/singularity/singularity.conf (retval=0) OK
+ + test -u /usr/libexec/singularity/bin/action-suid (retval=0) OK
+ + test -u /usr/libexec/singularity/bin/create-suid (retval=0) OK
+ + test -u /usr/libexec/singularity/bin/expand-suid (retval=0) OK
+ + test -u /usr/libexec/singularity/bin/export-suid (retval=0) OK
+ + test -u /usr/libexec/singularity/bin/import-suid (retval=0) OK
+ + test -u /usr/libexec/singularity/bin/mount-suid (retval=0) OK
+
+You can also pull a container from Docker Hub to prove full functionality.
+(This test was performed on a Raspberry Pi 3, hence the arm32v7 part of the
+Docker link; make sure you pull an image which is compatible with your
+hardware.) For instance:
+
+~# singularity pull docker://arm32v7/debian:latest
+Initializing Singularity image subsystem
+Opening image file: debian-latest.img
+Creating 200MiB image
+Binding image to loop
+Creating file system within image
+Image is done: debian-latest.img
+Docker image path: index.docker.io/arm32v7/debian:latest
+Cache folder set to /home/root/.singularity/docker
+[1/1] |===================================| 100.0%
+Importing: base Singularity environment
+Importing: /home/root/.singularity/docker/sha256:ed4f1f0d0a0457e7f76ffb25a8d6a193007709dd312b7647cb44fc6979ec4a53.tar.gz
+Importing: /home/root/.singularity/metadata/sha256:89997b2c16b29c5a3a316e314172ef21b36f67cc3200b1c4d95927f716dbee83.tar.gz
+Done. Container is at: debian-latest.img
+~# singularity shell debian-latest.img
+Singularity: Invoking an interactive shell within container...
+
+Singularity debian-latest.img:~> echo "Hello from within the container!"
+Hello from within the container!
+Singularity debian-latest.img:~> ls /
+bin dev home lost+found mnt proc run singularity sys usr
+boot etc lib media opt root sbin srv tmp var
+Singularity debian-latest.img:~> exit
+exit
+~#
diff --git a/import-layers/meta-virtualization/recipes-containers/singularity/singularity_git.bb b/import-layers/meta-virtualization/recipes-containers/singularity/singularity_git.bb
new file mode 100644
index 000000000..6fee8f350
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/singularity/singularity_git.bb
@@ -0,0 +1,35 @@
+# Skip QA check for library symbolic links (core issue is a packaging problem within
+# Singularity build / config: read up on the dev-so test for more info)
+INSANE_SKIP_${PN} += "dev-so"
+
+RDEPENDS_${PN} += "glibc python3 ca-certificates openssl bash e2fsprogs-mke2fs"
+# Singularity expects to find python3 under the name python, therefore both
+# cannot be installed at the same time.
+RCONFLICTS_${PN} = "python"
+
+LICENSE = "BSD"
+LIC_FILES_CHKSUM = "file://COPYRIGHT.md;md5=be78c34e483dd7d8439358b1e024b294 \
+ file://LICENSE-LBNL.md;md5=45a007b527e1a9507aa7fa869f8d7ede \
+ file://LICENSE.md;md5=df4326b473db6424033f1d98a5645e30 \
+ file://debian/copyright;md5=ed267cf386d9b75ab1f27f407e935b10"
+
+SRC_URI = "git://github.com/singularityware/singularity.git;protocol=https"
+PV = "2.3.1+git${SRCPV}"
+SRCREV = "e214d4ebf0a1274b1c63b095fd55ae61c7e92947"
+
+S = "${WORKDIR}/git"
+
+inherit pythonnative autotools-brokensep
+EXTRA_OECONF = "--prefix=/usr/local"
+
+pkg_postinst_${PN}() {
+ # Singularity requires "python" to resolve to "python3" within the commandline.
+ # This creates a symbolic link from python3 to python. A side-effect of this is
+ # that scripts which expect Python 2 may fail to run correctly.
+ ln -sr $D${bindir}/python3 $D${bindir}/python
+
+ # python3 expects CA certificates to be installed in a different place to where
+ # they are actually installed. These lines link the two locations.
+ rm -r $D${libdir}/ssl-1.1/certs
+ ln -sr $D${sysconfdir}/ssl/certs $D${libdir}/ssl-1.1
+}
diff --git a/import-layers/meta-virtualization/recipes-core/runv/runv_git.bb b/import-layers/meta-virtualization/recipes-core/runv/runv_git.bb
new file mode 100644
index 000000000..5125d9062
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-core/runv/runv_git.bb
@@ -0,0 +1,82 @@
+HOMEPAGE = "https://github.com/hyperhq/runv"
+SUMMARY = "Hypervisor-based Runtime for OCI"
+DESCRIPTION = "Hypervisor-based Runtime for OCI"
+
+SRCREV_runv = "b360a686abc6c6e896382990ef1b93ef07c7a677"
+SRC_URI = "\
+ git://github.com/hyperhq/runv.git;nobranch=1;name=runv \
+ "
+
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://src/import/LICENSE;md5=4106a50540bdec3b9734f9c70882d382"
+
+GO_IMPORT = "import"
+
+PV = "0.4.0+git${SRCREV_runv}"
+
+inherit go
+inherit goarch
+inherit pkgconfig
+inherit autotools-brokensep
+
+PACKAGECONFIG[xen] = "--with-xen,--without-xen,"
+AUTOTOOLS_SCRIPT_PATH = "${S}/src/import/"
+
+RDEPENDS_${PN} += " qemu"
+
+do_compile() {
+ export GOARCH="${TARGET_GOARCH}"
+ export GOROOT="${STAGING_LIBDIR_NATIVE}/${TARGET_SYS}/go"
+ export GOPATH="${S}/src/import:${S}/src/import/vendor"
+
+ # Pass the needed cflags/ldflags so that cgo
+ # can find the needed headers files and libraries
+ export CGO_ENABLED="1"
+ export CFLAGS=""
+ export LDFLAGS=""
+ export CGO_CFLAGS="${BUILDSDK_CFLAGS} --sysroot=${STAGING_DIR_TARGET}"
+ export CGO_LDFLAGS="${BUILDSDK_LDFLAGS} --sysroot=${STAGING_DIR_TARGET}"
+
+ # link fixups for compilation
+ rm -f ${S}/src/import/vendor/src
+ ln -sf ./ ${S}/src/import/vendor/src
+
+ mkdir -p ${S}/src/import/vendor/github.com/hyperhq/runv
+
+ echo fff
+ pwd
+ ln -sf src/import/cli
+ ln -sf ../../../../api ${S}/src/import/vendor/github.com/hyperhq/runv/api
+ ln -sf ../../../../cli ${S}/src/import/vendor/github.com/hyperhq/runv/cli
+ ln -sf ../../../../lib ${S}/src/import/vendor/github.com/hyperhq/runv/lib
+ ln -sf ../../../../driverloader ${S}/src/import/vendor/github.com/hyperhq/runv/driverloader
+ ln -sf ../../../../factory ${S}/src/import/vendor/github.com/hyperhq/runv/factory
+ ln -sf ../../../../hyperstart ${S}/src/import/vendor/github.com/hyperhq/runv/hyperstart
+ ln -sf ../../../../hypervisor ${S}/src/import/vendor/github.com/hyperhq/runv/hypervisor
+ ln -sf ../../../../template ${S}/src/import/vendor/github.com/hyperhq/runv/template
+
+ export GOPATH="${S}/src/import/.gopath:${S}/src/import/vendor:${STAGING_DIR_TARGET}/${prefix}/local/go"
+ export GOROOT="${STAGING_DIR_NATIVE}/${nonarch_libdir}/${HOST_SYS}/go"
+
+ # Pass the needed cflags/ldflags so that cgo
+ # can find the needed headers files and libraries
+ export CGO_ENABLED="1"
+ export CGO_CFLAGS="${CFLAGS} --sysroot=${STAGING_DIR_TARGET}"
+ export CGO_LDFLAGS="${LDFLAGS} --sysroot=${STAGING_DIR_TARGET}"
+
+ oe_runmake build-runv
+}
+
+do_install() {
+ localbindir="/usr/local/bin"
+
+ install -d ${D}${localbindir}
+ install -m 755 ${S}/runv ${D}/${localbindir}
+}
+
+deltask compile_ptest_base
+
+FILES_${PN} += "/usr/local/bin/*"
+
+INHIBIT_PACKAGE_STRIP = "1"
+INSANE_SKIP_${PN} += "ldflags already-stripped"
diff --git a/import-layers/meta-virtualization/recipes-devtools/go/go-systemd_git.bb b/import-layers/meta-virtualization/recipes-devtools/go/go-systemd_git.bb
index 5cbab92e2..b61c97a6f 100644
--- a/import-layers/meta-virtualization/recipes-devtools/go/go-systemd_git.bb
+++ b/import-layers/meta-virtualization/recipes-devtools/go/go-systemd_git.bb
@@ -12,9 +12,7 @@ SRC_URI = "git://${PKG_NAME}.git"
SRCREV = "b4a58d95188dd092ae20072bac14cece0e67c388"
PV = "4+git${SRCREV}"
-DEPENDS += " \
- bash \
-"
+RDEPENDS_${PN} += "bash"
S = "${WORKDIR}/git"
diff --git a/import-layers/meta-virtualization/recipes-devtools/protobuf/files/disable_tests.patch b/import-layers/meta-virtualization/recipes-devtools/protobuf/files/disable_tests.patch
deleted file mode 100644
index f5e71ca62..000000000
--- a/import-layers/meta-virtualization/recipes-devtools/protobuf/files/disable_tests.patch
+++ /dev/null
@@ -1,19 +0,0 @@
-diff -Naur protobuf-c-0.15.old/src/Makefile.am protobuf-c-0.15/src/Makefile.am
---- protobuf-c-0.15.old/src/Makefile.am 2012-11-28 14:59:57.845251943 +0100
-+++ protobuf-c-0.15/src/Makefile.am 2012-11-28 15:00:23.549252632 +0100
-@@ -1,5 +1,5 @@
- if BUILD_PROTOC_C
--SUBDIRS = . test
-+
- bin_PROGRAMS = protoc-c
- protoc_c_SOURCES = \
- google/protobuf/compiler/c/c_service.cc \
-@@ -23,7 +23,7 @@
- lib_LTLIBRARIES = libprotobuf-c.la
- protobufcincludedir = $(includedir)/google/protobuf-c
-
--EXTRA_DIST = CMakeLists.txt test/CMakeLists.txt
-+EXTRA_DIST = CMakeLists.txt
-
- libprotobuf_c_la_SOURCES = \
- google/protobuf-c/protobuf-c-dispatch.c \
diff --git a/import-layers/meta-virtualization/recipes-devtools/protobuf/files/protobuf-allow-running-python-scripts-from-anywhere.patch b/import-layers/meta-virtualization/recipes-devtools/protobuf/files/protobuf-allow-running-python-scripts-from-anywhere.patch
deleted file mode 100644
index 8b2934284..000000000
--- a/import-layers/meta-virtualization/recipes-devtools/protobuf/files/protobuf-allow-running-python-scripts-from-anywhere.patch
+++ /dev/null
@@ -1,38 +0,0 @@
-From 46e331263eb92e47510e88478b255f226d30245c Mon Sep 17 00:00:00 2001
-From: Keith Holman <Keith.Holman@windriver.com>
-Date: Mon, 18 Aug 2014 15:19:35 -0400
-Subject: [PATCH] protobuf: allow running python scripts from anywhere
-
-The Makefile to generate the examples with Google Protocol Buffers
-generates some scripts for python. However, these generated scripts
-only work if they are ran in the same directory as the source files.
-This fix generates scripts to execute from anywhere on the system.
-
-Signed-off-by: Keith Holman <Keith.Holman@windriver.com>
----
- examples/Makefile | 6 ++++--
- 1 file changed, 4 insertions(+), 2 deletions(-)
-
-diff --git a/examples/Makefile b/examples/Makefile
-index 8dc9083..a993d63 100644
---- a/examples/Makefile
-+++ b/examples/Makefile
-@@ -48,11 +48,13 @@ list_people_java: javac_middleman
- add_person_python: add_person.py protoc_middleman
- @echo "Writing shortcut script add_person_python..."
- @echo '#! /bin/sh' > add_person_python
-- @echo './add_person.py "$$@"' >> add_person_python
-+ @echo 'SCRIPT_DIR=$$(dirname $$0)' >> add_person_python
-+ @echo '$$SCRIPT_DIR/add_person.py "$$@"' >> add_person_python
- @chmod +x add_person_python
-
- list_people_python: list_people.py protoc_middleman
- @echo "Writing shortcut script list_people_python..."
- @echo '#! /bin/sh' > list_people_python
-- @echo './list_people.py "$$@"' >> list_people_python
-+ @echo 'SCRIPT_DIR=$$(dirname $$0)' >> list_people_python
-+ @echo '$$SCRIPT_DIR/list_people.py "$$@"' >> list_people_python
- @chmod +x list_people_python
---
-1.9.3
-
diff --git a/import-layers/meta-virtualization/recipes-devtools/protobuf/files/run-ptest b/import-layers/meta-virtualization/recipes-devtools/protobuf/files/run-ptest
deleted file mode 100755
index a5a7b0f9b..000000000
--- a/import-layers/meta-virtualization/recipes-devtools/protobuf/files/run-ptest
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/bin/bash
-DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
-TEST_FILE="/tmp/test.data"
-
-RETVAL=0
-# Test every writing test application
-for write_exe_full_path in ${DIR}/add_person_*; do
- if [ -x "${write_exe_full_path}" ]; then
- write_exe=`basename ${write_exe_full_path}`
- echo "Generating new test file using ${write_exe}..."
- ${write_exe_full_path} "${TEST_FILE}"
- RETVAL=$?
-
- # Test every reading test application
- for read_exe_full_path in ${DIR}/list_people_*; do
- read_exe=`basename ${read_exe_full_path}`
- echo "Test: Write with ${write_exe}; Read with ${read_exe}..."
- if [ -x "${read_exe_full_path}" ]; then
- ${read_exe_full_path} "${TEST_FILE}"
- RETVAL=$?
- fi
- done
-
- # Cleanup...
- if [ -e "${TEST_FILE}" ]; then
- rm "${TEST_FILE}"
- fi
- fi
-done
-
-exit $RETVAL
-
diff --git a/import-layers/meta-virtualization/recipes-devtools/protobuf/protobuf-c_1.2.1.bb b/import-layers/meta-virtualization/recipes-devtools/protobuf/protobuf-c_1.2.1.bb
deleted file mode 100644
index ff2499e50..000000000
--- a/import-layers/meta-virtualization/recipes-devtools/protobuf/protobuf-c_1.2.1.bb
+++ /dev/null
@@ -1,28 +0,0 @@
-SUMMARY = "protobuf-c"
-DESCRIPTION = "This package provides a code generator and runtime libraries to use Protocol Buffers from pure C"
-HOMEPAGE = "http://code.google.com/p/protobuf-c/"
-SECTION = "console/tools"
-LICENSE = "Apache-2.0"
-
-LIC_FILES_CHKSUM = "file://LICENSE;md5=235c3195a3968524dc1524b4ebea0c0e"
-
-COMPATIBLE_HOST = "(x86_64|arm|aarch64).*-linux"
-
-DEPENDS = "protobuf protobuf-c-native"
-
-SRC_URI[md5sum] = "e544249c329391fff512c3874895cfbe"
-SRC_URI[sha256sum] = "846eb4846f19598affdc349d817a8c4c0c68fd940303e6934725c889f16f00bd"
-SRC_URI = "https://github.com/protobuf-c/protobuf-c/releases/download/v1.2.1/protobuf-c-1.2.1.tar.gz "
-#SRC_URI_append_class-target ="file://0001-protobuf-c-Remove-the-rules-which-depend-on-the-nati.patch"
-
-inherit autotools pkgconfig
-
-BBCLASSEXTEND = "native nativesdk"
-
-do_configure_prepend_class-target() {
- export PKG_CONFIG_PATH="${STAGING_LIBDIR_NATIVE}/pkgconfig:${PKG_CONFIG_PATH}"
-}
-
-do_install_append_class-native() {
- install -m 755 ${B}/t/generated-code2/cxx-generate-packed-data ${D}/${bindir}
-}
diff --git a/import-layers/meta-virtualization/recipes-devtools/protobuf/protobuf-native_3.1.0.bb b/import-layers/meta-virtualization/recipes-devtools/protobuf/protobuf-native_3.1.0.bb
deleted file mode 100644
index c32f6278d..000000000
--- a/import-layers/meta-virtualization/recipes-devtools/protobuf/protobuf-native_3.1.0.bb
+++ /dev/null
@@ -1,19 +0,0 @@
-SUMMARY = "protobuf"
-DESCRIPTION = "Protocol Buffers are a way of encoding structured data in \
-an efficient yet extensible format. Google uses Protocol Buffers for \
-almost all of its internal RPC protocols and file formats."
-HOMEPAGE = "http://code.google.com/p/protobuf/"
-SECTION = "console/tools"
-LICENSE = "BSD-3-Clause"
-
-LIC_FILES_CHKSUM = "file://LICENSE;md5=35953c752efc9299b184f91bef540095"
-
-SRC_URI[md5sum] = "14a532a7538551d5def317bfca41dace"
-SRC_URI[sha256sum] = "0a0ae63cbffc274efb573bdde9a253e3f32e458c41261df51c5dbc5ad541e8f7"
-SRC_URI = "https://github.com/google/protobuf/archive/v3.1.0.tar.gz;downloadfilename=protobuf-3.1.0.tar.gz \
- "
-
-EXTRA_OECONF += " --with-protoc=echo"
-
-inherit native autotools
-
diff --git a/import-layers/meta-virtualization/recipes-devtools/protobuf/protobuf_3.1.0.bb b/import-layers/meta-virtualization/recipes-devtools/protobuf/protobuf_3.1.0.bb
deleted file mode 100644
index b948e66ae..000000000
--- a/import-layers/meta-virtualization/recipes-devtools/protobuf/protobuf_3.1.0.bb
+++ /dev/null
@@ -1,98 +0,0 @@
-SUMMARY = "protobuf"
-DESCRIPTION = "Protocol Buffers are a way of encoding structured data in \
-an efficient yet extensible format. Google uses Protocol Buffers for \
-almost all of its internal RPC protocols and file formats."
-HOMEPAGE = "http://code.google.com/p/protobuf/"
-SECTION = "console/tools"
-LICENSE = "BSD-3-Clause"
-
-LIC_FILES_CHKSUM = "file://LICENSE;md5=35953c752efc9299b184f91bef540095"
-
-PR = "r0"
-EXCLUDE_FROM_WORLD = "1"
-
-SRC_URI[md5sum] = "14a532a7538551d5def317bfca41dace"
-SRC_URI[sha256sum] = "0a0ae63cbffc274efb573bdde9a253e3f32e458c41261df51c5dbc5ad541e8f7"
-SRC_URI = "https://github.com/google/protobuf/archive/v3.1.0.tar.gz;downloadfilename=protobuf-3.1.0.tar.gz\
- file://protobuf-allow-running-python-scripts-from-anywhere.patch \
- file://run-ptest \
- "
-
-COMPATIBLE_HOST = "(x86_64|arm|aarch64).*-linux"
-
-EXTRA_OECONF += " --with-protoc=${STAGING_BINDIR_NATIVE}/protoc"
-inherit autotools setuptools ptest
-
-DEPENDS += "protobuf-native"
-RDEPENDS_${PN}-ptest = "bash"
-
-PYTHON_SRC_DIR="python"
-TEST_SRC_DIR="examples"
-LANG_SUPPORT="cpp python"
-
-do_compile() {
- # Compile protoc compiler
- base_do_compile
-}
-
-do_compile_ptest() {
- # Modify makefile to use the cross-compiler
- sed -e "s|c++|${CXX} \$(LDFLAGS)|g" -i "${S}/${TEST_SRC_DIR}/Makefile"
-
- mkdir -p "${B}/${TEST_SRC_DIR}"
-
- # Add the location of the cross-compiled header and library files
- # which haven't been installed yet.
- cp "${B}/protobuf.pc" "${B}/${TEST_SRC_DIR}/protobuf.pc"
- sed -e 's|libdir=|libdir=${PKG_CONFIG_SYSROOT_DIR}|' -i "${B}/${TEST_SRC_DIR}/protobuf.pc"
- sed -e 's|Cflags:|Cflags: -I${S}/src|' -i "${B}/${TEST_SRC_DIR}/protobuf.pc"
- sed -e 's|Libs:|Libs: -L${B}/src/.libs|' -i "${B}/${TEST_SRC_DIR}/protobuf.pc"
- export PKG_CONFIG_PATH="${B}/${TEST_SRC_DIR}"
-
- # Save the pkgcfg sysroot variable, and update it to nothing so
- # that it doesn't append the sysroot to the beginning of paths.
- # The header and library files aren't installed to the target
- # system yet. So the absolute paths were specified above.
- save_pkg_config_sysroot_dir=$PKG_CONFIG_SYSROOT_DIR
- export PKG_CONFIG_SYSROOT_DIR=
-
- # Compile the tests
- for lang in ${LANG_SUPPORT}; do
- oe_runmake -C "${S}/${TEST_SRC_DIR}" ${lang}
- done
-
- # Restore the pkgconfig sysroot variable
- export PKG_CONFIG_SYSROOT_DIR=$save_pkg_config_sysroot_dir
-}
-
-do_install() {
- local olddir=`pwd`
-
- # Install protoc compiler
- autotools_do_install
-
- # Install header files
- export PROTOC="${STAGING_BINDIR_NATIVE}/protoc"
- cd "${S}/${PYTHON_SRC_DIR}"
- distutils_do_install
-
- cd "$olddir"
-}
-
-do_install_ptest() {
- local olddir=`pwd`
-
- cd "${S}/${TEST_SRC_DIR}"
- install -d "${D}/${PTEST_PATH}"
- for i in add_person* list_people*; do
- if [ -x "$i" ]; then
- install "$i" "${D}/${PTEST_PATH}"
- fi
- done
- cp "${S}/${TEST_SRC_DIR}/addressbook_pb2.py" "${D}/${PTEST_PATH}"
-
- cd "$olddir"
-}
-
-BBCLASSEXTEND = "nativesdk"
-
diff --git a/import-layers/meta-virtualization/recipes-devtools/python/python-docopt.inc b/import-layers/meta-virtualization/recipes-devtools/python/python-docopt.inc
new file mode 100644
index 000000000..4f464c132
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/python/python-docopt.inc
@@ -0,0 +1,9 @@
+SUMMARY = "Pythonic argument parser, that will make you smile"
+HOMEPAGE = "http://docopt.org/"
+LICENSE = "MIT"
+LIC_FILES_CHKSUM = "file://LICENSE-MIT;md5=09b77fb74986791a3d4a0e746a37d88f"
+
+inherit pypi
+
+SRC_URI[md5sum] = "4bc74561b37fad5d3e7d037f82a4c3b1"
+SRC_URI[sha256sum] = "49b3a825280bd66b3aa83585ef59c4a8c82f2c8a522dbe754a8bc8d08c85c491"
diff --git a/import-layers/meta-virtualization/recipes-devtools/python/python-gevent_1.0.1.bb b/import-layers/meta-virtualization/recipes-devtools/python/python-gevent_1.0.1.bb
index 8ac12956b..47f1267ab 100644
--- a/import-layers/meta-virtualization/recipes-devtools/python/python-gevent_1.0.1.bb
+++ b/import-layers/meta-virtualization/recipes-devtools/python/python-gevent_1.0.1.bb
@@ -13,7 +13,7 @@ RDEPENDS_${PN} += "python-greenlet python-mime python-pprint python-re"
SRCNAME = "gevent"
-SRC_URI = "https://pypi.python.org/packages/source/g/gevent/${SRCNAME}-${PV}.tar.gz"
+SRC_URI = "http://pypi.python.org/packages/source/g/gevent/${SRCNAME}-${PV}.tar.gz"
SRC_URI[md5sum] = "7b952591d1a0174d6eb6ac47bd975ab6"
SRC_URI[sha256sum] = "4627e215d058f71d95e6b26d9e7be4c263788a4756bd2858a93775f6c072df43"
diff --git a/import-layers/meta-virtualization/recipes-devtools/python/python-itsdangerous_0.24.bb b/import-layers/meta-virtualization/recipes-devtools/python/python-itsdangerous_0.24.bb
deleted file mode 100644
index 3e229f8fe..000000000
--- a/import-layers/meta-virtualization/recipes-devtools/python/python-itsdangerous_0.24.bb
+++ /dev/null
@@ -1,22 +0,0 @@
-DESCRIPTION = "Various helpers to pass trusted data to untrusted environments and back"
-HOMEPAGE = "https://pypi.python.org/pypi/itsdangerous/"
-SECTION = "devel/python"
-LICENSE = "BSD-3-Clause"
-LIC_FILES_CHKSUM = "file://LICENSE;md5=b61841e2bf5f07884148e2a6f1bcab0c"
-
-PR = "r0"
-SRCNAME = "itsdangerous"
-
-SRC_URI = "https://pypi.python.org/packages/source/i/${SRCNAME}/${SRCNAME}-${PV}.tar.gz"
-
-SRC_URI[md5sum] = "a3d55aa79369aef5345c036a8a26307f"
-SRC_URI[sha256sum] = "cbb3fcf8d3e33df861709ecaf89d9e6629cff0a217bc2848f1b41cd30d360519"
-
-S = "${WORKDIR}/${SRCNAME}-${PV}"
-
-inherit setuptools
-
-RDEPENDS_${PN} += "python-json python-netclient python-zlib python-datetime python-lang python-crypt"
-
-CLEANBROKEN = "1"
-
diff --git a/import-layers/meta-virtualization/recipes-devtools/python/python-m2crypto_0.22.3.bb b/import-layers/meta-virtualization/recipes-devtools/python/python-m2crypto_0.22.3.bb
index 9814d3def..95d6eecf4 100644
--- a/import-layers/meta-virtualization/recipes-devtools/python/python-m2crypto_0.22.3.bb
+++ b/import-layers/meta-virtualization/recipes-devtools/python/python-m2crypto_0.22.3.bb
@@ -14,7 +14,7 @@ LICENSE = "BSD"
LIC_FILES_CHKSUM = "file://PKG-INFO;md5=0ccca7097c1d29fa42e75e9c15c6ff2e"
SRCNAME = "M2Crypto"
-SRC_URI = "https://pypi.python.org/packages/source/M/M2Crypto/${SRCNAME}-${PV}.tar.gz \
+SRC_URI = "http://pypi.python.org/packages/source/M/M2Crypto/${SRCNAME}-${PV}.tar.gz \
file://m2crypto-Fix-build-with-SWIG-3.0.5.patch \
"
diff --git a/import-layers/meta-virtualization/recipes-devtools/python/python-pyyaml_3.11.bb b/import-layers/meta-virtualization/recipes-devtools/python/python-pyyaml_3.11.bb
index 9a6f2f62b..cb1db8c60 100644
--- a/import-layers/meta-virtualization/recipes-devtools/python/python-pyyaml_3.11.bb
+++ b/import-layers/meta-virtualization/recipes-devtools/python/python-pyyaml_3.11.bb
@@ -18,7 +18,7 @@ LICENSE = "MIT"
LIC_FILES_CHKSUM = "file://LICENSE;md5=6015f088759b10e0bc2bf64898d4ae17"
SRCNAME = "PyYAML"
-SRC_URI = "https://pyyaml.org/download/pyyaml/${SRCNAME}-${PV}.tar.gz"
+SRC_URI = "http://pyyaml.org/download/pyyaml/${SRCNAME}-${PV}.tar.gz"
SRC_URI[md5sum] = "f50e08ef0fe55178479d3a618efe21db"
SRC_URI[sha256sum] = "c36c938a872e5ff494938b33b14aaa156cb439ec67548fcab3535bb78b0846e8"
diff --git a/import-layers/meta-virtualization/recipes-devtools/python/python-requests_2.8.1.bb b/import-layers/meta-virtualization/recipes-devtools/python/python-requests_2.8.1.bb
deleted file mode 100644
index d2667800d..000000000
--- a/import-layers/meta-virtualization/recipes-devtools/python/python-requests_2.8.1.bb
+++ /dev/null
@@ -1,28 +0,0 @@
-HOMEPAGE = "http://python-requests.org"
-SUMMARY = "Python HTTP for Humans."
-DESCRIPTION = "\
- Requests is an Apache2 Licensed HTTP library, written in Python, \
- for human beings. \
- . \
- Most existing Python modules for sending HTTP requests are extremely \
- verbose and cumbersome. Python's builtin urllib2 module provides most \
- of the HTTP capabilities you should need, but the api is thoroughly \
- broken. It requires an enormous amount of work (even method overrides) \
- to perform the simplest of tasks. \
- . \
- Things shouldn't be this way. Not in Python \
- "
-SECTION = "devel/python"
-LICENSE = "Apache-2.0"
-LIC_FILES_CHKSUM = "file://LICENSE;md5=58c7e163c9f8ee037246da101c6afd1e"
-
-SRCNAME = "requests"
-
-SRC_URI = "https://pypi.python.org/packages/source/r/requests/${SRCNAME}-${PV}.tar.gz"
-
-SRC_URI[md5sum] = "a27ea3d72d7822906ddce5e252d6add9"
-SRC_URI[sha256sum] = "84fe8d5bf4dcdcc49002446c47a146d17ac10facf00d9086659064ac43b6c25b"
-
-S = "${WORKDIR}/${SRCNAME}-${PV}"
-
-inherit setuptools
diff --git a/import-layers/meta-virtualization/recipes-devtools/python/python-simplejson_3.7.3.bb b/import-layers/meta-virtualization/recipes-devtools/python/python-simplejson_3.7.3.bb
deleted file mode 100644
index 39dfce614..000000000
--- a/import-layers/meta-virtualization/recipes-devtools/python/python-simplejson_3.7.3.bb
+++ /dev/null
@@ -1,31 +0,0 @@
-HOMEPAGE = "http://cheeseshop.python.org/pypi/simplejson"
-SUMMARY = "Simple, fast, extensible JSON encoder/decoder for Python"
-DESCRIPTION = "\
- JSON <http://json.org> encoder and decoder for Python 2.5+ \
- and Python 3.3+. It is pure Python code with no dependencies, \
- but includes an optional C extension for a serious speed boost \
- "
-SECTION = "devel/python"
-LICENSE = "MIT"
-LIC_FILES_CHKSUM = "file://LICENSE.txt;md5=c6338d7abd321c0b50a2a547e441c52e"
-PR = "r0"
-
-SRCNAME = "simplejson"
-
-SRC_URI = "https://pypi.python.org/packages/source/s/simplejson/${SRCNAME}-${PV}.tar.gz"
-SRC_URI[md5sum] = "117346e5ee4ed4434ffe485f8e58f5ed"
-SRC_URI[sha256sum] = "63d7f7b14a20f29f74325a69e6db45925eaf6e3a003eab46c0234fd050a8c93f"
-
-S = "${WORKDIR}/${SRCNAME}-${PV}"
-
-inherit setuptools
-
-RDEPENDS_${PN} = "\
- python-core \
- python-re \
- python-io \
- python-netserver \
- python-numbers \
-"
-
-
diff --git a/import-layers/meta-virtualization/recipes-devtools/python/python-six_1.10.0.bb b/import-layers/meta-virtualization/recipes-devtools/python/python-six_1.10.0.bb
deleted file mode 100644
index c279f9f1b..000000000
--- a/import-layers/meta-virtualization/recipes-devtools/python/python-six_1.10.0.bb
+++ /dev/null
@@ -1,19 +0,0 @@
-DESCRIPTION = "Python 2 and 3 compatibility utilities"
-HOMEPAGE = "http://pypi.python.org/pypi/six/"
-SECTION = "devel/python"
-LICENSE = "MIT"
-LIC_FILES_CHKSUM = "file://LICENSE;md5=6f00d4a50713fa859858dd9abaa35b21"
-
-SRCNAME = "six"
-
-SRC_URI = "https://pypi.python.org/packages/source/s/${SRCNAME}/${SRCNAME}-${PV}.tar.gz \
-"
-
-SRC_URI[md5sum] = "34eed507548117b2ab523ab14b2f8b55"
-SRC_URI[sha256sum] = "105f8d68616f8248e24bf0e9372ef04d3cc10104f1980f54d57b2ce73a5ad56a"
-
-S = "${WORKDIR}/${SRCNAME}-${PV}"
-
-inherit setuptools
-
-BBCLASSEXTEND = "native"
\ No newline at end of file
diff --git a/import-layers/meta-virtualization/recipes-devtools/python/python-sphinx_1.4.1.bb b/import-layers/meta-virtualization/recipes-devtools/python/python-sphinx_1.4.1.bb
index b4ae86b0f..835b369da 100644
--- a/import-layers/meta-virtualization/recipes-devtools/python/python-sphinx_1.4.1.bb
+++ b/import-layers/meta-virtualization/recipes-devtools/python/python-sphinx_1.4.1.bb
@@ -7,7 +7,7 @@ LIC_FILES_CHKSUM = "file://LICENSE;md5=72f034adc6f7b05b09bc00d1a05bb065"
PR = "r0"
SRCNAME = "Sphinx"
-SRC_URI = "https://pypi.python.org/packages/source/S/${SRCNAME}/${SRCNAME}-${PV}.tar.gz"
+SRC_URI = "http://pypi.python.org/packages/source/S/${SRCNAME}/${SRCNAME}-${PV}.tar.gz"
SRC_URI[md5sum] = "4c4988e0306a04cef8dccc384281e585"
SRC_URI[sha256sum] = "c6871a784d24aba9270b6b28541537a57e2fcf4d7c799410eba18236bc76d6bc"
diff --git a/import-layers/meta-virtualization/recipes-devtools/python/python-twisted.inc b/import-layers/meta-virtualization/recipes-devtools/python/python-twisted.inc
new file mode 100644
index 000000000..d734e03b3
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/python/python-twisted.inc
@@ -0,0 +1,244 @@
+DESCRIPTION = "Twisted is an event-driven networking framework written in Python and licensed under the LGPL. \
+Twisted supports TCP, UDP, SSL/TLS, multicast, Unix sockets, a large number of protocols \
+(including HTTP, NNTP, IMAP, SSH, IRC, FTP, and others), and much more."
+HOMEPAGE = "http://www.twistedmatrix.com"
+
+#twisted/topfiles/NEWS:655: - Relicensed: Now under the MIT license, rather than LGPL.
+LICENSE = "MIT"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=5602d7228daf59a16f0f1b2640c46bca"
+
+SRC_URI[md5sum] = "83fe6c0c911cc1602dbffb036be0ba79"
+SRC_URI[sha256sum] = "095175638c019ac7c0604f4c291724a16ff1acd062e181b01293bf4dcbc62cf3"
+
+PYPI_PACKAGE = "Twisted"
+PYPI_PACKAGE_EXT = "tar.bz2"
+
+do_install_append() {
+ # remove some useless files before packaging
+ find ${D} \( -name "*.bat" -o -name "*.c" -o -name "*.h" \) -exec rm -f {} \;
+}
+
+PACKAGES += "\
+ ${PN}-zsh \
+ ${PN}-test \
+ ${PN}-protocols \
+ ${PN}-conch \
+ ${PN}-lore \
+ ${PN}-mail \
+ ${PN}-names \
+ ${PN}-news \
+ ${PN}-runner \
+ ${PN}-web \
+ ${PN}-words \
+ ${PN}-flow \
+ ${PN}-pair \
+ ${PN}-core \
+"
+
+PACKAGES =+ "\
+ ${PN}-src \
+ ${PN}-bin \
+"
+
+RDEPENDS_${PN} = "\
+ ${PN}-bin \
+ ${PN}-conch \
+ ${PN}-lore \
+ ${PN}-mail \
+ ${PN}-names \
+ ${PN}-news \
+ ${PN}-runner \
+ ${PN}-web \
+ ${PN}-words \
+"
+
+RDEPENDS_${PN}-core = "python-core python-zopeinterface python-contextlib"
+RDEPENDS_${PN}-test = "${PN}"
+RDEPENDS_${PN}-conch = "${PN}-core ${PN}-protocols"
+RDEPENDS_${PN}-lore = "${PN}-core"
+RDEPENDS_${PN}-mail = "${PN}-core ${PN}-protocols"
+RDEPENDS_${PN}-names = "${PN}-core"
+RDEPENDS_${PN}-news = "${PN}-core ${PN}-protocols"
+RDEPENDS_${PN}-runner = "${PN}-core ${PN}-protocols"
+RDEPENDS_${PN}-web += "${PN}-core ${PN}-protocols"
+RDEPENDS_${PN}-words += "${PN}-core"
+RDEPENDS_${PN}-flow += "${PN}-core"
+RDEPENDS_${PN}-pair += "${PN}-core"
+RDEPENDS_${PN}-dbg = "${PN}"
+
+ALLOW_EMPTY_${PN} = "1"
+FILES_${PN} = ""
+
+FILES_${PN}-test = " \
+ ${libdir}/${PYTHON_DIR}/site-packages/twisted/test \
+ ${libdir}/${PYTHON_DIR}/site-packages/twisted/*/test \
+"
+
+FILES_${PN}-protocols = " \
+ ${libdir}/${PYTHON_DIR}/site-packages/twisted/protocols/*.py* \
+ ${libdir}/${PYTHON_DIR}/site-packages/twisted/protocols/gps/ \
+ ${libdir}/${PYTHON_DIR}/site-packages/twisted/protocols/mice/ \
+"
+
+FILES_${PN}-zsh = " \
+ ${libdir}/${PYTHON_DIR}/site-packages/twisted/python/zsh \
+ ${libdir}/${PYTHON_DIR}/site-packages/twisted/python/zshcomp.* \
+ ${libdir}/${PYTHON_DIR}/site-packages/twisted/python/twisted-completion.zsh \
+"
+
+FILES_${PN}-conch = " \
+ ${bindir}/ckeygen \
+ ${bindir}/tkconch \
+ ${bindir}/conch \
+ ${bindir}/conchftp \
+ ${bindir}/cftp \
+ ${libdir}/${PYTHON_DIR}/site-packages/twisted/plugins/twisted_conch.py* \
+ ${libdir}/${PYTHON_DIR}/site-packages/twisted/conch \
+"
+
+FILES_${PN}-core = " \
+${bindir}/manhole \
+${bindir}/mktap \
+${bindir}/twistd \
+${bindir}/tap2deb \
+${bindir}/tap2rpm \
+${bindir}/tapconvert \
+${bindir}/tkmktap \
+${bindir}/trial \
+${bindir}/easy_install* \
+${bindir}/pyhtmlizer \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/python/*.so \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/*.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/plugins/__init__.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/plugins/notestplugin.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/plugins/testplugin.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/plugins/twisted_ftp.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/plugins/twisted_inet.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/plugins/twisted_manhole.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/plugins/twisted_portforward.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/plugins/twisted_socks.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/plugins/twisted_telnet.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/plugins/twisted_trial.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/plugins/dropin.cache \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/application \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/cred \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/enterprise \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/internet \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/manhole \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/persisted \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/protocols \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/python \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/python/timeoutqueue.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/python/filepath.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/python/dxprofile.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/python/plugin.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/python/htmlizer.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/python/__init__.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/python/dispatch.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/python/hook.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/python/threadpool.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/python/otp.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/python/usage.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/python/roots.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/python/versions.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/python/urlpath.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/python/util.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/python/components.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/python/logfile.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/python/runtime.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/python/reflect.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/python/context.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/python/threadable.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/python/rebuild.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/python/failure.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/python/lockfile.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/python/formmethod.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/python/finalize.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/python/win32.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/python/dist.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/python/shortcut.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/python/zipstream.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/python/release.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/python/syslog.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/python/log.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/python/compat.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/python/zshcomp.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/python/procutils.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/python/text.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/python/_twisted_zsh_stub \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/scripts/ \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/spread/ \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/tap/ \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/trial/ \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/__init__.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/_version.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/copyright.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/im.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/*.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/python/*.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/plugins/*.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/topfiles \
+${libdir}/${PYTHON_DIR}/site-packages/Twisted*egg-info \
+"
+
+FILES_${PN}-lore = " \
+${bindir}/bookify \
+${bindir}/lore \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/plugins/twisted_lore.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/lore \
+"
+
+FILES_${PN}-mail = " \
+${bindir}/mailmail \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/plugins/twisted_mail.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/mail \
+"
+
+FILES_${PN}-names = " \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/plugins/twisted_names.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/names \
+"
+
+FILES_${PN}-news = " \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/plugins/twisted_news.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/news \
+"
+
+FILES_${PN}-runner = " \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/runner/portmap.so \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/runner \
+"
+
+FILES_${PN}-web = " \
+${bindir}/websetroot \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/plugins/twisted_web.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/web \
+"
+
+FILES_${PN}-words = " \
+${bindir}/im \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/plugins/twisted_words.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/words \
+"
+
+FILES_${PN}-flow = " \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/plugins/twisted_flow.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/flow \
+"
+
+FILES_${PN}-pair = " \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/plugins/twisted_pair.py* \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/pair \
+"
+
+FILES_${PN}-dbg += " \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/*/.debug \
+${libdir}/${PYTHON_DIR}/site-packages/twisted/*/*/.debug \
+"
+
+RDEPENDS_${PN}-src = "${PN}"
+FILES_${PN}-src = " \
+ ${libdir}/${PYTHON_DIR}/site-packages/twisted/*.py \
+ ${libdir}/${PYTHON_DIR}/site-packages/twisted/*/*.py \
+ ${libdir}/${PYTHON_DIR}/site-packages/twisted/*/*/*.py \
+"
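The split above lets an image pull in only the pieces of Twisted it needs, and the RDEPENDS chains bring in the shared bits automatically. As a usage sketch (assuming the python3 recipe further down, where PN is python3-twisted; the snippet itself is hypothetical and not part of this change), adding just the web subpackage to an image would look like:

    # local.conf or an image recipe (hypothetical):
    IMAGE_INSTALL_append = " python3-twisted-web"
    # RDEPENDS_${PN}-web then pulls in python3-twisted-core and python3-twisted-protocols as well.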
diff --git a/import-layers/meta-virtualization/recipes-devtools/python/python-webob_1.6.0.bb b/import-layers/meta-virtualization/recipes-devtools/python/python-webob_1.6.0.bb
index 93f2acf1b..71c74ffd1 100644
--- a/import-layers/meta-virtualization/recipes-devtools/python/python-webob_1.6.0.bb
+++ b/import-layers/meta-virtualization/recipes-devtools/python/python-webob_1.6.0.bb
@@ -7,7 +7,7 @@ LIC_FILES_CHKSUM = "file://docs/license.txt;md5=8ed3584bcc78c16da363747ccabc5af5
PR = "r0"
SRCNAME = "WebOb"
-SRC_URI = "https://pypi.python.org/packages/source/W/${SRCNAME}/${SRCNAME}-${PV}.tar.gz"
+SRC_URI = "http://pypi.python.org/packages/source/W/${SRCNAME}/${SRCNAME}-${PV}.tar.gz"
SRC_URI[md5sum] = "089d7fc6745f175737800237c7287802"
SRC_URI[sha256sum] = "63d262d8f61b516321f786879c9277fa2209f7f57eb47b537eeecfea383d55b7"
diff --git a/import-layers/meta-virtualization/recipes-devtools/python/python3-cached-property_1.3.0.bb b/import-layers/meta-virtualization/recipes-devtools/python/python3-cached-property_1.3.0.bb
new file mode 100644
index 000000000..f01aabd07
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/python/python3-cached-property_1.3.0.bb
@@ -0,0 +1,9 @@
+SUMMARY = "A decorator for caching properties in classes."
+HOMEPAGE = "https://github.com/pydanny/cached-property"
+LICENSE = "BSD-3-Clause"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=db7ff60c4e14f58534201242803d8abc"
+
+inherit pypi setuptools3
+
+SRC_URI[md5sum] = "4a6039f7418007275505e355359396a8"
+SRC_URI[sha256sum] = "458e78b1c7286ece887d92c9bee829da85717994c5e3ddd253a40467f488bc81"
diff --git a/import-layers/meta-virtualization/recipes-devtools/python/python3-colorama_0.3.9.bb b/import-layers/meta-virtualization/recipes-devtools/python/python3-colorama_0.3.9.bb
new file mode 100644
index 000000000..458b0dbd0
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/python/python3-colorama_0.3.9.bb
@@ -0,0 +1,9 @@
+SUMMARY = "Cross-platform colored terminal text."
+HOMEPAGE = "https://github.com/tartley/colorama"
+LICENSE = "BSD-2-Clause"
+LIC_FILES_CHKSUM = "file://LICENSE.txt;md5=14d0b64047ed8f510b51ce0495995358"
+
+inherit pypi setuptools3
+
+SRC_URI[md5sum] = "3a0e415259690f4dd7455c2683ee5850"
+SRC_URI[sha256sum] = "48eb22f4f8461b1df5734a074b57042430fb06e1d61bd1e11b078c0fe6d7a1f1"
diff --git a/import-layers/meta-virtualization/recipes-devtools/python/python3-docker-pycreds_0.2.1.bb b/import-layers/meta-virtualization/recipes-devtools/python/python3-docker-pycreds_0.2.1.bb
new file mode 100644
index 000000000..403841004
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/python/python3-docker-pycreds_0.2.1.bb
@@ -0,0 +1,9 @@
+SUMMARY = "Python bindings for the docker credentials store API"
+HOMEPAGE = "https://github.com/shin-/dockerpy-creds"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57"
+
+inherit pypi setuptools3
+
+SRC_URI[md5sum] = "0d80d5aebab771faf7e422b759c3055b"
+SRC_URI[sha256sum] = "93833a2cf280b7d8abbe1b8121530413250c6cd4ffed2c1cf085f335262f7348"
diff --git a/import-layers/meta-virtualization/recipes-devtools/python/python3-docker_2.5.1.bb b/import-layers/meta-virtualization/recipes-devtools/python/python3-docker_2.5.1.bb
new file mode 100644
index 000000000..341d95cee
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/python/python3-docker_2.5.1.bb
@@ -0,0 +1,17 @@
+SUMMARY = "A Python library for the Docker Engine API."
+HOMEPAGE = "https://github.com/docker/docker-py"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=34f3846f940453127309b920eeb89660"
+
+inherit pypi setuptools3
+
+SRC_URI[md5sum] = "7d917152976df075e6e90ee853df641f"
+SRC_URI[sha256sum] = "b876e6909d8d2360e0540364c3a952a62847137f4674f2439320ede16d6db880"
+
+DEPENDS += "${PYTHON_PN}-pip-native"
+
+RDEPENDS_${PN} += " \
+ python3-docker-pycreds \
+ python3-requests \
+ python3-websocket-client \
+"
diff --git a/import-layers/meta-virtualization/recipes-devtools/python/python3-dockerpty_0.4.1.bb b/import-layers/meta-virtualization/recipes-devtools/python/python3-dockerpty_0.4.1.bb
new file mode 100644
index 000000000..a40f2a305
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/python/python3-dockerpty_0.4.1.bb
@@ -0,0 +1,9 @@
+SUMMARY = "Python library to use the pseudo-tty of a docker container"
+HOMEPAGE = "https://github.com/d11wtq/dockerpty"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=afaf767baa20ac524dc12f1071ca493a"
+
+inherit pypi setuptools3
+
+SRC_URI[md5sum] = "028bacb34536f3ee6a2ccd668c27e8e4"
+SRC_URI[sha256sum] = "69a9d69d573a0daa31bcd1c0774eeed5c15c295fe719c61aca550ed1393156ce"
diff --git a/import-layers/meta-virtualization/recipes-devtools/python/python3-docopt_0.6.2.bb b/import-layers/meta-virtualization/recipes-devtools/python/python3-docopt_0.6.2.bb
new file mode 100644
index 000000000..4637448ad
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/python/python3-docopt_0.6.2.bb
@@ -0,0 +1,2 @@
+inherit setuptools3
+require python-docopt.inc
diff --git a/import-layers/meta-virtualization/recipes-devtools/python/python3-texttable_0.9.1.bb b/import-layers/meta-virtualization/recipes-devtools/python/python3-texttable_0.9.1.bb
new file mode 100644
index 000000000..25e402f14
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/python/python3-texttable_0.9.1.bb
@@ -0,0 +1,9 @@
+SUMMARY = "module for creating simple ASCII tables"
+HOMEPAGE = "https://github.com/foutaise/texttable/"
+LICENSE = "LGPL-3.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=e6a600fd5e1d9cbde2d983680233ad02"
+
+inherit pypi setuptools3
+
+SRC_URI[md5sum] = "a712b5a5464d51c5fc43c64d9d2cd0de"
+SRC_URI[sha256sum] = "119041773ff03596b56392532f9315cb3a3116e404fd6f36e76a7dc088d95c79"
diff --git a/import-layers/meta-virtualization/recipes-devtools/python/python3-twisted_13.2.0.bb b/import-layers/meta-virtualization/recipes-devtools/python/python3-twisted_13.2.0.bb
new file mode 100644
index 000000000..98016cca3
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/python/python3-twisted_13.2.0.bb
@@ -0,0 +1,67 @@
+inherit pypi setuptools3
+require python-twisted.inc
+
+RDEPENDS_${PN}-core = "python3-core python3-zopeinterface python3-lang"
+
+FILES_${PN}-core_append += " \
+ ${libdir}/${PYTHON_DIR}/site-packages/twisted/__pycache__ \
+ ${libdir}/${PYTHON_DIR}/site-packages/twisted/python/__pycache__/*pyc \
+ ${libdir}/${PYTHON_DIR}/site-packages/twisted/plugins/__pycache__/__init__*.pyc \
+ ${libdir}/${PYTHON_DIR}/site-packages/twisted/plugins/__pycache__/notestplugin*.pyc \
+ ${libdir}/${PYTHON_DIR}/site-packages/twisted/plugins/__pycache__/testplugin*.pyc \
+ ${libdir}/${PYTHON_DIR}/site-packages/twisted/plugins/__pycache__/twisted_ftp*.pyc \
+ ${libdir}/${PYTHON_DIR}/site-packages/twisted/plugins/__pycache__/twisted_inet*.pyc \
+ ${libdir}/${PYTHON_DIR}/site-packages/twisted/plugins/__pycache__/twisted_manhole*.pyc \
+ ${libdir}/${PYTHON_DIR}/site-packages/twisted/plugins/__pycache__/twisted_portforward*.pyc \
+ ${libdir}/${PYTHON_DIR}/site-packages/twisted/plugins/__pycache__/twisted_socks*.pyc \
+ ${libdir}/${PYTHON_DIR}/site-packages/twisted/plugins/__pycache__/twisted_telnet*.pyc \
+ ${libdir}/${PYTHON_DIR}/site-packages/twisted/plugins/__pycache__/twisted_trial*.pyc \
+ ${libdir}/${PYTHON_DIR}/site-packages/twisted/plugins/__pycache__/twisted_core*.pyc \
+ ${libdir}/${PYTHON_DIR}/site-packages/twisted/plugins/__pycache__/twisted_qtstub*.pyc \
+ ${libdir}/${PYTHON_DIR}/site-packages/twisted/plugins/__pycache__/twisted_reactors*.pyc \
+ ${libdir}/${PYTHON_DIR}/site-packages/twisted/plugins/__pycache__/cred*.pyc \
+ ${libdir}/${PYTHON_DIR}/site-packages/twisted/plugins/__pycache__/dropin*.cache \
+"
+
+FILES_${PN}-names_append = " \
+ ${libdir}/${PYTHON_DIR}/site-packages/twisted/plugins/__pycache__/twisted_names*.pyc \
+"
+
+FILES_${PN}-news_append = " \
+ ${libdir}/${PYTHON_DIR}/site-packages/twisted/plugins/__pycache__/twisted_news*.pyc \
+"
+
+FILES_${PN}-protocols_append += " \
+ ${libdir}/${PYTHON_DIR}/site-packages/twisted/protocols/__pycache__/*pyc \
+"
+
+FILES_${PN}-conch_append = " \
+ ${libdir}/${PYTHON_DIR}/site-packages/twisted/plugins/__pycache__/twisted_conch*.pyc \
+"
+
+FILES_${PN}-lore_append = " \
+ ${libdir}/${PYTHON_DIR}/site-packages/twisted/plugins/__pycache__/twisted_lore*.pyc \
+"
+FILES_${PN}-mail_append = " \
+ ${libdir}/${PYTHON_DIR}/site-packages/twisted/plugins/__pycache__/twisted_mail*.pyc \
+"
+
+FILES_${PN}-web_append = " \
+ ${libdir}/${PYTHON_DIR}/site-packages/twisted/plugins/__pycache__/twisted_web*.pyc \
+"
+
+FILES_${PN}-words_append = " \
+ ${libdir}/${PYTHON_DIR}/site-packages/twisted/plugins/__pycache__/twisted_words*.pyc \
+"
+
+FILES_${PN}-flow_append = " \
+ ${libdir}/${PYTHON_DIR}/site-packages/twisted/plugins/__pycache__/twisted_flow*.pyc \
+"
+
+FILES_${PN}-pair_append = " \
+ ${libdir}/${PYTHON_DIR}/site-packages/twisted/plugins/__pycache__/twisted_pair*.pyc \
+"
+
+FILES_${PN}-runner_append = " \
+ ${libdir}/${PYTHON_DIR}/site-packages/twisted/plugins/__pycache__/twisted_runner*.pyc \
+"
diff --git a/import-layers/meta-virtualization/recipes-devtools/python/python3-websocket-client_0.44.0.bb b/import-layers/meta-virtualization/recipes-devtools/python/python3-websocket-client_0.44.0.bb
new file mode 100644
index 000000000..8dfc95f54
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/python/python3-websocket-client_0.44.0.bb
@@ -0,0 +1,11 @@
+SUMMARY = "WebSocket client for python. hybi13 is supported."
+HOMEPAGE = "https://github.com/websocket-client/websocket-client.git"
+LICENSE = "LGPL-3.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=18b09a20dd186af4fd60f1a08311628c"
+
+inherit pypi setuptools3
+
+PYPI_PACKAGE = "websocket_client"
+
+SRC_URI[md5sum] = "73d87aa16a2212da448b30aca9c5bf3b"
+SRC_URI[sha256sum] = "15f585566e2ea7459136a632b9785aa081093064391878a448c382415e948d72"
diff --git a/import-layers/meta-virtualization/recipes-extended/diod/diod_1.0.24.bb b/import-layers/meta-virtualization/recipes-extended/diod/diod_1.0.24.bb
new file mode 100644
index 000000000..a4d159ab7
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/diod/diod_1.0.24.bb
@@ -0,0 +1,32 @@
+SUMMARY = "Diod is a user space server for the kernel v9fs client."
+DESCRIPTION = "\
+Diod is a user space server for the kernel v9fs client (9p.ko, 9pnet.ko). \
+Although the kernel client supports several 9P variants, diod only supports \
+9P2000.L, and only in its feature-complete form, as it appeared in 2.6.38."
+SECTION = "console/network"
+
+LICENSE = "GPLv2"
+LIC_FILES_CHKSUM = "file://COPYING;md5=59530bdf33659b29e73d4adb9f9f6552"
+
+PV = "1.0.24+git${SRCPV}"
+SRCREV = "0ea3fe3d829b5085307cd27a512708d99ef48199"
+SRC_URI = "git://github.com/chaos/diod.git;protocol=git \
+ file://diod \
+ file://diod.conf \
+ file://0001-build-allow-builds-to-work-with-separate-build-dir.patch \
+ file://0002-auto.diod.in-remove-bashisms.patch \
+ "
+DEPENDS = "libcap ncurses tcp-wrappers lua"
+
+S = "${WORKDIR}/git"
+
+inherit autotools systemd
+
+do_install_append () {
+ # install our init based on start-stop-daemon
+ install -D -m 0755 ${WORKDIR}/diod ${D}${sysconfdir}/init.d/diod
+ # install a real (not commented) configuration file for diod
+ install -m 0644 ${WORKDIR}/diod.conf ${D}${sysconfdir}/diod.conf
+}
+
+FILES_${PN} += "${systemd_unitdir}"
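Besides the systemd unit handled by the Makefile.am patch below, the recipe installs a start-stop-daemon based sysvinit script, so on a sysvinit image the server can be driven by hand. A usage sketch, assuming the paths used in do_install_append above:

    /etc/init.d/diod start    # creates /var/log/diod and launches /usr/sbin/diod -c /etc/diod.conf
    /etc/init.d/diod status
    /etc/init.d/diod stop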
diff --git a/import-layers/meta-virtualization/recipes-extended/diod/files/0001-build-allow-builds-to-work-with-separate-build-dir.patch b/import-layers/meta-virtualization/recipes-extended/diod/files/0001-build-allow-builds-to-work-with-separate-build-dir.patch
new file mode 100644
index 000000000..f40e0eb5d
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/diod/files/0001-build-allow-builds-to-work-with-separate-build-dir.patch
@@ -0,0 +1,126 @@
+From 43403468298ef4167baa5d84de2ee2eaf7f4007a Mon Sep 17 00:00:00 2001
+From: Mark Asselstine <mark.asselstine@windriver.com>
+Date: Fri, 9 Jan 2015 18:34:04 +0000
+Subject: [PATCH 1/2] build: allow builds to work with separate build dir
+
+Remove the assumption, baked into the include paths, that the build
+happens in the source directories.
+
+Upstream-Status: Inappropriate [embedded specific]
+
+Signed-off-by: Mark Asselstine <mark.asselstine@windriver.com>
+---
+ diod/Makefile.am | 2 +-
+ libdiod/Makefile.am | 2 +-
+ liblsd/Makefile.am | 2 +-
+ libnpclient/Makefile.am | 2 +-
+ scripts/Makefile.am | 6 +++---
+ tests/kern/dbench/Makefile.am | 4 ++--
+ utils/Makefile.am | 2 +-
+ 7 files changed, 10 insertions(+), 10 deletions(-)
+
+diff --git a/diod/Makefile.am b/diod/Makefile.am
+index 7644be7..fdaf130 100644
+--- a/diod/Makefile.am
++++ b/diod/Makefile.am
+@@ -1,7 +1,7 @@
+ AM_CFLAGS = @GCCWARN@
+
+ AM_CPPFLAGS = \
+- -I../libnpfs -I../liblsd -I../libdiod
++ -I$(srcdir)/../libnpfs -I$(srcdir)/../liblsd -I$(srcdir)/../libdiod
+
+ sbin_PROGRAMS = diod
+
+diff --git a/libdiod/Makefile.am b/libdiod/Makefile.am
+index 4810e14..6905cdd 100644
+--- a/libdiod/Makefile.am
++++ b/libdiod/Makefile.am
+@@ -1,7 +1,7 @@
+ AM_CFLAGS = @GCCWARN@
+
+ AM_CPPFLAGS = \
+- -I../libnpfs -I../libnpclient -I../liblsd
++ -I$(srcdir)/../libnpfs -I$(srcdir)/../libnpclient -I$(srcdir)/../liblsd
+
+ noinst_LIBRARIES = libdiod.a
+
+diff --git a/liblsd/Makefile.am b/liblsd/Makefile.am
+index c3e5658..7e18bf8 100644
+--- a/liblsd/Makefile.am
++++ b/liblsd/Makefile.am
+@@ -1,6 +1,6 @@
+ AM_CFLAGS = @GCCWARN@
+
+-AM_CPPFLAGS = -I../libdiod
++AM_CPPFLAGS = -I$(srcdir)/../libdiod
+
+ noinst_LIBRARIES = liblsd.a
+
+diff --git a/libnpclient/Makefile.am b/libnpclient/Makefile.am
+index 5305df9..cbaf266 100644
+--- a/libnpclient/Makefile.am
++++ b/libnpclient/Makefile.am
+@@ -1,7 +1,7 @@
+ AM_CFLAGS = @GCCWARN@
+
+ AM_CPPFLAGS = \
+- -I../libnpfs
++ -I$(srcdir)/../libnpfs
+
+ noinst_LIBRARIES = libnpclient.a
+
+diff --git a/scripts/Makefile.am b/scripts/Makefile.am
+index 51c24a3..2aba728 100644
+--- a/scripts/Makefile.am
++++ b/scripts/Makefile.am
+@@ -1,9 +1,9 @@
+-systemddir=$(sysconfdir)/systemd/system
++systemddir=/lib/systemd/system
+
+ install-data-local:
+- $(top_srcdir)/config/install-sh -m 755 $(srcdir)/auto.diod \
++ $(top_srcdir)/config/install-sh -m 755 ./auto.diod \
+ $(DESTDIR)$(sysconfdir)/auto.diod
+- $(top_srcdir)/config/install-sh -m 755 $(srcdir)/diod.service \
++ $(top_srcdir)/config/install-sh -m 644 ./diod.service \
+ $(DESTDIR)$(systemddir)/diod.service
+
+ uninstall-local:
+diff --git a/tests/kern/dbench/Makefile.am b/tests/kern/dbench/Makefile.am
+index e0cdefa..1704f9f 100644
+--- a/tests/kern/dbench/Makefile.am
++++ b/tests/kern/dbench/Makefile.am
+@@ -2,7 +2,7 @@ AM_CFLAGS = -w
+
+ # VERSION=4.00
+
+-AM_CPPFLAGS = -DDATADIR=\"$(X_DATADIR)\"
++AM_CPPFLAGS = -I. -DDATADIR=\"$(X_DATADIR)\"
+
+ LDADD = $(LIBPOPT)
+
+@@ -25,6 +25,6 @@ BUILT_SOURCES = proto.h
+ CLEANFILES = proto.h
+
+ proto.h: $(dbench_SOURCES) mkproto.pl
+- perl mkproto.pl $(dbench_SOURCES) > proto.h
++ $(srcdir)/mkproto.pl $(patsubst %,$(srcdir),$(dbench_SOURCES)) > proto.h
+
+ EXTRA_DIST = mkproto.pl
+diff --git a/utils/Makefile.am b/utils/Makefile.am
+index 169b6c4..91d571d 100644
+--- a/utils/Makefile.am
++++ b/utils/Makefile.am
+@@ -1,7 +1,7 @@
+ AM_CFLAGS = @GCCWARN@
+
+ AM_CPPFLAGS = \
+- -I../libnpfs -I../liblsd -I../libdiod -I../libnpclient
++ -I$(srcdir)/../libnpfs -I$(srcdir)/../liblsd -I$(srcdir)/../libdiod -I$(srcdir)/../libnpclient
+
+ sbin_PROGRAMS = diodmount diodcat dtop diodload diodls diodshowmount dioddate
+
+--
+2.11.0
+
diff --git a/import-layers/meta-virtualization/recipes-extended/diod/files/0002-auto.diod.in-remove-bashisms.patch b/import-layers/meta-virtualization/recipes-extended/diod/files/0002-auto.diod.in-remove-bashisms.patch
new file mode 100644
index 000000000..65d6ff6b7
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/diod/files/0002-auto.diod.in-remove-bashisms.patch
@@ -0,0 +1,47 @@
+From 5a9e09dc5de833db11607530351cd87cecbfd17e Mon Sep 17 00:00:00 2001
+From: Roy Li <rongqing.li@windriver.com>
+Date: Thu, 22 Jun 2017 06:32:30 +0000
+Subject: [PATCH 2/2] auto.diod.in: remove bashisms
+
+Upstream-Status: Pending
+
+Signed-off-by: Roy Li <rongqing.li@windriver.com>
+---
+ scripts/auto.diod.in | 14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+diff --git a/scripts/auto.diod.in b/scripts/auto.diod.in
+index f63e004..4d5fc3a 100755
+--- a/scripts/auto.diod.in
++++ b/scripts/auto.diod.in
+@@ -1,4 +1,4 @@
+-#!/bin/bash
++#!/bin/sh
+ #
+ # auto.diod - executable automounter map for diod file systems
+ #
+@@ -41,15 +41,15 @@ dcatopts="${DIOD_TIMEOUT:+-t $DIOD_TIMEOUT}"
+ for server in $DIOD_SERVERS; do
+ $DIOD_DIODCAT -s $server $dcatopts exports | awk '{print $1}' |\
+ while read path; do
+- if [ "$path" == "/" ]; then
+- if [ "$key" == "ROOT" ]; then
++ if [ "$path" = "/" ]; then
++ if [ "$key" = "ROOT" ]; then
+ echo "$prefix $server:$path"
+ exit 0
+ fi
+- elif [ "$key" == "$(echo $path|sed -e's/^\///' -e's/\//./g')" ] \
+- || [ "$key" == "$(echo $path|sed -e's/^\///' -e's/\//_/g')" ] \
+- || [ "$key" == "$(echo $path|sed -e's/^\///' -e's/\//-/g')" ] \
+- || [ "$key" == "$(basename $path)" ]; then
++ elif [ "$key" = "$(echo $path|sed -e's/^\///' -e's/\//./g')" ] \
++ || [ "$key" = "$(echo $path|sed -e's/^\///' -e's/\//_/g')" ] \
++ || [ "$key" = "$(echo $path|sed -e's/^\///' -e's/\//-/g')" ] \
++ || [ "$key" = "$(basename $path)" ]; then
+ echo "$prefix $server:$path"
+ exit 0
+ fi
+--
+2.11.0
+
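The s/==/=/ changes matter because auto.diod.in now runs under #!/bin/sh: `==` inside `[ ]` is a bash extension, and a strictly POSIX shell such as dash rejects it. A minimal sketch of the difference (hypothetical snippet, not part of the patch):

    [ "$path" == "/" ] && echo match    # bashism: accepted by bash, rejected by dash
    [ "$path" = "/" ] && echo match     # POSIX form used by the patch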
diff --git a/import-layers/meta-virtualization/recipes-extended/diod/files/diod b/import-layers/meta-virtualization/recipes-extended/diod/files/diod
new file mode 100644
index 000000000..cd0bf9872
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/diod/files/diod
@@ -0,0 +1,75 @@
+#!/bin/sh
+#
+# chkconfig: 35 20 80
+# description: Distributed Input Output Daemon
+#
+
+# Get functions from the functions library
+. /etc/init.d/functions
+
+BASE=diod
+PIDFILE=/var/run/$BASE.pid
+PID=`test -f $PIDFILE && cat $PIDFILE`
+DIOD_BIN=/usr/sbin/$BASE
+DIOD_CONF=/etc/$BASE.conf
+DIOD_OPTS="-c $DIOD_CONF"
+DIOD="$DIOD_BIN $DIOD_OPTS"
+RETVAL=0
+
+# Start the service $BASE
+start()
+{
+ # Force creation of the log directory even on a tmpfs /var/log.
+ mkdir -p /var/log/diod
+
+ start-stop-daemon --stop --test --quiet --pidfile $PIDFILE
+ status=$?
+ if [ $status -eq 0 ]; then
+ echo "diod service is already running with pid $PID"
+ exit 1
+ else
+ echo -n "Starting $BASE:"
+ start-stop-daemon --start --pidfile $PIDFILE \
+ --exec $DIOD_BIN -- $DIOD_OPTS
+ RETVAL=$?
+ echo
+ [ $RETVAL -ne 0 ] && exit $RETVAL
+ fi
+}
+
+# Stop the service $BASE
+stop()
+{
+ echo -n "Stopping $BASE:"
+ start-stop-daemon --stop --test --quiet --pidfile $PIDFILE
+ status=$?
+ if [ $status -eq 0 ]; then
+ start-stop-daemon --stop --quiet --pidfile $PIDFILE
+ [ -w $PIDFILE ] && rm -f $PIDFILE
+ else
+ start-stop-daemon --stop --quiet --name $BASE
+ fi
+}
+
+
+### service arguments ###
+case $1 in
+ start)
+ start
+ ;;
+ stop)
+ stop
+ ;;
+ status)
+ status $BASE
+ ;;
+ restart | force-reload)
+ $0 stop
+ $0 start
+ ;;
+ *)
+ echo "Usage: $0 {start|stop|status|restart}."
+ exit 1
+esac
+
+exit 0
diff --git a/import-layers/meta-virtualization/recipes-extended/diod/files/diod.conf b/import-layers/meta-virtualization/recipes-extended/diod/files/diod.conf
new file mode 100644
index 000000000..e97743a0c
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/diod/files/diod.conf
@@ -0,0 +1,15 @@
+--
+-- /etc/diod.conf - config file for diod distributed I/O daemon
+--
+-- NOTE: This config file is a lua script that diod runs, then extracts
+-- the value of certain globally defined variables. See diod.conf(5).
+
+listen = { "0.0.0.0:564" }
+nwthreads = 16
+auth_required = 1
+logdest = "syslog:daemon:err"
+
+exports = { "/g/g0", "/g/g10" }
+
+allsquash = 0
+squashuser = "nobody"
diff --git a/import-layers/meta-virtualization/recipes-extended/images/cloud-image-controller.bb b/import-layers/meta-virtualization/recipes-extended/images/cloud-image-controller.bb
index 0b2a67b58..c816545f7 100644
--- a/import-layers/meta-virtualization/recipes-extended/images/cloud-image-controller.bb
+++ b/import-layers/meta-virtualization/recipes-extended/images/cloud-image-controller.bb
@@ -3,7 +3,6 @@ EXTRA_IMAGE_FEATURES = "tools-debug debug-tweaks"
IMAGE_INSTALL = "\
${CORE_IMAGE_BASE_INSTALL} \
- ${ROOTFS_PKGMANAGE_BOOTSTRAP} \
packagegroup-core-basic \
openvswitch \
openvswitch-controller \
@@ -26,9 +25,8 @@ IMAGE_INSTALL = "\
"
inherit core-image
-inherit image-vm
-IMAGE_FSTYPES = "vmdk tar.gz"
+IMAGE_FSTYPES = "wic.vmdk tar.gz"
# Ensure extra space for guest images
#IMAGE_ROOTFS_EXTRA_SPACE = "41943040"
diff --git a/import-layers/meta-virtualization/recipes-extended/images/cloud-image-guest.bb b/import-layers/meta-virtualization/recipes-extended/images/cloud-image-guest.bb
index e24bf0d49..2957506d4 100644
--- a/import-layers/meta-virtualization/recipes-extended/images/cloud-image-guest.bb
+++ b/import-layers/meta-virtualization/recipes-extended/images/cloud-image-guest.bb
@@ -3,7 +3,6 @@ EXTRA_IMAGE_FEATURES = "tools-debug debug-tweaks"
IMAGE_INSTALL = "\
${CORE_IMAGE_BASE_INSTALL} \
- ${ROOTFS_PKGMANAGE_BOOTSTRAP} \
packagegroup-core-basic \
openflow \
qemu \
@@ -13,6 +12,5 @@ IMAGE_INSTALL = "\
"
inherit core-image
-inherit image-vm
-IMAGE_FSTYPES += "vmdk"
+IMAGE_FSTYPES += "wic.vmdk"
diff --git a/import-layers/meta-virtualization/recipes-extended/images/xen-bootimg.inc b/import-layers/meta-virtualization/recipes-extended/images/xen-bootimg.inc
deleted file mode 100644
index 093aa683d..000000000
--- a/import-layers/meta-virtualization/recipes-extended/images/xen-bootimg.inc
+++ /dev/null
@@ -1,35 +0,0 @@
-SYSLINUX_TIMEOUT = "10"
-SYSLINUX_LABEL = "boot"
-SYSLINUX_XEN_APPEND = "dom0_mem=1048576"
-SYSLINUX_KERNEL_APPEND = "ramdisk_size=32768 root=/dev/ram0 rw console=tty0 console=ttyS0,115200n8"
-#LABELS_append = " ${SYSLINUX_LABEL} "
-
-INITRD = "${DEPLOY_DIR_IMAGE}/${IMAGE_BASENAME}-${MACHINE}.cpio.lzma"
-
-do_bootimg[depends] += "${IMAGE_BASENAME}:do_rootfs"
-
-inherit bootimg
-
-syslinux_populate_append() {
- install -m 0444 ${STAGING_LIBDIR}/syslinux/mboot.c32 ${HDDDIR}${SYSLINUXDIR}/mboot.c32
-}
-
-grubefi_populate_append() {
- install -m 0644 ${DEPLOY_DIR_IMAGE}/xen-${MACHINE}.gz ${DEST}${EFIDIR}/xen.gz
-}
-
-populate_append() {
- install -m 0644 ${DEPLOY_DIR_IMAGE}/xen-${MACHINE}.gz ${DEST}/xen.gz
-}
-
-build_syslinux_cfg() {
- echo ALLOWOPTIONS 1 > ${SYSLINUXCFG}
- echo SERIAL 0 115200 > ${SYSLINUXCFG}
- echo DEFAULT ${SYSLINUX_LABEL} >> ${SYSLINUXCFG}
- echo TIMEOUT ${SYSLINUX_TIMEOUT} >> ${SYSLINUXCFG}
- echo PROMPT 1 >> ${SYSLINUXCFG}
- echo LABEL ${SYSLINUX_LABEL} >> ${SYSLINUXCFG}
- echo KERNEL mboot.c32 >> ${SYSLINUXCFG}
- echo APPEND xen.gz ${SYSLINUX_XEN_APPEND} --- vmlinuz ${SYSLINUX_KERNEL_APPEND} --- initrd >> ${SYSLINUXCFG}
-}
-
diff --git a/import-layers/meta-virtualization/recipes-extended/images/xen-guest-image-minimal.bb b/import-layers/meta-virtualization/recipes-extended/images/xen-guest-image-minimal.bb
index ab7e92c37..d311eaef9 100644
--- a/import-layers/meta-virtualization/recipes-extended/images/xen-guest-image-minimal.bb
+++ b/import-layers/meta-virtualization/recipes-extended/images/xen-guest-image-minimal.bb
@@ -1,14 +1,19 @@
DESCRIPTION = "A Xen guest image."
-inherit core-image
+inherit core-image distro_features_check
IMAGE_INSTALL += " \
packagegroup-core-boot \
${@bb.utils.contains('MACHINE_FEATURES', 'acpi', 'kernel-module-xen-acpi-processor', '', d)} \
"
-IMAGE_INSTALL += "${@bb.utils.contains('DISTRO_FEATURES', 'x11', ' xf86-video-fbdev', '', d)}"
-IMAGE_INSTALL += "${@bb.utils.contains('DISTRO_FEATURES', 'x11', ' xf86-video-vesa', '', d)}"
+IMAGE_INSTALL += "${@bb.utils.contains('IMAGE_FEATURES', 'x11', ' xf86-video-fbdev', '', d)}"
+
+# Install xf86-video-vesa on x86 platforms.
+IMAGE_INSTALL_append_x86-64 = "${@bb.utils.contains('IMAGE_FEATURES', 'x11', ' xf86-video-vesa', '', d)}"
+IMAGE_INSTALL_append_x86 = "${@bb.utils.contains('IMAGE_FEATURES', 'x11', ' xf86-video-vesa', '', d)}"
+
+REQUIRED_DISTRO_FEATURES += "${@bb.utils.contains('IMAGE_FEATURES', 'x11', ' x11', '', d)} xen"
LICENSE = "MIT"
diff --git a/import-layers/meta-virtualization/recipes-extended/images/xen-image-minimal.bb b/import-layers/meta-virtualization/recipes-extended/images/xen-image-minimal.bb
index b8c200220..c39d37826 100644
--- a/import-layers/meta-virtualization/recipes-extended/images/xen-image-minimal.bb
+++ b/import-layers/meta-virtualization/recipes-extended/images/xen-image-minimal.bb
@@ -10,12 +10,17 @@ IMAGE_INSTALL += " \
kernel-module-xen-gntalloc \
kernel-module-xen-gntdev \
kernel-module-xen-netback \
- ${@bb.utils.contains('MACHINE_FEATURES', 'pci', 'kernel-module-xen-pciback', '', d)} \
+ ${@bb.utils.contains('MACHINE_FEATURES', 'pci', "${XEN_PCIBACK_MODULE}", '', d)} \
kernel-module-xen-wdt \
xen-base \
qemu \
"
+# Linux kernel option CONFIG_XEN_PCIDEV_BACKEND depends on X86
+XEN_PCIBACK_MODULE = ""
+XEN_PCIBACK_MODULE_x86 = "kernel-module-xen-pciback"
+XEN_PCIBACK_MODULE_x86-64 = "kernel-module-xen-pciback"
+
LICENSE = "MIT"
inherit core-image
@@ -42,7 +47,7 @@ grubefi_populate_append() {
install -m 0644 ${DEPLOY_DIR_IMAGE}/xen-${MACHINE}.gz ${DEST}${EFIDIR}/xen.gz
}
-populate_append() {
+syslinux_populate_append() {
install -m 0644 ${DEPLOY_DIR_IMAGE}/xen-${MACHINE}.gz ${DEST}/xen.gz
}
@@ -50,12 +55,12 @@ SYSLINUX_XEN_ARGS ?= "loglvl=all guest_loglvl=all console=com1,vga com1=115200,8
SYSLINUX_KERNEL_ARGS ?= "ramdisk_size=32768 root=/dev/ram0 rw console=hvc0 earlyprintk=xen console=tty0 panic=10 LABEL=boot debugshell=5"
build_syslinux_cfg () {
- echo "ALLOWOPTIONS 1" > ${SYSLINUXCFG}
- echo "DEFAULT boot" >> ${SYSLINUXCFG}
- echo "TIMEOUT 10" >> ${SYSLINUXCFG}
- echo "PROMPT 1" >> ${SYSLINUXCFG}
- echo "LABEL boot" >> ${SYSLINUXCFG}
- echo " KERNEL mboot.c32" >> ${SYSLINUXCFG}
- echo " APPEND /xen.gz ${SYSLINUX_XEN_ARGS} --- /vmlinuz ${SYSLINUX_KERNEL_ARGS} --- /initrd" >> ${SYSLINUXCFG}
+ echo "ALLOWOPTIONS 1" > ${SYSLINUX_CFG}
+ echo "DEFAULT boot" >> ${SYSLINUX_CFG}
+ echo "TIMEOUT 10" >> ${SYSLINUX_CFG}
+ echo "PROMPT 1" >> ${SYSLINUX_CFG}
+ echo "LABEL boot" >> ${SYSLINUX_CFG}
+ echo " KERNEL mboot.c32" >> ${SYSLINUX_CFG}
+ echo " APPEND /xen.gz ${SYSLINUX_XEN_ARGS} --- /vmlinuz ${SYSLINUX_KERNEL_ARGS} --- /initrd" >> ${SYSLINUX_CFG}
}
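With the default SYSLINUX_XEN_ARGS and SYSLINUX_KERNEL_ARGS shown above, the rewritten build_syslinux_cfg() writes a ${SYSLINUX_CFG} roughly like the following (illustrative, values substituted by hand):

    ALLOWOPTIONS 1
    DEFAULT boot
    TIMEOUT 10
    PROMPT 1
    LABEL boot
     KERNEL mboot.c32
     APPEND /xen.gz loglvl=all guest_loglvl=all console=com1,vga com1=115200,8n1 --- /vmlinuz ramdisk_size=32768 root=/dev/ram0 rw console=hvc0 earlyprintk=xen console=tty0 panic=10 LABEL=boot debugshell=5 --- /initrd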
diff --git a/import-layers/meta-virtualization/recipes-extended/ipxe/ipxe_git.bb b/import-layers/meta-virtualization/recipes-extended/ipxe/ipxe_git.bb
index d52c5675f..c0c8a7b78 100644
--- a/import-layers/meta-virtualization/recipes-extended/ipxe/ipxe_git.bb
+++ b/import-layers/meta-virtualization/recipes-extended/ipxe/ipxe_git.bb
@@ -8,7 +8,7 @@ SRCREV = "8c43891db4eb131d019360ccfb619f235b17eb58"
PV = "gitr${SRCPV}"
PR = "r0"
-SRC_URI = "git://git.ipxe.org/ipxe.git;protocol=git"
+SRC_URI = "git://git.ipxe.org/ipxe.git;protocol=https"
FILES_${PN} = "/usr/share/firmware/*.rom"
diff --git a/import-layers/meta-virtualization/recipes-extended/kvmtool/files/0001-kvmtool-9p-fixed-compilation-error.patch b/import-layers/meta-virtualization/recipes-extended/kvmtool/files/0001-kvmtool-9p-fixed-compilation-error.patch
new file mode 100644
index 000000000..63911fc27
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/kvmtool/files/0001-kvmtool-9p-fixed-compilation-error.patch
@@ -0,0 +1,27 @@
+From bcd954ffdb9383030e02d356b51e09e4e2a7105a Mon Sep 17 00:00:00 2001
+From: Dariusz Pelowski <dariusz.pelowski@gmail.com>
+Date: Sun, 5 Nov 2017 12:39:52 +0100
+Subject: [PATCH 1/2] kvmtool: 9p: fixed compilation error
+
+makedev is defined in sys/sysmacros.h
+
+Signed-off-by: Dariusz Pelowski <dariusz.pelowski@gmail.com>
+---
+ virtio/9p.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/virtio/9p.c b/virtio/9p.c
+index 6acbfdd..1dee2c2 100644
+--- a/virtio/9p.c
++++ b/virtio/9p.c
+@@ -15,6 +15,7 @@
+ #include <string.h>
+ #include <errno.h>
+ #include <sys/vfs.h>
++#include <sys/sysmacros.h>
+
+ #include <linux/virtio_ring.h>
+ #include <linux/virtio_9p.h>
+--
+2.15.0
+
diff --git a/import-layers/meta-virtualization/recipes-extended/kvmtool/files/0002-kvmtool-add-EXTRA_CFLAGS-variable.patch b/import-layers/meta-virtualization/recipes-extended/kvmtool/files/0002-kvmtool-add-EXTRA_CFLAGS-variable.patch
new file mode 100644
index 000000000..262531caa
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/kvmtool/files/0002-kvmtool-add-EXTRA_CFLAGS-variable.patch
@@ -0,0 +1,29 @@
+From 977a4d41012d1814f5a5330cacc2e4944de387cc Mon Sep 17 00:00:00 2001
+From: Dariusz Pelowski <dariusz.pelowski@gmail.com>
+Date: Sun, 5 Nov 2017 12:39:21 +0100
+Subject: [PATCH 2/2] kvmtool: add EXTRA_CFLAGS variable
+
+To avoid overriding CFLAGS, introduce a new EXTRA_CFLAGS variable
+that can be set via a command-line argument.
+
+Signed-off-by: Dariusz Pelowski <dariusz.pelowski@gmail.com>
+---
+ Makefile | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/Makefile b/Makefile
+index 64a0a16..bca1b26 100644
+--- a/Makefile
++++ b/Makefile
+@@ -2,6 +2,8 @@
+ # Define WERROR=0 to disable -Werror.
+ #
+
++CFLAGS += $(EXTRA_CFLAGS)
++
+ ifeq ($(strip $(V)),)
+ E = @echo
+ Q = @
+--
+2.15.0
+
diff --git a/import-layers/meta-virtualization/recipes-extended/kvmtool/kvmtool.bb b/import-layers/meta-virtualization/recipes-extended/kvmtool/kvmtool.bb
index b3bf2029d..33fec3118 100644
--- a/import-layers/meta-virtualization/recipes-extended/kvmtool/kvmtool.bb
+++ b/import-layers/meta-virtualization/recipes-extended/kvmtool/kvmtool.bb
@@ -5,10 +5,15 @@ LICENSE = "GPLv2"
LIC_FILES_CHKSUM = "file://COPYING;md5=fcb02dc552a041dee27e4b85c7396067"
DEPENDS = "dtc libaio zlib"
+do_configure[depends] += "virtual/kernel:do_shared_workdir"
+
+inherit kernel-arch
SRC_URI = "git://git.kernel.org/pub/scm/linux/kernel/git/will/kvmtool.git \
file://external-crosscompiler.patch \
file://0001-Avoid-pointers-for-address-of-packed-members.patch \
+ file://0001-kvmtool-9p-fixed-compilation-error.patch \
+ file://0002-kvmtool-add-EXTRA_CFLAGS-variable.patch \
"
SRCREV = "3fea89a924511f9f8fe05a892098fad77c1eca0d"
@@ -16,7 +21,7 @@ PV = "3.18.0+git${SRCREV}"
S = "${WORKDIR}/git"
-EXTRA_OEMAKE='ARCH="${TARGET_ARCH}" V=1'
+EXTRA_OEMAKE='V=1 EXTRA_CFLAGS="-I${STAGING_KERNEL_DIR}/arch/${ARCH}/include -I${STAGING_KERNEL_BUILDDIR}/arch/${ARCH}/include/generated/"'
do_install() {
install -d ${D}${bindir}
diff --git a/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt_1.3.5.bb b/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt_1.3.5.bb
index 19b7567d5..8ed2505e4 100644
--- a/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt_1.3.5.bb
+++ b/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt_1.3.5.bb
@@ -14,7 +14,7 @@ DEPENDS = "bridge-utils gnutls libxml2 lvm2 avahi parted curl libpcap util-linux
#
RDEPENDS_${PN} = "gettext-runtime"
-RDEPENDS_${PN}-ptest += "make gawk"
+RDEPENDS_${PN}-ptest += "make gawk perl"
RDEPENDS_libvirt-libvirtd += "bridge-utils iptables pm-utils dnsmasq netcat-openbsd"
RDEPENDS_libvirt-libvirtd_append_x86-64 = " dmidecode"
diff --git a/import-layers/meta-virtualization/recipes-extended/seabios/seabios_1.9.1.bb b/import-layers/meta-virtualization/recipes-extended/seabios/seabios_1.9.1.bb
index 5c095ac28..144b13c35 100644
--- a/import-layers/meta-virtualization/recipes-extended/seabios/seabios_1.9.1.bb
+++ b/import-layers/meta-virtualization/recipes-extended/seabios/seabios_1.9.1.bb
@@ -4,7 +4,7 @@ LICENSE = "LGPLv3"
SECTION = "firmware"
SRC_URI = " \
- http://code.coreboot.org/p/seabios/downloads/get/${PN}-${PV}.tar.gz \
+ https://code.coreboot.org/p/seabios/downloads/get/${PN}-${PV}.tar.gz \
file://hostcc.patch \
"
diff --git a/import-layers/meta-virtualization/recipes-extended/vgabios/biossums_0.7a.bb b/import-layers/meta-virtualization/recipes-extended/vgabios/biossums_0.7a.bb
new file mode 100644
index 000000000..e66bade7f
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/vgabios/biossums_0.7a.bb
@@ -0,0 +1,37 @@
+DESCRIPTION = "biossums tool for building Plex86/Bochs LGPL VGABios"
+HOMEPAGE = "http://www.nongnu.org/vgabios/"
+LICENSE = "LGPLv2.1"
+SECTION = "firmware"
+
+LIC_FILES_CHKSUM = "file://COPYING;md5=dcf3c825659e82539645da41a7908589"
+
+SRC_URI = "http://savannah.gnu.org/download/vgabios/vgabios-${PV}.tgz"
+
+SRC_URI[md5sum] = "2c0fe5c0ca08082a9293e3a7b23dc900"
+SRC_URI[sha256sum] = "9d24c33d4bfb7831e2069cf3644936a53ef3de21d467872b54ce2ea30881b865"
+
+BBCLASSEXTEND = "native"
+
+FILES_${PN} = "${bindir}/biossums"
+
+S = "${WORKDIR}/vgabios-${PV}"
+
+do_configure() {
+ # Don't override the compiler or its flags:
+ sed 's,^CC,DISABLED_CC,' -i Makefile
+ sed 's,^CFLAGS,DISABLED_CFLAGS,' -i Makefile
+ sed 's,^LDFLAGS,DISABLED_LDFLAGS,' -i Makefile
+ # Supply the C flags to the compiler:
+ sed 's,-o biossums,$(CFLAGS) -o biossums,' -i Makefile
+}
+
+do_compile() {
+ # clean removes binaries distributed with source
+ oe_runmake clean
+ oe_runmake biossums
+}
+
+do_install() {
+ mkdir -p "${D}${bindir}"
+ install -m 0755 biossums "${D}${bindir}"
+}
diff --git a/import-layers/meta-virtualization/recipes-extended/vgabios/vgabios_0.7a.bb b/import-layers/meta-virtualization/recipes-extended/vgabios/vgabios_0.7a.bb
index 0ed8bb4bd..8c8904194 100644
--- a/import-layers/meta-virtualization/recipes-extended/vgabios/vgabios_0.7a.bb
+++ b/import-layers/meta-virtualization/recipes-extended/vgabios/vgabios_0.7a.bb
@@ -3,7 +3,7 @@ HOMEPAGE = "http://www.nongnu.org/vgabios/"
LICENSE = "LGPLv2.1"
SECTION = "firmware"
-DEPENDS = "dev86-native"
+DEPENDS = "dev86-native biossums-native"
LIC_FILES_CHKSUM = "file://COPYING;md5=dcf3c825659e82539645da41a7908589"
@@ -20,7 +20,9 @@ FILES_${PN}-dbg = "/usr/share/firmware/${PN}-${PV}*.debug.bin"
S = "${WORKDIR}/${PN}-${PV}"
do_configure() {
- echo "Skip do_configure"
+ # Override to use the native-built biossums tool:
+ sed 's,./biossums,biossums,' -i Makefile
+ sed 's,$(CC) -o biossums biossums.c,touch biossums,' -i Makefile
}
do_install() {
diff --git a/import-layers/meta-virtualization/recipes-extended/xen/files/fix-libxc-xc_dom_arm-missing-initialization.patch b/import-layers/meta-virtualization/recipes-extended/xen/files/fix-libxc-xc_dom_arm-missing-initialization.patch
new file mode 100644
index 000000000..05016a7a3
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/xen/files/fix-libxc-xc_dom_arm-missing-initialization.patch
@@ -0,0 +1,36 @@
+commit 88bfbf90e35f1213f9967a97dee0b2039f9998a4
+Author: Bernd Kuhls <bernd.kuhls@t-online.de>
+Date: Sat Aug 19 16:21:42 2017 +0200
+
+ tools/libxc/xc_dom_arm: add missing variable initialization
+
+ The variable domctl.u.address_size.size may remain uninitialized if
+ guest_type is not one of xen-3.0-aarch64 or xen-3.0-armv7l. And the
+ code precisely checks if this variable is still 0 to decide if the
+ guest type is supported or not.
+
+ This fixes the following build failure with gcc 7.x:
+
+ xc_dom_arm.c:229:31: error: 'domctl.u.address_size.size' may be used uninitialized in this function [-Werror=maybe-uninitialized]
+ if ( domctl.u.address_size.size == 0 )
+
+ Patch originally taken from
+ https://www.mail-archive.com/xen-devel@lists.xen.org/msg109313.html.
+
+ Signed-off-by: Bernd Kuhls <bernd.kuhls@t-online.de>
+ Signed-off-by: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ Acked-by: Wei Liu <wei.liu2@citrix.com>
+
+diff --git a/tools/libxc/xc_dom_arm.c b/tools/libxc/xc_dom_arm.c
+index e7d4bd0..e669fb0 100644
+--- a/tools/libxc/xc_dom_arm.c
++++ b/tools/libxc/xc_dom_arm.c
+@@ -223,6 +223,8 @@ static int set_mode(xc_interface *xch, domid_t domid, char *guest_type)
+
+ domctl.domain = domid;
+ domctl.cmd = XEN_DOMCTL_set_address_size;
++ domctl.u.address_size.size = 0;
++
+ for ( i = 0; i < ARRAY_SIZE(types); i++ )
+ if ( !strcmp(types[i].guest, guest_type) )
+ domctl.u.address_size.size = types[i].size;
diff --git a/import-layers/meta-virtualization/recipes-extended/xen/xen.inc b/import-layers/meta-virtualization/recipes-extended/xen/xen.inc
index 37fb4ce37..cb314f88b 100644
--- a/import-layers/meta-virtualization/recipes-extended/xen/xen.inc
+++ b/import-layers/meta-virtualization/recipes-extended/xen/xen.inc
@@ -914,6 +914,9 @@ do_configure() {
# do configure
oe_runconf
+ if [ ! -e ${STAGING_INCDIR}/bits/long-double-32.h ]; then
+ cp ${STAGING_INCDIR}/bits/long-double-64.h ${STAGING_INCDIR}/bits/long-double-32.h
+ fi
}
do_compile() {
diff --git a/import-layers/meta-virtualization/recipes-extended/xen/xen_4.8.0.bb b/import-layers/meta-virtualization/recipes-extended/xen/xen_4.8.0.bb
deleted file mode 100644
index 35c91373f..000000000
--- a/import-layers/meta-virtualization/recipes-extended/xen/xen_4.8.0.bb
+++ /dev/null
@@ -1,10 +0,0 @@
-require xen.inc
-
-SRC_URI = " \
- https://downloads.xenproject.org/release/xen/${PV}/xen-${PV}.tar.gz \
- "
-
-SRC_URI[md5sum] = "d738f7c741110342621cb8a4d10b0191"
-SRC_URI[sha256sum] = "1e15c713ab7ba3bfda8b4a285ed973529364fd1100e6dd5a61f29583dc667b04"
-
-S = "${WORKDIR}/xen-${PV}"
diff --git a/import-layers/meta-virtualization/recipes-extended/xen/xen_4.9.0.bb b/import-layers/meta-virtualization/recipes-extended/xen/xen_4.9.0.bb
new file mode 100644
index 000000000..8e9c8024b
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/xen/xen_4.9.0.bb
@@ -0,0 +1,12 @@
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+require xen.inc
+
+SRC_URI = " \
+ https://downloads.xenproject.org/release/xen/${PV}/xen-${PV}.tar.gz \
+ file://fix-libxc-xc_dom_arm-missing-initialization.patch \
+ "
+
+SRC_URI[md5sum] = "f0a753637630f982dfbdb64121fd71e1"
+SRC_URI[sha256sum] = "cade643fe3310d4d6f97d0c215c6fa323bc1130d7e64d7e2043ffaa73a96f33b"
+
+S = "${WORKDIR}/xen-${PV}"
diff --git a/import-layers/meta-virtualization/recipes-graphics/xorg-xserver/xserver-xorg_%.bbappend b/import-layers/meta-virtualization/recipes-graphics/xorg-xserver/xserver-xorg_%.bbappend
new file mode 100644
index 000000000..95034045b
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-graphics/xorg-xserver/xserver-xorg_%.bbappend
@@ -0,0 +1,13 @@
+# Change the default Xserver OpenGL configuration for non-x86 distros with Xen: deselect 'glamor'.
+# This removes the dependency on libegl to simplify the domU build.
+#
+# To override this (e.g. if wanted for dom0 images) define:
+# REMOVED_OPENGL_PKGCONFIGS = ""
+
+XEN_REMOVED_OPENGL_PKGCONFIGS ?= "glamor"
+XEN_REMOVED_OPENGL_PKGCONFIGS_x86 = ""
+XEN_REMOVED_OPENGL_PKGCONFIGS_x86-64 = ""
+
+REMOVED_OPENGL_PKGCONFIGS ?= "${@bb.utils.contains('DISTRO_FEATURES', 'xen', "${XEN_REMOVED_OPENGL_PKGCONFIGS}", '', d)}"
+
+OPENGL_PKGCONFIGS_remove = "${REMOVED_OPENGL_PKGCONFIGS}"
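A usage sketch of the override hook above: to keep glamor enabled (for example in a dom0 build), set the variable empty in local.conf or a distro configuration file, exactly as the comment suggests:

    # local.conf or a distro .conf: re-enable glamor despite the xen DISTRO_FEATURE
    REMOVED_OPENGL_PKGCONFIGS = ""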
diff --git a/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto/docker.cfg b/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto/docker.cfg
new file mode 100644
index 000000000..4ee8e3699
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto/docker.cfg
@@ -0,0 +1,12 @@
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_NF_NAT=m
+CONFIG_NF_CONNTRACK_IPV4=y
+
+CONFIG_DM_THIN_PROVISIONING=m
+
+
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+
+CONFIG_OVERLAY_FS=y
diff --git a/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto/docker.scc b/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto/docker.scc
new file mode 100644
index 000000000..e317456cb
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto/docker.scc
@@ -0,0 +1,4 @@
+define KFEATURE_DESCRIPTION "Enable features needed by docker in addition to LXC features"
+define KFEATURE_COMPATIBILITY board
+
+kconf non-hardware docker.cfg
diff --git a/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto/lxc.cfg b/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto/lxc.cfg
index fa2344a76..c25d60f97 100644
--- a/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto/lxc.cfg
+++ b/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto/lxc.cfg
@@ -15,7 +15,7 @@ CONFIG_PID_NS=y
CONFIG_NET_NS=y
CONFIG_CLS_CGROUP=m
-CONFIG_BLK_CGROUP=m
+CONFIG_BLK_CGROUP=y
CONFIG_NETPRIO_CGROUP=m
CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
diff --git a/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto_4.1.bbappend b/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto_4.1.bbappend
deleted file mode 100644
index f3be89ed0..000000000
--- a/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto_4.1.bbappend
+++ /dev/null
@@ -1,19 +0,0 @@
-FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"
-
-SRC_URI += "file://xt-checksum.scc \
- file://ebtables.scc \
- file://vswitch.scc \
- file://lxc.scc \
- "
-KERNEL_FEATURES_append = " features/kvm/qemu-kvm-enable.scc"
-
-KERNEL_MODULE_AUTOLOAD += "openvswitch"
-KERNEL_MODULE_AUTOLOAD += "kvm"
-KERNEL_MODULE_AUTOLOAD += "kvm-amd"
-KERNEL_MODULE_AUTOLOAD += "kvm-intel"
-
-# aufs kernel support required for xen-image-minimal
-KERNEL_FEATURES_append += "${@bb.utils.contains('DISTRO_FEATURES', 'aufs', ' features/aufs/aufs-enable.scc', '', d)}"
-
-# xen kernel support
-SRC_URI += "${@bb.utils.contains('DISTRO_FEATURES', 'xen', ' file://xen.scc', '', d)}"
diff --git a/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto_4.10.bbappend b/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto_4.10.bbappend
index f3be89ed0..617caccbd 100644
--- a/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto_4.10.bbappend
+++ b/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto_4.10.bbappend
@@ -1,19 +1 @@
-FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"
-
-SRC_URI += "file://xt-checksum.scc \
- file://ebtables.scc \
- file://vswitch.scc \
- file://lxc.scc \
- "
-KERNEL_FEATURES_append = " features/kvm/qemu-kvm-enable.scc"
-
-KERNEL_MODULE_AUTOLOAD += "openvswitch"
-KERNEL_MODULE_AUTOLOAD += "kvm"
-KERNEL_MODULE_AUTOLOAD += "kvm-amd"
-KERNEL_MODULE_AUTOLOAD += "kvm-intel"
-
-# aufs kernel support required for xen-image-minimal
-KERNEL_FEATURES_append += "${@bb.utils.contains('DISTRO_FEATURES', 'aufs', ' features/aufs/aufs-enable.scc', '', d)}"
-
-# xen kernel support
-SRC_URI += "${@bb.utils.contains('DISTRO_FEATURES', 'xen', ' file://xen.scc', '', d)}"
+require ${@bb.utils.contains('DISTRO_FEATURES', 'virtualization', '${BPN}_virtualization.inc', '', d)}
diff --git a/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto_4.12.bbappend b/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto_4.12.bbappend
new file mode 100644
index 000000000..617caccbd
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto_4.12.bbappend
@@ -0,0 +1 @@
+require ${@bb.utils.contains('DISTRO_FEATURES', 'virtualization', '${BPN}_virtualization.inc', '', d)}
diff --git a/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto_4.4.bbappend b/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto_4.4.bbappend
index f3be89ed0..617caccbd 100644
--- a/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto_4.4.bbappend
+++ b/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto_4.4.bbappend
@@ -1,19 +1 @@
-FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"
-
-SRC_URI += "file://xt-checksum.scc \
- file://ebtables.scc \
- file://vswitch.scc \
- file://lxc.scc \
- "
-KERNEL_FEATURES_append = " features/kvm/qemu-kvm-enable.scc"
-
-KERNEL_MODULE_AUTOLOAD += "openvswitch"
-KERNEL_MODULE_AUTOLOAD += "kvm"
-KERNEL_MODULE_AUTOLOAD += "kvm-amd"
-KERNEL_MODULE_AUTOLOAD += "kvm-intel"
-
-# aufs kernel support required for xen-image-minimal
-KERNEL_FEATURES_append += "${@bb.utils.contains('DISTRO_FEATURES', 'aufs', ' features/aufs/aufs-enable.scc', '', d)}"
-
-# xen kernel support
-SRC_URI += "${@bb.utils.contains('DISTRO_FEATURES', 'xen', ' file://xen.scc', '', d)}"
+require ${@bb.utils.contains('DISTRO_FEATURES', 'virtualization', '${BPN}_virtualization.inc', '', d)}
diff --git a/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto_4.9.bbappend b/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto_4.9.bbappend
index f3be89ed0..617caccbd 100644
--- a/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto_4.9.bbappend
+++ b/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto_4.9.bbappend
@@ -1,19 +1 @@
-FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"
-
-SRC_URI += "file://xt-checksum.scc \
- file://ebtables.scc \
- file://vswitch.scc \
- file://lxc.scc \
- "
-KERNEL_FEATURES_append = " features/kvm/qemu-kvm-enable.scc"
-
-KERNEL_MODULE_AUTOLOAD += "openvswitch"
-KERNEL_MODULE_AUTOLOAD += "kvm"
-KERNEL_MODULE_AUTOLOAD += "kvm-amd"
-KERNEL_MODULE_AUTOLOAD += "kvm-intel"
-
-# aufs kernel support required for xen-image-minimal
-KERNEL_FEATURES_append += "${@bb.utils.contains('DISTRO_FEATURES', 'aufs', ' features/aufs/aufs-enable.scc', '', d)}"
-
-# xen kernel support
-SRC_URI += "${@bb.utils.contains('DISTRO_FEATURES', 'xen', ' file://xen.scc', '', d)}"
+require ${@bb.utils.contains('DISTRO_FEATURES', 'virtualization', '${BPN}_virtualization.inc', '', d)}
diff --git a/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto_virtualization.inc b/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto_virtualization.inc
new file mode 100644
index 000000000..9905ed9b2
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto_virtualization.inc
@@ -0,0 +1,20 @@
+FILESEXTRAPATHS_prepend := "${THISDIR}/linux-yocto:"
+
+SRC_URI += "file://xt-checksum.scc \
+ file://ebtables.scc \
+ file://vswitch.scc \
+ file://lxc.scc \
+ file://docker.scc \
+ "
+KERNEL_FEATURES_append = " ${@bb.utils.contains('DISTRO_FEATURES', 'kvm', 'features/kvm/qemu-kvm-enable.scc', '', d)}"
+
+KERNEL_MODULE_AUTOLOAD += "openvswitch"
+KERNEL_MODULE_AUTOLOAD += "${@bb.utils.contains('DISTRO_FEATURES', 'kvm', 'kvm', '', d)}"
+KERNEL_MODULE_AUTOLOAD += "${@bb.utils.contains('DISTRO_FEATURES', 'kvm', 'kvm-amd', '', d)}"
+KERNEL_MODULE_AUTOLOAD += "${@bb.utils.contains('DISTRO_FEATURES', 'kvm', 'kvm-intel', '', d)}"
+
+# aufs kernel support required for xen-image-minimal
+KERNEL_FEATURES_append += "${@bb.utils.contains('DISTRO_FEATURES', 'aufs', ' features/aufs/aufs-enable.scc', '', d)}"
+
+# xen kernel support
+SRC_URI += "${@bb.utils.contains('DISTRO_FEATURES', 'xen', ' file://xen.scc', '', d)}"
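The kvm, xen and aufs pieces above are all gated on DISTRO_FEATURES, and the per-version bbappends only pull this file in when 'virtualization' is present. A minimal sketch of a distro/local.conf fragment that would activate every conditional (feature names taken from the bb.utils.contains() checks; hypothetical, adjust to the target):

    # Hypothetical distro or local.conf snippet:
    DISTRO_FEATURES_append = " virtualization kvm xen aufs"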
diff --git a/import-layers/meta-virtualization/recipes-networking/cni/cni_git.bb b/import-layers/meta-virtualization/recipes-networking/cni/cni_git.bb
new file mode 100644
index 000000000..427a812f8
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-networking/cni/cni_git.bb
@@ -0,0 +1,95 @@
+HOMEPAGE = "https://github.com/containernetworking/cni"
+SUMMARY = "Container Network Interface - networking for Linux containers"
+DESCRIPTION = "CNI (Container Network Interface), a Cloud Native Computing \
+Foundation project, consists of a specification and libraries for writing \
+plugins to configure network interfaces in Linux containers, along with a \
+number of supported plugins. CNI concerns itself only with network connectivity \
+of containers and removing allocated resources when the container is deleted. \
+Because of this focus, CNI has a wide range of support and the specification \
+is simple to implement. \
+"
+
+SRCREV_cni = "4b9e11a5266fe50222ed00c5973c6ea4a384a4bb"
+SRCREV_plugins = "c238c93b5e7c681f1935ff813b30e82f96f6c367"
+SRC_URI = "\
+ git://github.com/containernetworking/cni.git;nobranch=1;name=cni \
+ git://github.com/containernetworking/plugins.git;nobranch=1;destsuffix=plugins;name=plugins \
+ "
+
+RPROVIDES_${PN} += "kubernetes-cni"
+
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://src/import/LICENSE;md5=fa818a259cbed7ce8bc2a22d35a464fc"
+
+GO_IMPORT = "import"
+
+PV = "0.6.0+git${SRCREV_cni}"
+
+inherit go
+inherit goarch
+
+do_compile() {
+ export GOARCH="${TARGET_GOARCH}"
+ export GOROOT="${STAGING_LIBDIR_NATIVE}/${TARGET_SYS}/go"
+ export GOPATH="${S}/src/import:${S}/src/import/vendor"
+
+ # Pass the needed cflags/ldflags so that cgo
+ # can find the needed header files and libraries
+ export CGO_ENABLED="1"
+ export CFLAGS=""
+ export LDFLAGS=""
+ export CGO_CFLAGS="${BUILDSDK_CFLAGS} --sysroot=${STAGING_DIR_TARGET}"
+ export CGO_LDFLAGS="${BUILDSDK_LDFLAGS} --sysroot=${STAGING_DIR_TARGET}"
+
+ # link fixups for compilation
+ rm -f ${S}/src/import/vendor/src
+ ln -sf ./ ${S}/src/import/vendor/src
+ rm -rf ${S}/src/import/plugins
+ rm -rf ${S}/src/import/vendor/github.com/containernetworking/plugins
+
+ mkdir -p ${S}/src/import/vendor/github.com/containernetworking/cni
+
+ ln -sf ../../../../libcni ${S}/src/import/vendor/github.com/containernetworking/cni/libcni
+ ln -sf ../../../../pkg ${S}/src/import/vendor/github.com/containernetworking/cni/pkg
+ ln -sf ../../../../cnitool ${S}/src/import/vendor/github.com/containernetworking/cni/cnitool
+ ln -sf ${WORKDIR}/plugins ${S}/src/import/vendor/github.com/containernetworking/plugins
+
+ export GOPATH="${S}/src/import/.gopath:${S}/src/import/vendor:${STAGING_DIR_TARGET}/${prefix}/local/go"
+ export GOROOT="${STAGING_DIR_NATIVE}/${nonarch_libdir}/${HOST_SYS}/go"
+
+ # Pass the needed cflags/ldflags so that cgo
+ # can find the needed header files and libraries
+ export CGO_ENABLED="1"
+ export CGO_CFLAGS="${CFLAGS} --sysroot=${STAGING_DIR_TARGET}"
+ export CGO_LDFLAGS="${LDFLAGS} --sysroot=${STAGING_DIR_TARGET}"
+
+ cd ${S}/src/import/vendor/github.com/containernetworking/cni/libcni
+ go build
+
+ cd ${S}/src/import/vendor/github.com/containernetworking/cni/cnitool
+ go build
+
+ cd ${S}/src/import/vendor/github.com/containernetworking/plugins/
+ PLUGINS="plugins/meta/* plugins/main/*"
+ mkdir -p ${WORKDIR}/plugins/bin/
+ for p in $PLUGINS; do
+ plugin="$(basename "$p")"
+ echo "building: $p"
+ go build -o ${WORKDIR}/plugins/bin/$plugin github.com/containernetworking/plugins/$p
+ done
+}
+
+do_install() {
+ localbindir="/opt/cni/bin"
+
+ install -d ${D}${localbindir}
+ install -d ${D}/${sysconfdir}/cni/net.d
+
+ install -m 755 ${S}/src/import/cnitool/cnitool ${D}/${localbindir}
+ install -m 755 -D ${WORKDIR}/plugins/bin/* ${D}/${localbindir}
+}
+
+FILES_${PN} += "/opt/cni/bin/*"
+
+INHIBIT_PACKAGE_STRIP = "1"
+INSANE_SKIP_${PN} += "ldflags already-stripped"
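For orientation, the do_compile() loop above iterates over every plugin directory under plugins/meta/ and plugins/main/ in the containernetworking/plugins checkout and names each output binary after its directory. A rough Python rendering of that enumeration, illustrative only (the recipe itself does this in shell and invokes go build):

import glob
import os

# Paths are illustrative and assume the upstream plugins repository layout.
plugins_src = "plugins"                            # stands in for the 'plugins' checkout
outdir = os.path.join("build", "plugins", "bin")   # stands in for ${WORKDIR}/plugins/bin

for path in sorted(glob.glob(os.path.join(plugins_src, "meta", "*")) +
                   glob.glob(os.path.join(plugins_src, "main", "*"))):
    plugin = os.path.basename(path)                # e.g. "bridge", "portmap"
    print("building:", path)
    # The recipe then runs:
    #   go build -o ${WORKDIR}/plugins/bin/<plugin> github.com/containernetworking/plugins/<path>
    print("  -> would produce", os.path.join(outdir, plugin))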
diff --git a/import-layers/meta-virtualization/recipes-networking/netns/files/0001-Use-correct-go-cross-compiler.patch b/import-layers/meta-virtualization/recipes-networking/netns/files/0001-Use-correct-go-cross-compiler.patch
new file mode 100644
index 000000000..ed66e11ba
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-networking/netns/files/0001-Use-correct-go-cross-compiler.patch
@@ -0,0 +1,77 @@
+From d5c319bb61155d94bef2571a095d82983d786b94 Mon Sep 17 00:00:00 2001
+From: Paul Barker <pbarker@toganlabs.com>
+Date: Fri, 13 Oct 2017 17:58:11 +0000
+Subject: [PATCH] Use correct go cross-compiler
+
+Signed-off-by: Paul Barker <pbarker@toganlabs.com>
+Upstream-status: Pending
+---
+ Makefile | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+diff --git a/Makefile b/Makefile
+index cb9a46d..633f884 100644
+--- a/src/import/Makefile
++++ b/src/import/Makefile
+@@ -33,12 +33,12 @@ build: $(NAME) ## Builds a dynamic executable or package
+
+ $(NAME): *.go VERSION
+ @echo "+ $@"
+- go build -tags "$(BUILDTAGS)" ${GO_LDFLAGS} -o $(NAME) .
++ $(GO) build -tags "$(BUILDTAGS)" ${GO_LDFLAGS} -o $(NAME) .
+
+ .PHONY: static
+ static: ## Builds a static executable
+ @echo "+ $@"
+- CGO_ENABLED=0 go build \
++ CGO_ENABLED=0 $(GO) build \
+ -tags "$(BUILDTAGS) static_build" \
+ ${GO_LDFLAGS_STATIC} -o $(NAME) .
+
+@@ -55,21 +55,21 @@ lint: ## Verifies `golint` passes
+ .PHONY: test
+ test: ## Runs the go tests
+ @echo "+ $@"
+- @go test -v -tags "$(BUILDTAGS) cgo" $(shell go list ./... | grep -v vendor)
++ @$(GO) test -v -tags "$(BUILDTAGS) cgo" $(shell $(GO) list ./... | grep -v vendor)
+
+ .PHONY: vet
+ vet: ## Verifies `go vet` passes
+ @echo "+ $@"
+- @go vet $(shell go list ./... | grep -v vendor) | grep -v '.pb.go:' | tee /dev/stderr
++ @$(GO) vet $(shell $(GO) list ./... | grep -v vendor) | grep -v '.pb.go:' | tee /dev/stderr
+
+ .PHONY: install
+ install: ## Installs the executable or package
+ @echo "+ $@"
+- @go install .
++ @$(GO) install .
+
+ define buildpretty
+ mkdir -p $(BUILDDIR)/$(1)/$(2);
+-GOOS=$(1) GOARCH=$(2) CGO_ENABLED=0 go build \
++GOOS=$(1) GOARCH=$(2) CGO_ENABLED=0 $(GO) build \
+ -o $(BUILDDIR)/$(1)/$(2)/$(NAME) \
+ -a -tags "$(BUILDTAGS) static_build netgo" \
+ -installsuffix netgo ${GO_LDFLAGS_STATIC} .;
+@@ -83,7 +83,7 @@ cross: *.go VERSION ## Builds the cross-compiled binaries, creating a clean dire
+ $(foreach GOOSARCH,$(GOOSARCHES), $(call buildpretty,$(subst /,,$(dir $(GOOSARCH))),$(notdir $(GOOSARCH))))
+
+ define buildrelease
+-GOOS=$(1) GOARCH=$(2) CGO_ENABLED=0 go build \
++GOOS=$(1) GOARCH=$(2) CGO_ENABLED=0 $(GO) build \
+ -o $(BUILDDIR)/$(NAME)-$(1)-$(2) \
+ -a -tags "$(BUILDTAGS) static_build netgo" \
+ -installsuffix netgo ${GO_LDFLAGS_STATIC} .;
+@@ -99,7 +99,7 @@ release: *.go VERSION ## Builds the cross-compiled binaries, naming them in such
+ .PHONY: bump-version
+ BUMP := patch
+ bump-version: ## Bump the version in the version file. Set KIND to [ patch | major | minor ]
+- @go get -u github.com/jessfraz/junk/sembump # update sembump tool
++ @$(GO) get -u github.com/jessfraz/junk/sembump # update sembump tool
+ $(eval NEW_VERSION = $(shell sembump --kind $(BUMP) $(VERSION)))
+ @echo "Bumping VERSION from $(VERSION) to $(NEW_VERSION)"
+ echo $(NEW_VERSION) > VERSION
+--
+2.7.4
+
diff --git a/import-layers/meta-virtualization/recipes-networking/netns/netns_git.bb b/import-layers/meta-virtualization/recipes-networking/netns/netns_git.bb
index 641d55fc1..d35836ef0 100644
--- a/import-layers/meta-virtualization/recipes-networking/netns/netns_git.bb
+++ b/import-layers/meta-virtualization/recipes-networking/netns/netns_git.bb
@@ -1,11 +1,14 @@
HOMEPAGE = "https://github.com/jfrazelle/netns"
SUMMARY = "Runc hook for setting up default bridge networking."
LICENSE = "MIT"
-LIC_FILES_CHKSUM = "file://LICENSE;md5=20ce4c6a4f32d6ee4a68e3a7506db3f1"
+LIC_FILES_CHKSUM = "file://src/import/LICENSE;md5=20ce4c6a4f32d6ee4a68e3a7506db3f1"
-SRC_URI = "git://github.com/jessfraz/netns;branch=master"
-SRCREV = "85b1ab9fcccbaa404a2636b52a48bbde02437cf7"
-PV = "0.1.0+git${SRCPV}"
+SRC_URI = "git://github.com/jessfraz/netns;branch=master \
+ file://0001-Use-correct-go-cross-compiler.patch \
+ "
+SRCREV = "74e23a0e5c4e7ac011aafcc4623586c196f1b3ef"
+PV = "0.2.1"
+GO_IMPORT = "import"
S = "${WORKDIR}/git"
@@ -20,12 +23,13 @@ do_compile() {
# Go looks in a src directory under any directory in GOPATH but netns
# uses 'vendor' instead of 'vendor/src'. We can fix this with a symlink.
#
- # We also need to link in the ipallocator directory as that is not under
- # a src directory.
- ln -sfn . "${S}/vendor/src"
- mkdir -p "${S}/vendor/src/github.com/jessfraz/netns"
- ln -sfn "${S}/ipallocator" "${S}/vendor/src/github.com/jessfraz/netns/ipallocator"
- export GOPATH="${S}/vendor"
+ # We also need to link in the ipallocator and version directories as
+ # they are not under the src directory.
+ ln -sfn . "${S}/src/import/vendor/src"
+ mkdir -p "${S}/src/import/vendor/src/github.com/jessfraz/netns"
+ ln -sfn "${S}/src/import/ipallocator" "${S}/src/import/vendor/src/github.com/jessfraz/netns/ipallocator"
+ ln -sfn "${S}/src/import/version" "${S}/src/import/vendor/src/github.com/jessfraz/netns/version"
+ export GOPATH="${S}/src/import/vendor"
# Pass the needed cflags/ldflags so that cgo
 # can find the needed header files and libraries
@@ -35,10 +39,11 @@ do_compile() {
export CGO_CFLAGS="${BUILDSDK_CFLAGS} --sysroot=${STAGING_DIR_TARGET}"
export CGO_LDFLAGS="${BUILDSDK_LDFLAGS} --sysroot=${STAGING_DIR_TARGET}"
+ cd ${S}/src/import
oe_runmake static
}
do_install() {
install -d ${D}/${sbindir}
- install ${S}/netns ${D}/${sbindir}/netns
+ install ${S}/src/import/netns ${D}/${sbindir}/netns
}
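The ln -sfn fix-ups in the netns do_compile() above exist because the pre-module Go toolchain only resolves imports beneath a src/ directory of some GOPATH entry. A minimal sketch of the layout those links produce, using a scratch directory in place of ${S}/src/import (illustrative only, not part of the recipe):

import os
import tempfile

S = tempfile.mkdtemp()                                   # stands in for ${S}/src/import
for d in ("vendor", "ipallocator", "version"):
    os.makedirs(os.path.join(S, d))

os.symlink(".", os.path.join(S, "vendor", "src"))        # 'vendor' doubles as 'vendor/src'
pkg = os.path.join(S, "vendor", "src", "github.com", "jessfraz", "netns")
os.makedirs(pkg)                                         # lands under vendor/github.com/... via the symlink
os.symlink(os.path.join(S, "ipallocator"), os.path.join(pkg, "ipallocator"))
os.symlink(os.path.join(S, "version"), os.path.join(pkg, "version"))
print("GOPATH for the build would be:", os.path.join(S, "vendor"))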
diff --git a/import-layers/meta-virtualization/recipes-networking/openvswitch/files/configure-Only-link-against-libpcap-on-FreeBSD.patch b/import-layers/meta-virtualization/recipes-networking/openvswitch/files/configure-Only-link-against-libpcap-on-FreeBSD.patch
deleted file mode 100644
index 0a44b85eb..000000000
--- a/import-layers/meta-virtualization/recipes-networking/openvswitch/files/configure-Only-link-against-libpcap-on-FreeBSD.patch
+++ /dev/null
@@ -1,70 +0,0 @@
-From d30e714ccb9d13caf39d14d5b2fc9523b678ed51 Mon Sep 17 00:00:00 2001
-From: Ben Pfaff <blp@nicira.com>
-Date: Thu, 14 Mar 2013 15:20:55 -0700
-Subject: [PATCH] configure: Only link against libpcap on FreeBSD.
-
-commit d30e714ccb9d13caf39d14d5b2fc9523b678ed51 upstream
-http://git.openvswitch.org/git/openvswitch
-
-On other platforms there is no benefit to linking against libpcap, because
-it is not used.
-
-Signed-off-by: Ben Pfaff <blp@nicira.com>
-CC: Ed Maste <emaste@freebsd.org>
----
- acinclude.m4 | 7 ++++++-
- configure.ac | 3 +--
- 2 files changed, 7 insertions(+), 3 deletions(-)
-
-diff --git a/acinclude.m4 b/acinclude.m4
-index f0610c9..19a47dd 100644
---- a/acinclude.m4
-+++ b/acinclude.m4
-@@ -1,6 +1,6 @@
- # -*- autoconf -*-
-
--# Copyright (c) 2008, 2009, 2010, 2011, 2012 Nicira, Inc.
-+# Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
-@@ -295,6 +295,8 @@ AC_DEFUN([OVS_CHECK_IF_PACKET],
- fi])
-
- dnl Checks for net/if_dl.h.
-+dnl
-+dnl (We use this as a proxy for checking whether we're building on FreeBSD.)
- AC_DEFUN([OVS_CHECK_IF_DL],
- [AC_CHECK_HEADER([net/if_dl.h],
- [HAVE_IF_DL=yes],
-@@ -303,6 +305,9 @@ AC_DEFUN([OVS_CHECK_IF_DL],
- if test "$HAVE_IF_DL" = yes; then
- AC_DEFINE([HAVE_IF_DL], [1],
- [Define to 1 if net/if_dl.h is available.])
-+
-+ # On FreeBSD we use libpcap to access network devices.
-+ AC_SEARCH_LIBS([pcap_open_live], [pcap])
- fi])
-
- dnl Checks for buggy strtok_r.
-diff --git a/configure.ac b/configure.ac
-index 1cacd29..bd49179 100644
---- a/configure.ac
-+++ b/configure.ac
-@@ -1,4 +1,4 @@
--# Copyright (c) 2008, 2009, 2010, 2011, 2012 Nicira, Inc.
-+# Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
-@@ -44,7 +44,6 @@ AC_SYS_LARGEFILE
- AC_SEARCH_LIBS([pow], [m])
- AC_SEARCH_LIBS([clock_gettime], [rt])
- AC_SEARCH_LIBS([timer_create], [rt])
--AC_SEARCH_LIBS([pcap_open_live], [pcap])
-
- OVS_CHECK_ESX
- OVS_CHECK_COVERAGE
---
-1.8.3.2
-
diff --git a/import-layers/meta-virtualization/recipes-networking/openvswitch/files/python-make-remaining-scripts-use-usr-bin-env.patch b/import-layers/meta-virtualization/recipes-networking/openvswitch/files/python-make-remaining-scripts-use-usr-bin-env.patch
index 68f46ff57..4441c1ea7 100644
--- a/import-layers/meta-virtualization/recipes-networking/openvswitch/files/python-make-remaining-scripts-use-usr-bin-env.patch
+++ b/import-layers/meta-virtualization/recipes-networking/openvswitch/files/python-make-remaining-scripts-use-usr-bin-env.patch
@@ -33,7 +33,7 @@ index f79f235..c7b8730 100755
+++ b/build-aux/check-structs
@@ -1,4 +1,4 @@
-#! /usr/bin/python
-+#! /usr/bin/env python
++#! /usr/bin/env python3
import os.path
import sys
@@ -43,7 +43,7 @@ index 184447b..92ea18d 100755
+++ b/build-aux/extract-ofp-actions
@@ -1,4 +1,4 @@
-#! /usr/bin/python
-+#! /usr/bin/env python
++#! /usr/bin/env python3
import sys
import os.path
@@ -53,7 +53,7 @@ index 9642593..11e6de7 100755
+++ b/build-aux/extract-ofp-errors
@@ -1,4 +1,4 @@
-#! /usr/bin/python
-+#! /usr/bin/env python
++#! /usr/bin/env python3
import sys
import os.path
@@ -63,7 +63,7 @@ index 498b887..5d6bcec 100755
+++ b/build-aux/extract-ofp-fields
@@ -1,4 +1,4 @@
-#! /usr/bin/python
-+#! /usr/bin/env python
++#! /usr/bin/env python3
import getopt
import sys
@@ -73,7 +73,7 @@ index 1813638..e9e3c25 100755
+++ b/build-aux/extract-ofp-msgs
@@ -1,4 +1,4 @@
-#! /usr/bin/python
-+#! /usr/bin/env python
++#! /usr/bin/env python3
import sys
import os.path
@@ -83,7 +83,7 @@ index bd4e879..9d81503 100755
+++ b/build-aux/xml2nroff
@@ -1,4 +1,4 @@
-#! /usr/bin/python
-+#! /usr/bin/env python
++#! /usr/bin/env python3
# Copyright (c) 2010, 2011, 2012, 2013, 2014, 2015, 2016 Nicira, Inc.
#
@@ -93,7 +93,7 @@ index 52de3db..c90f02b 100755
+++ b/ovn/utilities/ovn-docker-overlay-driver
@@ -1,4 +1,4 @@
-#! /usr/bin/python
-+#! /usr/bin/env python
++#! /usr/bin/env python3
# Copyright (C) 2015 Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -103,7 +103,7 @@ index 2c9c4b6..89b804c 100755
+++ b/ovn/utilities/ovn-docker-underlay-driver
@@ -1,4 +1,4 @@
-#! /usr/bin/python
-+#! /usr/bin/env python
++#! /usr/bin/env python3
# Copyright (C) 2015 Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -113,7 +113,7 @@ index 5cf26ee..f76f4bd 100755
+++ b/ovsdb/ovsdb-doc
@@ -1,4 +1,4 @@
-#! /usr/bin/python
-+#! /usr/bin/env python
++#! /usr/bin/env python3
# Copyright (c) 2010, 2011, 2012, 2013, 2014, 2015 Nicira, Inc.
#
diff --git a/import-layers/meta-virtualization/recipes-networking/openvswitch/files/python-switch-remaining-scripts-to-use-python3.patch b/import-layers/meta-virtualization/recipes-networking/openvswitch/files/python-switch-remaining-scripts-to-use-python3.patch
new file mode 100644
index 000000000..a02b2a403
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-networking/openvswitch/files/python-switch-remaining-scripts-to-use-python3.patch
@@ -0,0 +1,113 @@
+From 176528ca3a8b76c9d0bb71b1e56eeebccc655c71 Mon Sep 17 00:00:00 2001
+From: Mark Asselstine <mark.asselstine@windriver.com>
+Date: Wed, 3 May 2017 10:39:12 -0400
+Subject: [PATCH] python: switch remaining scripts to use python3
+
+Work to remove the main openvswitch package's dependency on python 2.
+
+Signed-off-by: Mark Asselstine <mark.asselstine@windriver.com>
+---
+ ofproto/ipfix-gen-entities | 2 +-
+ tests/test-l7.py | 2 +-
+ utilities/checkpatch.py | 2 +-
+ utilities/ovs-dev.py | 2 +-
+ utilities/ovs-pipegen.py | 2 +-
+ vtep/ovs-vtep | 2 +-
+ xenserver/etc_xapi.d_plugins_openvswitch-cfg-update | 2 +-
+ xenserver/opt_xensource_libexec_interface-reconfigure | 2 +-
+ xenserver/usr_share_openvswitch_scripts_ovs-xapi-sync | 2 +-
+ 9 files changed, 9 insertions(+), 9 deletions(-)
+
+diff --git a/ofproto/ipfix-gen-entities b/ofproto/ipfix-gen-entities
+index 0be7199..d2cce42 100755
+--- a/ofproto/ipfix-gen-entities
++++ b/ofproto/ipfix-gen-entities
+@@ -1,4 +1,4 @@
+-#! /usr/bin/env python
++#! /usr/bin/env python3
+ #
+ # Copyright (C) 2012 Nicira, Inc.
+ #
+diff --git a/tests/test-l7.py b/tests/test-l7.py
+index d7854a1..f09defb 100755
+--- a/tests/test-l7.py
++++ b/tests/test-l7.py
+@@ -1,4 +1,4 @@
+-#!/usr/bin/env python
++#!/usr/bin/env python3
+ # Copyright (c) 2015, 2016 Nicira, Inc.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+diff --git a/utilities/checkpatch.py b/utilities/checkpatch.py
+index 26eb5c3..2e1932b 100755
+--- a/utilities/checkpatch.py
++++ b/utilities/checkpatch.py
+@@ -1,4 +1,4 @@
+-#!/usr/bin/env python
++#!/usr/bin/env python3
+ # Copyright (c) 2016 Red Hat, Inc.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+diff --git a/utilities/ovs-dev.py b/utilities/ovs-dev.py
+index 9ce0f04..839e13e 100755
+--- a/utilities/ovs-dev.py
++++ b/utilities/ovs-dev.py
+@@ -1,4 +1,4 @@
+-#!/usr/bin/env python
++#!/usr/bin/env python3
+ # Copyright (c) 2013, 2014, 2015, 2016 Nicira, Inc.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+diff --git a/utilities/ovs-pipegen.py b/utilities/ovs-pipegen.py
+index 4bf240f..2a8f13e 100755
+--- a/utilities/ovs-pipegen.py
++++ b/utilities/ovs-pipegen.py
+@@ -1,4 +1,4 @@
+-#! /usr/bin/env python
++#! /usr/bin/env python3
+ # Copyright (c) 2013, 2014, 2015 Nicira, Inc.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+diff --git a/vtep/ovs-vtep b/vtep/ovs-vtep
+index fd652d4..19d63f9 100755
+--- a/vtep/ovs-vtep
++++ b/vtep/ovs-vtep
+@@ -1,4 +1,4 @@
+-#! /usr/bin/env python
++#! /usr/bin/env python3
+ # Copyright (C) 2013 Nicira, Inc. All Rights Reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+diff --git a/xenserver/etc_xapi.d_plugins_openvswitch-cfg-update b/xenserver/etc_xapi.d_plugins_openvswitch-cfg-update
+index e7404e3..5edad76 100755
+--- a/xenserver/etc_xapi.d_plugins_openvswitch-cfg-update
++++ b/xenserver/etc_xapi.d_plugins_openvswitch-cfg-update
+@@ -1,4 +1,4 @@
+-#!/usr/bin/env python
++#!/usr/bin/env python3
+ #
+ # xapi plugin script to update the cache of configuration items in the
+ # ovs-vswitchd configuration that are managed in the xapi database when
+diff --git a/xenserver/opt_xensource_libexec_interface-reconfigure b/xenserver/opt_xensource_libexec_interface-reconfigure
+index ea4a742..c6745ee 100755
+--- a/xenserver/opt_xensource_libexec_interface-reconfigure
++++ b/xenserver/opt_xensource_libexec_interface-reconfigure
+@@ -1,4 +1,4 @@
+-#!/usr/bin/env python
++#!/usr/bin/env python3
+ #
+ # Copyright (c) 2008,2009 Citrix Systems, Inc.
+ #
+diff --git a/xenserver/usr_share_openvswitch_scripts_ovs-xapi-sync b/xenserver/usr_share_openvswitch_scripts_ovs-xapi-sync
+index a776c00..d5ff8af 100755
+--- a/xenserver/usr_share_openvswitch_scripts_ovs-xapi-sync
++++ b/xenserver/usr_share_openvswitch_scripts_ovs-xapi-sync
+@@ -1,4 +1,4 @@
+-#! /usr/bin/env python
++#! /usr/bin/env python3
+ # Copyright (c) 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+--
+2.7.4
+
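The patch that follows (0001-Python3-compatibility-Convert-print-statements.patch) mechanically rewrites Python 2 print statements as print() calls, importing print_function where a trailing comma previously suppressed the newline. A minimal, self-contained illustration of that pattern (the snippet is not taken from the OVS sources):

from __future__ import print_function   # no-op on Python 3, required on Python 2

# Python 2 only:              print "value:", 42
# Portable, as in the patch:  print("value:", 42)
print("value:", 42)

# Python 2 trailing comma suppressed the newline:   print "partial",
# The patch expresses the same thing with end=' ':
print("partial", end=' ')
print("line")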
diff --git a/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch-git/0001-Python3-compatibility-Convert-print-statements.patch b/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch-git/0001-Python3-compatibility-Convert-print-statements.patch
new file mode 100644
index 000000000..d6197588d
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch-git/0001-Python3-compatibility-Convert-print-statements.patch
@@ -0,0 +1,1264 @@
+From c5c18f9c5f1b7217d43af43be9736c1762c7ebba Mon Sep 17 00:00:00 2001
+From: Jason Wessel <jason.wessel@windriver.com>
+Date: Thu, 29 Jun 2017 20:33:23 -0700
+Subject: [PATCH 1/8] Python3 compatibility: Convert print statements
+
+Commit d34a1cc02536f9a812517a71accec3fbd3c6c98b from
+https://github.com/openvswitch/ovs.git
+
+This patch fixes up all the print statements to work with python3 or
+python2.
+
+Signed-off-by: Jason Wessel <jason.wessel@windriver.com>
+Signed-off-by: Ben Pfaff <blp@ovn.org>
+---
+ build-aux/check-structs | 4 +-
+ build-aux/extract-ofp-actions | 68 +++---
+ build-aux/extract-ofp-errors | 2 +-
+ build-aux/extract-ofp-fields | 2 +-
+ build-aux/extract-ofp-msgs | 6 +-
+ ovsdb/ovsdb-doc | 6 +-
+ ovsdb/ovsdb-idlc.in | 523 +++++++++++++++++++++---------------------
+ 7 files changed, 306 insertions(+), 305 deletions(-)
+
+diff --git a/build-aux/check-structs b/build-aux/check-structs
+index f79f235..bae511f 100755
+--- a/build-aux/check-structs
++++ b/build-aux/check-structs
+@@ -211,7 +211,7 @@ def checkStructs():
+
+ if '--help' in sys.argv:
+ argv0 = os.path.basename(sys.argv[0])
+- print '''\
++ print('''\
+ %(argv0)s, for checking struct and struct member alignment
+ usage: %(argv0)s -Ipath HEADER [HEADER]...
+
+@@ -226,7 +226,7 @@ assertions using OFP_ASSERT.
+
+ This program is specialized for reading Open vSwitch's OpenFlow header
+ files. It will not work on arbitrary header files without extensions.\
+-''' % {"argv0": argv0}
++''' % {"argv0": argv0})
+ sys.exit(0)
+
+ global fileName
+diff --git a/build-aux/extract-ofp-actions b/build-aux/extract-ofp-actions
+index 0062ab8..874e6b4 100755
+--- a/build-aux/extract-ofp-actions
++++ b/build-aux/extract-ofp-actions
+@@ -67,7 +67,7 @@ def fatal(msg):
+
+ def usage():
+ argv0 = os.path.basename(sys.argv[0])
+- print ('''\
++ print('''\
+ %(argv0)s, for extracting OpenFlow action data
+ usage: %(argv0)s OFP_ACTIONS.C [--prototypes | --definitions]
+
+@@ -238,36 +238,36 @@ def extract_ofp_actions(fn, definitions):
+ if n_errors:
+ sys.exit(1)
+
+- print """\
++ print("""\
+ /* Generated automatically; do not modify! -*- buffer-read-only: t -*- */
+-"""
++""")
+
+ if definitions:
+- print "/* Verify that structs used as actions are reasonable sizes. */"
++ print("/* Verify that structs used as actions are reasonable sizes. */")
+ for s in sorted(arg_structs):
+- print "BUILD_ASSERT_DECL(sizeof(%s) %% OFP_ACTION_ALIGN == 0);" % s
++ print("BUILD_ASSERT_DECL(sizeof(%s) %% OFP_ACTION_ALIGN == 0);" % s)
+
+- print "\nstatic struct ofpact_raw_instance all_raw_instances[] = {"
++ print("\nstatic struct ofpact_raw_instance all_raw_instances[] = {")
+ for vendor in domain:
+ for type_ in domain[vendor]:
+ for version in domain[vendor][type_]:
+ d = domain[vendor][type_][version]
+- print " { { 0x%08x, %2d, 0x%02x }, " % (
+- vendor, type_, version)
+- print " %s," % d["enum"]
+- print " HMAP_NODE_NULL_INITIALIZER,"
+- print " HMAP_NODE_NULL_INITIALIZER,"
+- print " %s," % d["min_length"]
+- print " %s," % d["max_length"]
+- print " %s," % d["arg_ofs"]
+- print " %s," % d["arg_len"]
+- print " \"%s\"," % re.sub('_RAW[0-9]*', '', d["enum"], 1)
++ print(" { { 0x%08x, %2d, 0x%02x }, " % (
++ vendor, type_, version))
++ print(" %s," % d["enum"])
++ print(" HMAP_NODE_NULL_INITIALIZER,")
++ print(" HMAP_NODE_NULL_INITIALIZER,")
++ print(" %s," % d["min_length"])
++ print(" %s," % d["max_length"])
++ print(" %s," % d["arg_ofs"])
++ print(" %s," % d["arg_len"])
++ print(" \"%s\"," % re.sub('_RAW[0-9]*', '', d["enum"], 1))
+ if d["deprecation"]:
+- print " \"%s\"," % re.sub(r'(["\\])', r'\\\1', d["deprecation"])
++ print(" \"%s\"," % re.sub(r'(["\\])', r'\\\1', d["deprecation"]))
+ else:
+- print " NULL,"
+- print " },"
+- print "};";
++ print(" NULL,")
++ print(" },")
++ print("};")
+
+ for versions in enums.values():
+ need_ofp_version = False
+@@ -314,11 +314,11 @@ def extract_ofp_actions(fn, definitions):
+ decl += "}"
+ else:
+ decl += ";"
+- print decl
+- print
++ print(decl)
++ print("")
+
+ if definitions:
+- print """\
++ print("""\
+ static enum ofperr
+ ofpact_decode(const struct ofp_action_header *a, enum ofp_raw_action_type raw,
+ enum ofp_version version, uint64_t arg,
+@@ -326,14 +326,14 @@ ofpact_decode(const struct ofp_action_header *a, enum ofp_raw_action_type raw,
+ uint64_t *tlv_bitmap, struct ofpbuf *out)
+ {
+ switch (raw) {\
+-"""
++""")
+ for versions in enums.values():
+ enum = versions[0]["enum"]
+- print " case %s:" % enum
++ print(" case %s:" % enum)
+ base_argtype = versions[0]["base_argtype"]
+ arg_vl_mff_map = versions[0]["arg_vl_mff_map"]
+ if base_argtype == 'void':
+- print " return decode_%s(out);" % enum
++ print(" return decode_%s(out);" % enum)
+ else:
+ if base_argtype.startswith('struct'):
+ arg = "ALIGNED_CAST(const %s *, a)" % base_argtype
+@@ -344,16 +344,16 @@ ofpact_decode(const struct ofp_action_header *a, enum ofp_raw_action_type raw,
+ else:
+ arg = "arg"
+ if arg_vl_mff_map:
+- print " return decode_%s(%s, version, vl_mff_map, tlv_bitmap, out);" % (enum, arg)
++ print(" return decode_%s(%s, version, vl_mff_map, tlv_bitmap, out);" % (enum, arg))
+ else:
+- print " return decode_%s(%s, version, out);" % (enum, arg)
+- print
+- print """\
++ print(" return decode_%s(%s, version, out);" % (enum, arg))
++ print("")
++ print("""\
+ default:
+ OVS_NOT_REACHED();
+ }
+ }\
+-"""
++""")
+ else:
+ for versions in enums.values():
+ enum = versions[0]["enum"]
+@@ -368,15 +368,15 @@ ofpact_decode(const struct ofp_action_header *a, enum ofp_raw_action_type raw,
+ if arg_vl_mff_map:
+ prototype += 'const struct vl_mff_map *, uint64_t *, '
+ prototype += "struct ofpbuf *);"
+- print prototype
++ print(prototype)
+
+- print """
++ print("""
+ static enum ofperr ofpact_decode(const struct ofp_action_header *,
+ enum ofp_raw_action_type raw,
+ enum ofp_version version,
+ uint64_t arg, const struct vl_mff_map *vl_mff_map,
+ uint64_t *tlv_bitmap, struct ofpbuf *out);
+-"""
++""")
+
+ if __name__ == '__main__':
+ if '--help' in sys.argv:
+diff --git a/build-aux/extract-ofp-errors b/build-aux/extract-ofp-errors
+index 2312b76..336a240 100755
+--- a/build-aux/extract-ofp-errors
++++ b/build-aux/extract-ofp-errors
+@@ -426,7 +426,7 @@ static const struct ofperr_domain %s = {
+ vendor, type_, code = map[enum]
+ if code == None:
+ code = -1
+- print " { %#8x, %2d, %3d }, /* %s */" % (vendor, type_, code, enum)
++ print (" { %#8x, %2d, %3d }, /* %s */" % (vendor, type_, code, enum))
+ else:
+ print (" { -1, -1, -1 }, /* %s */" % enum)
+ print ("""\
+diff --git a/build-aux/extract-ofp-fields b/build-aux/extract-ofp-fields
+index 498b887..425a85f 100755
+--- a/build-aux/extract-ofp-fields
++++ b/build-aux/extract-ofp-fields
+@@ -728,7 +728,7 @@ def make_ovs_fields(meta_flow_h, meta_flow_xml):
+ ovs\-fields \- protocol header fields in OpenFlow and Open vSwitch
+ .
+ .PP
+-''') % version
++''' % version)
+
+ recursively_replace(doc, 'oxm_classes', make_oxm_classes_xml(document))
+
+diff --git a/build-aux/extract-ofp-msgs b/build-aux/extract-ofp-msgs
+index 1813638..a67e870 100755
+--- a/build-aux/extract-ofp-msgs
++++ b/build-aux/extract-ofp-msgs
+@@ -56,14 +56,14 @@ def fatal(msg):
+
+ def usage():
+ argv0 = os.path.basename(sys.argv[0])
+- print '''\
++ print('''\
+ %(argv0)s, for extracting OpenFlow message types from header files
+ usage: %(argv0)s INPUT OUTPUT
+ where INPUT is the name of the input header file
+ and OUTPUT is the output file name.
+ Despite OUTPUT, the output is written to stdout, and the OUTPUT argument
+ only controls #line directives in the output.\
+-''' % {"argv0": argv0}
++''' % {"argv0": argv0})
+ sys.exit(0)
+
+ def make_sizeof(s):
+@@ -378,5 +378,5 @@ if __name__ == '__main__':
+ line_number = 0
+
+ for line in extract_ofp_msgs(sys.argv[2]):
+- print line
++ print(line)
+
+diff --git a/ovsdb/ovsdb-doc b/ovsdb/ovsdb-doc
+index 5cf26ee..b34fb11 100755
+--- a/ovsdb/ovsdb-doc
++++ b/ovsdb/ovsdb-doc
+@@ -258,7 +258,7 @@ represent strong references; thin lines represent weak references.
+ return s
+
+ def usage():
+- print """\
++ print("""\
+ %(argv0)s: ovsdb schema documentation generator
+ Prints documentation for an OVSDB schema as an nroff-formatted manpage.
+ usage: %(argv0)s [OPTIONS] SCHEMA XML
+@@ -269,7 +269,7 @@ The following options are also available:
+ --er-diagram=DIAGRAM.PIC include E-R diagram from DIAGRAM.PIC
+ --version=VERSION use VERSION to display on document footer
+ -h, --help display this help message\
+-""" % {'argv0': argv0}
++""" % {'argv0': argv0})
+ sys.exit(0)
+
+ if __name__ == "__main__":
+@@ -304,7 +304,7 @@ if __name__ == "__main__":
+ for line in s.split("\n"):
+ line = line.strip()
+ if len(line):
+- print line
++ print(line)
+
+ except error.Error, e:
+ sys.stderr.write("%s: %s\n" % (argv0, e.msg))
+diff --git a/ovsdb/ovsdb-idlc.in b/ovsdb/ovsdb-idlc.in
+index 721ab50..1064448 100755
+--- a/ovsdb/ovsdb-idlc.in
++++ b/ovsdb/ovsdb-idlc.in
+@@ -1,5 +1,6 @@
+ #! @PYTHON@
+
++from __future__ import print_function
+ import getopt
+ import os
+ import re
+@@ -123,7 +124,7 @@ def sorted_columns(table):
+ def printCIDLHeader(schemaFile):
+ schema = parseSchema(schemaFile)
+ prefix = schema.idlPrefix
+- print '''\
++ print('''\
+ /* Generated automatically -- do not modify! -*- buffer-read-only: t -*- */
+
+ #ifndef %(prefix)sIDL_HEADER
+@@ -135,39 +136,39 @@ def printCIDLHeader(schemaFile):
+ #include "ovsdb-data.h"
+ #include "ovsdb-idl-provider.h"
+ #include "smap.h"
+-#include "uuid.h"''' % {'prefix': prefix.upper()}
++#include "uuid.h"''' % {'prefix': prefix.upper()})
+
+ for tableName, table in sorted(schema.tables.iteritems()):
+ structName = "%s%s" % (prefix, tableName.lower())
+
+- print " "
+- print "/* %s table. */" % tableName
+- print "struct %s {" % structName
+- print "\tstruct ovsdb_idl_row header_;"
++ print(" ")
++ print("/* %s table. */" % tableName)
++ print("struct %s {" % structName)
++ print("\tstruct ovsdb_idl_row header_;")
+ for columnName, column in sorted_columns(table):
+- print "\n\t/* %s column. */" % columnName
++ print("\n\t/* %s column. */" % columnName)
+ comment, members = cMembers(prefix, tableName,
+ columnName, column, False)
+ for member in members:
+- print "\t%(type)s%(name)s;%(comment)s" % member
+- print "};"
++ print("\t%(type)s%(name)s;%(comment)s" % member)
++ print("};")
+
+ # Column indexes.
+ printEnum("%s_column_id" % structName.lower(), ["%s_COL_%s" % (structName.upper(), columnName.upper())
+ for columnName, column in sorted_columns(table)]
+ + ["%s_N_COLUMNS" % structName.upper()])
+
+- print
++ print("")
+ for columnName in table.columns:
+- print "#define %(s)s_col_%(c)s (%(s)s_columns[%(S)s_COL_%(C)s])" % {
++ print("#define %(s)s_col_%(c)s (%(s)s_columns[%(S)s_COL_%(C)s])" % {
+ 's': structName,
+ 'S': structName.upper(),
+ 'c': columnName,
+- 'C': columnName.upper()}
++ 'C': columnName.upper()})
+
+- print "\nextern struct ovsdb_idl_column %s_columns[%s_N_COLUMNS];" % (structName, structName.upper())
++ print("\nextern struct ovsdb_idl_column %s_columns[%s_N_COLUMNS];" % (structName, structName.upper()))
+
+- print '''
++ print('''
+ const struct %(s)s *%(s)s_get_for_uuid(const struct ovsdb_idl *, const struct uuid *);
+ const struct %(s)s *%(s)s_first(const struct ovsdb_idl *);
+ const struct %(s)s *%(s)s_next(const struct %(s)s *);
+@@ -205,87 +206,87 @@ void %(s)s_init(struct %(s)s *);
+ void %(s)s_delete(const struct %(s)s *);
+ struct %(s)s *%(s)s_insert(struct ovsdb_idl_txn *);
+ bool %(s)s_is_updated(const struct %(s)s *, enum %(s)s_column_id);
+-''' % {'s': structName, 'S': structName.upper()}
++''' % {'s': structName, 'S': structName.upper()})
+
+ for columnName, column in sorted_columns(table):
+- print 'void %(s)s_verify_%(c)s(const struct %(s)s *);' % {'s': structName, 'c': columnName}
++ print('void %(s)s_verify_%(c)s(const struct %(s)s *);' % {'s': structName, 'c': columnName})
+
+- print
++ print("")
+ for columnName, column in sorted_columns(table):
+ if column.type.value:
+ valueParam = ', enum ovsdb_atomic_type value_type'
+ else:
+ valueParam = ''
+- print 'const struct ovsdb_datum *%(s)s_get_%(c)s(const struct %(s)s *, enum ovsdb_atomic_type key_type%(v)s);' % {
+- 's': structName, 'c': columnName, 'v': valueParam}
++ print('const struct ovsdb_datum *%(s)s_get_%(c)s(const struct %(s)s *, enum ovsdb_atomic_type key_type%(v)s);' % {
++ 's': structName, 'c': columnName, 'v': valueParam})
+
+- print
++ print("")
+ for columnName, column in sorted_columns(table):
+- print 'void %(s)s_set_%(c)s(const struct %(s)s *,' % {'s': structName, 'c': columnName},
++ print('void %(s)s_set_%(c)s(const struct %(s)s *,' % {'s': structName, 'c': columnName}, end=' ')
+ if column.type.is_smap():
+ args = ['const struct smap *']
+ else:
+ comment, members = cMembers(prefix, tableName, columnName,
+ column, True)
+ args = ['%(type)s%(name)s' % member for member in members]
+- print '%s);' % ', '.join(args)
++ print('%s);' % ', '.join(args))
+
+- print
++ print("")
+ for columnName, column in sorted_columns(table):
+ if column.type.is_map():
+- print 'void %(s)s_update_%(c)s_setkey(const struct %(s)s *, ' % {'s': structName, 'c': columnName},
+- print '%(coltype)s, %(valtype)s);' % {'coltype':column.type.key.to_const_c_type(prefix), 'valtype':column.type.value.to_const_c_type(prefix)}
+- print 'void %(s)s_update_%(c)s_delkey(const struct %(s)s *, ' % {'s': structName, 'c': columnName},
+- print '%(coltype)s);' % {'coltype':column.type.key.to_const_c_type(prefix)}
++ print('void %(s)s_update_%(c)s_setkey(const struct %(s)s *, ' % {'s': structName, 'c': columnName}, end=' ')
++ print('%(coltype)s, %(valtype)s);' % {'coltype':column.type.key.to_const_c_type(prefix), 'valtype':column.type.value.to_const_c_type(prefix)})
++ print('void %(s)s_update_%(c)s_delkey(const struct %(s)s *, ' % {'s': structName, 'c': columnName}, end=' ')
++ print('%(coltype)s);' % {'coltype':column.type.key.to_const_c_type(prefix)})
+ if column.type.is_set():
+- print 'void %(s)s_update_%(c)s_addvalue(const struct %(s)s *, ' % {'s': structName, 'c': columnName},
+- print '%(valtype)s);' % {'valtype':column.type.key.to_const_c_type(prefix)}
+- print 'void %(s)s_update_%(c)s_delvalue(const struct %(s)s *, ' % {'s': structName, 'c': columnName},
+- print '%(valtype)s);' % {'valtype':column.type.key.to_const_c_type(prefix)}
++ print('void %(s)s_update_%(c)s_addvalue(const struct %(s)s *, ' % {'s': structName, 'c': columnName}, end=' ')
++ print('%(valtype)s);' % {'valtype':column.type.key.to_const_c_type(prefix)})
++ print('void %(s)s_update_%(c)s_delvalue(const struct %(s)s *, ' % {'s': structName, 'c': columnName}, end=' ')
++ print('%(valtype)s);' % {'valtype':column.type.key.to_const_c_type(prefix)})
+
+- print 'void %(s)s_add_clause_%(c)s(struct ovsdb_idl_condition *, enum ovsdb_function function,' % {'s': structName, 'c': columnName},
++ print('void %(s)s_add_clause_%(c)s(struct ovsdb_idl_condition *, enum ovsdb_function function,' % {'s': structName, 'c': columnName}, end=' ')
+ if column.type.is_smap():
+ args = ['const struct smap *']
+ else:
+ comment, members = cMembers(prefix, tableName, columnName,
+ column, True, refTable=False)
+ args = ['%(type)s%(name)s' % member for member in members]
+- print '%s);' % ', '.join(args)
++ print('%s);' % ', '.join(args))
+
+- print 'void %(s)s_set_condition(struct ovsdb_idl *, struct ovsdb_idl_condition *);' % {'s': structName},
++ print('void %(s)s_set_condition(struct ovsdb_idl *, struct ovsdb_idl_condition *);' % {'s': structName})
+
+- print
++ print("")
+
+ # Table indexes.
+ printEnum("%stable_id" % prefix.lower(), ["%sTABLE_%s" % (prefix.upper(), tableName.upper()) for tableName in sorted(schema.tables)] + ["%sN_TABLES" % prefix.upper()])
+- print
++ print("")
+ for tableName in schema.tables:
+- print "#define %(p)stable_%(t)s (%(p)stable_classes[%(P)sTABLE_%(T)s])" % {
++ print("#define %(p)stable_%(t)s (%(p)stable_classes[%(P)sTABLE_%(T)s])" % {
+ 'p': prefix,
+ 'P': prefix.upper(),
+ 't': tableName.lower(),
+- 'T': tableName.upper()}
+- print "\nextern struct ovsdb_idl_table_class %stable_classes[%sN_TABLES];" % (prefix, prefix.upper())
++ 'T': tableName.upper()})
++ print("\nextern struct ovsdb_idl_table_class %stable_classes[%sN_TABLES];" % (prefix, prefix.upper()))
+
+- print "\nextern struct ovsdb_idl_class %sidl_class;" % prefix
++ print("\nextern struct ovsdb_idl_class %sidl_class;" % prefix)
+
+- print "\nconst char * %sget_db_version(void);" % prefix
+- print "\n#endif /* %(prefix)sIDL_HEADER */" % {'prefix': prefix.upper()}
++ print("\nconst char * %sget_db_version(void);" % prefix)
++ print("\n#endif /* %(prefix)sIDL_HEADER */" % {'prefix': prefix.upper()})
+
+ def printEnum(type, members):
+ if len(members) == 0:
+ return
+
+- print "\nenum %s {" % type
++ print("\nenum %s {" % type)
+ for member in members[:-1]:
+- print " %s," % member
+- print " %s" % members[-1]
+- print "};"
++ print(" %s," % member)
++ print(" %s" % members[-1])
++ print("};")
+
+ def printCIDLSource(schemaFile):
+ schema = parseSchema(schemaFile)
+ prefix = schema.idlPrefix
+- print '''\
++ print('''\
+ /* Generated automatically -- do not modify! -*- buffer-read-only: t -*- */
+
+ #include <config.h>
+@@ -296,33 +297,33 @@ def printCIDLSource(schemaFile):
+ #include "ovsdb-error.h"
+ #include "util.h"
+
+-''' % schema.idlHeader
++''' % schema.idlHeader)
+
+ # Cast functions.
+ for tableName, table in sorted(schema.tables.iteritems()):
+ structName = "%s%s" % (prefix, tableName.lower())
+- print '''
++ print('''
+ static struct %(s)s *
+ %(s)s_cast(const struct ovsdb_idl_row *row)
+ {
+ return row ? CONTAINER_OF(row, struct %(s)s, header_) : NULL;
+ }\
+-''' % {'s': structName}
++''' % {'s': structName})
+
+
+ for tableName, table in sorted(schema.tables.iteritems()):
+ structName = "%s%s" % (prefix, tableName.lower())
+- print " "
+- print "/* %s table. */" % (tableName)
++ print(" ")
++ print("/* %s table. */" % (tableName))
+
+ # Parse functions.
+ for columnName, column in sorted_columns(table):
+- print '''
++ print('''
+ static void
+ %(s)s_parse_%(c)s(struct ovsdb_idl_row *row_, const struct ovsdb_datum *datum)
+ {
+ struct %(s)s *row = %(s)s_cast(row_);''' % {'s': structName,
+- 'c': columnName}
++ 'c': columnName})
+ type = column.type
+ if type.value:
+ keyVar = "row->key_%s" % columnName
+@@ -332,89 +333,89 @@ static void
+ valueVar = None
+
+ if type.is_smap():
+- print " smap_init(&row->%s);" % columnName
+- print " for (size_t i = 0; i < datum->n; i++) {"
+- print " smap_add(&row->%s," % columnName
+- print " datum->keys[i].string,"
+- print " datum->values[i].string);"
+- print " }"
++ print(" smap_init(&row->%s);" % columnName)
++ print(" for (size_t i = 0; i < datum->n; i++) {")
++ print(" smap_add(&row->%s," % columnName)
++ print(" datum->keys[i].string,")
++ print(" datum->values[i].string);")
++ print(" }")
+ elif (type.n_min == 1 and type.n_max == 1) or type.is_optional_pointer():
+- print
+- print " if (datum->n >= 1) {"
++ print("")
++ print(" if (datum->n >= 1) {")
+ if not type.key.ref_table:
+- print " %s = datum->keys[0].%s;" % (keyVar, type.key.type.to_string())
++ print(" %s = datum->keys[0].%s;" % (keyVar, type.key.type.to_string()))
+ else:
+- print " %s = %s%s_cast(ovsdb_idl_get_row_arc(row_, &%stable_%s, &datum->keys[0].uuid));" % (keyVar, prefix, type.key.ref_table.name.lower(), prefix, type.key.ref_table.name.lower())
++ print(" %s = %s%s_cast(ovsdb_idl_get_row_arc(row_, &%stable_%s, &datum->keys[0].uuid));" % (keyVar, prefix, type.key.ref_table.name.lower(), prefix, type.key.ref_table.name.lower()))
+
+ if valueVar:
+ if not type.value.ref_table:
+- print " %s = datum->values[0].%s;" % (valueVar, type.value.type.to_string())
++ print(" %s = datum->values[0].%s;" % (valueVar, type.value.type.to_string()))
+ else:
+- print " %s = %s%s_cast(ovsdb_idl_get_row_arc(row_, &%stable_%s, &datum->values[0].uuid));" % (valueVar, prefix, type.value.ref_table.name.lower(), prefix, type.value.ref_table.name.lower())
+- print " } else {"
+- print " %s" % type.key.initCDefault(keyVar, type.n_min == 0)
++ print(" %s = %s%s_cast(ovsdb_idl_get_row_arc(row_, &%stable_%s, &datum->values[0].uuid));" % (valueVar, prefix, type.value.ref_table.name.lower(), prefix, type.value.ref_table.name.lower()))
++ print(" } else {")
++ print(" %s" % type.key.initCDefault(keyVar, type.n_min == 0))
+ if valueVar:
+- print " %s" % type.value.initCDefault(valueVar, type.n_min == 0)
+- print " }"
++ print(" %s" % type.value.initCDefault(valueVar, type.n_min == 0))
++ print(" }")
+ else:
+ if type.n_max != sys.maxint:
+- print " size_t n = MIN(%d, datum->n);" % type.n_max
++ print(" size_t n = MIN(%d, datum->n);" % type.n_max)
+ nMax = "n"
+ else:
+ nMax = "datum->n"
+- print " %s = NULL;" % keyVar
++ print(" %s = NULL;" % keyVar)
+ if valueVar:
+- print " %s = NULL;" % valueVar
+- print " row->n_%s = 0;" % columnName
+- print " for (size_t i = 0; i < %s; i++) {" % nMax
++ print(" %s = NULL;" % valueVar)
++ print(" row->n_%s = 0;" % columnName)
++ print(" for (size_t i = 0; i < %s; i++) {" % nMax)
+ if type.key.ref_table:
+- print """\
++ print("""\
+ struct %s%s *keyRow = %s%s_cast(ovsdb_idl_get_row_arc(row_, &%stable_%s, &datum->keys[i].uuid));
+ if (!keyRow) {
+ continue;
+ }\
+-""" % (prefix, type.key.ref_table.name.lower(), prefix, type.key.ref_table.name.lower(), prefix, type.key.ref_table.name.lower())
++""" % (prefix, type.key.ref_table.name.lower(), prefix, type.key.ref_table.name.lower(), prefix, type.key.ref_table.name.lower()))
+ keySrc = "keyRow"
+ else:
+ keySrc = "datum->keys[i].%s" % type.key.type.to_string()
+ if type.value and type.value.ref_table:
+- print """\
++ print("""\
+ struct %s%s *valueRow = %s%s_cast(ovsdb_idl_get_row_arc(row_, &%stable_%s, &datum->values[i].uuid));
+ if (!valueRow) {
+ continue;
+ }\
+-""" % (prefix, type.value.ref_table.name.lower(), prefix, type.value.ref_table.name.lower(), prefix, type.value.ref_table.name.lower())
++""" % (prefix, type.value.ref_table.name.lower(), prefix, type.value.ref_table.name.lower(), prefix, type.value.ref_table.name.lower()))
+ valueSrc = "valueRow"
+ elif valueVar:
+ valueSrc = "datum->values[i].%s" % type.value.type.to_string()
+- print " if (!row->n_%s) {" % (columnName)
++ print(" if (!row->n_%s) {" % (columnName))
+
+- print " %s = xmalloc(%s * sizeof *%s);" % (
+- keyVar, nMax, keyVar)
++ print(" %s = xmalloc(%s * sizeof *%s);" % (
++ keyVar, nMax, keyVar))
+ if valueVar:
+- print " %s = xmalloc(%s * sizeof *%s);" % (
+- valueVar, nMax, valueVar)
+- print " }"
+- print " %s[row->n_%s] = %s;" % (keyVar, columnName, keySrc)
++ print(" %s = xmalloc(%s * sizeof *%s);" % (
++ valueVar, nMax, valueVar))
++ print(" }")
++ print(" %s[row->n_%s] = %s;" % (keyVar, columnName, keySrc))
+ if valueVar:
+- print " %s[row->n_%s] = %s;" % (valueVar, columnName, valueSrc)
+- print " row->n_%s++;" % columnName
+- print " }"
+- print "}"
++ print(" %s[row->n_%s] = %s;" % (valueVar, columnName, valueSrc))
++ print(" row->n_%s++;" % columnName)
++ print(" }")
++ print("}")
+
+ # Unparse functions.
+ for columnName, column in sorted_columns(table):
+ type = column.type
+ if type.is_smap() or (type.n_min != 1 or type.n_max != 1) and not type.is_optional_pointer():
+- print '''
++ print('''
+ static void
+ %(s)s_unparse_%(c)s(struct ovsdb_idl_row *row_)
+ {
+ struct %(s)s *row = %(s)s_cast(row_);''' % {'s': structName,
+- 'c': columnName}
++ 'c': columnName})
+
+ if type.is_smap():
+- print " smap_destroy(&row->%s);" % columnName
++ print(" smap_destroy(&row->%s);" % columnName)
+ else:
+ if type.value:
+ keyVar = "row->key_%s" % columnName
+@@ -422,45 +423,45 @@ static void
+ else:
+ keyVar = "row->%s" % columnName
+ valueVar = None
+- print " free(%s);" % keyVar
++ print(" free(%s);" % keyVar)
+ if valueVar:
+- print " free(%s);" % valueVar
+- print '}'
++ print(" free(%s);" % valueVar)
++ print('}')
+ else:
+- print '''
++ print('''
+ static void
+ %(s)s_unparse_%(c)s(struct ovsdb_idl_row *row OVS_UNUSED)
+ {
+ /* Nothing to do. */
+-}''' % {'s': structName, 'c': columnName}
++}''' % {'s': structName, 'c': columnName})
+
+ # Generic Row Initialization function.
+- print """
++ print("""
+ static void
+ %(s)s_init__(struct ovsdb_idl_row *row)
+ {
+ %(s)s_init(%(s)s_cast(row));
+-}""" % {'s': structName}
++}""" % {'s': structName})
+
+ # Row Initialization function.
+- print """
++ print("""
+ /* Clears the contents of 'row' in table "%(t)s". */
+ void
+ %(s)s_init(struct %(s)s *row)
+ {
+- memset(row, 0, sizeof *row); """ % {'s': structName, 't': tableName}
++ memset(row, 0, sizeof *row); """ % {'s': structName, 't': tableName})
+ for columnName, column in sorted_columns(table):
+ if column.type.is_smap():
+- print " smap_init(&row->%s);" % columnName
++ print(" smap_init(&row->%s);" % columnName)
+ elif (column.type.n_min == 1 and
+ column.type.n_max == 1 and
+ column.type.key.type == ovs.db.types.StringType and
+ not column.type.value):
+- print " row->%s = \"\";" % columnName
+- print "}"
++ print(" row->%s = \"\";" % columnName)
++ print("}")
+
+ # First, next functions.
+- print '''
++ print('''
+ /* Searches table "%(t)s" in 'idl' for a row with UUID 'uuid'. Returns
+ * a pointer to the row if there is one, otherwise a null pointer. */
+ const struct %(s)s *
+@@ -514,9 +515,9 @@ const struct %(s)s
+ 'P': prefix.upper(),
+ 't': tableName,
+ 'tl': tableName.lower(),
+- 'T': tableName.upper()}
++ 'T': tableName.upper()})
+
+- print '''
++ print('''
+
+ /* Deletes 'row' from table "%(t)s". 'row' may be freed, so it must not be
+ * accessed afterward.
+@@ -550,11 +551,11 @@ bool
+ 'P': prefix.upper(),
+ 't': tableName,
+ 'tl': tableName.lower(),
+- 'T': tableName.upper()}
++ 'T': tableName.upper()})
+
+ # Verify functions.
+ for columnName, column in sorted_columns(table):
+- print '''
++ print('''
+ /* Causes the original contents of column "%(c)s" in 'row' to be
+ * verified as a prerequisite to completing the transaction. That is, if
+ * "%(c)s" in 'row' changed (or if 'row' was deleted) between the
+@@ -585,7 +586,7 @@ void
+ }''' % {'s': structName,
+ 'S': structName.upper(),
+ 'c': columnName,
+- 'C': columnName.upper()}
++ 'C': columnName.upper()})
+
+ # Get functions.
+ for columnName, column in sorted_columns(table):
+@@ -597,7 +598,7 @@ void
+ valueParam = ''
+ valueType = ''
+ valueComment = ''
+- print """
++ print("""
+ /* Returns the "%(c)s" column's value from the "%(t)s" table in 'row'
+ * as a struct ovsdb_datum. This is useful occasionally: for example,
+ * ovsdb_datum_find_key() is an easier and more efficient way to search
+@@ -625,7 +626,7 @@ const struct ovsdb_datum *
+ return ovsdb_idl_read(&row->header_, &%(s)s_col_%(c)s);
+ }""" % {'t': tableName, 's': structName, 'c': columnName,
+ 'kt': column.type.key.toAtomicType(),
+- 'v': valueParam, 'vt': valueType, 'vc': valueComment}
++ 'v': valueParam, 'vt': valueType, 'vc': valueComment})
+
+ # Set functions.
+ for columnName, column in sorted_columns(table):
+@@ -635,8 +636,8 @@ const struct ovsdb_datum *
+ column, True)
+
+ if type.is_smap():
+- print comment
+- print """void
++ print(comment)
++ print("""void
+ %(s)s_set_%(c)s(const struct %(s)s *row, const struct smap *%(c)s)
+ {
+ struct ovsdb_datum datum;
+@@ -654,7 +655,7 @@ const struct ovsdb_datum *
+ 's': structName,
+ 'S': structName.upper(),
+ 'c': columnName,
+- 'C': columnName.upper()}
++ 'C': columnName.upper()})
+ continue
+
+ keyVar = members[0]['name']
+@@ -668,84 +669,84 @@ const struct ovsdb_datum *
+ if len(members) > 1:
+ nVar = members[1]['name']
+
+- print comment
+- print """\
++ print(comment)
++ print("""\
+ void
+ %(s)s_set_%(c)s(const struct %(s)s *row, %(args)s)
+ {
+ struct ovsdb_datum datum;""" % {'s': structName,
+ 'c': columnName,
+ 'args': ', '.join(['%(type)s%(name)s'
+- % m for m in members])}
++ % m for m in members])})
+ if type.n_min == 1 and type.n_max == 1:
+- print " union ovsdb_atom key;"
++ print(" union ovsdb_atom key;")
+ if type.value:
+- print " union ovsdb_atom value;"
+- print
+- print " datum.n = 1;"
+- print " datum.keys = &key;"
+- print " " + type.key.assign_c_value_casting_away_const("key.%s" % type.key.type.to_string(), keyVar)
++ print(" union ovsdb_atom value;")
++ print("")
++ print(" datum.n = 1;")
++ print(" datum.keys = &key;")
++ print(" " + type.key.assign_c_value_casting_away_const("key.%s" % type.key.type.to_string(), keyVar))
+ if type.value:
+- print " datum.values = &value;"
+- print " "+ type.value.assign_c_value_casting_away_const("value.%s" % type.value.type.to_string(), valueVar)
++ print(" datum.values = &value;")
++ print(" "+ type.value.assign_c_value_casting_away_const("value.%s" % type.value.type.to_string(), valueVar))
+ else:
+- print " datum.values = NULL;"
++ print(" datum.values = NULL;")
+ txn_write_func = "ovsdb_idl_txn_write_clone"
+ elif type.is_optional_pointer():
+- print " union ovsdb_atom key;"
+- print
+- print " if (%s) {" % keyVar
+- print " datum.n = 1;"
+- print " datum.keys = &key;"
+- print " " + type.key.assign_c_value_casting_away_const("key.%s" % type.key.type.to_string(), keyVar)
+- print " } else {"
+- print " datum.n = 0;"
+- print " datum.keys = NULL;"
+- print " }"
+- print " datum.values = NULL;"
++ print(" union ovsdb_atom key;")
++ print("")
++ print(" if (%s) {" % keyVar)
++ print(" datum.n = 1;")
++ print(" datum.keys = &key;")
++ print(" " + type.key.assign_c_value_casting_away_const("key.%s" % type.key.type.to_string(), keyVar))
++ print(" } else {")
++ print(" datum.n = 0;")
++ print(" datum.keys = NULL;")
++ print(" }")
++ print(" datum.values = NULL;")
+ txn_write_func = "ovsdb_idl_txn_write_clone"
+ elif type.n_max == 1:
+- print " union ovsdb_atom key;"
+- print
+- print " if (%s) {" % nVar
+- print " datum.n = 1;"
+- print " datum.keys = &key;"
+- print " " + type.key.assign_c_value_casting_away_const("key.%s" % type.key.type.to_string(), "*" + keyVar)
+- print " } else {"
+- print " datum.n = 0;"
+- print " datum.keys = NULL;"
+- print " }"
+- print " datum.values = NULL;"
++ print(" union ovsdb_atom key;")
++ print("")
++ print(" if (%s) {" % nVar)
++ print(" datum.n = 1;")
++ print(" datum.keys = &key;")
++ print(" " + type.key.assign_c_value_casting_away_const("key.%s" % type.key.type.to_string(), "*" + keyVar))
++ print(" } else {")
++ print(" datum.n = 0;")
++ print(" datum.keys = NULL;")
++ print(" }")
++ print(" datum.values = NULL;")
+ txn_write_func = "ovsdb_idl_txn_write_clone"
+ else:
+- print
+- print " datum.n = %s;" % nVar
+- print " datum.keys = %s ? xmalloc(%s * sizeof *datum.keys) : NULL;" % (nVar, nVar)
++ print("")
++ print(" datum.n = %s;" % nVar)
++ print(" datum.keys = %s ? xmalloc(%s * sizeof *datum.keys) : NULL;" % (nVar, nVar))
+ if type.value:
+- print " datum.values = xmalloc(%s * sizeof *datum.values);" % nVar
++ print(" datum.values = xmalloc(%s * sizeof *datum.values);" % nVar)
+ else:
+- print " datum.values = NULL;"
+- print " for (size_t i = 0; i < %s; i++) {" % nVar
+- print " " + type.key.copyCValue("datum.keys[i].%s" % type.key.type.to_string(), "%s[i]" % keyVar)
++ print(" datum.values = NULL;")
++ print(" for (size_t i = 0; i < %s; i++) {" % nVar)
++ print(" " + type.key.copyCValue("datum.keys[i].%s" % type.key.type.to_string(), "%s[i]" % keyVar))
+ if type.value:
+- print " " + type.value.copyCValue("datum.values[i].%s" % type.value.type.to_string(), "%s[i]" % valueVar)
+- print " }"
++ print(" " + type.value.copyCValue("datum.values[i].%s" % type.value.type.to_string(), "%s[i]" % valueVar))
++ print(" }")
+ if type.value:
+ valueType = type.value.toAtomicType()
+ else:
+ valueType = "OVSDB_TYPE_VOID"
+ txn_write_func = "ovsdb_idl_txn_write"
+- print " %(f)s(&row->header_, &%(s)s_col_%(c)s, &datum);" \
++ print(" %(f)s(&row->header_, &%(s)s_col_%(c)s, &datum);" \
+ % {'f': txn_write_func,
+ 's': structName,
+ 'S': structName.upper(),
+- 'c': columnName}
+- print "}"
++ 'c': columnName})
++ print("}")
+ # Update/Delete of partial map column functions
+ for columnName, column in sorted_columns(table):
+ type = column.type
+ if type.is_map():
+- print '''
++ print('''
+ /* Sets an element of the "%(c)s" map column from the "%(t)s" table in 'row'
+ * to 'new_value' given the key value 'new_key'.
+ *
+@@ -761,17 +762,17 @@ void
+ datum->values = xmalloc(datum->n * sizeof *datum->values);
+ ''' % {'s': structName, 'c': columnName,'coltype':column.type.key.to_const_c_type(prefix),
+ 'valtype':column.type.value.to_const_c_type(prefix), 'S': structName.upper(),
+- 'C': columnName.upper(), 't': tableName}
++ 'C': columnName.upper(), 't': tableName})
+
+- print " "+ type.key.copyCValue("datum->keys[0].%s" % type.key.type.to_string(), "new_key")
+- print " "+ type.value.copyCValue("datum->values[0].%s" % type.value.type.to_string(), "new_value")
+- print '''
++ print(" "+ type.key.copyCValue("datum->keys[0].%s" % type.key.type.to_string(), "new_key"))
++ print(" "+ type.value.copyCValue("datum->values[0].%s" % type.value.type.to_string(), "new_value"))
++ print('''
+ ovsdb_idl_txn_write_partial_map(&row->header_,
+ &%(s)s_col_%(c)s,
+ datum);
+ }''' % {'s': structName, 'c': columnName,'coltype':column.type.key.toCType(prefix),
+- 'valtype':column.type.value.to_const_c_type(prefix), 'S': structName.upper()}
+- print '''
++ 'valtype':column.type.value.to_const_c_type(prefix), 'S': structName.upper()})
++ print('''
+ /* Deletes an element of the "%(c)s" map column from the "%(t)s" table in 'row'
+ * given the key value 'delete_key'.
+ *
+@@ -787,19 +788,19 @@ void
+ datum->values = NULL;
+ ''' % {'s': structName, 'c': columnName,'coltype':column.type.key.to_const_c_type(prefix),
+ 'valtype':column.type.value.to_const_c_type(prefix), 'S': structName.upper(),
+- 'C': columnName.upper(), 't': tableName}
++ 'C': columnName.upper(), 't': tableName})
+
+- print " "+ type.key.copyCValue("datum->keys[0].%s" % type.key.type.to_string(), "delete_key")
+- print '''
++ print(" "+ type.key.copyCValue("datum->keys[0].%s" % type.key.type.to_string(), "delete_key"))
++ print('''
+ ovsdb_idl_txn_delete_partial_map(&row->header_,
+ &%(s)s_col_%(c)s,
+ datum);
+ }''' % {'s': structName, 'c': columnName,'coltype':column.type.key.toCType(prefix),
+- 'valtype':column.type.value.to_const_c_type(prefix), 'S': structName.upper()}
++ 'valtype':column.type.value.to_const_c_type(prefix), 'S': structName.upper()})
+ # End Update/Delete of partial maps
+ # Update/Delete of partial set column functions
+ if type.is_set():
+- print '''
++ print('''
+ /* Adds the value 'new_value' to the "%(c)s" set column from the "%(t)s" table
+ * in 'row'.
+ *
+@@ -814,16 +815,16 @@ void
+ datum->keys = xmalloc(datum->n * sizeof *datum->values);
+ datum->values = NULL;
+ ''' % {'s': structName, 'c': columnName,
+- 'valtype':column.type.key.to_const_c_type(prefix), 't': tableName}
++ 'valtype':column.type.key.to_const_c_type(prefix), 't': tableName})
+
+- print " "+ type.key.copyCValue("datum->keys[0].%s" % type.key.type.to_string(), "new_value")
+- print '''
++ print(" "+ type.key.copyCValue("datum->keys[0].%s" % type.key.type.to_string(), "new_value"))
++ print('''
+ ovsdb_idl_txn_write_partial_set(&row->header_,
+ &%(s)s_col_%(c)s,
+ datum);
+ }''' % {'s': structName, 'c': columnName,'coltype':column.type.key.toCType(prefix),
+- 'valtype':column.type.key.to_const_c_type(prefix), 'S': structName.upper()}
+- print '''
++ 'valtype':column.type.key.to_const_c_type(prefix), 'S': structName.upper()})
++ print('''
+ /* Deletes the value 'delete_value' from the "%(c)s" set column from the
+ * "%(t)s" table in 'row'.
+ *
+@@ -839,15 +840,15 @@ void
+ datum->values = NULL;
+ ''' % {'s': structName, 'c': columnName,'coltype':column.type.key.to_const_c_type(prefix),
+ 'valtype':column.type.key.to_const_c_type(prefix), 'S': structName.upper(),
+- 'C': columnName.upper(), 't': tableName}
++ 'C': columnName.upper(), 't': tableName})
+
+- print " "+ type.key.copyCValue("datum->keys[0].%s" % type.key.type.to_string(), "delete_value")
+- print '''
++ print(" "+ type.key.copyCValue("datum->keys[0].%s" % type.key.type.to_string(), "delete_value"))
++ print('''
+ ovsdb_idl_txn_delete_partial_set(&row->header_,
+ &%(s)s_col_%(c)s,
+ datum);
+ }''' % {'s': structName, 'c': columnName,'coltype':column.type.key.toCType(prefix),
+- 'valtype':column.type.key.to_const_c_type(prefix), 'S': structName.upper()}
++ 'valtype':column.type.key.to_const_c_type(prefix), 'S': structName.upper()})
+ # End Update/Delete of partial set
+
+ # Add clause functions.
+@@ -858,8 +859,8 @@ void
+ column, True, refTable=False)
+
+ if type.is_smap():
+- print comment
+- print """void
++ print(comment)
++ print("""void
+ %(s)s_add_clause_%(c)s(struct ovsdb_idl_condition *cond, enum ovsdb_function function, const struct smap *%(c)s)
+ {
+ struct ovsdb_datum datum;
+@@ -884,7 +885,7 @@ void
+ 'P': prefix.upper(),
+ 's': structName,
+ 'S': structName.upper(),
+- 'c': columnName}
++ 'c': columnName})
+ continue
+
+ keyVar = members[0]['name']
+@@ -898,73 +899,73 @@ void
+ if len(members) > 1:
+ nVar = members[1]['name']
+
+- print comment
+- print 'void'
+- print '%(s)s_add_clause_%(c)s(struct ovsdb_idl_condition *cond, enum ovsdb_function function, %(args)s)' % \
++ print(comment)
++ print('void')
++ print('%(s)s_add_clause_%(c)s(struct ovsdb_idl_condition *cond, enum ovsdb_function function, %(args)s)' % \
+ {'s': structName, 'c': columnName,
+- 'args': ', '.join(['%(type)s%(name)s' % m for m in members])}
+- print "{"
+- print " struct ovsdb_datum datum;"
++ 'args': ', '.join(['%(type)s%(name)s' % m for m in members])})
++ print("{")
++ print(" struct ovsdb_datum datum;")
+ free = []
+ if type.n_min == 1 and type.n_max == 1:
+- print " union ovsdb_atom key;"
++ print(" union ovsdb_atom key;")
+ if type.value:
+- print " union ovsdb_atom value;"
+- print
+- print " datum.n = 1;"
+- print " datum.keys = &key;"
+- print " " + type.key.assign_c_value_casting_away_const("key.%s" % type.key.type.to_string(), keyVar, refTable=False)
++ print(" union ovsdb_atom value;")
++ print("")
++ print(" datum.n = 1;")
++ print(" datum.keys = &key;")
++ print(" " + type.key.assign_c_value_casting_away_const("key.%s" % type.key.type.to_string(), keyVar, refTable=False))
+ if type.value:
+- print " datum.values = &value;"
+- print " "+ type.value.assign_c_value_casting_away_const("value.%s" % type.value.type.to_string(), valueVar, refTable=False)
++ print(" datum.values = &value;")
++ print(" "+ type.value.assign_c_value_casting_away_const("value.%s" % type.value.type.to_string(), valueVar, refTable=False))
+ else:
+- print " datum.values = NULL;"
++ print(" datum.values = NULL;")
+ elif type.is_optional_pointer():
+- print " union ovsdb_atom key;"
+- print
+- print " if (%s) {" % keyVar
+- print " datum.n = 1;"
+- print " datum.keys = &key;"
+- print " " + type.key.assign_c_value_casting_away_const("key.%s" % type.key.type.to_string(), keyVar, refTable=False)
+- print " } else {"
+- print " datum.n = 0;"
+- print " datum.keys = NULL;"
+- print " }"
+- print " datum.values = NULL;"
++ print(" union ovsdb_atom key;")
++ print("")
++ print(" if (%s) {" % keyVar)
++ print(" datum.n = 1;")
++ print(" datum.keys = &key;")
++ print(" " + type.key.assign_c_value_casting_away_const("key.%s" % type.key.type.to_string(), keyVar, refTable=False))
++ print(" } else {")
++ print(" datum.n = 0;")
++ print(" datum.keys = NULL;")
++ print(" }")
++ print(" datum.values = NULL;")
+ elif type.n_max == 1:
+- print " union ovsdb_atom key;"
+- print
+- print " if (%s) {" % nVar
+- print " datum.n = 1;"
+- print " datum.keys = &key;"
+- print " " + type.key.assign_c_value_casting_away_const("key.%s" % type.key.type.to_string(), "*" + keyVar, refTable=False)
+- print " } else {"
+- print " datum.n = 0;"
+- print " datum.keys = NULL;"
+- print " }"
+- print " datum.values = NULL;"
++ print(" union ovsdb_atom key;")
++ print("")
++ print(" if (%s) {" % nVar)
++ print(" datum.n = 1;")
++ print(" datum.keys = &key;")
++ print(" " + type.key.assign_c_value_casting_away_const("key.%s" % type.key.type.to_string(), "*" + keyVar, refTable=False))
++ print(" } else {")
++ print(" datum.n = 0;")
++ print(" datum.keys = NULL;")
++ print(" }")
++ print(" datum.values = NULL;")
+ else:
+- print " datum.n = %s;" % nVar
+- print " datum.keys = %s ? xmalloc(%s * sizeof *datum.keys) : NULL;" % (nVar, nVar)
++ print(" datum.n = %s;" % nVar)
++ print(" datum.keys = %s ? xmalloc(%s * sizeof *datum.keys) : NULL;" % (nVar, nVar))
+ free += ['datum.keys']
+ if type.value:
+- print " datum.values = xmalloc(%s * sizeof *datum.values);" % nVar
++ print(" datum.values = xmalloc(%s * sizeof *datum.values);" % nVar)
+ free += ['datum.values']
+ else:
+- print " datum.values = NULL;"
+- print " for (size_t i = 0; i < %s; i++) {" % nVar
+- print " " + type.key.assign_c_value_casting_away_const("datum.keys[i].%s" % type.key.type.to_string(), "%s[i]" % keyVar, refTable=False)
++ print(" datum.values = NULL;")
++ print(" for (size_t i = 0; i < %s; i++) {" % nVar)
++ print(" " + type.key.assign_c_value_casting_away_const("datum.keys[i].%s" % type.key.type.to_string(), "%s[i]" % keyVar, refTable=False))
+ if type.value:
+- print " " + type.value.assign_c_value_casting_away_const("datum.values[i].%s" % type.value.type.to_string(), "%s[i]" % valueVar, refTable=False)
+- print " }"
++ print(" " + type.value.assign_c_value_casting_away_const("datum.values[i].%s" % type.value.type.to_string(), "%s[i]" % valueVar, refTable=False))
++ print(" }")
+ if type.value:
+ valueType = type.value.toAtomicType()
+ else:
+ valueType = "OVSDB_TYPE_VOID"
+- print " ovsdb_datum_sort_unique(&datum, %s, %s);" % (
+- type.key.toAtomicType(), valueType)
++ print(" ovsdb_datum_sort_unique(&datum, %s, %s);" % (
++ type.key.toAtomicType(), valueType))
+
+- print""" ovsdb_idl_condition_add_clause(cond,
++ print(""" ovsdb_idl_condition_add_clause(cond,
+ function,
+ &%(s)s_col_%(c)s,
+ &datum);\
+@@ -974,28 +975,28 @@ void
+ 'P': prefix.upper(),
+ 's': structName,
+ 'S': structName.upper(),
+- 'c': columnName}
++ 'c': columnName})
+ for var in free:
+- print " free(%s);" % var
+- print "}"
++ print(" free(%s);" % var)
++ print("}")
+
+- print """
++ print("""
+ void
+ %(s)s_set_condition(struct ovsdb_idl *idl, struct ovsdb_idl_condition *condition)
+ {
+ ovsdb_idl_set_condition(idl, &%(p)stable_%(tl)s, condition);
+ }""" % {'p': prefix,
+ 's': structName,
+- 'tl': tableName.lower()}
++ 'tl': tableName.lower()})
+
+ # Table columns.
+ for columnName, column in sorted_columns(table):
+ prereqs = []
+ x = column.type.cInitType("%s_col_%s" % (tableName, columnName), prereqs)
+ if prereqs:
+- print '\n'.join(prereqs)
+- print "\nstruct ovsdb_idl_column %s_columns[%s_N_COLUMNS] = {" % (
+- structName, structName.upper())
++ print('\n'.join(prereqs))
++ print("\nstruct ovsdb_idl_column %s_columns[%s_N_COLUMNS] = {" % (
++ structName, structName.upper()))
+ for columnName, column in sorted_columns(table):
+ if column.mutable:
+ mutable = "true"
+@@ -1003,7 +1004,7 @@ void
+ mutable = "false"
+ type_init = '\n'.join(" " + x
+ for x in column.type.cInitType("%s_col_%s" % (tableName, columnName), prereqs))
+- print """\
++ print("""\
+ [%(P)s%(T)s_COL_%(C)s] = {
+ .name = "%(c)s",
+ .type = {
+@@ -1018,38 +1019,38 @@ void
+ 'C': columnName.upper(),
+ 's': structName,
+ 'mutable': mutable,
+- 'type': type_init}
+- print "};"
++ 'type': type_init})
++ print("};")
+
+ # Table classes.
+- print " "
+- print "struct ovsdb_idl_table_class %stable_classes[%sN_TABLES] = {" % (prefix, prefix.upper())
++ print(" ")
++ print("struct ovsdb_idl_table_class %stable_classes[%sN_TABLES] = {" % (prefix, prefix.upper()))
+ for tableName, table in sorted(schema.tables.iteritems()):
+ structName = "%s%s" % (prefix, tableName.lower())
+ if table.is_root:
+ is_root = "true"
+ else:
+ is_root = "false"
+- print " {\"%s\", %s," % (tableName, is_root)
+- print " %s_columns, ARRAY_SIZE(%s_columns)," % (
+- structName, structName)
+- print " sizeof(struct %s), %s_init__}," % (structName, structName)
+- print "};"
++ print(" {\"%s\", %s," % (tableName, is_root))
++ print(" %s_columns, ARRAY_SIZE(%s_columns)," % (
++ structName, structName))
++ print(" sizeof(struct %s), %s_init__}," % (structName, structName))
++ print("};")
+
+ # IDL class.
+- print "\nstruct ovsdb_idl_class %sidl_class = {" % prefix
+- print " \"%s\", %stable_classes, ARRAY_SIZE(%stable_classes)" % (
+- schema.name, prefix, prefix)
+- print "};"
++ print("\nstruct ovsdb_idl_class %sidl_class = {" % prefix)
++ print(" \"%s\", %stable_classes, ARRAY_SIZE(%stable_classes)" % (
++ schema.name, prefix, prefix))
++ print("};")
+
+- print """
++ print("""
+ /* Return the schema version. The caller must not free the returned value. */
+ const char *
+ %sget_db_version(void)
+ {
+ return "%s";
+ }
+-""" % (prefix, schema.version)
++""" % (prefix, schema.version))
+
+
+
+@@ -1075,7 +1076,7 @@ def ovsdb_escape(string):
+ return re.sub(r'["\\\000-\037]', escape, string)
+
+ def usage():
+- print """\
++ print("""\
+ %(argv0)s: ovsdb schema compiler
+ usage: %(argv0)s [OPTIONS] COMMAND ARG...
+
+@@ -1087,7 +1088,7 @@ The following commands are supported:
+ The following options are also available:
+ -h, --help display this help message
+ -V, --version display version information\
+-""" % {'argv0': argv0}
++""" % {'argv0': argv0})
+ sys.exit(0)
+
+ if __name__ == "__main__":
+@@ -1105,7 +1106,7 @@ if __name__ == "__main__":
+ if key in ['-h', '--help']:
+ usage()
+ elif key in ['-V', '--version']:
+- print "ovsdb-idlc (Open vSwitch) @VERSION@"
++ print("ovsdb-idlc (Open vSwitch) @VERSION@")
+ elif key in ['-C', '--directory']:
+ os.chdir(value)
+ else:
+--
+2.5.0
+
diff --git a/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch-git/0002-Python3-compatibility-exception-cleanup.patch b/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch-git/0002-Python3-compatibility-exception-cleanup.patch
new file mode 100644
index 000000000..59c0f3e4d
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch-git/0002-Python3-compatibility-exception-cleanup.patch
@@ -0,0 +1,79 @@
+From c98fee41d130cb946aa4e60fefaa6cbf203f6790 Mon Sep 17 00:00:00 2001
+From: Jason Wessel <jason.wessel@windriver.com>
+Date: Thu, 29 Jun 2017 20:33:23 -0700
+Subject: [PATCH 2/8] Python3 compatibility: exception cleanup
+
+Commit 52e4a477f0b3c0a0ece7adeede6e06e07814f8b9 from
+https://github.com/openvswitch/ovs.git
+
+The exception syntax which is compatible with python2 and python3 is
+to use the "as" form for "except:".
+
+Signed-off-by: Jason Wessel <jason.wessel@windriver.com>
+Signed-off-by: Ben Pfaff <blp@ovn.org>
+---
+ build-aux/extract-ofp-fields | 2 +-
+ ovsdb/ovsdb-doc | 4 ++--
+ ovsdb/ovsdb-idlc.in | 4 ++--
+ 3 files changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/build-aux/extract-ofp-fields b/build-aux/extract-ofp-fields
+index 425a85f..61e752b 100755
+--- a/build-aux/extract-ofp-fields
++++ b/build-aux/extract-ofp-fields
+@@ -784,7 +784,7 @@ if __name__ == "__main__":
+ try:
+ options, args = getopt.gnu_getopt(sys.argv[1:], 'h',
+ ['help', 'ovs-version='])
+- except getopt.GetoptError, geo:
++ except getopt.GetoptError as geo:
+ sys.stderr.write("%s: %s\n" % (argv0, geo.msg))
+ sys.exit(1)
+
+diff --git a/ovsdb/ovsdb-doc b/ovsdb/ovsdb-doc
+index b34fb11..918e88a 100755
+--- a/ovsdb/ovsdb-doc
++++ b/ovsdb/ovsdb-doc
+@@ -278,7 +278,7 @@ if __name__ == "__main__":
+ options, args = getopt.gnu_getopt(sys.argv[1:], 'hV',
+ ['er-diagram=',
+ 'version=', 'help'])
+- except getopt.GetoptError, geo:
++ except getopt.GetoptError as geo:
+ sys.stderr.write("%s: %s\n" % (argv0, geo.msg))
+ sys.exit(1)
+
+@@ -306,7 +306,7 @@ if __name__ == "__main__":
+ if len(line):
+ print(line)
+
+- except error.Error, e:
++ except error.Error as e:
+ sys.stderr.write("%s: %s\n" % (argv0, e.msg))
+ sys.exit(1)
+
+diff --git a/ovsdb/ovsdb-idlc.in b/ovsdb/ovsdb-idlc.in
+index 1064448..8b85f0d 100755
+--- a/ovsdb/ovsdb-idlc.in
++++ b/ovsdb/ovsdb-idlc.in
+@@ -1098,7 +1098,7 @@ if __name__ == "__main__":
+ ['directory',
+ 'help',
+ 'version'])
+- except getopt.GetoptError, geo:
++ except getopt.GetoptError as geo:
+ sys.stderr.write("%s: %s\n" % (argv0, geo.msg))
+ sys.exit(1)
+
+@@ -1136,7 +1136,7 @@ if __name__ == "__main__":
+ sys.exit(1)
+
+ func(*args[1:])
+- except ovs.db.error.Error, e:
++ except ovs.db.error.Error as e:
+ sys.stderr.write("%s: %s\n" % (argv0, e))
+ sys.exit(1)
+
+--
+2.5.0
+
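For reference, a minimal Python sketch (not part of the patch) of the two spellings; the comma form is a syntax error on python3, while the "as" form is accepted by both interpreters:

    import getopt
    import sys

    # Python 2 only:   except getopt.GetoptError, geo:
    # Python 2 and 3:  except getopt.GetoptError as geo:
    try:
        options, args = getopt.gnu_getopt(sys.argv[1:], 'h', ['help'])
    except getopt.GetoptError as geo:
        sys.stderr.write("argument error: %s\n" % geo.msg)
        sys.exit(1)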
diff --git a/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch-git/0003-Python3-compatibility-execfile-to-exec.patch b/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch-git/0003-Python3-compatibility-execfile-to-exec.patch
new file mode 100644
index 000000000..a85980ed3
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch-git/0003-Python3-compatibility-execfile-to-exec.patch
@@ -0,0 +1,33 @@
+From 9cbae86be03756df76560c15720756f9ac088144 Mon Sep 17 00:00:00 2001
+From: Jason Wessel <jason.wessel@windriver.com>
+Date: Thu, 29 Jun 2017 20:33:23 -0700
+Subject: [PATCH 3/8] Python3 compatibility: execfile to exec
+
+Commit a4d10a7ca937d73873f6f98619d88682e69f5dbe from
+https://github.com/openvswitch/ovs.git
+
+Allow compatibility with python3 and python2 by changing execfile() to
+exec().
+
+Signed-off-by: Jason Wessel <jason.wessel@windriver.com>
+Signed-off-by: Ben Pfaff <blp@ovn.org>
+---
+ ovsdb/ovsdb-idlc.in | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/ovsdb/ovsdb-idlc.in b/ovsdb/ovsdb-idlc.in
+index 8b85f0d..3fa1a0f 100755
+--- a/ovsdb/ovsdb-idlc.in
++++ b/ovsdb/ovsdb-idlc.in
+@@ -17,7 +17,7 @@ def parseSchema(filename):
+
+ def annotateSchema(schemaFile, annotationFile):
+ schemaJson = ovs.json.from_file(schemaFile)
+- execfile(annotationFile, globals(), {"s": schemaJson})
++ exec(compile(open(annotationFile, "rb").read(), annotationFile, 'exec'), globals(), {"s": schemaJson})
+ ovs.json.to_stream(schemaJson, sys.stdout)
+ sys.stdout.write('\n')
+
+--
+2.5.0
+
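For reference, a minimal Python sketch (not part of the patch; the file name and the contents written to it are made up for the example) of the portable replacement for execfile(): compile() the file's text and pass the code object to exec(), which both interpreters accept:

    # Python 2 only:
    #     execfile(annotation_file, globals(), {"s": schema})
    # Portable equivalent:
    annotation_file = "example_annotation.py"        # hypothetical annotation file
    with open(annotation_file, "w") as f:
        f.write('s["extra"] = True\n')               # stands in for real annotations

    namespace = {"s": {"name": "Demo"}}              # stands in for the parsed schema
    with open(annotation_file, "rb") as f:
        code = compile(f.read(), annotation_file, "exec")
    exec(code, globals(), namespace)
    print(namespace["s"])                            # {'name': 'Demo', 'extra': True}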
diff --git a/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch-git/0004-Python3-compatibility-iteritems-to-items.patch b/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch-git/0004-Python3-compatibility-iteritems-to-items.patch
new file mode 100644
index 000000000..ddc86db2a
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch-git/0004-Python3-compatibility-iteritems-to-items.patch
@@ -0,0 +1,102 @@
+From 0f318e472d9897d99395adcfb17cbeaff05677ba Mon Sep 17 00:00:00 2001
+From: Jason Wessel <jason.wessel@windriver.com>
+Date: Thu, 29 Jun 2017 20:33:23 -0700
+Subject: [PATCH 4/8] Python3 compatibility: iteritems to items
+
+Commit 4ab665623cbb4c6506e48b82e0c9fe8585f42e13 from
+https://github.com/openvswitch/ovs.git
+
+Allow compatibility with python3 and python2 by changing iteritems() to
+items().
+
+Signed-off-by: Jason Wessel <jason.wessel@windriver.com>
+Signed-off-by: Ben Pfaff <blp@ovn.org>
+---
+ build-aux/extract-ofp-actions | 2 +-
+ build-aux/extract-ofp-errors | 2 +-
+ build-aux/extract-ofp-fields | 2 +-
+ ovsdb/ovsdb-idlc.in | 8 ++++----
+ 4 files changed, 7 insertions(+), 7 deletions(-)
+
+diff --git a/build-aux/extract-ofp-actions b/build-aux/extract-ofp-actions
+index 874e6b4..c11297c 100755
+--- a/build-aux/extract-ofp-actions
++++ b/build-aux/extract-ofp-actions
+@@ -13,7 +13,7 @@ version_map = {"1.0": 0x01,
+ "1.3": 0x04,
+ "1.4": 0x05,
+ "1.5": 0x06}
+-version_reverse_map = dict((v, k) for (k, v) in version_map.iteritems())
++version_reverse_map = dict((v, k) for (k, v) in version_map.items())
+
+ # Map from vendor name to the length of the action header.
+ vendor_map = {"OF": (0x00000000, 4),
+diff --git a/build-aux/extract-ofp-errors b/build-aux/extract-ofp-errors
+index 336a240..71ae0bd 100755
+--- a/build-aux/extract-ofp-errors
++++ b/build-aux/extract-ofp-errors
+@@ -14,7 +14,7 @@ version_map = {"1.0": 0x01,
+ "1.4": 0x05,
+ "1.5": 0x06,
+ "1.6": 0x07}
+-version_reverse_map = dict((v, k) for (k, v) in version_map.iteritems())
++version_reverse_map = dict((v, k) for (k, v) in version_map.items())
+
+ token = None
+ line = ""
+diff --git a/build-aux/extract-ofp-fields b/build-aux/extract-ofp-fields
+index 61e752b..ef997dd 100755
+--- a/build-aux/extract-ofp-fields
++++ b/build-aux/extract-ofp-fields
+@@ -16,7 +16,7 @@ VERSION = {"1.0": 0x01,
+ "1.3": 0x04,
+ "1.4": 0x05,
+ "1.5": 0x06}
+-VERSION_REVERSE = dict((v,k) for k, v in VERSION.iteritems())
++VERSION_REVERSE = dict((v,k) for k, v in VERSION.items())
+
+ TYPES = {"u8": (1, False),
+ "be16": (2, False),
+diff --git a/ovsdb/ovsdb-idlc.in b/ovsdb/ovsdb-idlc.in
+index 3fa1a0f..615548f 100755
+--- a/ovsdb/ovsdb-idlc.in
++++ b/ovsdb/ovsdb-idlc.in
+@@ -138,7 +138,7 @@ def printCIDLHeader(schemaFile):
+ #include "smap.h"
+ #include "uuid.h"''' % {'prefix': prefix.upper()})
+
+- for tableName, table in sorted(schema.tables.iteritems()):
++ for tableName, table in sorted(schema.tables.items()):
+ structName = "%s%s" % (prefix, tableName.lower())
+
+ print(" ")
+@@ -300,7 +300,7 @@ def printCIDLSource(schemaFile):
+ ''' % schema.idlHeader)
+
+ # Cast functions.
+- for tableName, table in sorted(schema.tables.iteritems()):
++ for tableName, table in sorted(schema.tables.items()):
+ structName = "%s%s" % (prefix, tableName.lower())
+ print('''
+ static struct %(s)s *
+@@ -311,7 +311,7 @@ static struct %(s)s *
+ ''' % {'s': structName})
+
+
+- for tableName, table in sorted(schema.tables.iteritems()):
++ for tableName, table in sorted(schema.tables.items()):
+ structName = "%s%s" % (prefix, tableName.lower())
+ print(" ")
+ print("/* %s table. */" % (tableName))
+@@ -1025,7 +1025,7 @@ void
+ # Table classes.
+ print(" ")
+ print("struct ovsdb_idl_table_class %stable_classes[%sN_TABLES] = {" % (prefix, prefix.upper()))
+- for tableName, table in sorted(schema.tables.iteritems()):
++ for tableName, table in sorted(schema.tables.items()):
+ structName = "%s%s" % (prefix, tableName.lower())
+ if table.is_root:
+ is_root = "true"
+--
+2.5.0
+
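For reference, a minimal Python sketch (not part of the patch) of the idiom being ported; dict.items() exists on both interpreters, whereas dict.iteritems() was removed in python3:

    version_map = {"1.0": 0x01, "1.3": 0x04, "1.5": 0x06}

    # Python 2 only:  version_map.iteritems()
    # Python 2 and 3: version_map.items()
    version_reverse_map = dict((v, k) for (k, v) in version_map.items())
    print(version_reverse_map[0x04])   # prints "1.3"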
diff --git a/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch-git/0005-Python3-compatibility-fix-integer-problems.patch b/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch-git/0005-Python3-compatibility-fix-integer-problems.patch
new file mode 100644
index 000000000..717a97dbe
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch-git/0005-Python3-compatibility-fix-integer-problems.patch
@@ -0,0 +1,51 @@
+From bc29f98f0137fa1083a4cacf832d52f740d150a8 Mon Sep 17 00:00:00 2001
+From: Jason Wessel <jason.wessel@windriver.com>
+Date: Thu, 29 Jun 2017 20:33:23 -0700
+Subject: [PATCH 5/8] Python3 compatibility: fix integer problems
+
+Commit fa145f1a53943243f94a32ce98525db8494b0052 from
+https://github.com/openvswitch/ovs.git
+
+In python3 maxint is not defined, but maxsize is defined in both
+python2 and python3.
+
+The put_text() will not automatically use a value which came in as a
+float due to a prior math function, and python3 will throw an exception.
+The simple answer is to convert it with int() and move on.
+
+Signed-off-by: Jason Wessel <jason.wessel@windriver.com>
+Signed-off-by: Ben Pfaff <blp@ovn.org>
+---
+ ovsdb/ovsdb-idlc.in | 2 +-
+ python/build/nroff.py | 2 ++
+ 2 files changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/ovsdb/ovsdb-idlc.in b/ovsdb/ovsdb-idlc.in
+index 615548f..7cbcbf5 100755
+--- a/ovsdb/ovsdb-idlc.in
++++ b/ovsdb/ovsdb-idlc.in
+@@ -358,7 +358,7 @@ static void
+ print(" %s" % type.value.initCDefault(valueVar, type.n_min == 0))
+ print(" }")
+ else:
+- if type.n_max != sys.maxint:
++ if type.n_max != sys.maxsize:
+ print(" size_t n = MIN(%d, datum->n);" % type.n_max)
+ nMax = "n"
+ else:
+diff --git a/python/build/nroff.py b/python/build/nroff.py
+index c23837f..401f699 100644
+--- a/python/build/nroff.py
++++ b/python/build/nroff.py
+@@ -148,6 +148,8 @@ def fatal(msg):
+
+
+ def put_text(text, x, y, s):
++ x = int(x)
++ y = int(y)
+ extend = x + len(s) - len(text[y])
+ if extend > 0:
+ text[y] += ' ' * extend
+--
+2.5.0
+
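For reference, a minimal Python sketch (not part of the patch) of the two issues described above: sys.maxint is gone in python3, and a value produced by earlier float math can no longer be used directly as a repeat count or index, so it has to be coerced with int():

    import sys

    # sys.maxint exists only on Python 2; sys.maxsize is defined on both.
    n_max = sys.maxsize
    print(n_max > 0)

    x = 10 / 2                  # 5 on Python 2, 5.0 on Python 3
    print("-" * int(x))         # int() keeps the repetition working on both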
diff --git a/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch-git/0006-Python3-compatibility-math-error-compatibility.patch b/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch-git/0006-Python3-compatibility-math-error-compatibility.patch
new file mode 100644
index 000000000..54905cd0a
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch-git/0006-Python3-compatibility-math-error-compatibility.patch
@@ -0,0 +1,56 @@
+From 3a9fcf1c8f60c160c282c9755ee1c7f9f7e113c3 Mon Sep 17 00:00:00 2001
+From: Jason Wessel <jason.wessel@windriver.com>
+Date: Thu, 29 Jun 2017 20:33:23 -0700
+Subject: [PATCH 6/8] Python3 compatibility: math error compatibility
+
+Commit 3fa5aa4294377e0f35267936d0c5caea3e61db48 from
+https://github.com/openvswitch/ovs.git
+
+The way math is handled with typing is completely different in python3.
+
+% python2<<EOF
+x=10
+y=8
+print((x + (y - 1)) / y * y)
+EOF
+16
+
+python3<<EOF
+x=10
+y=8
+print((x + (y - 1)) / y * y)
+EOF
+17.0
+
+So we need to force an integer for the round function as follows and
+maintain compatibility with python2.
+
+python3<<EOF
+x=10
+y=8
+print(int((x + (y - 1)) / y) * y)
+EOF
+16
+
+Signed-off-by: Jason Wessel <jason.wessel@windriver.com>
+Signed-off-by: Ben Pfaff <blp@ovn.org>
+---
+ build-aux/extract-ofp-actions | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/build-aux/extract-ofp-actions b/build-aux/extract-ofp-actions
+index c11297c..bd7131f 100755
+--- a/build-aux/extract-ofp-actions
++++ b/build-aux/extract-ofp-actions
+@@ -35,7 +35,7 @@ line = ""
+ arg_structs = set()
+
+ def round_up(x, y):
+- return (x + (y - 1)) / y * y
++ return int((x + (y - 1)) / y) * y
+
+ def open_file(fn):
+ global file_name
+--
+2.5.0
+
diff --git a/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch-git/0007-Python3-compatibility-unicode-to-str.patch b/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch-git/0007-Python3-compatibility-unicode-to-str.patch
new file mode 100644
index 000000000..faa32b73b
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch-git/0007-Python3-compatibility-unicode-to-str.patch
@@ -0,0 +1,51 @@
+From 2fe58f87b00d0ec24d6997930d0bcdb130c84396 Mon Sep 17 00:00:00 2001
+From: Jason Wessel <jason.wessel@windriver.com>
+Date: Thu, 29 Jun 2017 20:33:23 -0700
+Subject: [PATCH 7/8] Python3 compatibility: unicode to str
+
+Commit 7430959d4ad17db89b8387c3aef58c8b230cad10 from
+https://github.com/openvswitch/ovs.git
+
+When transitioning from python2 to python3 the following type class
+changes occurred:
+
+python2 -> python3
+unicode -> str
+str -> bytes
+
+That means we have to check the python version and do the right type
+check. Python3 will throw an error when it tries to use the unicode
+type because it doesn't exist.
+
+Signed-off-by: Jason Wessel <jason.wessel@windriver.com>
+Signed-off-by: Ben Pfaff <blp@ovn.org>
+---
+ ovsdb/ovsdb-doc | 12 +++++++++---
+ 1 file changed, 9 insertions(+), 3 deletions(-)
+
+diff --git a/ovsdb/ovsdb-doc b/ovsdb/ovsdb-doc
+index 918e88a..406c293 100755
+--- a/ovsdb/ovsdb-doc
++++ b/ovsdb/ovsdb-doc
+@@ -65,9 +65,15 @@ def columnGroupToNroff(table, groupXml, documented_columns):
+ if node.hasAttribute('type'):
+ type_string = node.attributes['type'].nodeValue
+ type_json = ovs.json.from_string(str(type_string))
+- if type(type_json) in (str, unicode):
+- raise error.Error("%s %s:%s has invalid 'type': %s"
+- % (table.name, name, key, type_json))
++ # py2 -> py3 means str -> bytes and unicode -> str
++ try:
++ if type(type_json) in (str, unicode):
++ raise error.Error("%s %s:%s has invalid 'type': %s"
++ % (table.name, name, key, type_json))
++ except:
++ if type(type_json) in (bytes, str):
++ raise error.Error("%s %s:%s has invalid 'type': %s"
++ % (table.name, name, key, type_json))
+ type_ = ovs.db.types.BaseType.from_json(type_json)
+ else:
+ type_ = column.type.value
+--
+2.5.0
+
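For reference, a minimal Python sketch (not part of the patch, and using an explicit version check rather than the try/except the patch itself uses) of the type-name shift described above:

    import sys

    # Python 2: text is unicode, bytes are str.
    # Python 3: text is str, bytes are bytes (unicode no longer exists).
    if sys.version_info[0] >= 3:
        string_types = (bytes, str)
    else:
        string_types = (str, unicode)   # noqa: F821 -- unicode only exists on Python 2

    value = u"scalar"
    if type(value) in string_types:
        print("value is string-typed")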
diff --git a/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch-git/0008-AUTHORS-Add-Jason-Wessel.patch b/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch-git/0008-AUTHORS-Add-Jason-Wessel.patch
new file mode 100644
index 000000000..d2c2be7cf
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch-git/0008-AUTHORS-Add-Jason-Wessel.patch
@@ -0,0 +1,28 @@
+From a3289add8368e0c970ae1c1c84f5df1f817ed43c Mon Sep 17 00:00:00 2001
+From: Ben Pfaff <blp@ovn.org>
+Date: Thu, 6 Jul 2017 14:01:27 -0700
+Subject: [PATCH 8/8] AUTHORS: Add Jason Wessel.
+
+Commit a91c4cfaf863718bc94fb9c88939bd0b0385a6fe from
+https://github.com/openvswitch/ovs.git
+
+Signed-off-by: Ben Pfaff <blp@ovn.org>
+---
+ AUTHORS.rst | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/AUTHORS.rst b/AUTHORS.rst
+index 63e6a8d..d0dc70d 100644
+--- a/AUTHORS.rst
++++ b/AUTHORS.rst
+@@ -156,6 +156,7 @@ Jan Scheurich jan.scheurich@ericsson.com
+ Jan Vansteenkiste jan@vstone.eu
+ Jarno Rajahalme jarno@ovn.org
+ Jason Kölker jason@koelker.net
++Jason Wessel jason.wessel@windriver.com
+ Jasper Capel jasper@capel.tv
+ Jean Tourrilhes jt@hpl.hp.com
+ Jeremy Stribling strib@nicira.com
+--
+2.5.0
+
diff --git a/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch-git/CVE-2017-9263.patch b/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch-git/CVE-2017-9263.patch
new file mode 100644
index 000000000..0fc3aa1ab
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch-git/CVE-2017-9263.patch
@@ -0,0 +1,29 @@
+A buggy or malicious switch could send a role status message with a bad
+reason code, which if printed by OVS would cause it to abort. This fixes
+the problem.
+
+CVE: CVE-2017-9263
+Upstream-Status: Submitted
+
+Reported-by: Bhargava Shastry <bshastry at sec.t-labs.tu-berlin.de>
+Signed-off-by: Ben Pfaff <blp at ovn.org>
+---
+ lib/ofp-print.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/lib/ofp-print.c b/lib/ofp-print.c
+index 7ca953100539..1932baf4871f 100644
+--- a/lib/ofp-print.c
++++ b/lib/ofp-print.c
+@@ -2147,7 +2147,8 @@ ofp_print_role_status_message(struct ds *string, const struct ofp_header *oh)
+ break;
+ case OFPCRR_N_REASONS:
+ default:
+- OVS_NOT_REACHED();
++ ds_put_cstr(string, "(unknown)");
++ break;
+ }
+ }
+
+--
+2.10.2
diff --git a/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch-git/openvswitch-add-ptest-c298ef781c2d35d939fe163cbc2f41ea7b1cb8d1.patch b/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch-git/openvswitch-add-ptest-b29cb89e9e9fe3119b2e5dd5d4fb79141635b7cc.patch
index 47c81dd98..47c81dd98 100644
--- a/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch-git/openvswitch-add-ptest-c298ef781c2d35d939fe163cbc2f41ea7b1cb8d1.patch
+++ b/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch-git/openvswitch-add-ptest-b29cb89e9e9fe3119b2e5dd5d4fb79141635b7cc.patch
diff --git a/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch.inc b/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch.inc
index 1f0b52b3b..58e0fcb07 100644
--- a/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch.inc
+++ b/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch.inc
@@ -11,11 +11,11 @@ HOMEPAGE = "http://openvswitch.org/"
SECTION = "networking"
LICENSE = "Apache-2"
-DEPENDS += "bridge-utils openssl python perl python-six-native coreutils-native"
+DEPENDS += "bridge-utils openssl python3 perl python3-six-native coreutils-native"
RDEPENDS_${PN} += "util-linux-uuidgen util-linux-libuuid coreutils \
- python perl perl-module-strict ${PN}-switch \
- bash python-twisted python-six"
+ python3 perl perl-module-strict ${PN}-switch \
+ bash python3-twisted python3-six"
RDEPENDS_${PN}-testcontroller = "${PN} lsb ${PN}-pki"
RDEPENDS_${PN}-switch = "${PN} openssl procps util-linux-uuidgen"
RDEPENDS_${PN}-pki = "${PN}"
@@ -34,7 +34,7 @@ SRC_URI = "\
"
EXTRA_OECONF += "\
- PYTHON=python \
+ PYTHON=python3 \
PYTHON3=python3 \
PERL=${bindir}/perl \
"
@@ -70,7 +70,7 @@ FILES_${PN} += "${datadir}/ovsdbmonitor"
FILES_${PN} += "/run"
FILES_${PN} += "${libdir}/python${PYTHON_BASEVERSION}/"
-inherit autotools update-rc.d systemd pythonnative
+inherit autotools update-rc.d systemd python3native
SYSTEMD_PACKAGES = "${PN}-switch"
SYSTEMD_SERVICE_${PN}-switch = " \
@@ -86,6 +86,12 @@ INITSCRIPT_PARAMS_${PN}-switch = "defaults 71"
INITSCRIPT_NAME_${PN}-testcontroller = "openvswitch-testcontroller"
INITSCRIPT_PARAMS_${PN}-testcontroller = "defaults 72"
+do_configure_prepend() {
+	# Work around the Makefile's CC=$(if ....) handling by swapping out any
+	# "-Wa," assembly directives with "-Xassembler ".
+ CC=`echo '${CC}' | sed 's/-Wa,/-Xassembler /g'`
+}
+
do_install_append() {
install -d ${D}/${sysconfdir}/default/
install -m 660 ${WORKDIR}/openvswitch-switch-setup ${D}/${sysconfdir}/default/openvswitch-switch
diff --git a/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch_git.bb b/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch_git.bb
index 1fb82aee5..b4aab3550 100644
--- a/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch_git.bb
+++ b/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch_git.bb
@@ -3,20 +3,20 @@ require openvswitch.inc
DEPENDS += "virtual/kernel"
RDEPENDS_${PN}-ptest += "\
- python-logging python-syslog python-argparse python-io \
- python-fcntl python-shell python-lang python-xml python-math \
- python-datetime python-netclient python sed \
+ python3-logging python3-syslog python3-argparse python3-io \
+ python3-fcntl python3-shell python3-lang python3-xml python3-math \
+ python3-datetime python3-netclient python3 sed \
ldd perl-module-socket perl-module-carp perl-module-exporter \
- perl-module-xsloader python-netserver python-threading \
- python-resource python-subprocess \
+ perl-module-xsloader python3-netserver python3-threading \
+ python3-resource python3-subprocess findutils which \
"
S = "${WORKDIR}/git"
-PV = "2.7.0+${SRCREV}"
+PV = "2.7.1+${SRCREV}"
FILESEXTRAPATHS_append := "${THISDIR}/${PN}-git:"
-SRCREV = "c298ef781c2d35d939fe163cbc2f41ea7b1cb8d1"
+SRCREV = "b29cb89e9e9fe3119b2e5dd5d4fb79141635b7cc"
SRC_URI = "file://openvswitch-switch \
file://openvswitch-switch-setup \
file://openvswitch-testcontroller \
@@ -29,12 +29,30 @@ SRC_URI = "file://openvswitch-switch \
file://python-make-remaining-scripts-use-usr-bin-env.patch \
file://0001-use-the-linux-if_packet.h-Interface-directly.patch \
file://0002-Define-WAIT_ANY-if-not-provided-by-system.patch \
+ file://CVE-2017-9263.patch \
+ file://python-switch-remaining-scripts-to-use-python3.patch \
"
+# Temporarily backport patches to better support py3. These have been
+# merged upstream but are not part of v2.7.1.
+SRC_URI += " \
+ file://0001-Python3-compatibility-Convert-print-statements.patch \
+ file://0002-Python3-compatibility-exception-cleanup.patch \
+ file://0003-Python3-compatibility-execfile-to-exec.patch \
+ file://0004-Python3-compatibility-iteritems-to-items.patch \
+ file://0005-Python3-compatibility-fix-integer-problems.patch \
+ file://0006-Python3-compatibility-math-error-compatibility.patch \
+ file://0007-Python3-compatibility-unicode-to-str.patch \
+ file://0008-AUTHORS-Add-Jason-Wessel.patch \
+"
+
LIC_FILES_CHKSUM = "file://COPYING;md5=17b2c9d4c70853a09c0e143137754b35"
-PACKAGECONFIG ?= ""
-PACKAGECONFIG[dpdk] = "--with-dpdk=${STAGING_DIR_TARGET}/opt/dpdk/${TARGET_ARCH}-native-linuxapp-gcc,,dpdk,"
+DPDK_INSTALL_DIR ?= "/opt/dpdk"
+
+PACKAGECONFIG ?= "libcap-ng"
+PACKAGECONFIG[dpdk] = "--with-dpdk=${STAGING_DIR_TARGET}${DPDK_INSTALL_DIR}/share/${TARGET_ARCH}-native-linuxapp-gcc,,dpdk,dpdk"
+PACKAGECONFIG[libcap-ng] = "--enable-libcapng,--disable-libcapng,libcap-ng,"
# Don't compile kernel modules by default since it heavily depends on
# kernel version. Use the in-kernel module for now.