author     Patrick Williams <patrick@stwcx.xyz>    2016-03-30 23:21:19 +0300
committer  Patrick Williams <patrick@stwcx.xyz>    2016-03-30 23:21:19 +0300
commit     b4a027550acf2c1051c34f997b8e7e845017af4b (patch)
tree       9e38d3c17b42cb1e6765620a87e908973a93c821 /yocto-poky/meta
parent     2fe86d90044af218ced8f42fdded6b136f1046d2 (diff)
parent     f1e5d6968976c2341c6d554bfcc8895f1b33c26b (diff)
download   openbmc-b4a027550acf2c1051c34f997b8e7e845017af4b.tar.xz
Merge commit 'f1e5d6968976c2341c6d554bfcc8895f1b33c26b' from yocto-2.0.1
Diffstat (limited to 'yocto-poky/meta')
-rw-r--r--yocto-poky/meta/classes/allarch.bbclass4
-rw-r--r--yocto-poky/meta/classes/archiver.bbclass62
-rw-r--r--yocto-poky/meta/classes/autotools.bbclass45
-rw-r--r--yocto-poky/meta/classes/base.bbclass6
-rw-r--r--yocto-poky/meta/classes/buildhistory.bbclass2
-rw-r--r--yocto-poky/meta/classes/cpan-base.bbclass4
-rw-r--r--yocto-poky/meta/classes/cpan.bbclass2
-rw-r--r--yocto-poky/meta/classes/cpan_build.bbclass23
-rw-r--r--yocto-poky/meta/classes/cross-canadian.bbclass2
-rw-r--r--yocto-poky/meta/classes/deploy.bbclass1
-rw-r--r--yocto-poky/meta/classes/distrodata.bbclass20
-rw-r--r--yocto-poky/meta/classes/distutils3.bbclass4
-rw-r--r--yocto-poky/meta/classes/externalsrc.bbclass5
-rw-r--r--yocto-poky/meta/classes/fontcache.bbclass19
-rw-r--r--yocto-poky/meta/classes/grub-efi.bbclass3
-rw-r--r--yocto-poky/meta/classes/gtk-icon-cache.bbclass12
-rw-r--r--yocto-poky/meta/classes/gummiboot.bbclass3
-rw-r--r--yocto-poky/meta/classes/image-live.bbclass2
-rw-r--r--yocto-poky/meta/classes/image-mklibs.bbclass2
-rw-r--r--yocto-poky/meta/classes/image-vm.bbclass3
-rw-r--r--yocto-poky/meta/classes/image.bbclass37
-rw-r--r--yocto-poky/meta/classes/image_types.bbclass27
-rw-r--r--yocto-poky/meta/classes/insane.bbclass17
-rw-r--r--yocto-poky/meta/classes/kernel-arch.bbclass7
-rw-r--r--yocto-poky/meta/classes/kernel-yocto.bbclass17
-rw-r--r--yocto-poky/meta/classes/kernel.bbclass17
-rw-r--r--yocto-poky/meta/classes/libc-package.bbclass4
-rw-r--r--yocto-poky/meta/classes/license.bbclass1
-rw-r--r--yocto-poky/meta/classes/metadata_scm.bbclass23
-rw-r--r--yocto-poky/meta/classes/multilib.bbclass5
-rw-r--r--yocto-poky/meta/classes/multilib_global.bbclass28
-rw-r--r--yocto-poky/meta/classes/package.bbclass4
-rw-r--r--yocto-poky/meta/classes/pixbufcache.bbclass4
-rw-r--r--yocto-poky/meta/classes/populate_sdk_base.bbclass12
-rw-r--r--yocto-poky/meta/classes/populate_sdk_ext.bbclass54
-rw-r--r--yocto-poky/meta/classes/prserv.bbclass2
-rw-r--r--yocto-poky/meta/classes/ptest.bbclass10
-rw-r--r--yocto-poky/meta/classes/report-error.bbclass11
-rw-r--r--yocto-poky/meta/classes/sanity.bbclass11
-rw-r--r--yocto-poky/meta/classes/sign_package_feed.bbclass31
-rw-r--r--yocto-poky/meta/classes/sign_rpm.bbclass48
-rw-r--r--yocto-poky/meta/classes/sstate.bbclass30
-rw-r--r--yocto-poky/meta/classes/testimage.bbclass56
-rw-r--r--yocto-poky/meta/classes/toolchain-scripts.bbclass1
-rw-r--r--yocto-poky/meta/classes/uninative.bbclass2
-rw-r--r--yocto-poky/meta/classes/useradd-staticids.bbclass16
-rw-r--r--yocto-poky/meta/classes/useradd_base.bbclass6
-rw-r--r--yocto-poky/meta/conf/bitbake.conf5
-rw-r--r--yocto-poky/meta/conf/distro/include/default-distrovars.inc3
-rw-r--r--yocto-poky/meta/conf/distro/include/distro_alias.inc10
-rw-r--r--yocto-poky/meta/conf/documentation.conf2
-rw-r--r--yocto-poky/meta/conf/layer.conf5
-rw-r--r--yocto-poky/meta/conf/machine/include/qemu.inc2
-rw-r--r--yocto-poky/meta/conf/machine/include/tune-thunderx.inc19
-rw-r--r--yocto-poky/meta/conf/machine/qemux86-64.conf2
-rw-r--r--yocto-poky/meta/conf/machine/qemux86.conf2
-rw-r--r--yocto-poky/meta/conf/multilib.conf3
-rw-r--r--yocto-poky/meta/files/common-licenses/GFDL-1.1206
-rw-r--r--yocto-poky/meta/files/ext-sdk-prepare.sh20
-rw-r--r--yocto-poky/meta/files/toolchain-shar-extract.sh30
-rw-r--r--yocto-poky/meta/files/toolchain-shar-relocate.sh24
-rw-r--r--yocto-poky/meta/lib/oe/copy_buildsystem.py5
-rw-r--r--yocto-poky/meta/lib/oe/distro_check.py108
-rw-r--r--yocto-poky/meta/lib/oe/image.py62
-rw-r--r--yocto-poky/meta/lib/oe/package_manager.py137
-rw-r--r--yocto-poky/meta/lib/oe/patch.py12
-rw-r--r--yocto-poky/meta/lib/oe/recipeutils.py94
-rw-r--r--yocto-poky/meta/lib/oe/rootfs.py8
-rw-r--r--yocto-poky/meta/lib/oe/sdk.py76
-rw-r--r--yocto-poky/meta/lib/oe/sstatesig.py20
-rw-r--r--yocto-poky/meta/lib/oeqa/oetest.py70
-rwxr-xr-xyocto-poky/meta/lib/oeqa/runexported.py42
-rw-r--r--yocto-poky/meta/lib/oeqa/runtime/_ptest.py2
-rw-r--r--yocto-poky/meta/lib/oeqa/runtime/connman.py23
-rw-r--r--yocto-poky/meta/lib/oeqa/runtime/date.py4
-rw-r--r--yocto-poky/meta/lib/oeqa/runtime/files/testsdkmakefile5
-rw-r--r--yocto-poky/meta/lib/oeqa/runtime/kernelmodule.py4
-rw-r--r--yocto-poky/meta/lib/oeqa/runtime/parselogs.py13
-rw-r--r--yocto-poky/meta/lib/oeqa/runtime/scanelf.py2
-rw-r--r--yocto-poky/meta/lib/oeqa/sdk/gcc.py6
-rw-r--r--yocto-poky/meta/lib/oeqa/selftest/archiver.py50
-rw-r--r--yocto-poky/meta/lib/oeqa/selftest/base.py2
-rw-r--r--yocto-poky/meta/lib/oeqa/selftest/bbtests.py60
-rw-r--r--yocto-poky/meta/lib/oeqa/selftest/buildoptions.py11
-rw-r--r--yocto-poky/meta/lib/oeqa/selftest/devtool.py405
-rw-r--r--yocto-poky/meta/lib/oeqa/selftest/imagefeatures.py16
-rw-r--r--yocto-poky/meta/lib/oeqa/selftest/layerappend.py9
-rw-r--r--yocto-poky/meta/lib/oeqa/selftest/manifest.py165
-rw-r--r--yocto-poky/meta/lib/oeqa/selftest/recipetool.py11
-rw-r--r--yocto-poky/meta/lib/oeqa/selftest/sstatetests.py73
-rw-r--r--yocto-poky/meta/lib/oeqa/selftest/wic.py94
-rw-r--r--yocto-poky/meta/lib/oeqa/utils/decorators.py35
-rw-r--r--yocto-poky/meta/lib/oeqa/utils/dump.py15
-rw-r--r--yocto-poky/meta/lib/oeqa/utils/ftools.py20
-rw-r--r--yocto-poky/meta/lib/oeqa/utils/qemurunner.py28
-rw-r--r--yocto-poky/meta/recipes-bsp/gnu-efi/gnu-efi/gcc46-compatibility.patch21
-rw-r--r--yocto-poky/meta/recipes-bsp/gnu-efi/gnu-efi_3.0.3.bb1
-rw-r--r--yocto-poky/meta/recipes-bsp/grub/files/CVE-2015-8370.patch59
-rw-r--r--yocto-poky/meta/recipes-bsp/grub/grub2.inc1
-rw-r--r--yocto-poky/meta/recipes-bsp/gummiboot/gummiboot/0001-console-Fix-C-syntax-errors-for-function-declaration.patch74
-rw-r--r--yocto-poky/meta/recipes-bsp/gummiboot/gummiboot_git.bb3
-rw-r--r--yocto-poky/meta/recipes-bsp/hostap/hostap-utils-0.4.7/0001-Define-_u32-__s32-__u16-__s16-__u8-in-terms-of-c99-t.patch36
-rw-r--r--yocto-poky/meta/recipes-bsp/hostap/hostap-utils.inc4
-rw-r--r--yocto-poky/meta/recipes-connectivity/avahi/avahi-ui_0.6.31.bb3
-rw-r--r--yocto-poky/meta/recipes-connectivity/bind/bind/0001-lib-dns-gen.c-fix-too-long-error.patch34
-rw-r--r--yocto-poky/meta/recipes-connectivity/bind/bind/CVE-2015-8000.patch278
-rw-r--r--yocto-poky/meta/recipes-connectivity/bind/bind/CVE-2015-8461.patch44
-rw-r--r--yocto-poky/meta/recipes-connectivity/bind/bind/CVE-2015-8704.patch28
-rw-r--r--yocto-poky/meta/recipes-connectivity/bind/bind/CVE-2015-8705.patch44
-rw-r--r--yocto-poky/meta/recipes-connectivity/bind/bind_9.10.2-P4.bb5
-rw-r--r--yocto-poky/meta/recipes-connectivity/bluez5/bluez5.inc5
-rw-r--r--yocto-poky/meta/recipes-connectivity/bluez5/bluez5/bluetooth.conf17
-rw-r--r--yocto-poky/meta/recipes-connectivity/connman/connman-conf.bb6
-rw-r--r--yocto-poky/meta/recipes-connectivity/connman/connman-gnome_0.7.bb5
-rw-r--r--yocto-poky/meta/recipes-connectivity/connman/connman.inc12
-rw-r--r--yocto-poky/meta/recipes-connectivity/connman/connman/0001-Detect-backtrace-API-availability-before-using-it.patch55
-rw-r--r--yocto-poky/meta/recipes-connectivity/connman/connman/0002-resolve-musl-does-not-implement-res_ninit.patch77
-rw-r--r--yocto-poky/meta/recipes-connectivity/connman/connman/0003-Fix-header-inclusions-for-musl.patch85
-rw-r--r--yocto-poky/meta/recipes-connectivity/connman/connman/add_xuser_dbus_permission.patch21
-rw-r--r--yocto-poky/meta/recipes-connectivity/connman/connman/connman4
-rw-r--r--yocto-poky/meta/recipes-connectivity/connman/connman_1.30.bb4
-rw-r--r--yocto-poky/meta/recipes-connectivity/iproute2/iproute2.inc6
-rw-r--r--yocto-poky/meta/recipes-connectivity/irda-utils/irda-utils_0.9.18.bb2
-rw-r--r--yocto-poky/meta/recipes-connectivity/openssh/openssh/CVE-2016-1907_2.patch65
-rw-r--r--yocto-poky/meta/recipes-connectivity/openssh/openssh/CVE-2016-1907_3.patch329
-rw-r--r--yocto-poky/meta/recipes-connectivity/openssh/openssh/CVE-2016-1907_upstream_commit.patch33
-rw-r--r--yocto-poky/meta/recipes-connectivity/openssh/openssh/sshd@.service4
-rw-r--r--yocto-poky/meta/recipes-connectivity/openssh/openssh/sshdgenkeys.service21
-rw-r--r--yocto-poky/meta/recipes-connectivity/openssh/openssh_7.1p2.bb (renamed from yocto-poky/meta/recipes-connectivity/openssh/openssh_7.1p1.bb)11
-rw-r--r--yocto-poky/meta/recipes-connectivity/openssl/openssl.inc2
-rw-r--r--yocto-poky/meta/recipes-connectivity/openssl/openssl/0001-Add-test-for-CVE-2015-3194.patch66
-rw-r--r--yocto-poky/meta/recipes-connectivity/openssl/openssl/CVE-2015-3193-bn-asm-x86_64-mont5.pl-fix-carry-propagating-bug-CVE.patch101
-rw-r--r--yocto-poky/meta/recipes-connectivity/openssl/openssl/CVE-2015-3194-1-Add-PSS-parameter-check.patch45
-rw-r--r--yocto-poky/meta/recipes-connectivity/openssl/openssl/CVE-2015-3195-Fix-leak-with-ASN.1-combine.patch66
-rw-r--r--yocto-poky/meta/recipes-connectivity/openssl/openssl/CVE-2015-3197.patch63
-rw-r--r--yocto-poky/meta/recipes-connectivity/openssl/openssl/CVE-2016-0701_1.patch102
-rw-r--r--yocto-poky/meta/recipes-connectivity/openssl/openssl/CVE-2016-0701_2.patch156
-rw-r--r--yocto-poky/meta/recipes-connectivity/openssl/openssl/ptest_makefile_deps.patch248
-rw-r--r--yocto-poky/meta/recipes-connectivity/openssl/openssl_1.0.2d.bb18
-rw-r--r--yocto-poky/meta/recipes-connectivity/socat/socat/CVE-2016-2217.patch372
-rw-r--r--yocto-poky/meta/recipes-connectivity/socat/socat_1.7.3.0.bb1
-rw-r--r--yocto-poky/meta/recipes-core/busybox/busybox.inc3
-rw-r--r--yocto-poky/meta/recipes-core/busybox/busybox/0001-Switch-to-POSIX-utmpx-API.patch388
-rw-r--r--yocto-poky/meta/recipes-core/busybox/busybox/0001-Use-CC-when-linking-instead-of-LD-and-use-CFLAGS-and.patch114
-rw-r--r--yocto-poky/meta/recipes-core/busybox/busybox/0001-randconfig-fix.patch33
-rw-r--r--yocto-poky/meta/recipes-core/busybox/busybox/0002-Passthrough-r-to-linker.patch32
-rw-r--r--yocto-poky/meta/recipes-core/busybox/busybox_1.23.2.bb4
-rw-r--r--yocto-poky/meta/recipes-core/coreutils/coreutils-6.9/loadavg.patch18
-rw-r--r--yocto-poky/meta/recipes-core/coreutils/coreutils_6.9.bb2
-rw-r--r--yocto-poky/meta/recipes-core/coreutils/coreutils_8.24.bb2
-rw-r--r--yocto-poky/meta/recipes-core/dropbear/dropbear/dropbear@.service3
-rw-r--r--yocto-poky/meta/recipes-core/dropbear/dropbear/dropbearkey.service9
-rw-r--r--yocto-poky/meta/recipes-core/glibc/cross-localedef-native_2.22.bb5
-rw-r--r--yocto-poky/meta/recipes-core/glibc/glibc-locale.inc2
-rw-r--r--yocto-poky/meta/recipes-core/glibc/glibc/0028-Clear-ELF_RTYPE_CLASS_EXTERN_PROTECTED_DATA-for-prel.patch84
-rw-r--r--yocto-poky/meta/recipes-core/glibc/glibc/CVE-2015-7547.patch642
-rw-r--r--yocto-poky/meta/recipes-core/glibc/glibc/CVE-2015-8776.patch155
-rw-r--r--yocto-poky/meta/recipes-core/glibc/glibc/CVE-2015-8777.patch123
-rw-r--r--yocto-poky/meta/recipes-core/glibc/glibc/CVE-2015-8779.patch262
-rw-r--r--yocto-poky/meta/recipes-core/glibc/glibc/CVE-2015-9761_1.patch1039
-rw-r--r--yocto-poky/meta/recipes-core/glibc/glibc/CVE-2015-9761_2.patch385
-rw-r--r--yocto-poky/meta/recipes-core/glibc/glibc/strcoll-Remove-incorrect-STRDIFF-based-optimization-.patch323
-rw-r--r--yocto-poky/meta/recipes-core/glibc/glibc/use_64bit_atomics.patch24
-rw-r--r--yocto-poky/meta/recipes-core/glibc/glibc_2.22.bb13
-rw-r--r--yocto-poky/meta/recipes-core/images/build-appliance-image_12.0.1.bb4
-rw-r--r--yocto-poky/meta/recipes-core/initrdscripts/files/init-install-efi.sh9
-rwxr-xr-xyocto-poky/meta/recipes-core/initrdscripts/initramfs-framework/finish2
-rwxr-xr-xyocto-poky/meta/recipes-core/initrdscripts/initramfs-framework/init2
-rw-r--r--yocto-poky/meta/recipes-core/initscripts/initscripts-1.0/sysfs.sh4
-rw-r--r--yocto-poky/meta/recipes-core/kbd/kbd_2.0.2.bb1
-rw-r--r--yocto-poky/meta/recipes-core/libxml/libxml2.inc16
-rw-r--r--yocto-poky/meta/recipes-core/libxml/libxml2/0001-CVE-2015-5312-Another-entity-expansion-issue.patch39
-rw-r--r--yocto-poky/meta/recipes-core/libxml/libxml2/0001-CVE-2015-7497-Avoid-an-heap-buffer-overflow-in-xmlDi.patch40
-rw-r--r--yocto-poky/meta/recipes-core/libxml/libxml2/0001-CVE-2015-7500-Fix-memory-access-error-due-to-incorre.patch131
-rw-r--r--yocto-poky/meta/recipes-core/libxml/libxml2/0001-CVE-2015-8035-Fix-XZ-compression-support-loop.patch38
-rw-r--r--yocto-poky/meta/recipes-core/libxml/libxml2/0001-CVE-2015-8242-Buffer-overead-with-HTML-parser-in-pus.patch49
-rw-r--r--yocto-poky/meta/recipes-core/libxml/libxml2/0001-Fix-a-bug-on-name-parsing-at-the-end-of-current-inpu.patch138
-rw-r--r--yocto-poky/meta/recipes-core/libxml/libxml2/CVE-2015-7498-Avoid-processing-entities-after-encoding-conversion-.patch89
-rw-r--r--yocto-poky/meta/recipes-core/libxml/libxml2/CVE-2015-7499-1-Add-xmlHaltParser-to-stop-the-parser.patch88
-rw-r--r--yocto-poky/meta/recipes-core/libxml/libxml2/CVE-2015-7499-2-Detect-incoherency-on-GROW.patch43
-rw-r--r--yocto-poky/meta/recipes-core/libxml/libxml2/CVE-2015-7941-1-Stop-parsing-on-entities-boundaries-errors.patch39
-rw-r--r--yocto-poky/meta/recipes-core/libxml/libxml2/CVE-2015-7941-2-Cleanup-conditional-section-error-handling.patch56
-rw-r--r--yocto-poky/meta/recipes-core/libxml/libxml2/CVE-2015-7942-2-Fix-an-error-in-previous-Conditional-section-patch.patch35
-rw-r--r--yocto-poky/meta/recipes-core/libxml/libxml2/CVE-2015-7942-Another-variation-of-overflow-in-Conditional-section.patch39
-rw-r--r--yocto-poky/meta/recipes-core/libxml/libxml2/CVE-2015-8241.patch40
-rw-r--r--yocto-poky/meta/recipes-core/libxml/libxml2/CVE-2015-8317-Fail-parsing-early-on-if-encoding-conversion-failed.patch42
-rw-r--r--yocto-poky/meta/recipes-core/libxml/libxml2/CVE-2015-8710.patch71
-rw-r--r--yocto-poky/meta/recipes-core/meta/meta-ide-support.bb1
-rw-r--r--yocto-poky/meta/recipes-core/meta/nativesdk-buildtools-perl-dummy.bb13
-rw-r--r--yocto-poky/meta/recipes-core/meta/signing-keys.bb45
-rw-r--r--yocto-poky/meta/recipes-core/meta/uninative-tarball.bb1
-rw-r--r--yocto-poky/meta/recipes-core/os-release/os-release.bb5
-rw-r--r--yocto-poky/meta/recipes-core/packagegroups/packagegroup-core-standalone-sdk-target.bb2
-rw-r--r--yocto-poky/meta/recipes-core/readline/readline-6.3/readline-cve-2014-2524.patch (renamed from yocto-poky/meta/recipes-core/readline/readline-6.3/readline63-003)0
-rw-r--r--yocto-poky/meta/recipes-core/readline/readline_6.3.bb2
-rw-r--r--yocto-poky/meta/recipes-core/systemd/systemd/0001-fix-build-on-uClibc-exp10.patch22
-rw-r--r--yocto-poky/meta/recipes-core/systemd/systemd/0022-Use-getenv-when-secure-versions-are-not-available.patch39
-rw-r--r--yocto-poky/meta/recipes-core/systemd/systemd/rules-whitelist-hd-devices.patch32
-rw-r--r--yocto-poky/meta/recipes-core/systemd/systemd_225.bb13
-rw-r--r--yocto-poky/meta/recipes-core/uclibc/uclibc-git.inc5
-rw-r--r--yocto-poky/meta/recipes-core/uclibc/uclibc-git/0001-fcntl-Add-AT_EMPTY_PATH-for-all-and-O_PATH-for-arm.patch42
-rw-r--r--yocto-poky/meta/recipes-core/uclibc/uclibc-git/0001-gcc5-optimizes-away-the-write-only-static-functions-.patch51
-rw-r--r--yocto-poky/meta/recipes-core/uclibc/uclibc-git/0001-wire-in-syncfs.patch49
-rw-r--r--yocto-poky/meta/recipes-core/uclibc/uclibc-git/CVE-2016-2224.patch49
-rw-r--r--yocto-poky/meta/recipes-core/uclibc/uclibc-git/CVE-2016-2225.patch32
-rw-r--r--yocto-poky/meta/recipes-core/uclibc/uclibc-git/uClibc.distro2
-rw-r--r--yocto-poky/meta/recipes-core/udev/udev.inc2
-rw-r--r--yocto-poky/meta/recipes-core/util-linux/util-linux.inc6
-rw-r--r--yocto-poky/meta/recipes-core/util-linux/util-linux/runuser-l.pamd3
-rw-r--r--yocto-poky/meta/recipes-core/util-linux/util-linux/runuser.pamd4
-rw-r--r--yocto-poky/meta/recipes-core/util-linux/util-linux_2.26.2.bb2
-rw-r--r--yocto-poky/meta/recipes-devtools/binutils/binutils/binutils-octeon3.patch2
-rw-r--r--yocto-poky/meta/recipes-devtools/build-compare/build-compare_git.bb1
-rw-r--r--yocto-poky/meta/recipes-devtools/ccache/ccache_3.2.3.bb4
-rw-r--r--yocto-poky/meta/recipes-devtools/ccache/files/0002-dev.mk.in-fix-file-name-too-long.patch32
-rw-r--r--yocto-poky/meta/recipes-devtools/dpkg/dpkg/CVE-2015-0860.patch52
-rw-r--r--yocto-poky/meta/recipes-devtools/dpkg/dpkg_1.18.2.bb1
-rw-r--r--yocto-poky/meta/recipes-devtools/e2fsprogs/e2fsprogs/copy-in-create-hardlinks-with-the-correct-directory-.patch81
-rw-r--r--yocto-poky/meta/recipes-devtools/e2fsprogs/e2fsprogs_1.42.9.bb16
-rw-r--r--yocto-poky/meta/recipes-devtools/file/file/host-file.patch32
-rw-r--r--yocto-poky/meta/recipes-devtools/file/file_5.24.bb10
-rw-r--r--yocto-poky/meta/recipes-devtools/gcc/gcc-4.8.inc1
-rw-r--r--yocto-poky/meta/recipes-devtools/gcc/gcc-4.8/0051-gcc-483-universal-initializer-no-warning.patch107
-rw-r--r--yocto-poky/meta/recipes-devtools/gcc/gcc-4.9.inc1
-rw-r--r--yocto-poky/meta/recipes-devtools/gcc/gcc-4.9/0065-gcc-483-universal-initializer-no-warning.patch107
-rw-r--r--yocto-poky/meta/recipes-devtools/gcc/gcc-5.2.inc3
-rw-r--r--yocto-poky/meta/recipes-devtools/gcc/gcc-5.2/0042-cxxflags-for-build.patch123
-rw-r--r--yocto-poky/meta/recipes-devtools/gcc/gcc-common.inc5
-rw-r--r--yocto-poky/meta/recipes-devtools/gcc/gcc-configure-common.inc1
-rw-r--r--yocto-poky/meta/recipes-devtools/gcc/gcc-cross-initial.inc1
-rw-r--r--yocto-poky/meta/recipes-devtools/gcc/gcc-multilib-config.inc3
-rw-r--r--yocto-poky/meta/recipes-devtools/gcc/gcc-runtime.inc3
-rw-r--r--yocto-poky/meta/recipes-devtools/gcc/gcc-shared-source.inc2
-rw-r--r--yocto-poky/meta/recipes-devtools/gcc/gcc-target.inc12
-rw-r--r--yocto-poky/meta/recipes-devtools/gcc/gcc_4.9.bb5
-rw-r--r--yocto-poky/meta/recipes-devtools/gcc/libgcc.inc9
-rw-r--r--yocto-poky/meta/recipes-devtools/git/git-2.5.0/0008-CVE-2015-7545-1.patch446
-rw-r--r--yocto-poky/meta/recipes-devtools/git/git-2.5.0/0009-CVE-2015-7545-2.patch112
-rw-r--r--yocto-poky/meta/recipes-devtools/git/git-2.5.0/0010-CVE-2015-7545-3.patch112
-rw-r--r--yocto-poky/meta/recipes-devtools/git/git-2.5.0/0011-CVE-2015-7545-4.patch150
-rw-r--r--yocto-poky/meta/recipes-devtools/git/git-2.5.0/0012-CVE-2015-7545-5.patch69
-rw-r--r--yocto-poky/meta/recipes-devtools/git/git_2.5.0.bb8
-rw-r--r--yocto-poky/meta/recipes-devtools/guile/guile_2.0.11.bb16
-rw-r--r--yocto-poky/meta/recipes-devtools/i2c-tools/i2c-tools_3.1.2.bb3
-rw-r--r--yocto-poky/meta/recipes-devtools/installer/adt-installer_1.0.bb1
-rw-r--r--yocto-poky/meta/recipes-devtools/libtool/libtool-2.4.6.inc1
-rw-r--r--yocto-poky/meta/recipes-devtools/libtool/libtool/0001-libtool-Fix-support-for-NIOS2-processor.patch68
-rw-r--r--yocto-poky/meta/recipes-devtools/mmc/mmc-utils_git.bb4
-rw-r--r--yocto-poky/meta/recipes-devtools/mtools/mtools_4.0.18.bb5
-rw-r--r--yocto-poky/meta/recipes-devtools/opensp/opensp_1.5.2.bb4
-rw-r--r--yocto-poky/meta/recipes-devtools/opkg/opkg/0001-libopkg-include-stdio.h-for-getting-FILE-defined.patch45
-rw-r--r--yocto-poky/meta/recipes-devtools/opkg/opkg/0001-opkg_conf-create-opkg.lock-in-run-instead-of-var-run.patch34
-rw-r--r--yocto-poky/meta/recipes-devtools/opkg/opkg/0001-string_util-New-file-with-bin_to_hex-function.patch122
-rw-r--r--yocto-poky/meta/recipes-devtools/opkg/opkg/0002-md5-Add-md5_to_string-function.patch110
-rw-r--r--yocto-poky/meta/recipes-devtools/opkg/opkg/0003-sha256-Add-sha256_to_string-function.patch110
-rw-r--r--yocto-poky/meta/recipes-devtools/opkg/opkg/0004-opkg_download-Use-short-cache-file-name.patch85
-rw-r--r--yocto-poky/meta/recipes-devtools/opkg/opkg_0.3.0.bb6
-rw-r--r--yocto-poky/meta/recipes-devtools/perl/perl-native_5.22.0.bb16
-rw-r--r--yocto-poky/meta/recipes-devtools/perl/perl/perl-errno-generation-gcc5.patch23
-rw-r--r--yocto-poky/meta/recipes-devtools/perl/perl_5.22.0.bb3
-rw-r--r--yocto-poky/meta/recipes-devtools/prelink/prelink_git.bb4
-rw-r--r--yocto-poky/meta/recipes-devtools/pseudo/pseudo_1.7.4.bb (renamed from yocto-poky/meta/recipes-devtools/pseudo/pseudo_1.7.3.bb)4
-rw-r--r--yocto-poky/meta/recipes-devtools/pseudo/pseudo_git.bb4
-rw-r--r--yocto-poky/meta/recipes-devtools/python/python-3.4-manifest.inc2
-rw-r--r--yocto-poky/meta/recipes-devtools/python/python-async_0.6.2.bb2
-rw-r--r--yocto-poky/meta/recipes-devtools/python/python-pygtk_2.24.0.bb4
-rw-r--r--yocto-poky/meta/recipes-devtools/python/python-smartpm/smart-cache.py-getPackages-matches-name-version.patch43
-rw-r--r--yocto-poky/meta/recipes-devtools/python/python-smartpm_git.bb1
-rw-r--r--yocto-poky/meta/recipes-devtools/qemu/qemu.inc8
-rw-r--r--yocto-poky/meta/recipes-devtools/qemu/qemu/CVE-2015-7295_1.patch63
-rw-r--r--yocto-poky/meta/recipes-devtools/qemu/qemu/CVE-2015-7295_2.patch58
-rw-r--r--yocto-poky/meta/recipes-devtools/qemu/qemu/CVE-2015-7295_3.patch52
-rw-r--r--yocto-poky/meta/recipes-devtools/qemu/qemu/CVE-2015-7504.patch56
-rw-r--r--yocto-poky/meta/recipes-devtools/qemu/qemu/CVE-2015-7512.patch44
-rw-r--r--yocto-poky/meta/recipes-devtools/qemu/qemu/CVE-2015-8345.patch73
-rw-r--r--yocto-poky/meta/recipes-devtools/qemu/qemu/CVE-2015-8504.patch51
-rw-r--r--yocto-poky/meta/recipes-devtools/qemu/qemu/CVE-2016-1568.patch46
-rw-r--r--yocto-poky/meta/recipes-devtools/qemu/qemu/CVE-2016-2197.patch59
-rw-r--r--yocto-poky/meta/recipes-devtools/qemu/qemu/CVE-2016-2198.patch45
-rw-r--r--yocto-poky/meta/recipes-devtools/qemu/qemu/no-valgrind.patch19
-rw-r--r--yocto-poky/meta/recipes-devtools/qemu/qemu_2.4.0.bb11
-rw-r--r--yocto-poky/meta/recipes-devtools/rpm/rpm/configure.ac-check-for-both-gpg2-and-gpg.patch29
-rw-r--r--yocto-poky/meta/recipes-devtools/rpm/rpm/rpm-check-rootpath-reasonableness.patch2
-rw-r--r--yocto-poky/meta/recipes-devtools/rpm/rpm_4.11.2.bb3
-rw-r--r--yocto-poky/meta/recipes-devtools/rpm/rpm_5.4+cvs.bb1
-rw-r--r--yocto-poky/meta/recipes-devtools/rpm/rpm_5.4.14.bb2
-rw-r--r--yocto-poky/meta/recipes-devtools/rpm/rpmresolve/rpmresolve.c10
-rw-r--r--yocto-poky/meta/recipes-devtools/subversion/subversion-1.8.13/subversion-CVE-2015-3184.patch2094
-rw-r--r--yocto-poky/meta/recipes-devtools/subversion/subversion-1.8.13/subversion-CVE-2015-3187.patch346
-rw-r--r--yocto-poky/meta/recipes-devtools/subversion/subversion_1.8.13.bb3
-rw-r--r--yocto-poky/meta/recipes-devtools/syslinux/syslinux/0010-gcc46-compatibility.patch37
-rw-r--r--yocto-poky/meta/recipes-devtools/syslinux/syslinux/0011-mk-MMD-does-not-take-any-arguments.patch33
-rw-r--r--yocto-poky/meta/recipes-devtools/syslinux/syslinux_6.03.bb2
-rw-r--r--yocto-poky/meta/recipes-devtools/unfs3/unfs3_0.9.22.r490.bb1
-rw-r--r--yocto-poky/meta/recipes-extended/bash/bash.inc2
-rw-r--r--yocto-poky/meta/recipes-extended/byacc/byacc/byacc-open.patch12
-rw-r--r--yocto-poky/meta/recipes-extended/bzip2/bzip2-1.0.6/fix-bunzip2-qt-returns-0-for-corrupt-archives.patch55
-rw-r--r--yocto-poky/meta/recipes-extended/bzip2/bzip2_1.0.6.bb1
-rw-r--r--yocto-poky/meta/recipes-extended/cpio/cpio_v2.inc8
-rw-r--r--yocto-poky/meta/recipes-extended/cronie/cronie_1.5.0.bb2
-rw-r--r--yocto-poky/meta/recipes-extended/cups/cups.inc6
-rw-r--r--yocto-poky/meta/recipes-extended/cwautomacros/cwautomacros_20110201.bb3
-rw-r--r--yocto-poky/meta/recipes-extended/foomatic/foomatic-filters-4.0.17/CVE-2015-8327.patch23
-rw-r--r--yocto-poky/meta/recipes-extended/foomatic/foomatic-filters-4.0.17/CVE-2015-8560.patch23
-rw-r--r--yocto-poky/meta/recipes-extended/foomatic/foomatic-filters_4.0.17.bb4
-rw-r--r--yocto-poky/meta/recipes-extended/ghostscript/ghostscript/png_mak.patch21
-rw-r--r--yocto-poky/meta/recipes-extended/ghostscript/ghostscript_9.16.bb1
-rw-r--r--yocto-poky/meta/recipes-extended/grep/grep_2.21.bb12
-rw-r--r--yocto-poky/meta/recipes-extended/gzip/gzip.inc14
-rw-r--r--yocto-poky/meta/recipes-extended/images/wic-image-minimal.bb14
-rw-r--r--yocto-poky/meta/recipes-extended/images/wic-image-minimal.wks10
-rw-r--r--yocto-poky/meta/recipes-extended/iptables/iptables/0002-configure.ac-only-check-conntrack-when-libnfnetlink-enabled.patch34
-rw-r--r--yocto-poky/meta/recipes-extended/iptables/iptables_1.4.21.bb3
-rw-r--r--yocto-poky/meta/recipes-extended/libaio/libaio/system-linkage.patch37
-rw-r--r--yocto-poky/meta/recipes-extended/libaio/libaio_0.3.110.bb9
-rw-r--r--yocto-poky/meta/recipes-extended/libarchive/libarchive/libarchive-CVE-2015-2304.patch (renamed from yocto-poky/meta/recipes-extended/libarchive/libarchive/0001-Add-ARCHIVE_EXTRACT_SECURE_NOABSOLUTEPATHS-option.patch)0
-rw-r--r--yocto-poky/meta/recipes-extended/libarchive/libarchive_3.1.2.bb2
-rw-r--r--yocto-poky/meta/recipes-extended/libtirpc/libtirpc/remove-des-uclibc.patch22
-rw-r--r--yocto-poky/meta/recipes-extended/libtirpc/libtirpc/va_list.patch18
-rw-r--r--yocto-poky/meta/recipes-extended/libtirpc/libtirpc_0.2.5.bb4
-rw-r--r--yocto-poky/meta/recipes-extended/logrotate/logrotate_3.9.1.bb2
-rw-r--r--yocto-poky/meta/recipes-extended/lsb/lsb_4.1.bb4
-rw-r--r--yocto-poky/meta/recipes-extended/lsb/lsbinitscripts_9.64.bb2
-rw-r--r--yocto-poky/meta/recipes-extended/ltp/ltp/0001-replace-inline-with-static-inline-for-gcc-5.x.patch69
-rw-r--r--yocto-poky/meta/recipes-extended/ltp/ltp_20150420.bb1
-rw-r--r--yocto-poky/meta/recipes-extended/mailx/mailx_12.5-5.bb5
-rw-r--r--yocto-poky/meta/recipes-extended/pam/libpam/use-utmpx.patch233
-rw-r--r--yocto-poky/meta/recipes-extended/pam/libpam_1.2.1.bb4
-rw-r--r--yocto-poky/meta/recipes-extended/quota/quota/remove_non_posix_types.patch13
-rw-r--r--yocto-poky/meta/recipes-extended/quota/quota_4.02.bb2
-rw-r--r--yocto-poky/meta/recipes-extended/rpcbind/rpcbind/0001-uclibc-nss.patch30
-rw-r--r--yocto-poky/meta/recipes-extended/rpcbind/rpcbind/cve-2015-7236.patch83
-rw-r--r--yocto-poky/meta/recipes-extended/rpcbind/rpcbind_0.2.3.bb3
-rw-r--r--yocto-poky/meta/recipes-extended/screen/screen/0001-Fix-stack-overflow-due-to-too-deep-recursion.patch57
-rw-r--r--yocto-poky/meta/recipes-extended/screen/screen_4.3.1.bb1
-rw-r--r--yocto-poky/meta/recipes-extended/sudo/sudo_1.8.14p3.bb2
-rw-r--r--yocto-poky/meta/recipes-extended/sysstat/sysstat/0001-Include-needed-headers-explicitly.patch62
-rw-r--r--yocto-poky/meta/recipes-extended/sysstat/sysstat_11.1.5.bb2
-rw-r--r--yocto-poky/meta/recipes-extended/tar/tar.inc10
-rw-r--r--yocto-poky/meta/recipes-extended/texinfo/texinfo_6.0.bb2
-rw-r--r--yocto-poky/meta/recipes-extended/tzcode/tzcode-native_2015f.bb25
-rw-r--r--yocto-poky/meta/recipes-extended/tzcode/tzcode-native_2016a.bb25
-rw-r--r--yocto-poky/meta/recipes-extended/tzdata/tzdata_2016a.bb (renamed from yocto-poky/meta/recipes-extended/tzdata/tzdata_2015f.bb)11
-rw-r--r--yocto-poky/meta/recipes-extended/unzip/unzip/CVE-2015-7696.patch38
-rw-r--r--yocto-poky/meta/recipes-extended/unzip/unzip/CVE-2015-7697.patch31
-rw-r--r--yocto-poky/meta/recipes-extended/unzip/unzip/cve-2014-9636.patch (renamed from yocto-poky/meta/recipes-extended/unzip/unzip/unzip-6.0_overflow3.diff)0
-rw-r--r--yocto-poky/meta/recipes-extended/unzip/unzip_6.0.bb4
-rw-r--r--yocto-poky/meta/recipes-extended/xz/xz_5.2.1.bb2
-rw-r--r--yocto-poky/meta/recipes-gnome/epiphany/epiphany_3.16.3.bb5
-rw-r--r--yocto-poky/meta/recipes-gnome/gcr/gcr_3.16.0.bb7
-rw-r--r--yocto-poky/meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf/CVE-2015-7674.patch39
-rw-r--r--yocto-poky/meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf_2.30.8.bb26
-rw-r--r--yocto-poky/meta/recipes-gnome/gnome-desktop/gnome-desktop3_3.16.2.bb5
-rw-r--r--yocto-poky/meta/recipes-gnome/gnome/gnome-doc-utils.inc3
-rw-r--r--yocto-poky/meta/recipes-gnome/gtk+/gtk+.inc2
-rw-r--r--yocto-poky/meta/recipes-gnome/gtk+/gtk+3.inc5
-rw-r--r--yocto-poky/meta/recipes-gnome/gtk+/gtk+3/Do-not-try-to-initialize-GL-without-libGL.patch60
-rw-r--r--yocto-poky/meta/recipes-gnome/gtk+/gtk+3_3.16.6.bb1
-rw-r--r--yocto-poky/meta/recipes-gnome/gtk-engines/gtk-engines_2.20.2.bb5
-rw-r--r--yocto-poky/meta/recipes-gnome/gtk-theme-torturer/gtk-theme-torturer_git.bb3
-rw-r--r--yocto-poky/meta/recipes-gnome/libglade/libglade_2.6.4.bb3
-rw-r--r--yocto-poky/meta/recipes-gnome/librsvg/librsvg/CVE-2015-7558_1.patch139
-rw-r--r--yocto-poky/meta/recipes-gnome/librsvg/librsvg/CVE-2015-7558_2.patch230
-rw-r--r--yocto-poky/meta/recipes-gnome/librsvg/librsvg/CVE-2015-7558_3.patch223
-rw-r--r--yocto-poky/meta/recipes-gnome/librsvg/librsvg_2.40.10.bb8
-rw-r--r--yocto-poky/meta/recipes-gnome/libsecret/libsecret_0.18.2.bb5
-rw-r--r--yocto-poky/meta/recipes-gnome/libwnck/libwnck3_3.14.0.bb5
-rw-r--r--yocto-poky/meta/recipes-graphics/cairo/cairo.inc3
-rw-r--r--yocto-poky/meta/recipes-graphics/cairo/cairo/Manually-transpose-the-matrix-in-_cairo_gl_shader_bi.patch49
-rw-r--r--yocto-poky/meta/recipes-graphics/cairo/cairo_1.14.2.bb1
-rw-r--r--yocto-poky/meta/recipes-graphics/directfb/directfb.inc3
-rw-r--r--yocto-poky/meta/recipes-graphics/libsdl/libsdl_1.2.15.bb3
-rw-r--r--yocto-poky/meta/recipes-graphics/libsdl2/libsdl2_2.0.3.bb2
-rw-r--r--yocto-poky/meta/recipes-graphics/mesa/mesa-demos/0010-sharedtex_mt-fix-rendering-thread-hang.patch43
-rw-r--r--yocto-poky/meta/recipes-graphics/mesa/mesa-demos_8.2.0.bb1
-rw-r--r--yocto-poky/meta/recipes-graphics/piglit/piglit_git.bb6
-rw-r--r--yocto-poky/meta/recipes-graphics/waffle/waffle/0001-third_party-threads-Use-PTHREAD_MUTEX_RECURSIVE-by-d.patch54
-rw-r--r--yocto-poky/meta/recipes-graphics/waffle/waffle_1.5.1.bb4
-rw-r--r--yocto-poky/meta/recipes-graphics/xorg-app/xwininfo_1.1.3.bb2
-rw-r--r--yocto-poky/meta/recipes-graphics/xorg-lib/libxcb.inc4
-rw-r--r--yocto-poky/meta/recipes-graphics/xorg-lib/libxcb/gcc-mips-pr68302-mips-workaround.patch22
-rw-r--r--yocto-poky/meta/recipes-graphics/xorg-lib/pixman/0001-v3-test-add-a-check-for-FE_DIVBYZERO.patch65
-rw-r--r--yocto-poky/meta/recipes-graphics/xorg-lib/pixman_0.32.6.bb1
-rw-r--r--yocto-poky/meta/recipes-graphics/xorg-xserver/xserver-xorg.inc3
-rw-r--r--yocto-poky/meta/recipes-kernel/kern-tools/kern-tools-native_git.bb2
-rw-r--r--yocto-poky/meta/recipes-kernel/kmod/kmod.inc2
-rw-r--r--yocto-poky/meta/recipes-kernel/linux-firmware/linux-firmware_git.bb4
-rw-r--r--yocto-poky/meta/recipes-kernel/linux/linux-yocto-rt_3.14.bb3
-rw-r--r--yocto-poky/meta/recipes-kernel/linux/linux-yocto-rt_4.1.bb9
-rw-r--r--yocto-poky/meta/recipes-kernel/linux/linux-yocto-tiny_3.14.bb2
-rw-r--r--yocto-poky/meta/recipes-kernel/linux/linux-yocto-tiny_4.1.bb6
-rw-r--r--yocto-poky/meta/recipes-kernel/linux/linux-yocto.inc4
-rw-r--r--yocto-poky/meta/recipes-kernel/linux/linux-yocto_3.14.bb9
-rw-r--r--yocto-poky/meta/recipes-kernel/linux/linux-yocto_3.19.bb9
-rw-r--r--yocto-poky/meta/recipes-kernel/linux/linux-yocto_4.1.bb23
-rw-r--r--yocto-poky/meta/recipes-kernel/lttng/lttng-tools/0001-Fix-sessiond-disable-match-app-event-by-name.patch58
-rw-r--r--yocto-poky/meta/recipes-kernel/lttng/lttng-tools_2.6.0.bb3
-rw-r--r--yocto-poky/meta/recipes-kernel/oprofile/oprofileui-server_git.bb2
-rw-r--r--yocto-poky/meta/recipes-kernel/oprofile/oprofileui_git.bb5
-rw-r--r--yocto-poky/meta/recipes-kernel/sysprof/sysprof_git.bb3
-rw-r--r--yocto-poky/meta/recipes-kernel/trace-cmd/kernelshark_git.bb3
-rw-r--r--yocto-poky/meta/recipes-lsb4/libpng/libpng12_1.2.53.bb2
-rw-r--r--yocto-poky/meta/recipes-multimedia/gstreamer/gst-player_git.bb4
-rw-r--r--yocto-poky/meta/recipes-multimedia/gstreamer/gstreamer1.0-libav_1.4.5.bb3
-rw-r--r--yocto-poky/meta/recipes-multimedia/gstreamer/gstreamer1.0-omx.inc3
-rw-r--r--yocto-poky/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-bad.inc15
-rw-r--r--yocto-poky/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-bad/0001-glimagesink-Downrank-to-marginal.patch32
-rw-r--r--yocto-poky/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-bad_1.4.5.bb4
-rw-r--r--yocto-poky/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-base.inc7
-rw-r--r--yocto-poky/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good.inc14
-rwxr-xr-xyocto-poky/meta/recipes-multimedia/gstreamer/gstreamer1.0/0003-basesink-Shouldn-t-drop-buffer-when-sync-false.patch30
-rw-r--r--yocto-poky/meta/recipes-multimedia/gstreamer/gstreamer1.0_1.4.5.bb1
-rw-r--r--yocto-poky/meta/recipes-multimedia/libpng/libpng-1.6.17/CVE-2015-8126_1.patch91
-rw-r--r--yocto-poky/meta/recipes-multimedia/libpng/libpng-1.6.17/CVE-2015-8126_2.patch134
-rw-r--r--yocto-poky/meta/recipes-multimedia/libpng/libpng-1.6.17/CVE-2015-8126_3.patch79
-rw-r--r--yocto-poky/meta/recipes-multimedia/libpng/libpng-1.6.17/CVE-2015-8126_4.patch48
-rw-r--r--yocto-poky/meta/recipes-multimedia/libpng/libpng-1.6.17/CVE-2015-8472.patch29
-rw-r--r--yocto-poky/meta/recipes-multimedia/libpng/libpng_1.6.17.bb10
-rw-r--r--yocto-poky/meta/recipes-multimedia/libsndfile/files/libsndfile-fix-CVE-2014-9756.patch24
-rw-r--r--yocto-poky/meta/recipes-multimedia/libsndfile/libsndfile1_1.0.25.bb1
-rw-r--r--yocto-poky/meta/recipes-multimedia/libtiff/files/CVE-2015-8781.patch196
-rw-r--r--yocto-poky/meta/recipes-multimedia/libtiff/files/CVE-2015-8784.patch73
-rw-r--r--yocto-poky/meta/recipes-multimedia/libtiff/tiff_4.0.4.bb2
-rw-r--r--yocto-poky/meta/recipes-multimedia/pulseaudio/pulseaudio/0001-card-add-pa_card_profile.ports.patch245
-rw-r--r--yocto-poky/meta/recipes-multimedia/pulseaudio/pulseaudio/0002-alsa-bluetooth-fail-if-user-requested-profile-doesn-.patch60
-rw-r--r--yocto-poky/meta/recipes-multimedia/pulseaudio/pulseaudio/0003-card-move-profile-selection-after-pa_card_new.patch363
-rw-r--r--yocto-poky/meta/recipes-multimedia/pulseaudio/pulseaudio/0004-alsa-set-availability-for-some-unavailable-profiles.patch75
-rw-r--r--yocto-poky/meta/recipes-multimedia/pulseaudio/pulseaudio_6.0.bb4
-rw-r--r--yocto-poky/meta/recipes-qt/qt4/qt4-4.8.7.inc2
-rw-r--r--yocto-poky/meta/recipes-qt/qt4/qt4-4.8.7/0028-Don-t-crash-on-broken-GIF-images.patch47
-rw-r--r--yocto-poky/meta/recipes-qt/qt4/qt4-4.8.7/0034-Fix-kmap2qmap-build-with-clang.patch34
-rw-r--r--yocto-poky/meta/recipes-sato/gtk-engines/gtk-sato-engine.inc3
-rw-r--r--yocto-poky/meta/recipes-sato/leafpad/leafpad_0.8.18.1.bb2
-rw-r--r--yocto-poky/meta/recipes-sato/matchbox-terminal/matchbox-terminal_git.bb4
-rw-r--r--yocto-poky/meta/recipes-sato/pcmanfm/pcmanfm_1.2.3.bb2
-rw-r--r--yocto-poky/meta/recipes-sato/puzzles/oh-puzzles_git.bb2
-rw-r--r--yocto-poky/meta/recipes-sato/webkit/webkitgtk_2.8.5.bb40
-rw-r--r--yocto-poky/meta/recipes-support/apr/apr-util_1.5.4.bb6
-rw-r--r--yocto-poky/meta/recipes-support/apr/apr_1.5.2.bb14
-rw-r--r--yocto-poky/meta/recipes-support/atk/at-spi2-core_2.16.0.bb2
-rw-r--r--yocto-poky/meta/recipes-support/curl/curl/CVE-2016-0754.patch417
-rw-r--r--yocto-poky/meta/recipes-support/curl/curl/CVE-2016-0755.patch138
-rw-r--r--yocto-poky/meta/recipes-support/curl/curl_7.44.0.bb9
-rw-r--r--yocto-poky/meta/recipes-support/gmp/gmp-4.2.1/Use-__gnu_inline__-attribute.patch36
-rw-r--r--yocto-poky/meta/recipes-support/gmp/gmp-4.2.1/disable-stdc.patch39
-rw-r--r--yocto-poky/meta/recipes-support/gmp/gmp_4.2.1.bb2
-rw-r--r--yocto-poky/meta/recipes-support/icu/icu/fix-install-manx.patch48
-rw-r--r--yocto-poky/meta/recipes-support/icu/icu_55.1.bb5
-rw-r--r--yocto-poky/meta/recipes-support/libbsd/files/CVE-2016-2090.patch50
-rw-r--r--yocto-poky/meta/recipes-support/libbsd/libbsd_0.7.0.bb4
-rw-r--r--yocto-poky/meta/recipes-support/libfm/libfm-extra_1.2.3.bb2
-rw-r--r--yocto-poky/meta/recipes-support/libfm/libfm_1.2.3.bb5
-rw-r--r--yocto-poky/meta/recipes-support/libgcrypt/files/CVE-2015-7511_1.patch245
-rw-r--r--yocto-poky/meta/recipes-support/libgcrypt/files/CVE-2015-7511_2.patch55
-rw-r--r--yocto-poky/meta/recipes-support/libgcrypt/libgcrypt_1.6.3.bb4
-rw-r--r--yocto-poky/meta/recipes-support/libgpg-error/libgpg-error/0001-libgpg-error-Add-nios2-support.patch46
-rw-r--r--yocto-poky/meta/recipes-support/libgpg-error/libgpg-error_1.19.bb4
-rw-r--r--yocto-poky/meta/recipes-support/libksba/libksba/ksba-add-pkgconfig-support.patch2
-rw-r--r--yocto-poky/meta/recipes-support/libpcre/libpcre_8.38.bb (renamed from yocto-poky/meta/recipes-support/libpcre/libpcre_8.37.bb)9
-rw-r--r--yocto-poky/meta/recipes-support/libunwind/libunwind-1.1/Add-AO_REQUIRE_CAS-to-fix-build-on-ARM-v6.patch61
-rw-r--r--yocto-poky/meta/recipes-support/libunwind/libunwind_1.1.bb5
-rw-r--r--yocto-poky/meta/recipes-support/libxslt/libxslt/CVE-2015-7995.patch33
-rw-r--r--yocto-poky/meta/recipes-support/libxslt/libxslt_1.1.28.bb3
-rw-r--r--yocto-poky/meta/recipes-support/nettle/nettle-3.1.1/CVE-2015-8803_8805.patch71
-rw-r--r--yocto-poky/meta/recipes-support/nettle/nettle-3.1.1/CVE-2015-8804.patch281
-rw-r--r--yocto-poky/meta/recipes-support/nettle/nettle_3.1.1.bb5
-rw-r--r--yocto-poky/meta/recipes-support/p11-kit/p11-kit_0.22.1.bb2
-rw-r--r--yocto-poky/meta/recipes-support/pinentry/pinentry_0.9.2.bb2
-rw-r--r--yocto-poky/meta/recipes-support/user-creation/files/system-xuser.conf11
-rw-r--r--yocto-poky/meta/recipes-support/user-creation/xuser-account_0.1.bb6
-rw-r--r--yocto-poky/meta/recipes-support/vte/vte-0.28.2/cve-2012-2738.patch135
-rw-r--r--yocto-poky/meta/recipes-support/vte/vte.inc3
-rw-r--r--yocto-poky/meta/recipes-support/vte/vte_0.28.2.bb3
-rw-r--r--yocto-poky/meta/site/nios2-linux395
474 files changed, 20541 insertions, 1395 deletions
diff --git a/yocto-poky/meta/classes/allarch.bbclass b/yocto-poky/meta/classes/allarch.bbclass
index 2fea7c04d..4af38d7f7 100644
--- a/yocto-poky/meta/classes/allarch.bbclass
+++ b/yocto-poky/meta/classes/allarch.bbclass
@@ -27,6 +27,10 @@ python () {
d.setVar("PACKAGE_EXTRA_ARCHS", "")
d.setVar("SDK_ARCH", "none")
d.setVar("SDK_CC_ARCH", "none")
+ d.setVar("TARGET_CPPFLAGS", "none")
+ d.setVar("TARGET_CFLAGS", "none")
+ d.setVar("TARGET_CXXFLAGS", "none")
+ d.setVar("TARGET_LDFLAGS", "none")
# Avoid this being unnecessarily different due to nuances of
# the target machine that aren't important for "all" arch
diff --git a/yocto-poky/meta/classes/archiver.bbclass b/yocto-poky/meta/classes/archiver.bbclass
index 089d70748..41a552c76 100644
--- a/yocto-poky/meta/classes/archiver.bbclass
+++ b/yocto-poky/meta/classes/archiver.bbclass
@@ -99,27 +99,6 @@ python () {
d.appendVarFlag('do_package_write_rpm', 'depends', ' %s:do_ar_patched' % pn)
elif ar_src == "configured":
d.appendVarFlag('do_package_write_rpm', 'depends', ' %s:do_ar_configured' % pn)
-
- # The gcc staff uses shared source
- flag = d.getVarFlag("do_unpack", "stamp-base", True)
- if flag:
- if ar_src in [ 'original', 'patched' ]:
- ar_outdir = os.path.join(d.getVar('ARCHIVER_TOPDIR', True), 'work-shared')
- d.setVar('ARCHIVER_OUTDIR', ar_outdir)
- d.setVarFlag('do_ar_original', 'stamp-base', flag)
- d.setVarFlag('do_ar_patched', 'stamp-base', flag)
- d.setVarFlag('do_unpack_and_patch', 'stamp-base', flag)
- d.setVarFlag('do_ar_original', 'vardepsexclude', 'PN PF ARCHIVER_OUTDIR WORKDIR')
- d.setVarFlag('do_unpack_and_patch', 'vardepsexclude', 'PN PF ARCHIVER_OUTDIR WORKDIR')
- d.setVarFlag('do_ar_patched', 'vardepsexclude', 'PN PF ARCHIVER_OUTDIR WORKDIR')
- d.setVarFlag('create_diff_gz', 'vardepsexclude', 'PF')
- d.setVarFlag('create_tarball', 'vardepsexclude', 'PF')
-
- flag_clean = d.getVarFlag('do_unpack', 'stamp-base-clean', True)
- if flag_clean:
- d.setVarFlag('do_ar_original', 'stamp-base-clean', flag_clean)
- d.setVarFlag('do_ar_patched', 'stamp-base-clean', flag_clean)
- d.setVarFlag('do_unpack_and_patch', 'stamp-base-clean', flag_clean)
}
# Take all the sources for a recipe and puts them in WORKDIR/archiver-work/.
@@ -178,13 +157,8 @@ python do_ar_patched() {
# Get the ARCHIVER_OUTDIR before we reset the WORKDIR
ar_outdir = d.getVar('ARCHIVER_OUTDIR', True)
bb.note('Archiving the patched source...')
- d.setVar('WORKDIR', d.getVar('ARCHIVER_WORKDIR', True))
- # The gcc staff uses shared source
- flag = d.getVarFlag('do_unpack', 'stamp-base', True)
- if flag:
- create_tarball(d, d.getVar('S', True), 'patched', ar_outdir, 'gcc')
- else:
- create_tarball(d, d.getVar('S', True), 'patched', ar_outdir)
+ d.setVar('WORKDIR', ar_outdir)
+ create_tarball(d, d.getVar('S', True), 'patched', ar_outdir)
}
python do_ar_configured() {
@@ -222,17 +196,18 @@ python do_ar_configured() {
create_tarball(d, srcdir, 'configured', ar_outdir)
}
-def create_tarball(d, srcdir, suffix, ar_outdir, pf=None):
+def create_tarball(d, srcdir, suffix, ar_outdir):
"""
create the tarball from srcdir
"""
import tarfile
+ # Make sure we are only creating a single tarball for gcc sources
+ if d.getVar('SRC_URI', True) == "" and 'gcc' in d.getVar('PN', True):
+ return
+
bb.utils.mkdirhier(ar_outdir)
- if pf:
- tarname = os.path.join(ar_outdir, '%s-%s.tar.gz' % (pf, suffix))
- else:
- tarname = os.path.join(ar_outdir, '%s-%s.tar.gz' % \
+ tarname = os.path.join(ar_outdir, '%s-%s.tar.gz' % \
(d.getVar('PF', True), suffix))
srcdir = srcdir.rstrip('/')
@@ -275,11 +250,9 @@ python do_unpack_and_patch() {
[ 'patched', 'configured'] and \
d.getVarFlag('ARCHIVER_MODE', 'diff', True) != '1':
return
-
- ar_outdir = d.getVar('ARCHIVER_OUTDIR', True)
-
# Change the WORKDIR to make do_unpack do_patch run in another dir.
- d.setVar('WORKDIR', d.getVar('ARCHIVER_WORKDIR', True))
+ ar_outdir = d.getVar('ARCHIVER_OUTDIR', True)
+ d.setVar('WORKDIR', ar_outdir)
# The changed 'WORKDIR' also casued 'B' changed, create dir 'B' for the
# possibly requiring of the following tasks (such as some recipes's
@@ -299,7 +272,11 @@ python do_unpack_and_patch() {
src = d.getVar('S', True).rstrip('/')
src_orig = '%s.orig' % src
oe.path.copytree(src, src_orig)
- bb.build.exec_func('do_patch', d)
+
+ # Make sure gcc sources are patched only once
+ if not ((d.getVar('SRC_URI', True) == "" and 'gcc' in d.getVar('PN', True))):
+ bb.build.exec_func('do_patch', d)
+
# Create the patches
if d.getVarFlag('ARCHIVER_MODE', 'diff', True) == '1':
bb.note('Creating diff gz...')
@@ -370,7 +347,6 @@ do_deploy_archives[sstate-inputdirs] = "${ARCHIVER_TOPDIR}"
do_deploy_archives[sstate-outputdirs] = "${DEPLOY_DIR_SRC}"
addtask do_ar_original after do_unpack
-addtask do_unpack_and_patch after do_patch
addtask do_ar_patched after do_unpack_and_patch
addtask do_ar_configured after do_unpack_and_patch
addtask do_dumpdata
@@ -383,3 +359,11 @@ do_deploy_all_archives[recideptask] = "do_${BB_DEFAULT_TASK}"
do_deploy_all_archives() {
:
}
+
+python () {
+ # Add tasks in the correct order, specifically for linux-yocto to avoid race condition
+ if bb.data.inherits_class('kernel-yocto', d):
+ bb.build.addtask('do_kernel_configme', 'do_configure', 'do_unpack_and_patch', d)
+ else:
+ bb.build.addtask('do_unpack_and_patch', None, 'do_patch', d)
+}
diff --git a/yocto-poky/meta/classes/autotools.bbclass b/yocto-poky/meta/classes/autotools.bbclass
index 819045a3b..d546a5c02 100644
--- a/yocto-poky/meta/classes/autotools.bbclass
+++ b/yocto-poky/meta/classes/autotools.bbclass
@@ -77,16 +77,20 @@ CONFIGUREOPTS = " --build=${BUILD_SYS} \
${@append_libtool_sysroot(d)}"
CONFIGUREOPT_DEPTRACK ?= "--disable-dependency-tracking"
+AUTOTOOLS_SCRIPT_PATH ?= "${S}"
+CONFIGURE_SCRIPT ?= "${AUTOTOOLS_SCRIPT_PATH}/configure"
+
+AUTOTOOLS_AUXDIR ?= "${AUTOTOOLS_SCRIPT_PATH}"
oe_runconf () {
- cfgscript="${S}/configure"
+ cfgscript="${CONFIGURE_SCRIPT}"
if [ -x "$cfgscript" ] ; then
bbnote "Running $cfgscript ${CONFIGUREOPTS} ${EXTRA_OECONF} $@"
set +e
${CACHED_CONFIGUREVARS} $cfgscript ${CONFIGUREOPTS} ${EXTRA_OECONF} "$@"
if [ "$?" != "0" ]; then
echo "Configure failed. The contents of all config.log files follows to aid debugging"
- find ${S} -ignore_readdir_race -name config.log -print -exec cat {} \;
+ find ${B} -ignore_readdir_race -name config.log -print -exec cat {} \;
die "oe_runconf failed"
fi
set -e
@@ -95,8 +99,6 @@ oe_runconf () {
fi
}
-AUTOTOOLS_AUXDIR ?= "${S}"
-
CONFIGURESTAMPFILE = "${WORKDIR}/configure.sstate"
autotools_preconfigure() {
@@ -134,7 +136,7 @@ do_configure[postfuncs] += "autotools_postconfigure"
ACLOCALDIR = "${B}/aclocal-copy"
python autotools_copy_aclocals () {
- s = d.getVar("S", True)
+ s = d.getVar("AUTOTOOLS_SCRIPT_PATH", True)
if not os.path.exists(s + "/configure.in") and not os.path.exists(s + "/configure.ac"):
if not d.getVar("AUTOTOOLS_COPYACLOCAL", False):
return
@@ -168,9 +170,9 @@ python autotools_copy_aclocals () {
for datadep in data[3]:
if datadep in done:
continue
- done.append(datadep)
if (not data[0].endswith("-native")) and taskdepdata[datadep][0].endswith("-native") and dep != start:
continue
+ done.append(datadep)
new.append(datadep)
if taskdepdata[datadep][1] == "do_configure":
configuredeps.append(taskdepdata[datadep][0])
@@ -228,13 +230,13 @@ autotools_do_configure() {
( for ac in `find ${S} -ignore_readdir_race -name configure.in -o -name configure.ac`; do
rm -f `dirname $ac`/configure
done )
- if [ -e ${S}/configure.in -o -e ${S}/configure.ac ]; then
+ if [ -e ${AUTOTOOLS_SCRIPT_PATH}/configure.in -o -e ${AUTOTOOLS_SCRIPT_PATH}/configure.ac ]; then
olddir=`pwd`
- cd ${S}
+ cd ${AUTOTOOLS_SCRIPT_PATH}
ACLOCAL="aclocal --system-acdir=${ACLOCALDIR}/"
if [ x"${acpaths}" = xdefault ]; then
acpaths=
- for i in `find ${S} -ignore_readdir_race -maxdepth 2 -name \*.m4|grep -v 'aclocal.m4'| \
+ for i in `find ${AUTOTOOLS_SCRIPT_PATH} -ignore_readdir_race -maxdepth 2 -name \*.m4|grep -v 'aclocal.m4'| \
grep -v 'acinclude.m4' | grep -v 'aclocal-copy' | sed -e 's,\(.*/\).*$,\1,'|sort -u`; do
acpaths="$acpaths -I $i"
done
@@ -265,21 +267,20 @@ autotools_do_configure() {
bbnote Executing glib-gettextize --force --copy
echo "no" | glib-gettextize --force --copy
fi
- else if grep "^[[:space:]]*AM_GNU_GETTEXT" $CONFIGURE_AC >/dev/null; then
+ elif grep "^[[:space:]]*AM_GNU_GETTEXT" $CONFIGURE_AC >/dev/null; then
# We'd call gettextize here if it wasn't so broken...
- cp ${STAGING_DATADIR_NATIVE}/gettext/config.rpath ${AUTOTOOLS_AUXDIR}/
- if [ -d ${S}/po/ ]; then
- cp -f ${STAGING_DATADIR_NATIVE}/gettext/po/Makefile.in.in ${S}/po/
- if [ ! -e ${S}/po/remove-potcdate.sin ]; then
- cp ${STAGING_DATADIR_NATIVE}/gettext/po/remove-potcdate.sin ${S}/po/
- fi
+ cp ${STAGING_DATADIR_NATIVE}/gettext/config.rpath ${AUTOTOOLS_AUXDIR}/
+ if [ -d ${S}/po/ ]; then
+ cp -f ${STAGING_DATADIR_NATIVE}/gettext/po/Makefile.in.in ${S}/po/
+ if [ ! -e ${S}/po/remove-potcdate.sin ]; then
+ cp ${STAGING_DATADIR_NATIVE}/gettext/po/remove-potcdate.sin ${S}/po/
fi
- for i in gettext.m4 iconv.m4 lib-ld.m4 lib-link.m4 lib-prefix.m4 nls.m4 po.m4 progtest.m4; do
- for j in `find ${S} -ignore_readdir_race -name $i | grep -v aclocal-copy`; do
- rm $j
- done
- done
fi
+ for i in gettext.m4 iconv.m4 lib-ld.m4 lib-link.m4 lib-prefix.m4 nls.m4 po.m4 progtest.m4; do
+ for j in `find ${S} -ignore_readdir_race -name $i | grep -v aclocal-copy`; do
+ rm $j
+ done
+ done
fi
mkdir -p m4
if grep "^[[:space:]]*[AI][CT]_PROG_INTLTOOL" $CONFIGURE_AC >/dev/null; then
@@ -290,7 +291,7 @@ autotools_do_configure() {
ACLOCAL="$ACLOCAL" autoreconf -Wcross --verbose --install --force ${EXTRA_AUTORECONF} $acpaths || die "autoreconf execution failed."
cd $olddir
fi
- if [ -e ${S}/configure ]; then
+ if [ -e ${CONFIGURE_SCRIPT} ]; then
oe_runconf
else
bbnote "nothing to configure"
diff --git a/yocto-poky/meta/classes/base.bbclass b/yocto-poky/meta/classes/base.bbclass
index f0780011b..9bd5499a1 100644
--- a/yocto-poky/meta/classes/base.bbclass
+++ b/yocto-poky/meta/classes/base.bbclass
@@ -391,7 +391,8 @@ python () {
items = flagval.split(",")
num = len(items)
if num > 4:
- bb.error("Only enable,disable,depend,rdepend can be specified!")
+ bb.error("%s: PACKAGECONFIG[%s] Only enable,disable,depend,rdepend can be specified!"
+ % (d.getVar('PN', True), flag))
if flag in pkgconfig:
if num >= 3 and items[2]:
@@ -512,7 +513,8 @@ python () {
if unskipped_pkgs:
for pkg in skipped_pkgs:
bb.debug(1, "SKIPPING the package " + pkg + " at do_rootfs because it's " + recipe_license)
- d.setVar('LICENSE_EXCLUSION-' + pkg, 1)
+ mlprefix = d.getVar('MLPREFIX', True)
+ d.setVar('LICENSE_EXCLUSION-' + mlprefix + pkg, 1)
for pkg in unskipped_pkgs:
bb.debug(1, "INCLUDING the package " + pkg)
elif all_skipped or incompatible_license(d, bad_licenses):
diff --git a/yocto-poky/meta/classes/buildhistory.bbclass b/yocto-poky/meta/classes/buildhistory.bbclass
index 4db044199..5e2581f88 100644
--- a/yocto-poky/meta/classes/buildhistory.bbclass
+++ b/yocto-poky/meta/classes/buildhistory.bbclass
@@ -521,7 +521,7 @@ POPULATE_SDK_POST_TARGET_COMMAND_append = " buildhistory_list_installed_sdk_targ
POPULATE_SDK_POST_HOST_COMMAND_append = " buildhistory_list_installed_sdk_host ;\
buildhistory_get_sdk_installed_host ; "
-SDK_POSTPROCESS_COMMAND += "buildhistory_get_sdkinfo ; "
+SDK_POSTPROCESS_COMMAND_append = " buildhistory_get_sdkinfo ; "
def buildhistory_get_build_id(d):
if d.getVar('BB_WORKERCONTEXT', True) != '1':
diff --git a/yocto-poky/meta/classes/cpan-base.bbclass b/yocto-poky/meta/classes/cpan-base.bbclass
index d9817ba6b..7810a4dc6 100644
--- a/yocto-poky/meta/classes/cpan-base.bbclass
+++ b/yocto-poky/meta/classes/cpan-base.bbclass
@@ -49,7 +49,11 @@ PERLVERSION[vardepvalue] = ""
FILES_${PN}-dbg += "${PERLLIBDIRS}/auto/*/.debug \
${PERLLIBDIRS}/auto/*/*/.debug \
${PERLLIBDIRS}/auto/*/*/*/.debug \
+ ${PERLLIBDIRS}/auto/*/*/*/*/.debug \
+ ${PERLLIBDIRS}/auto/*/*/*/*/*/.debug \
${PERLLIBDIRS}/vendor_perl/${PERLVERSION}/auto/*/.debug \
${PERLLIBDIRS}/vendor_perl/${PERLVERSION}/auto/*/*/.debug \
${PERLLIBDIRS}/vendor_perl/${PERLVERSION}/auto/*/*/*/.debug \
+ ${PERLLIBDIRS}/vendor_perl/${PERLVERSION}/auto/*/*/*/*/.debug \
+ ${PERLLIBDIRS}/vendor_perl/${PERLVERSION}/auto/*/*/*/*/*/.debug \
"
diff --git a/yocto-poky/meta/classes/cpan.bbclass b/yocto-poky/meta/classes/cpan.bbclass
index e2bbd2f63..8e079e0d5 100644
--- a/yocto-poky/meta/classes/cpan.bbclass
+++ b/yocto-poky/meta/classes/cpan.bbclass
@@ -17,7 +17,7 @@ export PERLHOSTLIB = "${STAGING_LIBDIR_NATIVE}/perl-native/perl/${@get_perl_vers
cpan_do_configure () {
export PERL5LIB="${PERL_ARCHLIB}"
- yes '' | perl ${EXTRA_PERLFLAGS} Makefile.PL ${EXTRA_CPANFLAGS}
+ yes '' | perl ${EXTRA_PERLFLAGS} Makefile.PL INSTALLDIRS=vendor ${EXTRA_CPANFLAGS}
# Makefile.PLs can exit with success without generating a
# Makefile, e.g. in cases of missing configure time
diff --git a/yocto-poky/meta/classes/cpan_build.bbclass b/yocto-poky/meta/classes/cpan_build.bbclass
index 4f648a60c..fac074d61 100644
--- a/yocto-poky/meta/classes/cpan_build.bbclass
+++ b/yocto-poky/meta/classes/cpan_build.bbclass
@@ -8,6 +8,7 @@ EXTRA_CPAN_BUILD_FLAGS ?= ""
# Env var which tells perl if it should use host (no) or target (yes) settings
export PERLCONFIGTARGET = "${@is_target(d)}"
export PERL_ARCHLIB = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl/${@get_perl_version(d)}"
+export PERLHOSTLIB = "${STAGING_LIBDIR_NATIVE}/perl-native/perl/${@get_perl_version(d)}/"
export LD = "${CCLD}"
cpan_build_do_configure () {
@@ -16,22 +17,24 @@ cpan_build_do_configure () {
. ${STAGING_LIBDIR}/perl/config.sh
fi
- perl Build.PL --installdirs vendor \
- --destdir ${D} \
- --install_path arch="${libdir}/perl" \
- --install_path script=${bindir} \
- --install_path bin=${bindir} \
- --install_path bindoc=${mandir}/man1 \
- --install_path libdoc=${mandir}/man3 \
- ${EXTRA_CPAN_BUILD_FLAGS}
+ perl Build.PL --installdirs vendor --destdir ${D} \
+ ${EXTRA_CPAN_BUILD_FLAGS}
+
+ # Build.PLs can exit with success without generating a
+ # Build, e.g. in cases of missing configure time
+ # dependencies. This is considered a best practice by
+ # cpantesters.org. See:
+ # * http://wiki.cpantesters.org/wiki/CPANAuthorNotes
+ # * http://www.nntp.perl.org/group/perl.qa/2008/08/msg11236.html
+ [ -e Build ] || bbfatal "No Build was generated by Build.PL"
}
cpan_build_do_compile () {
- perl Build
+ perl Build verbose=1
}
cpan_build_do_install () {
- perl Build install
+ perl Build install --destdir ${D}
}
EXPORT_FUNCTIONS do_configure do_compile do_install
diff --git a/yocto-poky/meta/classes/cross-canadian.bbclass b/yocto-poky/meta/classes/cross-canadian.bbclass
index d30a1687b..ea17f09bc 100644
--- a/yocto-poky/meta/classes/cross-canadian.bbclass
+++ b/yocto-poky/meta/classes/cross-canadian.bbclass
@@ -67,7 +67,7 @@ python () {
d.appendVar("CANADIANEXTRAOS", " linux-gnuspe linux-uclibcspe linux-muslspe")
elif tarch == "mips64":
d.appendVar("CANADIANEXTRAOS", " linux-gnun32 linux-uclibcn32 linux-musln32")
- if tarch == "arm":
+ if tarch == "arm" or tarch == "armeb":
d.setVar("TARGET_OS", "linux-gnueabi")
else:
d.setVar("TARGET_OS", "linux")
diff --git a/yocto-poky/meta/classes/deploy.bbclass b/yocto-poky/meta/classes/deploy.bbclass
index 78f5e4a7b..8ad07da01 100644
--- a/yocto-poky/meta/classes/deploy.bbclass
+++ b/yocto-poky/meta/classes/deploy.bbclass
@@ -8,3 +8,4 @@ python do_deploy_setscene () {
}
addtask do_deploy_setscene
do_deploy[dirs] = "${DEPLOYDIR} ${B}"
+do_deploy[stamp-extra-info] = "${MACHINE}"
diff --git a/yocto-poky/meta/classes/distrodata.bbclass b/yocto-poky/meta/classes/distrodata.bbclass
index 4168e4384..44c06e148 100644
--- a/yocto-poky/meta/classes/distrodata.bbclass
+++ b/yocto-poky/meta/classes/distrodata.bbclass
@@ -33,7 +33,7 @@ python do_distrodata_np() {
tmpdir = d.getVar('TMPDIR', True)
distro_check_dir = os.path.join(tmpdir, "distro_check")
datetime = localdata.getVar('DATETIME', True)
- dist_check.update_distro_data(distro_check_dir, datetime)
+ dist_check.update_distro_data(distro_check_dir, datetime, localdata)
if pn.find("-native") != -1:
pnstripped = pn.split("-native")
@@ -118,7 +118,7 @@ python do_distrodata() {
tmpdir = d.getVar('TMPDIR', True)
distro_check_dir = os.path.join(tmpdir, "distro_check")
datetime = localdata.getVar('DATETIME', True)
- dist_check.update_distro_data(distro_check_dir, datetime)
+ dist_check.update_distro_data(distro_check_dir, datetime, localdata)
pn = d.getVar("PN", True)
bb.note("Package Name: %s" % pn)
@@ -271,10 +271,11 @@ python do_checkpkg() {
from bb.fetch2 import FetchError, NoMethodError, decodeurl
"""first check whether a uri is provided"""
- src_uri = d.getVar('SRC_URI', True)
- if not src_uri:
- return
- uri_type, _, _, _, _, _ = decodeurl(src_uri)
+ src_uri = (d.getVar('SRC_URI', True) or '').split()
+ if src_uri:
+ uri_type, _, _, _, _, _ = decodeurl(src_uri[0])
+ else:
+ uri_type = "none"
"""initialize log files."""
logpath = d.getVar('LOG_DIR', True)
@@ -354,7 +355,10 @@ python do_checkpkg() {
elif cmp == 0:
pstatus = "MATCH"
- psrcuri = psrcuri.split()[0]
+ if psrcuri:
+ psrcuri = psrcuri.split()[0]
+ else:
+ psrcuri = "none"
pdepends = "".join(pdepends.split("\t"))
pdesc = "".join(pdesc.split("\t"))
no_upgr_reason = d.getVar('RECIPE_NO_UPDATE_REASON', True)
@@ -402,7 +406,7 @@ python do_distro_check() {
bb.utils.mkdirhier(logpath)
result_file = os.path.join(logpath, "distrocheck.csv")
datetime = localdata.getVar('DATETIME', True)
- dc.update_distro_data(distro_check_dir, datetime)
+ dc.update_distro_data(distro_check_dir, datetime, localdata)
# do the comparison
result = dc.compare_in_distro_packages_list(distro_check_dir, d)
diff --git a/yocto-poky/meta/classes/distutils3.bbclass b/yocto-poky/meta/classes/distutils3.bbclass
index e909ef41b..443bf3ac4 100644
--- a/yocto-poky/meta/classes/distutils3.bbclass
+++ b/yocto-poky/meta/classes/distutils3.bbclass
@@ -21,6 +21,7 @@ distutils3_do_compile() {
build ${DISTUTILS_BUILD_ARGS} || \
bbfatal "${PYTHON_PN} setup.py build_ext execution failed."
}
+distutils3_do_compile[vardepsexclude] = "MACHINE"
distutils3_stage_headers() {
install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR}
@@ -33,6 +34,7 @@ distutils3_stage_headers() {
${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install_headers ${DISTUTILS_STAGE_HEADERS_ARGS} || \
bbfatal "${PYTHON_PN} setup.py install_headers execution failed."
}
+distutils3_stage_headers[vardepsexclude] = "MACHINE"
distutils3_stage_all() {
if [ ${BUILD_SYS} != ${HOST_SYS} ]; then
@@ -48,6 +50,7 @@ distutils3_stage_all() {
${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install ${DISTUTILS_STAGE_ALL_ARGS} || \
bbfatal "${PYTHON_PN} setup.py install (stage) execution failed."
}
+distutils3_stage_all[vardepsexclude] = "MACHINE"
distutils3_do_install() {
install -d ${D}${PYTHON_SITEPACKAGES_DIR}
@@ -90,6 +93,7 @@ distutils3_do_install() {
rmdir ${D}${datadir}/share
fi
}
+distutils3_do_install[vardepsexclude] = "MACHINE"
EXPORT_FUNCTIONS do_compile do_install
diff --git a/yocto-poky/meta/classes/externalsrc.bbclass b/yocto-poky/meta/classes/externalsrc.bbclass
index 0fa5817d9..f7ed66d8c 100644
--- a/yocto-poky/meta/classes/externalsrc.bbclass
+++ b/yocto-poky/meta/classes/externalsrc.bbclass
@@ -73,7 +73,8 @@ python () {
fetch_tasks = ['do_fetch', 'do_unpack']
# If we deltask do_patch, there's no dependency to ensure do_unpack gets run, so add one
- d.appendVarFlag('do_configure', 'deps', ['do_unpack'])
+ # Note that we cannot use d.appendVarFlag() here because deps is expected to be a list object, not a string
+ d.setVarFlag('do_configure', 'deps', (d.getVarFlag('do_configure', 'deps', False) or []) + ['do_unpack'])
for task in d.getVar("SRCTREECOVEREDTASKS", True).split():
if local_srcuri and task in fetch_tasks:
@@ -88,5 +89,5 @@ python () {
python externalsrc_compile_prefunc() {
# Make it obvious that this is happening, since forgetting about it could lead to much confusion
- bb.warn('Compiling %s from external source %s' % (d.getVar('PN', True), d.getVar('EXTERNALSRC', True)))
+ bb.plain('NOTE: %s: compiling from external source tree %s' % (d.getVar('PN', True), d.getVar('EXTERNALSRC', True)))
}
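[Illustration] The externalsrc comment notes that the 'deps' varflag is a Python list, so appendVarFlag (string concatenation) cannot be used. A small sketch of the same read-modify-write pattern against a hypothetical dictionary-backed datastore, just to show why the getVarFlag/setVarFlag round trip is needed:

# Sketch: appending to a list-valued flag requires read-modify-write;
# string-style appending would produce something like "['do_patch']do_unpack".
class FakeDataStore:
    def __init__(self):
        self._flags = {}
    def getVarFlag(self, var, flag, expand=False):
        return self._flags.get((var, flag))
    def setVarFlag(self, var, flag, value):
        self._flags[(var, flag)] = value

d = FakeDataStore()
d.setVarFlag('do_configure', 'deps', ['do_patch'])  # example existing deps
d.setVarFlag('do_configure', 'deps',
             (d.getVarFlag('do_configure', 'deps') or []) + ['do_unpack'])
print(d.getVarFlag('do_configure', 'deps'))  # -> ['do_patch', 'do_unpack']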
diff --git a/yocto-poky/meta/classes/fontcache.bbclass b/yocto-poky/meta/classes/fontcache.bbclass
index d122387ff..8ebdfc4f5 100644
--- a/yocto-poky/meta/classes/fontcache.bbclass
+++ b/yocto-poky/meta/classes/fontcache.bbclass
@@ -9,12 +9,23 @@ inherit qemu
FONT_PACKAGES ??= "${PN}"
FONT_EXTRA_RDEPENDS ?= "fontconfig-utils"
FONTCONFIG_CACHE_DIR ?= "${localstatedir}/cache/fontconfig"
+FONTCONFIG_CACHE_PARAMS ?= "-v"
+# You can change this to e.g. FC_DEBUG=16 to debug fc-cache issues.
+# Something has to be set, because qemuwrapper uses this variable after -E.
+# Multiple variables aren't allowed, because for qemu they are separated
+# by commas, while in the -n "$D" case they should be separated by spaces.
+FONTCONFIG_CACHE_ENV ?= "FC_DEBUG=1"
fontcache_common() {
-if [ "x$D" != "x" ] ; then
- $INTERCEPT_DIR/postinst_intercept update_font_cache ${PKG} mlprefix=${MLPREFIX} bindir=${bindir} \
- libdir=${libdir} base_libdir=${base_libdir} fontconfigcachedir=${FONTCONFIG_CACHE_DIR}
+if [ -n "$D" ] ; then
+ $INTERCEPT_DIR/postinst_intercept update_font_cache ${PKG} mlprefix=${MLPREFIX} \
+ 'bindir="${bindir}"' \
+ 'libdir="${libdir}"' \
+ 'base_libdir="${base_libdir}"' \
+ 'fontconfigcachedir="${FONTCONFIG_CACHE_DIR}"' \
+ 'fontconfigcacheparams="${FONTCONFIG_CACHE_PARAMS}"' \
+ 'fontconfigcacheenv="${FONTCONFIG_CACHE_ENV}"'
else
- fc-cache
+ ${FONTCONFIG_CACHE_ENV} fc-cache ${FONTCONFIG_CACHE_PARAMS}
fi
}
diff --git a/yocto-poky/meta/classes/grub-efi.bbclass b/yocto-poky/meta/classes/grub-efi.bbclass
index 4ddc2bb12..9a4220abd 100644
--- a/yocto-poky/meta/classes/grub-efi.bbclass
+++ b/yocto-poky/meta/classes/grub-efi.bbclass
@@ -52,7 +52,8 @@ efi_iso_populate() {
mkdir -p ${EFIIMGDIR}/${EFIDIR}
cp $iso_dir/${EFIDIR}/* ${EFIIMGDIR}${EFIDIR}
cp $iso_dir/vmlinuz ${EFIIMGDIR}
- echo "${GRUB_IMAGE}" > ${EFIIMGDIR}/startup.nsh
+ EFIPATH=$(echo "${EFIDIR}" | sed 's/\//\\/g')
+ echo "fs0:${EFIPATH}\\${GRUB_IMAGE}" > ${EFIIMGDIR}/startup.nsh
if [ -f "$iso_dir/initrd" ] ; then
cp $iso_dir/initrd ${EFIIMGDIR}
fi
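[Illustration] Both the grub-efi and gummiboot hunks build the startup.nsh boot line by turning the POSIX-style ${EFIDIR} into a backslash-separated EFI shell path. A standalone sketch of that transformation (the directory and image names below are examples, not values taken from the classes):

# Sketch: convert a POSIX EFI directory into an EFI-shell style path and
# compose the startup.nsh line, mirroring the sed 's/\//\\/g' call above.
def startup_nsh_line(efidir, image):
    efipath = efidir.replace("/", "\\")
    return "fs0:%s\\%s" % (efipath, image)

print(startup_nsh_line("/EFI/BOOT", "bootx64.efi"))
# -> fs0:\EFI\BOOT\bootx64.efi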
diff --git a/yocto-poky/meta/classes/gtk-icon-cache.bbclass b/yocto-poky/meta/classes/gtk-icon-cache.bbclass
index 12358e3ae..0f1052b08 100644
--- a/yocto-poky/meta/classes/gtk-icon-cache.bbclass
+++ b/yocto-poky/meta/classes/gtk-icon-cache.bbclass
@@ -4,12 +4,13 @@ DEPENDS += "${@['hicolor-icon-theme', '']['${BPN}' == 'hicolor-icon-theme']} gtk
gtk_icon_cache_postinst() {
if [ "x$D" != "x" ]; then
- $INTERCEPT_DIR/postinst_intercept update_icon_cache ${PKG} mlprefix=${MLPREFIX} libdir=${libdir} \
- base_libdir=${base_libdir}
+ $INTERCEPT_DIR/postinst_intercept update_icon_cache ${PKG} \
+ mlprefix=${MLPREFIX} \
+ libdir_native=${libdir_native}
else
# Update the pixbuf loaders in case they haven't been registered yet
- GDK_PIXBUF_MODULEDIR=${libdir}/gdk-pixbuf-2.0/2.10.0/loaders gdk-pixbuf-query-loaders --update-cache
+ ${libdir}/gdk-pixbuf-2.0/gdk-pixbuf-query-loaders --update-cache
for icondir in /usr/share/icons/* ; do
if [ -d $icondir ] ; then
@@ -21,8 +22,9 @@ fi
gtk_icon_cache_postrm() {
if [ "x$D" != "x" ]; then
- $INTERCEPT_DIR/postinst_intercept update_icon_cache ${PKG} mlprefix=${MLPREFIX} libdir=${libdir} \
- base_libdir=${base_libdir}
+ $INTERCEPT_DIR/postinst_intercept update_icon_cache ${PKG} \
+ mlprefix=${MLPREFIX} \
+ libdir=${libdir}
else
for icondir in /usr/share/icons/* ; do
if [ -d $icondir ] ; then
diff --git a/yocto-poky/meta/classes/gummiboot.bbclass b/yocto-poky/meta/classes/gummiboot.bbclass
index 3d9c08bbc..9a97ac175 100644
--- a/yocto-poky/meta/classes/gummiboot.bbclass
+++ b/yocto-poky/meta/classes/gummiboot.bbclass
@@ -46,7 +46,8 @@ efi_iso_populate() {
mkdir -p ${EFIIMGDIR}/${EFIDIR}
cp $iso_dir/${EFIDIR}/* ${EFIIMGDIR}${EFIDIR}
cp $iso_dir/vmlinuz ${EFIIMGDIR}
- echo "${DEST_EFI_IMAGE}" > ${EFIIMGDIR}/startup.nsh
+ EFIPATH=$(echo "${EFIDIR}" | sed 's/\//\\/g')
+ echo "fs0:${EFIPATH}\\${DEST_EFI_IMAGE}" > ${EFIIMGDIR}/startup.nsh
if [ -f "$iso_dir/initrd" ] ; then
cp $iso_dir/initrd ${EFIIMGDIR}
fi
diff --git a/yocto-poky/meta/classes/image-live.bbclass b/yocto-poky/meta/classes/image-live.bbclass
index fa7a131ed..23e4a5cef 100644
--- a/yocto-poky/meta/classes/image-live.bbclass
+++ b/yocto-poky/meta/classes/image-live.bbclass
@@ -2,7 +2,7 @@
AUTO_SYSLINUXCFG = "1"
INITRD_IMAGE ?= "core-image-minimal-initramfs"
INITRD ?= "${DEPLOY_DIR_IMAGE}/${INITRD_IMAGE}-${MACHINE}.cpio.gz"
-SYSLINUX_ROOT = "root=/dev/ram0"
+SYSLINUX_ROOT ?= "root=/dev/ram0"
SYSLINUX_TIMEOUT ?= "50"
SYSLINUX_LABELS ?= "boot install"
LABELS_append = " ${SYSLINUX_LABELS} "
diff --git a/yocto-poky/meta/classes/image-mklibs.bbclass b/yocto-poky/meta/classes/image-mklibs.bbclass
index c455a8e2d..cfb3ffc91 100644
--- a/yocto-poky/meta/classes/image-mklibs.bbclass
+++ b/yocto-poky/meta/classes/image-mklibs.bbclass
@@ -25,7 +25,7 @@ mklibs_optimize_image_doit() {
x86_64)
dynamic_loader="${base_libdir}/ld-linux-x86-64.so.2"
;;
- i586 )
+ i*86 )
dynamic_loader="${base_libdir}/ld-linux.so.2"
;;
arm )
diff --git a/yocto-poky/meta/classes/image-vm.bbclass b/yocto-poky/meta/classes/image-vm.bbclass
index 063266708..5ddd1cb27 100644
--- a/yocto-poky/meta/classes/image-vm.bbclass
+++ b/yocto-poky/meta/classes/image-vm.bbclass
@@ -18,7 +18,8 @@ inherit boot-directdisk
IMAGE_TYPEDEP_vmdk = "ext4"
IMAGE_TYPEDEP_vdi = "ext4"
IMAGE_TYPEDEP_qcow2 = "ext4"
-IMAGE_TYPES_MASKED += "vmdk vdi qcow2"
+IMAGE_TYPEDEP_hdddirect = "ext4"
+IMAGE_TYPES_MASKED += "vmdk vdi qcow2 hdddirect"
create_vmdk_image () {
qemu-img convert -O vmdk ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.hdddirect ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.vmdk
diff --git a/yocto-poky/meta/classes/image.bbclass b/yocto-poky/meta/classes/image.bbclass
index 86a98bb11..d2f8105f1 100644
--- a/yocto-poky/meta/classes/image.bbclass
+++ b/yocto-poky/meta/classes/image.bbclass
@@ -150,7 +150,7 @@ def build_live(d):
IMAGE_TYPE_live = "${@build_live(d)}"
inherit ${IMAGE_TYPE_live}
-IMAGE_TYPE_vm = '${@bb.utils.contains_any("IMAGE_FSTYPES", ["vmdk", "vdi", "qcow2"], "image-vm", "", d)}'
+IMAGE_TYPE_vm = '${@bb.utils.contains_any("IMAGE_FSTYPES", ["vmdk", "vdi", "qcow2", "hdddirect"], "image-vm", "", d)}'
inherit ${IMAGE_TYPE_vm}
python () {
@@ -239,6 +239,29 @@ read_only_rootfs_hook () {
# Tweak the mount option and fs_passno for rootfs in fstab
sed -i -e '/^[#[:space:]]*\/dev\/root/{s/defaults/ro/;s/\([[:space:]]*[[:digit:]]\)\([[:space:]]*\)[[:digit:]]$/\1\20/}' ${IMAGE_ROOTFS}/etc/fstab
+ # If we're using openssh and the /etc/ssh directory has no pre-generated keys,
+ # we should configure openssh to use the configuration file /etc/ssh/sshd_config_readonly
+ # and the keys under /var/run/ssh.
+ if [ -d ${IMAGE_ROOTFS}/etc/ssh ]; then
+ if [ -e ${IMAGE_ROOTFS}/etc/ssh/ssh_host_rsa_key ]; then
+ echo "SYSCONFDIR=/etc/ssh" >> ${IMAGE_ROOTFS}/etc/default/ssh
+ echo "SSHD_OPTS=" >> ${IMAGE_ROOTFS}/etc/default/ssh
+ else
+ echo "SYSCONFDIR=/var/run/ssh" >> ${IMAGE_ROOTFS}/etc/default/ssh
+ echo "SSHD_OPTS='-f /etc/ssh/sshd_config_readonly'" >> ${IMAGE_ROOTFS}/etc/default/ssh
+ fi
+ fi
+
+ # Also tweak the key location for dropbear in the same way.
+ if [ -d ${IMAGE_ROOTFS}/etc/dropbear ]; then
+ if [ -e ${IMAGE_ROOTFS}/etc/dropbear/dropbear_rsa_host_key ]; then
+ echo "DROPBEAR_RSAKEY_DIR=/etc/dropbear" >> ${IMAGE_ROOTFS}/etc/default/dropbear
+ else
+ echo "DROPBEAR_RSAKEY_DIR=/var/lib/dropbear" >> ${IMAGE_ROOTFS}/etc/default/dropbear
+ fi
+ fi
+
+
if ${@bb.utils.contains("DISTRO_FEATURES", "sysvinit", "true", "false", d)}; then
# Change the value of ROOTFS_READ_ONLY in /etc/default/rcS to yes
if [ -e ${IMAGE_ROOTFS}/etc/default/rcS ]; then
@@ -249,18 +272,6 @@ read_only_rootfs_hook () {
if [ -x ${IMAGE_ROOTFS}/etc/init.d/populate-volatile.sh ]; then
${IMAGE_ROOTFS}/etc/init.d/populate-volatile.sh
fi
- # If we're using openssh and the /etc/ssh directory has no pre-generated keys,
- # we should configure openssh to use the configuration file /etc/ssh/sshd_config_readonly
- # and the keys under /var/run/ssh.
- if [ -d ${IMAGE_ROOTFS}/etc/ssh ]; then
- if [ -e ${IMAGE_ROOTFS}/etc/ssh/ssh_host_rsa_key ]; then
- echo "SYSCONFDIR=/etc/ssh" >> ${IMAGE_ROOTFS}/etc/default/ssh
- echo "SSHD_OPTS=" >> ${IMAGE_ROOTFS}/etc/default/ssh
- else
- echo "SYSCONFDIR=/var/run/ssh" >> ${IMAGE_ROOTFS}/etc/default/ssh
- echo "SSHD_OPTS='-f /etc/ssh/sshd_config_readonly'" >> ${IMAGE_ROOTFS}/etc/default/ssh
- fi
- fi
fi
if ${@bb.utils.contains("DISTRO_FEATURES", "systemd", "true", "false", d)}; then
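[Illustration] The relocated block above decides, per image, whether sshd should keep its keys in /etc/ssh (pre-generated) or fall back to /var/run/ssh with the read-only configuration file. A hedged Python sketch of the same decision, writing the /etc/default/ssh fragment for a hypothetical rootfs path:

# Sketch: choose sshd defaults for a read-only rootfs, following the shell
# logic above. 'rootfs' is a hypothetical image rootfs directory.
import os

def write_ssh_defaults(rootfs):
    ssh_dir = os.path.join(rootfs, "etc/ssh")
    if not os.path.isdir(ssh_dir):
        return  # openssh not installed in this image
    defaults = os.path.join(rootfs, "etc/default/ssh")
    with open(defaults, "a") as f:
        if os.path.exists(os.path.join(ssh_dir, "ssh_host_rsa_key")):
            # keys were pre-generated, keep the normal sysconf dir
            f.write("SYSCONFDIR=/etc/ssh\nSSHD_OPTS=\n")
        else:
            # no keys baked in: generate them under a writable location
            f.write("SYSCONFDIR=/var/run/ssh\n"
                    "SSHD_OPTS='-f /etc/ssh/sshd_config_readonly'\n")

# write_ssh_defaults("/path/to/image/rootfs")  # hypothetical usage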
diff --git a/yocto-poky/meta/classes/image_types.bbclass b/yocto-poky/meta/classes/image_types.bbclass
index 306403e31..50369197c 100644
--- a/yocto-poky/meta/classes/image_types.bbclass
+++ b/yocto-poky/meta/classes/image_types.bbclass
@@ -13,7 +13,7 @@ def imagetypes_getdepends(d):
deps = []
ctypes = d.getVar('COMPRESSIONTYPES', True).split()
for type in (d.getVar('IMAGE_FSTYPES', True) or "").split():
- if type in ["vmdk", "vdi", "qcow2", "live", "iso", "hddimg"]:
+ if type in ["vmdk", "vdi", "qcow2", "hdddirect", "live", "iso", "hddimg"]:
type = "ext4"
basetype = type
for ctype in ctypes:
@@ -139,17 +139,19 @@ multiubi_mkfs() {
# Cleanup cfg file
mv ubinize${vname}.cfg ${DEPLOY_DIR_IMAGE}/
- # Create own symlink
- cd ${DEPLOY_DIR_IMAGE}
- if [ -e ${IMAGE_NAME}${vname}.rootfs.ubifs ]; then
- ln -sf ${IMAGE_NAME}${vname}.rootfs.ubifs \
- ${IMAGE_LINK_NAME}${vname}.ubifs
- fi
- if [ -e ${IMAGE_NAME}${vname}.rootfs.ubi ]; then
- ln -sf ${IMAGE_NAME}${vname}.rootfs.ubi \
- ${IMAGE_LINK_NAME}${vname}.ubi
+ # Create own symlinks for 'named' volumes
+ if [ -n "$vname" ]; then
+ cd ${DEPLOY_DIR_IMAGE}
+ if [ -e ${IMAGE_NAME}${vname}.rootfs.ubifs ]; then
+ ln -sf ${IMAGE_NAME}${vname}.rootfs.ubifs \
+ ${IMAGE_LINK_NAME}${vname}.ubifs
+ fi
+ if [ -e ${IMAGE_NAME}${vname}.rootfs.ubi ]; then
+ ln -sf ${IMAGE_NAME}${vname}.rootfs.ubi \
+ ${IMAGE_LINK_NAME}${vname}.ubi
+ fi
+ cd -
fi
- cd -
}
IMAGE_CMD_multiubi () {
@@ -225,6 +227,7 @@ IMAGE_TYPES = " \
vmdk \
vdi \
qcow2 \
+ hdddirect \
elf \
wic wic.gz wic.bz2 wic.lzma \
"
@@ -252,7 +255,7 @@ DEPLOYABLE_IMAGE_TYPES ?= "hddimg iso"
IMAGE_EXTENSION_live = "hddimg iso"
# The IMAGE_TYPES_MASKED variable will be used to mask out from the IMAGE_FSTYPES,
-# images that will not be built at do_rootfs time: vmdk, vdi, qcow2, hddimg, iso, etc.
+# images that will not be built at do_rootfs time: vmdk, vdi, qcow2, hdddirect, hddimg, iso, etc.
IMAGE_TYPES_MASKED ?= ""
# The WICVARS variable is used to define list of bitbake variables used in wic code
diff --git a/yocto-poky/meta/classes/insane.bbclass b/yocto-poky/meta/classes/insane.bbclass
index 5c8629af1..a77438db5 100644
--- a/yocto-poky/meta/classes/insane.bbclass
+++ b/yocto-poky/meta/classes/insane.bbclass
@@ -32,7 +32,7 @@ WARN_QA ?= "ldflags useless-rpaths rpaths staticdev libdir xorg-driver-abi \
installed-vs-shipped compile-host-path install-host-path \
pn-overrides infodir build-deps file-rdeps \
unknown-configure-option symlink-to-sysroot multilib \
- invalid-pkgconfig host-user-contaminated \
+ invalid-packageconfig host-user-contaminated \
"
ERROR_QA ?= "dev-so debug-deps dev-deps debug-files arch pkgconfig la \
perms dep-cmp pkgvarcheck perm-config perm-line perm-link \
@@ -86,6 +86,7 @@ def package_qa_get_machine_dict():
"mipsel": ( 8, 0, 0, True, 32),
"mips64": ( 8, 0, 0, False, 64),
"mips64el": ( 8, 0, 0, True, 64),
+ "nios2": (113, 0, 0, True, 32),
"s390": (22, 0, 0, False, 32),
"sh4": (42, 0, 0, True, 32),
"sparc": ( 2, 0, 0, False, 32),
@@ -166,7 +167,7 @@ def package_qa_get_machine_dict():
def package_qa_clean_path(path,d):
""" Remove the common prefix from the path. In this case it is the TMPDIR"""
- return path.replace(d.getVar('TMPDIR',True),"")
+ return path.replace(d.getVar("TMPDIR", True) + "/", "")
def package_qa_write_error(type, error, d):
logfile = d.getVar('QA_LOGFILE', True)
@@ -980,6 +981,7 @@ def package_qa_check_host_user(path, name, d, elf, messages):
return
dest = d.getVar('PKGDEST', True)
+ pn = d.getVar('PN', True)
home = os.path.join(dest, 'home')
if path == home or path.startswith(home + os.sep):
return
@@ -991,14 +993,15 @@ def package_qa_check_host_user(path, name, d, elf, messages):
if exc.errno != errno.ENOENT:
raise
else:
+ rootfs_path = path[len(dest):]
check_uid = int(d.getVar('HOST_USER_UID', True))
if stat.st_uid == check_uid:
- messages["host-user-contaminated"] = "%s is owned by uid %d, which is the same as the user running bitbake. This may be due to host contamination" % (path, check_uid)
+ messages["host-user-contaminated"] = "%s: %s is owned by uid %d, which is the same as the user running bitbake. This may be due to host contamination" % (pn, rootfs_path, check_uid)
return False
check_gid = int(d.getVar('HOST_USER_GID', True))
if stat.st_gid == check_gid:
- messages["host-user-contaminated"] = "%s is owned by gid %d, which is the same as the user running bitbake. This may be due to host contamination" % (path, check_gid)
+ messages["host-user-contaminated"] = "%s: %s is owned by gid %d, which is the same as the user running bitbake. This may be due to host contamination" % (pn, rootfs_path, check_gid)
return False
return True
@@ -1089,7 +1092,7 @@ python do_package_qa () {
# Check package name
if not pkgname_pattern.match(package):
package_qa_handle_error("pkgname",
- "%s doesn't match the [a-z0-9.+-]+ regex\n" % package, d)
+ "%s doesn't match the [a-z0-9.+-]+ regex" % package, d)
path = "%s/%s" % (pkgdest, package)
if not package_qa_walk(path, warnchecks, errorchecks, skip, package, d):
@@ -1143,7 +1146,7 @@ python do_qa_configure() {
if "config.log" in files:
if subprocess.call(statement, shell=True) == 0:
bb.fatal("""This autoconf log indicates errors, it looked at host include and/or library paths while determining system capabilities.
-Rerun configure task after fixing this. The path was '%s'""" % root)
+Rerun configure task after fixing this.""")
if "configure.ac" in files:
configs.append(os.path.join(root,"configure.ac"))
@@ -1207,7 +1210,7 @@ Missing inherit gettext?""" % (gt, config))
if pconfig not in pkgconfigflags:
pn = d.getVar('PN', True)
error_msg = "%s: invalid PACKAGECONFIG: %s" % (pn, pconfig)
- package_qa_handle_error("invalid-pkgconfig", error_msg, d)
+ package_qa_handle_error("invalid-packageconfig", error_msg, d)
}
python do_qa_unpack() {
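[Illustration] The insane.bbclass change makes the host-user-contaminated message report the package name and the rootfs-relative path when a packaged file is owned by the uid/gid running bitbake. A minimal standalone sketch of that ownership comparison (paths and ids are illustrative):

# Sketch: flag files whose owner matches the build user, the core of the
# host-user-contaminated QA test.
import os

def check_host_contamination(path, build_uid, build_gid):
    st = os.lstat(path)
    problems = []
    if st.st_uid == build_uid:
        problems.append("%s is owned by uid %d (the build user)" % (path, build_uid))
    if st.st_gid == build_gid:
        problems.append("%s is owned by gid %d (the build group)" % (path, build_gid))
    return problems

# The class compares against HOST_USER_UID/HOST_USER_GID; here we just use
# the current user for a runnable example.
print(check_host_contamination("/tmp", os.getuid(), os.getgid()))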
diff --git a/yocto-poky/meta/classes/kernel-arch.bbclass b/yocto-poky/meta/classes/kernel-arch.bbclass
index 211b72bee..d8b180ec4 100644
--- a/yocto-poky/meta/classes/kernel-arch.bbclass
+++ b/yocto-poky/meta/classes/kernel-arch.bbclass
@@ -13,14 +13,17 @@ valid_archs = "alpha cris ia64 \
sh sh64 um h8300 \
parisc s390 v850 \
avr32 blackfin \
- microblaze"
+ microblaze \
+ nios2"
def map_kernel_arch(a, d):
import re
valid_archs = d.getVar('valid_archs', True).split()
- if re.match('(i.86|athlon|x86.64)$', a): return 'x86'
+ if re.match('i.86$', a): return 'i386'
+ elif re.match('x86.64$', a): return 'x86_64'
+ elif re.match('athlon$', a): return 'x86'
elif re.match('armeb$', a): return 'arm'
elif re.match('aarch64$', a): return 'arm64'
elif re.match('aarch64_be$', a): return 'arm64'
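[Illustration] The kernel-arch change splits the old combined x86 pattern so i.86 targets map to the i386 kernel ARCH and x86.64 maps to x86_64, while athlon stays on x86. A small sketch reproducing that mapping for a few example architecture strings:

# Sketch: map a target architecture string to the kernel ARCH value,
# following the regex order used in map_kernel_arch() above.
import re

def kernel_arch(a):
    if re.match('i.86$', a):
        return 'i386'
    elif re.match('x86.64$', a):
        return 'x86_64'
    elif re.match('athlon$', a):
        return 'x86'
    elif re.match('armeb$', a):
        return 'arm'
    elif re.match('aarch64(_be)?$', a):
        return 'arm64'
    return a  # assume the name is already a valid kernel ARCH

for t in ('i586', 'i686', 'x86_64', 'athlon', 'aarch64'):
    print(t, '->', kernel_arch(t))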
diff --git a/yocto-poky/meta/classes/kernel-yocto.bbclass b/yocto-poky/meta/classes/kernel-yocto.bbclass
index 325f94c73..c2d0d3076 100644
--- a/yocto-poky/meta/classes/kernel-yocto.bbclass
+++ b/yocto-poky/meta/classes/kernel-yocto.bbclass
@@ -52,7 +52,9 @@ def get_machine_branch(d, default):
parm = urldata.parm
if "branch" in parm:
branches = urldata.parm.get("branch").split(',')
- return branches[0]
+ btype = urldata.parm.get("type")
+ if btype != "kmeta":
+ return branches[0]
return default
@@ -182,11 +184,18 @@ do_kernel_checkout() {
source_dir=`echo ${S} | sed 's%/$%%'`
source_workdir="${WORKDIR}/git"
if [ -d "${WORKDIR}/git/" ]; then
- # case: git repository (bare or non-bare)
+ # case: git repository
# if S is WORKDIR/git, then we shouldn't be moving or deleting the tree.
if [ "${source_dir}" != "${source_workdir}" ]; then
- rm -rf ${S}
- mv ${WORKDIR}/git ${S}
+ if [ -d "${source_workdir}/.git" ]; then
+ # regular git repository with .git
+ rm -rf ${S}
+ mv ${WORKDIR}/git ${S}
+ else
+ # create source for bare cloned git repository
+ git clone ${WORKDIR}/git ${S}
+ rm -rf ${WORKDIR}/git
+ fi
fi
cd ${S}
else
diff --git a/yocto-poky/meta/classes/kernel.bbclass b/yocto-poky/meta/classes/kernel.bbclass
index dfbdfd24f..ee3e9a0d9 100644
--- a/yocto-poky/meta/classes/kernel.bbclass
+++ b/yocto-poky/meta/classes/kernel.bbclass
@@ -309,9 +309,18 @@ do_shared_workdir () {
cp -fR include/generated/* $kerneldir/include/generated/
fi
- if [ -d arch/${ARCH}/include/generated ]; then
- mkdir -p $kerneldir/arch/${ARCH}/include/generated/
- cp -fR arch/${ARCH}/include/generated/* $kerneldir/arch/${ARCH}/include/generated/
+ # When ARCH is set to i386 or x86_64, we need to map ARCH to the real name of the src
+ # dir (x86) under arch/ of the kernel tree, so that we can find the correct source to copy.
+
+ if [ "${ARCH}" = "i386" ] || [ "${ARCH}" = "x86_64" ]; then
+ KERNEL_SRCARCH=x86
+ else
+ KERNEL_SRCARCH=${ARCH}
+ fi
+
+ if [ -d arch/${KERNEL_SRCARCH}/include/generated ]; then
+ mkdir -p $kerneldir/arch/${KERNEL_SRCARCH}/include/generated/
+ cp -fR arch/${KERNEL_SRCARCH}/include/generated/* $kerneldir/arch/${KERNEL_SRCARCH}/include/generated/
fi
}
@@ -413,7 +422,7 @@ do_strip() {
gawk '{print $1}'`
for str in ${KERNEL_IMAGE_STRIP_EXTRA_SECTIONS}; do {
- if [ "$headers" != *"$str"* ]; then
+ if ! (echo "$headers" | grep -q "^$str$"); then
bbwarn "Section not found: $str";
fi
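[Illustration] The do_strip fix replaces an invalid '[ "$headers" != *"$str"* ]' glob test with a grep over the list of section names reported by readelf, matching whole lines. An equivalent check sketched in Python, with a hypothetical list of section names as input:

# Sketch: warn about ELF sections requested for stripping that are not
# present, using an exact whole-line match like grep -q "^$str$".
def missing_sections(present, requested):
    present_set = set(present.splitlines())
    return [s for s in requested if s not in present_set]

headers = ".comment\n.GCC.command.line\n.note.gnu.build-id"  # example readelf output
extra = [".comment", ".nonexistent"]
for sec in missing_sections(headers, extra):
    print("Section not found: %s" % sec)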
diff --git a/yocto-poky/meta/classes/libc-package.bbclass b/yocto-poky/meta/classes/libc-package.bbclass
index 47be691e2..adb423034 100644
--- a/yocto-poky/meta/classes/libc-package.bbclass
+++ b/yocto-poky/meta/classes/libc-package.bbclass
@@ -236,8 +236,8 @@ python package_do_split_gconvs () {
supported[locale] = charset
def output_locale_source(name, pkgname, locale, encoding):
- d.setVar('RDEPENDS_%s' % pkgname, 'localedef %s-localedata-%s %s-charmap-%s' % \
- (mlprefix+bpn, legitimize_package_name(locale), mlprefix+bpn, legitimize_package_name(encoding)))
+ d.setVar('RDEPENDS_%s' % pkgname, '%slocaledef %s-localedata-%s %s-charmap-%s' % \
+ (mlprefix, mlprefix+bpn, legitimize_package_name(locale), mlprefix+bpn, legitimize_package_name(encoding)))
d.setVar('pkg_postinst_%s' % pkgname, d.getVar('locale_base_postinst', True) \
% (locale, encoding, locale))
d.setVar('pkg_postrm_%s' % pkgname, d.getVar('locale_base_postrm', True) % \
diff --git a/yocto-poky/meta/classes/license.bbclass b/yocto-poky/meta/classes/license.bbclass
index c616a2012..8ad4614d6 100644
--- a/yocto-poky/meta/classes/license.bbclass
+++ b/yocto-poky/meta/classes/license.bbclass
@@ -474,6 +474,7 @@ do_populate_lic[sstate-inputdirs] = "${LICSSTATEDIR}"
do_populate_lic[sstate-outputdirs] = "${LICENSE_DIRECTORY}/"
ROOTFS_POSTPROCESS_COMMAND_prepend = "write_package_manifest; license_create_manifest; "
+do_rootfs[recrdeptask] += "do_populate_lic"
do_populate_lic_setscene[dirs] = "${LICSSTATEDIR}/${PN}"
do_populate_lic_setscene[cleandirs] = "${LICSSTATEDIR}"
diff --git a/yocto-poky/meta/classes/metadata_scm.bbclass b/yocto-poky/meta/classes/metadata_scm.bbclass
index 237e61821..0f7f4235a 100644
--- a/yocto-poky/meta/classes/metadata_scm.bbclass
+++ b/yocto-poky/meta/classes/metadata_scm.bbclass
@@ -65,18 +65,19 @@ def base_get_metadata_svn_revision(path, d):
return revision
def base_get_metadata_git_branch(path, d):
- branch = os.popen('cd %s; git branch 2>&1 | grep "^* " | tr -d "* "' % path).read()
+ import bb.process
- if len(branch) != 0:
- return branch
- return "<unknown>"
+ try:
+ rev, _ = bb.process.run('git rev-parse --abbrev-ref HEAD', cwd=path)
+ except bb.process.ExecutionError:
+ rev = '<unknown>'
+ return rev.strip()
def base_get_metadata_git_revision(path, d):
- f = os.popen("cd %s; git log -n 1 --pretty=oneline -- 2>&1" % path)
- data = f.read()
- if f.close() is None:
- rev = data.split(" ")[0]
- if len(rev) != 0:
- return rev
- return "<unknown>"
+ import bb.process
+ try:
+ rev, _ = bb.process.run('git rev-parse HEAD', cwd=path)
+ except bb.process.ExecutionError:
+ rev = '<unknown>'
+ return rev.strip()
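[Illustration] The metadata_scm rework swaps fragile os.popen pipelines for bb.process.run calls to git rev-parse. A standalone approximation using the standard-library subprocess module (bb.process.run behaves similarly but is BitBake-specific):

# Sketch: query the metadata tree's branch and revision, returning
# "<unknown>" when the path is not a git checkout or git is unavailable.
import subprocess

def git_query(path, *args):
    try:
        out = subprocess.check_output(('git',) + args, cwd=path,
                                      stderr=subprocess.DEVNULL)
        return out.decode().strip() or '<unknown>'
    except (subprocess.CalledProcessError, OSError):
        return '<unknown>'

def metadata_branch(path):
    return git_query(path, 'rev-parse', '--abbrev-ref', 'HEAD')

def metadata_revision(path):
    return git_query(path, 'rev-parse', 'HEAD')

print(metadata_branch('.'), metadata_revision('.'))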
diff --git a/yocto-poky/meta/classes/multilib.bbclass b/yocto-poky/meta/classes/multilib.bbclass
index 8f61d8d41..052f911ac 100644
--- a/yocto-poky/meta/classes/multilib.bbclass
+++ b/yocto-poky/meta/classes/multilib.bbclass
@@ -26,6 +26,7 @@ python multilib_virtclass_handler () {
if bb.data.inherits_class('image', e.data):
e.data.setVar("MLPREFIX", variant + "-")
e.data.setVar("PN", variant + "-" + e.data.getVar("PN", False))
+ e.data.setVar('SDKTARGETSYSROOT', e.data.getVar('SDKTARGETSYSROOT', True))
target_vendor = e.data.getVar("TARGET_VENDOR_" + "virtclass-multilib-" + variant, False)
if target_vendor:
e.data.setVar("TARGET_VENDOR", target_vendor)
@@ -93,10 +94,6 @@ python __anonymous () {
# FIXME, we need to map this to something, not delete it!
d.setVar("PACKAGE_INSTALL_ATTEMPTONLY", "")
- if bb.data.inherits_class('populate_sdk_base', d):
- clsextend.map_depends_variable("TOOLCHAIN_TARGET_TASK")
- clsextend.map_depends_variable("TOOLCHAIN_TARGET_TASK_ATTEMPTONLY")
-
if bb.data.inherits_class('image', d):
return
diff --git a/yocto-poky/meta/classes/multilib_global.bbclass b/yocto-poky/meta/classes/multilib_global.bbclass
index 612cfb691..67dc72b76 100644
--- a/yocto-poky/meta/classes/multilib_global.bbclass
+++ b/yocto-poky/meta/classes/multilib_global.bbclass
@@ -93,20 +93,38 @@ def preferred_ml_updates(d):
if prov != provexp and d.getVar(prov, False):
d.renameVar(prov, provexp)
+ def translate_provide(prefix, prov):
+ if not prov.startswith("virtual/"):
+ return prefix + "-" + prov
+ if prov == "virtual/kernel":
+ return prov
+ prov = prov.replace("virtual/", "")
+ return "virtual/" + prefix + "-" + prov
mp = (d.getVar("MULTI_PROVIDER_WHITELIST", True) or "").split()
extramp = []
for p in mp:
if p.endswith("-native") or "-crosssdk-" in p or p.startswith(("nativesdk-", "virtual/nativesdk-")) or 'cross-canadian' in p:
continue
- virt = ""
- if p.startswith("virtual/"):
- p = p.replace("virtual/", "")
- virt = "virtual/"
for pref in prefixes:
- extramp.append(virt + pref + "-" + p)
+ extramp.append(translate_provide(pref, p))
d.setVar("MULTI_PROVIDER_WHITELIST", " ".join(mp + extramp))
+ abisafe = (d.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE", True) or "").split()
+ extras = []
+ for p in prefixes:
+ for a in abisafe:
+ extras.append(p + "-" + a)
+ d.appendVar("SIGGEN_EXCLUDERECIPES_ABISAFE", " " + " ".join(extras))
+
+ siggen_exclude = (d.getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS", True) or "").split()
+ extras = []
+ for p in prefixes:
+ for a in siggen_exclude:
+ a1, a2 = a.split("->")
+ extras.append(translate_provide(p, a1) + "->" + translate_provide(p, a2))
+ d.appendVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS", " " + " ".join(extras))
+
python multilib_virtclass_handler_vendor () {
if isinstance(e, bb.event.ConfigParsed):
for v in e.data.getVar("MULTILIB_VARIANTS", True).split():
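[Illustration] translate_provide() keeps "virtual/" in front of the multilib prefix and leaves virtual/kernel untouched, so the provider whitelist and the signature-exclusion pairs are rewritten consistently. A short sketch showing the expected rewrites for a hypothetical lib32 prefix:

# Sketch: the multilib provide-translation rule introduced above.
def translate_provide(prefix, prov):
    if not prov.startswith("virtual/"):
        return prefix + "-" + prov
    if prov == "virtual/kernel":
        return prov
    return "virtual/" + prefix + "-" + prov[len("virtual/"):]

for p in ("glibc", "virtual/libc", "virtual/kernel"):
    print(p, "->", translate_provide("lib32", p))
# glibc          -> lib32-glibc
# virtual/libc   -> virtual/lib32-libc
# virtual/kernel -> virtual/kernel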
diff --git a/yocto-poky/meta/classes/package.bbclass b/yocto-poky/meta/classes/package.bbclass
index cd92beb39..a86b68016 100644
--- a/yocto-poky/meta/classes/package.bbclass
+++ b/yocto-poky/meta/classes/package.bbclass
@@ -39,7 +39,6 @@
# packaging steps
inherit packagedata
-inherit prserv
inherit chrpath
# Need the package_qa_handle_error() in insane.bbclass
@@ -1146,7 +1145,8 @@ python populate_packages () {
else:
for f in unshipped:
msg = msg + "\n " + f
- msg = msg + "\nPlease set FILES such that these items are packaged. Alternatively if they are unneeded, avoid installing them or delete them within do_install."
+ msg = msg + "\nPlease set FILES such that these items are packaged. Alternatively if they are unneeded, avoid installing them or delete them within do_install.\n"
+ msg = msg + "%s: %d installed and not shipped files." % (pn, len(unshipped))
package_qa_handle_error("installed-vs-shipped", msg, d)
}
populate_packages[dirs] = "${D}"
diff --git a/yocto-poky/meta/classes/pixbufcache.bbclass b/yocto-poky/meta/classes/pixbufcache.bbclass
index 349967d74..dbe11e12d 100644
--- a/yocto-poky/meta/classes/pixbufcache.bbclass
+++ b/yocto-poky/meta/classes/pixbufcache.bbclass
@@ -15,7 +15,7 @@ if [ "x$D" != "x" ]; then
else
# Update the pixbuf loaders in case they haven't been registered yet
- GDK_PIXBUF_MODULEDIR=${libdir}/gdk-pixbuf-2.0/2.10.0/loaders gdk-pixbuf-query-loaders --update-cache
+ ${libdir}/gdk-pixbuf-2.0/gdk-pixbuf-query-loaders --update-cache
if [ -x ${bindir}/gtk-update-icon-cache ] && [ -d ${datadir}/icons ]; then
for icondir in /usr/share/icons/*; do
@@ -46,7 +46,7 @@ python populate_packages_append() {
}
gdkpixbuf_complete() {
- GDK_PIXBUF_FATAL_LOADER=1 ${STAGING_BINDIR_NATIVE}/gdk-pixbuf-query-loaders --update-cache || exit 1
+ GDK_PIXBUF_FATAL_LOADER=1 ${STAGING_LIBDIR_NATIVE}/gdk-pixbuf-2.0/gdk-pixbuf-query-loaders --update-cache || exit 1
}
#
diff --git a/yocto-poky/meta/classes/populate_sdk_base.bbclass b/yocto-poky/meta/classes/populate_sdk_base.bbclass
index aa7a9a5b4..35e129b06 100644
--- a/yocto-poky/meta/classes/populate_sdk_base.bbclass
+++ b/yocto-poky/meta/classes/populate_sdk_base.bbclass
@@ -80,6 +80,7 @@ python write_host_sdk_manifest () {
POPULATE_SDK_POST_TARGET_COMMAND_append = " write_target_sdk_manifest ; "
POPULATE_SDK_POST_HOST_COMMAND_append = " write_host_sdk_manifest; "
+SDK_POSTPROCESS_COMMAND = " create_sdk_files; tar_sdk; ${SDK_PACKAGING_FUNC}; "
# Some archs override this, we need the nativesdk version
# turns out this is hard to get from the datastore due to TRANSLATED_TARGET_ARCH
@@ -108,15 +109,6 @@ fakeroot python do_populate_sdk() {
manifest_type=Manifest.MANIFEST_TYPE_SDK_TARGET)
populate_sdk(d)
-
- # Process DEFAULTTUNE
- bb.build.exec_func("create_sdk_files", d)
-
- bb.build.exec_func("tar_sdk", d)
-
- sdk_packaging_func = d.getVar("SDK_PACKAGING_FUNC", True) or ""
- if sdk_packaging_func.strip():
- bb.build.exec_func(d.getVar("SDK_PACKAGING_FUNC", True), d)
}
fakeroot create_sdk_files() {
@@ -196,7 +188,7 @@ populate_sdk_log_check() {
done
}
-do_populate_sdk[dirs] = "${TOPDIR}"
+do_populate_sdk[dirs] = "${PKGDATA_DIR} ${TOPDIR}"
do_populate_sdk[depends] += "${@' '.join([x + ':do_populate_sysroot' for x in d.getVar('SDK_DEPENDS', True).split()])} ${@d.getVarFlag('do_rootfs', 'depends', False)}"
do_populate_sdk[rdepends] = "${@' '.join([x + ':do_populate_sysroot' for x in d.getVar('SDK_RDEPENDS', True).split()])}"
do_populate_sdk[recrdeptask] += "do_packagedata do_package_write_rpm do_package_write_ipk do_package_write_deb"
diff --git a/yocto-poky/meta/classes/populate_sdk_ext.bbclass b/yocto-poky/meta/classes/populate_sdk_ext.bbclass
index 4ef8838e3..b9808bb8e 100644
--- a/yocto-poky/meta/classes/populate_sdk_ext.bbclass
+++ b/yocto-poky/meta/classes/populate_sdk_ext.bbclass
@@ -51,7 +51,7 @@ python copy_buildsystem () {
core_meta_subdir = ''
# Copy in all metadata layers + bitbake (as repositories)
- buildsystem = oe.copy_buildsystem.BuildSystem(d)
+ buildsystem = oe.copy_buildsystem.BuildSystem('extensible SDK', d)
baseoutpath = d.getVar('SDK_OUTPUT', True) + '/' + d.getVar('SDKPATH', True)
layers_copied = buildsystem.copy_bitbake_and_layers(baseoutpath + '/layers')
@@ -155,10 +155,16 @@ python copy_buildsystem () {
f.write('NATIVELSBSTRING_forcevariable = "%s"\n\n' % fixedlsbstring)
# Ensure locked sstate cache objects are re-used without error
- f.write('SIGGEN_LOCKEDSIGS_CHECK_LEVEL = "warn"\n\n')
+ f.write('SIGGEN_LOCKEDSIGS_CHECK_LEVEL = "none"\n\n')
+
+ # If you define an sdk_extraconf() function, it can contain additional config
+ extraconf = (d.getVar('sdk_extraconf', True) or '').strip()
+ if extraconf:
+ # Strip off any leading / trailing spaces
+ for line in extraconf.splitlines():
+ f.write(line.strip() + '\n')
f.write('require conf/locked-sigs.inc\n')
- f.write('require conf/work-config.inc\n')
sigfile = d.getVar('WORKDIR', True) + '/locked-sigs.inc'
oe.copy_buildsystem.generate_locked_sigs(sigfile, d)
@@ -178,17 +184,10 @@ python copy_buildsystem () {
d.getVar('SSTATE_DIR', True),
sstate_out, d,
fixedlsbstring)
-
- # Create a dummy config file for additional settings
- with open(baseoutpath + '/conf/work-config.inc', 'w') as f:
- pass
}
def extsdk_get_buildtools_filename(d):
- # This is somewhat of a hack
- localdata = bb.data.createCopy(d)
- localdata.setVar('PN', 'buildtools-tarball')
- return localdata.expand('${SDK_NAME}-buildtools-nativesdk-standalone-*.sh')
+ return '*-buildtools-nativesdk-standalone-*.sh'
install_tools() {
install -d ${SDK_OUTPUT}/${SDKPATHNATIVE}${bindir_nativesdk}
@@ -201,6 +200,8 @@ install_tools() {
install $buildtools_path ${SDK_OUTPUT}/${SDKPATH}
install ${SDK_DEPLOY}/${BUILD_ARCH}-nativesdk-libc.tar.bz2 ${SDK_OUTPUT}/${SDKPATH}
+
+ install -m 0755 ${COREBASE}/meta/files/ext-sdk-prepare.sh ${SDK_OUTPUT}/${SDKPATH}
}
# Since bitbake won't run as root it doesn't make sense to try and install
@@ -218,29 +219,37 @@ SDK_PRE_INSTALL_COMMAND_task-populate-sdk-ext = "${sdk_ext_preinst}"
sdk_ext_postinst() {
printf "\nExtracting buildtools...\n"
cd $target_sdk_dir
- printf "buildtools\ny" | ./*buildtools-nativesdk-standalone* > /dev/null
+ printf "buildtools\ny" | ./*buildtools-nativesdk-standalone* > /dev/null || ( printf 'ERROR: buildtools installation failed\n' ; exit 1 )
# Make sure when the user sets up the environment, they also get
# the buildtools-tarball tools in their path.
- echo ". $target_sdk_dir/buildtools/environment-setup*" >> $target_sdk_dir/environment-setup*
+ env_setup_script="$target_sdk_dir/environment-setup-${REAL_MULTIMACH_TARGET_SYS}"
+ echo ". $target_sdk_dir/buildtools/environment-setup*" >> $env_setup_script
# Allow bitbake environment setup to be ran as part of this sdk.
- echo "export OE_SKIP_SDK_CHECK=1" >> $target_sdk_dir/environment-setup*
+ echo "export OE_SKIP_SDK_CHECK=1" >> $env_setup_script
# A bit of another hack, but we need this in the path only for devtool
# so put it at the end of $PATH.
- echo "export PATH=\$PATH:$target_sdk_dir/sysroots/${SDK_SYS}/${bindir_nativesdk}" >> $target_sdk_dir/environment-setup*
+ echo "export PATH=\$PATH:$target_sdk_dir/sysroots/${SDK_SYS}/${bindir_nativesdk}" >> $env_setup_script
+
+ echo "printf 'SDK environment now set up; additionally you may now run devtool to perform development tasks.\nRun devtool --help for further details.\n'" >> $env_setup_script
+
+ # Warn if trying to use external bitbake and the ext SDK together
+ echo "(which bitbake > /dev/null 2>&1 && echo 'WARNING: attempting to use the extensible SDK in an environment set up to run bitbake - this may lead to unexpected results. Please source this script in a new shell session instead.') || true" >> $env_setup_script
# For now this is where uninative.bbclass expects the tarball
mv *-nativesdk-libc.tar.* $target_sdk_dir/`dirname ${oe_init_build_env_path}`
if [ "$prepare_buildsystem" != "no" ]; then
- printf "Preparing build system...\n"
- # dash which is /bin/sh on Ubuntu will not preserve the
- # current working directory when first ran, nor will it set $1 when
- # sourcing a script. That is why this has to look so ugly.
- sh -c ". buildtools/environment-setup* > preparing_build_system.log && cd $target_sdk_dir/`dirname ${oe_init_build_env_path}` && set $target_sdk_dir && . $target_sdk_dir/${oe_init_build_env_path} $target_sdk_dir >> preparing_build_system.log && bitbake ${SDK_TARGETS} >> preparing_build_system.log" || { echo "SDK preparation failed: see `pwd`/preparing_build_system.log" ; exit 1 ; }
+ printf "Preparing build system...\n"
+ # dash which is /bin/sh on Ubuntu will not preserve the
+ # current working directory when first run, nor will it set $1 when
+ # sourcing a script. That is why this has to look so ugly.
+ LOGFILE="$target_sdk_dir/preparing_build_system.log"
+ sh -c ". buildtools/environment-setup* > $LOGFILE && cd $target_sdk_dir/`dirname ${oe_init_build_env_path}` && set $target_sdk_dir && . $target_sdk_dir/${oe_init_build_env_path} $target_sdk_dir >> $LOGFILE && $target_sdk_dir/ext-sdk-prepare.sh $target_sdk_dir '${SDK_TARGETS}' >> $LOGFILE 2>&1" || { echo "ERROR: SDK preparation failed: see $LOGFILE"; echo "printf 'ERROR: this SDK was not fully installed and needs reinstalling\n'" >> $env_setup_script ; exit 1 ; }
fi
+ rm -f $target_sdk_dir/ext-sdk-prepare.sh
echo done
}
@@ -249,6 +258,11 @@ SDK_POST_INSTALL_COMMAND_task-populate-sdk-ext = "${sdk_ext_postinst}"
SDK_POSTPROCESS_COMMAND_prepend_task-populate-sdk-ext = "copy_buildsystem; install_tools; "
fakeroot python do_populate_sdk_ext() {
+ # FIXME hopefully we can remove this restriction at some point, but uninative
+ # currently forces this upon us
+ if d.getVar('SDK_ARCH', True) != d.getVar('BUILD_ARCH', True):
+ bb.fatal('The extensible SDK can currently only be built for the same architecture as the machine being built on - SDK_ARCH is set to %s (likely via setting SDKMACHINE) which is different from the architecture of the build machine (%s). Unable to continue.' % (d.getVar('SDK_ARCH', True), d.getVar('BUILD_ARCH', True)))
+
bb.build.exec_func("do_populate_sdk", d)
}
diff --git a/yocto-poky/meta/classes/prserv.bbclass b/yocto-poky/meta/classes/prserv.bbclass
deleted file mode 100644
index 139597f9c..000000000
--- a/yocto-poky/meta/classes/prserv.bbclass
+++ /dev/null
@@ -1,2 +0,0 @@
-
-
diff --git a/yocto-poky/meta/classes/ptest.bbclass b/yocto-poky/meta/classes/ptest.bbclass
index b5f470f08..4dc5dbe9e 100644
--- a/yocto-poky/meta/classes/ptest.bbclass
+++ b/yocto-poky/meta/classes/ptest.bbclass
@@ -39,12 +39,12 @@ do_install_ptest() {
do_install_ptest_base() {
if [ -f ${WORKDIR}/run-ptest ]; then
install -D ${WORKDIR}/run-ptest ${D}${PTEST_PATH}/run-ptest
- if grep -q install-ptest: Makefile; then
- oe_runmake DESTDIR=${D}${PTEST_PATH} install-ptest
- fi
- do_install_ptest
- chown -R root:root ${D}${PTEST_PATH}
fi
+ if grep -q install-ptest: Makefile; then
+ oe_runmake DESTDIR=${D}${PTEST_PATH} install-ptest
+ fi
+ do_install_ptest
+ chown -R root:root ${D}${PTEST_PATH}
}
do_configure_ptest_base[dirs] = "${B}"
diff --git a/yocto-poky/meta/classes/report-error.bbclass b/yocto-poky/meta/classes/report-error.bbclass
index 040c29ea2..82b5bcd69 100644
--- a/yocto-poky/meta/classes/report-error.bbclass
+++ b/yocto-poky/meta/classes/report-error.bbclass
@@ -9,22 +9,25 @@
ERR_REPORT_DIR ?= "${LOG_DIR}/error-report"
def errorreport_getdata(e):
+ import codecs
logpath = e.data.getVar('ERR_REPORT_DIR', True)
datafile = os.path.join(logpath, "error-report.txt")
- with open(datafile) as f:
+ with codecs.open(datafile, 'r', 'utf-8') as f:
data = f.read()
return data
def errorreport_savedata(e, newdata, file):
import json
+ import codecs
logpath = e.data.getVar('ERR_REPORT_DIR', True)
datafile = os.path.join(logpath, file)
- with open(datafile, "w") as f:
+ with codecs.open(datafile, 'w', 'utf-8') as f:
json.dump(newdata, f, indent=4, sort_keys=True)
return datafile
python errorreport_handler () {
import json
+ import codecs
logpath = e.data.getVar('ERR_REPORT_DIR', True)
datafile = os.path.join(logpath, "error-report.txt")
@@ -53,8 +56,8 @@ python errorreport_handler () {
taskdata['task'] = task
if log:
try:
- logFile = open(log, 'r')
- logdata = logFile.read().decode('utf-8')
+ logFile = codecs.open(log, 'r', 'utf-8')
+ logdata = logFile.read()
logFile.close()
except:
logdata = "Unable to read log file"
diff --git a/yocto-poky/meta/classes/sanity.bbclass b/yocto-poky/meta/classes/sanity.bbclass
index 2eb744fb7..ae86d261e 100644
--- a/yocto-poky/meta/classes/sanity.bbclass
+++ b/yocto-poky/meta/classes/sanity.bbclass
@@ -3,7 +3,7 @@
#
SANITY_REQUIRED_UTILITIES ?= "patch diffstat makeinfo git bzip2 tar \
- gzip gawk chrpath wget cpio perl"
+ gzip gawk chrpath wget cpio perl file"
def bblayers_conf_file(d):
return os.path.join(d.getVar('TOPDIR', True), 'conf/bblayers.conf')
@@ -839,9 +839,12 @@ def check_sanity_everybuild(status, d):
else:
bb.utils.mkdirhier(tmpdir)
# Remove setuid, setgid and sticky bits from TMPDIR
- os.chmod(tmpdir, os.stat(tmpdir).st_mode & ~ stat.S_ISUID)
- os.chmod(tmpdir, os.stat(tmpdir).st_mode & ~ stat.S_ISGID)
- os.chmod(tmpdir, os.stat(tmpdir).st_mode & ~ stat.S_ISVTX)
+ try:
+ os.chmod(tmpdir, os.stat(tmpdir).st_mode & ~ stat.S_ISUID)
+ os.chmod(tmpdir, os.stat(tmpdir).st_mode & ~ stat.S_ISGID)
+ os.chmod(tmpdir, os.stat(tmpdir).st_mode & ~ stat.S_ISVTX)
+ except OSError as exc:
+ bb.warn("Unable to chmod TMPDIR: %s" % exc)
with open(checkfile, "w") as f:
f.write(tmpdir)
diff --git a/yocto-poky/meta/classes/sign_package_feed.bbclass b/yocto-poky/meta/classes/sign_package_feed.bbclass
new file mode 100644
index 000000000..426381002
--- /dev/null
+++ b/yocto-poky/meta/classes/sign_package_feed.bbclass
@@ -0,0 +1,31 @@
+# Class for signing package feeds
+#
+# Related configuration variables that will be used after this class is
+# inherited:
+# PACKAGE_FEED_GPG_PASSPHRASE_FILE
+# Path to a file containing the passphrase of the signing key.
+# PACKAGE_FEED_GPG_NAME
+# Name of the key to sign with. May be key id or key name.
+# GPG_BIN
+# Optional variable for specifying the gpg binary/wrapper to use for
+# signing.
+# GPG_PATH
+# Optional variable for specifying the gnupg "home" directory.
+#
+inherit sanity
+
+PACKAGE_FEED_SIGN = '1'
+
+python () {
+ # Check sanity of configuration
+ for var in ('PACKAGE_FEED_GPG_NAME', 'PACKAGE_FEED_GPG_PASSPHRASE_FILE'):
+ if not d.getVar(var, True):
+ raise_sanity_error("You need to define %s in the config" % var, d)
+
+ # Set expected location of the public key
+ d.setVar('PACKAGE_FEED_GPG_PUBKEY',
+ os.path.join(d.getVar('STAGING_ETCDIR_NATIVE'),
+ 'PACKAGE-FEED-GPG-PUBKEY'))
+}
+
+do_package_index[depends] += "signing-keys:do_export_public_keys"
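[Illustration] The anonymous Python function in the new class simply refuses to continue when either of the two required feed-signing variables is unset. A tiny sketch of that required-configuration check against a plain dictionary standing in for the datastore (the key name and path below are examples):

# Sketch: fail early when required feed-signing configuration is missing.
REQUIRED = ('PACKAGE_FEED_GPG_NAME', 'PACKAGE_FEED_GPG_PASSPHRASE_FILE')

def check_feed_signing_config(config):
    missing = [var for var in REQUIRED if not config.get(var)]
    if missing:
        raise ValueError("You need to define %s in the config" % ", ".join(missing))

check_feed_signing_config({
    'PACKAGE_FEED_GPG_NAME': 'feed-signing-key',          # example key name
    'PACKAGE_FEED_GPG_PASSPHRASE_FILE': '/path/to/passphrase',  # example path
})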
diff --git a/yocto-poky/meta/classes/sign_rpm.bbclass b/yocto-poky/meta/classes/sign_rpm.bbclass
index 0aa4cd841..f0c3dc9be 100644
--- a/yocto-poky/meta/classes/sign_rpm.bbclass
+++ b/yocto-poky/meta/classes/sign_rpm.bbclass
@@ -4,23 +4,27 @@
# RPM_GPG_PASSPHRASE_FILE
# Path to a file containing the passphrase of the signing key.
# RPM_GPG_NAME
-# Name of the key to sign with. Alternatively you can define
-# %_gpg_name macro in your ~/.oerpmmacros file.
-# RPM_GPG_PUBKEY
-# Path to a file containing the public key (in "armor" format)
-# corresponding the signing key.
+# Name of the key to sign with. May be key id or key name.
# GPG_BIN
# Optional variable for specifying the gpg binary/wrapper to use for
# signing.
+# GPG_PATH
+# Optional variable for specifying the gnupg "home" directory.
#
inherit sanity
RPM_SIGN_PACKAGES='1'
-_check_gpg_name () {
- macrodef=`rpm -E '%_gpg_name'`
- [ "$macrodef" == "%_gpg_name" ] && return 1 || return 0
+python () {
+ # Check configuration
+ for var in ('RPM_GPG_NAME', 'RPM_GPG_PASSPHRASE_FILE'):
+ if not d.getVar(var, True):
+ raise_sanity_error("You need to define %s in the config" % var, d)
+
+ # Set the expected location of the public key
+ d.setVar('RPM_GPG_PUBKEY', os.path.join(d.getVar('STAGING_ETCDIR_NATIVE'),
+ 'RPM-GPG-PUBKEY'))
}
@@ -29,18 +33,11 @@ def rpmsign_wrapper(d, files, passphrase, gpg_name=None):
# Find the correct rpm binary
rpm_bin_path = d.getVar('STAGING_BINDIR_NATIVE', True) + '/rpm'
- cmd = rpm_bin_path + " --addsign "
- if gpg_name:
- cmd += "--define '%%_gpg_name %s' " % gpg_name
- else:
- try:
- bb.build.exec_func('_check_gpg_name', d)
- except bb.build.FuncFailed:
- raise_sanity_error("You need to define RPM_GPG_NAME in bitbake "
- "config or the %_gpg_name RPM macro defined "
- "(e.g. in ~/.oerpmmacros", d)
+ cmd = rpm_bin_path + " --addsign --define '_gpg_name %s' " % gpg_name
if d.getVar('GPG_BIN', True):
cmd += "--define '%%__gpg %s' " % d.getVar('GPG_BIN', True)
+ if d.getVar('GPG_PATH', True):
+ cmd += "--define '_gpg_path %s' " % d.getVar('GPG_PATH', True)
cmd += ' '.join(files)
# Need to use pexpect for feeding the passphrase
@@ -51,20 +48,19 @@ def rpmsign_wrapper(d, files, passphrase, gpg_name=None):
proc.expect(pexpect.EOF, timeout=900)
proc.close()
except pexpect.TIMEOUT as err:
- bb.debug('rpmsign timeout: %s' % err)
+ bb.warn('rpmsign timeout: %s' % err)
proc.terminate()
+ else:
+ if os.WEXITSTATUS(proc.status) or not os.WIFEXITED(proc.status):
+ bb.warn('rpmsign failed: %s' % proc.before.strip())
return proc.exitstatus
python sign_rpm () {
import glob
- rpm_gpg_pass_file = (d.getVar("RPM_GPG_PASSPHRASE_FILE", True) or "")
- if rpm_gpg_pass_file:
- with open(rpm_gpg_pass_file) as fobj:
- rpm_gpg_passphrase = fobj.readlines()[0].rstrip('\n')
- else:
- raise_sanity_error("You need to define RPM_GPG_PASSPHRASE_FILE in the config", d)
+ with open(d.getVar("RPM_GPG_PASSPHRASE_FILE", True)) as fobj:
+ rpm_gpg_passphrase = fobj.readlines()[0].rstrip('\n')
rpm_gpg_name = (d.getVar("RPM_GPG_NAME", True) or "")
@@ -73,3 +69,5 @@ python sign_rpm () {
if rpmsign_wrapper(d, rpms, rpm_gpg_passphrase, rpm_gpg_name) != 0:
raise bb.build.FuncFailed("RPM signing failed")
}
+
+do_package_index[depends] += "signing-keys:do_export_public_keys"
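[Illustration] rpmsign_wrapper() now always passes the key name via the _gpg_name macro and optionally overrides __gpg and _gpg_path. A sketch of just the command construction, with placeholder values; the real function then drives rpm through pexpect to feed the passphrase:

# Sketch: assemble the rpm --addsign command line used for signing,
# mirroring the defines added in rpmsign_wrapper() above.
def build_addsign_cmd(rpm_bin, files, gpg_name, gpg_bin=None, gpg_path=None):
    cmd = [rpm_bin, "--addsign", "--define", "_gpg_name %s" % gpg_name]
    if gpg_bin:
        cmd += ["--define", "__gpg %s" % gpg_bin]
    if gpg_path:
        cmd += ["--define", "_gpg_path %s" % gpg_path]
    return cmd + list(files)

print(build_addsign_cmd("/usr/bin/rpm", ["pkg1.rpm", "pkg2.rpm"],
                        "release-key", gpg_path="/tmp/gpg-home"))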
diff --git a/yocto-poky/meta/classes/sstate.bbclass b/yocto-poky/meta/classes/sstate.bbclass
index b9ad6da9d..d09e27aee 100644
--- a/yocto-poky/meta/classes/sstate.bbclass
+++ b/yocto-poky/meta/classes/sstate.bbclass
@@ -61,16 +61,6 @@ SSTATE_SIG_PASSPHRASE ?= ""
# Whether to verify the GnuPG signatures when extracting sstate archives
SSTATE_VERIFY_SIG ?= "0"
-# Specify dirs in which the shell function is executed and don't use ${B}
-# as default dirs to avoid possible race about ${B} with other task.
-sstate_create_package[dirs] = "${SSTATE_BUILDDIR}"
-sstate_unpack_package[dirs] = "${SSTATE_INSTDIR}"
-
-# Do not run sstate_hardcode_path() in ${B}:
-# the ${B} maybe removed by cmake_do_configure() while
-# sstate_hardcode_path() running.
-sstate_hardcode_path[dirs] = "${SSTATE_BUILDDIR}"
-
python () {
if bb.data.inherits_class('native', d):
d.setVar('SSTATE_PKGARCH', d.getVar('BUILD_ARCH', False))
@@ -164,6 +154,8 @@ def sstate_install(ss, d):
shareddirs = []
bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))
+ sstateinst = d.expand("${WORKDIR}/sstate-install-%s/" % ss['task'])
+
manifest, d2 = oe.sstatesig.sstate_get_manifest_filename(ss['task'], d)
if os.access(manifest, os.R_OK):
@@ -267,7 +259,8 @@ def sstate_install(ss, d):
oe.path.copyhardlinktree(state[1], state[2])
for postinst in (d.getVar('SSTATEPOSTINSTFUNCS', True) or '').split():
- bb.build.exec_func(postinst, d)
+ # All hooks should run in the SSTATE_INSTDIR
+ bb.build.exec_func(postinst, d, (sstateinst,))
for lock in locks:
bb.utils.unlockfile(lock)
@@ -307,7 +300,8 @@ def sstate_installpkg(ss, d):
bb.warn("Cannot verify signature on sstate package %s" % sstatepkg)
for f in (d.getVar('SSTATEPREINSTFUNCS', True) or '').split() + ['sstate_unpack_package'] + (d.getVar('SSTATEPOSTUNPACKFUNCS', True) or '').split():
- bb.build.exec_func(f, d)
+ # All hooks should run in the SSTATE_INSTDIR
+ bb.build.exec_func(f, d, (sstateinst,))
for state in ss['dirs']:
prepdir(state[1])
@@ -579,8 +573,9 @@ def sstate_package(ss, d):
for f in (d.getVar('SSTATECREATEFUNCS', True) or '').split() + ['sstate_create_package'] + \
(d.getVar('SSTATEPOSTCREATEFUNCS', True) or '').split():
- bb.build.exec_func(f, d)
-
+ # All hooks should run in SSTATE_BUILDDIR.
+ bb.build.exec_func(f, d, (sstatebuild,))
+
bb.siggen.dump_this_task(sstatepkg + ".siginfo", d)
return
@@ -642,19 +637,22 @@ python sstate_task_prefunc () {
shared_state = sstate_state_fromvars(d)
sstate_clean(shared_state, d)
}
+sstate_task_prefunc[dirs] = "${WORKDIR}"
python sstate_task_postfunc () {
shared_state = sstate_state_fromvars(d)
+
sstate_install(shared_state, d)
for intercept in shared_state['interceptfuncs']:
- bb.build.exec_func(intercept, d)
+ bb.build.exec_func(intercept, d, (d.getVar("WORKDIR", True),))
omask = os.umask(002)
if omask != 002:
bb.note("Using umask 002 (not %0o) for sstate packaging" % omask)
sstate_package(shared_state, d)
os.umask(omask)
}
-
+sstate_task_postfunc[dirs] = "${WORKDIR}"
+
#
# Shell function to generate a sstate package from a directory
diff --git a/yocto-poky/meta/classes/testimage.bbclass b/yocto-poky/meta/classes/testimage.bbclass
index a1918ba9e..b4d4a69b0 100644
--- a/yocto-poky/meta/classes/testimage.bbclass
+++ b/yocto-poky/meta/classes/testimage.bbclass
@@ -80,11 +80,13 @@ testimage_dump_target () {
testimage_dump_host () {
top -bn1
+ iostat -x -z -N -d -p ALL 20 2
ps -ef
free
df
memstat
dmesg
+ ip -s link
netstat -an
}
@@ -146,6 +148,10 @@ def get_tests_list(d, type="runtime"):
testslist.append("oeqa." + type + "." + testname)
found = True
break
+ elif os.path.exists(os.path.join(p, 'lib', 'oeqa', type, testname.split(".")[0] + '.py')):
+ testslist.append("oeqa." + type + "." + testname)
+ found = True
+ break
if not found:
bb.fatal('Test %s specified in TEST_SUITES could not be found in lib/oeqa/runtime under BBPATH' % testname)
@@ -172,6 +178,7 @@ def exportTests(d,tc):
import json
import shutil
import pkgutil
+ import re
exportpath = d.getVar("TEST_EXPORT_DIR", True)
@@ -198,9 +205,18 @@ def exportTests(d,tc):
savedata["host_dumper"]["parent_dir"] = tc.host_dumper.parent_dir
savedata["host_dumper"]["cmds"] = tc.host_dumper.cmds
- with open(os.path.join(exportpath, "testdata.json"), "w") as f:
+ json_file = os.path.join(exportpath, "testdata.json")
+ with open(json_file, "w") as f:
json.dump(savedata, f, skipkeys=True, indent=4, sort_keys=True)
+ # Replace absolute path with relative in the file
+ exclude_path = os.path.join(d.getVar("COREBASE", True),'meta','lib','oeqa')
+ f1 = open(json_file,'r').read()
+ f2 = open(json_file,'w')
+ m = f1.replace(exclude_path,'oeqa')
+ f2.write(m)
+ f2.close()
+
# now start copying files
# we'll basically copy everything under meta/lib/oeqa, with these exceptions
# - oeqa/targetcontrol.py - not needed
@@ -214,6 +230,8 @@ def exportTests(d,tc):
bb.utils.mkdirhier(os.path.join(exportpath, "oeqa/utils"))
# copy test modules, this should cover tests in other layers too
for t in tc.testslist:
+ if re.search("\w+\.\w+\.test_\S+", t):
+ t = '.'.join(t.split('.')[:3])
mod = pkgutil.get_loader(t)
shutil.copy2(mod.filename, os.path.join(exportpath, "oeqa/runtime"))
# copy __init__.py files
@@ -279,14 +297,20 @@ def testimage_main(d):
self.imagefeatures = d.getVar("IMAGE_FEATURES", True).split()
self.distrofeatures = d.getVar("DISTRO_FEATURES", True).split()
manifest = os.path.join(d.getVar("DEPLOY_DIR_IMAGE", True), d.getVar("IMAGE_LINK_NAME", True) + ".manifest")
+ nomanifest = d.getVar("IMAGE_NO_MANIFEST", True)
+
self.sigterm = False
self.origsigtermhandler = signal.getsignal(signal.SIGTERM)
signal.signal(signal.SIGTERM, self.sigterm_exception)
- try:
- with open(manifest) as f:
- self.pkgmanifest = f.read()
- except IOError as e:
- bb.fatal("No package manifest file found. Did you build the image?\n%s" % e)
+
+ if nomanifest is None or nomanifest != "1":
+ try:
+ with open(manifest) as f:
+ self.pkgmanifest = f.read()
+ except IOError as e:
+ bb.fatal("No package manifest file found. Did you build the image?\n%s" % e)
+ else:
+ self.pkgmanifest = ""
def sigterm_exception(self, signum, stackframe):
bb.warn("TestImage received SIGTERM, shutting down...")
@@ -305,13 +329,15 @@ def testimage_main(d):
import traceback
bb.fatal("Loading tests failed:\n%s" % traceback.format_exc())
- target.deploy()
- try:
- target.start()
- if export:
- exportTests(d,tc)
- else:
+ if export:
+ signal.signal(signal.SIGTERM, tc.origsigtermhandler)
+ tc.origsigtermhandler = None
+ exportTests(d,tc)
+ else:
+ target.deploy()
+ try:
+ target.start()
starttime = time.time()
result = runTests(tc)
stoptime = time.time()
@@ -324,9 +350,9 @@ def testimage_main(d):
bb.plain(msg)
else:
raise bb.build.FuncFailed("%s - FAILED - check the task log and the ssh log" % pn )
- finally:
- signal.signal(signal.SIGTERM, tc.origsigtermhandler)
- target.stop()
+ finally:
+ signal.signal(signal.SIGTERM, tc.origsigtermhandler)
+ target.stop()
testimage_main[vardepsexclude] =+ "BB_ORIGENV"
diff --git a/yocto-poky/meta/classes/toolchain-scripts.bbclass b/yocto-poky/meta/classes/toolchain-scripts.bbclass
index d0b2b9148..ab4feb083 100644
--- a/yocto-poky/meta/classes/toolchain-scripts.bbclass
+++ b/yocto-poky/meta/classes/toolchain-scripts.bbclass
@@ -32,6 +32,7 @@ toolchain_create_sdk_env_script () {
echo 'export OECORE_TARGET_SYSROOT="$SDKTARGETSYSROOT"' >> $script
echo "export OECORE_ACLOCAL_OPTS=\"-I $sdkpathnative/usr/share/aclocal\"" >> $script
echo "export PYTHONHOME=$sdkpathnative$prefix" >> $script
+ echo 'unset command_not_found_handle' >> $script
toolchain_shared_env_script
}
diff --git a/yocto-poky/meta/classes/uninative.bbclass b/yocto-poky/meta/classes/uninative.bbclass
index 51391dbc4..0cd27db85 100644
--- a/yocto-poky/meta/classes/uninative.bbclass
+++ b/yocto-poky/meta/classes/uninative.bbclass
@@ -1,6 +1,6 @@
NATIVELSBSTRING = "universal"
-UNINATIVE_LOADER = "${STAGING_DIR_NATIVE}/lib/ld-linux-x86-64.so.2"
+UNINATIVE_LOADER ?= "${@bb.utils.contains('BUILD_ARCH', 'x86_64', '${STAGING_DIR_NATIVE}/lib/ld-linux-x86-64.so.2', '${STAGING_DIR_NATIVE}/lib/ld-linux.so.2', d)}"
addhandler uninative_eventhandler
uninative_eventhandler[eventmask] = "bb.event.BuildStarted"
diff --git a/yocto-poky/meta/classes/useradd-staticids.bbclass b/yocto-poky/meta/classes/useradd-staticids.bbclass
index 421a70a6a..924d6eae6 100644
--- a/yocto-poky/meta/classes/useradd-staticids.bbclass
+++ b/yocto-poky/meta/classes/useradd-staticids.bbclass
@@ -2,6 +2,7 @@
# we need a function to reformat the params based on a static file
def update_useradd_static_config(d):
import argparse
+ import itertools
import re
class myArgumentParser( argparse.ArgumentParser ):
@@ -16,6 +17,11 @@ def update_useradd_static_config(d):
def error(self, message):
raise bb.build.FuncFailed(message)
+ def list_extend(iterable, length, obj = None):
+ """Ensure that iterable is the specified length by extending with obj
+ and return it as a list"""
+ return list(itertools.islice(itertools.chain(iterable, itertools.repeat(obj)), length))
+
# We parse and rewrite the useradd components
def rewrite_useradd(params):
# The following comes from --help on useradd from shadow
@@ -84,7 +90,10 @@ def update_useradd_static_config(d):
for line in f:
if line.startswith('#'):
continue
- field = line.rstrip().split(":")
+ # Make sure there always are at least seven elements in
+ # the field list. This allows for leaving out trailing
+ # colons in the passwd file.
+ field = list_extend(line.rstrip().split(":"), 7)
if field[0] == uaargs.LOGIN:
if uaargs.uid and field[2] and (uaargs.uid != field[2]):
bb.warn("%s: Changing username %s's uid from (%s) to (%s), verify configuration files!" % (d.getVar('PN', True), uaargs.LOGIN, uaargs.uid, field[2]))
@@ -220,7 +229,10 @@ def update_useradd_static_config(d):
for line in f:
if line.startswith('#'):
continue
- field = line.rstrip().split(":")
+ # Make sure there always are at least four elements in
+ # the field list. This allows for leaving out trailing
+ # colons in the group file.
+ field = list_extend(line.rstrip().split(":"), 4)
if field[0] == gaargs.GROUP and field[2]:
if gaargs.gid and (gaargs.gid != field[2]):
bb.warn("%s: Changing groupname %s's gid from (%s) to (%s), verify configuration files!" % (d.getVar('PN', True), gaargs.GROUP, gaargs.gid, field[2]))
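[Illustration] list_extend() pads a passwd or group line out to a fixed number of fields so trailing colons may be omitted. A standalone sketch of the same itertools idiom applied to an example passwd line:

# Sketch: pad a colon-separated line to a fixed field count, as the
# list_extend() helper above does for /etc/passwd-style input.
import itertools

def list_extend(iterable, length, obj=None):
    return list(itertools.islice(itertools.chain(iterable, itertools.repeat(obj)),
                                 length))

line = "daemon:x:1:1"  # trailing fields left out
print(list_extend(line.split(":"), 7))
# -> ['daemon', 'x', '1', '1', None, None, None]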
diff --git a/yocto-poky/meta/classes/useradd_base.bbclass b/yocto-poky/meta/classes/useradd_base.bbclass
index 802f3a108..ab3cd353f 100644
--- a/yocto-poky/meta/classes/useradd_base.bbclass
+++ b/yocto-poky/meta/classes/useradd_base.bbclass
@@ -104,7 +104,7 @@ perform_groupmems () {
sleep $count
done
else
- bbwarn "${PN}: group $groupname already contains $username, not re-adding it"
+ bbnote "${PN}: group $groupname already contains $username, not re-adding it"
fi
if test "x$gshadow" = "xno"; then
rm -f $rootdir${sysconfdir}/gshadow
@@ -136,7 +136,7 @@ perform_groupdel () {
sleep $count
done
else
- bbwarn "${PN}: group $groupname doesn't exist, not removing it"
+ bbnote "${PN}: group $groupname doesn't exist, not removing it"
fi
}
@@ -164,7 +164,7 @@ perform_userdel () {
sleep $count
done
else
- bbwarn "${PN}: user $username doesn't exist, not removing it"
+ bbnote "${PN}: user $username doesn't exist, not removing it"
fi
}
diff --git a/yocto-poky/meta/conf/bitbake.conf b/yocto-poky/meta/conf/bitbake.conf
index d8a66f96c..06971da81 100644
--- a/yocto-poky/meta/conf/bitbake.conf
+++ b/yocto-poky/meta/conf/bitbake.conf
@@ -166,6 +166,7 @@ DATETIME = "${DATE}${TIME}"
ASSUME_PROVIDED = "\
bzip2-native \
chrpath-native \
+ file-native \
git-native \
grep-native \
diffstat-native \
@@ -566,7 +567,7 @@ BBLAYERS_FETCH_DIR ??= "${COREBASE}"
# Download locations and utilities.
##################################################################
-APACHE_MIRROR = "http://www.apache.org/dist"
+APACHE_MIRROR = "http://archive.apache.org/dist"
DEBIAN_MIRROR = "ftp://ftp.debian.org/debian/pool"
GENTOO_MIRROR = "http://distfiles.gentoo.org/distfiles"
GNOME_GIT = "git://git.gnome.org"
@@ -806,7 +807,7 @@ BB_SIGNATURE_EXCLUDE_FLAGS ?= "doc deps depends \
lockfiles type vardepsexclude vardeps vardepvalue vardepvalueexclude \
file-checksums python func task export unexport noexec nostamp dirs cleandirs \
sstate-lockfile-shared prefuncs postfuncs export_func deptask rdeptask \
- recrdeptask nodeprrecs stamp-base stamp-extra-info"
+ recrdeptask nodeprrecs stamp-base stamp-extra-info sstate-outputdirs"
MLPREFIX ??= ""
MULTILIB_VARIANTS ??= ""
diff --git a/yocto-poky/meta/conf/distro/include/default-distrovars.inc b/yocto-poky/meta/conf/distro/include/default-distrovars.inc
index 29b762b22..836690492 100644
--- a/yocto-poky/meta/conf/distro/include/default-distrovars.inc
+++ b/yocto-poky/meta/conf/distro/include/default-distrovars.inc
@@ -47,3 +47,6 @@ DISTRO_VERSION ??= "nodistro.0"
# Missing checksums should raise an error
BB_STRICT_CHECKSUM = "1"
+
+GTK2DISTROFEATURES = "directfb x11"
+GTK3DISTROFEATURES = "x11 wayland"
diff --git a/yocto-poky/meta/conf/distro/include/distro_alias.inc b/yocto-poky/meta/conf/distro/include/distro_alias.inc
index bd3da9ce9..ca333c88c 100644
--- a/yocto-poky/meta/conf/distro/include/distro_alias.inc
+++ b/yocto-poky/meta/conf/distro/include/distro_alias.inc
@@ -14,6 +14,7 @@ DISTRO_PN_ALIAS_pn-abiword-embedded = "Fedora=abiword Ubuntu=abiword"
DISTRO_PN_ALIAS_pn-adt-installer = "Intel"
DISTRO_PN_ALIAS_pn-alsa-state = "OE-Core"
DISTRO_PN_ALIAS_pn-alsa-utils-alsaconf = "OE-Core"
+DISTRO_PN_ALIAS_pn-alsa-utils-scripts = "OE-Core"
DISTRO_PN_ALIAS_pn-atk = "Fedora=atk OpenSuSE=atk"
DISTRO_PN_ALIAS_pn-augeas = "Ubuntu=libaugeas0 Debian=libaugeas0"
DISTRO_PN_ALIAS_pn-avahi-ui = "Ubuntu=avahi-discover Debian=avahi-discover"
@@ -43,6 +44,7 @@ DISTRO_PN_ALIAS_pn-clutter = "Fedora=clutter OpenSuse=clutter Ubuntu=clutter-1.0
DISTRO_PN_ALIAS_pn-clutter-1.8 = "Fedora=clutter OpenSuse=clutter Ubuntu=clutter-1.0 Mandriva=clutter Debian=clutter"
DISTRO_PN_ALIAS_pn-clutter-gst-1.0 = "Debian=clutter-gst Ubuntu=clutter-gst Fedora=clutter-gst"
DISTRO_PN_ALIAS_pn-clutter-gst-1.8 = "Fedora=clutter-gst Debian=libclutter-gst"
+DISTRO_PN_ALIAS_pn-clutter-gst-3.0 = "Ubuntu=libclutter-gst Debian=libclutter-gst"
DISTRO_PN_ALIAS_pn-clutter-gtk-1.0 = "Debian=clutter-gtk Ubuntu=clutter-gtk Fedora=clutter-gtk"
DISTRO_PN_ALIAS_pn-clutter-gtk-1.8 = "Fedora=clutter-gtk OpenSuSE=clutter-gtk Ubuntu=clutter-gtk-0.10 Mandriva=clutter-gtk Debian=clutter-gtk"
DISTRO_PN_ALIAS_pn-cogl-1.0 = "Debian=cogl Ubuntu=cogl Fedora=cogl"
@@ -220,6 +222,7 @@ DISTRO_PN_ALIAS_pn-libmatchbox = "Ubuntu=libmatchbox Fedora=libmatchbox"
DISTRO_PN_ALIAS_pn-libmpc = "Fedora=libmpc OpenSuse=libmpc2"
DISTRO_PN_ALIAS_pn-libnewt = "Debian=libnewt0.52 Fedora=newt"
DISTRO_PN_ALIAS_pn-libnewt-python = "Ubuntu=python-newt Fedora=newt-python"
+DISTRO_PN_ALIAS_pn-libnl = "Mandriva=libnl Fedora=libnl"
DISTRO_PN_ALIAS_pn-libnss-mdns = "Meego=nss-mdns OpenSuSE=nss-mdns Ubuntu=nss-mdns Mandriva=nss_mdns Debian=nss-mdns"
DISTRO_PN_ALIAS_pn-libomxil = "OSPDT upstream=http://omxil.sourceforge.net/"
DISTRO_PN_ALIAS_pn-libowl = "Debian=owl OpenedHand"
@@ -383,14 +386,18 @@ DISTRO_PN_ALIAS_pn-printproto = "Debian=x11proto-print-dev Ubuntu=x11proto-print
DISTRO_PN_ALIAS_pn-pseudo = "Windriver"
DISTRO_PN_ALIAS_pn-psplash = "OpenedHand"
DISTRO_PN_ALIAS_pn-ptest-runner = "OE-Core"
+DISTRO_PN_ALIAS_pn-pulseaudio-client-conf-sato = "OE-Core"
DISTRO_PN_ALIAS_pn-puzzles = "Debian=sgt-puzzles Fedora=puzzles"
DISTRO_PN_ALIAS_pn-python3 = "Fedora=python3 Debian=python3.2"
DISTRO_PN_ALIAS_pn-python3-distribute = "Debian=python3-setuptools Fedora=python3-setuptools"
+DISTRO_PN_ALIAS_pn-python3-pip = "OpenSuSE=python3-pip Debian=python3-pip"
+DISTRO_PN_ALIAS_pn-python3-setuptools = "OpenSuSE=python3-setuptools Debian=python3-setuptools"
DISTRO_PN_ALIAS_pn-python-ZSI = "OE-Core"
DISTRO_PN_ALIAS_pn-python-argparse = "Fedora=python-argparse OpenSuSE=python-argparse"
DISTRO_PN_ALIAS_pn-python-dbus = "Ubuntu=python-dbus Debian=python-dbus Mandriva=python-dbus"
DISTRO_PN_ALIAS_pn-python-distribute = "Opensuse=python-setuptools Fedora=python-setuptools"
DISTRO_PN_ALIAS_pn-python-git = "Debian=python-git Fedora=GitPython"
+DISTRO_PN_ALIAS_pn-python-imaging = "Mandriva=python-imaging Debian=python-imaging"
DISTRO_PN_ALIAS_pn-python-mako = "Fedora=python-mako Opensuse=python-Mako"
DISTRO_PN_ALIAS_pn-python-pycairo = "Meego=pycairo Fedora=pycairo Ubuntu=pycairo Debian=pycairo"
DISTRO_PN_ALIAS_pn-python-pycurl = "Debian=python-pycurl Ubuntu=python-pycurl"
@@ -436,7 +443,9 @@ DISTRO_PN_ALIAS_pn-shadow-sysroot = "Ubuntu=shadow Fedora=shadow"
DISTRO_PN_ALIAS_pn-shasum = "OE-Core"
DISTRO_PN_ALIAS_pn-shutdown-desktop = "OpenedHand"
DISTRO_PN_ALIAS_pn-signgp = "OE-Core"
+DISTRO_PN_ALIAS_pn-speexdsp = "Ubuntu=libspeexdsp1 Fedora=speexdsp"
DISTRO_PN_ALIAS_pn-stat = "Debian=coreutils Fedora=coreutils"
+DISTRO_PN_ALIAS_pn-stress = "Debian=stress Fedora=stress"
DISTRO_PN_ALIAS_pn-swabber = "OE-Core"
DISTRO_PN_ALIAS_pn-sysklogd = "Debian=sysklogd Mandriva=sysklogd"
DISTRO_PN_ALIAS_pn-sysprof = "Fedora=sysprof Debian=sysprof"
@@ -460,6 +469,7 @@ DISTRO_PN_ALIAS_pn-ttf-bitstream-vera = "Debian=ttf-bitstream-vera Ubuntu=ttf-bi
DISTRO_PN_ALIAS_pn-tzcode = "OSPDT"
DISTRO_PN_ALIAS_pn-u-boot-fw-utils = "Ubuntu=u-boot-tools Debian=u-boot-tools"
DISTRO_PN_ALIAS_pn-u-boot-mkimage = "Ubuntu=uboot-mkimage Debian=uboot-mkimage"
+DISTRO_PN_ALIAS_pn-udev = "Mandriva=udev Fedora=udev"
DISTRO_PN_ALIAS_pn-udev-extraconf = "OE-Core"
DISTRO_PN_ALIAS_pn-unfs3 = "Debian=unfs3 Fedora=unfs3"
DISTRO_PN_ALIAS_pn-unfs-server = "OE-Core"
diff --git a/yocto-poky/meta/conf/documentation.conf b/yocto-poky/meta/conf/documentation.conf
index 075ab6a33..845559a5e 100644
--- a/yocto-poky/meta/conf/documentation.conf
+++ b/yocto-poky/meta/conf/documentation.conf
@@ -212,6 +212,8 @@ IMAGE_BOOT_FILES[doc] = "Whitespace separated list of files from ${DEPLOY_DIR_IM
IMAGE_CLASSES[doc] = "A list of classes that all images should inherit."
IMAGE_FEATURES[doc] = "The primary list of features to include in an image. Configure this variable in an image recipe."
IMAGE_FSTYPES[doc] = "Formats of root filesystem images that you want to have created."
+IMAGE_FSTYPES_DEBUGFS[doc] = "Formats of the debug root filesystem images that you want to have created."
+IMAGE_GEN_DEBUGFS[doc] = "When set to '1', generate a companion debug object/source filesystem image."
IMAGE_INSTALL[doc] = "Specifies the packages to install into an image. Image recipes set IMAGE_INSTALL to specify the packages to install into an image through image.bbclass."
IMAGE_LINGUAS[doc] = "Specifies the list of locales to install into the image during the root filesystem construction process."
IMAGE_NAME[doc] = "The name of the output image files minus the extension."
diff --git a/yocto-poky/meta/conf/layer.conf b/yocto-poky/meta/conf/layer.conf
index 9e800181c..977363225 100644
--- a/yocto-poky/meta/conf/layer.conf
+++ b/yocto-poky/meta/conf/layer.conf
@@ -41,9 +41,7 @@ SIGGEN_EXCLUDERECIPES_ABISAFE += " \
"
SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS += " \
- gcc-cross-${TARGET_ARCH}->glibc \
- gcc-cross-${TARGET_ARCH}->musl \
- gcc-cross-${TARGET_ARCH}->uclibc \
+ gcc-cross-${TARGET_ARCH}->virtual/libc \
gcc-cross-${TARGET_ARCH}->linux-libc-headers \
ppp-dialin->ppp \
resolvconf->bash \
@@ -56,5 +54,6 @@ SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS += " \
font-alias->font-util \
weston-init->weston \
weston-init->kbd \
+ oprofile->virtual/kernel \
"
diff --git a/yocto-poky/meta/conf/machine/include/qemu.inc b/yocto-poky/meta/conf/machine/include/qemu.inc
index d5c0b376c..16e94691d 100644
--- a/yocto-poky/meta/conf/machine/include/qemu.inc
+++ b/yocto-poky/meta/conf/machine/include/qemu.inc
@@ -5,7 +5,7 @@ PREFERRED_PROVIDER_virtual/libgles1 ?= "mesa"
PREFERRED_PROVIDER_virtual/libgles2 ?= "mesa"
XSERVER ?= "xserver-xorg \
- ${@bb.utils.contains('DISTRO_FEATURES', 'opengl', 'mesa-driver-swrast', '', d)} \
+ ${@bb.utils.contains('DISTRO_FEATURES', 'opengl', 'mesa-driver-swrast xserver-xorg-extension-glx', '', d)} \
xf86-input-evdev \
xf86-input-mouse \
xf86-video-fbdev \
diff --git a/yocto-poky/meta/conf/machine/include/tune-thunderx.inc b/yocto-poky/meta/conf/machine/include/tune-thunderx.inc
new file mode 100644
index 000000000..40de61dc2
--- /dev/null
+++ b/yocto-poky/meta/conf/machine/include/tune-thunderx.inc
@@ -0,0 +1,19 @@
+require conf/machine/include/arm/arch-armv8.inc
+
+DEFAULTTUNE ?= "thunderx"
+AVAILTUNES += "thunderx thunderx_be"
+
+TUNEVALID[thunderx] = "Enable instructions for Cavium ThunderX"
+
+TUNE_CCARGS .= "${@bb.utils.contains("TUNE_FEATURES", "thunderx", " -mcpu=thunderx ", "",d)}"
+
+ARMPKGARCH_tune-thunderx ?= "thunderx"
+ARMPKGARCH_tune-thunderx_be ?= "thunderx_be"
+
+TUNE_FEATURES_tune-thunderx ?= "${TUNE_FEATURES_tune-aarch64} thunderx"
+TUNE_FEATURES_tune-thunderx_be ?= "${TUNE_FEATURES_tune-thunderx} bigendian"
+BASE_LIB_tune-thunderx = "lib64"
+BASE_LIB_tune-thunderx_be = "lib64"
+
+PACKAGE_EXTRA_ARCHS_tune-thunderx = "aarch64 thunderx"
+PACKAGE_EXTRA_ARCHS_tune-thunderx_be = "aarch64_be thunderx_be"
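
Note: the new tune file above relies on bb.utils.contains() to append -mcpu=thunderx only when "thunderx" appears in TUNE_FEATURES. The following is a minimal standalone sketch of that selection semantics only; the helper name and string-based interface are illustrative, not BitBake's real API (which operates on the datastore).

    # Sketch of the TUNE_FEATURES -> TUNE_CCARGS selection used in tune-thunderx.inc.
    # Hypothetical helper working on plain strings, for illustration only.
    def contains(feature_list, checkvalues, truevalue, falsevalue):
        features = set(feature_list.split())
        if set(checkvalues.split()).issubset(features):
            return truevalue
        return falsevalue

    tune_features = "aarch64 thunderx"   # e.g. TUNE_FEATURES_tune-thunderx
    print(contains(tune_features, "thunderx", " -mcpu=thunderx ", ""))
    # -> " -mcpu=thunderx "
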
diff --git a/yocto-poky/meta/conf/machine/qemux86-64.conf b/yocto-poky/meta/conf/machine/qemux86-64.conf
index a4fd43ce1..489194aa6 100644
--- a/yocto-poky/meta/conf/machine/qemux86-64.conf
+++ b/yocto-poky/meta/conf/machine/qemux86-64.conf
@@ -16,7 +16,7 @@ KERNEL_IMAGETYPE = "bzImage"
SERIAL_CONSOLES = "115200;ttyS0 115200;ttyS1"
XSERVER = "xserver-xorg \
- ${@bb.utils.contains('DISTRO_FEATURES', 'opengl', 'mesa-driver-swrast', '', d)} \
+ ${@bb.utils.contains('DISTRO_FEATURES', 'opengl', 'mesa-driver-swrast xserver-xorg-extension-glx', '', d)} \
xf86-input-vmmouse \
xf86-input-keyboard \
xf86-input-evdev \
diff --git a/yocto-poky/meta/conf/machine/qemux86.conf b/yocto-poky/meta/conf/machine/qemux86.conf
index 96cea66b4..3cc809158 100644
--- a/yocto-poky/meta/conf/machine/qemux86.conf
+++ b/yocto-poky/meta/conf/machine/qemux86.conf
@@ -15,7 +15,7 @@ KERNEL_IMAGETYPE = "bzImage"
SERIAL_CONSOLES = "115200;ttyS0 115200;ttyS1"
XSERVER = "xserver-xorg \
- ${@bb.utils.contains('DISTRO_FEATURES', 'opengl', 'mesa-driver-swrast', '', d)} \
+ ${@bb.utils.contains('DISTRO_FEATURES', 'opengl', 'mesa-driver-swrast xserver-xorg-extension-glx', '', d)} \
xf86-input-vmmouse \
xf86-input-keyboard \
xf86-input-evdev \
diff --git a/yocto-poky/meta/conf/multilib.conf b/yocto-poky/meta/conf/multilib.conf
index 89a8e9096..50303fb5d 100644
--- a/yocto-poky/meta/conf/multilib.conf
+++ b/yocto-poky/meta/conf/multilib.conf
@@ -2,7 +2,7 @@
baselib = "${@d.getVar('BASE_LIB_tune-' + (d.getVar('DEFAULTTUNE', True) or 'INVALID'), True) or d.getVar('BASELIB', True)}"
MULTILIB_VARIANTS = "${@extend_variants(d,'MULTILIBS','multilib')}"
-MULTILIB_SAVE_VARNAME = "DEFAULTTUNE TARGET_ARCH TARGET_SYS"
+MULTILIB_SAVE_VARNAME = "DEFAULTTUNE TARGET_ARCH TARGET_SYS TARGET_VENDOR"
MULTILIBS ??= "multilib:lib32"
@@ -24,3 +24,4 @@ OPKG_ARGS_append = " --force-maintainer --force-overwrite"
# inside the multilib sysroot. Fix this by explicitly adding the MACHINE's
# architecture-independent pkgconfig location to PKG_CONFIG_PATH.
PKG_CONFIG_PATH .= ":${STAGING_DIR}/${MACHINE}${datadir}/pkgconfig"
+PKG_CONFIG_PATH[vardepsexclude] = "MACHINE"
diff --git a/yocto-poky/meta/files/common-licenses/GFDL-1.1 b/yocto-poky/meta/files/common-licenses/GFDL-1.1
index 4a0fe1c8d..1d7422315 100644
--- a/yocto-poky/meta/files/common-licenses/GFDL-1.1
+++ b/yocto-poky/meta/files/common-licenses/GFDL-1.1
@@ -1,8 +1,7 @@
- GNU Free Documentation License
- Version 1.2, November 2002
+ GNU Free Documentation License
+ Version 1.1, March 2000
-
- Copyright (C) 2000,2001,2002 Free Software Foundation, Inc.
+ Copyright (C) 2000 Free Software Foundation, Inc.
51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
@@ -11,12 +10,12 @@
0. PREAMBLE
The purpose of this License is to make a manual, textbook, or other
-functional and useful document "free" in the sense of freedom: to
-assure everyone the effective freedom to copy and redistribute it,
-with or without modifying it, either commercially or noncommercially.
-Secondarily, this License preserves for the author and publisher a way
-to get credit for their work, while not being considered responsible
-for modifications made by others.
+written document "free" in the sense of freedom: to assure everyone
+the effective freedom to copy and redistribute it, with or without
+modifying it, either commercially or noncommercially. Secondarily,
+this License preserves for the author and publisher a way to get
+credit for their work, while not being considered responsible for
+modifications made by others.
This License is a kind of "copyleft", which means that derivative
works of the document must themselves be free in the same sense. It
@@ -34,15 +33,11 @@ principally for works whose purpose is instruction or reference.
1. APPLICABILITY AND DEFINITIONS
-This License applies to any manual or other work, in any medium, that
-contains a notice placed by the copyright holder saying it can be
-distributed under the terms of this License. Such a notice grants a
-world-wide, royalty-free license, unlimited in duration, to use that
-work under the conditions stated herein. The "Document", below,
-refers to any such manual or work. Any member of the public is a
-licensee, and is addressed as "you". You accept the license if you
-copy, modify or distribute the work in a way requiring permission
-under copyright law.
+This License applies to any manual or other work that contains a
+notice placed by the copyright holder saying it can be distributed
+under the terms of this License. The "Document", below, refers to any
+such manual or work. Any member of the public is a licensee, and is
+addressed as "you".
A "Modified Version" of the Document means any work containing the
Document or a portion of it, either copied verbatim, or with
@@ -52,7 +47,7 @@ A "Secondary Section" is a named appendix or a front-matter section of
the Document that deals exclusively with the relationship of the
publishers or authors of the Document to the Document's overall subject
(or to related matters) and contains nothing that could fall directly
-within that overall subject. (Thus, if the Document is in part a
+within that overall subject. (For example, if the Document is in part a
textbook of mathematics, a Secondary Section may not explain any
mathematics.) The relationship could be a matter of historical
connection with the subject or with related matters, or of legal,
@@ -61,40 +56,33 @@ them.
The "Invariant Sections" are certain Secondary Sections whose titles
are designated, as being those of Invariant Sections, in the notice
-that says that the Document is released under this License. If a
-section does not fit the above definition of Secondary then it is not
-allowed to be designated as Invariant. The Document may contain zero
-Invariant Sections. If the Document does not identify any Invariant
-Sections then there are none.
+that says that the Document is released under this License.
The "Cover Texts" are certain short passages of text that are listed,
as Front-Cover Texts or Back-Cover Texts, in the notice that says that
-the Document is released under this License. A Front-Cover Text may
-be at most 5 words, and a Back-Cover Text may be at most 25 words.
+the Document is released under this License.
A "Transparent" copy of the Document means a machine-readable copy,
represented in a format whose specification is available to the
-general public, that is suitable for revising the document
+general public, whose contents can be viewed and edited directly and
straightforwardly with generic text editors or (for images composed of
pixels) generic paint programs or (for drawings) some widely available
drawing editor, and that is suitable for input to text formatters or
for automatic translation to a variety of formats suitable for input
to text formatters. A copy made in an otherwise Transparent file
-format whose markup, or absence of markup, has been arranged to thwart
-or discourage subsequent modification by readers is not Transparent.
-An image format is not Transparent if used for any substantial amount
-of text. A copy that is not "Transparent" is called "Opaque".
+format whose markup has been designed to thwart or discourage
+subsequent modification by readers is not Transparent. A copy that is
+not "Transparent" is called "Opaque".
Examples of suitable formats for Transparent copies include plain
ASCII without markup, Texinfo input format, LaTeX input format, SGML
or XML using a publicly available DTD, and standard-conforming simple
-HTML, PostScript or PDF designed for human modification. Examples of
-transparent image formats include PNG, XCF and JPG. Opaque formats
-include proprietary formats that can be read and edited only by
-proprietary word processors, SGML or XML for which the DTD and/or
+HTML designed for human modification. Opaque formats include
+PostScript, PDF, proprietary formats that can be read and edited only
+by proprietary word processors, SGML or XML for which the DTD and/or
processing tools are not generally available, and the
-machine-generated HTML, PostScript or PDF produced by some word
-processors for output purposes only.
+machine-generated HTML produced by some word processors for output
+purposes only.
The "Title Page" means, for a printed book, the title page itself,
plus such following pages as are needed to hold, legibly, the material
@@ -103,21 +91,6 @@ formats which do not have any title page as such, "Title Page" means
the text near the most prominent appearance of the work's title,
preceding the beginning of the body of the text.
-A section "Entitled XYZ" means a named subunit of the Document whose
-title either is precisely XYZ or contains XYZ in parentheses following
-text that translates XYZ in another language. (Here XYZ stands for a
-specific section name mentioned below, such as "Acknowledgements",
-"Dedications", "Endorsements", or "History".) To "Preserve the Title"
-of such a section when you modify the Document means that it remains a
-section "Entitled XYZ" according to this definition.
-
-The Document may include Warranty Disclaimers next to the notice which
-states that this License applies to the Document. These Warranty
-Disclaimers are considered to be included by reference in this
-License, but only as regards disclaiming warranties: any other
-implication that these Warranty Disclaimers may have is void and has
-no effect on the meaning of this License.
-
2. VERBATIM COPYING
@@ -137,10 +110,9 @@ you may publicly display copies.
3. COPYING IN QUANTITY
-If you publish printed copies (or copies in media that commonly have
-printed covers) of the Document, numbering more than 100, and the
-Document's license notice requires Cover Texts, you must enclose the
-copies in covers that carry, clearly and legibly, all these Cover
+If you publish printed copies of the Document numbering more than 100,
+and the Document's license notice requires Cover Texts, you must enclose
+the copies in covers that carry, clearly and legibly, all these Cover
Texts: Front-Cover Texts on the front cover, and Back-Cover Texts on
the back cover. Both covers must also clearly and legibly identify
you as the publisher of these copies. The front cover must present
@@ -158,15 +130,16 @@ pages.
If you publish or distribute Opaque copies of the Document numbering
more than 100, you must either include a machine-readable Transparent
copy along with each Opaque copy, or state in or with each Opaque copy
-a computer-network location from which the general network-using
-public has access to download using public-standard network protocols
-a complete Transparent copy of the Document, free of added material.
-If you use the latter option, you must take reasonably prudent steps,
-when you begin distribution of Opaque copies in quantity, to ensure
-that this Transparent copy will remain thus accessible at the stated
-location until at least one year after the last time you distribute an
-Opaque copy (directly or through your agents or retailers) of that
-edition to the public.
+a publicly-accessible computer-network location containing a complete
+Transparent copy of the Document, free of added material, which the
+general network-using public has access to download anonymously at no
+charge using public-standard network protocols. If you use the latter
+option, you must take reasonably prudent steps, when you begin
+distribution of Opaque copies in quantity, to ensure that this
+Transparent copy will remain thus accessible at the stated location
+until at least one year after the last time you distribute an Opaque
+copy (directly or through your agents or retailers) of that edition to
+the public.
It is requested, but not required, that you contact the authors of the
Document well before redistributing any large number of copies, to give
@@ -190,8 +163,7 @@ A. Use in the Title Page (and on the covers, if any) a title distinct
B. List on the Title Page, as authors, one or more persons or entities
responsible for authorship of the modifications in the Modified
Version, together with at least five of the principal authors of the
- Document (all of its principal authors, if it has fewer than five),
- unless they release you from this requirement.
+ Document (all of its principal authors, if it has less than five).
C. State on the Title page the name of the publisher of the
Modified Version, as the publisher.
D. Preserve all the copyright notices of the Document.
@@ -203,10 +175,10 @@ F. Include, immediately after the copyright notices, a license notice
G. Preserve in that license notice the full lists of Invariant Sections
and required Cover Texts given in the Document's license notice.
H. Include an unaltered copy of this License.
-I. Preserve the section Entitled "History", Preserve its Title, and add
- to it an item stating at least the title, year, new authors, and
+I. Preserve the section entitled "History", and its title, and add to
+ it an item stating at least the title, year, new authors, and
publisher of the Modified Version as given on the Title Page. If
- there is no section Entitled "History" in the Document, create one
+ there is no section entitled "History" in the Document, create one
stating the title, year, authors, and publisher of the Document as
given on its Title Page, then add an item describing the Modified
Version as stated in the previous sentence.
@@ -217,18 +189,17 @@ J. Preserve the network location, if any, given in the Document for
You may omit a network location for a work that was published at
least four years before the Document itself, or if the original
publisher of the version it refers to gives permission.
-K. For any section Entitled "Acknowledgements" or "Dedications",
- Preserve the Title of the section, and preserve in the section all
- the substance and tone of each of the contributor acknowledgements
+K. In any section entitled "Acknowledgements" or "Dedications",
+ preserve the section's title, and preserve in the section all the
+ substance and tone of each of the contributor acknowledgements
and/or dedications given therein.
L. Preserve all the Invariant Sections of the Document,
unaltered in their text and in their titles. Section numbers
or the equivalent are not considered part of the section titles.
-M. Delete any section Entitled "Endorsements". Such a section
+M. Delete any section entitled "Endorsements". Such a section
may not be included in the Modified Version.
-N. Do not retitle any existing section to be Entitled "Endorsements"
+N. Do not retitle any existing section as "Endorsements"
or to conflict in title with any Invariant Section.
-O. Preserve any Warranty Disclaimers.
If the Modified Version includes new front-matter sections or
appendices that qualify as Secondary Sections and contain no material
@@ -237,7 +208,7 @@ of these sections as invariant. To do this, add their titles to the
list of Invariant Sections in the Modified Version's license notice.
These titles must be distinct from any other section titles.
-You may add a section Entitled "Endorsements", provided it contains
+You may add a section entitled "Endorsements", provided it contains
nothing but endorsements of your Modified Version by various
parties--for example, statements of peer review or that the text has
been approved by an organization as the authoritative definition of a
@@ -265,7 +236,7 @@ License, under the terms defined in section 4 above for modified
versions, provided that you include in the combination all of the
Invariant Sections of all of the original documents, unmodified, and
list them all as Invariant Sections of your combined work in its
-license notice, and that you preserve all their Warranty Disclaimers.
+license notice.
The combined work need only contain one copy of this License, and
multiple identical Invariant Sections may be replaced with a single
@@ -276,11 +247,11 @@ author or publisher of that section if known, or else a unique number.
Make the same adjustment to the section titles in the list of
Invariant Sections in the license notice of the combined work.
-In the combination, you must combine any sections Entitled "History"
-in the various original documents, forming one section Entitled
-"History"; likewise combine any sections Entitled "Acknowledgements",
-and any sections Entitled "Dedications". You must delete all sections
-Entitled "Endorsements".
+In the combination, you must combine any sections entitled "History"
+in the various original documents, forming one section entitled
+"History"; likewise combine any sections entitled "Acknowledgements",
+and any sections entitled "Dedications". You must delete all sections
+entitled "Endorsements."
6. COLLECTIONS OF DOCUMENTS
@@ -301,20 +272,18 @@ other respects regarding verbatim copying of that document.
A compilation of the Document or its derivatives with other separate
and independent documents or works, in or on a volume of a storage or
-distribution medium, is called an "aggregate" if the copyright
-resulting from the compilation is not used to limit the legal rights
-of the compilation's users beyond what the individual works permit.
-When the Document is included in an aggregate, this License does not
-apply to the other works in the aggregate which are not themselves
-derivative works of the Document.
+distribution medium, does not as a whole count as a Modified Version
+of the Document, provided no compilation copyright is claimed for the
+compilation. Such a compilation is called an "aggregate", and this
+License does not apply to the other self-contained works thus compiled
+with the Document, on account of their being thus compiled, if they
+are not themselves derivative works of the Document.
If the Cover Text requirement of section 3 is applicable to these
-copies of the Document, then if the Document is less than one half of
-the entire aggregate, the Document's Cover Texts may be placed on
-covers that bracket the Document within the aggregate, or the
-electronic equivalent of covers if the Document is in electronic form.
-Otherwise they must appear on printed covers that bracket the whole
-aggregate.
+copies of the Document, then if the Document is less than one quarter
+of the entire aggregate, the Document's Cover Texts may be placed on
+covers that surround only the Document within the aggregate.
+Otherwise they must appear on covers around the whole aggregate.
8. TRANSLATION
@@ -325,17 +294,10 @@ Replacing Invariant Sections with translations requires special
permission from their copyright holders, but you may include
translations of some or all Invariant Sections in addition to the
original versions of these Invariant Sections. You may include a
-translation of this License, and all the license notices in the
-Document, and any Warranty Disclaimers, provided that you also include
-the original English version of this License and the original versions
-of those notices and disclaimers. In case of a disagreement between
-the translation and the original version of this License or a notice
-or disclaimer, the original version will prevail.
-
-If a section in the Document is Entitled "Acknowledgements",
-"Dedications", or "History", the requirement (section 4) to Preserve
-its Title (section 1) will typically require changing the actual
-title.
+translation of this License provided that you also include the
+original English version of this License. In case of a disagreement
+between the translation and the original English version of this
+License, the original English version will prevail.
9. TERMINATION
@@ -373,23 +335,19 @@ To use this License in a document you have written, include a copy of
the License in the document and put the following copyright and
license notices just after the title page:
- Copyright (c) YEAR YOUR NAME.
- Permission is granted to copy, distribute and/or modify this document
- under the terms of the GNU Free Documentation License, Version 1.2
- or any later version published by the Free Software Foundation;
- with no Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts.
- A copy of the license is included in the section entitled "GNU
- Free Documentation License".
-
-If you have Invariant Sections, Front-Cover Texts and Back-Cover Texts,
-replace the "with...Texts." line with this:
-
- with the Invariant Sections being LIST THEIR TITLES, with the
- Front-Cover Texts being LIST, and with the Back-Cover Texts being LIST.
-
-If you have Invariant Sections without Cover Texts, or some other
-combination of the three, merge those two alternatives to suit the
-situation.
+ Copyright (c) YEAR YOUR NAME.
+ Permission is granted to copy, distribute and/or modify this document
+ under the terms of the GNU Free Documentation License, Version 1.1
+ or any later version published by the Free Software Foundation;
+ with the Invariant Sections being LIST THEIR TITLES, with the
+ Front-Cover Texts being LIST, and with the Back-Cover Texts being LIST.
+ A copy of the license is included in the section entitled "GNU
+ Free Documentation License".
+
+If you have no Invariant Sections, write "with no Invariant Sections"
+instead of saying which ones are invariant. If you have no
+Front-Cover Texts, write "no Front-Cover Texts" instead of
+"Front-Cover Texts being LIST"; likewise for Back-Cover Texts.
If your document contains nontrivial examples of program code, we
recommend releasing these examples in parallel under your choice of
diff --git a/yocto-poky/meta/files/ext-sdk-prepare.sh b/yocto-poky/meta/files/ext-sdk-prepare.sh
new file mode 100644
index 000000000..160c71e97
--- /dev/null
+++ b/yocto-poky/meta/files/ext-sdk-prepare.sh
@@ -0,0 +1,20 @@
+#!/bin/sh
+
+# Prepare the build system within the extensible SDK
+
+target_sdk_dir="$1"
+sdk_targets="$2"
+
+# Avoid actually building images during this phase, but still
+# ensure all dependencies are extracted from sstate
+# This is a hack, to be sure, but we really don't need to do this here
+for sdktarget in $sdk_targets ; do
+ bbappend=`recipetool newappend $target_sdk_dir/workspace $sdktarget`
+ printf 'python do_rootfs_forcevariable () {\n bb.utils.mkdirhier(d.getVar("IMAGE_ROOTFS", True))\n}\n' > $bbappend
+ printf 'python do_bootimg () {\n pass\n}\n' >> $bbappend
+ printf 'python do_bootdirectdisk () {\n pass\n}\n' >> $bbappend
+ printf 'python do_vmimg () {\n pass\n}\n' >> $bbappend
+ printf "Created bbappend %s\n" "$bbappend"
+done
+bitbake $sdk_targets || exit 1
+rm -rf $target_sdk_dir/workspace/appends/*
diff --git a/yocto-poky/meta/files/toolchain-shar-extract.sh b/yocto-poky/meta/files/toolchain-shar-extract.sh
index cd0a547f8..35d3c7550 100644
--- a/yocto-poky/meta/files/toolchain-shar-extract.sh
+++ b/yocto-poky/meta/files/toolchain-shar-extract.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/sh
INST_ARCH=$(uname -m | sed -e "s/i[3-6]86/ix86/" -e "s/x86[-_]64/x86_64/")
SDK_ARCH=$(echo @SDK_ARCH@ | sed -e "s/i[3-6]86/ix86/" -e "s/x86[-_]64/x86_64/")
@@ -68,8 +68,9 @@ while getopts ":yd:nDRS" OPT; do
esac
done
-echo "@SDK_TITLE@ installer version @SDK_VERSION@"
-echo "==========================================================="
+titlestr="@SDK_TITLE@ installer version @SDK_VERSION@"
+printf "%s\n" "$titlestr"
+printf "%${#titlestr}s\n" | tr " " "="
if [ $verbose = 1 ] ; then
set -x
@@ -86,7 +87,7 @@ if [ "$target_sdk_dir" = "" ]; then
if [ "$answer" = "Y" ]; then
target_sdk_dir="$DEFAULT_INSTALL_DIR"
else
- read -e -p "Enter target directory for SDK (default: $DEFAULT_INSTALL_DIR): " target_sdk_dir
+ read -p "Enter target directory for SDK (default: $DEFAULT_INSTALL_DIR): " target_sdk_dir
[ "$target_sdk_dir" = "" ] && target_sdk_dir=$DEFAULT_INSTALL_DIR
fi
fi
@@ -100,9 +101,9 @@ fi
if [ "$SDK_EXTENSIBLE" = "1" ]; then
# We're going to be running the build system, additional restrictions apply
- if echo "$target_sdk_dir" | grep -q '[+\ @]'; then
+ if echo "$target_sdk_dir" | grep -q '[+\ @$]'; then
echo "The target directory path ($target_sdk_dir) contains illegal" \
- "characters such as spaces, @ or +. Abort!"
+ "characters such as spaces, @, \$ or +. Abort!"
exit 1
fi
else
@@ -163,14 +164,25 @@ fi
payload_offset=$(($(grep -na -m1 "^MARKER:$" $0|cut -d':' -f1) + 1))
printf "Extracting SDK..."
-tail -n +$payload_offset $0| $SUDO_EXEC tar xj -C $target_sdk_dir
+tail -n +$payload_offset $0| $SUDO_EXEC tar xj -C $target_sdk_dir --checkpoint=.2500
echo "done"
printf "Setting it up..."
# fix environment paths
+real_env_setup_script=""
for env_setup_script in `ls $target_sdk_dir/environment-setup-*`; do
+ if grep -q 'OECORE_NATIVE_SYSROOT=' $env_setup_script; then
+ # Handle custom env setup scripts that are only named
+ # environment-setup-* so that they have relocation
+ # applied - what we want beyond here is the main one
+ # rather than the one that simply sorts last
+ real_env_setup_script="$env_setup_script"
+ fi
$SUDO_EXEC sed -e "s:@SDKPATH@:$target_sdk_dir:g" -i $env_setup_script
done
+if [ -n "$real_env_setup_script" ] ; then
+ env_setup_script="$real_env_setup_script"
+fi
@SDK_POST_INSTALL_COMMAND@
@@ -182,7 +194,9 @@ fi
echo "SDK has been successfully set up and is ready to be used."
echo "Each time you wish to use the SDK in a new shell session, you need to source the environment setup script e.g."
-echo " \$ . $target_sdk_dir/environment-setup-@REAL_MULTIMACH_TARGET_SYS@"
+for env_setup_script in `ls $target_sdk_dir/environment-setup-*`; do
+ echo " \$ . $env_setup_script"
+done
exit 0
diff --git a/yocto-poky/meta/files/toolchain-shar-relocate.sh b/yocto-poky/meta/files/toolchain-shar-relocate.sh
index dfb8e16d7..4ef292717 100644
--- a/yocto-poky/meta/files/toolchain-shar-relocate.sh
+++ b/yocto-poky/meta/files/toolchain-shar-relocate.sh
@@ -26,25 +26,21 @@ if [ $relocate = 1 ] ; then
fi
fi
-# replace @SDKPATH@ with the new prefix in all text files: configs/scripts/etc
+# replace @SDKPATH@ with the new prefix in all text files: configs/scripts/etc.
+# replace the host perl with SDK perl.
for replace in "$target_sdk_dir -maxdepth 1" "$native_sysroot"; do
- $SUDO_EXEC find $replace -type f -exec file '{}' \; | \
- grep ":.*\(ASCII\|script\|source\).*text" | \
- awk -F':' '{printf "\"%s\"\n", $1}' | \
- grep -v "$target_sdk_dir/environment-setup-*" | \
- $SUDO_EXEC xargs -n32 sed -i -e "s:$DEFAULT_INSTALL_DIR:$target_sdk_dir:g"
-done
+ $SUDO_EXEC find $replace -type f
+done | xargs -n100 file | grep ":.*\(ASCII\|script\|source\).*text" | \
+ awk -F':' '{printf "\"%s\"\n", $1}' | \
+ grep -v "$target_sdk_dir/environment-setup-*" | \
+ xargs -n100 $SUDO_EXEC sed -i \
+ -e "s:$DEFAULT_INSTALL_DIR:$target_sdk_dir:g" \
+ -e "s:^#! */usr/bin/perl.*:#! /usr/bin/env perl:g" \
+ -e "s: /usr/bin/perl: /usr/bin/env perl:g"
# change all symlinks pointing to @SDKPATH@
for l in $($SUDO_EXEC find $native_sysroot -type l); do
$SUDO_EXEC ln -sfn $(readlink $l|$SUDO_EXEC sed -e "s:$DEFAULT_INSTALL_DIR:$target_sdk_dir:") $l
done
-# find out all perl scripts in $native_sysroot and modify them replacing the
-# host perl with SDK perl.
-for perl_script in $($SUDO_EXEC find $native_sysroot -type f -exec grep -l "^#!.*perl" '{}' \;); do
- $SUDO_EXEC sed -i -e "s:^#! */usr/bin/perl.*:#! /usr/bin/env perl:g" -e \
- "s: /usr/bin/perl: /usr/bin/env perl:g" $perl_script
-done
-
echo done
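
Note: the toolchain-shar-relocate.sh change above folds the @SDKPATH@ relocation and the host-perl shebang rewrite into a single find | file | sed pipeline. Below is a rough Python sketch of the same idea under simplifying assumptions (binary files are skipped by decode failure rather than file(1), sudo handling and symlink fix-up are omitted, paths are illustrative).

    # Rough equivalent of the consolidated relocation pass: walk the SDK,
    # pick text files, rewrite the default install path and perl shebangs.
    import os

    def relocate(root, old_prefix, new_prefix):
        for dirpath, _, filenames in os.walk(root):
            for name in filenames:
                path = os.path.join(dirpath, name)
                if os.path.islink(path) or name.startswith('environment-setup-'):
                    continue
                try:
                    with open(path, 'r') as f:
                        data = f.read()
                except (UnicodeDecodeError, IOError):
                    continue   # skip binaries, roughly what the file(1) filter does
                newdata = data.replace(old_prefix, new_prefix)
                newdata = newdata.replace('#!/usr/bin/perl', '#!/usr/bin/env perl')
                if newdata != data:
                    with open(path, 'w') as f:
                        f.write(newdata)
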
diff --git a/yocto-poky/meta/lib/oe/copy_buildsystem.py b/yocto-poky/meta/lib/oe/copy_buildsystem.py
index 979578c41..c0e7541c0 100644
--- a/yocto-poky/meta/lib/oe/copy_buildsystem.py
+++ b/yocto-poky/meta/lib/oe/copy_buildsystem.py
@@ -14,8 +14,9 @@ def _smart_copy(src, dest):
shutil.copymode(src, dest)
class BuildSystem(object):
- def __init__(self, d):
+ def __init__(self, context, d):
self.d = d
+ self.context = context
self.layerdirs = d.getVar('BBLAYERS', True).split()
def copy_bitbake_and_layers(self, destdir):
@@ -38,7 +39,7 @@ class BuildSystem(object):
if os.path.exists(layerconf):
with open(layerconf, 'r') as f:
if f.readline().startswith("# ### workspace layer auto-generated by devtool ###"):
- bb.warn("Skipping local workspace layer %s" % layer)
+ bb.plain("NOTE: Excluding local workspace layer %s from %s" % (layer, self.context))
continue
# If the layer was already under corebase, leave it there
diff --git a/yocto-poky/meta/lib/oe/distro_check.py b/yocto-poky/meta/lib/oe/distro_check.py
index 8ed5b0ec8..f92cd2e42 100644
--- a/yocto-poky/meta/lib/oe/distro_check.py
+++ b/yocto-poky/meta/lib/oe/distro_check.py
@@ -1,7 +1,23 @@
-def get_links_from_url(url):
+from contextlib import contextmanager
+@contextmanager
+def create_socket(url, d):
+ import urllib
+ socket = urllib.urlopen(url, proxies=get_proxies(d))
+ try:
+ yield socket
+ finally:
+ socket.close()
+
+def get_proxies(d):
+ import os
+ proxykeys = ['http', 'https', 'ftp', 'ftps', 'no', 'all']
+ proxyvalues = map(lambda key: d.getVar(key+'_proxy', True), proxykeys)
+ return dict(zip(proxykeys, proxyvalues))
+
+def get_links_from_url(url, d):
"Return all the href links found on the web location"
- import urllib, sgmllib
+ import sgmllib
class LinksParser(sgmllib.SGMLParser):
def parse(self, s):
@@ -24,19 +40,18 @@ def get_links_from_url(url):
"Return the list of hyperlinks."
return self.hyperlinks
- sock = urllib.urlopen(url)
- webpage = sock.read()
- sock.close()
+ with create_socket(url,d) as sock:
+ webpage = sock.read()
linksparser = LinksParser()
linksparser.parse(webpage)
return linksparser.get_hyperlinks()
-def find_latest_numeric_release(url):
+def find_latest_numeric_release(url, d):
"Find the latest listed numeric release on the given url"
max=0
maxstr=""
- for link in get_links_from_url(url):
+ for link in get_links_from_url(url, d):
try:
release = float(link)
except:
@@ -70,7 +85,7 @@ def clean_package_list(package_list):
return set.keys()
-def get_latest_released_meego_source_package_list():
+def get_latest_released_meego_source_package_list(d):
"Returns list of all the name os packages in the latest meego distro"
package_names = []
@@ -82,11 +97,11 @@ def get_latest_released_meego_source_package_list():
package_list=clean_package_list(package_names)
return "1.0", package_list
-def get_source_package_list_from_url(url, section):
+def get_source_package_list_from_url(url, section, d):
"Return a sectioned list of package names from a URL list"
bb.note("Reading %s: %s" % (url, section))
- links = get_links_from_url(url)
+ links = get_links_from_url(url, d)
srpms = filter(is_src_rpm, links)
names_list = map(package_name_from_srpm, srpms)
@@ -96,44 +111,44 @@ def get_source_package_list_from_url(url, section):
return new_pkgs
-def get_latest_released_fedora_source_package_list():
+def get_latest_released_fedora_source_package_list(d):
"Returns list of all the name os packages in the latest fedora distro"
- latest = find_latest_numeric_release("http://archive.fedoraproject.org/pub/fedora/linux/releases/")
+ latest = find_latest_numeric_release("http://archive.fedoraproject.org/pub/fedora/linux/releases/", d)
- package_names = get_source_package_list_from_url("http://archive.fedoraproject.org/pub/fedora/linux/releases/%s/Fedora/source/SRPMS/" % latest, "main")
+ package_names = get_source_package_list_from_url("http://archive.fedoraproject.org/pub/fedora/linux/releases/%s/Fedora/source/SRPMS/" % latest, "main", d)
# package_names += get_source_package_list_from_url("http://download.fedora.redhat.com/pub/fedora/linux/releases/%s/Everything/source/SPRMS/" % latest, "everything")
- package_names += get_source_package_list_from_url("http://archive.fedoraproject.org/pub/fedora/linux/updates/%s/SRPMS/" % latest, "updates")
+ package_names += get_source_package_list_from_url("http://archive.fedoraproject.org/pub/fedora/linux/updates/%s/SRPMS/" % latest, "updates", d)
package_list=clean_package_list(package_names)
return latest, package_list
-def get_latest_released_opensuse_source_package_list():
+def get_latest_released_opensuse_source_package_list(d):
"Returns list of all the name os packages in the latest opensuse distro"
- latest = find_latest_numeric_release("http://download.opensuse.org/source/distribution/")
+ latest = find_latest_numeric_release("http://download.opensuse.org/source/distribution/",d)
- package_names = get_source_package_list_from_url("http://download.opensuse.org/source/distribution/%s/repo/oss/suse/src/" % latest, "main")
- package_names += get_source_package_list_from_url("http://download.opensuse.org/update/%s/rpm/src/" % latest, "updates")
+ package_names = get_source_package_list_from_url("http://download.opensuse.org/source/distribution/%s/repo/oss/suse/src/" % latest, "main", d)
+ package_names += get_source_package_list_from_url("http://download.opensuse.org/update/%s/rpm/src/" % latest, "updates", d)
package_list=clean_package_list(package_names)
return latest, package_list
-def get_latest_released_mandriva_source_package_list():
+def get_latest_released_mandriva_source_package_list(d):
"Returns list of all the name os packages in the latest mandriva distro"
- latest = find_latest_numeric_release("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/")
- package_names = get_source_package_list_from_url("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/%s/SRPMS/main/release/" % latest, "main")
+ latest = find_latest_numeric_release("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/", d)
+ package_names = get_source_package_list_from_url("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/%s/SRPMS/main/release/" % latest, "main", d)
# package_names += get_source_package_list_from_url("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/%s/SRPMS/contrib/release/" % latest, "contrib")
- package_names += get_source_package_list_from_url("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/%s/SRPMS/main/updates/" % latest, "updates")
+ package_names += get_source_package_list_from_url("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/%s/SRPMS/main/updates/" % latest, "updates", d)
package_list=clean_package_list(package_names)
return latest, package_list
-def find_latest_debian_release(url):
+def find_latest_debian_release(url, d):
"Find the latest listed debian release on the given url"
releases = []
- for link in get_links_from_url(url):
+ for link in get_links_from_url(url, d):
if link[:6] == "Debian":
if ';' not in link:
releases.append(link)
@@ -143,16 +158,15 @@ def find_latest_debian_release(url):
except:
return "_NotFound_"
-def get_debian_style_source_package_list(url, section):
+def get_debian_style_source_package_list(url, section, d):
"Return the list of package-names stored in the debian style Sources.gz file"
- import urllib
- sock = urllib.urlopen(url)
- import tempfile
- tmpfile = tempfile.NamedTemporaryFile(mode='wb', prefix='oecore.', suffix='.tmp', delete=False)
- tmpfilename=tmpfile.name
- tmpfile.write(sock.read())
- sock.close()
- tmpfile.close()
+ with create_socket(url,d) as sock:
+ webpage = sock.read()
+ import tempfile
+ tmpfile = tempfile.NamedTemporaryFile(mode='wb', prefix='oecore.', suffix='.tmp', delete=False)
+ tmpfilename=tmpfile.name
+ tmpfile.write(sock.read())
+ tmpfile.close()
import gzip
bb.note("Reading %s: %s" % (url, section))
@@ -165,41 +179,41 @@ def get_debian_style_source_package_list(url, section):
return package_names
-def get_latest_released_debian_source_package_list():
+def get_latest_released_debian_source_package_list(d):
"Returns list of all the name os packages in the latest debian distro"
- latest = find_latest_debian_release("http://ftp.debian.org/debian/dists/")
+ latest = find_latest_debian_release("http://ftp.debian.org/debian/dists/", d)
url = "http://ftp.debian.org/debian/dists/stable/main/source/Sources.gz"
- package_names = get_debian_style_source_package_list(url, "main")
+ package_names = get_debian_style_source_package_list(url, "main", d)
# url = "http://ftp.debian.org/debian/dists/stable/contrib/source/Sources.gz"
# package_names += get_debian_style_source_package_list(url, "contrib")
url = "http://ftp.debian.org/debian/dists/stable-proposed-updates/main/source/Sources.gz"
- package_names += get_debian_style_source_package_list(url, "updates")
+ package_names += get_debian_style_source_package_list(url, "updates", d)
package_list=clean_package_list(package_names)
return latest, package_list
-def find_latest_ubuntu_release(url):
+def find_latest_ubuntu_release(url, d):
"Find the latest listed ubuntu release on the given url"
url += "?C=M;O=D" # Descending Sort by Last Modified
- for link in get_links_from_url(url):
+ for link in get_links_from_url(url, d):
if link[-8:] == "-updates":
return link[:-8]
return "_NotFound_"
-def get_latest_released_ubuntu_source_package_list():
+def get_latest_released_ubuntu_source_package_list(d):
"Returns list of all the name os packages in the latest ubuntu distro"
- latest = find_latest_ubuntu_release("http://archive.ubuntu.com/ubuntu/dists/")
+ latest = find_latest_ubuntu_release("http://archive.ubuntu.com/ubuntu/dists/", d)
url = "http://archive.ubuntu.com/ubuntu/dists/%s/main/source/Sources.gz" % latest
- package_names = get_debian_style_source_package_list(url, "main")
+ package_names = get_debian_style_source_package_list(url, "main", d)
# url = "http://archive.ubuntu.com/ubuntu/dists/%s/multiverse/source/Sources.gz" % latest
# package_names += get_debian_style_source_package_list(url, "multiverse")
# url = "http://archive.ubuntu.com/ubuntu/dists/%s/universe/source/Sources.gz" % latest
# package_names += get_debian_style_source_package_list(url, "universe")
url = "http://archive.ubuntu.com/ubuntu/dists/%s-updates/main/source/Sources.gz" % latest
- package_names += get_debian_style_source_package_list(url, "updates")
+ package_names += get_debian_style_source_package_list(url, "updates", d)
package_list=clean_package_list(package_names)
return latest, package_list
-def create_distro_packages_list(distro_check_dir):
+def create_distro_packages_list(distro_check_dir, d):
pkglst_dir = os.path.join(distro_check_dir, "package_lists")
if not os.path.isdir (pkglst_dir):
os.makedirs(pkglst_dir)
@@ -220,7 +234,7 @@ def create_distro_packages_list(distro_check_dir):
begin = datetime.now()
for distro in per_distro_functions:
name = distro[0]
- release, package_list = distro[1]()
+ release, package_list = distro[1](d)
bb.note("Distro: %s, Latest Release: %s, # src packages: %d" % (name, release, len(package_list)))
package_list_file = os.path.join(pkglst_dir, name + "-" + release)
f = open(package_list_file, "w+b")
@@ -231,7 +245,7 @@ def create_distro_packages_list(distro_check_dir):
delta = end - begin
bb.note("package_list generatiosn took this much time: %d seconds" % delta.seconds)
-def update_distro_data(distro_check_dir, datetime):
+def update_distro_data(distro_check_dir, datetime, d):
"""
If distro packages list data is old then rebuild it.
The operations has to be protected by a lock so that
@@ -258,7 +272,7 @@ def update_distro_data(distro_check_dir, datetime):
if saved_datetime[0:8] != datetime[0:8]:
bb.note("The build datetime did not match: saved:%s current:%s" % (saved_datetime, datetime))
bb.note("Regenerating distro package lists")
- create_distro_packages_list(distro_check_dir)
+ create_distro_packages_list(distro_check_dir, d)
f.seek(0)
f.write(datetime)
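
Note: the distro_check.py changes above thread the datastore 'd' through every helper so that proxy settings are honoured: get_proxies() builds a dict from the *_proxy variables and create_socket() wraps urllib.urlopen in a context manager. A short usage sketch (Python 2 urllib, as in this module; the FakeData datastore stand-in is hypothetical):

    # Usage sketch for the proxy-aware fetch added above (Python 2).
    import urllib

    class FakeData(object):              # stand-in for the BitBake datastore 'd'
        def __init__(self, values):
            self.values = values
        def getVar(self, name, expand=True):
            return self.values.get(name)

    d = FakeData({'http_proxy': 'http://proxy.example.com:8080'})

    proxykeys = ['http', 'https', 'ftp', 'ftps', 'no', 'all']
    proxies = dict(zip(proxykeys, [d.getVar(k + '_proxy', True) for k in proxykeys]))

    # Equivalent of: with create_socket(url, d) as sock: webpage = sock.read()
    sock = urllib.urlopen("http://archive.fedoraproject.org/pub/fedora/linux/releases/",
                          proxies=proxies)
    try:
        webpage = sock.read()
    finally:
        sock.close()
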
diff --git a/yocto-poky/meta/lib/oe/image.py b/yocto-poky/meta/lib/oe/image.py
index f9e9bfd58..b9eb3de5a 100644
--- a/yocto-poky/meta/lib/oe/image.py
+++ b/yocto-poky/meta/lib/oe/image.py
@@ -5,7 +5,7 @@ import multiprocessing
def generate_image(arg):
- (type, subimages, create_img_cmd) = arg
+ (type, subimages, create_img_cmd, sprefix) = arg
bb.note("Running image creation script for %s: %s ..." %
(type, create_img_cmd))
@@ -54,14 +54,16 @@ class ImageDepGraph(object):
base_type = self._image_base_type(node)
deps = (self.d.getVar('IMAGE_TYPEDEP_' + node, True) or "")
base_deps = (self.d.getVar('IMAGE_TYPEDEP_' + base_type, True) or "")
- if deps != "" or base_deps != "":
- graph[node] = deps
- for dep in deps.split() + base_deps.split():
- if not dep in graph:
- add_node(dep)
- else:
- graph[node] = ""
+ graph[node] = ""
+ for dep in deps.split() + base_deps.split():
+ if not dep in graph[node]:
+ if graph[node] != "":
+ graph[node] += " "
+ graph[node] += dep
+
+ if not dep in graph:
+ add_node(dep)
for fstype in image_fstypes:
add_node(fstype)
@@ -264,9 +266,9 @@ class Image(ImageDepGraph):
return (alltypes, filtered_groups, cimages)
- def _write_script(self, type, cmds):
+ def _write_script(self, type, cmds, sprefix=""):
tempdir = self.d.getVar('T', True)
- script_name = os.path.join(tempdir, "create_image." + type)
+ script_name = os.path.join(tempdir, sprefix + "create_image." + type)
rootfs_size = self._get_rootfs_size()
self.d.setVar('img_creation_func', '\n'.join(cmds))
@@ -284,7 +286,7 @@ class Image(ImageDepGraph):
return script_name
- def _get_imagecmds(self):
+ def _get_imagecmds(self, sprefix=""):
old_overrides = self.d.getVar('OVERRIDES', 0)
alltypes, fstype_groups, cimages = self._get_image_types()
@@ -320,9 +322,9 @@ class Image(ImageDepGraph):
else:
subimages.append(type)
- script_name = self._write_script(type, cmds)
+ script_name = self._write_script(type, cmds, sprefix)
- image_cmds.append((type, subimages, script_name))
+ image_cmds.append((type, subimages, script_name, sprefix))
image_cmd_groups.append(image_cmds)
@@ -355,6 +357,27 @@ class Image(ImageDepGraph):
image_cmd_groups = self._get_imagecmds()
+ # Process the debug filesystem...
+ debugfs_d = bb.data.createCopy(self.d)
+ if self.d.getVar('IMAGE_GEN_DEBUGFS', True) == "1":
+ bb.note("Processing debugfs image(s) ...")
+ orig_d = self.d
+ self.d = debugfs_d
+
+ self.d.setVar('IMAGE_ROOTFS', orig_d.getVar('IMAGE_ROOTFS', True) + '-dbg')
+ self.d.setVar('IMAGE_NAME', orig_d.getVar('IMAGE_NAME', True) + '-dbg')
+ self.d.setVar('IMAGE_LINK_NAME', orig_d.getVar('IMAGE_LINK_NAME', True) + '-dbg')
+
+ debugfs_image_fstypes = orig_d.getVar('IMAGE_FSTYPES_DEBUGFS', True)
+ if debugfs_image_fstypes:
+ self.d.setVar('IMAGE_FSTYPES', orig_d.getVar('IMAGE_FSTYPES_DEBUGFS', True))
+
+ self._remove_old_symlinks()
+
+ image_cmd_groups += self._get_imagecmds("debugfs.")
+
+ self.d = orig_d
+
self._write_wic_env()
for image_cmds in image_cmd_groups:
@@ -369,9 +392,16 @@ class Image(ImageDepGraph):
if result is not None:
bb.fatal(result)
- for image_type, subimages, script in image_cmds:
- bb.note("Creating symlinks for %s image ..." % image_type)
- self._create_symlinks(subimages)
+ for image_type, subimages, script, sprefix in image_cmds:
+ if sprefix == 'debugfs.':
+ bb.note("Creating symlinks for %s debugfs image ..." % image_type)
+ orig_d = self.d
+ self.d = debugfs_d
+ self._create_symlinks(subimages)
+ self.d = orig_d
+ else:
+ bb.note("Creating symlinks for %s image ..." % image_type)
+ self._create_symlinks(subimages)
execute_pre_post_process(self.d, post_process_cmds)
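
Note: the image.py change above makes the type-dependency graph accumulate deps from both the full image type and its base type, and (when IMAGE_GEN_DEBUGFS = "1") re-runs command generation on a copied datastore with "-dbg" names and a "debugfs." script prefix. A minimal sketch of the graph construction as now written; the IMAGE_TYPEDEP_* values are illustrative:

    # Sketch of the reworked IMAGE_TYPEDEP graph building: every requested
    # fstype gets a node and its deps are accumulated into one string.
    typedeps = {
        'live': 'ext4 iso',   # e.g. IMAGE_TYPEDEP_live
        'iso': 'ext4',
    }
    graph = {}

    def add_node(node):
        deps = typedeps.get(node, "")
        graph[node] = ""
        for dep in deps.split():
            if dep not in graph[node]:
                if graph[node] != "":
                    graph[node] += " "
                graph[node] += dep
            if dep not in graph:
                add_node(dep)

    for fstype in ['live']:
        add_node(fstype)
    print(graph)   # {'live': 'ext4 iso', 'ext4': '', 'iso': 'ext4'}
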
diff --git a/yocto-poky/meta/lib/oe/package_manager.py b/yocto-poky/meta/lib/oe/package_manager.py
index 292ed4446..b9fa6d879 100644
--- a/yocto-poky/meta/lib/oe/package_manager.py
+++ b/yocto-poky/meta/lib/oe/package_manager.py
@@ -133,8 +133,11 @@ class RpmIndexer(Indexer):
if pkgfeed_gpg_name:
repomd_file = os.path.join(arch_dir, 'repodata', 'repomd.xml')
gpg_cmd = "%s --detach-sign --armor --batch --no-tty --yes " \
- "--passphrase-file '%s' -u '%s' %s" % (gpg_bin,
- pkgfeed_gpg_pass, pkgfeed_gpg_name, repomd_file)
+ "--passphrase-file '%s' -u '%s' " % \
+ (gpg_bin, pkgfeed_gpg_pass, pkgfeed_gpg_name)
+ if self.d.getVar('GPG_PATH', True):
+ gpg_cmd += "--homedir %s " % self.d.getVar('GPG_PATH', True)
+ gpg_cmd += repomd_file
repo_sign_cmds.append(gpg_cmd)
rpm_dirs_found = True
@@ -200,6 +203,8 @@ class OpkgIndexer(Indexer):
result = oe.utils.multiprocess_exec(index_cmds, create_index)
if result:
bb.fatal('%s' % ('\n'.join(result)))
+ if self.d.getVar('PACKAGE_FEED_SIGN', True) == '1':
+ raise NotImplementedError('Package feed signing not implemented for ipk')
@@ -275,6 +280,8 @@ class DpkgIndexer(Indexer):
result = oe.utils.multiprocess_exec(index_cmds, create_index)
if result:
bb.fatal('%s' % ('\n'.join(result)))
+ if self.d.getVar('PACKAGE_FEED_SIGN', True) == '1':
+ raise NotImplementedError('Package feed signing not implemented for dpkg')
@@ -434,24 +441,30 @@ class OpkgPkgsList(PkgsList):
(self.opkg_cmd, self.opkg_args)
try:
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).strip()
+ # bb.note(cmd)
+ tmp_output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).strip()
+
except subprocess.CalledProcessError as e:
bb.fatal("Cannot get the installed packages list. Command '%s' "
"returned %d:\n%s" % (cmd, e.returncode, e.output))
- if output and format == "file":
- tmp_output = ""
- for line in output.split('\n'):
+ output = list()
+ for line in tmp_output.split('\n'):
+ if len(line.strip()) == 0:
+ continue
+ if format == "file":
pkg, pkg_file, pkg_arch = line.split()
full_path = os.path.join(self.rootfs_dir, pkg_arch, pkg_file)
if os.path.exists(full_path):
- tmp_output += "%s %s %s\n" % (pkg, full_path, pkg_arch)
+ output.append('%s %s %s' % (pkg, full_path, pkg_arch))
else:
- tmp_output += "%s %s %s\n" % (pkg, pkg_file, pkg_arch)
+ output.append('%s %s %s' % (pkg, pkg_file, pkg_arch))
+ else:
+ output.append(line)
- output = tmp_output
+ output.sort()
- return output
+ return '\n'.join(output)
class DpkgPkgsList(PkgsList):
@@ -605,12 +618,12 @@ class PackageManager(object):
cmd.extend(['-x', exclude])
try:
bb.note("Installing complementary packages ...")
+ bb.note('Running %s' % cmd)
complementary_pkgs = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
bb.fatal("Could not compute complementary packages list. Command "
"'%s' returned %d:\n%s" %
(' '.join(cmd), e.returncode, e.output))
-
self.install(complementary_pkgs.split(), attempt_only=True)
def deploy_dir_lock(self):
@@ -1050,6 +1063,35 @@ class RpmPM(PackageManager):
def update(self):
self._invoke_smart('update rpmsys')
+ def get_rdepends_recursively(self, pkgs):
+ # pkgs will be changed during the loop, so use [:] to make a copy.
+ for pkg in pkgs[:]:
+ sub_data = oe.packagedata.read_subpkgdata(pkg, self.d)
+ sub_rdep = sub_data.get("RDEPENDS_" + pkg)
+ if not sub_rdep:
+ continue
+ done = bb.utils.explode_dep_versions2(sub_rdep).keys()
+ next = done
+ # Find all the rdepends on dependency chain
+ while next:
+ new = []
+ for sub_pkg in next:
+ sub_data = oe.packagedata.read_subpkgdata(sub_pkg, self.d)
+ sub_pkg_rdep = sub_data.get("RDEPENDS_" + sub_pkg)
+ if not sub_pkg_rdep:
+ continue
+ for p in bb.utils.explode_dep_versions2(sub_pkg_rdep):
+ # Already handled, skip it.
+ if p in done or p in pkgs:
+ continue
+ # It's a new dep
+ if oe.packagedata.has_subpkgdata(p, self.d):
+ done.append(p)
+ new.append(p)
+ next = new
+ pkgs.extend(done)
+ return pkgs
+
'''
Install pkgs with smart, the pkg name is oe format
'''
@@ -1059,8 +1101,58 @@ class RpmPM(PackageManager):
bb.note("There are no packages to install")
return
bb.note("Installing the following packages: %s" % ' '.join(pkgs))
+ if not attempt_only:
+ # Pull in multilib requires since rpm may not pull in them
+ # correctly, for example,
+ # lib32-packagegroup-core-standalone-sdk-target requires
+ # lib32-libc6, but rpm may pull in libc6 rather than lib32-libc6
+ # since it doesn't know mlprefix (lib32-), bitbake knows it and
+ # can handle it well, find out the RDEPENDS on the chain will
+ # fix the problem. Both do_rootfs and do_populate_sdk have this
+ # issue.
+ # The attempt_only packages don't need this since they are
+ # based on the installed ones.
+ #
+ # Separate pkgs into two lists, one is multilib, the other one
+ # is non-multilib.
+ ml_pkgs = []
+ non_ml_pkgs = pkgs[:]
+ for pkg in pkgs:
+ for mlib in (self.d.getVar("MULTILIB_VARIANTS", True) or "").split():
+ if pkg.startswith(mlib + '-'):
+ ml_pkgs.append(pkg)
+ non_ml_pkgs.remove(pkg)
+
+ if len(ml_pkgs) > 0 and len(non_ml_pkgs) > 0:
+ # Found both foo and lib-foo
+ ml_pkgs = self.get_rdepends_recursively(ml_pkgs)
+ non_ml_pkgs = self.get_rdepends_recursively(non_ml_pkgs)
+ # Longer list makes smart slower, so only keep the pkgs
+ # which have the same BPN, and smart can handle others
+ # correctly.
+ pkgs_new = []
+ for pkg in non_ml_pkgs:
+ for mlib in (self.d.getVar("MULTILIB_VARIANTS", True) or "").split():
+ mlib_pkg = mlib + "-" + pkg
+ if mlib_pkg in ml_pkgs:
+ pkgs_new.append(pkg)
+ pkgs_new.append(mlib_pkg)
+ for pkg in pkgs:
+ if pkg not in pkgs_new:
+ pkgs_new.append(pkg)
+ pkgs = pkgs_new
+ new_depends = {}
+ deps = bb.utils.explode_dep_versions2(" ".join(pkgs))
+ for depend in deps:
+ data = oe.packagedata.read_subpkgdata(depend, self.d)
+ key = "PKG_%s" % depend
+ if key in data:
+ new_depend = data[key]
+ else:
+ new_depend = depend
+ new_depends[new_depend] = deps[depend]
+ pkgs = bb.utils.join_deps(new_depends, commasep=True).split(', ')
pkgs = self._pkg_translate_oe_to_smart(pkgs, attempt_only)
-
if not attempt_only:
bb.note('to be installed: %s' % ' '.join(pkgs))
cmd = "%s %s install -y %s" % \
@@ -1379,6 +1471,16 @@ class OpkgPM(PackageManager):
self.d.getVar('FEED_DEPLOYDIR_BASE_URI', True),
arch))
+ if self.opkg_dir != '/var/lib/opkg':
+ # There is no command line option for this anymore, we need to add
+ # info_dir and status_file to config file, if OPKGLIBDIR doesn't have
+ # the default value of "/var/lib" as defined in opkg:
+ # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_INFO_DIR "/var/lib/opkg/info"
+ # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_STATUS_FILE "/var/lib/opkg/status"
+ cfg_file.write("option info_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR', True), 'opkg', 'info'))
+ cfg_file.write("option status_file %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR', True), 'opkg', 'status'))
+
+
def _create_config(self):
with open(self.config_file, "w+") as config_file:
priority = 1
@@ -1394,6 +1496,15 @@ class OpkgPM(PackageManager):
config_file.write("src oe-%s file:%s\n" %
(arch, pkgs_dir))
+ if self.opkg_dir != '/var/lib/opkg':
+ # There is no command line option for this anymore, we need to add
+ # info_dir and status_file to config file, if OPKGLIBDIR doesn't have
+ # the default value of "/var/lib" as defined in opkg:
+ # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_INFO_DIR "/var/lib/opkg/info"
+ # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_STATUS_FILE "/var/lib/opkg/status"
+ config_file.write("option info_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR', True), 'opkg', 'info'))
+ config_file.write("option status_file %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR', True), 'opkg', 'status'))
+
def insert_feeds_uris(self):
if self.feed_uris == "":
return
@@ -1433,7 +1544,7 @@ class OpkgPM(PackageManager):
self.deploy_dir_unlock()
def install(self, pkgs, attempt_only=False):
- if attempt_only and len(pkgs) == 0:
+ if not pkgs:
return
cmd = "%s %s install %s" % (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
diff --git a/yocto-poky/meta/lib/oe/patch.py b/yocto-poky/meta/lib/oe/patch.py
index 108bf1de5..2bf501e9e 100644
--- a/yocto-poky/meta/lib/oe/patch.py
+++ b/yocto-poky/meta/lib/oe/patch.py
@@ -337,12 +337,15 @@ class GitApplyTree(PatchTree):
return (tmpfile, cmd)
@staticmethod
- def extractPatches(tree, startcommit, outdir):
+ def extractPatches(tree, startcommit, outdir, paths=None):
import tempfile
import shutil
tempdir = tempfile.mkdtemp(prefix='oepatch')
try:
shellcmd = ["git", "format-patch", startcommit, "-o", tempdir]
+ if paths:
+ shellcmd.append('--')
+ shellcmd.extend(paths)
out = runcmd(["sh", "-c", " ".join(shellcmd)], tree)
if out:
for srcfile in out.split():
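# Illustrative sketch (hypothetical arguments, not part of the patch): the new
# optional 'paths' parameter narrows the format-patch invocation so that only
# commits touching the listed files produce patches.
startcommit = "HEAD~3"            # assumed value
outdir = "/tmp/oepatch-example"   # assumed value
paths = ["src/main.c", "Makefile"]

shellcmd = ["git", "format-patch", startcommit, "-o", outdir]
if paths:
    shellcmd.append('--')
    shellcmd.extend(paths)
print(" ".join(shellcmd))
# -> git format-patch HEAD~3 -o /tmp/oepatch-example -- src/main.c Makefile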
@@ -407,6 +410,13 @@ class GitApplyTree(PatchTree):
runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
except CmdError:
pass
+ # git am won't always clean up after itself, sadly, so...
+ shellcmd = ["git", "--work-tree=%s" % reporoot, "reset", "--hard", "HEAD"]
+ runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
+ # Also need to take care of any stray untracked files
+ shellcmd = ["git", "--work-tree=%s" % reporoot, "clean", "-f"]
+ runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
+
# Fall back to git apply
shellcmd = ["git", "--git-dir=%s" % reporoot, "apply", "-p%s" % patch['strippath']]
try:
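# Illustrative sketch (assumed repository path, not part of the patch): the two
# commands added above amount to the following cleanup sequence, run after a
# failed "git am" so the tree is pristine before the "git apply" fallback.
import subprocess

reporoot = "/path/to/extracted/source"  # assumed, for illustration only
subprocess.check_call(["git", "--work-tree=%s" % reporoot, "reset", "--hard", "HEAD"], cwd=reporoot)
subprocess.check_call(["git", "--work-tree=%s" % reporoot, "clean", "-f"], cwd=reporoot)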
diff --git a/yocto-poky/meta/lib/oe/recipeutils.py b/yocto-poky/meta/lib/oe/recipeutils.py
index d4fa72651..119a68821 100644
--- a/yocto-poky/meta/lib/oe/recipeutils.py
+++ b/yocto-poky/meta/lib/oe/recipeutils.py
@@ -31,9 +31,13 @@ def pn_to_recipe(cooker, pn):
import bb.providers
if pn in cooker.recipecache.pkg_pn:
- filenames = cooker.recipecache.pkg_pn[pn]
best = bb.providers.findBestProvider(pn, cooker.data, cooker.recipecache, cooker.recipecache.pkg_pn)
return best[3]
+ elif pn in cooker.recipecache.providers:
+ filenames = cooker.recipecache.providers[pn]
+ eligible, foundUnique = bb.providers.filterProviders(filenames, pn, cooker.expanded_data, cooker.recipecache)
+ filename = eligible[0]
+ return filename
else:
return None
@@ -72,6 +76,8 @@ def parse_recipe_simple(cooker, pn, d, appends=True):
raise bb.providers.NoProvider('Unable to find any recipe file matching %s' % pn)
if appends:
appendfiles = cooker.collection.get_file_appends(recipefile)
+ else:
+ appendfiles = None
return parse_recipe(recipefile, appendfiles, d)
@@ -95,6 +101,63 @@ def get_var_files(fn, varlist, d):
return varfiles
+def split_var_value(value, assignment=True):
+ """
+ Split a space-separated variable's value into a list of items,
+ taking into account that some of the items might be made up of
+ expressions containing spaces that should not be split.
+ Parameters:
+ value:
+ The string value to split
+ assignment:
+ True to assume that the value represents an assignment
+ statement, False otherwise. If True and an assignment
+ statement is passed in, the first item in
+ the returned list will be the part of the assignment
+ statement up to and including the opening quote character,
+ and the last item will be the closing quote.
+ """
+ inexpr = 0
+ lastchar = None
+ out = []
+ buf = ''
+ for char in value:
+ if char == '{':
+ if lastchar == '$':
+ inexpr += 1
+ elif char == '}':
+ inexpr -= 1
+ elif assignment and char in '"\'' and inexpr == 0:
+ if buf:
+ out.append(buf)
+ out.append(char)
+ char = ''
+ buf = ''
+ elif char.isspace() and inexpr == 0:
+ char = ''
+ if buf:
+ out.append(buf)
+ buf = ''
+ buf += char
+ lastchar = char
+ if buf:
+ out.append(buf)
+
+ # Join together assignment statement and opening quote
+ outlist = out
+ if assignment:
+ assigfound = False
+ for idx, item in enumerate(out):
+ if '=' in item:
+ assigfound = True
+ if assigfound:
+ if '"' in item or "'" in item:
+ outlist = [' '.join(out[:idx+1])]
+ outlist.extend(out[idx+1:])
+ break
+ return outlist
+
+
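# Illustrative sketch (hypothetical value, not part of the patch): unlike a
# plain str.split(), split_var_value() keeps a ${@...} expression containing
# spaces together as a single item.
value = 'file://a.patch ${@bb.utils.contains("FOO", "bar", "b.patch", "", d)}'
print(split_var_value(value, assignment=False))
# -> ['file://a.patch', '${@bb.utils.contains("FOO", "bar", "b.patch", "", d)}']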
def patch_recipe_file(fn, values, patch=False, relpath=''):
"""Update or insert variable values into a recipe file (assuming you
have already identified the exact file you want to update.)
@@ -112,7 +175,7 @@ def patch_recipe_file(fn, values, patch=False, relpath=''):
if name in nowrap_vars:
tf.write(rawtext)
elif name in list_vars:
- splitvalue = values[name].split()
+ splitvalue = split_var_value(values[name], assignment=False)
if len(splitvalue) > 1:
linesplit = ' \\\n' + (' ' * (len(name) + 4))
tf.write('%s = "%s%s"\n' % (name, linesplit.join(splitvalue), linesplit))
@@ -277,6 +340,22 @@ def copy_recipe_files(d, tgt_dir, whole_dir=False, download=True):
return remotes
+def get_recipe_local_files(d, patches=False):
+ """Get a list of local files in SRC_URI within a recipe."""
+ uris = (d.getVar('SRC_URI', True) or "").split()
+ fetch = bb.fetch2.Fetch(uris, d)
+ ret = {}
+ for uri in uris:
+ if fetch.ud[uri].type == 'file':
+ if (not patches and
+ bb.utils.exec_flat_python_func('patch_path', uri, fetch, '')):
+ continue
+ # Skip files that are referenced by absolute path
+ if not os.path.isabs(fetch.ud[uri].basepath):
+ ret[fetch.ud[uri].basepath] = fetch.localpath(uri)
+ return ret
+
+
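# Illustrative sketch (assumes a parsed recipe datastore 'd'; output path is
# hypothetical, not from the patch): the new helper maps the basename of each
# local file:// SRC_URI entry to the path the fetcher resolved it to, skipping
# patches by default.
local_files = get_recipe_local_files(d)
for basename, localpath in local_files.items():
    print("%s -> %s" % (basename, localpath))
# e.g. "defaults.conf -> /path/to/layer/recipes-example/example/defaults.conf"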
def get_recipe_patches(d):
"""Get a list of the patches included in SRC_URI within a recipe."""
patchfiles = []
@@ -518,7 +597,7 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
instfunclines.append(line)
return (instfunclines, None, 4, False)
else:
- splitval = origvalue.split()
+ splitval = split_var_value(origvalue, assignment=False)
changed = False
removevar = varname
if varname in ['SRC_URI', 'SRC_URI_append%s' % appendoverride]:
@@ -673,11 +752,14 @@ def get_recipe_upstream_version(rd):
ru['type'] = 'U'
ru['datetime'] = ''
+ pv = rd.getVar('PV', True)
+
# XXX: Having no SRC_URI means there are no upstream sources, so
- # returns 1.0.
+ # return the current recipe version so that the upstream version
+ # check declares a match.
src_uris = rd.getVar('SRC_URI', True)
if not src_uris:
- ru['version'] = '1.0'
+ ru['version'] = pv
ru['type'] = 'M'
ru['datetime'] = datetime.now()
return ru
@@ -686,8 +768,6 @@ def get_recipe_upstream_version(rd):
src_uri = src_uris.split()[0]
uri_type, _, _, _, _, _ = decodeurl(src_uri)
- pv = rd.getVar('PV', True)
-
manual_upstream_version = rd.getVar("RECIPE_UPSTREAM_VERSION", True)
if manual_upstream_version:
# manual tracking of upstream version.
diff --git a/yocto-poky/meta/lib/oe/rootfs.py b/yocto-poky/meta/lib/oe/rootfs.py
index 3b53fce4a..18df22d9a 100644
--- a/yocto-poky/meta/lib/oe/rootfs.py
+++ b/yocto-poky/meta/lib/oe/rootfs.py
@@ -66,6 +66,7 @@ class Rootfs(object):
m = r.search(line)
if m:
found_error = 1
+ bb.warn('[log_check] In line: [%s]' % line)
bb.warn('[log_check] %s: found an error message in the logfile (keyword \'%s\'):\n[log_check] %s'
% (self.d.getVar('PN', True), m.group(), line))
@@ -278,6 +279,7 @@ class Rootfs(object):
bb.note("Running intercept scripts:")
os.environ['D'] = self.image_rootfs
+ os.environ['STAGING_DIR_NATIVE'] = self.d.getVar('STAGING_DIR_NATIVE', True)
for script in os.listdir(intercepts_dir):
script_full = os.path.join(intercepts_dir, script)
@@ -595,7 +597,11 @@ class DpkgOpkgRootfs(Rootfs):
pkg_list = []
- pkgs = self._get_pkgs_postinsts(status_file)
+ pkgs = None
+ if not self.d.getVar('PACKAGE_INSTALL', True).strip():
+ bb.note("Building empty image")
+ else:
+ pkgs = self._get_pkgs_postinsts(status_file)
if pkgs:
root = "__packagegroup_postinst__"
pkgs[root] = pkgs.keys()
diff --git a/yocto-poky/meta/lib/oe/sdk.py b/yocto-poky/meta/lib/oe/sdk.py
index 53da0f01a..3103f4889 100644
--- a/yocto-poky/meta/lib/oe/sdk.py
+++ b/yocto-poky/meta/lib/oe/sdk.py
@@ -5,6 +5,7 @@ from oe.package_manager import *
import os
import shutil
import glob
+import traceback
class Sdk(object):
@@ -25,7 +26,7 @@ class Sdk(object):
else:
self.manifest_dir = manifest_dir
- bb.utils.remove(self.sdk_output, True)
+ self.remove(self.sdk_output, True)
self.install_order = Manifest.INSTALL_ORDER
@@ -34,29 +35,56 @@ class Sdk(object):
pass
def populate(self):
- bb.utils.mkdirhier(self.sdk_output)
+ self.mkdirhier(self.sdk_output)
# call backend dependent implementation
self._populate()
# Don't ship any libGL in the SDK
- bb.utils.remove(os.path.join(self.sdk_output, self.sdk_native_path,
- self.d.getVar('libdir_nativesdk', True).strip('/'),
- "libGL*"))
+ self.remove(os.path.join(self.sdk_output, self.sdk_native_path,
+ self.d.getVar('libdir_nativesdk', True).strip('/'),
+ "libGL*"))
# Fix or remove broken .la files
- bb.utils.remove(os.path.join(self.sdk_output, self.sdk_native_path,
- self.d.getVar('libdir_nativesdk', True).strip('/'),
- "*.la"))
+ self.remove(os.path.join(self.sdk_output, self.sdk_native_path,
+ self.d.getVar('libdir_nativesdk', True).strip('/'),
+ "*.la"))
# Link the ld.so.cache file into the hosts filesystem
link_name = os.path.join(self.sdk_output, self.sdk_native_path,
self.sysconfdir, "ld.so.cache")
- bb.utils.mkdirhier(os.path.dirname(link_name))
+ self.mkdirhier(os.path.dirname(link_name))
os.symlink("/etc/ld.so.cache", link_name)
execute_pre_post_process(self.d, self.d.getVar('SDK_POSTPROCESS_COMMAND', True))
+ def movefile(self, sourcefile, destdir):
+ try:
+ # FIXME: this check of movefile's return value against None should be
+ # replaced by having the function itself use exceptions to signal when
+ # something goes wrong
+ if (bb.utils.movefile(sourcefile, destdir) == None):
+ raise OSError("moving %s to %s failed"
+ %(sourcefile, destdir))
+ # FIXME: using umbrella exception catching because the bb.utils method raises it
+ except Exception as e:
+ bb.debug(1, "printing the stack trace\n %s" %traceback.format_exc())
+ bb.error("unable to place %s in final SDK location" % sourcefile)
+
+ def mkdirhier(self, dirpath):
+ try:
+ bb.utils.mkdirhier(dirpath)
+ except OSError as e:
+ bb.debug(1, "printing the stack trace\n %s" %traceback.format_exc())
+ bb.fatal("cannot make dir for SDK: %s" % dirpath)
+
+ def remove(self, path, recurse=False):
+ try:
+ bb.utils.remove(path, recurse)
+ # FIXME: using umbrella exception catching because the bb.utils method raises it
+ except Exception as e:
+ bb.debug(1, "printing the stack trace\n %s" %traceback.format_exc())
+ bb.warn("cannot remove SDK dir: %s" % path)
class RpmSdk(Sdk):
def __init__(self, d, manifest_dir=None):
@@ -143,15 +171,15 @@ class RpmSdk(Sdk):
"lib",
"rpm"
)
- bb.utils.mkdirhier(native_rpm_state_dir)
+ self.mkdirhier(native_rpm_state_dir)
for f in glob.glob(os.path.join(self.sdk_output,
"var",
"lib",
"rpm",
"*")):
- bb.utils.movefile(f, native_rpm_state_dir)
+ self.movefile(f, native_rpm_state_dir)
- bb.utils.remove(os.path.join(self.sdk_output, "var"), True)
+ self.remove(os.path.join(self.sdk_output, "var"), True)
# Move host sysconfig data
native_sysconf_dir = os.path.join(self.sdk_output,
@@ -159,10 +187,10 @@ class RpmSdk(Sdk):
self.d.getVar('sysconfdir',
True).strip('/'),
)
- bb.utils.mkdirhier(native_sysconf_dir)
+ self.mkdirhier(native_sysconf_dir)
for f in glob.glob(os.path.join(self.sdk_output, "etc", "*")):
- bb.utils.movefile(f, native_sysconf_dir)
- bb.utils.remove(os.path.join(self.sdk_output, "etc"), True)
+ self.movefile(f, native_sysconf_dir)
+ self.remove(os.path.join(self.sdk_output, "etc"), True)
class OpkgSdk(Sdk):
@@ -219,12 +247,12 @@ class OpkgSdk(Sdk):
target_sysconfdir = os.path.join(self.sdk_target_sysroot, self.sysconfdir)
host_sysconfdir = os.path.join(self.sdk_host_sysroot, self.sysconfdir)
- bb.utils.mkdirhier(target_sysconfdir)
+ self.mkdirhier(target_sysconfdir)
shutil.copy(self.target_conf, target_sysconfdir)
os.chmod(os.path.join(target_sysconfdir,
os.path.basename(self.target_conf)), 0644)
- bb.utils.mkdirhier(host_sysconfdir)
+ self.mkdirhier(host_sysconfdir)
shutil.copy(self.host_conf, host_sysconfdir)
os.chmod(os.path.join(host_sysconfdir,
os.path.basename(self.host_conf)), 0644)
@@ -232,11 +260,11 @@ class OpkgSdk(Sdk):
native_opkg_state_dir = os.path.join(self.sdk_output, self.sdk_native_path,
self.d.getVar('localstatedir_nativesdk', True).strip('/'),
"lib", "opkg")
- bb.utils.mkdirhier(native_opkg_state_dir)
+ self.mkdirhier(native_opkg_state_dir)
for f in glob.glob(os.path.join(self.sdk_output, "var", "lib", "opkg", "*")):
- bb.utils.movefile(f, native_opkg_state_dir)
+ self.movefile(f, native_opkg_state_dir)
- bb.utils.remove(os.path.join(self.sdk_output, "var"), True)
+ self.remove(os.path.join(self.sdk_output, "var"), True)
class DpkgSdk(Sdk):
@@ -264,7 +292,7 @@ class DpkgSdk(Sdk):
def _copy_apt_dir_to(self, dst_dir):
staging_etcdir_native = self.d.getVar("STAGING_ETCDIR_NATIVE", True)
- bb.utils.remove(dst_dir, True)
+ self.remove(dst_dir, True)
shutil.copytree(os.path.join(staging_etcdir_native, "apt"), dst_dir)
@@ -306,11 +334,11 @@ class DpkgSdk(Sdk):
native_dpkg_state_dir = os.path.join(self.sdk_output, self.sdk_native_path,
"var", "lib", "dpkg")
- bb.utils.mkdirhier(native_dpkg_state_dir)
+ self.mkdirhier(native_dpkg_state_dir)
for f in glob.glob(os.path.join(self.sdk_output, "var", "lib", "dpkg", "*")):
- bb.utils.movefile(f, native_dpkg_state_dir)
+ self.movefile(f, native_dpkg_state_dir)
+ self.remove(os.path.join(self.sdk_output, "var"), True)
- bb.utils.remove(os.path.join(self.sdk_output, "var"), True)
def sdk_list_installed_packages(d, target, format=None, rootfs_dir=None):
diff --git a/yocto-poky/meta/lib/oe/sstatesig.py b/yocto-poky/meta/lib/oe/sstatesig.py
index cb46712ee..6d1be3e37 100644
--- a/yocto-poky/meta/lib/oe/sstatesig.py
+++ b/yocto-poky/meta/lib/oe/sstatesig.py
@@ -94,6 +94,26 @@ class SignatureGeneratorOEBasicHash(bb.siggen.SignatureGeneratorBasicHash):
self.machine = data.getVar("MACHINE", True)
self.mismatch_msgs = []
pass
+
+ def tasks_resolved(self, virtmap, virtpnmap, dataCache):
+ # Translate virtual/xxx entries to PN values
+ newabisafe = []
+ for a in self.abisaferecipes:
+ if a in virtpnmap:
+ newabisafe.append(virtpnmap[a])
+ else:
+ newabisafe.append(a)
+ self.abisaferecipes = newabisafe
+ newsafedeps = []
+ for a in self.saferecipedeps:
+ a1, a2 = a.split("->")
+ if a1 in virtpnmap:
+ a1 = virtpnmap[a1]
+ if a2 in virtpnmap:
+ a2 = virtpnmap[a2]
+ newsafedeps.append(a1 + "->" + a2)
+ self.saferecipedeps = newsafedeps
+
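# Illustrative sketch (hypothetical mapping, standalone, not part of the
# patch): the method above rewrites both lists so virtual/* names match the
# PN values seen once providers are resolved.
virtpnmap = {"virtual/kernel": "linux-yocto", "virtual/libc": "glibc"}
abisaferecipes = ["virtual/kernel", "quilt-native"]
saferecipedeps = ["virtual/libc->busybox"]

abisaferecipes = [virtpnmap.get(a, a) for a in abisaferecipes]
saferecipedeps = ["->".join(virtpnmap.get(part, part) for part in dep.split("->"))
                  for dep in saferecipedeps]
print(abisaferecipes)  # ['linux-yocto', 'quilt-native']
print(saferecipedeps)  # ['glibc->busybox']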
def rundep_check(self, fn, recipename, task, dep, depname, dataCache = None):
return sstate_rundepfilter(self, fn, recipename, task, dep, depname, dataCache)
diff --git a/yocto-poky/meta/lib/oeqa/oetest.py b/yocto-poky/meta/lib/oeqa/oetest.py
index a6f89b6a8..6f9edec58 100644
--- a/yocto-poky/meta/lib/oeqa/oetest.py
+++ b/yocto-poky/meta/lib/oeqa/oetest.py
@@ -11,9 +11,14 @@ import os, re, mmap
import unittest
import inspect
import subprocess
-import bb
-from oeqa.utils.decorators import LogResults, gettag
-from sys import exc_info, exc_clear
+try:
+ import bb
+except ImportError:
+ pass
+import logging
+from oeqa.utils.decorators import LogResults, gettag, getResults
+
+logger = logging.getLogger("BitBake")
def getVar(obj):
#extend from dict; if a variable doesn't exist, we need to find it in the testcase
@@ -89,7 +94,7 @@ def loadTests(tc, type="runtime"):
suite.dependencies.append(dep_suite)
break
else:
- bb.warn("Test %s was declared as @skipUnlessPassed('%s') but that test is either not defined or not active. Will run the test anyway." %
+ logger.warning("Test %s was declared as @skipUnlessPassed('%s') but that test is either not defined or not active. Will run the test anyway." %
(test, depends_on))
# Use brute-force topological sort to determine ordering. Sort by
# depth (higher depth = must run later), with original ordering to
@@ -106,14 +111,34 @@ def loadTests(tc, type="runtime"):
suites.sort(cmp=lambda a,b: cmp((a.depth, a.index), (b.depth, b.index)))
return testloader.suiteClass(suites)
+_buffer = ""
+
+def custom_verbose(msg, *args, **kwargs):
+ global _buffer
+ if msg[-1] != "\n":
+ _buffer += msg
+ else:
+ _buffer += msg
+ try:
+ bb.plain(_buffer.rstrip("\n"), *args, **kwargs)
+ except NameError:
+ logger.info(_buffer.rstrip("\n"), *args, **kwargs)
+ _buffer = ""
+
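# Illustrative sketch (not part of the patch): custom_verbose() is installed
# as runner.stream.write, so partial writes from unittest are buffered until a
# newline arrives and each complete line is emitted in a single call.
custom_verbose("test_ssh (smart.SmartTest) ... ")  # buffered, nothing logged yet
custom_verbose("ok\n")  # logs "test_ssh (smart.SmartTest) ... ok" via bb.plain/logger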
def runTests(tc, type="runtime"):
suite = loadTests(tc, type)
- bb.note("Test modules %s" % tc.testslist)
+ logger.info("Test modules %s" % tc.testslist)
if hasattr(tc, "tagexp") and tc.tagexp:
- bb.note("Filter test cases by tags: %s" % tc.tagexp)
- bb.note("Found %s tests" % suite.countTestCases())
+ logger.info("Filter test cases by tags: %s" % tc.tagexp)
+ logger.info("Found %s tests" % suite.countTestCases())
runner = unittest.TextTestRunner(verbosity=2)
+ try:
+ if bb.msg.loggerDefaultVerbose:
+ runner.stream.write = custom_verbose
+ except NameError:
+ # Not in bb environment?
+ pass
result = runner.run(suite)
return result
@@ -158,17 +183,24 @@ class oeRuntimeTest(oeTest):
pass
def tearDown(self):
- # If a test fails or there is an exception
- if not exc_info() == (None, None, None):
- exc_clear()
- #Only dump for QemuTarget
- if (type(self.target).__name__ == "QemuTarget"):
- self.tc.host_dumper.create_dir(self._testMethodName)
- self.tc.host_dumper.dump_host()
- self.target.target_dumper.dump_target(
- self.tc.host_dumper.dump_dir)
- print ("%s dump data stored in %s" % (self._testMethodName,
- self.tc.host_dumper.dump_dir))
+ res = getResults()
+ # If a test fails or there is an exception dump
+ # for QemuTarget only
+ if (type(self.target).__name__ == "QemuTarget" and
+ (self.id() in res.getErrorList() or
+ self.id() in res.getFailList())):
+ self.tc.host_dumper.create_dir(self._testMethodName)
+ self.tc.host_dumper.dump_host()
+ self.target.target_dumper.dump_target(
+ self.tc.host_dumper.dump_dir)
+ print ("%s dump data stored in %s" % (self._testMethodName,
+ self.tc.host_dumper.dump_dir))
+
+ self.tearDownLocal()
+
+ # Method to be run after tearDown and implemented by child classes
+ def tearDownLocal(self):
+ pass
#TODO: use package_manager.py to install packages on any type of image
def install_packages(self, packagelist):
@@ -190,7 +222,7 @@ class oeSDKTest(oeTest):
return False
def _run(self, cmd):
- return subprocess.check_output(cmd, shell=True)
+ return subprocess.check_output(". %s; " % self.tc.sdkenv + cmd, shell=True)
def getmodule(pos=2):
# stack returns a list of tuples containing frame information
diff --git a/yocto-poky/meta/lib/oeqa/runexported.py b/yocto-poky/meta/lib/oeqa/runexported.py
index 96442b1b2..dba0d7aec 100755
--- a/yocto-poky/meta/lib/oeqa/runexported.py
+++ b/yocto-poky/meta/lib/oeqa/runexported.py
@@ -21,7 +21,7 @@
import sys
import os
import time
-from optparse import OptionParser
+import argparse
try:
import simplejson as json
@@ -49,8 +49,8 @@ class FakeTarget(object):
def exportStart(self):
self.sshlog = os.path.join(self.testdir, "ssh_target_log.%s" % self.datetime)
sshloglink = os.path.join(self.testdir, "ssh_target_log")
- if os.path.islink(sshloglink):
- os.unlink(sshloglink)
+ if os.path.exists(sshloglink):
+ os.remove(sshloglink)
os.symlink(self.sshlog, sshloglink)
print("SSH log file: %s" % self.sshlog)
self.connection = SSHControl(self.ip, logfile=self.sshlog)
@@ -76,43 +76,41 @@ class TestContext(object):
def main():
- usage = "usage: %prog [options] <json file>"
- parser = OptionParser(usage=usage)
- parser.add_option("-t", "--target-ip", dest="ip", help="The IP address of the target machine. Use this to \
+ parser = argparse.ArgumentParser()
+ parser.add_argument("-t", "--target-ip", dest="ip", help="The IP address of the target machine. Use this to \
overwrite the value determined from TEST_TARGET_IP at build time")
- parser.add_option("-s", "--server-ip", dest="server_ip", help="The IP address of this machine. Use this to \
+ parser.add_argument("-s", "--server-ip", dest="server_ip", help="The IP address of this machine. Use this to \
overwrite the value determined from TEST_SERVER_IP at build time.")
- parser.add_option("-d", "--deploy-dir", dest="deploy_dir", help="Full path to the package feeds, that this \
+ parser.add_argument("-d", "--deploy-dir", dest="deploy_dir", help="Full path to the package feeds, that this \
the contents of what used to be DEPLOY_DIR on the build machine. If not specified it will use the value \
specified in the json if that directory actually exists or it will error out.")
- parser.add_option("-l", "--log-dir", dest="log_dir", help="This sets the path for TEST_LOG_DIR. If not specified \
+ parser.add_argument("-l", "--log-dir", dest="log_dir", help="This sets the path for TEST_LOG_DIR. If not specified \
the current dir is used. This is usually used for creating an ssh log file and an scp test file.")
+ parser.add_argument("json", help="The json file exported by the build system", default="testdata.json", nargs='?')
- (options, args) = parser.parse_args()
- if len(args) != 1:
- parser.error("Incorrect number of arguments. The one and only argument should be a json file exported by the build system")
+ args = parser.parse_args()
- with open(args[0], "r") as f:
+ with open(args.json, "r") as f:
loaded = json.load(f)
- if options.ip:
- loaded["target"]["ip"] = options.ip
- if options.server_ip:
- loaded["target"]["server_ip"] = options.server_ip
+ if args.ip:
+ loaded["target"]["ip"] = args.ip
+ if args.server_ip:
+ loaded["target"]["server_ip"] = args.server_ip
d = MyDataDict()
for key in loaded["d"].keys():
d[key] = loaded["d"][key]
- if options.log_dir:
- d["TEST_LOG_DIR"] = options.log_dir
+ if args.log_dir:
+ d["TEST_LOG_DIR"] = args.log_dir
else:
d["TEST_LOG_DIR"] = os.path.abspath(os.path.dirname(__file__))
- if options.deploy_dir:
- d["DEPLOY_DIR"] = options.deploy_dir
+ if args.deploy_dir:
+ d["DEPLOY_DIR"] = args.deploy_dir
else:
if not os.path.isdir(d["DEPLOY_DIR"]):
- raise Exception("The path to DEPLOY_DIR does not exists: %s" % d["DEPLOY_DIR"])
+ print("WARNING: The path to DEPLOY_DIR does not exist: %s" % d["DEPLOY_DIR"])
target = FakeTarget(d)
diff --git a/yocto-poky/meta/lib/oeqa/runtime/_ptest.py b/yocto-poky/meta/lib/oeqa/runtime/_ptest.py
index 81c9c4386..0621028b8 100644
--- a/yocto-poky/meta/lib/oeqa/runtime/_ptest.py
+++ b/yocto-poky/meta/lib/oeqa/runtime/_ptest.py
@@ -98,7 +98,7 @@ class PtestRunnerTest(oeRuntimeTest):
return complementary_pkgs.split()
- def setUp(self):
+ def setUpLocal(self):
self.ptest_log = os.path.join(oeRuntimeTest.tc.d.getVar("TEST_LOG_DIR",True), "ptest-%s.log" % oeRuntimeTest.tc.d.getVar('DATETIME', True))
@skipUnlessPassed('test_ssh')
diff --git a/yocto-poky/meta/lib/oeqa/runtime/connman.py b/yocto-poky/meta/lib/oeqa/runtime/connman.py
index ee69e5df9..bd9dba3bd 100644
--- a/yocto-poky/meta/lib/oeqa/runtime/connman.py
+++ b/yocto-poky/meta/lib/oeqa/runtime/connman.py
@@ -29,26 +29,3 @@ class ConnmanTest(oeRuntimeTest):
if status != 0:
print self.service_status("connman")
self.fail("No connmand process running")
-
- @testcase(223)
- def test_only_one_connmand_in_background(self):
- """
- Summary: Only one connmand in background
- Expected: There will be only one connmand instance in background.
- Product: BSPs
- Author: Alexandru Georgescu <alexandru.c.georgescu@intel.com>
- AutomatedBy: Daniel Istrate <daniel.alexandrux.istrate@intel.com>
- """
-
- # Make sure that 'connmand' is running in background
- (status, output) = self.target.run(oeRuntimeTest.pscmd + ' | grep [c]onnmand')
- self.assertEqual(0, status, 'Failed to find "connmand" process running in background.')
-
- # Start a new instance of 'connmand'
- (status, output) = self.target.run('connmand')
- self.assertEqual(0, status, 'Failed to start a new "connmand" process.')
-
- # Make sure that only one 'connmand' is running in background
- (status, output) = self.target.run(oeRuntimeTest.pscmd + ' | grep [c]onnmand | wc -l')
- self.assertEqual(0, status, 'Failed to find "connmand" process running in background.')
- self.assertEqual(1, int(output), 'Found {} connmand processes running, expected 1.'.format(output))
diff --git a/yocto-poky/meta/lib/oeqa/runtime/date.py b/yocto-poky/meta/lib/oeqa/runtime/date.py
index 3a8fe8481..447987e07 100644
--- a/yocto-poky/meta/lib/oeqa/runtime/date.py
+++ b/yocto-poky/meta/lib/oeqa/runtime/date.py
@@ -4,11 +4,11 @@ import re
class DateTest(oeRuntimeTest):
- def setUp(self):
+ def setUpLocal(self):
if oeRuntimeTest.tc.d.getVar("VIRTUAL-RUNTIME_init_manager", True) == "systemd":
self.target.run('systemctl stop systemd-timesyncd')
- def tearDown(self):
+ def tearDownLocal(self):
if oeRuntimeTest.tc.d.getVar("VIRTUAL-RUNTIME_init_manager", True) == "systemd":
self.target.run('systemctl start systemd-timesyncd')
diff --git a/yocto-poky/meta/lib/oeqa/runtime/files/testsdkmakefile b/yocto-poky/meta/lib/oeqa/runtime/files/testsdkmakefile
new file mode 100644
index 000000000..fb05f822f
--- /dev/null
+++ b/yocto-poky/meta/lib/oeqa/runtime/files/testsdkmakefile
@@ -0,0 +1,5 @@
+test: test.o
+ $(CC) -o test test.o -lm
+test.o: test.c
+ $(CC) -c test.c
+
diff --git a/yocto-poky/meta/lib/oeqa/runtime/kernelmodule.py b/yocto-poky/meta/lib/oeqa/runtime/kernelmodule.py
index 2e8172032..38ca18454 100644
--- a/yocto-poky/meta/lib/oeqa/runtime/kernelmodule.py
+++ b/yocto-poky/meta/lib/oeqa/runtime/kernelmodule.py
@@ -10,7 +10,7 @@ def setUpModule():
class KernelModuleTest(oeRuntimeTest):
- def setUp(self):
+ def setUpLocal(self):
self.target.copy_to(os.path.join(oeRuntimeTest.tc.filesdir, "hellomod.c"), "/tmp/hellomod.c")
self.target.copy_to(os.path.join(oeRuntimeTest.tc.filesdir, "hellomod_makefile"), "/tmp/Makefile")
@@ -30,5 +30,5 @@ class KernelModuleTest(oeRuntimeTest):
(status, output) = self.target.run(cmd, 900)
self.assertEqual(status, 0, msg="\n".join([cmd, output]))
- def tearDown(self):
+ def tearDownLocal(self):
self.target.run('rm -f /tmp/Makefile /tmp/hellomod.c')
diff --git a/yocto-poky/meta/lib/oeqa/runtime/parselogs.py b/yocto-poky/meta/lib/oeqa/runtime/parselogs.py
index e20947b8b..fc2bc3893 100644
--- a/yocto-poky/meta/lib/oeqa/runtime/parselogs.py
+++ b/yocto-poky/meta/lib/oeqa/runtime/parselogs.py
@@ -36,6 +36,8 @@ common_errors = [
'VGA arbiter: cannot open kernel arbiter, no multi-card support',
'Failed to find URL:http://ipv4.connman.net/online/status.html',
'Online check failed for',
+ 'netlink init failed',
+ 'Fast TSC calibration',
]
x86_common = [
@@ -46,7 +48,6 @@ x86_common = [
] + common_errors
qemux86_common = [
- 'Fast TSC calibration',
'wrong ELF class',
"fail to add MMCONFIG information, can't access extended PCI configuration space under this bridge.",
"can't claim BAR ",
@@ -89,7 +90,7 @@ ignore_errors = {
'(EE) open /dev/fb0: No such file or directory',
'(EE) AIGLX: reverting to software rendering',
] + x86_common,
- 'core2_32' : [
+ 'intel-core2-32' : [
'ACPI: No _BQC method, cannot determine initial brightness',
'[Firmware Bug]: ACPI: No _BQC method, cannot determine initial brightness',
'(EE) Failed to load module "psb"',
@@ -98,6 +99,7 @@ ignore_errors = {
'(EE) Failed to load module psbdrv',
'(EE) open /dev/fb0: No such file or directory',
'(EE) AIGLX: reverting to software rendering',
+ "controller can't do DEVSLP, turning off",
] + x86_common,
'intel-corei7-64' : [
"controller can't do DEVSLP, turning off",
@@ -108,13 +110,9 @@ ignore_errors = {
'edgerouter' : [
'Fatal server error:',
] + common_errors,
- 'minnow' : [
- 'netlink init failed',
- ] + common_errors,
'jasperforest' : [
'Activated service \'org.bluez\' failed:',
'Unable to find NFC netlink family',
- 'netlink init failed',
] + common_errors,
}
@@ -233,8 +231,7 @@ class ParseLogsTest(oeRuntimeTest):
#get the output of dmesg and write it in a file. This file is added to log_locations.
def write_dmesg(self):
- (status, dmesg) = self.target.run("dmesg")
- (status, dmesg2) = self.target.run("echo \""+str(dmesg)+"\" > /tmp/dmesg_output.log")
+ (status, dmesg) = self.target.run("dmesg > /tmp/dmesg_output.log")
@testcase(1059)
@skipUnlessPassed('test_ssh')
diff --git a/yocto-poky/meta/lib/oeqa/runtime/scanelf.py b/yocto-poky/meta/lib/oeqa/runtime/scanelf.py
index 43a024ab9..67e02ff45 100644
--- a/yocto-poky/meta/lib/oeqa/runtime/scanelf.py
+++ b/yocto-poky/meta/lib/oeqa/runtime/scanelf.py
@@ -8,7 +8,7 @@ def setUpModule():
class ScanelfTest(oeRuntimeTest):
- def setUp(self):
+ def setUpLocal(self):
self.scancmd = 'scanelf --quiet --recursive --mount --ldpath --path'
@testcase(966)
diff --git a/yocto-poky/meta/lib/oeqa/sdk/gcc.py b/yocto-poky/meta/lib/oeqa/sdk/gcc.py
index 67994b9b5..8395b9b90 100644
--- a/yocto-poky/meta/lib/oeqa/sdk/gcc.py
+++ b/yocto-poky/meta/lib/oeqa/sdk/gcc.py
@@ -14,7 +14,7 @@ class GccCompileTest(oeSDKTest):
@classmethod
def setUpClass(self):
- for f in ['test.c', 'test.cpp', 'testmakefile']:
+ for f in ['test.c', 'test.cpp', 'testsdkmakefile']:
shutil.copyfile(os.path.join(self.tc.filesdir, f), self.tc.sdktestdir + f)
def test_gcc_compile(self):
@@ -27,10 +27,10 @@ class GccCompileTest(oeSDKTest):
self._run('$CXX %s/test.cpp -o %s/test -lm' % (self.tc.sdktestdir, self.tc.sdktestdir))
def test_make(self):
- self._run('cd %s; make -f testmakefile' % self.tc.sdktestdir)
+ self._run('cd %s; make -f testsdkmakefile' % self.tc.sdktestdir)
@classmethod
def tearDownClass(self):
- files = [self.tc.sdktestdir + f for f in ['test.c', 'test.cpp', 'test.o', 'test', 'testmakefile']]
+ files = [self.tc.sdktestdir + f for f in ['test.c', 'test.cpp', 'test.o', 'test', 'testsdkmakefile']]
for f in files:
bb.utils.remove(f)
diff --git a/yocto-poky/meta/lib/oeqa/selftest/archiver.py b/yocto-poky/meta/lib/oeqa/selftest/archiver.py
new file mode 100644
index 000000000..f2030c446
--- /dev/null
+++ b/yocto-poky/meta/lib/oeqa/selftest/archiver.py
@@ -0,0 +1,50 @@
+from oeqa.selftest.base import oeSelfTest
+from oeqa.utils.commands import bitbake, get_bb_var
+from oeqa.utils.decorators import testcase
+import glob
+import os
+import shutil
+
+
+class Archiver(oeSelfTest):
+
+ @testcase(1345)
+ def test_archiver_allows_to_filter_on_recipe_name(self):
+ """
+ Summary: The archiver should offer the possibility to filter on the recipe. (#6929)
+ Expected: 1. Included recipe (busybox) should be included
+ 2. Excluded recipe (zlib) should be excluded
+ Product: oe-core
+ Author: Daniel Istrate <daniel.alexandrux.istrate@intel.com>
+ AutomatedBy: Daniel Istrate <daniel.alexandrux.istrate@intel.com>
+ """
+
+ include_recipe = 'busybox'
+ exclude_recipe = 'zlib'
+
+ features = 'INHERIT += "archiver"\n'
+ features += 'ARCHIVER_MODE[src] = "original"\n'
+ features += 'COPYLEFT_PN_INCLUDE = "%s"\n' % include_recipe
+ features += 'COPYLEFT_PN_EXCLUDE = "%s"\n' % exclude_recipe
+
+ # Update local.conf
+ self.write_config(features)
+
+ tmp_dir = get_bb_var('TMPDIR')
+ deploy_dir_src = get_bb_var('DEPLOY_DIR_SRC')
+ target_sys = get_bb_var('TARGET_SYS')
+ src_path = os.path.join(deploy_dir_src, target_sys)
+
+ # Delete tmp directory
+ shutil.rmtree(tmp_dir)
+
+ # Build core-image-minimal
+ bitbake('core-image-minimal')
+
+ # Check that include_recipe was included
+ is_included = len(glob.glob(src_path + '/%s*' % include_recipe))
+ self.assertEqual(1, is_included, 'Recipe %s was not included.' % include_recipe)
+
+ # Check that exclude_recipe was excluded
+ is_excluded = len(glob.glob(src_path + '/%s*' % exclude_recipe))
+ self.assertEqual(0, is_excluded, 'Recipe %s was not excluded.' % exclude_recipe)
diff --git a/yocto-poky/meta/lib/oeqa/selftest/base.py b/yocto-poky/meta/lib/oeqa/selftest/base.py
index b2faa661e..9bddc23f8 100644
--- a/yocto-poky/meta/lib/oeqa/selftest/base.py
+++ b/yocto-poky/meta/lib/oeqa/selftest/base.py
@@ -31,7 +31,7 @@ class oeSelfTest(unittest.TestCase):
self.testinc_bblayers_path = os.path.join(self.builddir, "conf/bblayers.inc")
self.testlayer_path = oeSelfTest.testlayer_path
self._extra_tear_down_commands = []
- self._track_for_cleanup = []
+ self._track_for_cleanup = [self.testinc_path]
super(oeSelfTest, self).__init__(methodName)
def setUp(self):
diff --git a/yocto-poky/meta/lib/oeqa/selftest/bbtests.py b/yocto-poky/meta/lib/oeqa/selftest/bbtests.py
index 3d6860f65..94ca79c03 100644
--- a/yocto-poky/meta/lib/oeqa/selftest/bbtests.py
+++ b/yocto-poky/meta/lib/oeqa/selftest/bbtests.py
@@ -1,8 +1,5 @@
-import unittest
import os
-import logging
import re
-import shutil
import oeqa.utils.ftools as ftools
from oeqa.selftest.base import oeSelfTest
@@ -68,15 +65,43 @@ class BitbakeTests(oeSelfTest):
bitbake('-cclean man')
self.assertTrue("ERROR: Function failed: patch_do_patch" in result.output, msg = "Though no man-1.5h1-make.patch file exists, bitbake didn't output any err. message. bitbake output: %s" % result.output)
+ @testcase(1354)
+ def test_force_task_1(self):
+ # test 1 from bug 5875
+ test_recipe = 'zlib'
+ test_data = "Microsoft Made No Profit From Anyone's Zunes Yo"
+ image_dir = get_bb_var('D', test_recipe)
+ pkgsplit_dir = get_bb_var('PKGDEST', test_recipe)
+ man_dir = get_bb_var('mandir', test_recipe)
+
+ bitbake('-c cleansstate %s' % test_recipe)
+ bitbake(test_recipe)
+ self.add_command_to_tearDown('bitbake -c clean %s' % test_recipe)
+
+ man_file = os.path.join(image_dir + man_dir, 'man3/zlib.3')
+ ftools.append_file(man_file, test_data)
+ bitbake('-c package -f %s' % test_recipe)
+
+ man_split_file = os.path.join(pkgsplit_dir, 'zlib-doc' + man_dir, 'man3/zlib.3')
+ man_split_content = ftools.read_file(man_split_file)
+ self.assertIn(test_data, man_split_content, 'The man file has not changed in packages-split.')
+
+ ret = bitbake(test_recipe)
+ self.assertIn('task do_package_write_rpm:', ret.output, 'Task do_package_write_rpm was not re-executed.')
+
@testcase(163)
- def test_force_task(self):
- bitbake('m4-native')
- self.add_command_to_tearDown('bitbake -c clean m4-native')
- result = bitbake('-C compile m4-native')
- look_for_tasks = ['do_compile', 'do_install', 'do_populate_sysroot']
+ def test_force_task_2(self):
+ # test 2 from bug 5875
+ test_recipe = 'zlib'
+
+ bitbake('-c cleansstate %s' % test_recipe)
+ bitbake(test_recipe)
+ self.add_command_to_tearDown('bitbake -c clean %s' % test_recipe)
+
+ result = bitbake('-C compile %s' % test_recipe)
+ look_for_tasks = ['do_compile:', 'do_install:', 'do_populate_sysroot:', 'do_package:']
for task in look_for_tasks:
- find_task = re.search("m4-native.*%s" % task, result.output)
- self.assertTrue(find_task, msg = "Couldn't find %s task. bitbake output %s" % (task, result.output))
+ self.assertIn(task, result.output, msg="Couldn't find %s task." % task)
@testcase(167)
def test_bitbake_g(self):
@@ -101,6 +126,8 @@ class BitbakeTests(oeSelfTest):
self.write_config("""DL_DIR = \"${TOPDIR}/download-selftest\"
SSTATE_DIR = \"${TOPDIR}/download-selftest\"
""")
+ self.track_for_cleanup(os.path.join(self.builddir, "download-selftest"))
+
bitbake('-ccleanall man')
result = bitbake('-c fetch man', ignore_status=True)
bitbake('-ccleanall man')
@@ -116,20 +143,20 @@ doesn't exist, yet fetcher didn't report any error. bitbake output: %s" % result
self.write_config("""DL_DIR = \"${TOPDIR}/download-selftest\"
SSTATE_DIR = \"${TOPDIR}/download-selftest\"
""")
+ self.track_for_cleanup(os.path.join(self.builddir, "download-selftest"))
+
data = 'SRC_URI_append = ";downloadfilename=test-aspell.tar.gz"'
self.write_recipeinc('aspell', data)
bitbake('-ccleanall aspell')
result = bitbake('-c fetch aspell', ignore_status=True)
self.delete_recipeinc('aspell')
- self.addCleanup(bitbake, '-ccleanall aspell')
self.assertEqual(result.status, 0, msg = "Couldn't fetch aspell. %s" % result.output)
self.assertTrue(os.path.isfile(os.path.join(get_bb_var("DL_DIR"), 'test-aspell.tar.gz')), msg = "File rename failed. No corresponding test-aspell.tar.gz file found under %s" % str(get_bb_var("DL_DIR")))
self.assertTrue(os.path.isfile(os.path.join(get_bb_var("DL_DIR"), 'test-aspell.tar.gz.done')), "File rename failed. No corresponding test-aspell.tar.gz.done file found under %s" % str(get_bb_var("DL_DIR")))
@testcase(1028)
def test_environment(self):
- self.append_config("TEST_ENV=\"localconf\"")
- self.addCleanup(self.remove_config, "TEST_ENV=\"localconf\"")
+ self.write_config("TEST_ENV=\"localconf\"")
result = runCmd('bitbake -e | grep TEST_ENV=')
self.assertTrue('localconf' in result.output, msg = "bitbake didn't report any value for TEST_ENV variable. To test, run 'bitbake -e | grep TEST_ENV='")
@@ -156,8 +183,7 @@ SSTATE_DIR = \"${TOPDIR}/download-selftest\"
ftools.write_file(preconf ,"TEST_PREFILE=\"prefile\"")
result = runCmd('bitbake -r conf/prefile.conf -e | grep TEST_PREFILE=')
self.assertTrue('prefile' in result.output, "Preconfigure file \"prefile.conf\"was not taken into consideration. ")
- self.append_config("TEST_PREFILE=\"localconf\"")
- self.addCleanup(self.remove_config, "TEST_PREFILE=\"localconf\"")
+ self.write_config("TEST_PREFILE=\"localconf\"")
result = runCmd('bitbake -r conf/prefile.conf -e | grep TEST_PREFILE=')
self.assertTrue('localconf' in result.output, "Preconfigure file \"prefile.conf\"was not taken into consideration.")
@@ -166,8 +192,7 @@ SSTATE_DIR = \"${TOPDIR}/download-selftest\"
postconf = os.path.join(self.builddir, 'conf/postfile.conf')
self.track_for_cleanup(postconf)
ftools.write_file(postconf , "TEST_POSTFILE=\"postfile\"")
- self.append_config("TEST_POSTFILE=\"localconf\"")
- self.addCleanup(self.remove_config, "TEST_POSTFILE=\"localconf\"")
+ self.write_config("TEST_POSTFILE=\"localconf\"")
result = runCmd('bitbake -R conf/postfile.conf -e | grep TEST_POSTFILE=')
self.assertTrue('postfile' in result.output, "Postconfigure file \"postfile.conf\"was not taken into consideration.")
@@ -181,6 +206,7 @@ SSTATE_DIR = \"${TOPDIR}/download-selftest\"
self.write_config("""DL_DIR = \"${TOPDIR}/download-selftest\"
SSTATE_DIR = \"${TOPDIR}/download-selftest\"
""")
+ self.track_for_cleanup(os.path.join(self.builddir, "download-selftest"))
self.write_recipeinc('man',"\ndo_fail_task () {\nexit 1 \n}\n\naddtask do_fail_task before do_fetch\n" )
runCmd('bitbake -c cleanall man xcursor-transparent-theme')
result = runCmd('bitbake man xcursor-transparent-theme -k', ignore_status=True)
diff --git a/yocto-poky/meta/lib/oeqa/selftest/buildoptions.py b/yocto-poky/meta/lib/oeqa/selftest/buildoptions.py
index 483803bf8..acf481f7b 100644
--- a/yocto-poky/meta/lib/oeqa/selftest/buildoptions.py
+++ b/yocto-poky/meta/lib/oeqa/selftest/buildoptions.py
@@ -1,9 +1,6 @@
-import unittest
import os
-import logging
import re
import glob as g
-import pexpect as p
from oeqa.selftest.base import oeSelfTest
from oeqa.selftest.buildhistory import BuildhistoryBase
@@ -42,7 +39,7 @@ class ImageOptionsTests(oeSelfTest):
for image_file in deploydir_files:
if imagename in image_file and os.path.islink(os.path.join(deploydir, image_file)):
track_original_files.append(os.path.realpath(os.path.join(deploydir, image_file)))
- self.append_config("RM_OLD_IMAGE = \"1\"")
+ self.write_config("RM_OLD_IMAGE = \"1\"")
bitbake("-C rootfs core-image-minimal")
deploydir_files = os.listdir(deploydir)
remaining_not_expected = [path for path in track_original_files if os.path.basename(path) in deploydir_files]
@@ -100,7 +97,7 @@ class SanityOptionsTest(oeSelfTest):
@testcase(278)
def test_sanity_userspace_dependency(self):
- self.append_config('WARN_QA_append = " unsafe-references-in-binaries unsafe-references-in-scripts"')
+ self.write_config('WARN_QA_append = " unsafe-references-in-binaries unsafe-references-in-scripts"')
bitbake("-ccleansstate gzip nfs-utils")
res = bitbake("gzip nfs-utils")
self.assertTrue("WARNING: QA Issue: gzip" in res.output, "WARNING: QA Issue: gzip message is not present in bitbake's output: %s" % res.output)
@@ -128,7 +125,7 @@ class BuildImagesTest(oeSelfTest):
This method is used to test the build of directfb image for arm arch.
In essence we build a coreimagedirectfb and test the exitcode of bitbake that in case of success is 0.
"""
- self.add_command_to_tearDown('cleanupworkdir')
+ self.add_command_to_tearDown('cleanup-workdir')
self.write_config("DISTRO_FEATURES_remove = \"x11\"\nDISTRO_FEATURES_append = \" directfb\"\nMACHINE ??= \"qemuarm\"")
res = bitbake("core-image-directfb", ignore_status=True)
self.assertEqual(res.status, 0, "\ncoreimagedirectfb failed to build. Please check logs for further details.\nbitbake output %s" % res.output)
@@ -139,7 +136,7 @@ class ArchiverTest(oeSelfTest):
"""
Test for archiving the work directory and exporting the source files.
"""
- self.add_command_to_tearDown('cleanupworkdir')
+ self.add_command_to_tearDown('cleanup-workdir')
self.write_config("INHERIT = \"archiver\"\nARCHIVER_MODE[src] = \"original\"\nARCHIVER_MODE[srpm] = \"1\"")
res = bitbake("xcursor-transparent-theme", ignore_status=True)
self.assertEqual(res.status, 0, "\nCouldn't build xcursortransparenttheme.\nbitbake output %s" % res.output)
diff --git a/yocto-poky/meta/lib/oeqa/selftest/devtool.py b/yocto-poky/meta/lib/oeqa/selftest/devtool.py
index 6e731d677..dcdef5a14 100644
--- a/yocto-poky/meta/lib/oeqa/selftest/devtool.py
+++ b/yocto-poky/meta/lib/oeqa/selftest/devtool.py
@@ -84,11 +84,44 @@ class DevtoolBase(oeSelfTest):
class DevtoolTests(DevtoolBase):
+ def setUp(self):
+ """Test case setup function"""
+ super(DevtoolTests, self).setUp()
+ self.workspacedir = os.path.join(self.builddir, 'workspace')
+ self.assertTrue(not os.path.exists(self.workspacedir),
+ 'This test cannot be run with a workspace directory '
+ 'under the build directory')
+
+ def _check_src_repo(self, repo_dir):
+ """Check srctree git repository"""
+ self.assertTrue(os.path.isdir(os.path.join(repo_dir, '.git')),
+ 'git repository for external source tree not found')
+ result = runCmd('git status --porcelain', cwd=repo_dir)
+ self.assertEqual(result.output.strip(), "",
+ 'Created git repo is not clean')
+ result = runCmd('git symbolic-ref HEAD', cwd=repo_dir)
+ self.assertEqual(result.output.strip(), "refs/heads/devtool",
+ 'Wrong branch in git repo')
+
+ def _check_repo_status(self, repo_dir, expected_status):
+ """Check the worktree status of a repository"""
+ result = runCmd('git status . --porcelain',
+ cwd=repo_dir)
+ for line in result.output.splitlines():
+ for ind, (f_status, fn_re) in enumerate(expected_status):
+ if re.match(fn_re, line[3:]):
+ if f_status != line[:2]:
+ self.fail('Unexpected status in line: %s' % line)
+ expected_status.pop(ind)
+ break
+ else:
+ self.fail('Unexpected modified file in line: %s' % line)
+ if expected_status:
+ self.fail('Missing file changes: %s' % expected_status)
+
@testcase(1158)
def test_create_workspace(self):
# Check preconditions
- workspacedir = os.path.join(self.builddir, 'workspace')
- self.assertTrue(not os.path.exists(workspacedir), 'This test cannot be run with a workspace directory under the build directory')
result = runCmd('bitbake-layers show-layers')
self.assertTrue('/workspace' not in result.output, 'This test cannot be run with a workspace layer in bblayers.conf')
# Try creating a workspace layer with a specific path
@@ -99,19 +132,16 @@ class DevtoolTests(DevtoolBase):
result = runCmd('bitbake-layers show-layers')
self.assertIn(tempdir, result.output)
# Try creating a workspace layer with the default path
- self.track_for_cleanup(workspacedir)
+ self.track_for_cleanup(self.workspacedir)
self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
result = runCmd('devtool create-workspace')
- self.assertTrue(os.path.isfile(os.path.join(workspacedir, 'conf', 'layer.conf')), msg = "No workspace created. devtool output: %s " % result.output)
+ self.assertTrue(os.path.isfile(os.path.join(self.workspacedir, 'conf', 'layer.conf')), msg = "No workspace created. devtool output: %s " % result.output)
result = runCmd('bitbake-layers show-layers')
self.assertNotIn(tempdir, result.output)
- self.assertIn(workspacedir, result.output)
+ self.assertIn(self.workspacedir, result.output)
@testcase(1159)
def test_devtool_add(self):
- # Check preconditions
- workspacedir = os.path.join(self.builddir, 'workspace')
- self.assertTrue(not os.path.exists(workspacedir), 'This test cannot be run with a workspace directory under the build directory')
# Fetch source
tempdir = tempfile.mkdtemp(prefix='devtoolqa')
self.track_for_cleanup(tempdir)
@@ -121,11 +151,11 @@ class DevtoolTests(DevtoolBase):
srcdir = os.path.join(tempdir, 'pv-1.5.3')
self.assertTrue(os.path.isfile(os.path.join(srcdir, 'configure')), 'Unable to find configure script in source directory')
# Test devtool add
- self.track_for_cleanup(workspacedir)
+ self.track_for_cleanup(self.workspacedir)
self.add_command_to_tearDown('bitbake -c cleansstate pv')
self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
result = runCmd('devtool add pv %s' % srcdir)
- self.assertTrue(os.path.exists(os.path.join(workspacedir, 'conf', 'layer.conf')), 'Workspace directory not created')
+ self.assertTrue(os.path.exists(os.path.join(self.workspacedir, 'conf', 'layer.conf')), 'Workspace directory not created')
# Test devtool status
result = runCmd('devtool status')
self.assertIn('pv', result.output)
@@ -144,9 +174,6 @@ class DevtoolTests(DevtoolBase):
@testcase(1162)
def test_devtool_add_library(self):
- # Check preconditions
- workspacedir = os.path.join(self.builddir, 'workspace')
- self.assertTrue(not os.path.exists(workspacedir), 'This test cannot be run with a workspace directory under the build directory')
# We don't have the ability to pick up this dependency automatically yet...
bitbake('libusb1')
# Fetch source
@@ -158,10 +185,10 @@ class DevtoolTests(DevtoolBase):
srcdir = os.path.join(tempdir, 'libftdi1-1.1')
self.assertTrue(os.path.isfile(os.path.join(srcdir, 'CMakeLists.txt')), 'Unable to find CMakeLists.txt in source directory')
# Test devtool add (and use -V so we test that too)
- self.track_for_cleanup(workspacedir)
+ self.track_for_cleanup(self.workspacedir)
self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
result = runCmd('devtool add libftdi %s -V 1.1' % srcdir)
- self.assertTrue(os.path.exists(os.path.join(workspacedir, 'conf', 'layer.conf')), 'Workspace directory not created')
+ self.assertTrue(os.path.exists(os.path.join(self.workspacedir, 'conf', 'layer.conf')), 'Workspace directory not created')
# Test devtool status
result = runCmd('devtool status')
self.assertIn('libftdi', result.output)
@@ -185,9 +212,6 @@ class DevtoolTests(DevtoolBase):
@testcase(1160)
def test_devtool_add_fetch(self):
- # Check preconditions
- workspacedir = os.path.join(self.builddir, 'workspace')
- self.assertTrue(not os.path.exists(workspacedir), 'This test cannot be run with a workspace directory under the build directory')
# Fetch source
tempdir = tempfile.mkdtemp(prefix='devtoolqa')
self.track_for_cleanup(tempdir)
@@ -196,11 +220,11 @@ class DevtoolTests(DevtoolBase):
testrecipe = 'python-markupsafe'
srcdir = os.path.join(tempdir, testrecipe)
# Test devtool add
- self.track_for_cleanup(workspacedir)
+ self.track_for_cleanup(self.workspacedir)
self.add_command_to_tearDown('bitbake -c cleansstate %s' % testrecipe)
self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
result = runCmd('devtool add %s %s -f %s' % (testrecipe, srcdir, url))
- self.assertTrue(os.path.exists(os.path.join(workspacedir, 'conf', 'layer.conf')), 'Workspace directory not created. %s' % result.output)
+ self.assertTrue(os.path.exists(os.path.join(self.workspacedir, 'conf', 'layer.conf')), 'Workspace directory not created. %s' % result.output)
self.assertTrue(os.path.isfile(os.path.join(srcdir, 'setup.py')), 'Unable to find setup.py in source directory')
# Test devtool status
result = runCmd('devtool status')
@@ -232,9 +256,6 @@ class DevtoolTests(DevtoolBase):
@testcase(1161)
def test_devtool_add_fetch_git(self):
- # Check preconditions
- workspacedir = os.path.join(self.builddir, 'workspace')
- self.assertTrue(not os.path.exists(workspacedir), 'This test cannot be run with a workspace directory under the build directory')
# Fetch source
tempdir = tempfile.mkdtemp(prefix='devtoolqa')
self.track_for_cleanup(tempdir)
@@ -243,11 +264,11 @@ class DevtoolTests(DevtoolBase):
testrecipe = 'libmatchbox2'
srcdir = os.path.join(tempdir, testrecipe)
# Test devtool add
- self.track_for_cleanup(workspacedir)
+ self.track_for_cleanup(self.workspacedir)
self.add_command_to_tearDown('bitbake -c cleansstate %s' % testrecipe)
self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
result = runCmd('devtool add %s %s -f %s' % (testrecipe, srcdir, url))
- self.assertTrue(os.path.exists(os.path.join(workspacedir, 'conf', 'layer.conf')), 'Workspace directory not created: %s' % result.output)
+ self.assertTrue(os.path.exists(os.path.join(self.workspacedir, 'conf', 'layer.conf')), 'Workspace directory not created: %s' % result.output)
self.assertTrue(os.path.isfile(os.path.join(srcdir, 'configure.ac')), 'Unable to find configure.ac in source directory')
# Test devtool status
result = runCmd('devtool status')
@@ -284,32 +305,25 @@ class DevtoolTests(DevtoolBase):
@testcase(1164)
def test_devtool_modify(self):
- # Check preconditions
- workspacedir = os.path.join(self.builddir, 'workspace')
- self.assertTrue(not os.path.exists(workspacedir), 'This test cannot be run with a workspace directory under the build directory')
# Clean up anything in the workdir/sysroot/sstate cache
bitbake('mdadm -c cleansstate')
# Try modifying a recipe
tempdir = tempfile.mkdtemp(prefix='devtoolqa')
self.track_for_cleanup(tempdir)
- self.track_for_cleanup(workspacedir)
+ self.track_for_cleanup(self.workspacedir)
self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
self.add_command_to_tearDown('bitbake -c clean mdadm')
result = runCmd('devtool modify mdadm -x %s' % tempdir)
self.assertTrue(os.path.exists(os.path.join(tempdir, 'Makefile')), 'Extracted source could not be found')
- self.assertTrue(os.path.isdir(os.path.join(tempdir, '.git')), 'git repository for external source tree not found')
- self.assertTrue(os.path.exists(os.path.join(workspacedir, 'conf', 'layer.conf')), 'Workspace directory not created')
- matches = glob.glob(os.path.join(workspacedir, 'appends', 'mdadm_*.bbappend'))
+ self.assertTrue(os.path.exists(os.path.join(self.workspacedir, 'conf', 'layer.conf')), 'Workspace directory not created')
+ matches = glob.glob(os.path.join(self.workspacedir, 'appends', 'mdadm_*.bbappend'))
self.assertTrue(matches, 'bbappend not created %s' % result.output)
# Test devtool status
result = runCmd('devtool status')
self.assertIn('mdadm', result.output)
self.assertIn(tempdir, result.output)
# Check git repo
- result = runCmd('git status --porcelain', cwd=tempdir)
- self.assertEqual(result.output.strip(), "", 'Created git repo is not clean')
- result = runCmd('git symbolic-ref HEAD', cwd=tempdir)
- self.assertEqual(result.output.strip(), "refs/heads/devtool", 'Wrong branch in git repo')
+ self._check_src_repo(tempdir)
# Try building
bitbake('mdadm')
# Try making (minor) modifications to the source
@@ -336,13 +350,10 @@ class DevtoolTests(DevtoolBase):
@testcase(1166)
def test_devtool_modify_invalid(self):
- # Check preconditions
- workspacedir = os.path.join(self.builddir, 'workspace')
- self.assertTrue(not os.path.exists(workspacedir), 'This test cannot be run with a workspace directory under the build directory')
# Try modifying some recipes
tempdir = tempfile.mkdtemp(prefix='devtoolqa')
self.track_for_cleanup(tempdir)
- self.track_for_cleanup(workspacedir)
+ self.track_for_cleanup(self.workspacedir)
self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
testrecipes = 'perf kernel-devsrc package-index core-image-minimal meta-toolchain packagegroup-core-sdk meta-ide-support'.split()
@@ -367,14 +378,14 @@ class DevtoolTests(DevtoolBase):
self.assertNotEqual(result.status, 0, 'devtool modify on %s should have failed. devtool output: %s' % (testrecipe, result.output))
self.assertIn('ERROR: ', result.output, 'devtool modify on %s should have given an ERROR' % testrecipe)
+ @testcase(1365)
def test_devtool_modify_native(self):
# Check preconditions
- workspacedir = os.path.join(self.builddir, 'workspace')
- self.assertTrue(not os.path.exists(workspacedir), 'This test cannot be run with a workspace directory under the build directory')
+ self.assertTrue(not os.path.exists(self.workspacedir), 'This test cannot be run with a workspace directory under the build directory')
# Try modifying some recipes
tempdir = tempfile.mkdtemp(prefix='devtoolqa')
self.track_for_cleanup(tempdir)
- self.track_for_cleanup(workspacedir)
+ self.track_for_cleanup(self.workspacedir)
self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
bbclassextended = False
@@ -400,8 +411,6 @@ class DevtoolTests(DevtoolBase):
@testcase(1165)
def test_devtool_modify_git(self):
# Check preconditions
- workspacedir = os.path.join(self.builddir, 'workspace')
- self.assertTrue(not os.path.exists(workspacedir), 'This test cannot be run with a workspace directory under the build directory')
testrecipe = 'mkelfimage'
src_uri = get_bb_var('SRC_URI', testrecipe)
self.assertIn('git://', src_uri, 'This test expects the %s recipe to be a git recipe' % testrecipe)
@@ -410,32 +419,26 @@ class DevtoolTests(DevtoolBase):
# Try modifying a recipe
tempdir = tempfile.mkdtemp(prefix='devtoolqa')
self.track_for_cleanup(tempdir)
- self.track_for_cleanup(workspacedir)
+ self.track_for_cleanup(self.workspacedir)
self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
self.add_command_to_tearDown('bitbake -c clean %s' % testrecipe)
result = runCmd('devtool modify %s -x %s' % (testrecipe, tempdir))
self.assertTrue(os.path.exists(os.path.join(tempdir, 'Makefile')), 'Extracted source could not be found')
- self.assertTrue(os.path.isdir(os.path.join(tempdir, '.git')), 'git repository for external source tree not found')
- self.assertTrue(os.path.exists(os.path.join(workspacedir, 'conf', 'layer.conf')), 'Workspace directory not created. devtool output: %s' % result.output)
- matches = glob.glob(os.path.join(workspacedir, 'appends', 'mkelfimage_*.bbappend'))
+ self.assertTrue(os.path.exists(os.path.join(self.workspacedir, 'conf', 'layer.conf')), 'Workspace directory not created. devtool output: %s' % result.output)
+ matches = glob.glob(os.path.join(self.workspacedir, 'appends', 'mkelfimage_*.bbappend'))
self.assertTrue(matches, 'bbappend not created')
# Test devtool status
result = runCmd('devtool status')
self.assertIn(testrecipe, result.output)
self.assertIn(tempdir, result.output)
# Check git repo
- result = runCmd('git status --porcelain', cwd=tempdir)
- self.assertEqual(result.output.strip(), "", 'Created git repo is not clean')
- result = runCmd('git symbolic-ref HEAD', cwd=tempdir)
- self.assertEqual(result.output.strip(), "refs/heads/devtool", 'Wrong branch in git repo')
+ self._check_src_repo(tempdir)
# Try building
bitbake(testrecipe)
@testcase(1167)
def test_devtool_modify_localfiles(self):
# Check preconditions
- workspacedir = os.path.join(self.builddir, 'workspace')
- self.assertTrue(not os.path.exists(workspacedir), 'This test cannot be run with a workspace directory under the build directory')
testrecipe = 'lighttpd'
src_uri = (get_bb_var('SRC_URI', testrecipe) or '').split()
foundlocal = False
@@ -449,13 +452,13 @@ class DevtoolTests(DevtoolBase):
# Try modifying a recipe
tempdir = tempfile.mkdtemp(prefix='devtoolqa')
self.track_for_cleanup(tempdir)
- self.track_for_cleanup(workspacedir)
+ self.track_for_cleanup(self.workspacedir)
self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
self.add_command_to_tearDown('bitbake -c clean %s' % testrecipe)
result = runCmd('devtool modify %s -x %s' % (testrecipe, tempdir))
self.assertTrue(os.path.exists(os.path.join(tempdir, 'configure.ac')), 'Extracted source could not be found')
- self.assertTrue(os.path.exists(os.path.join(workspacedir, 'conf', 'layer.conf')), 'Workspace directory not created')
- matches = glob.glob(os.path.join(workspacedir, 'appends', '%s_*.bbappend' % testrecipe))
+ self.assertTrue(os.path.exists(os.path.join(self.workspacedir, 'conf', 'layer.conf')), 'Workspace directory not created')
+ matches = glob.glob(os.path.join(self.workspacedir, 'appends', '%s_*.bbappend' % testrecipe))
self.assertTrue(matches, 'bbappend not created')
# Test devtool status
result = runCmd('devtool status')
@@ -464,30 +467,46 @@ class DevtoolTests(DevtoolBase):
# Try building
bitbake(testrecipe)
+ @testcase(1378)
+ def test_devtool_modify_virtual(self):
+ # Try modifying a virtual recipe
+ virtrecipe = 'virtual/libx11'
+ realrecipe = 'libx11'
+ tempdir = tempfile.mkdtemp(prefix='devtoolqa')
+ self.track_for_cleanup(tempdir)
+ self.track_for_cleanup(self.workspacedir)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
+ result = runCmd('devtool modify %s -x %s' % (virtrecipe, tempdir))
+ self.assertTrue(os.path.exists(os.path.join(tempdir, 'Makefile.am')), 'Extracted source could not be found')
+ self.assertTrue(os.path.exists(os.path.join(self.workspacedir, 'conf', 'layer.conf')), 'Workspace directory not created')
+ matches = glob.glob(os.path.join(self.workspacedir, 'appends', '%s_*.bbappend' % realrecipe))
+ self.assertTrue(matches, 'bbappend not created %s' % result.output)
+ # Test devtool status
+ result = runCmd('devtool status')
+ self.assertNotIn(virtrecipe, result.output)
+ self.assertIn(realrecipe, result.output)
+ # Check git repo
+ self._check_src_repo(tempdir)
+ # This is probably sufficient
+
+
@testcase(1169)
def test_devtool_update_recipe(self):
# Check preconditions
- workspacedir = os.path.join(self.builddir, 'workspace')
- self.assertTrue(not os.path.exists(workspacedir), 'This test cannot be run with a workspace directory under the build directory')
testrecipe = 'minicom'
recipefile = get_bb_var('FILE', testrecipe)
src_uri = get_bb_var('SRC_URI', testrecipe)
self.assertNotIn('git://', src_uri, 'This test expects the %s recipe to NOT be a git recipe' % testrecipe)
- result = runCmd('git status . --porcelain', cwd=os.path.dirname(recipefile))
- self.assertEqual(result.output.strip(), "", '%s recipe is not clean' % testrecipe)
+ self._check_repo_status(os.path.dirname(recipefile), [])
# First, modify a recipe
tempdir = tempfile.mkdtemp(prefix='devtoolqa')
self.track_for_cleanup(tempdir)
- self.track_for_cleanup(workspacedir)
+ self.track_for_cleanup(self.workspacedir)
self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
# (don't bother with cleaning the recipe on teardown, we won't be building it)
result = runCmd('devtool modify %s -x %s' % (testrecipe, tempdir))
# Check git repo
- self.assertTrue(os.path.isdir(os.path.join(tempdir, '.git')), 'git repository for external source tree not found')
- result = runCmd('git status --porcelain', cwd=tempdir)
- self.assertEqual(result.output.strip(), "", 'Created git repo is not clean')
- result = runCmd('git symbolic-ref HEAD', cwd=tempdir)
- self.assertEqual(result.output.strip(), "refs/heads/devtool", 'Wrong branch in git repo')
+ self._check_src_repo(tempdir)
# Add a couple of commits
# FIXME: this only tests adding, need to also test update and remove
result = runCmd('echo "Additional line" >> README', cwd=tempdir)
@@ -497,25 +516,14 @@ class DevtoolTests(DevtoolBase):
result = runCmd('git commit -m "Add a new file"', cwd=tempdir)
self.add_command_to_tearDown('cd %s; rm %s/*.patch; git checkout %s %s' % (os.path.dirname(recipefile), testrecipe, testrecipe, os.path.basename(recipefile)))
result = runCmd('devtool update-recipe %s' % testrecipe)
- result = runCmd('git status . --porcelain', cwd=os.path.dirname(recipefile))
- self.assertNotEqual(result.output.strip(), "", '%s recipe should be modified' % testrecipe)
- status = result.output.splitlines()
- self.assertEqual(len(status), 3, 'Less/more files modified than expected. Entire status:\n%s' % result.output)
- for line in status:
- if line.endswith('0001-Change-the-README.patch'):
- self.assertEqual(line[:3], '?? ', 'Unexpected status in line: %s' % line)
- elif line.endswith('0002-Add-a-new-file.patch'):
- self.assertEqual(line[:3], '?? ', 'Unexpected status in line: %s' % line)
- elif re.search('%s_[^_]*.bb$' % testrecipe, line):
- self.assertEqual(line[:3], ' M ', 'Unexpected status in line: %s' % line)
- else:
- raise AssertionError('Unexpected modified file in status: %s' % line)
+ expected_status = [(' M', '.*/%s$' % os.path.basename(recipefile)),
+ ('??', '.*/0001-Change-the-README.patch$'),
+ ('??', '.*/0002-Add-a-new-file.patch$')]
+ self._check_repo_status(os.path.dirname(recipefile), expected_status)
@testcase(1172)
def test_devtool_update_recipe_git(self):
# Check preconditions
- workspacedir = os.path.join(self.builddir, 'workspace')
- self.assertTrue(not os.path.exists(workspacedir), 'This test cannot be run with a workspace directory under the build directory')
testrecipe = 'mtd-utils'
recipefile = get_bb_var('FILE', testrecipe)
src_uri = get_bb_var('SRC_URI', testrecipe)
@@ -525,21 +533,16 @@ class DevtoolTests(DevtoolBase):
if entry.startswith('file://') and entry.endswith('.patch'):
patches.append(entry[7:].split(';')[0])
self.assertGreater(len(patches), 0, 'The %s recipe does not appear to contain any patches, so this test will not be effective' % testrecipe)
- result = runCmd('git status . --porcelain', cwd=os.path.dirname(recipefile))
- self.assertEqual(result.output.strip(), "", '%s recipe is not clean' % testrecipe)
+ self._check_repo_status(os.path.dirname(recipefile), [])
# First, modify a recipe
tempdir = tempfile.mkdtemp(prefix='devtoolqa')
self.track_for_cleanup(tempdir)
- self.track_for_cleanup(workspacedir)
+ self.track_for_cleanup(self.workspacedir)
self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
# (don't bother with cleaning the recipe on teardown, we won't be building it)
result = runCmd('devtool modify %s -x %s' % (testrecipe, tempdir))
# Check git repo
- self.assertTrue(os.path.isdir(os.path.join(tempdir, '.git')), 'git repository for external source tree not found')
- result = runCmd('git status --porcelain', cwd=tempdir)
- self.assertEqual(result.output.strip(), "", 'Created git repo is not clean')
- result = runCmd('git symbolic-ref HEAD', cwd=tempdir)
- self.assertEqual(result.output.strip(), "refs/heads/devtool", 'Wrong branch in git repo')
+ self._check_src_repo(tempdir)
# Add a couple of commits
# FIXME: this only tests adding, need to also test update and remove
result = runCmd('echo "# Additional line" >> Makefile', cwd=tempdir)
@@ -549,19 +552,10 @@ class DevtoolTests(DevtoolBase):
result = runCmd('git commit -m "Add a new file"', cwd=tempdir)
self.add_command_to_tearDown('cd %s; rm -rf %s; git checkout %s %s' % (os.path.dirname(recipefile), testrecipe, testrecipe, os.path.basename(recipefile)))
result = runCmd('devtool update-recipe -m srcrev %s' % testrecipe)
- result = runCmd('git status . --porcelain', cwd=os.path.dirname(recipefile))
- self.assertNotEqual(result.output.strip(), "", '%s recipe should be modified' % testrecipe)
- status = result.output.splitlines()
- for line in status:
- for patch in patches:
- if line.endswith(patch):
- self.assertEqual(line[:3], ' D ', 'Unexpected status in line: %s' % line)
- break
- else:
- if re.search('%s_[^_]*.bb$' % testrecipe, line):
- self.assertEqual(line[:3], ' M ', 'Unexpected status in line: %s' % line)
- else:
- raise AssertionError('Unexpected modified file in status: %s' % line)
+ expected_status = [(' M', '.*/%s$' % os.path.basename(recipefile))] + \
+ [(' D', '.*/%s$' % patch) for patch in patches]
+ self._check_repo_status(os.path.dirname(recipefile), expected_status)
+
result = runCmd('git diff %s' % os.path.basename(recipefile), cwd=os.path.dirname(recipefile))
addlines = ['SRCREV = ".*"', 'SRC_URI = "git://git.infradead.org/mtd-utils.git"']
srcurilines = src_uri.split()
@@ -588,50 +582,33 @@ class DevtoolTests(DevtoolBase):
# Now try with auto mode
runCmd('cd %s; git checkout %s %s' % (os.path.dirname(recipefile), testrecipe, os.path.basename(recipefile)))
result = runCmd('devtool update-recipe %s' % testrecipe)
- result = runCmd('git rev-parse --show-toplevel')
+ result = runCmd('git rev-parse --show-toplevel', cwd=os.path.dirname(recipefile))
topleveldir = result.output.strip()
- result = runCmd('git status . --porcelain', cwd=os.path.dirname(recipefile))
- status = result.output.splitlines()
relpatchpath = os.path.join(os.path.relpath(os.path.dirname(recipefile), topleveldir), testrecipe)
- expectedstatus = [('M', os.path.relpath(recipefile, topleveldir)),
- ('??', '%s/0001-Change-the-Makefile.patch' % relpatchpath),
- ('??', '%s/0002-Add-a-new-file.patch' % relpatchpath)]
- for line in status:
- statusline = line.split(None, 1)
- for fstatus, fn in expectedstatus:
- if fn == statusline[1]:
- if fstatus != statusline[0]:
- self.fail('Unexpected status in line: %s' % line)
- break
- else:
- self.fail('Unexpected modified file in line: %s' % line)
+ expected_status = [(' M', os.path.relpath(recipefile, topleveldir)),
+ ('??', '%s/0001-Change-the-Makefile.patch' % relpatchpath),
+ ('??', '%s/0002-Add-a-new-file.patch' % relpatchpath)]
+ self._check_repo_status(os.path.dirname(recipefile), expected_status)
@testcase(1170)
def test_devtool_update_recipe_append(self):
# Check preconditions
- workspacedir = os.path.join(self.builddir, 'workspace')
- self.assertTrue(not os.path.exists(workspacedir), 'This test cannot be run with a workspace directory under the build directory')
testrecipe = 'mdadm'
recipefile = get_bb_var('FILE', testrecipe)
src_uri = get_bb_var('SRC_URI', testrecipe)
self.assertNotIn('git://', src_uri, 'This test expects the %s recipe to NOT be a git recipe' % testrecipe)
- result = runCmd('git status . --porcelain', cwd=os.path.dirname(recipefile))
- self.assertEqual(result.output.strip(), "", '%s recipe is not clean' % testrecipe)
+ self._check_repo_status(os.path.dirname(recipefile), [])
# First, modify a recipe
tempdir = tempfile.mkdtemp(prefix='devtoolqa')
tempsrcdir = os.path.join(tempdir, 'source')
templayerdir = os.path.join(tempdir, 'layer')
self.track_for_cleanup(tempdir)
- self.track_for_cleanup(workspacedir)
+ self.track_for_cleanup(self.workspacedir)
self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
# (don't bother with cleaning the recipe on teardown, we won't be building it)
result = runCmd('devtool modify %s -x %s' % (testrecipe, tempsrcdir))
# Check git repo
- self.assertTrue(os.path.isdir(os.path.join(tempsrcdir, '.git')), 'git repository for external source tree not found')
- result = runCmd('git status --porcelain', cwd=tempsrcdir)
- self.assertEqual(result.output.strip(), "", 'Created git repo is not clean')
- result = runCmd('git symbolic-ref HEAD', cwd=tempsrcdir)
- self.assertEqual(result.output.strip(), "refs/heads/devtool", 'Wrong branch in git repo')
+ self._check_src_repo(tempsrcdir)
# Add a commit
result = runCmd("sed 's!\\(#define VERSION\\W*\"[^\"]*\\)\"!\\1-custom\"!' -i ReadMe.c", cwd=tempsrcdir)
result = runCmd('git commit -a -m "Add our custom version"', cwd=tempsrcdir)
@@ -642,8 +619,7 @@ class DevtoolTests(DevtoolBase):
result = runCmd('devtool update-recipe %s -a %s' % (testrecipe, templayerdir))
self.assertNotIn('WARNING:', result.output)
# Check recipe is still clean
- result = runCmd('git status . --porcelain', cwd=os.path.dirname(recipefile))
- self.assertEqual(result.output.strip(), "", '%s recipe is not clean' % testrecipe)
+ self._check_repo_status(os.path.dirname(recipefile), [])
# Check bbappend was created
splitpath = os.path.dirname(recipefile).split(os.sep)
appenddir = os.path.join(templayerdir, splitpath[-2], splitpath[-1])
@@ -685,8 +661,6 @@ class DevtoolTests(DevtoolBase):
@testcase(1171)
def test_devtool_update_recipe_append_git(self):
# Check preconditions
- workspacedir = os.path.join(self.builddir, 'workspace')
- self.assertTrue(not os.path.exists(workspacedir), 'This test cannot be run with a workspace directory under the build directory')
testrecipe = 'mtd-utils'
recipefile = get_bb_var('FILE', testrecipe)
src_uri = get_bb_var('SRC_URI', testrecipe)
@@ -695,23 +669,18 @@ class DevtoolTests(DevtoolBase):
if entry.startswith('git://'):
git_uri = entry
break
- result = runCmd('git status . --porcelain', cwd=os.path.dirname(recipefile))
- self.assertEqual(result.output.strip(), "", '%s recipe is not clean' % testrecipe)
+ self._check_repo_status(os.path.dirname(recipefile), [])
# First, modify a recipe
tempdir = tempfile.mkdtemp(prefix='devtoolqa')
tempsrcdir = os.path.join(tempdir, 'source')
templayerdir = os.path.join(tempdir, 'layer')
self.track_for_cleanup(tempdir)
- self.track_for_cleanup(workspacedir)
+ self.track_for_cleanup(self.workspacedir)
self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
# (don't bother with cleaning the recipe on teardown, we won't be building it)
result = runCmd('devtool modify %s -x %s' % (testrecipe, tempsrcdir))
# Check git repo
- self.assertTrue(os.path.isdir(os.path.join(tempsrcdir, '.git')), 'git repository for external source tree not found')
- result = runCmd('git status --porcelain', cwd=tempsrcdir)
- self.assertEqual(result.output.strip(), "", 'Created git repo is not clean')
- result = runCmd('git symbolic-ref HEAD', cwd=tempsrcdir)
- self.assertEqual(result.output.strip(), "refs/heads/devtool", 'Wrong branch in git repo')
+ self._check_src_repo(tempsrcdir)
# Add a commit
result = runCmd('echo "# Additional line" >> Makefile', cwd=tempsrcdir)
result = runCmd('git commit -a -m "Change the Makefile"', cwd=tempsrcdir)
@@ -731,8 +700,7 @@ class DevtoolTests(DevtoolBase):
result = runCmd('devtool update-recipe -m srcrev %s -a %s' % (testrecipe, templayerdir))
self.assertNotIn('WARNING:', result.output)
# Check recipe is still clean
- result = runCmd('git status . --porcelain', cwd=os.path.dirname(recipefile))
- self.assertEqual(result.output.strip(), "", '%s recipe is not clean' % testrecipe)
+ self._check_repo_status(os.path.dirname(recipefile), [])
# Check bbappend was created
splitpath = os.path.dirname(recipefile).split(os.sep)
appenddir = os.path.join(templayerdir, splitpath[-2], splitpath[-1])
@@ -779,28 +747,104 @@ class DevtoolTests(DevtoolBase):
self.assertEqual(expectedlines, f.readlines())
# Deleting isn't expected to work under these circumstances
+ @testcase(1370)
+ def test_devtool_update_recipe_local_files(self):
+ """Check that local source files are copied over instead of patched"""
+ testrecipe = 'makedevs'
+ recipefile = get_bb_var('FILE', testrecipe)
+ # Setup srctree for modifying the recipe
+ tempdir = tempfile.mkdtemp(prefix='devtoolqa')
+ self.track_for_cleanup(tempdir)
+ self.track_for_cleanup(self.workspacedir)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
+ # (don't bother with cleaning the recipe on teardown, we won't be
+ # building it)
+ result = runCmd('devtool modify %s -x %s' % (testrecipe, tempdir))
+ # Check git repo
+ self._check_src_repo(tempdir)
+ # Edit / commit local source
+ runCmd('echo "/* Foobar */" >> oe-local-files/makedevs.c', cwd=tempdir)
+ runCmd('echo "Foo" > oe-local-files/new-local', cwd=tempdir)
+ runCmd('echo "Bar" > new-file', cwd=tempdir)
+ runCmd('git add new-file', cwd=tempdir)
+ runCmd('git commit -m "Add new file"', cwd=tempdir)
+ self.add_command_to_tearDown('cd %s; git clean -fd .; git checkout .' %
+ os.path.dirname(recipefile))
+ runCmd('devtool update-recipe %s' % testrecipe)
+ expected_status = [(' M', '.*/%s$' % os.path.basename(recipefile)),
+ (' M', '.*/makedevs/makedevs.c$'),
+ ('??', '.*/makedevs/new-local$'),
+ ('??', '.*/makedevs/0001-Add-new-file.patch$')]
+ self._check_repo_status(os.path.dirname(recipefile), expected_status)
+
+ @testcase(1371)
+ def test_devtool_update_recipe_local_files_2(self):
+ """Check local source files support when oe-local-files is in Git"""
+ testrecipe = 'lzo'
+ recipefile = get_bb_var('FILE', testrecipe)
+ # Setup srctree for modifying the recipe
+ tempdir = tempfile.mkdtemp(prefix='devtoolqa')
+ self.track_for_cleanup(tempdir)
+ self.track_for_cleanup(self.workspacedir)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
+ result = runCmd('devtool modify %s -x %s' % (testrecipe, tempdir))
+ # Check git repo
+ self._check_src_repo(tempdir)
+ # Add oe-local-files to Git
+ runCmd('rm oe-local-files/.gitignore', cwd=tempdir)
+ runCmd('git add oe-local-files', cwd=tempdir)
+ runCmd('git commit -m "Add local sources"', cwd=tempdir)
+ # Edit / commit local sources
+ runCmd('echo "# Foobar" >> oe-local-files/acinclude.m4', cwd=tempdir)
+ runCmd('git commit -am "Edit existing file"', cwd=tempdir)
+ runCmd('git rm oe-local-files/run-ptest', cwd=tempdir)
+        runCmd('git commit -m "Remove file"', cwd=tempdir)
+ runCmd('echo "Foo" > oe-local-files/new-local', cwd=tempdir)
+ runCmd('git add oe-local-files/new-local', cwd=tempdir)
+ runCmd('git commit -m "Add new local file"', cwd=tempdir)
+ runCmd('echo "Gar" > new-file', cwd=tempdir)
+ runCmd('git add new-file', cwd=tempdir)
+ runCmd('git commit -m "Add new file"', cwd=tempdir)
+ self.add_command_to_tearDown('cd %s; git clean -fd .; git checkout .' %
+ os.path.dirname(recipefile))
+ # Checkout unmodified file to working copy -> devtool should still pick
+ # the modified version from HEAD
+ runCmd('git checkout HEAD^ -- oe-local-files/acinclude.m4', cwd=tempdir)
+ runCmd('devtool update-recipe %s' % testrecipe)
+ expected_status = [(' M', '.*/%s$' % os.path.basename(recipefile)),
+ (' M', '.*/acinclude.m4$'),
+ (' D', '.*/run-ptest$'),
+ ('??', '.*/new-local$'),
+ ('??', '.*/0001-Add-new-file.patch$')]
+ self._check_repo_status(os.path.dirname(recipefile), expected_status)
+
@testcase(1163)
def test_devtool_extract(self):
- # Check preconditions
- workspacedir = os.path.join(self.builddir, 'workspace')
- self.assertTrue(not os.path.exists(workspacedir), 'This test cannot be run with a workspace directory under the build directory')
tempdir = tempfile.mkdtemp(prefix='devtoolqa')
# Try devtool extract
self.track_for_cleanup(tempdir)
- self.track_for_cleanup(workspacedir)
+ self.track_for_cleanup(self.workspacedir)
self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
result = runCmd('devtool extract remake %s' % tempdir)
self.assertTrue(os.path.exists(os.path.join(tempdir, 'Makefile.am')), 'Extracted source could not be found')
- self.assertTrue(os.path.isdir(os.path.join(tempdir, '.git')), 'git repository for external source tree not found')
+ self._check_src_repo(tempdir)
+
+ @testcase(1379)
+ def test_devtool_extract_virtual(self):
+ tempdir = tempfile.mkdtemp(prefix='devtoolqa')
+ # Try devtool extract
+ self.track_for_cleanup(tempdir)
+ self.track_for_cleanup(self.workspacedir)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
+ result = runCmd('devtool extract virtual/libx11 %s' % tempdir)
+ self.assertTrue(os.path.exists(os.path.join(tempdir, 'Makefile.am')), 'Extracted source could not be found')
+ self._check_src_repo(tempdir)
@testcase(1168)
def test_devtool_reset_all(self):
- # Check preconditions
- workspacedir = os.path.join(self.builddir, 'workspace')
- self.assertTrue(not os.path.exists(workspacedir), 'This test cannot be run with a workspace directory under the build directory')
tempdir = tempfile.mkdtemp(prefix='devtoolqa')
self.track_for_cleanup(tempdir)
- self.track_for_cleanup(workspacedir)
+ self.track_for_cleanup(self.workspacedir)
self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
testrecipe1 = 'mdadm'
testrecipe2 = 'cronie'
@@ -823,6 +867,7 @@ class DevtoolTests(DevtoolBase):
matches2 = glob.glob(stampprefix2 + '*')
self.assertFalse(matches2, 'Stamp files exist for recipe %s that should have been cleaned' % testrecipe2)
+ @testcase(1272)
def test_devtool_deploy_target(self):
# NOTE: Whilst this test would seemingly be better placed as a runtime test,
# unfortunately the runtime tests run under bitbake and you can't run
@@ -846,8 +891,7 @@ class DevtoolTests(DevtoolBase):
break
else:
self.skipTest('No tap devices found - you must set up tap devices with scripts/runqemu-gen-tapdevs before running this test')
- workspacedir = os.path.join(self.builddir, 'workspace')
- self.assertTrue(not os.path.exists(workspacedir), 'This test cannot be run with a workspace directory under the build directory')
+ self.assertTrue(not os.path.exists(self.workspacedir), 'This test cannot be run with a workspace directory under the build directory')
# Definitions
testrecipe = 'mdadm'
testfile = '/sbin/mdadm'
@@ -863,7 +907,7 @@ class DevtoolTests(DevtoolBase):
# Try devtool modify
tempdir = tempfile.mkdtemp(prefix='devtoolqa')
self.track_for_cleanup(tempdir)
- self.track_for_cleanup(workspacedir)
+ self.track_for_cleanup(self.workspacedir)
self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
self.add_command_to_tearDown('bitbake -c clean %s' % testrecipe)
result = runCmd('devtool modify %s -x %s' % (testrecipe, tempdir))
@@ -908,18 +952,19 @@ class DevtoolTests(DevtoolBase):
result = runCmd('ssh %s root@%s %s' % (sshargs, qemu.ip, testcommand), ignore_status=True)
self.assertNotEqual(result, 0, 'undeploy-target did not remove command as it should have')
+ @testcase(1366)
def test_devtool_build_image(self):
"""Test devtool build-image plugin"""
# Check preconditions
- workspacedir = os.path.join(self.builddir, 'workspace')
- self.assertTrue(not os.path.exists(workspacedir), 'This test cannot be run with a workspace directory under the build directory')
+ self.assertTrue(not os.path.exists(self.workspacedir), 'This test cannot be run with a workspace directory under the build directory')
image = 'core-image-minimal'
- self.track_for_cleanup(workspacedir)
+ self.track_for_cleanup(self.workspacedir)
self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
self.add_command_to_tearDown('bitbake -c clean %s' % image)
bitbake('%s -c clean' % image)
# Add target and native recipes to workspace
- for recipe in ('mdadm', 'parted-native'):
+ recipes = ['mdadm', 'parted-native']
+ for recipe in recipes:
tempdir = tempfile.mkdtemp(prefix='devtoolqa')
self.track_for_cleanup(tempdir)
self.add_command_to_tearDown('bitbake -c clean %s' % recipe)
@@ -927,17 +972,24 @@ class DevtoolTests(DevtoolBase):
# Try to build image
result = runCmd('devtool build-image %s' % image)
self.assertNotEqual(result, 0, 'devtool build-image failed')
- # Check if image.bbappend has required content
- bbappend = os.path.join(workspacedir, 'appends', image+'.bbappend')
- self.assertTrue(os.path.isfile(bbappend), 'bbappend not created %s' % result.output)
- # NOTE: native recipe parted-native should not be in IMAGE_INSTALL_append
- self.assertTrue('IMAGE_INSTALL_append = " mdadm"\n' in open(bbappend).readlines(),
- 'IMAGE_INSTALL_append = " mdadm" not found in %s' % bbappend)
+ # Check if image contains expected packages
+ deploy_dir_image = get_bb_var('DEPLOY_DIR_IMAGE')
+ image_link_name = get_bb_var('IMAGE_LINK_NAME', image)
+ reqpkgs = [item for item in recipes if not item.endswith('-native')]
+ with open(os.path.join(deploy_dir_image, image_link_name + '.manifest'), 'r') as f:
+ for line in f:
+ splitval = line.split()
+ if splitval:
+ pkg = splitval[0]
+ if pkg in reqpkgs:
+ reqpkgs.remove(pkg)
+ if reqpkgs:
+ self.fail('The following packages were not present in the image as expected: %s' % ', '.join(reqpkgs))
+ @testcase(1367)
def test_devtool_upgrade(self):
# Check preconditions
- workspacedir = os.path.join(self.builddir, 'workspace')
- self.assertTrue(not os.path.exists(workspacedir), 'This test cannot be run with a workspace directory under the build directory')
+ self.assertTrue(not os.path.exists(self.workspacedir), 'This test cannot be run with a workspace directory under the build directory')
# Check parameters
result = runCmd('devtool upgrade -h')
for param in 'recipename srctree --version -V --branch -b --keep-temp --no-patch'.split():
@@ -955,9 +1007,9 @@ class DevtoolTests(DevtoolBase):
# Check if srctree at least is populated
         self.assertTrue(len(os.listdir(tempdir)) > 0, 'srctree (%s) should be populated with new (%s) source code' % (tempdir, version))
# Check new recipe folder is present
- self.assertTrue(os.path.exists(os.path.join(workspacedir,'recipes',recipe)), 'Recipe folder should exist')
+ self.assertTrue(os.path.exists(os.path.join(self.workspacedir,'recipes',recipe)), 'Recipe folder should exist')
# Check new recipe file is present
- self.assertTrue(os.path.exists(os.path.join(workspacedir,'recipes',recipe,"%s_%s.bb" % (recipe,version))), 'Recipe folder should exist')
+        self.assertTrue(os.path.exists(os.path.join(self.workspacedir,'recipes',recipe,"%s_%s.bb" % (recipe,version))), 'Recipe file should exist')
# Check devtool status and make sure recipe is present
result = runCmd('devtool status')
self.assertIn(recipe, result.output)
@@ -967,5 +1019,18 @@ class DevtoolTests(DevtoolBase):
result = runCmd('devtool status')
self.assertNotIn(recipe, result.output)
self.track_for_cleanup(tempdir)
- self.track_for_cleanup(workspacedir)
+ self.track_for_cleanup(self.workspacedir)
self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
+
+ @testcase(1352)
+ def test_devtool_layer_plugins(self):
+ """Test that devtool can use plugins from other layers.
+
+ This test executes the selftest-reverse command from meta-selftest."""
+
+ self.track_for_cleanup(self.workspacedir)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
+
+ s = "Microsoft Made No Profit From Anyone's Zunes Yo"
+ result = runCmd("devtool --quiet selftest-reverse \"%s\"" % s)
+ self.assertEqual(result.output, s[::-1])
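
The devtool tests above now call a shared DevtoolBase._check_repo_status() helper in place of the inline 'git status --porcelain' checks they used to carry. The helper's body is not part of this hunk, so the following is only a sketch reconstructed from the removed inline checks, not the actual implementation:

    import re
    from oeqa.utils.commands import runCmd

    def check_repo_status(repo_dir, expected_status):
        # expected_status is a list of (status, path_regex) pairs matched
        # against 'git status --porcelain' output; an empty list asserts
        # that the repository is clean.
        result = runCmd('git status . --porcelain', cwd=repo_dir)
        remaining = list(expected_status)
        for line in result.output.splitlines():
            for item in remaining:
                status, path_re = item
                # Porcelain lines are "XY <path>": two status characters,
                # one space, then the path
                if line[:2] == status and re.search(path_re, line[3:]):
                    remaining.remove(item)
                    break
            else:
                raise AssertionError('Unexpected modified file in status: %s' % line)
        if remaining:
            raise AssertionError('Expected changes not found: %s' % str(remaining))
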
diff --git a/yocto-poky/meta/lib/oeqa/selftest/imagefeatures.py b/yocto-poky/meta/lib/oeqa/selftest/imagefeatures.py
index fcffc423e..4efb0d92a 100644
--- a/yocto-poky/meta/lib/oeqa/selftest/imagefeatures.py
+++ b/yocto-poky/meta/lib/oeqa/selftest/imagefeatures.py
@@ -25,9 +25,7 @@ class ImageFeatures(oeSelfTest):
features = 'EXTRA_IMAGE_FEATURES = "ssh-server-openssh empty-root-password allow-empty-password"\n'
features += 'INHERIT += "extrausers"\n'
features += 'EXTRA_USERS_PARAMS = "useradd -p \'\' {}; usermod -s /bin/sh {};"'.format(self.test_user, self.test_user)
-
- # Append 'features' to local.conf
- self.append_config(features)
+ self.write_config(features)
# Build a core-image-minimal
bitbake('core-image-minimal')
@@ -53,9 +51,7 @@ class ImageFeatures(oeSelfTest):
features = 'EXTRA_IMAGE_FEATURES = "ssh-server-openssh allow-empty-password"\n'
features += 'INHERIT += "extrausers"\n'
features += 'EXTRA_USERS_PARAMS = "useradd -p \'\' {}; usermod -s /bin/sh {};"'.format(self.test_user, self.test_user)
-
- # Append 'features' to local.conf
- self.append_config(features)
+ self.write_config(features)
# Build a core-image-minimal
bitbake('core-image-minimal')
@@ -87,9 +83,7 @@ class ImageFeatures(oeSelfTest):
features += 'IMAGE_INSTALL_append = " openssh"\n'
features += 'EXTRA_IMAGE_FEATURES = "empty-root-password allow-empty-password package-management"\n'
features += 'RPMROOTFSDEPENDS_remove = "rpmresolve-native:do_populate_sysroot"'
-
- # Append 'features' to local.conf
- self.append_config(features)
+ self.write_config(features)
# Build a core-image-minimal
bitbake('core-image-minimal')
@@ -159,9 +153,7 @@ class ImageFeatures(oeSelfTest):
features = 'DISTRO_FEATURES_append = " wayland"\n'
features += 'CORE_IMAGE_EXTRA_INSTALL += "wayland weston"'
-
- # Append 'features' to local.conf
- self.append_config(features)
+ self.write_config(features)
# Build a core-image-weston
bitbake('core-image-weston')
diff --git a/yocto-poky/meta/lib/oeqa/selftest/layerappend.py b/yocto-poky/meta/lib/oeqa/selftest/layerappend.py
index a82a6c8b9..4de5034a9 100644
--- a/yocto-poky/meta/lib/oeqa/selftest/layerappend.py
+++ b/yocto-poky/meta/lib/oeqa/selftest/layerappend.py
@@ -46,10 +46,11 @@ FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"
SRC_URI_append += "file://appendtest.txt"
"""
- layerappend = "BBLAYERS += \"COREBASE/meta-layertest0 COREBASE/meta-layertest1 COREBASE/meta-layertest2\""
+ layerappend = ''
def tearDownLocal(self):
- ftools.remove_from_file(self.builddir + "/conf/bblayers.conf", self.layerappend.replace("COREBASE", self.builddir + "/.."))
+ if self.layerappend:
+ ftools.remove_from_file(self.builddir + "/conf/bblayers.conf", self.layerappend)
@testcase(1196)
def test_layer_appends(self):
@@ -79,7 +80,9 @@ SRC_URI_append += "file://appendtest.txt"
with open(layer + "/recipes-test/layerappendtest/appendtest.txt", "w") as f:
f.write("Layer 2 test")
self.track_for_cleanup(layer)
- ftools.append_file(self.builddir + "/conf/bblayers.conf", self.layerappend.replace("COREBASE", self.builddir + "/.."))
+
+ self.layerappend = "BBLAYERS += \"{0}/meta-layertest0 {0}/meta-layertest1 {0}/meta-layertest2\"".format(corebase)
+ ftools.append_file(self.builddir + "/conf/bblayers.conf", self.layerappend)
bitbake("layerappendtest")
data = ftools.read_file(stagingdir + "/appendtest.txt")
self.assertEqual(data, "Layer 2 test")
diff --git a/yocto-poky/meta/lib/oeqa/selftest/manifest.py b/yocto-poky/meta/lib/oeqa/selftest/manifest.py
new file mode 100644
index 000000000..44d0404c5
--- /dev/null
+++ b/yocto-poky/meta/lib/oeqa/selftest/manifest.py
@@ -0,0 +1,165 @@
+import unittest
+import os
+
+from oeqa.selftest.base import oeSelfTest
+from oeqa.utils.commands import get_bb_var, bitbake
+from oeqa.utils.decorators import testcase
+
+class ManifestEntry:
+    '''A manifest file entry that records which of its packages are missing'''
+ def __init__(self, entry):
+ self.file = entry
+ self.missing = []
+
+class VerifyManifest(oeSelfTest):
+ '''Tests for the manifest files and contents of an image'''
+
+ @classmethod
+ def check_manifest_entries(self, manifest, path):
+ manifest_errors = []
+ try:
+ with open(manifest, "r") as mfile:
+ for line in mfile:
+ manifest_entry = os.path.join(path, line.split()[0])
+ self.log.debug("{}: looking for {}"\
+ .format(self.classname, manifest_entry))
+ if not os.path.isfile(manifest_entry):
+ manifest_errors.append(manifest_entry)
+ self.log.debug("{}: {} not found"\
+ .format(self.classname, manifest_entry))
+ except OSError as e:
+ self.log.debug("{}: checking of {} failed"\
+ .format(self.classname, manifest))
+ raise e
+
+ return manifest_errors
+
+    # this will possibly move from here
+ @classmethod
+    def get_dir_from_bb_var(self, bb_var, target=None):
+        target = self.buildtarget if target is None else target
+        directory = get_bb_var(bb_var, target)
+ if not directory or not os.path.isdir(directory):
+ self.log.debug("{}: {} points to {} when target = {}"\
+ .format(self.classname, bb_var, directory, target))
+ raise OSError
+ return directory
+
+ @classmethod
+ def setUpClass(self):
+
+ self.buildtarget = 'core-image-minimal'
+ self.classname = 'VerifyManifest'
+
+ self.log.info("{}: doing bitbake {} as a prerequisite of the test"\
+ .format(self.classname, self.buildtarget))
+ if bitbake(self.buildtarget).status:
+            self.log.debug("{}: Failed to set up {}"\
+                    .format(self.classname, self.buildtarget))
+            raise unittest.SkipTest("{}: Cannot set up testing scenario"\
+                    .format(self.classname))
+
+ @testcase(1380)
+ def test_SDK_manifest_entries(self):
+        '''Verify that the SDK manifest entries exist; this may take a build'''
+
+        # setUpClass already bitbakes core-image-minimal; the SDK manifests
+        # additionally require a populate_sdk build here
+ sdktask = '-c populate_sdk'
+ bbargs = sdktask + ' ' + self.buildtarget
+ self.log.debug("{}: doing bitbake {} as a prerequisite of the test"\
+ .format(self.classname, bbargs))
+ if bitbake(bbargs).status:
+            self.log.debug("{}: Failed to bitbake {}"\
+                    .format(self.classname, bbargs))
+            raise unittest.SkipTest("{}: Cannot set up testing scenario"\
+                    .format(self.classname))
+
+
+        pkgdata_dir, reverse_dir = {}, {}
+        mfilename, mpath, m_entry = {}, {}, {}
+ # get manifest location based on target to query about
+        d_target = dict(target=self.buildtarget,
+                        host='nativesdk-packagegroup-sdk-host')
+ try:
+ mdir = self.get_dir_from_bb_var('SDK_DEPLOY', self.buildtarget)
+ for k in d_target.keys():
+ mfilename[k] = "{}-toolchain-{}.{}.manifest".format(
+ get_bb_var("SDK_NAME", self.buildtarget),
+ get_bb_var("SDK_VERSION", self.buildtarget),
+ k)
+ mpath[k] = os.path.join(mdir, mfilename[k])
+ if not os.path.isfile(mpath[k]):
+ self.log.debug("{}: {} does not exist".format(
+ self.classname, mpath[k]))
+ raise IOError
+ m_entry[k] = ManifestEntry(mpath[k])
+
+ pkgdata_dir[k] = self.get_dir_from_bb_var('PKGDATA_DIR',
+ d_target[k])
+ reverse_dir[k] = os.path.join(pkgdata_dir[k],
+ 'runtime-reverse')
+ if not os.path.exists(reverse_dir[k]):
+ self.log.debug("{}: {} does not exist".format(
+ self.classname, reverse_dir[k]))
+ raise IOError
+ except OSError:
+ raise unittest.SkipTest("{}: Error in obtaining manifest dirs"\
+ .format(self.classname))
+ except IOError:
+            msg = "{}: Error: cannot find manifests in the specified dir:\n{}"\
+ .format(self.classname, mdir)
+ self.fail(msg)
+
+ for k in d_target.keys():
+ self.log.debug("{}: Check manifest {}".format(
+ self.classname, m_entry[k].file))
+
+ m_entry[k].missing = self.check_manifest_entries(\
+ m_entry[k].file,reverse_dir[k])
+ if m_entry[k].missing:
+                msg = '{}: {} is missing the following entries'\
+ .format(self.classname, m_entry[k].file)
+ logmsg = msg+':\n'+'\n'.join(m_entry[k].missing)
+ self.log.debug(logmsg)
+ self.log.info(msg)
+ self.fail(logmsg)
+
+ @testcase(1381)
+ def test_image_manifest_entries(self):
+        '''Verify that the image manifest entries exist'''
+
+ # get manifest location based on target to query about
+ try:
+ mdir = self.get_dir_from_bb_var('DEPLOY_DIR_IMAGE',
+ self.buildtarget)
+ mfilename = get_bb_var("IMAGE_LINK_NAME", self.buildtarget)\
+ + ".manifest"
+ mpath = os.path.join(mdir, mfilename)
+ if not os.path.isfile(mpath): raise IOError
+ m_entry = ManifestEntry(mpath)
+
+ pkgdata_dir = {}
+ pkgdata_dir = self.get_dir_from_bb_var('PKGDATA_DIR',
+ self.buildtarget)
+ revdir = os.path.join(pkgdata_dir, 'runtime-reverse')
+ if not os.path.exists(revdir): raise IOError
+ except OSError:
+ raise unittest.SkipTest("{}: Error in obtaining manifest dirs"\
+ .format(self.classname))
+ except IOError:
+            msg = "{}: Error: cannot find manifests in dir:\n{}"\
+ .format(self.classname, mdir)
+ self.fail(msg)
+
+ self.log.debug("{}: Check manifest {}"\
+ .format(self.classname, m_entry.file))
+ m_entry.missing = self.check_manifest_entries(\
+ m_entry.file, revdir)
+ if m_entry.missing:
+            msg = '{}: {} is missing the following entries'\
+ .format(self.classname, m_entry.file)
+ logmsg = msg+':\n'+'\n'.join(m_entry.missing)
+ self.log.debug(logmsg)
+ self.log.info(msg)
+ self.fail(logmsg)
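
Both the reworked test_devtool_build_image check and the new manifest tests above rely on the same convention: each line of an image or SDK .manifest names one package in its first whitespace-separated column. A standalone illustration of that parsing, with an invented manifest path and package list:

    # Check that a set of required packages appears in an image manifest.
    # The path and package names below are illustrative only.
    reqpkgs = ['mdadm', 'parted']
    manifest = '/path/to/deploy/images/qemux86/core-image-minimal-qemux86.manifest'
    with open(manifest) as f:
        for line in f:
            splitval = line.split()
            if splitval and splitval[0] in reqpkgs:
                reqpkgs.remove(splitval[0])
    # any names left in reqpkgs were not installed into the image
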
diff --git a/yocto-poky/meta/lib/oeqa/selftest/recipetool.py b/yocto-poky/meta/lib/oeqa/selftest/recipetool.py
index c34ad6887..b1f1d2ab9 100644
--- a/yocto-poky/meta/lib/oeqa/selftest/recipetool.py
+++ b/yocto-poky/meta/lib/oeqa/selftest/recipetool.py
@@ -492,9 +492,12 @@ class RecipetoolAppendsrcBase(RecipetoolBase):
class RecipetoolAppendsrcTests(RecipetoolAppendsrcBase):
+
+ @testcase(1273)
def test_recipetool_appendsrcfile_basic(self):
self._test_appendsrcfile('base-files', 'a-file')
+ @testcase(1274)
def test_recipetool_appendsrcfile_basic_wildcard(self):
testrecipe = 'base-files'
self._test_appendsrcfile(testrecipe, 'a-file', options='-w')
@@ -502,12 +505,15 @@ class RecipetoolAppendsrcTests(RecipetoolAppendsrcBase):
bbappendfile = self._check_bbappend(testrecipe, recipefile, self.templayerdir)
self.assertEqual(os.path.basename(bbappendfile), '%s_%%.bbappend' % testrecipe)
+ @testcase(1281)
def test_recipetool_appendsrcfile_subdir_basic(self):
self._test_appendsrcfile('base-files', 'a-file', 'tmp')
+ @testcase(1282)
def test_recipetool_appendsrcfile_subdir_basic_dirdest(self):
self._test_appendsrcfile('base-files', destdir='tmp')
+ @testcase(1280)
def test_recipetool_appendsrcfile_srcdir_basic(self):
testrecipe = 'bash'
srcdir = get_bb_var('S', testrecipe)
@@ -515,12 +521,14 @@ class RecipetoolAppendsrcTests(RecipetoolAppendsrcBase):
subdir = os.path.relpath(srcdir, workdir)
self._test_appendsrcfile(testrecipe, 'a-file', srcdir=subdir)
+ @testcase(1275)
def test_recipetool_appendsrcfile_existing_in_src_uri(self):
testrecipe = 'base-files'
filepath = self._get_first_file_uri(testrecipe)
self.assertTrue(filepath, 'Unable to test, no file:// uri found in SRC_URI for %s' % testrecipe)
self._test_appendsrcfile(testrecipe, filepath, has_src_uri=False)
+ @testcase(1276)
def test_recipetool_appendsrcfile_existing_in_src_uri_diff_params(self):
testrecipe = 'base-files'
subdir = 'tmp'
@@ -530,6 +538,7 @@ class RecipetoolAppendsrcTests(RecipetoolAppendsrcBase):
output = self._test_appendsrcfile(testrecipe, filepath, subdir, has_src_uri=False)
self.assertTrue(any('with different parameters' in l for l in output))
+ @testcase(1277)
def test_recipetool_appendsrcfile_replace_file_srcdir(self):
testrecipe = 'bash'
filepath = 'Makefile.in'
@@ -541,6 +550,7 @@ class RecipetoolAppendsrcTests(RecipetoolAppendsrcBase):
bitbake('%s:do_unpack' % testrecipe)
self.assertEqual(open(self.testfile, 'r').read(), open(os.path.join(srcdir, filepath), 'r').read())
+ @testcase(1278)
def test_recipetool_appendsrcfiles_basic(self, destdir=None):
newfiles = [self.testfile]
for i in range(1, 5):
@@ -550,5 +560,6 @@ class RecipetoolAppendsrcTests(RecipetoolAppendsrcBase):
newfiles.append(testfile)
self._test_appendsrcfiles('gcc', newfiles, destdir=destdir, options='-W')
+ @testcase(1279)
def test_recipetool_appendsrcfiles_basic_subdir(self):
self.test_recipetool_appendsrcfiles_basic(destdir='testdir')
diff --git a/yocto-poky/meta/lib/oeqa/selftest/sstatetests.py b/yocto-poky/meta/lib/oeqa/selftest/sstatetests.py
index c4efc47fe..3c230620e 100644
--- a/yocto-poky/meta/lib/oeqa/selftest/sstatetests.py
+++ b/yocto-poky/meta/lib/oeqa/selftest/sstatetests.py
@@ -34,7 +34,7 @@ class SStateTests(SStateBase):
targetarch = get_bb_var('TUNE_ARCH')
self.run_test_sstate_creation(['binutils-cross-'+ targetarch, 'binutils-native'], distro_specific=True, distro_nonspecific=False, temp_sstate_location=True)
- @testcase(975)
+ @testcase(1374)
def test_sstate_creation_distro_specific_fail(self):
targetarch = get_bb_var('TUNE_ARCH')
self.run_test_sstate_creation(['binutils-cross-'+ targetarch, 'binutils-native'], distro_specific=False, distro_nonspecific=True, temp_sstate_location=True, should_pass=False)
@@ -43,7 +43,7 @@ class SStateTests(SStateBase):
def test_sstate_creation_distro_nonspecific_pass(self):
self.run_test_sstate_creation(['glibc-initial'], distro_specific=False, distro_nonspecific=True, temp_sstate_location=True)
- @testcase(976)
+ @testcase(1375)
def test_sstate_creation_distro_nonspecific_fail(self):
self.run_test_sstate_creation(['glibc-initial'], distro_specific=True, distro_nonspecific=False, temp_sstate_location=True, should_pass=False)
@@ -70,11 +70,11 @@ class SStateTests(SStateBase):
targetarch = get_bb_var('TUNE_ARCH')
self.run_test_cleansstate_task(['binutils-cross-' + targetarch, 'binutils-native', 'glibc-initial'], distro_specific=True, distro_nonspecific=True, temp_sstate_location=True)
- @testcase(977)
+ @testcase(1376)
def test_cleansstate_task_distro_nonspecific(self):
self.run_test_cleansstate_task(['glibc-initial'], distro_specific=False, distro_nonspecific=True, temp_sstate_location=True)
- @testcase(977)
+ @testcase(1377)
def test_cleansstate_task_distro_specific(self):
targetarch = get_bb_var('TUNE_ARCH')
self.run_test_cleansstate_task(['binutils-cross-'+ targetarch, 'binutils-native', 'glibc-initial'], distro_specific=True, distro_nonspecific=False, temp_sstate_location=True)
@@ -111,12 +111,12 @@ class SStateTests(SStateBase):
targetarch = get_bb_var('TUNE_ARCH')
self.run_test_rebuild_distro_specific_sstate(['binutils-cross-' + targetarch, 'binutils-native'], temp_sstate_location=True)
- @testcase(175)
+ @testcase(1372)
def test_rebuild_distro_specific_sstate_cross_target(self):
targetarch = get_bb_var('TUNE_ARCH')
self.run_test_rebuild_distro_specific_sstate(['binutils-cross-' + targetarch], temp_sstate_location=True)
- @testcase(175)
+ @testcase(1373)
def test_rebuild_distro_specific_sstate_native_target(self):
self.run_test_rebuild_distro_specific_sstate(['binutils-native'], temp_sstate_location=True)
@@ -211,6 +211,8 @@ class SStateTests(SStateBase):
they're built on a 32 or 64 bit system. Rather than requiring two different
         build machines and running builds, override the variables calling uname()
manually and check using bitbake -S.
+
+        Also check that changing SDKMACHINE doesn't change any of these stamps.
"""
topdir = get_bb_var('TOPDIR')
@@ -219,6 +221,7 @@ class SStateTests(SStateBase):
TMPDIR = \"${TOPDIR}/tmp-sstatesamehash\"
BUILD_ARCH = \"x86_64\"
BUILD_OS = \"linux\"
+SDKMACHINE = \"x86_64\"
""")
self.track_for_cleanup(topdir + "/tmp-sstatesamehash")
bitbake("core-image-sato -S none")
@@ -226,6 +229,7 @@ BUILD_OS = \"linux\"
TMPDIR = \"${TOPDIR}/tmp-sstatesamehash2\"
BUILD_ARCH = \"i686\"
BUILD_OS = \"linux\"
+SDKMACHINE = \"i686\"
""")
self.track_for_cleanup(topdir + "/tmp-sstatesamehash2")
bitbake("core-image-sato -S none")
@@ -233,11 +237,16 @@ BUILD_OS = \"linux\"
def get_files(d):
f = []
for root, dirs, files in os.walk(d):
+ if "core-image-sato" in root:
+ # SDKMACHINE changing will change do_rootfs/do_testimage/do_build stamps of core-image-sato itself
+ # which is safe to ignore
+ continue
f.extend(os.path.join(root, name) for name in files)
return f
files1 = get_files(topdir + "/tmp-sstatesamehash/stamps/")
files2 = get_files(topdir + "/tmp-sstatesamehash2/stamps/")
files2 = [x.replace("tmp-sstatesamehash2", "tmp-sstatesamehash").replace("i686-linux", "x86_64-linux").replace("i686" + targetvendor + "-linux", "x86_64" + targetvendor + "-linux", ) for x in files2]
+ self.maxDiff = None
self.assertItemsEqual(files1, files2)
@@ -271,11 +280,13 @@ NATIVELSBSTRING = \"DistroB\"
files1 = get_files(topdir + "/tmp-sstatesamehash/stamps/")
files2 = get_files(topdir + "/tmp-sstatesamehash2/stamps/")
files2 = [x.replace("tmp-sstatesamehash2", "tmp-sstatesamehash") for x in files2]
+ self.maxDiff = None
self.assertItemsEqual(files1, files2)
+ @testcase(1368)
def test_sstate_allarch_samesigs(self):
"""
- The sstate checksums off allarch packages should be independent of whichever
+ The sstate checksums of allarch packages should be independent of whichever
MACHINE is set. Check this using bitbake -S.
Also, rather than duplicate the test, check nativesdk stamps are the same between
the two MACHINE values.
@@ -319,4 +330,50 @@ MACHINE = \"qemuarm\"
files2 = [x.replace("tmp-sstatesamehash2", "tmp-sstatesamehash") for x in files2]
self.maxDiff = None
self.assertItemsEqual(files1, files2)
-
+
+ @testcase(1369)
+ def test_sstate_sametune_samesigs(self):
+ """
+ The sstate checksums of two identical machines (using the same tune) should be the
+ same, apart from changes within the machine specific stamps directory. We use the
+ qemux86copy machine to test this. Also include multilibs in the test.
+ """
+
+ topdir = get_bb_var('TOPDIR')
+ targetos = get_bb_var('TARGET_OS')
+ targetvendor = get_bb_var('TARGET_VENDOR')
+ self.write_config("""
+TMPDIR = \"${TOPDIR}/tmp-sstatesamehash\"
+MACHINE = \"qemux86\"
+require conf/multilib.conf
+MULTILIBS = "multilib:lib32"
+DEFAULTTUNE_virtclass-multilib-lib32 = "x86"
+""")
+ self.track_for_cleanup(topdir + "/tmp-sstatesamehash")
+ bitbake("world meta-toolchain -S none")
+ self.write_config("""
+TMPDIR = \"${TOPDIR}/tmp-sstatesamehash2\"
+MACHINE = \"qemux86copy\"
+require conf/multilib.conf
+MULTILIBS = "multilib:lib32"
+DEFAULTTUNE_virtclass-multilib-lib32 = "x86"
+""")
+ self.track_for_cleanup(topdir + "/tmp-sstatesamehash2")
+ bitbake("world meta-toolchain -S none")
+
+ def get_files(d):
+ f = []
+ for root, dirs, files in os.walk(d):
+ for name in files:
+ if "meta-environment" in root or "cross-canadian" in root:
+ continue
+ if "qemux86copy-" in root or "qemux86-" in root:
+ continue
+ if "do_build" not in name and "do_populate_sdk" not in name:
+ f.append(os.path.join(root, name))
+ return f
+ files1 = get_files(topdir + "/tmp-sstatesamehash/stamps")
+ files2 = get_files(topdir + "/tmp-sstatesamehash2/stamps")
+ files2 = [x.replace("tmp-sstatesamehash2", "tmp-sstatesamehash") for x in files2]
+ self.maxDiff = None
+ self.assertItemsEqual(files1, files2)
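
The sstate signature tests above all follow the same pattern: run 'bitbake ... -S none' twice into separate TMPDIRs, then compare the stamp file names once the path differences are normalised away. A condensed sketch of that comparison step, assuming the two tmp-sstatesamehash* trees have already been populated as in the tests above:

    import os
    from oeqa.utils.commands import get_bb_var

    def stamp_files(stampdir):
        # Collect every stamp file path under a stamps directory.
        found = []
        for root, dirs, files in os.walk(stampdir):
            found.extend(os.path.join(root, name) for name in files)
        return found

    topdir = get_bb_var('TOPDIR')
    files1 = stamp_files(topdir + '/tmp-sstatesamehash/stamps/')
    files2 = stamp_files(topdir + '/tmp-sstatesamehash2/stamps/')
    # Map the second build's paths onto the first before comparing the two sets
    files2 = [x.replace('tmp-sstatesamehash2', 'tmp-sstatesamehash') for x in files2]
    assert sorted(files1) == sorted(files2)
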
diff --git a/yocto-poky/meta/lib/oeqa/selftest/wic.py b/yocto-poky/meta/lib/oeqa/selftest/wic.py
index 3dc54a4c6..ea78e2259 100644
--- a/yocto-poky/meta/lib/oeqa/selftest/wic.py
+++ b/yocto-poky/meta/lib/oeqa/selftest/wic.py
@@ -31,50 +31,54 @@ from shutil import rmtree
from oeqa.selftest.base import oeSelfTest
from oeqa.utils.commands import runCmd, bitbake, get_bb_var
+from oeqa.utils.decorators import testcase
+
class Wic(oeSelfTest):
"""Wic test class."""
resultdir = "/var/tmp/wic/build/"
+ image_is_ready = False
- @classmethod
- def setUpClass(cls):
- """Build wic runtime dependencies."""
- bitbake('syslinux syslinux-native parted-native gptfdisk-native '
- 'dosfstools-native mtools-native')
- Wic.image_is_ready = False
-
- def setUp(self):
+ def setUpLocal(self):
"""This code is executed before each test method."""
+ self.write_config('IMAGE_FSTYPES += " hddimg"\nMACHINE_FEATURES_append = " efi"\n')
+
+ # Do this here instead of in setUpClass as the base setUp does some
+ # clean up which can result in the native tools built earlier in
+ # setUpClass being unavailable.
if not Wic.image_is_ready:
- # build core-image-minimal with required features
- features = 'IMAGE_FSTYPES += " hddimg"\nMACHINE_FEATURES_append = " efi"\n'
- self.append_config(features)
+ bitbake('syslinux syslinux-native parted-native gptfdisk-native '
+ 'dosfstools-native mtools-native')
bitbake('core-image-minimal')
- # set this class variable to avoid buiding image many times
Wic.image_is_ready = True
rmtree(self.resultdir, ignore_errors=True)
- def test01_help(self):
+ @testcase(1208)
+ def test_help(self):
"""Test wic --help"""
self.assertEqual(0, runCmd('wic --help').status)
- def test02_createhelp(self):
+ @testcase(1209)
+ def test_createhelp(self):
"""Test wic create --help"""
self.assertEqual(0, runCmd('wic create --help').status)
- def test03_listhelp(self):
+ @testcase(1210)
+ def test_listhelp(self):
"""Test wic list --help"""
self.assertEqual(0, runCmd('wic list --help').status)
- def test04_build_image_name(self):
+ @testcase(1211)
+ def test_build_image_name(self):
"""Test wic create directdisk --image-name core-image-minimal"""
self.assertEqual(0, runCmd("wic create directdisk "
"--image-name core-image-minimal").status)
self.assertEqual(1, len(glob(self.resultdir + "directdisk-*.direct")))
- def test05_build_artifacts(self):
+ @testcase(1212)
+ def test_build_artifacts(self):
"""Test wic create directdisk providing all artifacts."""
vars = dict((var.lower(), get_bb_var(var, 'core-image-minimal')) \
for var in ('STAGING_DATADIR', 'DEPLOY_DIR_IMAGE',
@@ -87,34 +91,41 @@ class Wic(oeSelfTest):
self.assertEqual(0, status)
self.assertEqual(1, len(glob(self.resultdir + "directdisk-*.direct")))
- def test06_gpt_image(self):
+ @testcase(1157)
+ def test_gpt_image(self):
"""Test creation of core-image-minimal with gpt table and UUID boot"""
self.assertEqual(0, runCmd("wic create directdisk-gpt "
"--image-name core-image-minimal").status)
self.assertEqual(1, len(glob(self.resultdir + "directdisk-*.direct")))
- def test07_unsupported_subcommand(self):
+ @testcase(1213)
+ def test_unsupported_subcommand(self):
"""Test unsupported subcommand"""
self.assertEqual(1, runCmd('wic unsupported',
ignore_status=True).status)
- def test08_no_command(self):
+ @testcase(1214)
+ def test_no_command(self):
"""Test wic without command"""
self.assertEqual(1, runCmd('wic', ignore_status=True).status)
- def test09_help_kickstart(self):
+ @testcase(1215)
+ def test_help_overview(self):
"""Test wic help overview"""
self.assertEqual(0, runCmd('wic help overview').status)
- def test10_help_plugins(self):
+ @testcase(1216)
+ def test_help_plugins(self):
"""Test wic help plugins"""
self.assertEqual(0, runCmd('wic help plugins').status)
- def test11_help_kickstart(self):
+ @testcase(1217)
+ def test_help_kickstart(self):
"""Test wic help kickstart"""
self.assertEqual(0, runCmd('wic help kickstart').status)
- def test12_compress_gzip(self):
+ @testcase(1264)
+ def test_compress_gzip(self):
"""Test compressing an image with gzip"""
self.assertEqual(0, runCmd("wic create directdisk "
"--image-name core-image-minimal "
@@ -122,7 +133,8 @@ class Wic(oeSelfTest):
self.assertEqual(1, len(glob(self.resultdir + \
"directdisk-*.direct.gz")))
- def test13_compress_gzip(self):
+ @testcase(1265)
+ def test_compress_bzip2(self):
"""Test compressing an image with bzip2"""
self.assertEqual(0, runCmd("wic create directdisk "
"--image-name core-image-minimal "
@@ -130,7 +142,8 @@ class Wic(oeSelfTest):
self.assertEqual(1, len(glob(self.resultdir + \
"directdisk-*.direct.bz2")))
- def test14_compress_gzip(self):
+ @testcase(1266)
+ def test_compress_xz(self):
"""Test compressing an image with xz"""
self.assertEqual(0, runCmd("wic create directdisk "
"--image-name core-image-minimal "
@@ -138,13 +151,15 @@ class Wic(oeSelfTest):
self.assertEqual(1, len(glob(self.resultdir + \
"directdisk-*.direct.xz")))
- def test15_wrong_compressor(self):
+ @testcase(1267)
+ def test_wrong_compressor(self):
"""Test how wic breaks if wrong compressor is provided"""
self.assertEqual(2, runCmd("wic create directdisk "
"--image-name core-image-minimal "
"-c wrong", ignore_status=True).status)
- def test16_rootfs_indirect_recipes(self):
+ @testcase(1268)
+ def test_rootfs_indirect_recipes(self):
"""Test usage of rootfs plugin with rootfs recipes"""
wks = "directdisk-multi-rootfs"
self.assertEqual(0, runCmd("wic create %s "
@@ -154,7 +169,8 @@ class Wic(oeSelfTest):
% wks).status)
self.assertEqual(1, len(glob(self.resultdir + "%s*.direct" % wks)))
- def test17_rootfs_artifacts(self):
+ @testcase(1269)
+ def test_rootfs_artifacts(self):
"""Test usage of rootfs plugin with rootfs paths"""
vars = dict((var.lower(), get_bb_var(var, 'core-image-minimal')) \
for var in ('STAGING_DATADIR', 'DEPLOY_DIR_IMAGE',
@@ -171,14 +187,16 @@ class Wic(oeSelfTest):
self.assertEqual(1, len(glob(self.resultdir + \
"%(wks)s-*.direct" % vars)))
- def test18_iso_image(self):
- """Test creation of hybrid iso imagewith legacy and EFI boot"""
+ @testcase(1346)
+ def test_iso_image(self):
+ """Test creation of hybrid iso image with legacy and EFI boot"""
self.assertEqual(0, runCmd("wic create mkhybridiso "
"--image-name core-image-minimal").status)
self.assertEqual(1, len(glob(self.resultdir + "HYBRID_ISO_IMG-*.direct")))
self.assertEqual(1, len(glob(self.resultdir + "HYBRID_ISO_IMG-*.iso")))
- def test19_image_env(self):
+ @testcase(1347)
+ def test_image_env(self):
"""Test generation of <image>.env files."""
image = 'core-image-minimal'
stdir = get_bb_var('STAGING_DIR_TARGET', image)
@@ -200,7 +218,8 @@ class Wic(oeSelfTest):
self.assertTrue(var in content, "%s is not in .env file" % var)
self.assertTrue(content[var])
- def test20_wic_image_type(self):
+ @testcase(1351)
+ def test_wic_image_type(self):
"""Test building wic images by bitbake"""
self.assertEqual(0, bitbake('wic-image-minimal').status)
@@ -214,21 +233,24 @@ class Wic(oeSelfTest):
self.assertTrue(os.path.islink(path))
self.assertTrue(os.path.isfile(os.path.realpath(path)))
- def test21_qemux86_directdisk(self):
+ @testcase(1348)
+ def test_qemux86_directdisk(self):
"""Test creation of qemux-86-directdisk image"""
image = "qemux86-directdisk"
self.assertEqual(0, runCmd("wic create %s -e core-image-minimal" \
% image).status)
self.assertEqual(1, len(glob(self.resultdir + "%s-*direct" % image)))
- def test22_mkgummidisk(self):
+ @testcase(1349)
+ def test_mkgummidisk(self):
"""Test creation of mkgummidisk image"""
image = "mkgummidisk"
self.assertEqual(0, runCmd("wic create %s -e core-image-minimal" \
% image).status)
self.assertEqual(1, len(glob(self.resultdir + "%s-*direct" % image)))
- def test23_mkefidisk(self):
+ @testcase(1350)
+ def test_mkefidisk(self):
"""Test creation of mkefidisk image"""
image = "mkefidisk"
self.assertEqual(0, runCmd("wic create %s -e core-image-minimal" \
diff --git a/yocto-poky/meta/lib/oeqa/utils/decorators.py b/yocto-poky/meta/lib/oeqa/utils/decorators.py
index b6adcb184..0d79223a2 100644
--- a/yocto-poky/meta/lib/oeqa/utils/decorators.py
+++ b/yocto-poky/meta/lib/oeqa/utils/decorators.py
@@ -33,6 +33,10 @@ class getResults(object):
ret.append(s.replace("setUpModule (", "").replace(")",""))
else:
ret.append(s)
+ # Append also the test without the full path
+ testname = s.split('.')[-1]
+ if testname:
+ ret.append(testname)
return ret
self.faillist = handleList(upperf.f_locals['result'].failures)
self.errorlist = handleList(upperf.f_locals['result'].errors)
@@ -53,11 +57,11 @@ class skipIfFailure(object):
self.testcase = testcase
def __call__(self,f):
- def wrapped_f(*args):
+ def wrapped_f(*args, **kwargs):
res = getResults()
if self.testcase in (res.getFailList() or res.getErrorList()):
raise unittest.SkipTest("Testcase dependency not met: %s" % self.testcase)
- return f(*args)
+ return f(*args, **kwargs)
wrapped_f.__name__ = f.__name__
return wrapped_f
@@ -67,11 +71,11 @@ class skipIfSkipped(object):
self.testcase = testcase
def __call__(self,f):
- def wrapped_f(*args):
+ def wrapped_f(*args, **kwargs):
res = getResults()
if self.testcase in res.getSkipList():
raise unittest.SkipTest("Testcase dependency not met: %s" % self.testcase)
- return f(*args)
+ return f(*args, **kwargs)
wrapped_f.__name__ = f.__name__
return wrapped_f
@@ -81,13 +85,13 @@ class skipUnlessPassed(object):
self.testcase = testcase
def __call__(self,f):
- def wrapped_f(*args):
+ def wrapped_f(*args, **kwargs):
res = getResults()
if self.testcase in res.getSkipList() or \
self.testcase in res.getFailList() or \
self.testcase in res.getErrorList():
raise unittest.SkipTest("Testcase dependency not met: %s" % self.testcase)
- return f(*args)
+ return f(*args, **kwargs)
wrapped_f.__name__ = f.__name__
wrapped_f._depends_on = self.testcase
return wrapped_f
@@ -98,8 +102,8 @@ class testcase(object):
self.test_case = test_case
def __call__(self, func):
- def wrapped_f(*args):
- return func(*args)
+ def wrapped_f(*args, **kwargs):
+ return func(*args, **kwargs)
wrapped_f.test_case = self.test_case
wrapped_f.__name__ = func.__name__
return wrapped_f
@@ -111,6 +115,12 @@ class NoParsingFilter(logging.Filter):
def LogResults(original_class):
orig_method = original_class.run
+ from time import strftime, gmtime
+ caller = os.path.basename(sys.argv[0])
+ timestamp = strftime('%Y%m%d%H%M%S',gmtime())
+ logfile = os.path.join(os.getcwd(),'results-'+caller+'.'+timestamp+'.log')
+ linkfile = os.path.join(os.getcwd(),'results-'+caller+'.log')
+
#rewrite the run method of unittest.TestCase to add testcase logging
def run(self, result, *args, **kws):
orig_method(self, result, *args, **kws)
@@ -127,14 +137,13 @@ def LogResults(original_class):
#create custom logging level for filtering.
custom_log_level = 100
logging.addLevelName(custom_log_level, 'RESULTS')
- caller = os.path.basename(sys.argv[0])
def results(self, message, *args, **kws):
if self.isEnabledFor(custom_log_level):
self.log(custom_log_level, message, *args, **kws)
logging.Logger.results = results
- logging.basicConfig(filename=os.path.join(os.getcwd(),'results-'+caller+'.log'),
+ logging.basicConfig(filename=logfile,
filemode='w',
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
datefmt='%H:%M:%S',
@@ -162,7 +171,13 @@ def LogResults(original_class):
if passed:
local_log.results("Testcase "+str(test_case)+": PASSED")
+ # Create symlink to the current log
+ if os.path.exists(linkfile):
+ os.remove(linkfile)
+ os.symlink(logfile, linkfile)
+
original_class.run = run
+
return original_class
class TimeOut(BaseException):
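
Each decorator change above adds **kwargs forwarding alongside *args so that decorated test methods can accept keyword arguments (for example test_recipetool_appendsrcfiles_basic(self, destdir=None), which is re-invoked with destdir='testdir' earlier in this diff). A compact illustration of why that matters, written as a plain function decorator rather than the class-based decorators used here:

    def testcase(test_case):
        # Simplified, function-based stand-in for the class-based decorator above.
        def decorator(func):
            def wrapped_f(*args, **kwargs):
                return func(*args, **kwargs)   # both positional and keyword args pass through
            wrapped_f.test_case = test_case
            wrapped_f.__name__ = func.__name__
            return wrapped_f
        return decorator

    @testcase(1279)
    def demo(destdir=None):
        return destdir

    demo(destdir='testdir')   # works; with *args-only forwarding this would raise TypeError
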
diff --git a/yocto-poky/meta/lib/oeqa/utils/dump.py b/yocto-poky/meta/lib/oeqa/utils/dump.py
index 4ae871c65..63a591d36 100644
--- a/yocto-poky/meta/lib/oeqa/utils/dump.py
+++ b/yocto-poky/meta/lib/oeqa/utils/dump.py
@@ -16,9 +16,20 @@ class BaseDumper(object):
def __init__(self, cmds, parent_dir):
self.cmds = []
- self.parent_dir = parent_dir
+        # Some tests don't inherit testimage, so we need to
+        # set some sensible defaults here.
+ self.parent_dir = parent_dir or "/tmp/oe-saved-tests"
+ dft_cmds = """ top -bn1
+ iostat -x -z -N -d -p ALL 20 2
+ ps -ef
+ free
+ df
+ memstat
+ dmesg
+ ip -s link
+ netstat -an"""
if not cmds:
- return
+ cmds = dft_cmds
for cmd in cmds.split('\n'):
cmd = cmd.lstrip()
if not cmd or cmd[0] == '#':
diff --git a/yocto-poky/meta/lib/oeqa/utils/ftools.py b/yocto-poky/meta/lib/oeqa/utils/ftools.py
index 64ebe3d21..1bd9a30a4 100644
--- a/yocto-poky/meta/lib/oeqa/utils/ftools.py
+++ b/yocto-poky/meta/lib/oeqa/utils/ftools.py
@@ -1,12 +1,19 @@
import os
import re
+import errno
def write_file(path, data):
+ # In case data is None, return immediately
+ if data is None:
+ return
wdata = data.rstrip() + "\n"
with open(path, "w") as f:
f.write(wdata)
def append_file(path, data):
+ # In case data is None, return immediately
+ if data is None:
+ return
wdata = data.rstrip() + "\n"
with open(path, "a") as f:
f.write(wdata)
@@ -18,7 +25,18 @@ def read_file(path):
return data
def remove_from_file(path, data):
- lines = read_file(path).splitlines()
+ # In case data is None, return immediately
+ if data is None:
+ return
+ try:
+ rdata = read_file(path)
+ except IOError as e:
+        # if the file does not exist, just return; otherwise re-raise the exception
+ if e.errno == errno.ENOENT:
+ return
+ else:
+ raise
+ lines = rdata.splitlines()
rmdata = data.strip().splitlines()
for l in rmdata:
for c in range(0, lines.count(l)):
diff --git a/yocto-poky/meta/lib/oeqa/utils/qemurunner.py b/yocto-poky/meta/lib/oeqa/utils/qemurunner.py
index d32c9db46..bdc6e0a8f 100644
--- a/yocto-poky/meta/lib/oeqa/utils/qemurunner.py
+++ b/yocto-poky/meta/lib/oeqa/utils/qemurunner.py
@@ -13,12 +13,20 @@ import re
import socket
import select
import errno
+import string
import threading
+import codecs
from oeqa.utils.dump import HostDumper
import logging
logger = logging.getLogger("BitBake.QemuRunner")
+# Get the non-printable Unicode control characters
+control_range = range(0,32)+range(127,160)
+control_chars = [unichr(x) for x in control_range
+ if unichr(x) not in string.printable]
+re_control_char = re.compile('[%s]' % re.escape("".join(control_chars)))
+
class QemuRunner:
def __init__(self, machine, rootfs, display, tmpdir, deploy_dir_image, logfile, boottime, dump_dir, dump_host_cmds):
@@ -61,7 +69,10 @@ class QemuRunner:
def log(self, msg):
if self.logfile:
- with open(self.logfile, "a") as f:
+            # Sanitize the data received from qemu, as it may
+            # contain control characters
+ msg = re_control_char.sub('', unicode(msg, 'utf-8'))
+ with codecs.open(self.logfile, "a", encoding="utf-8") as f:
f.write("%s" % msg)
def getOutput(self, o):
@@ -170,6 +181,9 @@ class QemuRunner:
cmdline = ''
with open('/proc/%s/cmdline' % self.qemupid) as p:
cmdline = p.read()
+ # The data received needs to be sanitized,
+ # because it may contain control characters
+ cmdline = re_control_char.sub('', cmdline)
try:
ips = re.findall("((?:[0-9]{1,3}\.){3}[0-9]{1,3})", cmdline.split("ip=")[1])
if not ips or len(ips) != 3:
@@ -186,7 +200,6 @@ class QemuRunner:
logger.info("Target IP: %s" % self.ip)
logger.info("Server IP: %s" % self.server_ip)
- logger.info("Starting logging thread")
self.thread = LoggingThread(self.log, threadsock, logger)
self.thread.start()
if not self.thread.connection_established.wait(self.boottime):
@@ -197,6 +210,7 @@ class QemuRunner:
self.stop_thread()
return False
+ logger.info("Output from runqemu:\n%s", self.getOutput(output))
logger.info("Waiting at most %d seconds for login banner" % self.boottime)
endtime = time.time() + self.boottime
socklist = [self.server_socket]
@@ -259,8 +273,9 @@ class QemuRunner:
def stop(self):
self.stop_thread()
- if self.runqemu:
+ if hasattr(self, "origchldhandler"):
signal.signal(signal.SIGCHLD, self.origchldhandler)
+ if self.runqemu:
os.kill(self.monitorpid, signal.SIGKILL)
logger.info("Sending SIGTERM to runqemu")
try:
@@ -280,7 +295,6 @@ class QemuRunner:
self.server_socket = None
self.qemupid = None
self.ip = None
- signal.signal(signal.SIGCHLD, self.origchldhandler)
def stop_thread(self):
if self.thread and self.thread.is_alive():
@@ -440,9 +454,9 @@ class LoggingThread(threading.Thread):
def eventloop(self):
poll = select.poll()
- eventmask = self.errorevents | self.readevents
+ event_read_mask = self.errorevents | self.readevents
poll.register(self.serversock.fileno())
- poll.register(self.readpipe, eventmask)
+ poll.register(self.readpipe, event_read_mask)
breakout = False
self.running = True
@@ -466,7 +480,7 @@ class LoggingThread(threading.Thread):
self.readsock, _ = self.serversock.accept()
self.readsock.setblocking(0)
poll.unregister(self.serversock.fileno())
- poll.register(self.readsock.fileno())
+ poll.register(self.readsock.fileno(), event_read_mask)
self.logger.info("Setting connection established event")
self.connection_established.set()
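The qemurunner changes strip terminal control characters before console output is appended to a UTF-8 log, since a guest can emit escape sequences that would otherwise corrupt the file. The table construction above is Python 2 (`unichr()`, `range()` concatenation); a Python 3 rendering of the same idea, for illustration only:

```python
import re
import string

# Collect the C0/C1 control characters that are not considered printable
# and build a regex character class from them, as the hunk above does.
control_range = list(range(0, 32)) + list(range(127, 160))
control_chars = [chr(x) for x in control_range
                 if chr(x) not in string.printable]
re_control_char = re.compile('[%s]' % re.escape(''.join(control_chars)))

def sanitize(msg):
    """Drop terminal control characters so the log stays clean UTF-8 text."""
    if isinstance(msg, bytes):
        msg = msg.decode('utf-8', errors='replace')
    return re_control_char.sub('', msg)

print(repr(sanitize(b'login:\x1b[0m \x07')))   # -> 'login:[0m '
```

Tabs, newlines and carriage returns are in `string.printable`, so they survive the substitution and the log keeps its line structure.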
diff --git a/yocto-poky/meta/recipes-bsp/gnu-efi/gnu-efi/gcc46-compatibility.patch b/yocto-poky/meta/recipes-bsp/gnu-efi/gnu-efi/gcc46-compatibility.patch
new file mode 100644
index 000000000..0ce6d7b0c
--- /dev/null
+++ b/yocto-poky/meta/recipes-bsp/gnu-efi/gnu-efi/gcc46-compatibility.patch
@@ -0,0 +1,21 @@
+don't break with old compilers and -DGNU_EFI_USE_MS_ABI
+It's entirely legitimate to request GNU_EFI_USE_MS_ABI even if the current
+compiler doesn't support it, and gnu-efi should transparently fall back to
+using legacy techniques to set the calling convention. We don't get type
+checking, but at least it will still compile.
+
+Author: Steve Langasek <steve.langasek@ubuntu.com>
+Upstream-Status: Pending
+Index: gnu-efi-3.0.3/inc/x86_64/efibind.h
+===================================================================
+--- gnu-efi-3.0.3.orig/inc/x86_64/efibind.h
++++ gnu-efi-3.0.3/inc/x86_64/efibind.h
+@@ -25,8 +25,6 @@ Revision History
+ #if defined(GNU_EFI_USE_MS_ABI)
+ #if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7))
+ #define HAVE_USE_MS_ABI 1
+- #else
+- #error Compiler is too old for GNU_EFI_USE_MS_ABI
+ #endif
+ #endif
+
diff --git a/yocto-poky/meta/recipes-bsp/gnu-efi/gnu-efi_3.0.3.bb b/yocto-poky/meta/recipes-bsp/gnu-efi/gnu-efi_3.0.3.bb
index 1a1ba4022..eca34599c 100644
--- a/yocto-poky/meta/recipes-bsp/gnu-efi/gnu-efi_3.0.3.bb
+++ b/yocto-poky/meta/recipes-bsp/gnu-efi/gnu-efi_3.0.3.bb
@@ -18,6 +18,7 @@ SRC_URI = "${SOURCEFORGE_MIRROR}/${BPN}/${BP}.tar.bz2 \
file://parallel-make-archives.patch \
file://lib-Makefile-fix-parallel-issue.patch \
file://gnu-efi-Make-setjmp.S-portable-to-ARM.patch \
+ file://gcc46-compatibility.patch \
"
SRC_URI[md5sum] = "15a4bcbc18a9a5e8110ed955970622e6"
diff --git a/yocto-poky/meta/recipes-bsp/grub/files/CVE-2015-8370.patch b/yocto-poky/meta/recipes-bsp/grub/files/CVE-2015-8370.patch
new file mode 100644
index 000000000..78f514e03
--- /dev/null
+++ b/yocto-poky/meta/recipes-bsp/grub/files/CVE-2015-8370.patch
@@ -0,0 +1,59 @@
+From 451d80e52d851432e109771bb8febafca7a5f1f2 Mon Sep 17 00:00:00 2001
+From: Hector Marco-Gisbert <hecmargi@upv.es>
+Date: Wed, 16 Dec 2015 07:57:18 +0300
+Subject: [PATCH] Fix security issue when reading username and password
+
+This patch fixes two integer underflows at:
+ * grub-core/lib/crypto.c
+ * grub-core/normal/auth.c
+
+CVE-2015-8370
+
+Signed-off-by: Hector Marco-Gisbert <hecmargi@upv.es>
+Signed-off-by: Ismael Ripoll-Ripoll <iripoll@disca.upv.es>
+Also-By: Andrey Borzenkov <arvidjaar@gmail.com>
+
+Upstream-Status: Backport
+
+http://git.savannah.gnu.org/cgit/grub.git/commit/?id=451d80e52d851432e109771bb8febafca7a5f1f2
+
+CVE: CVE-2015-8370
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ grub-core/lib/crypto.c | 3 ++-
+ grub-core/normal/auth.c | 7 +++++--
+ 2 files changed, 7 insertions(+), 3 deletions(-)
+
+Index: git/grub-core/lib/crypto.c
+===================================================================
+--- git.orig/grub-core/lib/crypto.c
++++ git/grub-core/lib/crypto.c
+@@ -458,7 +458,8 @@ grub_password_get (char buf[], unsigned
+
+ if (key == '\b')
+ {
+- cur_len--;
++ if (cur_len)
++ cur_len--;
+ continue;
+ }
+
+Index: git/grub-core/normal/auth.c
+===================================================================
+--- git.orig/grub-core/normal/auth.c
++++ git/grub-core/normal/auth.c
+@@ -174,8 +174,11 @@ grub_username_get (char buf[], unsigned
+
+ if (key == '\b')
+ {
+- cur_len--;
+- grub_printf ("\b");
++ if (cur_len)
++ {
++ cur_len--;
++ grub_printf ("\b");
++ }
+ continue;
+ }
+
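The fix above only decrements `cur_len` when it is non-zero, so repeated backspaces on an empty buffer can no longer wrap the unsigned counter (CVE-2015-8370). Python has no unsigned wraparound, but the same defensive pattern in a toy input reader (purely illustrative, not GRUB code) looks like this:

```python
def read_secret(keys):
    """Collect characters, handling backspace defensively like the patch."""
    buf = []
    for key in keys:
        if key == '\b':
            # Guard the decrement: in C, popping an empty buffer would
            # wrap an unsigned length counter instead of being a no-op.
            if buf:
                buf.pop()
            continue
        if key == '\r':
            break
        buf.append(key)
    return ''.join(buf)

assert read_secret('\b\b\bpw\b1\r') == 'p1'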
diff --git a/yocto-poky/meta/recipes-bsp/grub/grub2.inc b/yocto-poky/meta/recipes-bsp/grub/grub2.inc
index 312771b47..fe2407cef 100644
--- a/yocto-poky/meta/recipes-bsp/grub/grub2.inc
+++ b/yocto-poky/meta/recipes-bsp/grub/grub2.inc
@@ -27,6 +27,7 @@ SRC_URI = "ftp://ftp.gnu.org/gnu/grub/grub-${PV}.tar.gz \
file://0001-Unset-need_charset_alias-when-building-for-musl.patch \
file://0001-parse_dhcp_vendor-Add-missing-const-qualifiers.patch \
file://grub2-fix-initrd-size-bug.patch \
+ file://CVE-2015-8370.patch \
"
DEPENDS = "flex-native bison-native xz"
diff --git a/yocto-poky/meta/recipes-bsp/gummiboot/gummiboot/0001-console-Fix-C-syntax-errors-for-function-declaration.patch b/yocto-poky/meta/recipes-bsp/gummiboot/gummiboot/0001-console-Fix-C-syntax-errors-for-function-declaration.patch
new file mode 100644
index 000000000..fa50bc4a6
--- /dev/null
+++ b/yocto-poky/meta/recipes-bsp/gummiboot/gummiboot/0001-console-Fix-C-syntax-errors-for-function-declaration.patch
@@ -0,0 +1,74 @@
+From 55957faf1272c8f5f304909faeebf647a78e3701 Mon Sep 17 00:00:00 2001
+From: Khem Raj <raj.khem@gmail.com>
+Date: Wed, 9 Sep 2015 07:19:45 +0000
+Subject: [PATCH] console: Fix C syntax errors for function declaration
+
+To address this, the semicolons after the function parameters should be
+replaced by commas, and the last one should be omitted
+
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+---
+Upstream-Status: Pending
+
+ src/efi/console.c | 26 +++++++++++++-------------
+ 1 file changed, 13 insertions(+), 13 deletions(-)
+
+diff --git a/src/efi/console.c b/src/efi/console.c
+index 6206c80..66aa88f 100644
+--- a/src/efi/console.c
++++ b/src/efi/console.c
+@@ -27,8 +27,8 @@
+ struct _EFI_SIMPLE_TEXT_INPUT_EX_PROTOCOL;
+
+ typedef EFI_STATUS (EFIAPI *EFI_INPUT_RESET_EX)(
+- struct _EFI_SIMPLE_TEXT_INPUT_EX_PROTOCOL *This;
+- BOOLEAN ExtendedVerification;
++ struct _EFI_SIMPLE_TEXT_INPUT_EX_PROTOCOL *This,
++ BOOLEAN ExtendedVerification
+ );
+
+ typedef UINT8 EFI_KEY_TOGGLE_STATE;
+@@ -44,29 +44,29 @@ typedef struct {
+ } EFI_KEY_DATA;
+
+ typedef EFI_STATUS (EFIAPI *EFI_INPUT_READ_KEY_EX)(
+- struct _EFI_SIMPLE_TEXT_INPUT_EX_PROTOCOL *This;
+- EFI_KEY_DATA *KeyData;
++ struct _EFI_SIMPLE_TEXT_INPUT_EX_PROTOCOL *This,
++ EFI_KEY_DATA *KeyData
+ );
+
+ typedef EFI_STATUS (EFIAPI *EFI_SET_STATE)(
+- struct _EFI_SIMPLE_TEXT_INPUT_EX_PROTOCOL *This;
+- EFI_KEY_TOGGLE_STATE *KeyToggleState;
++ struct _EFI_SIMPLE_TEXT_INPUT_EX_PROTOCOL *This,
++ EFI_KEY_TOGGLE_STATE *KeyToggleState
+ );
+
+ typedef EFI_STATUS (EFIAPI *EFI_KEY_NOTIFY_FUNCTION)(
+- EFI_KEY_DATA *KeyData;
++ EFI_KEY_DATA *KeyData
+ );
+
+ typedef EFI_STATUS (EFIAPI *EFI_REGISTER_KEYSTROKE_NOTIFY)(
+- struct _EFI_SIMPLE_TEXT_INPUT_EX_PROTOCOL *This;
+- EFI_KEY_DATA KeyData;
+- EFI_KEY_NOTIFY_FUNCTION KeyNotificationFunction;
+- VOID **NotifyHandle;
++ struct _EFI_SIMPLE_TEXT_INPUT_EX_PROTOCOL *This,
++ EFI_KEY_DATA KeyData,
++ EFI_KEY_NOTIFY_FUNCTION KeyNotificationFunction,
++ VOID **NotifyHandle
+ );
+
+ typedef EFI_STATUS (EFIAPI *EFI_UNREGISTER_KEYSTROKE_NOTIFY)(
+- struct _EFI_SIMPLE_TEXT_INPUT_EX_PROTOCOL *This;
+- VOID *NotificationHandle;
++ struct _EFI_SIMPLE_TEXT_INPUT_EX_PROTOCOL *This,
++ VOID *NotificationHandle
+ );
+
+ typedef struct _EFI_SIMPLE_TEXT_INPUT_EX_PROTOCOL {
+--
+2.5.1
+
diff --git a/yocto-poky/meta/recipes-bsp/gummiboot/gummiboot_git.bb b/yocto-poky/meta/recipes-bsp/gummiboot/gummiboot_git.bb
index 91c3db930..376ab542d 100644
--- a/yocto-poky/meta/recipes-bsp/gummiboot/gummiboot_git.bb
+++ b/yocto-poky/meta/recipes-bsp/gummiboot/gummiboot_git.bb
@@ -13,6 +13,7 @@ PV = "48+git${SRCPV}"
SRCREV = "2bcd919c681c952eb867ef1bdb458f1bc49c2d55"
SRC_URI = "git://anongit.freedesktop.org/gummiboot \
file://fix-objcopy.patch \
+ file://0001-console-Fix-C-syntax-errors-for-function-declaration.patch \
"
# Note: Add COMPATIBLE_HOST here is only because it depends on gnu-efi
@@ -28,6 +29,8 @@ EXTRA_OECONF = "--disable-manpages --with-efi-includedir=${STAGING_INCDIR} \
EXTRA_OEMAKE += "gummibootlibdir=${libdir}/gummiboot"
+TUNE_CCARGS_remove = "-mfpmath=sse"
+
do_deploy () {
install ${B}/gummiboot*.efi ${DEPLOYDIR}
}
diff --git a/yocto-poky/meta/recipes-bsp/hostap/hostap-utils-0.4.7/0001-Define-_u32-__s32-__u16-__s16-__u8-in-terms-of-c99-t.patch b/yocto-poky/meta/recipes-bsp/hostap/hostap-utils-0.4.7/0001-Define-_u32-__s32-__u16-__s16-__u8-in-terms-of-c99-t.patch
new file mode 100644
index 000000000..b44dca3ec
--- /dev/null
+++ b/yocto-poky/meta/recipes-bsp/hostap/hostap-utils-0.4.7/0001-Define-_u32-__s32-__u16-__s16-__u8-in-terms-of-c99-t.patch
@@ -0,0 +1,36 @@
+From 742fb110d9841a04b3ced256b0bf80ff304dcaff Mon Sep 17 00:00:00 2001
+From: Khem Raj <raj.khem@gmail.com>
+Date: Mon, 31 Aug 2015 05:45:08 +0000
+Subject: [PATCH] Define _u32/__s32/__u16/__s16/__u8 in terms of c99 types
+
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+---
+Upstream-Status: Pending
+
+ wireless_copy.h | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/wireless_copy.h b/wireless_copy.h
+index 8208258..1171a35 100644
+--- a/wireless_copy.h
++++ b/wireless_copy.h
+@@ -86,11 +86,11 @@
+ #else
+ #include <sys/types.h>
+ #include <net/if.h>
+-typedef __uint32_t __u32;
+-typedef __int32_t __s32;
+-typedef __uint16_t __u16;
+-typedef __int16_t __s16;
+-typedef __uint8_t __u8;
++typedef u_int32_t __u32;
++typedef int32_t __s32;
++typedef u_int16_t __u16;
++typedef int16_t __s16;
++typedef u_int8_t __u8;
+ #ifndef __user
+ #define __user
+ #endif /* __user */
+--
+2.5.1
+
diff --git a/yocto-poky/meta/recipes-bsp/hostap/hostap-utils.inc b/yocto-poky/meta/recipes-bsp/hostap/hostap-utils.inc
index 89d977a65..140321d0c 100644
--- a/yocto-poky/meta/recipes-bsp/hostap/hostap-utils.inc
+++ b/yocto-poky/meta/recipes-bsp/hostap/hostap-utils.inc
@@ -10,7 +10,9 @@ SECTION = "kernel/userland"
PR = "r4"
SRC_URI = "http://hostap.epitest.fi/releases/hostap-utils-${PV}.tar.gz \
- file://hostap-fw-load.patch"
+ file://hostap-fw-load.patch \
+ file://0001-Define-_u32-__s32-__u16-__s16-__u8-in-terms-of-c99-t.patch \
+"
S = "${WORKDIR}/hostap-utils-${PV}"
BINARIES = "hostap_crypt_conf hostap_diag hostap_fw_load hostap_io_debug \
diff --git a/yocto-poky/meta/recipes-connectivity/avahi/avahi-ui_0.6.31.bb b/yocto-poky/meta/recipes-connectivity/avahi/avahi-ui_0.6.31.bb
index eea4d70fa..0d42b90e7 100644
--- a/yocto-poky/meta/recipes-connectivity/avahi/avahi-ui_0.6.31.bb
+++ b/yocto-poky/meta/recipes-connectivity/avahi/avahi-ui_0.6.31.bb
@@ -6,7 +6,8 @@ LIC_FILES_CHKSUM = "file://LICENSE;md5=2d5025d4aa3495befef8f17206a5b0a1 \
require avahi.inc
-inherit python-dir pythonnative
+inherit python-dir pythonnative distro_features_check
+ANY_OF_DISTRO_FEATURES = "${GTK2DISTROFEATURES}"
PACKAGECONFIG ??= "python"
PACKAGECONFIG[python] = "--enable-python,--disable-python,python-native python"
diff --git a/yocto-poky/meta/recipes-connectivity/bind/bind/0001-lib-dns-gen.c-fix-too-long-error.patch b/yocto-poky/meta/recipes-connectivity/bind/bind/0001-lib-dns-gen.c-fix-too-long-error.patch
new file mode 100644
index 000000000..1ed858cd3
--- /dev/null
+++ b/yocto-poky/meta/recipes-connectivity/bind/bind/0001-lib-dns-gen.c-fix-too-long-error.patch
@@ -0,0 +1,34 @@
+From 5bc3167a8b714ec0c4a3f1c7f3b9411296ec0a23 Mon Sep 17 00:00:00 2001
+From: Robert Yang <liezhi.yang@windriver.com>
+Date: Wed, 16 Sep 2015 20:23:47 -0700
+Subject: [PATCH] lib/dns/gen.c: fix too long error
+
+The 512 limit is a little short when building in a deep directory and causes
+a "too long" error; use PATH_MAX if it is defined.
+
+Upstream-Status: Pending
+
+Signed-off-by: Robert Yang <liezhi.yang@windriver.com>
+---
+ lib/dns/gen.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/lib/dns/gen.c b/lib/dns/gen.c
+index 51a0435..3d7214f 100644
+--- a/lib/dns/gen.c
++++ b/lib/dns/gen.c
+@@ -148,7 +148,11 @@ static const char copyright[] =
+ #define TYPECLASSBUF (TYPECLASSLEN + 1)
+ #define TYPECLASSFMT "%" STR(TYPECLASSLEN) "[-0-9a-z]_%d"
+ #define ATTRIBUTESIZE 256
++#ifdef PATH_MAX
++#define DIRNAMESIZE PATH_MAX
++#else
+ #define DIRNAMESIZE 512
++#endif
+
+ static struct cc {
+ struct cc *next;
+--
+1.7.9.5
+
diff --git a/yocto-poky/meta/recipes-connectivity/bind/bind/CVE-2015-8000.patch b/yocto-poky/meta/recipes-connectivity/bind/bind/CVE-2015-8000.patch
new file mode 100644
index 000000000..e1c805257
--- /dev/null
+++ b/yocto-poky/meta/recipes-connectivity/bind/bind/CVE-2015-8000.patch
@@ -0,0 +1,278 @@
+From 8259daad7242ab2af8731681177ef7e948a15ece Mon Sep 17 00:00:00 2001
+From: Mark Andrews <marka@isc.org>
+Date: Mon, 16 Nov 2015 13:12:20 +1100
+Subject: [PATCH] 4260. [security] Insufficient testing when parsing a
+ message allowed records with an incorrect class to be
+ be accepted, triggering a REQUIRE failure when those
+ records were subsequently cached. (CVE-2015-8000) [RT
+ #4098]
+
+(cherry picked from commit c8821d124c532e0a65752b378f924d4259499fd3)
+(cherry picked from commit 3a4c24c4a52d4a2d21d2decbde3d4e514e27d51c)
+
+
+Upstream-Status: Backport
+
+https://source.isc.org/cgi-bin/gitweb.cgi?p=bind9.git;a=commit;h=8259daad7242ab2af8731681177ef7e948a15ece
+
+CVE: CVE-2015-8000
+
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ CHANGES | 5 +++++
+ bin/tests/system/start.pl | 5 ++++-
+ doc/arm/notes.xml | 9 +++++++++
+ lib/dns/include/dns/message.h | 13 +++++++++++--
+ lib/dns/message.c | 45 ++++++++++++++++++++++++++++++++++++++-----
+ lib/dns/resolver.c | 9 +++++++++
+ lib/dns/xfrin.c | 2 ++
+ 7 files changed, 80 insertions(+), 8 deletions(-)
+
+Index: bind-9.10.2-P4/bin/tests/system/start.pl
+===================================================================
+--- bind-9.10.2-P4.orig/bin/tests/system/start.pl
++++ bind-9.10.2-P4/bin/tests/system/start.pl
+@@ -68,6 +68,7 @@ my $NAMED = $ENV{'NAMED'};
+ my $LWRESD = $ENV{'LWRESD'};
+ my $DIG = $ENV{'DIG'};
+ my $PERL = $ENV{'PERL'};
++my $PYTHON = $ENV{'PYTHON'};
+
+ # Start the server(s)
+
+@@ -213,7 +214,9 @@ sub start_server {
+ $pid_file = "lwresd.pid";
+ } elsif ($server =~ /^ans/) {
+ $cleanup_files = "{ans.run}";
+- if (-e "$testdir/$server/ans.pl") {
++ if (-e "$testdir/$server/ans.py") {
++ $command = "$PYTHON ans.py 10.53.0.$' 5300";
++ } elsif (-e "$testdir/$server/ans.pl") {
+ $command = "$PERL ans.pl";
+ } else {
+ $command = "$PERL $topdir/ans.pl 10.53.0.$'";
+Index: bind-9.10.2-P4/doc/arm/notes.xml
+===================================================================
+--- bind-9.10.2-P4.orig/doc/arm/notes.xml
++++ bind-9.10.2-P4/doc/arm/notes.xml
+@@ -62,6 +62,15 @@
+ <itemizedlist>
+ <listitem>
+ <para>
++ Insufficient testing when parsing a message allowed
++ records with an incorrect class to be be accepted,
++ triggering a REQUIRE failure when those records
++ were subsequently cached. This flaw is disclosed
++ in CVE-2015-8000. [RT #4098]
++ </para>
++ </listitem>
++ <listitem>
++ <para>
+ An incorrect boundary check in the OPENPGPKEY rdatatype
+ could trigger an assertion failure. This flaw is disclosed
+ in CVE-2015-5986. [RT #40286]
+Index: bind-9.10.2-P4/lib/dns/include/dns/message.h
+===================================================================
+--- bind-9.10.2-P4.orig/lib/dns/include/dns/message.h
++++ bind-9.10.2-P4/lib/dns/include/dns/message.h
+@@ -15,8 +15,6 @@
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+-/* $Id$ */
+-
+ #ifndef DNS_MESSAGE_H
+ #define DNS_MESSAGE_H 1
+
+@@ -221,6 +219,8 @@ struct dns_message {
+ unsigned int free_saved : 1;
+ unsigned int sitok : 1;
+ unsigned int sitbad : 1;
++ unsigned int tkey : 1;
++ unsigned int rdclass_set : 1;
+
+ unsigned int opt_reserved;
+ unsigned int sig_reserved;
+@@ -1400,6 +1400,15 @@ dns_message_buildopt(dns_message_t *msg,
+ * \li other.
+ */
+
++void
++dns_message_setclass(dns_message_t *msg, dns_rdataclass_t rdclass);
++/*%<
++ * Set the expected class of records in the response.
++ *
++ * Requires:
++ * \li msg be a valid message with parsing intent.
++ */
++
+ ISC_LANG_ENDDECLS
+
+ #endif /* DNS_MESSAGE_H */
+Index: bind-9.10.2-P4/lib/dns/message.c
+===================================================================
+--- bind-9.10.2-P4.orig/lib/dns/message.c
++++ bind-9.10.2-P4/lib/dns/message.c
+@@ -439,6 +439,8 @@ msginit(dns_message_t *m) {
+ m->free_saved = 0;
+ m->sitok = 0;
+ m->sitbad = 0;
++ m->tkey = 0;
++ m->rdclass_set = 0;
+ m->querytsig = NULL;
+ }
+
+@@ -1091,13 +1093,19 @@ getquestions(isc_buffer_t *source, dns_m
+ * If this class is different than the one we already read,
+ * this is an error.
+ */
+- if (msg->state == DNS_SECTION_ANY) {
+- msg->state = DNS_SECTION_QUESTION;
++ if (msg->rdclass_set == 0) {
+ msg->rdclass = rdclass;
++ msg->rdclass_set = 1;
+ } else if (msg->rdclass != rdclass)
+ DO_FORMERR;
+
+ /*
++ * Is this a TKEY query?
++ */
++ if (rdtype == dns_rdatatype_tkey)
++ msg->tkey = 1;
++
++ /*
+ * Can't ask the same question twice.
+ */
+ result = dns_message_find(name, rdclass, rdtype, 0, NULL);
+@@ -1241,12 +1249,12 @@ getsection(isc_buffer_t *source, dns_mes
+ * If there was no question section, we may not yet have
+ * established a class. Do so now.
+ */
+- if (msg->state == DNS_SECTION_ANY &&
++ if (msg->rdclass_set == 0 &&
+ rdtype != dns_rdatatype_opt && /* class is UDP SIZE */
+ rdtype != dns_rdatatype_tsig && /* class is ANY */
+ rdtype != dns_rdatatype_tkey) { /* class is undefined */
+ msg->rdclass = rdclass;
+- msg->state = DNS_SECTION_QUESTION;
++ msg->rdclass_set = 1;
+ }
+
+ /*
+@@ -1256,7 +1264,7 @@ getsection(isc_buffer_t *source, dns_mes
+ if (msg->opcode != dns_opcode_update
+ && rdtype != dns_rdatatype_tsig
+ && rdtype != dns_rdatatype_opt
+- && rdtype != dns_rdatatype_dnskey /* in a TKEY query */
++ && rdtype != dns_rdatatype_key /* in a TKEY query */
+ && rdtype != dns_rdatatype_sig /* SIG(0) */
+ && rdtype != dns_rdatatype_tkey /* Win2000 TKEY */
+ && msg->rdclass != dns_rdataclass_any
+@@ -1264,6 +1272,16 @@ getsection(isc_buffer_t *source, dns_mes
+ DO_FORMERR;
+
+ /*
++ * If this is not a TKEY query/response then the KEY
++ * record's class needs to match.
++ */
++ if (msg->opcode != dns_opcode_update && !msg->tkey &&
++ rdtype == dns_rdatatype_key &&
++ msg->rdclass != dns_rdataclass_any &&
++ msg->rdclass != rdclass)
++ DO_FORMERR;
++
++ /*
+ * Special type handling for TSIG, OPT, and TKEY.
+ */
+ if (rdtype == dns_rdatatype_tsig) {
+@@ -1377,6 +1395,10 @@ getsection(isc_buffer_t *source, dns_mes
+ skip_name_search = ISC_TRUE;
+ skip_type_search = ISC_TRUE;
+ issigzero = ISC_TRUE;
++ } else {
++ if (msg->rdclass != dns_rdataclass_any &&
++ msg->rdclass != rdclass)
++ DO_FORMERR;
+ }
+ } else
+ covers = 0;
+@@ -1625,6 +1647,7 @@ dns_message_parse(dns_message_t *msg, is
+ msg->counts[DNS_SECTION_ADDITIONAL] = isc_buffer_getuint16(source);
+
+ msg->header_ok = 1;
++ msg->state = DNS_SECTION_QUESTION;
+
+ /*
+ * -1 means no EDNS.
+@@ -3706,3 +3729,15 @@ dns_message_buildopt(dns_message_t *mess
+ dns_message_puttemprdatalist(message, &rdatalist);
+ return (result);
+ }
++
++void
++dns_message_setclass(dns_message_t *msg, dns_rdataclass_t rdclass) {
++
++ REQUIRE(DNS_MESSAGE_VALID(msg));
++ REQUIRE(msg->from_to_wire == DNS_MESSAGE_INTENTPARSE);
++ REQUIRE(msg->state == DNS_SECTION_ANY);
++ REQUIRE(msg->rdclass_set == 0);
++
++ msg->rdclass = rdclass;
++ msg->rdclass_set = 1;
++}
+Index: bind-9.10.2-P4/lib/dns/resolver.c
+===================================================================
+--- bind-9.10.2-P4.orig/lib/dns/resolver.c
++++ bind-9.10.2-P4/lib/dns/resolver.c
+@@ -7309,6 +7309,8 @@ resquery_response(isc_task_t *task, isc_
+ goto done;
+ }
+
++ dns_message_setclass(message, fctx->res->rdclass);
++
+ if ((options & DNS_FETCHOPT_TCP) == 0) {
+ if ((options & DNS_FETCHOPT_NOEDNS0) == 0)
+ dns_adb_setudpsize(fctx->adb, query->addrinfo,
+@@ -7391,6 +7393,13 @@ resquery_response(isc_task_t *task, isc_
+ &dns_master_style_comment,
+ ISC_LOG_DEBUG(10),
+ fctx->res->mctx);
++
++ if (message->rdclass != fctx->res->rdclass) {
++ resend = ISC_TRUE;
++ FCTXTRACE("bad class");
++ goto done;
++ }
++
+ /*
+ * Process receive opt record.
+ */
+Index: bind-9.10.2-P4/lib/dns/xfrin.c
+===================================================================
+--- bind-9.10.2-P4.orig/lib/dns/xfrin.c
++++ bind-9.10.2-P4/lib/dns/xfrin.c
+@@ -1225,6 +1225,8 @@ xfrin_recv_done(isc_task_t *task, isc_ev
+ msg->tsigctx = xfr->tsigctx;
+ xfr->tsigctx = NULL;
+
++ dns_message_setclass(msg, xfr->rdclass);
++
+ if (xfr->nmsg > 0)
+ msg->tcp_continuation = 1;
+
+Index: bind-9.10.2-P4/CHANGES
+===================================================================
+--- bind-9.10.2-P4.orig/CHANGES
++++ bind-9.10.2-P4/CHANGES
+@@ -1,4 +1,9 @@
+- --- 9.10.2-P4 released ---
++4260. [security] Insufficient testing when parsing a message allowed
++ records with an incorrect class to be be accepted,
++ triggering a REQUIRE failure when those records
++ were subsequently cached. (CVE-2015-8000) [RT #4098]
++
++ --- 9.10.2-P4 released ---
+
+ 4170. [security] An incorrect boundary check in the OPENPGPKEY
+ rdatatype could trigger an assertion failure.
diff --git a/yocto-poky/meta/recipes-connectivity/bind/bind/CVE-2015-8461.patch b/yocto-poky/meta/recipes-connectivity/bind/bind/CVE-2015-8461.patch
new file mode 100644
index 000000000..88e9c8342
--- /dev/null
+++ b/yocto-poky/meta/recipes-connectivity/bind/bind/CVE-2015-8461.patch
@@ -0,0 +1,44 @@
+From adbf81335b67be0cebdcf9f1f4fcb38ef4814f4d Mon Sep 17 00:00:00 2001
+From: Mark Andrews <marka@isc.org>
+Date: Thu, 25 Jun 2015 18:36:27 +1000
+Subject: [PATCH] 4146. [bug] Address reference leak that could
+ prevent a clean shutdown. [RT #37125]
+
+Upstream-Status: Backport
+
+https://source.isc.org/cgi-bin/gitweb.cgi?p=bind9.git;a=commit;h=adbf81335b67be0cebdcf9f1f4fcb38ef4814f4d
+
+CVE: CVE-2015-8461
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+---
+ CHANGES | 3 +++
+ lib/dns/resolver.c | 5 +++++
+ 2 files changed, 8 insertions(+)
+
+Index: bind-9.10.2-P4/CHANGES
+===================================================================
+--- bind-9.10.2-P4.orig/CHANGES
++++ bind-9.10.2-P4/CHANGES
+@@ -1,3 +1,6 @@
++4146. [bug] Address reference leak that could prevent a clean
++ shutdown. [RT #37125]
++
+ 4260. [security] Insufficient testing when parsing a message allowed
+ records with an incorrect class to be be accepted,
+ triggering a REQUIRE failure when those records
+Index: bind-9.10.2-P4/lib/dns/resolver.c
+===================================================================
+--- bind-9.10.2-P4.orig/lib/dns/resolver.c
++++ bind-9.10.2-P4/lib/dns/resolver.c
+@@ -1649,6 +1649,11 @@ fctx_query(fetchctx_t *fctx, dns_adbaddr
+ if (query->dispatch != NULL)
+ dns_dispatch_detach(&query->dispatch);
+
++ LOCK(&res->buckets[fctx->bucketnum].lock);
++ INSIST(fctx->references > 1);
++ fctx->references--;
++ UNLOCK(&res->buckets[fctx->bucketnum].lock);
++
+ cleanup_query:
+ if (query->connects == 0) {
+ query->magic = 0;
diff --git a/yocto-poky/meta/recipes-connectivity/bind/bind/CVE-2015-8704.patch b/yocto-poky/meta/recipes-connectivity/bind/bind/CVE-2015-8704.patch
new file mode 100644
index 000000000..d5bf740e8
--- /dev/null
+++ b/yocto-poky/meta/recipes-connectivity/bind/bind/CVE-2015-8704.patch
@@ -0,0 +1,28 @@
+a buffer size check can cause denial of service under certain circumstances
+
+[security]
+The following flaw in BIND was reported by ISC:
+
+A buffer size check used to guard against overflow could cause named to exit with an INSIST failure in apl_42.c.
+
+A server could exit due to an INSIST failure in apl_42.c when performing certain string formatting operations.
+
+Upstream-Status: Backport
+CVE: CVE-2015-8704
+
+[The patch is taken from BIND 9.10.3:
+https://bugzilla.redhat.com/show_bug.cgi?id=CVE-2015-8704]
+
+Signed-off-by: Derek Straka <derek@asterius.io>
+diff --git a/lib/dns/rdata/in_1/apl_42.c b/lib/dns/rdata/in_1/apl_42.c
+index bedd38e..28eb7f2 100644
+--- a/lib/dns/rdata/in_1/apl_42.c
++++ b/lib/dns/rdata/in_1/apl_42.c
+@@ -116,7 +116,7 @@ totext_in_apl(ARGS_TOTEXT) {
+ isc_uint8_t len;
+ isc_boolean_t neg;
+ unsigned char buf[16];
+- char txt[sizeof(" !64000")];
++ char txt[sizeof(" !64000:")];
+ const char *sep = "";
+ int n;
diff --git a/yocto-poky/meta/recipes-connectivity/bind/bind/CVE-2015-8705.patch b/yocto-poky/meta/recipes-connectivity/bind/bind/CVE-2015-8705.patch
new file mode 100644
index 000000000..c4a052d7b
--- /dev/null
+++ b/yocto-poky/meta/recipes-connectivity/bind/bind/CVE-2015-8705.patch
@@ -0,0 +1,44 @@
+a crash or assertion failure can occur during format processing
+
+[security]
+The following flaw in BIND was reported by ISC:
+
+In versions of BIND 9.10, errors can occur when OPT pseudo-RR data or ECS options are formatted to text. In 9.10.3 through 9.10.3-P2, the issue may result in a REQUIRE assertion failure in buffer.c.
+
+This issue can affect both authoritative and recursive servers if they are performing debug logging. (It may also crash related tools which use the same code, such as dig or delv.)
+
+A server could exit due to an INSIST failure in apl_42.c when performing certain string formatting operations.
+
+Upstream-Status: Backport
+CVE: CVE-2015-8705
+
+[The patch is taken from BIND 9.10.3:
+https://bugzilla.redhat.com/show_bug.cgi?id=CVE-2015-8705]
+
+Signed-off-by: Derek Straka <derek@asterius.io>
+diff --git a/lib/dns/message.c b/lib/dns/message.c
+index ea7b93a..810c58e 100644
+--- a/lib/dns/message.c
++++ b/lib/dns/message.c
+@@ -3310,9 +3310,19 @@
+ } else if (optcode == DNS_OPT_SIT) {
+ ADD_STRING(target, "; SIT");
+ } else if (optcode == DNS_OPT_CLIENT_SUBNET) {
++ isc_buffer_t ecsbuf;
+ ADD_STRING(target, "; CLIENT-SUBNET: ");
+- render_ecs(&optbuf, target);
+- ADD_STRING(target, "\n");
++ isc_buffer_init(&ecsbuf,
++ isc_buffer_current(&optbuf),
++ optlen);
++ isc_buffer_add(&ecsbuf, optlen);
++ result = render_ecs(&ecsbuf, target);
++ if (result == ISC_R_NOSPACE)
++ return (result);
++ if (result == ISC_R_SUCCESS) {
++ isc_buffer_forward(&optbuf, optlen);
++ ADD_STRING(target, "\n");
++ }
+ continue;
+ } else if (optcode == DNS_OPT_EXPIRE) {
+ if (optlen == 4) {
diff --git a/yocto-poky/meta/recipes-connectivity/bind/bind_9.10.2-P4.bb b/yocto-poky/meta/recipes-connectivity/bind/bind_9.10.2-P4.bb
index efae289b3..19f87d793 100644
--- a/yocto-poky/meta/recipes-connectivity/bind/bind_9.10.2-P4.bb
+++ b/yocto-poky/meta/recipes-connectivity/bind/bind_9.10.2-P4.bb
@@ -20,6 +20,11 @@ SRC_URI = "ftp://ftp.isc.org/isc/bind9/${PV}/${BPN}-${PV}.tar.gz \
file://0001-build-use-pkg-config-to-find-libxml2.patch \
file://bind-ensure-searching-for-json-headers-searches-sysr.patch \
file://0001-gen.c-extend-DIRNAMESIZE-from-256-to-512.patch \
+ file://0001-lib-dns-gen.c-fix-too-long-error.patch \
+ file://CVE-2015-8704.patch \
+ file://CVE-2015-8705.patch \
+ file://CVE-2015-8000.patch \
+ file://CVE-2015-8461.patch \
"
SRC_URI[md5sum] = "8b1f5064837756c938eadc1537dec5c7"
diff --git a/yocto-poky/meta/recipes-connectivity/bluez5/bluez5.inc b/yocto-poky/meta/recipes-connectivity/bluez5/bluez5.inc
index 039c44354..df42c88b9 100644
--- a/yocto-poky/meta/recipes-connectivity/bluez5/bluez5.inc
+++ b/yocto-poky/meta/recipes-connectivity/bluez5/bluez5.inc
@@ -18,7 +18,6 @@ PACKAGECONFIG[experimental] = "--enable-experimental,--disable-experimental,"
SRC_URI = "\
${KERNELORG_MIRROR}/linux/bluetooth/bluez-${PV}.tar.xz \
- file://bluetooth.conf \
"
S = "${WORKDIR}/bluez-${PV}"
@@ -53,8 +52,8 @@ do_install_append() {
if [ -f ${S}/profiles/input/input.conf ]; then
install -m 0644 ${S}/profiles/input/input.conf ${D}/${sysconfdir}/bluetooth/
fi
- # at_console doesn't really work with the current state of OE, so punch some more holes so people can actually use BT
- install -m 0644 ${WORKDIR}/bluetooth.conf ${D}/${sysconfdir}/dbus-1/system.d/
+
+ install -m 0644 ${S}/src/bluetooth.conf ${D}/${sysconfdir}/dbus-1/system.d/
# Install desired tools that upstream leaves in build area
for f in ${NOINST_TOOLS} ; do
diff --git a/yocto-poky/meta/recipes-connectivity/bluez5/bluez5/bluetooth.conf b/yocto-poky/meta/recipes-connectivity/bluez5/bluez5/bluetooth.conf
deleted file mode 100644
index 26845bb73..000000000
--- a/yocto-poky/meta/recipes-connectivity/bluez5/bluez5/bluetooth.conf
+++ /dev/null
@@ -1,17 +0,0 @@
-<!-- This configuration file specifies the required security policies
- for Bluetooth core daemon to work. -->
-
-<!DOCTYPE busconfig PUBLIC "-//freedesktop//DTD D-BUS Bus Configuration 1.0//EN"
- "http://www.freedesktop.org/standards/dbus/1.0/busconfig.dtd">
-<busconfig>
-
- <!-- ../system.conf have denied everything, so we just punch some holes -->
-
- <policy context="default">
- <allow own="org.bluez"/>
- <allow send_destination="org.bluez"/>
- <allow send_interface="org.bluez.Agent1"/>
- <allow send_type="method_call"/>
- </policy>
-
-</busconfig>
diff --git a/yocto-poky/meta/recipes-connectivity/connman/connman-conf.bb b/yocto-poky/meta/recipes-connectivity/connman/connman-conf.bb
index bd4c28d99..9254ed703 100644
--- a/yocto-poky/meta/recipes-connectivity/connman/connman-conf.bb
+++ b/yocto-poky/meta/recipes-connectivity/connman/connman-conf.bb
@@ -13,14 +13,14 @@ S = "${WORKDIR}"
PACKAGE_ARCH = "${MACHINE_ARCH}"
-FILES_${PN} = "${localstatedir}/* ${libdir}/*"
+FILES_${PN} = "${localstatedir}/* ${datadir}/*"
do_install() {
#Configure Wired network interface in case of qemu* machines
if test -e ${WORKDIR}/wired.config && test -e ${WORKDIR}/wired-setup; then
install -d ${D}${localstatedir}/lib/connman
install -m 0644 ${WORKDIR}/wired.config ${D}${localstatedir}/lib/connman
- install -d ${D}${libdir}/connman
- install -m 0755 ${WORKDIR}/wired-setup ${D}${libdir}/connman
+ install -d ${D}${datadir}/connman
+ install -m 0755 ${WORKDIR}/wired-setup ${D}${datadir}/connman
fi
}
diff --git a/yocto-poky/meta/recipes-connectivity/connman/connman-gnome_0.7.bb b/yocto-poky/meta/recipes-connectivity/connman/connman-gnome_0.7.bb
index f5575d293..7b875f00f 100644
--- a/yocto-poky/meta/recipes-connectivity/connman/connman-gnome_0.7.bb
+++ b/yocto-poky/meta/recipes-connectivity/connman/connman-gnome_0.7.bb
@@ -6,7 +6,7 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=eb723b61539feef013de476e68b5c50a \
file://properties/main.c;beginline=1;endline=20;md5=50c77c81871308b033ab7a1504626afb \
file://common/connman-dbus.c;beginline=1;endline=20;md5=de6b485c0e717a0236402d220187717a"
-DEPENDS = "gtk+ dbus-glib intltool-native"
+DEPENDS = "gtk+ dbus-glib intltool-native gettext-native"
# 0.7 tag
SRCREV = "cf3c325b23dae843c5499a113591cfbc98acb143"
@@ -19,7 +19,8 @@ SRC_URI = "git://github.com/connectivity/connman-gnome.git \
S = "${WORKDIR}/git"
-inherit autotools-brokensep gtk-icon-cache pkgconfig
+inherit autotools-brokensep gtk-icon-cache pkgconfig distro_features_check
+ANY_OF_DISTRO_FEATURES = "${GTK2DISTROFEATURES}"
RDEPENDS_${PN} = "connman"
diff --git a/yocto-poky/meta/recipes-connectivity/connman/connman.inc b/yocto-poky/meta/recipes-connectivity/connman/connman.inc
index 17dc4b938..afdb3f2d0 100644
--- a/yocto-poky/meta/recipes-connectivity/connman/connman.inc
+++ b/yocto-poky/meta/recipes-connectivity/connman/connman.inc
@@ -30,6 +30,7 @@ EXTRA_OECONF += "\
--disable-polkit \
--enable-client \
"
+CFLAGS += "-D_GNU_SOURCE"
PACKAGECONFIG ??= "wispr \
${@bb.utils.contains('DISTRO_FEATURES', 'systemd','systemd', '', d)} \
@@ -67,15 +68,9 @@ python __anonymous () {
SYSTEMD_SERVICE_${PN} = "connman.service"
SYSTEMD_SERVICE_${PN}-vpn = "connman-vpn.service"
-SYSTEMD_WIRED_SETUP = "ExecStartPre=-${libdir}/connman/wired-setup"
+SYSTEMD_WIRED_SETUP = "ExecStartPre=-${datadir}/connman/wired-setup"
-# This allows *everyone* to access ConnMan over DBus, without any access
-# control. Really the at_console flag should work, which would mean that
-# both this and the xuser patch can be dropped.
do_compile_append() {
- sed -i -e s:deny:allow:g ${S}/src/connman-dbus.conf
- sed -i -e s:deny:allow:g ${S}/vpn/vpn-dbus.conf
-
sed -i "s#ExecStart=#${SYSTEMD_WIRED_SETUP}\nExecStart=#" ${B}/src/connman.service
}
@@ -83,7 +78,7 @@ do_install_append() {
if ${@bb.utils.contains('DISTRO_FEATURES','sysvinit','true','false',d)}; then
install -d ${D}${sysconfdir}/init.d
install -m 0755 ${WORKDIR}/connman ${D}${sysconfdir}/init.d/connman
- sed -i s%@LIBDIR@%${libdir}% ${D}${sysconfdir}/init.d/connman
+ sed -i s%@DATADIR@%${datadir}% ${D}${sysconfdir}/init.d/connman
fi
install -d ${D}${bindir}
@@ -112,7 +107,6 @@ RPROVIDES_${PN} = "\
RDEPENDS_${PN} = "\
dbus \
- ${@base_conditional('ROOTLESS_X', '1', 'xuser-account', '', d)} \
"
PACKAGES_DYNAMIC += "^${PN}-plugin-.*"
diff --git a/yocto-poky/meta/recipes-connectivity/connman/connman/0001-Detect-backtrace-API-availability-before-using-it.patch b/yocto-poky/meta/recipes-connectivity/connman/connman/0001-Detect-backtrace-API-availability-before-using-it.patch
new file mode 100644
index 000000000..5dc6fd634
--- /dev/null
+++ b/yocto-poky/meta/recipes-connectivity/connman/connman/0001-Detect-backtrace-API-availability-before-using-it.patch
@@ -0,0 +1,55 @@
+From 00d4447395725abaa651e12ed40095081e04011e Mon Sep 17 00:00:00 2001
+From: Khem Raj <raj.khem@gmail.com>
+Date: Sun, 13 Sep 2015 13:22:01 -0700
+Subject: [PATCH 1/3] Detect backtrace() API availability before using it
+
+C libraries besides glibc do not have backtrace() implemented
+
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+---
+Upstream-Status: Pending
+
+ configure.ac | 2 ++
+ src/log.c | 5 ++---
+ 2 files changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/configure.ac b/configure.ac
+index 69c0eeb..90099f2 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -171,6 +171,8 @@ fi
+ AM_CONDITIONAL(PPTP, test "${enable_pptp}" != "no")
+ AM_CONDITIONAL(PPTP_BUILTIN, test "${enable_pptp}" = "builtin")
+
++AC_CHECK_HEADERS([execinfo.h])
++
+ AC_CHECK_HEADERS(resolv.h, dummy=yes,
+ AC_MSG_ERROR(resolver header files are required))
+ AC_CHECK_LIB(resolv, ns_initparse, dummy=yes, [
+diff --git a/src/log.c b/src/log.c
+index a693bd0..5b40c1f 100644
+--- a/src/log.c
++++ b/src/log.c
+@@ -30,7 +30,6 @@
+ #include <stdlib.h>
+ #include <string.h>
+ #include <syslog.h>
+-#include <execinfo.h>
+ #include <dlfcn.h>
+
+ #include "connman.h"
+@@ -215,9 +214,9 @@ static void print_backtrace(unsigned int offset)
+ static void signal_handler(int signo)
+ {
+ connman_error("Aborting (signal %d) [%s]", signo, program_exec);
+-
++#ifdef HAVE_EXECINFO_H
+ print_backtrace(2);
+-
++#endif /* HAVE_EXECINFO_H */
+ exit(EXIT_FAILURE);
+ }
+
+--
+2.5.1
+
diff --git a/yocto-poky/meta/recipes-connectivity/connman/connman/0002-resolve-musl-does-not-implement-res_ninit.patch b/yocto-poky/meta/recipes-connectivity/connman/connman/0002-resolve-musl-does-not-implement-res_ninit.patch
new file mode 100644
index 000000000..059342771
--- /dev/null
+++ b/yocto-poky/meta/recipes-connectivity/connman/connman/0002-resolve-musl-does-not-implement-res_ninit.patch
@@ -0,0 +1,77 @@
+From 10b0d16d04b811b1ccd1f9b0cfe757bce8d876a1 Mon Sep 17 00:00:00 2001
+From: Khem Raj <raj.khem@gmail.com>
+Date: Mon, 6 Apr 2015 23:02:21 -0700
+Subject: [PATCH 2/3] resolve: musl does not implement res_ninit
+
+ported from
+http://git.alpinelinux.org/cgit/aports/plain/testing/connman/libresolv.patch
+
+Upstream-Status: Pending
+
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+---
+ gweb/gresolv.c | 33 ++++++++++++---------------------
+ 1 file changed, 12 insertions(+), 21 deletions(-)
+
+diff --git a/gweb/gresolv.c b/gweb/gresolv.c
+index 5cf7a9a..3ad8e70 100644
+--- a/gweb/gresolv.c
++++ b/gweb/gresolv.c
+@@ -875,8 +875,6 @@ GResolv *g_resolv_new(int index)
+ resolv->index = index;
+ resolv->nameserver_list = NULL;
+
+- res_ninit(&resolv->res);
+-
+ return resolv;
+ }
+
+@@ -916,8 +914,6 @@ void g_resolv_unref(GResolv *resolv)
+
+ flush_nameservers(resolv);
+
+- res_nclose(&resolv->res);
+-
+ g_free(resolv);
+ }
+
+@@ -1020,24 +1016,19 @@ guint g_resolv_lookup_hostname(GResolv *resolv, const char *hostname,
+ debug(resolv, "hostname %s", hostname);
+
+ if (!resolv->nameserver_list) {
+- int i;
+-
+- for (i = 0; i < resolv->res.nscount; i++) {
+- char buf[100];
+- int family = resolv->res.nsaddr_list[i].sin_family;
+- void *sa_addr = &resolv->res.nsaddr_list[i].sin_addr;
+-
+- if (family != AF_INET &&
+- resolv->res._u._ext.nsaddrs[i]) {
+- family = AF_INET6;
+- sa_addr = &resolv->res._u._ext.nsaddrs[i]->sin6_addr;
++ FILE *f = fopen("/etc/resolv.conf", "r");
++ if (f) {
++ char line[256], *s;
++ int i;
++ while (fgets(line, sizeof(line), f)) {
++ if (strncmp(line, "nameserver", 10) || !isspace(line[10]))
++ continue;
++ for (s = &line[11]; isspace(s[0]); s++);
++ for (i = 0; s[i] && !isspace(s[i]); i++);
++ s[i] = 0;
++ g_resolv_add_nameserver(resolv, s, 53, 0);
+ }
+-
+- if (family != AF_INET && family != AF_INET6)
+- continue;
+-
+- if (inet_ntop(family, sa_addr, buf, sizeof(buf)))
+- g_resolv_add_nameserver(resolv, buf, 53, 0);
++ fclose(f);
+ }
+
+ if (!resolv->nameserver_list)
+--
+2.5.1
+
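The musl patch above drops `res_ninit()` and instead reads the nameserver entries straight out of /etc/resolv.conf. The same parsing logic transcribed into Python, purely as an illustration of what the C hunk does (not part of ConnMan):

```python
def parse_nameservers(path="/etc/resolv.conf"):
    """Return the nameserver addresses listed in a resolv.conf-style file."""
    servers = []
    try:
        with open(path) as f:
            for line in f:
                # Only lines of the form "nameserver <address>" matter,
                # mirroring the strncmp()/isspace() checks in the patch.
                if not line.startswith("nameserver") or not line[10:11].isspace():
                    continue
                fields = line.split()
                if len(fields) >= 2:
                    servers.append(fields[1])
    except OSError:
        pass
    return servers

print(parse_nameservers())
```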
diff --git a/yocto-poky/meta/recipes-connectivity/connman/connman/0003-Fix-header-inclusions-for-musl.patch b/yocto-poky/meta/recipes-connectivity/connman/connman/0003-Fix-header-inclusions-for-musl.patch
new file mode 100644
index 000000000..6327aa2cb
--- /dev/null
+++ b/yocto-poky/meta/recipes-connectivity/connman/connman/0003-Fix-header-inclusions-for-musl.patch
@@ -0,0 +1,85 @@
+From 67645a01a2f3f52625d8dd77f2811a9e213e1b7d Mon Sep 17 00:00:00 2001
+From: Khem Raj <raj.khem@gmail.com>
+Date: Sun, 13 Sep 2015 13:28:20 -0700
+Subject: [PATCH] Fix header inclusions for musl
+
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+---
+Upstream-Status: Pending
+
+ gweb/gresolv.c | 1 +
+ plugins/wifi.c | 3 +--
+ src/tethering.c | 2 --
+ tools/dhcp-test.c | 1 -
+ tools/dnsproxy-test.c | 1 +
+ 5 files changed, 3 insertions(+), 5 deletions(-)
+
+diff --git a/gweb/gresolv.c b/gweb/gresolv.c
+index 3ad8e70..61d6fe8 100644
+--- a/gweb/gresolv.c
++++ b/gweb/gresolv.c
+@@ -28,6 +28,7 @@
+ #include <stdarg.h>
+ #include <string.h>
+ #include <stdlib.h>
++#include <stdio.h>
+ #include <resolv.h>
+ #include <sys/types.h>
+ #include <sys/socket.h>
+diff --git a/plugins/wifi.c b/plugins/wifi.c
+index dfe849f..99cff3f 100644
+--- a/plugins/wifi.c
++++ b/plugins/wifi.c
+@@ -30,9 +30,8 @@
+ #include <string.h>
+ #include <sys/ioctl.h>
+ #include <sys/socket.h>
+-#include <linux/if_arp.h>
+-#include <linux/wireless.h>
+ #include <net/ethernet.h>
++#include <linux/wireless.h>
+
+ #ifndef IFF_LOWER_UP
+ #define IFF_LOWER_UP 0x10000
+diff --git a/src/tethering.c b/src/tethering.c
+index ceeec74..c44cb36 100644
+--- a/src/tethering.c
++++ b/src/tethering.c
+@@ -31,10 +31,8 @@
+ #include <stdio.h>
+ #include <sys/ioctl.h>
+ #include <net/if.h>
+-#include <linux/sockios.h>
+ #include <string.h>
+ #include <fcntl.h>
+-#include <linux/if_tun.h>
+ #include <netinet/in.h>
+ #include <linux/if_bridge.h>
+
+diff --git a/tools/dhcp-test.c b/tools/dhcp-test.c
+index c34e10a..eae66fc 100644
+--- a/tools/dhcp-test.c
++++ b/tools/dhcp-test.c
+@@ -33,7 +33,6 @@
+ #include <arpa/inet.h>
+ #include <net/route.h>
+ #include <net/ethernet.h>
+-#include <linux/if_arp.h>
+
+ #include <gdhcp/gdhcp.h>
+
+diff --git a/tools/dnsproxy-test.c b/tools/dnsproxy-test.c
+index 551cae9..226ba86 100644
+--- a/tools/dnsproxy-test.c
++++ b/tools/dnsproxy-test.c
+@@ -27,6 +27,7 @@
+ #include <stdlib.h>
+ #include <string.h>
+ #include <unistd.h>
++#include <stdio.h>
+ #include <arpa/inet.h>
+ #include <netinet/in.h>
+ #include <sys/types.h>
+--
+2.5.1
+
diff --git a/yocto-poky/meta/recipes-connectivity/connman/connman/add_xuser_dbus_permission.patch b/yocto-poky/meta/recipes-connectivity/connman/connman/add_xuser_dbus_permission.patch
deleted file mode 100644
index 707b3cafb..000000000
--- a/yocto-poky/meta/recipes-connectivity/connman/connman/add_xuser_dbus_permission.patch
+++ /dev/null
@@ -1,21 +0,0 @@
-Because Poky doesn't support at_console we need to special-case the session
-user.
-
-Upstream-Status: Inappropriate [configuration]
-
-Signed-off-by: Ross Burton <ross.burton@intel.com>
-
-diff --git a/src/connman-dbus.conf b/src/connman-dbus.conf
-index 98a773e..466809c 100644
---- a/src/connman-dbus.conf
-+++ b/src/connman-dbus.conf
-@@ -8,6 +8,9 @@
- <allow send_interface="net.connman.Counter"/>
- <allow send_interface="net.connman.Notification"/>
- </policy>
-+ <policy user="xuser">
-+ <allow send_destination="net.connman"/>
-+ </policy>
- <policy at_console="true">
- <allow send_destination="net.connman"/>
- </policy>
diff --git a/yocto-poky/meta/recipes-connectivity/connman/connman/connman b/yocto-poky/meta/recipes-connectivity/connman/connman/connman
index bf7a94a06..c64fa0d71 100644
--- a/yocto-poky/meta/recipes-connectivity/connman/connman/connman
+++ b/yocto-poky/meta/recipes-connectivity/connman/connman/connman
@@ -49,8 +49,8 @@ do_start() {
fi
fi
fi
- if [ -f @LIBDIR@/connman/wired-setup ] ; then
- . @LIBDIR@/connman/wired-setup
+ if [ -f @DATADIR@/connman/wired-setup ] ; then
+ . @DATADIR@/connman/wired-setup
fi
$DAEMON $EXTRA_PARAM
}
diff --git a/yocto-poky/meta/recipes-connectivity/connman/connman_1.30.bb b/yocto-poky/meta/recipes-connectivity/connman/connman_1.30.bb
index 8c47353bc..7d65ac931 100644
--- a/yocto-poky/meta/recipes-connectivity/connman/connman_1.30.bb
+++ b/yocto-poky/meta/recipes-connectivity/connman/connman_1.30.bb
@@ -2,7 +2,9 @@ require connman.inc
SRC_URI = "${KERNELORG_MIRROR}/linux/network/${BPN}/${BP}.tar.xz \
file://0001-plugin.h-Change-visibility-to-default-for-debug-symb.patch \
- file://add_xuser_dbus_permission.patch \
+ file://0001-Detect-backtrace-API-availability-before-using-it.patch \
+ file://0002-resolve-musl-does-not-implement-res_ninit.patch \
+ file://0003-Fix-header-inclusions-for-musl.patch \
file://connman \
"
SRC_URI[md5sum] = "4a3efdbd6796922db9c6f66da57887fa"
diff --git a/yocto-poky/meta/recipes-connectivity/iproute2/iproute2.inc b/yocto-poky/meta/recipes-connectivity/iproute2/iproute2.inc
index a53a4e6ab..29f90629a 100644
--- a/yocto-poky/meta/recipes-connectivity/iproute2/iproute2.inc
+++ b/yocto-poky/meta/recipes-connectivity/iproute2/iproute2.inc
@@ -15,6 +15,12 @@ inherit update-alternatives
EXTRA_OEMAKE = "CC='${CC}' KERNEL_INCLUDE=${STAGING_INCDIR} DOCDIR=${docdir}/iproute2 SUBDIRS='lib tc ip' SBINDIR='${base_sbindir}' LIBDIR='${libdir}'"
+do_configure_append () {
+ sh configure ${STAGING_INCDIR}
+ # Explicitly disable ATM support
+ sed -i -e '/TC_CONFIG_ATM/d' Config
+}
+
do_install () {
oe_runmake DESTDIR=${D} install
mv ${D}${base_sbindir}/ip ${D}${base_sbindir}/ip.iproute2
diff --git a/yocto-poky/meta/recipes-connectivity/irda-utils/irda-utils_0.9.18.bb b/yocto-poky/meta/recipes-connectivity/irda-utils/irda-utils_0.9.18.bb
index 8ac3b1869..bd2f815cb 100644
--- a/yocto-poky/meta/recipes-connectivity/irda-utils/irda-utils_0.9.18.bb
+++ b/yocto-poky/meta/recipes-connectivity/irda-utils/irda-utils_0.9.18.bb
@@ -3,7 +3,7 @@ DESCRIPTION = "Provides common files needed to use IrDA. \
IrDA allows communication over Infrared with other devices \
such as phones and laptops."
HOMEPAGE = "http://irda.sourceforge.net/"
-BUGTRACKER = "irda-users@lists.sourceforge.net"
+BUGTRACKER = "http://sourceforge.net/p/irda/bugs/"
SECTION = "base"
LICENSE = "GPLv2+"
LIC_FILES_CHKSUM = "file://irdadump/COPYING;md5=94d55d512a9ba36caa9b7df079bae19f \
diff --git a/yocto-poky/meta/recipes-connectivity/openssh/openssh/CVE-2016-1907_2.patch b/yocto-poky/meta/recipes-connectivity/openssh/openssh/CVE-2016-1907_2.patch
new file mode 100644
index 000000000..9fac69c3d
--- /dev/null
+++ b/yocto-poky/meta/recipes-connectivity/openssh/openssh/CVE-2016-1907_2.patch
@@ -0,0 +1,65 @@
+From f98a09cacff7baad8748c9aa217afd155a4d493f Mon Sep 17 00:00:00 2001
+From: "mmcc@openbsd.org" <mmcc@openbsd.org>
+Date: Tue, 20 Oct 2015 03:36:35 +0000
+Subject: [PATCH] upstream commit
+
+Replace a function-local allocation with stack memory.
+
+ok djm@
+
+Upstream-ID: c09fbbab637053a2ab9f33ca142b4e20a4c5a17e
+Upstream-Status: Backport
+CVE: CVE-2016-1907
+
+[YOCTO #8935]
+
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ clientloop.c | 9 ++-------
+ 1 file changed, 2 insertions(+), 7 deletions(-)
+
+diff --git a/clientloop.c b/clientloop.c
+index 87ceb3d..1e05cba 100644
+--- a/clientloop.c
++++ b/clientloop.c
+@@ -1,4 +1,4 @@
+-/* $OpenBSD: clientloop.c,v 1.275 2015/07/10 06:21:53 markus Exp $ */
++/* $OpenBSD: clientloop.c,v 1.276 2015/10/20 03:36:35 mmcc Exp $ */
+ /*
+ * Author: Tatu Ylonen <ylo@cs.hut.fi>
+ * Copyright (c) 1995 Tatu Ylonen <ylo@cs.hut.fi>, Espoo, Finland
+@@ -311,11 +311,10 @@ client_x11_get_proto(const char *display, const char *xauth_path,
+ static char proto[512], data[512];
+ FILE *f;
+ int got_data = 0, generated = 0, do_unlink = 0, i;
+- char *xauthdir, *xauthfile;
++ char xauthdir[PATH_MAX] = "", xauthfile[PATH_MAX] = "";
+ struct stat st;
+ u_int now, x11_timeout_real;
+
+- xauthdir = xauthfile = NULL;
+ *_proto = proto;
+ *_data = data;
+ proto[0] = data[0] = '\0';
+@@ -343,8 +342,6 @@ client_x11_get_proto(const char *display, const char *xauth_path,
+ display = xdisplay;
+ }
+ if (trusted == 0) {
+- xauthdir = xmalloc(PATH_MAX);
+- xauthfile = xmalloc(PATH_MAX);
+ mktemp_proto(xauthdir, PATH_MAX);
+ /*
+ * The authentication cookie should briefly outlive
+@@ -407,8 +404,6 @@ client_x11_get_proto(const char *display, const char *xauth_path,
+ unlink(xauthfile);
+ rmdir(xauthdir);
+ }
+- free(xauthdir);
+- free(xauthfile);
+
+ /*
+ * If we didn't get authentication data, just make up some
+--
+1.9.1
+
diff --git a/yocto-poky/meta/recipes-connectivity/openssh/openssh/CVE-2016-1907_3.patch b/yocto-poky/meta/recipes-connectivity/openssh/openssh/CVE-2016-1907_3.patch
new file mode 100644
index 000000000..3dfc51af7
--- /dev/null
+++ b/yocto-poky/meta/recipes-connectivity/openssh/openssh/CVE-2016-1907_3.patch
@@ -0,0 +1,329 @@
+From ed4ce82dbfa8a3a3c8ea6fa0db113c71e234416c Mon Sep 17 00:00:00 2001
+From: "djm@openbsd.org" <djm@openbsd.org>
+Date: Wed, 13 Jan 2016 23:04:47 +0000
+Subject: [PATCH] upstream commit
+
+eliminate fallback from untrusted X11 forwarding to trusted
+ forwarding when the X server disables the SECURITY extension; Reported by
+ Thomas Hoger; ok deraadt@
+
+Upstream-ID: f76195bd2064615a63ef9674a0e4096b0713f938
+Upstream-Status: Backport
+CVE: CVE-2016-1907
+
+[YOCTO #8935]
+
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ clientloop.c | 114 ++++++++++++++++++++++++++++++++++++-----------------------
+ clientloop.h | 4 +--
+ mux.c | 22 ++++++------
+ ssh.c | 23 +++++-------
+ 4 files changed, 93 insertions(+), 70 deletions(-)
+
+Index: openssh-7.1p2/clientloop.c
+===================================================================
+--- openssh-7.1p2.orig/clientloop.c
++++ openssh-7.1p2/clientloop.c
+@@ -1,4 +1,4 @@
+-/* $OpenBSD: clientloop.c,v 1.276 2015/10/20 03:36:35 mmcc Exp $ */
++/* $OpenBSD: clientloop.c,v 1.279 2016/01/13 23:04:47 djm Exp $ */
+ /*
+ * Author: Tatu Ylonen <ylo@cs.hut.fi>
+ * Copyright (c) 1995 Tatu Ylonen <ylo@cs.hut.fi>, Espoo, Finland
+@@ -288,6 +288,9 @@ client_x11_display_valid(const char *dis
+ {
+ size_t i, dlen;
+
++ if (display == NULL)
++ return 0;
++
+ dlen = strlen(display);
+ for (i = 0; i < dlen; i++) {
+ if (!isalnum((u_char)display[i]) &&
+@@ -301,34 +304,33 @@ client_x11_display_valid(const char *dis
+
+ #define SSH_X11_PROTO "MIT-MAGIC-COOKIE-1"
+ #define X11_TIMEOUT_SLACK 60
+-void
++int
+ client_x11_get_proto(const char *display, const char *xauth_path,
+ u_int trusted, u_int timeout, char **_proto, char **_data)
+ {
+- char cmd[1024];
+- char line[512];
+- char xdisplay[512];
++ char cmd[1024], line[512], xdisplay[512];
++ char xauthfile[PATH_MAX], xauthdir[PATH_MAX];
+ static char proto[512], data[512];
+ FILE *f;
+- int got_data = 0, generated = 0, do_unlink = 0, i;
+- char xauthdir[PATH_MAX] = "", xauthfile[PATH_MAX] = "";
++ int got_data = 0, generated = 0, do_unlink = 0, i, r;
+ struct stat st;
+ u_int now, x11_timeout_real;
+
+ *_proto = proto;
+ *_data = data;
+- proto[0] = data[0] = '\0';
++ proto[0] = data[0] = xauthfile[0] = xauthdir[0] = '\0';
+
+- if (xauth_path == NULL ||(stat(xauth_path, &st) == -1)) {
+- debug("No xauth program.");
+- } else if (!client_x11_display_valid(display)) {
+- logit("DISPLAY '%s' invalid, falling back to fake xauth data",
++ if (!client_x11_display_valid(display)) {
++ logit("DISPLAY \"%s\" invalid; disabling X11 forwarding",
+ display);
+- } else {
+- if (display == NULL) {
+- debug("x11_get_proto: DISPLAY not set");
+- return;
+- }
++ return -1;
++ }
++ if (xauth_path != NULL && stat(xauth_path, &st) == -1) {
++ debug("No xauth program.");
++ xauth_path = NULL;
++ }
++
++ if (xauth_path != NULL) {
+ /*
+ * Handle FamilyLocal case where $DISPLAY does
+ * not match an authorization entry. For this we
+@@ -337,43 +339,60 @@ client_x11_get_proto(const char *display
+ * is not perfect.
+ */
+ if (strncmp(display, "localhost:", 10) == 0) {
+- snprintf(xdisplay, sizeof(xdisplay), "unix:%s",
+- display + 10);
++ if ((r = snprintf(xdisplay, sizeof(xdisplay), "unix:%s",
++ display + 10)) < 0 ||
++ (size_t)r >= sizeof(xdisplay)) {
++ error("%s: display name too long", __func__);
++ return -1;
++ }
+ display = xdisplay;
+ }
+ if (trusted == 0) {
+- mktemp_proto(xauthdir, PATH_MAX);
+ /*
++ * Generate an untrusted X11 auth cookie.
++ *
+ * The authentication cookie should briefly outlive
+ * ssh's willingness to forward X11 connections to
+ * avoid nasty fail-open behaviour in the X server.
+ */
++ mktemp_proto(xauthdir, sizeof(xauthdir));
++ if (mkdtemp(xauthdir) == NULL) {
++ error("%s: mkdtemp: %s",
++ __func__, strerror(errno));
++ return -1;
++ }
++ do_unlink = 1;
++ if ((r = snprintf(xauthfile, sizeof(xauthfile),
++ "%s/xauthfile", xauthdir)) < 0 ||
++ (size_t)r >= sizeof(xauthfile)) {
++ error("%s: xauthfile path too long", __func__);
++ unlink(xauthfile);
++ rmdir(xauthdir);
++ return -1;
++ }
++
+ if (timeout >= UINT_MAX - X11_TIMEOUT_SLACK)
+ x11_timeout_real = UINT_MAX;
+ else
+ x11_timeout_real = timeout + X11_TIMEOUT_SLACK;
+- if (mkdtemp(xauthdir) != NULL) {
+- do_unlink = 1;
+- snprintf(xauthfile, PATH_MAX, "%s/xauthfile",
+- xauthdir);
+- snprintf(cmd, sizeof(cmd),
+- "%s -f %s generate %s " SSH_X11_PROTO
+- " untrusted timeout %u 2>" _PATH_DEVNULL,
+- xauth_path, xauthfile, display,
+- x11_timeout_real);
+- debug2("x11_get_proto: %s", cmd);
+- if (x11_refuse_time == 0) {
+- now = monotime() + 1;
+- if (UINT_MAX - timeout < now)
+- x11_refuse_time = UINT_MAX;
+- else
+- x11_refuse_time = now + timeout;
+- channel_set_x11_refuse_time(
+- x11_refuse_time);
+- }
+- if (system(cmd) == 0)
+- generated = 1;
++ if ((r = snprintf(cmd, sizeof(cmd),
++ "%s -f %s generate %s " SSH_X11_PROTO
++ " untrusted timeout %u 2>" _PATH_DEVNULL,
++ xauth_path, xauthfile, display,
++ x11_timeout_real)) < 0 ||
++ (size_t)r >= sizeof(cmd))
++ fatal("%s: cmd too long", __func__);
++ debug2("%s: %s", __func__, cmd);
++ if (x11_refuse_time == 0) {
++ now = monotime() + 1;
++ if (UINT_MAX - timeout < now)
++ x11_refuse_time = UINT_MAX;
++ else
++ x11_refuse_time = now + timeout;
++ channel_set_x11_refuse_time(x11_refuse_time);
+ }
++ if (system(cmd) == 0)
++ generated = 1;
+ }
+
+ /*
+@@ -395,9 +414,7 @@ client_x11_get_proto(const char *display
+ got_data = 1;
+ if (f)
+ pclose(f);
+- } else
+- error("Warning: untrusted X11 forwarding setup failed: "
+- "xauth key data not generated");
++ }
+ }
+
+ if (do_unlink) {
+@@ -405,6 +422,13 @@ client_x11_get_proto(const char *display
+ rmdir(xauthdir);
+ }
+
++ /* Don't fall back to fake X11 data for untrusted forwarding */
++ if (!trusted && !got_data) {
++ error("Warning: untrusted X11 forwarding setup failed: "
++ "xauth key data not generated");
++ return -1;
++ }
++
+ /*
+ * If we didn't get authentication data, just make up some
+ * data. The forwarding code will check the validity of the
+@@ -427,6 +451,8 @@ client_x11_get_proto(const char *display
+ rnd >>= 8;
+ }
+ }
++
++ return 0;
+ }
+
+ /*
+Index: openssh-7.1p2/clientloop.h
+===================================================================
+--- openssh-7.1p2.orig/clientloop.h
++++ openssh-7.1p2/clientloop.h
+@@ -1,4 +1,4 @@
+-/* $OpenBSD: clientloop.h,v 1.31 2013/06/02 23:36:29 dtucker Exp $ */
++/* $OpenBSD: clientloop.h,v 1.32 2016/01/13 23:04:47 djm Exp $ */
+
+ /*
+ * Author: Tatu Ylonen <ylo@cs.hut.fi>
+@@ -39,7 +39,7 @@
+
+ /* Client side main loop for the interactive session. */
+ int client_loop(int, int, int);
+-void client_x11_get_proto(const char *, const char *, u_int, u_int,
++int client_x11_get_proto(const char *, const char *, u_int, u_int,
+ char **, char **);
+ void client_global_request_reply_fwd(int, u_int32_t, void *);
+ void client_session2_setup(int, int, int, const char *, struct termios *,
+Index: openssh-7.1p2/mux.c
+===================================================================
+--- openssh-7.1p2.orig/mux.c
++++ openssh-7.1p2/mux.c
+@@ -1,4 +1,4 @@
+-/* $OpenBSD: mux.c,v 1.54 2015/08/19 23:18:26 djm Exp $ */
++/* $OpenBSD: mux.c,v 1.58 2016/01/13 23:04:47 djm Exp $ */
+ /*
+ * Copyright (c) 2002-2008 Damien Miller <djm@openbsd.org>
+ *
+@@ -1354,16 +1354,18 @@ mux_session_confirm(int id, int success,
+ char *proto, *data;
+
+ /* Get reasonable local authentication information. */
+- client_x11_get_proto(display, options.xauth_location,
++ if (client_x11_get_proto(display, options.xauth_location,
+ options.forward_x11_trusted, options.forward_x11_timeout,
+- &proto, &data);
+- /* Request forwarding with authentication spoofing. */
+- debug("Requesting X11 forwarding with authentication "
+- "spoofing.");
+- x11_request_forwarding_with_spoofing(id, display, proto,
+- data, 1);
+- client_expect_confirm(id, "X11 forwarding", CONFIRM_WARN);
+- /* XXX exit_on_forward_failure */
++ &proto, &data) == 0) {
++ /* Request forwarding with authentication spoofing. */
++ debug("Requesting X11 forwarding with authentication "
++ "spoofing.");
++ x11_request_forwarding_with_spoofing(id, display, proto,
++ data, 1);
++ /* XXX exit_on_forward_failure */
++ client_expect_confirm(id, "X11 forwarding",
++ CONFIRM_WARN);
++ }
+ }
+
+ if (cctx->want_agent_fwd && options.forward_agent) {
+Index: openssh-7.1p2/ssh.c
+===================================================================
+--- openssh-7.1p2.orig/ssh.c
++++ openssh-7.1p2/ssh.c
+@@ -1,4 +1,4 @@
+-/* $OpenBSD: ssh.c,v 1.420 2015/07/30 00:01:34 djm Exp $ */
++/* $OpenBSD: ssh.c,v 1.433 2016/01/13 23:04:47 djm Exp $ */
+ /*
+ * Author: Tatu Ylonen <ylo@cs.hut.fi>
+ * Copyright (c) 1995 Tatu Ylonen <ylo@cs.hut.fi>, Espoo, Finland
+@@ -1604,6 +1604,7 @@ ssh_session(void)
+ struct winsize ws;
+ char *cp;
+ const char *display;
++ char *proto = NULL, *data = NULL;
+
+ /* Enable compression if requested. */
+ if (options.compression) {
+@@ -1674,13 +1675,9 @@ ssh_session(void)
+ display = getenv("DISPLAY");
+ if (display == NULL && options.forward_x11)
+ debug("X11 forwarding requested but DISPLAY not set");
+- if (options.forward_x11 && display != NULL) {
+- char *proto, *data;
+- /* Get reasonable local authentication information. */
+- client_x11_get_proto(display, options.xauth_location,
+- options.forward_x11_trusted,
+- options.forward_x11_timeout,
+- &proto, &data);
++ if (options.forward_x11 && client_x11_get_proto(display,
++ options.xauth_location, options.forward_x11_trusted,
++ options.forward_x11_timeout, &proto, &data) == 0) {
+ /* Request forwarding with authentication spoofing. */
+ debug("Requesting X11 forwarding with authentication "
+ "spoofing.");
+@@ -1770,6 +1767,7 @@ ssh_session2_setup(int id, int success,
+ extern char **environ;
+ const char *display;
+ int interactive = tty_flag;
++ char *proto = NULL, *data = NULL;
+
+ if (!success)
+ return; /* No need for error message, channels code sens one */
+@@ -1777,12 +1775,9 @@ ssh_session2_setup(int id, int success,
+ display = getenv("DISPLAY");
+ if (display == NULL && options.forward_x11)
+ debug("X11 forwarding requested but DISPLAY not set");
+- if (options.forward_x11 && display != NULL) {
+- char *proto, *data;
+- /* Get reasonable local authentication information. */
+- client_x11_get_proto(display, options.xauth_location,
+- options.forward_x11_trusted,
+- options.forward_x11_timeout, &proto, &data);
++ if (options.forward_x11 && client_x11_get_proto(display,
++ options.xauth_location, options.forward_x11_trusted,
++ options.forward_x11_timeout, &proto, &data) == 0) {
+ /* Request forwarding with authentication spoofing. */
+ debug("Requesting X11 forwarding with authentication "
+ "spoofing.");
diff --git a/yocto-poky/meta/recipes-connectivity/openssh/openssh/CVE-2016-1907_upstream_commit.patch b/yocto-poky/meta/recipes-connectivity/openssh/openssh/CVE-2016-1907_upstream_commit.patch
new file mode 100644
index 000000000..f3d132e43
--- /dev/null
+++ b/yocto-poky/meta/recipes-connectivity/openssh/openssh/CVE-2016-1907_upstream_commit.patch
@@ -0,0 +1,33 @@
+From d77148e3a3ef6c29b26ec74331455394581aa257 Mon Sep 17 00:00:00 2001
+From: "djm@openbsd.org" <djm@openbsd.org>
+Date: Sun, 8 Nov 2015 21:59:11 +0000
+Subject: [PATCH] upstream commit
+
+fix OOB read in packet code caused by missing return
+ statement found by Ben Hawkes; ok markus@ deraadt@
+
+Upstream-ID: a3e3a85434ebfa0690d4879091959591f30efc62
+
+Upstream-Status: Backport
+CVE: CVE-2016-1907
+
+[YOCTO #8935]
+
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ packet.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+Index: openssh-7.1p2/packet.c
+===================================================================
+--- openssh-7.1p2.orig/packet.c
++++ openssh-7.1p2/packet.c
+@@ -1855,6 +1855,7 @@ ssh_packet_process_incoming(struct ssh *
+ if (len >= state->packet_discard) {
+ if ((r = ssh_packet_stop_discard(ssh)) != 0)
+ return r;
++ return SSH_ERR_CONN_CORRUPT;
+ }
+ state->packet_discard -= len;
+ return 0;
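
The hunk above restores an early return once a corrupted packet has been fully discarded; without it, execution falls through to state->packet_discard -= len with len >= packet_discard, wrapping the unsigned counter. A rough sketch of that bug class follows, with hypothetical names and none of OpenSSH's real packet state:

    #include <stdio.h>
    #include <stddef.h>

    /* Illustrative only: bytes of a corrupted packet still to be skipped. */
    static size_t discard_left = 4;

    static int consume(size_t len)
    {
        if (discard_left > 0) {
            if (len >= discard_left) {
                /* The fix: stop here and report corruption.  Falling
                 * through instead would execute the subtraction below
                 * and wrap the unsigned counter. */
                return -1;
            }
            discard_left -= len;
            return 0;               /* still discarding */
        }
        printf("would parse %zu bytes normally\n", len);
        return 0;
    }

    int main(void)
    {
        int rc = consume(16);       /* more data than is left to discard */
        printf("rc=%d, discard_left=%zu\n", rc, discard_left);
        return rc == -1 ? 0 : 1;
    }
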
diff --git a/yocto-poky/meta/recipes-connectivity/openssh/openssh/sshd@.service b/yocto-poky/meta/recipes-connectivity/openssh/openssh/sshd@.service
index bb2d68e96..9d83dfb2b 100644
--- a/yocto-poky/meta/recipes-connectivity/openssh/openssh/sshd@.service
+++ b/yocto-poky/meta/recipes-connectivity/openssh/openssh/sshd@.service
@@ -4,7 +4,9 @@ Wants=sshdgenkeys.service
After=sshdgenkeys.service
[Service]
-ExecStart=-@SBINDIR@/sshd -i
+Environment="SSHD_OPTS="
+EnvironmentFile=-/etc/default/ssh
+ExecStart=-@SBINDIR@/sshd -i $SSHD_OPTS
ExecReload=@BASE_BINDIR@/kill -HUP $MAINPID
StandardInput=socket
StandardError=syslog
diff --git a/yocto-poky/meta/recipes-connectivity/openssh/openssh/sshdgenkeys.service b/yocto-poky/meta/recipes-connectivity/openssh/openssh/sshdgenkeys.service
index d65086fc8..148e6ad63 100644
--- a/yocto-poky/meta/recipes-connectivity/openssh/openssh/sshdgenkeys.service
+++ b/yocto-poky/meta/recipes-connectivity/openssh/openssh/sshdgenkeys.service
@@ -1,11 +1,22 @@
[Unit]
Description=OpenSSH Key Generation
-ConditionPathExists=|!/etc/ssh/ssh_host_rsa_key
-ConditionPathExists=|!/etc/ssh/ssh_host_dsa_key
-ConditionPathExists=|!/etc/ssh/ssh_host_ecdsa_key
-ConditionPathExists=|!/etc/ssh/ssh_host_ed25519_key
+RequiresMountsFor=/var /run
+ConditionPathExists=!/var/run/ssh/ssh_host_rsa_key
+ConditionPathExists=!/var/run/ssh/ssh_host_dsa_key
+ConditionPathExists=!/var/run/ssh/ssh_host_ecdsa_key
+ConditionPathExists=!/var/run/ssh/ssh_host_ed25519_key
+ConditionPathExists=!/etc/ssh/ssh_host_rsa_key
+ConditionPathExists=!/etc/ssh/ssh_host_dsa_key
+ConditionPathExists=!/etc/ssh/ssh_host_ecdsa_key
+ConditionPathExists=!/etc/ssh/ssh_host_ed25519_key
[Service]
-ExecStart=@BINDIR@/ssh-keygen -A
+Environment="SYSCONFDIR=/etc/ssh"
+EnvironmentFile=-/etc/default/ssh
+ExecStart=@BASE_BINDIR@/mkdir -p $SYSCONFDIR
+ExecStart=@BINDIR@/ssh-keygen -q -f ${SYSCONFDIR}/ssh_host_rsa_key -N '' -t rsa
+ExecStart=@BINDIR@/ssh-keygen -q -f ${SYSCONFDIR}/ssh_host_dsa_key -N '' -t dsa
+ExecStart=@BINDIR@/ssh-keygen -q -f ${SYSCONFDIR}/ssh_host_ecdsa_key -N '' -t ecdsa
+ExecStart=@BINDIR@/ssh-keygen -q -f ${SYSCONFDIR}/ssh_host_ed25519_key -N '' -t ed25519
Type=oneshot
RemainAfterExit=yes
diff --git a/yocto-poky/meta/recipes-connectivity/openssh/openssh_7.1p1.bb b/yocto-poky/meta/recipes-connectivity/openssh/openssh_7.1p2.bb
index eeeb4b4c7..714c3917c 100644
--- a/yocto-poky/meta/recipes-connectivity/openssh/openssh_7.1p1.bb
+++ b/yocto-poky/meta/recipes-connectivity/openssh/openssh_7.1p2.bb
@@ -20,12 +20,15 @@ SRC_URI = "ftp://ftp.openbsd.org/pub/OpenBSD/OpenSSH/portable/openssh-${PV}.tar.
file://sshdgenkeys.service \
file://volatiles.99_sshd \
file://add-test-support-for-busybox.patch \
- file://run-ptest"
+ file://run-ptest \
+ file://CVE-2016-1907_upstream_commit.patch \
+ file://CVE-2016-1907_2.patch \
+ file://CVE-2016-1907_3.patch "
PAM_SRC_URI = "file://sshd"
-SRC_URI[md5sum] = "8709736bc8a8c253bc4eeb4829888ca5"
-SRC_URI[sha256sum] = "fc0a6d2d1d063d5c66dffd952493d0cda256cad204f681de0f84ef85b2ad8428"
+SRC_URI[md5sum] = "4d8547670e2a220d5ef805ad9e47acf2"
+SRC_URI[sha256sum] = "dd75f024dcf21e06a0d6421d582690bf987a1f6323e32ad6619392f3bfde6bbd"
inherit useradd update-rc.d update-alternatives systemd
@@ -87,7 +90,7 @@ do_compile_ptest() {
do_install_append () {
if [ "${@bb.utils.contains('DISTRO_FEATURES', 'pam', 'pam', '', d)}" = "pam" ]; then
- install -D -m 0755 ${WORKDIR}/sshd ${D}${sysconfdir}/pam.d/sshd
+ install -D -m 0644 ${WORKDIR}/sshd ${D}${sysconfdir}/pam.d/sshd
sed -i -e 's:#UsePAM no:UsePAM yes:' ${WORKDIR}/sshd_config ${D}${sysconfdir}/ssh/sshd_config
fi
diff --git a/yocto-poky/meta/recipes-connectivity/openssl/openssl.inc b/yocto-poky/meta/recipes-connectivity/openssl/openssl.inc
index 53dcfd9f3..8af423f1a 100644
--- a/yocto-poky/meta/recipes-connectivity/openssl/openssl.inc
+++ b/yocto-poky/meta/recipes-connectivity/openssl/openssl.inc
@@ -118,7 +118,7 @@ do_configure () {
linux-*-mips64)
target=linux-mips
;;
- linux-microblaze*)
+ linux-microblaze*|linux-nios2*)
target=linux-generic32
;;
linux-powerpc)
diff --git a/yocto-poky/meta/recipes-connectivity/openssl/openssl/0001-Add-test-for-CVE-2015-3194.patch b/yocto-poky/meta/recipes-connectivity/openssl/openssl/0001-Add-test-for-CVE-2015-3194.patch
new file mode 100644
index 000000000..39a2e5a94
--- /dev/null
+++ b/yocto-poky/meta/recipes-connectivity/openssl/openssl/0001-Add-test-for-CVE-2015-3194.patch
@@ -0,0 +1,66 @@
+From 00456fded43eadd4bb94bf675ae4ea5d158a764f Mon Sep 17 00:00:00 2001
+From: "Dr. Stephen Henson" <steve@openssl.org>
+Date: Wed, 4 Nov 2015 13:30:03 +0000
+Subject: [PATCH] Add test for CVE-2015-3194
+
+Reviewed-by: Richard Levitte <levitte@openssl.org>
+
+Upstream-Status: Backport
+
+This patch was imported from
+https://git.openssl.org/?p=openssl.git;a=commit;h=00456fded43eadd4bb94bf675ae4ea5d158a764f
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ test/certs/pss1.pem | 21 +++++++++++++++++++++
+ test/tx509 | 7 +++++++
+ 2 files changed, 28 insertions(+)
+ create mode 100644 test/certs/pss1.pem
+
+diff --git a/test/certs/pss1.pem b/test/certs/pss1.pem
+new file mode 100644
+index 0000000..29da71d
+--- /dev/null
++++ b/test/certs/pss1.pem
+@@ -0,0 +1,21 @@
++-----BEGIN CERTIFICATE-----
++MIIDdjCCAjqgAwIBAgIJANcwZLyfEv7DMD4GCSqGSIb3DQEBCjAxoA0wCwYJYIZI
++AWUDBAIBoRowGAYJKoZIhvcNAQEIMAsGCWCGSAFlAwQCAaIEAgIA3jAnMSUwIwYD
++VQQDDBxUZXN0IEludmFsaWQgUFNTIGNlcnRpZmljYXRlMB4XDTE1MTEwNDE2MDIz
++NVoXDTE1MTIwNDE2MDIzNVowJzElMCMGA1UEAwwcVGVzdCBJbnZhbGlkIFBTUyBj
++ZXJ0aWZpY2F0ZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMTaM7WH
++qVCAGAIA+zL1KWvvASTrhlq+1ePdO7wsrWX2KiYoTYrJYTnxhLnn0wrHqApt79nL
++IBG7cfShyZqFHOY/IzlYPMVt+gPo293gw96Fds5JBsjhjkyGnOyr9OUntFqvxDbT
++IIFU7o9IdxD4edaqjRv+fegVE+B79pDk4s0ujsk6dULtCg9Rst0ucGFo19mr+b7k
++dbfn8pZ72ZNDJPueVdrUAWw9oll61UcYfk75XdrLk6JlL41GrYHc8KlfXf43gGQq
++QfrpHkg4Ih2cI6Wt2nhFGAzrlcorzLliQIUJRIhM8h4IgDfpBpaPdVQLqS2pFbXa
++5eQjqiyJwak2vJ8CAwEAAaNQME4wHQYDVR0OBBYEFCt180N4oGUt5LbzBwQ4Ia+2
++4V97MB8GA1UdIwQYMBaAFCt180N4oGUt5LbzBwQ4Ia+24V97MAwGA1UdEwQFMAMB
++Af8wMQYJKoZIhvcNAQEKMCSgDTALBglghkgBZQMEAgGhDTALBgkqhkiG9w0BAQii
++BAICAN4DggEBAAjBtm90lGxgddjc4Xu/nbXXFHVs2zVcHv/mqOZoQkGB9r/BVgLb
++xhHrFZ2pHGElbUYPfifdS9ztB73e1d4J+P29o0yBqfd4/wGAc/JA8qgn6AAEO/Xn
++plhFeTRJQtLZVl75CkHXgUGUd3h+ADvKtcBuW9dSUncaUrgNKR8u/h/2sMG38RWY
++DzBddC/66YTa3r7KkVUfW7yqRQfELiGKdcm+bjlTEMsvS+EhHup9CzbpoCx2Fx9p
++NPtFY3yEObQhmL1JyoCRWqBE75GzFPbRaiux5UpEkns+i3trkGssZzsOuVqHNTNZ
++lC9+9hPHIoc9UMmAQNo1vGIW3NWVoeGbaJ8=
++-----END CERTIFICATE-----
+diff --git a/test/tx509 b/test/tx509
+index 0ce3b52..77f5cac 100644
+--- a/test/tx509
++++ b/test/tx509
+@@ -74,5 +74,12 @@ if [ $? != 0 ]; then exit 1; fi
+ cmp x509-f.p x509-ff.p3
+ if [ $? != 0 ]; then exit 1; fi
+
++echo "Parsing test certificates"
++
++$cmd -in certs/pss1.pem -text -noout >/dev/null
++if [ $? != 0 ]; then exit 1; fi
++
++echo OK
++
+ /bin/rm -f x509-f.* x509-ff.* x509-fff.*
+ exit 0
+--
+2.3.5
+
diff --git a/yocto-poky/meta/recipes-connectivity/openssl/openssl/CVE-2015-3193-bn-asm-x86_64-mont5.pl-fix-carry-propagating-bug-CVE.patch b/yocto-poky/meta/recipes-connectivity/openssl/openssl/CVE-2015-3193-bn-asm-x86_64-mont5.pl-fix-carry-propagating-bug-CVE.patch
new file mode 100644
index 000000000..125016a23
--- /dev/null
+++ b/yocto-poky/meta/recipes-connectivity/openssl/openssl/CVE-2015-3193-bn-asm-x86_64-mont5.pl-fix-carry-propagating-bug-CVE.patch
@@ -0,0 +1,101 @@
+From d73cc256c8e256c32ed959456101b73ba9842f72 Mon Sep 17 00:00:00 2001
+From: Andy Polyakov <appro@openssl.org>
+Date: Tue, 1 Dec 2015 09:00:32 +0100
+Subject: [PATCH] bn/asm/x86_64-mont5.pl: fix carry propagating bug
+ (CVE-2015-3193).
+
+Reviewed-by: Richard Levitte <levitte@openssl.org>
+(cherry picked from commit e7c078db57908cbf16074c68034977565ffaf107)
+
+Upstream-Status: Backport
+
+This patch was imported from
+https://git.openssl.org/?p=openssl.git;a=commit;h=d73cc256c8e256c32ed959456101b73ba9842f72
+
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ crypto/bn/asm/x86_64-mont5.pl | 22 +++++++++++++++++++---
+ crypto/bn/bntest.c | 18 ++++++++++++++++++
+ 2 files changed, 37 insertions(+), 3 deletions(-)
+
+Index: openssl-1.0.2d/crypto/bn/asm/x86_64-mont5.pl
+===================================================================
+--- openssl-1.0.2d.orig/crypto/bn/asm/x86_64-mont5.pl
++++ openssl-1.0.2d/crypto/bn/asm/x86_64-mont5.pl
+@@ -1779,6 +1779,15 @@ sqr8x_reduction:
+ .align 32
+ .L8x_tail_done:
+ add (%rdx),%r8 # can this overflow?
++ adc \$0,%r9
++ adc \$0,%r10
++ adc \$0,%r11
++ adc \$0,%r12
++ adc \$0,%r13
++ adc \$0,%r14
++ adc \$0,%r15 # can't overflow, because we
++ # started with "overhung" part
++ # of multiplication
+ xor %rax,%rax
+
+ neg $carry
+@@ -3125,6 +3134,15 @@ sqrx8x_reduction:
+ .align 32
+ .Lsqrx8x_tail_done:
+ add 24+8(%rsp),%r8 # can this overflow?
++ adc \$0,%r9
++ adc \$0,%r10
++ adc \$0,%r11
++ adc \$0,%r12
++ adc \$0,%r13
++ adc \$0,%r14
++ adc \$0,%r15 # can't overflow, because we
++ # started with "overhung" part
++ # of multiplication
+ mov $carry,%rax # xor %rax,%rax
+
+ sub 16+8(%rsp),$carry # mov 16(%rsp),%cf
+@@ -3168,13 +3186,11 @@ my ($rptr,$nptr)=("%rdx","%rbp");
+ my @ri=map("%r$_",(10..13));
+ my @ni=map("%r$_",(14..15));
+ $code.=<<___;
+- xor %rbx,%rbx
++ xor %ebx,%ebx
+ sub %r15,%rsi # compare top-most words
+ adc %rbx,%rbx
+ mov %rcx,%r10 # -$num
+- .byte 0x67
+ or %rbx,%rax
+- .byte 0x67
+ mov %rcx,%r9 # -$num
+ xor \$1,%rax
+ sar \$3+2,%rcx # cf=0
+Index: openssl-1.0.2d/crypto/bn/bntest.c
+===================================================================
+--- openssl-1.0.2d.orig/crypto/bn/bntest.c
++++ openssl-1.0.2d/crypto/bn/bntest.c
+@@ -1027,6 +1027,24 @@ int test_mod_exp_mont_consttime(BIO *bp,
+ return 0;
+ }
+ }
++
++ /* Regression test for carry propagation bug in sqr8x_reduction */
++ BN_hex2bn(&a, "050505050505");
++ BN_hex2bn(&b, "02");
++ BN_hex2bn(&c,
++ "4141414141414141414141274141414141414141414141414141414141414141"
++ "4141414141414141414141414141414141414141414141414141414141414141"
++ "4141414141414141414141800000000000000000000000000000000000000000"
++ "0000000000000000000000000000000000000000000000000000000000000000"
++ "0000000000000000000000000000000000000000000000000000000000000000"
++ "0000000000000000000000000000000000000000000000000000000001");
++ BN_mod_exp(d, a, b, c, ctx);
++ BN_mul(e, a, a, ctx);
++ if (BN_cmp(d, e)) {
++ fprintf(stderr, "BN_mod_exp and BN_mul produce different results!\n");
++ return 0;
++ }
++
+ BN_free(a);
+ BN_free(b);
+ BN_free(c);
diff --git a/yocto-poky/meta/recipes-connectivity/openssl/openssl/CVE-2015-3194-1-Add-PSS-parameter-check.patch b/yocto-poky/meta/recipes-connectivity/openssl/openssl/CVE-2015-3194-1-Add-PSS-parameter-check.patch
new file mode 100644
index 000000000..13d48913b
--- /dev/null
+++ b/yocto-poky/meta/recipes-connectivity/openssl/openssl/CVE-2015-3194-1-Add-PSS-parameter-check.patch
@@ -0,0 +1,45 @@
+From c394a488942387246653833359a5c94b5832674e Mon Sep 17 00:00:00 2001
+From: "Dr. Stephen Henson" <steve@openssl.org>
+Date: Fri, 2 Oct 2015 12:35:19 +0100
+Subject: [PATCH] Add PSS parameter check.
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Avoid seg fault by checking mgf1 parameter is not NULL. This can be
+triggered during certificate verification so could be a DoS attack
+against a client or a server enabling client authentication.
+
+Thanks to Loïc Jonas Etienne (Qnective AG) for discovering this bug.
+
+CVE-2015-3194
+
+Reviewed-by: Richard Levitte <levitte@openssl.org>
+
+Upstream-Status: Backport
+
+This patch was imported from
+https://git.openssl.org/?p=openssl.git;a=commit;h=c394a488942387246653833359a5c94b5832674e
+
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ crypto/rsa/rsa_ameth.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/crypto/rsa/rsa_ameth.c b/crypto/rsa/rsa_ameth.c
+index ca3922e..4e06218 100644
+--- a/crypto/rsa/rsa_ameth.c
++++ b/crypto/rsa/rsa_ameth.c
+@@ -268,7 +268,7 @@ static X509_ALGOR *rsa_mgf1_decode(X509_ALGOR *alg)
+ {
+ const unsigned char *p;
+ int plen;
+- if (alg == NULL)
++ if (alg == NULL || alg->parameter == NULL)
+ return NULL;
+ if (OBJ_obj2nid(alg->algorithm) != NID_mgf1)
+ return NULL;
+--
+2.3.5
+
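
The one-line change above guards an OPTIONAL field: a crafted certificate can carry the mgf1 algorithm identifier with its parameter absent, so the decoder must check both pointers before dereferencing. A standalone sketch of the same pattern, using made-up types rather than OpenSSL's X509_ALGOR:

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical stand-ins for an algorithm identifier with an
     * OPTIONAL parameter field, as in X.509 PSS/MGF1 parameters. */
    struct param { int value; };
    struct algor { const char *oid; struct param *parameter; /* may be NULL */ };

    static struct param *decode_mgf1(const struct algor *alg)
    {
        /* The vulnerable version only checked alg itself; a crafted
         * certificate can supply the identifier with no parameter. */
        if (alg == NULL || alg->parameter == NULL)
            return NULL;
        return alg->parameter;
    }

    int main(void)
    {
        struct algor crafted = { "mgf1", NULL };    /* parameter omitted */
        printf("decode -> %p\n", (void *)decode_mgf1(&crafted));
        return 0;
    }
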
diff --git a/yocto-poky/meta/recipes-connectivity/openssl/openssl/CVE-2015-3195-Fix-leak-with-ASN.1-combine.patch b/yocto-poky/meta/recipes-connectivity/openssl/openssl/CVE-2015-3195-Fix-leak-with-ASN.1-combine.patch
new file mode 100644
index 000000000..6fc4d0e83
--- /dev/null
+++ b/yocto-poky/meta/recipes-connectivity/openssl/openssl/CVE-2015-3195-Fix-leak-with-ASN.1-combine.patch
@@ -0,0 +1,66 @@
+From cc598f321fbac9c04da5766243ed55d55948637d Mon Sep 17 00:00:00 2001
+From: "Dr. Stephen Henson" <steve@openssl.org>
+Date: Tue, 10 Nov 2015 19:03:07 +0000
+Subject: [PATCH] Fix leak with ASN.1 combine.
+
+When parsing a combined structure pass a flag to the decode routine
+so on error a pointer to the parent structure is not zeroed as
+this will leak any additional components in the parent.
+
+This can leak memory in any application parsing PKCS#7 or CMS structures.
+
+CVE-2015-3195.
+
+Thanks to Adam Langley (Google/BoringSSL) for discovering this bug using
+libFuzzer.
+
+PR#4131
+
+Reviewed-by: Richard Levitte <levitte@openssl.org>
+
+Upstream-Status: Backport
+
+This patch was imported from
+https://git.openssl.org/?p=openssl.git;a=commit;h=cc598f321fbac9c04da5766243ed55d55948637d
+
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ crypto/asn1/tasn_dec.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/crypto/asn1/tasn_dec.c b/crypto/asn1/tasn_dec.c
+index febf605..9256049 100644
+--- a/crypto/asn1/tasn_dec.c
++++ b/crypto/asn1/tasn_dec.c
+@@ -180,6 +180,8 @@ int ASN1_item_ex_d2i(ASN1_VALUE **pval, const unsigned char **in, long len,
+ int otag;
+ int ret = 0;
+ ASN1_VALUE **pchptr, *ptmpval;
++ int combine = aclass & ASN1_TFLG_COMBINE;
++ aclass &= ~ASN1_TFLG_COMBINE;
+ if (!pval)
+ return 0;
+ if (aux && aux->asn1_cb)
+@@ -500,7 +502,8 @@ int ASN1_item_ex_d2i(ASN1_VALUE **pval, const unsigned char **in, long len,
+ auxerr:
+ ASN1err(ASN1_F_ASN1_ITEM_EX_D2I, ASN1_R_AUX_ERROR);
+ err:
+- ASN1_item_ex_free(pval, it);
++ if (combine == 0)
++ ASN1_item_ex_free(pval, it);
+ if (errtt)
+ ERR_add_error_data(4, "Field=", errtt->field_name,
+ ", Type=", it->sname);
+@@ -689,7 +692,7 @@ static int asn1_template_noexp_d2i(ASN1_VALUE **val,
+ } else {
+ /* Nothing special */
+ ret = ASN1_item_ex_d2i(val, &p, len, ASN1_ITEM_ptr(tt->item),
+- -1, 0, opt, ctx);
++ -1, tt->flags & ASN1_TFLG_COMBINE, opt, ctx);
+ if (!ret) {
+ ASN1err(ASN1_F_ASN1_TEMPLATE_NOEXP_D2I, ERR_R_NESTED_ASN1_ERROR);
+ goto err;
+--
+2.3.5
+
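
The flag added above tells the inner decoder that the value it is filling in is embedded in a caller-owned parent, so the error path must leave it for the caller to release; freeing or zeroing it there orphans the parent's other components. A toy version of that ownership rule, with made-up structures rather than the real tasn_dec machinery:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define FLAG_COMBINE 0x1

    struct node { char *payload; };

    /* Parse into *out; on failure, free *out only when we own it,
     * i.e. when we are not filling a field combined into the caller's
     * own structure. Returns 1 on success, 0 on failure. */
    static int parse_item(struct node **out, const char *in, int flags)
    {
        int combine = flags & FLAG_COMBINE;

        if (in == NULL || *in == '\0') {         /* simulated decode error */
            if (!combine) {
                if (*out != NULL)
                    free((*out)->payload);
                free(*out);
                *out = NULL;                     /* safe: we own it */
            }
            return 0;
        }
        if (*out == NULL)
            *out = calloc(1, sizeof(**out));
        free((*out)->payload);                   /* replace any previous value */
        (*out)->payload = strdup(in);
        return 1;
    }

    int main(void)
    {
        struct node *parent = calloc(1, sizeof(*parent));
        parent->payload = strdup("already-parsed sibling data");

        /* Error while parsing a combined field: parent must survive. */
        parse_item(&parent, "", FLAG_COMBINE);
        printf("parent still holds: %s\n", parent->payload);

        free(parent->payload);
        free(parent);
        return 0;
    }
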
diff --git a/yocto-poky/meta/recipes-connectivity/openssl/openssl/CVE-2015-3197.patch b/yocto-poky/meta/recipes-connectivity/openssl/openssl/CVE-2015-3197.patch
new file mode 100644
index 000000000..dd288c93f
--- /dev/null
+++ b/yocto-poky/meta/recipes-connectivity/openssl/openssl/CVE-2015-3197.patch
@@ -0,0 +1,63 @@
+From d81a1600588b726c2bdccda7efad3cc7a87d6245 Mon Sep 17 00:00:00 2001
+From: Viktor Dukhovni <openssl-users@dukhovni.org>
+Date: Wed, 30 Dec 2015 22:44:51 -0500
+Subject: [PATCH] Better SSLv2 cipher-suite enforcement
+
+Based on patch by: Nimrod Aviram <nimrod.aviram@gmail.com>
+
+CVE-2015-3197
+
+Reviewed-by: Tim Hudson <tjh@openssl.org>
+Reviewed-by: Richard Levitte <levitte@openssl.org>
+
+Upstream-Status: Backport
+https://github.com/openssl/openssl/commit/d81a1600588b726c2bdccda7efad3cc7a87d6245
+
+CVE: CVE-2015-3197
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ ssl/s2_srvr.c | 15 +++++++++++++--
+ 1 file changed, 13 insertions(+), 2 deletions(-)
+
+Index: openssl-1.0.2d/ssl/s2_srvr.c
+===================================================================
+--- openssl-1.0.2d.orig/ssl/s2_srvr.c
++++ openssl-1.0.2d/ssl/s2_srvr.c
+@@ -402,7 +402,7 @@ static int get_client_master_key(SSL *s)
+ }
+
+ cp = ssl2_get_cipher_by_char(p);
+- if (cp == NULL) {
++ if (cp == NULL || sk_SSL_CIPHER_find(s->session->ciphers, cp) < 0) {
+ ssl2_return_error(s, SSL2_PE_NO_CIPHER);
+ SSLerr(SSL_F_GET_CLIENT_MASTER_KEY, SSL_R_NO_CIPHER_MATCH);
+ return (-1);
+@@ -687,8 +687,12 @@ static int get_client_hello(SSL *s)
+ prio = cs;
+ allow = cl;
+ }
++
++ /* Generate list of SSLv2 ciphers shared between client and server */
+ for (z = 0; z < sk_SSL_CIPHER_num(prio); z++) {
+- if (sk_SSL_CIPHER_find(allow, sk_SSL_CIPHER_value(prio, z)) < 0) {
++ const SSL_CIPHER *cp = sk_SSL_CIPHER_value(prio, z);
++ if ((cp->algorithm_ssl & SSL_SSLV2) == 0 ||
++ sk_SSL_CIPHER_find(allow, cp) < 0) {
+ (void)sk_SSL_CIPHER_delete(prio, z);
+ z--;
+ }
+@@ -697,6 +701,13 @@ static int get_client_hello(SSL *s)
+ sk_SSL_CIPHER_free(s->session->ciphers);
+ s->session->ciphers = prio;
+ }
++
++ /* Make sure we have at least one cipher in common */
++ if (sk_SSL_CIPHER_num(s->session->ciphers) == 0) {
++ ssl2_return_error(s, SSL2_PE_NO_CIPHER);
++ SSLerr(SSL_F_GET_CLIENT_HELLO, SSL_R_NO_CIPHER_MATCH);
++ return -1;
++ }
+ /*
+ * s->session->ciphers should now have a list of ciphers that are on
+ * both the client and server. This list is ordered by the order the
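
The server-side checks added above boil down to: keep only ciphers that are both offered by the client and actually valid for SSLv2, and refuse the handshake when that intersection is empty. The toy filter below shows the shape of the check with plain string lists instead of the SSL_CIPHER stack API:

    #include <stdio.h>
    #include <string.h>

    /* Toy model: ciphers the server accepts for this protocol version. */
    static const char *allowed[] = { "DES-CBC3-MD5", "RC4-MD5", NULL };

    static int is_allowed(const char *name)
    {
        for (int i = 0; allowed[i] != NULL; i++)
            if (strcmp(allowed[i], name) == 0)
                return 1;
        return 0;
    }

    int main(void)
    {
        /* Client-offered list; entries not valid for SSLv2 are dropped. */
        const char *offered[] = { "AES128-SHA", "NULL-MD5", NULL };
        int shared = 0;

        for (int i = 0; offered[i] != NULL; i++)
            if (is_allowed(offered[i]))
                shared++;

        if (shared == 0) {
            fprintf(stderr, "no cipher in common: abort handshake\n");
            return 1;       /* the check that was missing before the fix */
        }
        printf("%d shared cipher(s), continue\n", shared);
        return 0;
    }
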
diff --git a/yocto-poky/meta/recipes-connectivity/openssl/openssl/CVE-2016-0701_1.patch b/yocto-poky/meta/recipes-connectivity/openssl/openssl/CVE-2016-0701_1.patch
new file mode 100644
index 000000000..cf2d9a7b0
--- /dev/null
+++ b/yocto-poky/meta/recipes-connectivity/openssl/openssl/CVE-2016-0701_1.patch
@@ -0,0 +1,102 @@
+From 878e2c5b13010329c203f309ed0c8f2113f85648 Mon Sep 17 00:00:00 2001
+From: Matt Caswell <matt@openssl.org>
+Date: Mon, 18 Jan 2016 11:31:58 +0000
+Subject: [PATCH] Prevent small subgroup attacks on DH/DHE
+
+Historically OpenSSL only ever generated DH parameters based on "safe"
+primes. More recently (in version 1.0.2) support was provided for
+generating X9.42 style parameter files such as those required for RFC
+5114 support. The primes used in such files may not be "safe". Where an
+application is using DH configured with parameters based on primes that
+are not "safe" then an attacker could use this fact to find a peer's
+private DH exponent. This attack requires that the attacker complete
+multiple handshakes in which the peer uses the same DH exponent.
+
+A simple mitigation is to ensure that y^q (mod p) == 1
+
+CVE-2016-0701 (fix part 1 of 2)
+
+Issue reported by Antonio Sanso.
+
+Reviewed-by: Viktor Dukhovni <viktor@openssl.org>
+
+Upstream-Status: Backport
+
+https://github.com/openssl/openssl/commit/878e2c5b13010329c203f309ed0c8f2113f85648
+
+CVE: CVE-2016-0701
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ crypto/dh/dh.h | 1 +
+ crypto/dh/dh_check.c | 35 +++++++++++++++++++++++++----------
+ 2 files changed, 26 insertions(+), 10 deletions(-)
+
+diff --git a/crypto/dh/dh.h b/crypto/dh/dh.h
+index b177673..5498a9d 100644
+--- a/crypto/dh/dh.h
++++ b/crypto/dh/dh.h
+@@ -174,6 +174,7 @@ struct dh_st {
+ /* DH_check_pub_key error codes */
+ # define DH_CHECK_PUBKEY_TOO_SMALL 0x01
+ # define DH_CHECK_PUBKEY_TOO_LARGE 0x02
++# define DH_CHECK_PUBKEY_INVALID 0x03
+
+ /*
+ * primes p where (p-1)/2 is prime too are called "safe"; we define this for
+diff --git a/crypto/dh/dh_check.c b/crypto/dh/dh_check.c
+index 347467c..5adedc0 100644
+--- a/crypto/dh/dh_check.c
++++ b/crypto/dh/dh_check.c
+@@ -151,23 +151,38 @@ int DH_check(const DH *dh, int *ret)
+ int DH_check_pub_key(const DH *dh, const BIGNUM *pub_key, int *ret)
+ {
+ int ok = 0;
+- BIGNUM *q = NULL;
++ BIGNUM *tmp = NULL;
++ BN_CTX *ctx = NULL;
+
+ *ret = 0;
+- q = BN_new();
+- if (q == NULL)
++ ctx = BN_CTX_new();
++ if (ctx == NULL)
+ goto err;
+- BN_set_word(q, 1);
+- if (BN_cmp(pub_key, q) <= 0)
++ BN_CTX_start(ctx);
++ tmp = BN_CTX_get(ctx);
++ if (tmp == NULL)
++ goto err;
++ BN_set_word(tmp, 1);
++ if (BN_cmp(pub_key, tmp) <= 0)
+ *ret |= DH_CHECK_PUBKEY_TOO_SMALL;
+- BN_copy(q, dh->p);
+- BN_sub_word(q, 1);
+- if (BN_cmp(pub_key, q) >= 0)
++ BN_copy(tmp, dh->p);
++ BN_sub_word(tmp, 1);
++ if (BN_cmp(pub_key, tmp) >= 0)
+ *ret |= DH_CHECK_PUBKEY_TOO_LARGE;
+
++ if (dh->q != NULL) {
++ /* Check pub_key^q == 1 mod p */
++ if (!BN_mod_exp(tmp, pub_key, dh->q, dh->p, ctx))
++ goto err;
++ if (!BN_is_one(tmp))
++ *ret |= DH_CHECK_PUBKEY_INVALID;
++ }
++
+ ok = 1;
+ err:
+- if (q != NULL)
+- BN_free(q);
++ if (ctx != NULL) {
++ BN_CTX_end(ctx);
++ BN_CTX_free(ctx);
++ }
+ return (ok);
+ }
+--
+2.3.5
+
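
The mitigation described in the commit message, y^q (mod p) == 1, can be exercised on toy numbers. The sketch below uses p = 23 and q = 11 (q divides p - 1); pub = 4 lies in the order-q subgroup while pub = 5 does not. Real code performs this check with OpenSSL's BIGNUM routines on 2048-bit parameters, not machine integers.

    #include <stdio.h>
    #include <stdint.h>

    /* Square-and-multiply modular exponentiation on small integers. */
    static uint64_t modpow(uint64_t base, uint64_t exp, uint64_t mod)
    {
        uint64_t result = 1;
        base %= mod;
        while (exp > 0) {
            if (exp & 1)
                result = (result * base) % mod;
            base = (base * base) % mod;
            exp >>= 1;
        }
        return result;
    }

    /* DH_CHECK_PUBKEY_INVALID analogue: pub is acceptable only if it
     * lies in the subgroup of order q, i.e. pub^q == 1 (mod p). */
    static int pubkey_in_subgroup(uint64_t pub, uint64_t q, uint64_t p)
    {
        return modpow(pub, q, p) == 1;
    }

    int main(void)
    {
        const uint64_t p = 23, q = 11;   /* toy X9.42-style parameters */

        printf("pub=4 -> %s\n", pubkey_in_subgroup(4, q, p) ? "ok" : "invalid");
        printf("pub=5 -> %s\n", pubkey_in_subgroup(5, q, p) ? "ok" : "invalid");
        return 0;
    }
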
diff --git a/yocto-poky/meta/recipes-connectivity/openssl/openssl/CVE-2016-0701_2.patch b/yocto-poky/meta/recipes-connectivity/openssl/openssl/CVE-2016-0701_2.patch
new file mode 100644
index 000000000..05caf0a99
--- /dev/null
+++ b/yocto-poky/meta/recipes-connectivity/openssl/openssl/CVE-2016-0701_2.patch
@@ -0,0 +1,156 @@
+From c5b831f21d0d29d1e517d139d9d101763f60c9a2 Mon Sep 17 00:00:00 2001
+From: Matt Caswell <matt@openssl.org>
+Date: Thu, 17 Dec 2015 02:57:20 +0000
+Subject: [PATCH] Always generate DH keys for ephemeral DH cipher suites
+
+Modified version of the commit ffaef3f15 in the master branch by Stephen
+Henson. This makes the SSL_OP_SINGLE_DH_USE option a no-op and always
+generates a new DH key for every handshake regardless.
+
+CVE-2016-0701 (fix part 2 of 2)
+
+Issue reported by Antonio Sanso
+
+Reviewed-by: Viktor Dukhovni <viktor@openssl.org>
+
+Upstream-Status: Backport
+
+https://github.com/openssl/openssl/commit/c5b831f21d0d29d1e517d139d9d101763f60c9a2
+
+CVE: CVE-2016-0701 #2
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ doc/ssl/SSL_CTX_set_tmp_dh_callback.pod | 29 +++++------------------------
+ ssl/s3_lib.c | 14 --------------
+ ssl/s3_srvr.c | 17 +++--------------
+ ssl/ssl.h | 2 +-
+ 4 files changed, 9 insertions(+), 53 deletions(-)
+
+Index: openssl-1.0.2d/doc/ssl/SSL_CTX_set_tmp_dh_callback.pod
+===================================================================
+--- openssl-1.0.2d.orig/doc/ssl/SSL_CTX_set_tmp_dh_callback.pod
++++ openssl-1.0.2d/doc/ssl/SSL_CTX_set_tmp_dh_callback.pod
+@@ -48,25 +48,8 @@ even if he gets hold of the normal (cert
+ only used for signing.
+
+ In order to perform a DH key exchange the server must use a DH group
+-(DH parameters) and generate a DH key.
+-The server will always generate a new DH key during the negotiation
+-if either the DH parameters are supplied via callback or the
+-SSL_OP_SINGLE_DH_USE option of SSL_CTX_set_options(3) is set (or both).
+-It will immediately create a DH key if DH parameters are supplied via
+-SSL_CTX_set_tmp_dh() and SSL_OP_SINGLE_DH_USE is not set.
+-In this case,
+-it may happen that a key is generated on initialization without later
+-being needed, while on the other hand the computer time during the
+-negotiation is being saved.
+-
+-If "strong" primes were used to generate the DH parameters, it is not strictly
+-necessary to generate a new key for each handshake but it does improve forward
+-secrecy. If it is not assured that "strong" primes were used,
+-SSL_OP_SINGLE_DH_USE must be used in order to prevent small subgroup
+-attacks. Always using SSL_OP_SINGLE_DH_USE has an impact on the
+-computer time needed during negotiation, but it is not very large, so
+-application authors/users should consider always enabling this option.
+-The option is required to implement perfect forward secrecy (PFS).
++(DH parameters) and generate a DH key. The server will always generate
++a new DH key during the negotiation.
+
+ As generating DH parameters is extremely time consuming, an application
+ should not generate the parameters on the fly but supply the parameters.
+@@ -93,10 +76,9 @@ can supply the DH parameters via a callb
+ Previous versions of the callback used B<is_export> and B<keylength>
+ parameters to control parameter generation for export and non-export
+ cipher suites. Modern servers that do not support export ciphersuites
+-are advised to either use SSL_CTX_set_tmp_dh() in combination with
+-SSL_OP_SINGLE_DH_USE, or alternatively, use the callback but ignore
+-B<keylength> and B<is_export> and simply supply at least 2048-bit
+-parameters in the callback.
++are advised to either use SSL_CTX_set_tmp_dh() or alternatively, use
++the callback but ignore B<keylength> and B<is_export> and simply
++supply at least 2048-bit parameters in the callback.
+
+ =head1 EXAMPLES
+
+@@ -128,7 +110,6 @@ partly left out.)
+ if (SSL_CTX_set_tmp_dh(ctx, dh_2048) != 1) {
+ /* Error. */
+ }
+- SSL_CTX_set_options(ctx, SSL_OP_SINGLE_DH_USE);
+ ...
+
+ =head1 RETURN VALUES
+Index: openssl-1.0.2d/ssl/s3_lib.c
+===================================================================
+--- openssl-1.0.2d.orig/ssl/s3_lib.c
++++ openssl-1.0.2d/ssl/s3_lib.c
+@@ -3206,13 +3206,6 @@ long ssl3_ctrl(SSL *s, int cmd, long lar
+ SSLerr(SSL_F_SSL3_CTRL, ERR_R_DH_LIB);
+ return (ret);
+ }
+- if (!(s->options & SSL_OP_SINGLE_DH_USE)) {
+- if (!DH_generate_key(dh)) {
+- DH_free(dh);
+- SSLerr(SSL_F_SSL3_CTRL, ERR_R_DH_LIB);
+- return (ret);
+- }
+- }
+ if (s->cert->dh_tmp != NULL)
+ DH_free(s->cert->dh_tmp);
+ s->cert->dh_tmp = dh;
+@@ -3710,13 +3703,6 @@ long ssl3_ctx_ctrl(SSL_CTX *ctx, int cmd
+ SSLerr(SSL_F_SSL3_CTX_CTRL, ERR_R_DH_LIB);
+ return 0;
+ }
+- if (!(ctx->options & SSL_OP_SINGLE_DH_USE)) {
+- if (!DH_generate_key(new)) {
+- SSLerr(SSL_F_SSL3_CTX_CTRL, ERR_R_DH_LIB);
+- DH_free(new);
+- return 0;
+- }
+- }
+ if (cert->dh_tmp != NULL)
+ DH_free(cert->dh_tmp);
+ cert->dh_tmp = new;
+Index: openssl-1.0.2d/ssl/s3_srvr.c
+===================================================================
+--- openssl-1.0.2d.orig/ssl/s3_srvr.c
++++ openssl-1.0.2d/ssl/s3_srvr.c
+@@ -1684,20 +1684,9 @@ int ssl3_send_server_key_exchange(SSL *s
+ }
+
+ s->s3->tmp.dh = dh;
+- if ((dhp->pub_key == NULL ||
+- dhp->priv_key == NULL ||
+- (s->options & SSL_OP_SINGLE_DH_USE))) {
+- if (!DH_generate_key(dh)) {
+- SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE, ERR_R_DH_LIB);
+- goto err;
+- }
+- } else {
+- dh->pub_key = BN_dup(dhp->pub_key);
+- dh->priv_key = BN_dup(dhp->priv_key);
+- if ((dh->pub_key == NULL) || (dh->priv_key == NULL)) {
+- SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE, ERR_R_DH_LIB);
+- goto err;
+- }
++ if (!DH_generate_key(dh)) {
++ SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE, ERR_R_DH_LIB);
++ goto err;
+ }
+ r[0] = dh->p;
+ r[1] = dh->g;
+Index: openssl-1.0.2d/ssl/ssl.h
+===================================================================
+--- openssl-1.0.2d.orig/ssl/ssl.h
++++ openssl-1.0.2d/ssl/ssl.h
+@@ -625,7 +625,7 @@ struct ssl_session_st {
+ # define SSL_OP_ALLOW_UNSAFE_LEGACY_RENEGOTIATION 0x00040000L
+ /* If set, always create a new key when using tmp_ecdh parameters */
+ # define SSL_OP_SINGLE_ECDH_USE 0x00080000L
+-/* If set, always create a new key when using tmp_dh parameters */
++/* Does nothing: retained for compatibility */
+ # define SSL_OP_SINGLE_DH_USE 0x00100000L
+ /* Does nothing: retained for compatibiity */
+ # define SSL_OP_EPHEMERAL_RSA 0x0
diff --git a/yocto-poky/meta/recipes-connectivity/openssl/openssl/ptest_makefile_deps.patch b/yocto-poky/meta/recipes-connectivity/openssl/openssl/ptest_makefile_deps.patch
new file mode 100644
index 000000000..4202e61d1
--- /dev/null
+++ b/yocto-poky/meta/recipes-connectivity/openssl/openssl/ptest_makefile_deps.patch
@@ -0,0 +1,248 @@
+Additional Makefile dependencies removal for test targets
+
+Removing the dependency check for test targets as these tests are
+causing a number of failures and "noise" during ptest execution.
+
+Upstream-Status: Inappropriate [config]
+
+Signed-off-by: Maxin B. John <maxin.john@intel.com>
+
+diff -Naur openssl-1.0.2d-orig/test/Makefile openssl-1.0.2d/test/Makefile
+--- openssl-1.0.2d-orig/test/Makefile 2015-09-28 12:50:41.530022979 +0300
++++ openssl-1.0.2d/test/Makefile 2015-09-28 12:57:45.930717240 +0300
+@@ -155,67 +155,67 @@
+ ( $(MAKE) $$i && echo "PASS: $$i" ) || echo "FAIL: $$i"; \
+ done)
+
+-test_evp: $(EVPTEST)$(EXE_EXT) evptests.txt
++test_evp:
+ ../util/shlib_wrap.sh ./$(EVPTEST) evptests.txt
+
+-test_evp_extra: $(EVPEXTRATEST)$(EXE_EXT)
++test_evp_extra:
+ ../util/shlib_wrap.sh ./$(EVPEXTRATEST)
+
+-test_des: $(DESTEST)$(EXE_EXT)
++test_des:
+ ../util/shlib_wrap.sh ./$(DESTEST)
+
+-test_idea: $(IDEATEST)$(EXE_EXT)
++test_idea:
+ ../util/shlib_wrap.sh ./$(IDEATEST)
+
+-test_sha: $(SHATEST)$(EXE_EXT) $(SHA1TEST)$(EXE_EXT) $(SHA256TEST)$(EXE_EXT) $(SHA512TEST)$(EXE_EXT)
++test_sha:
+ ../util/shlib_wrap.sh ./$(SHATEST)
+ ../util/shlib_wrap.sh ./$(SHA1TEST)
+ ../util/shlib_wrap.sh ./$(SHA256TEST)
+ ../util/shlib_wrap.sh ./$(SHA512TEST)
+
+-test_mdc2: $(MDC2TEST)$(EXE_EXT)
++test_mdc2:
+ ../util/shlib_wrap.sh ./$(MDC2TEST)
+
+-test_md5: $(MD5TEST)$(EXE_EXT)
++test_md5:
+ ../util/shlib_wrap.sh ./$(MD5TEST)
+
+-test_md4: $(MD4TEST)$(EXE_EXT)
++test_md4:
+ ../util/shlib_wrap.sh ./$(MD4TEST)
+
+-test_hmac: $(HMACTEST)$(EXE_EXT)
++test_hmac:
+ ../util/shlib_wrap.sh ./$(HMACTEST)
+
+-test_wp: $(WPTEST)$(EXE_EXT)
++test_wp:
+ ../util/shlib_wrap.sh ./$(WPTEST)
+
+-test_md2: $(MD2TEST)$(EXE_EXT)
++test_md2:
+ ../util/shlib_wrap.sh ./$(MD2TEST)
+
+-test_rmd: $(RMDTEST)$(EXE_EXT)
++test_rmd:
+ ../util/shlib_wrap.sh ./$(RMDTEST)
+
+-test_bf: $(BFTEST)$(EXE_EXT)
++test_bf:
+ ../util/shlib_wrap.sh ./$(BFTEST)
+
+-test_cast: $(CASTTEST)$(EXE_EXT)
++test_cast:
+ ../util/shlib_wrap.sh ./$(CASTTEST)
+
+-test_rc2: $(RC2TEST)$(EXE_EXT)
++test_rc2:
+ ../util/shlib_wrap.sh ./$(RC2TEST)
+
+-test_rc4: $(RC4TEST)$(EXE_EXT)
++test_rc4:
+ ../util/shlib_wrap.sh ./$(RC4TEST)
+
+-test_rc5: $(RC5TEST)$(EXE_EXT)
++test_rc5:
+ ../util/shlib_wrap.sh ./$(RC5TEST)
+
+-test_rand: $(RANDTEST)$(EXE_EXT)
++test_rand:
+ ../util/shlib_wrap.sh ./$(RANDTEST)
+
+-test_enc: ../apps/openssl$(EXE_EXT) testenc
++test_enc:
+ @sh ./testenc
+
+-test_x509: ../apps/openssl$(EXE_EXT) tx509 testx509.pem v3-cert1.pem v3-cert2.pem
++test_x509:
+ echo test normal x509v1 certificate
+ sh ./tx509 2>/dev/null
+ echo test first x509v3 certificate
+@@ -223,25 +223,25 @@
+ echo test second x509v3 certificate
+ sh ./tx509 v3-cert2.pem 2>/dev/null
+
+-test_rsa: ../apps/openssl$(EXE_EXT) trsa testrsa.pem
++test_rsa:
+ @sh ./trsa 2>/dev/null
+ ../util/shlib_wrap.sh ./$(RSATEST)
+
+-test_crl: ../apps/openssl$(EXE_EXT) tcrl testcrl.pem
++test_crl:
+ @sh ./tcrl 2>/dev/null
+
+-test_sid: ../apps/openssl$(EXE_EXT) tsid testsid.pem
++test_sid:
+ @sh ./tsid 2>/dev/null
+
+-test_req: ../apps/openssl$(EXE_EXT) treq testreq.pem testreq2.pem
++test_req:
+ @sh ./treq 2>/dev/null
+ @sh ./treq testreq2.pem 2>/dev/null
+
+-test_pkcs7: ../apps/openssl$(EXE_EXT) tpkcs7 tpkcs7d testp7.pem pkcs7-1.pem
++test_pkcs7:
+ @sh ./tpkcs7 2>/dev/null
+ @sh ./tpkcs7d 2>/dev/null
+
+-test_bn: $(BNTEST)$(EXE_EXT) $(EXPTEST)$(EXE_EXT) bctest
++test_bn:
+ @echo starting big number library test, could take a while...
+ @../util/shlib_wrap.sh ./$(BNTEST) >tmp.bntest
+ @echo quit >>tmp.bntest
+@@ -250,33 +250,33 @@
+ @echo 'test a^b%c implementations'
+ ../util/shlib_wrap.sh ./$(EXPTEST)
+
+-test_ec: $(ECTEST)$(EXE_EXT)
++test_ec:
+ @echo 'test elliptic curves'
+ ../util/shlib_wrap.sh ./$(ECTEST)
+
+-test_ecdsa: $(ECDSATEST)$(EXE_EXT)
++test_ecdsa:
+ @echo 'test ecdsa'
+ ../util/shlib_wrap.sh ./$(ECDSATEST)
+
+-test_ecdh: $(ECDHTEST)$(EXE_EXT)
++test_ecdh:
+ @echo 'test ecdh'
+ ../util/shlib_wrap.sh ./$(ECDHTEST)
+
+-test_verify: ../apps/openssl$(EXE_EXT)
++test_verify:
+ @echo "The following command should have some OK's and some failures"
+ @echo "There are definitly a few expired certificates"
+ ../util/shlib_wrap.sh ../apps/openssl verify -CApath ../certs/demo ../certs/demo/*.pem
+
+-test_dh: $(DHTEST)$(EXE_EXT)
++test_dh:
+ @echo "Generate a set of DH parameters"
+ ../util/shlib_wrap.sh ./$(DHTEST)
+
+-test_dsa: $(DSATEST)$(EXE_EXT)
++test_dsa:
+ @echo "Generate a set of DSA parameters"
+ ../util/shlib_wrap.sh ./$(DSATEST)
+ ../util/shlib_wrap.sh ./$(DSATEST) -app2_1
+
+-test_gen testreq.pem: ../apps/openssl$(EXE_EXT) testgen test.cnf
++test_gen testreq.pem:
+ @echo "Generate and verify a certificate request"
+ @sh ./testgen
+
+@@ -288,13 +288,11 @@
+ @cat certCA.ss certU.ss > intP1.ss
+ @cat certCA.ss certU.ss certP1.ss > intP2.ss
+
+-test_engine: $(ENGINETEST)$(EXE_EXT)
++test_engine:
+ @echo "Manipulate the ENGINE structures"
+ ../util/shlib_wrap.sh ./$(ENGINETEST)
+
+-test_ssl: keyU.ss certU.ss certCA.ss certP1.ss keyP1.ss certP2.ss keyP2.ss \
+- intP1.ss intP2.ss $(SSLTEST)$(EXE_EXT) testssl testsslproxy \
+- ../apps/server2.pem serverinfo.pem
++test_ssl:
+ @echo "test SSL protocol"
+ @if [ -n "$(FIPSCANLIB)" ]; then \
+ sh ./testfipsssl keyU.ss certU.ss certCA.ss; \
+@@ -304,7 +302,7 @@
+ @sh ./testsslproxy keyP1.ss certP1.ss intP1.ss
+ @sh ./testsslproxy keyP2.ss certP2.ss intP2.ss
+
+-test_ca: ../apps/openssl$(EXE_EXT) testca CAss.cnf Uss.cnf
++test_ca:
+ @if ../util/shlib_wrap.sh ../apps/openssl no-rsa; then \
+ echo "skipping CA.sh test -- requires RSA"; \
+ else \
+@@ -312,11 +310,11 @@
+ sh ./testca; \
+ fi
+
+-test_aes: #$(AESTEST)
++test_aes:
+ # @echo "test Rijndael"
+ # ../util/shlib_wrap.sh ./$(AESTEST)
+
+-test_tsa: ../apps/openssl$(EXE_EXT) testtsa CAtsa.cnf ../util/shlib_wrap.sh
++test_tsa:
+ @if ../util/shlib_wrap.sh ../apps/openssl no-rsa; then \
+ echo "skipping testtsa test -- requires RSA"; \
+ else \
+@@ -331,7 +329,7 @@
+ @echo "Test JPAKE"
+ ../util/shlib_wrap.sh ./$(JPAKETEST)
+
+-test_cms: ../apps/openssl$(EXE_EXT) cms-test.pl smcont.txt
++test_cms:
+ @echo "CMS consistency test"
+ $(PERL) cms-test.pl
+
+@@ -339,22 +337,22 @@
+ @echo "Test SRP"
+ ../util/shlib_wrap.sh ./srptest
+
+-test_ocsp: ../apps/openssl$(EXE_EXT) tocsp
++test_ocsp:
+ @echo "Test OCSP"
+ @sh ./tocsp
+
+-test_v3name: $(V3NAMETEST)$(EXE_EXT)
++test_v3name:
+ @echo "Test X509v3_check_*"
+ ../util/shlib_wrap.sh ./$(V3NAMETEST)
+
+ test_heartbeat:
+ ../util/shlib_wrap.sh ./$(HEARTBEATTEST)
+
+-test_constant_time: $(CONSTTIMETEST)$(EXE_EXT)
++test_constant_time:
+ @echo "Test constant time utilites"
+ ../util/shlib_wrap.sh ./$(CONSTTIMETEST)
+
+-test_verify_extra: $(VERIFYEXTRATEST)$(EXE_EXT)
++test_verify_extra:
+ @echo $(START) $@
+ ../util/shlib_wrap.sh ./$(VERIFYEXTRATEST)
+
diff --git a/yocto-poky/meta/recipes-connectivity/openssl/openssl_1.0.2d.bb b/yocto-poky/meta/recipes-connectivity/openssl/openssl_1.0.2d.bb
index 32d8dce26..8defa5b74 100644
--- a/yocto-poky/meta/recipes-connectivity/openssl/openssl_1.0.2d.bb
+++ b/yocto-poky/meta/recipes-connectivity/openssl/openssl_1.0.2d.bb
@@ -36,6 +36,14 @@ SRC_URI += "file://configure-targets.patch \
file://run-ptest \
file://crypto_use_bigint_in_x86-64_perl.patch \
file://openssl-1.0.2a-x32-asm.patch \
+ file://ptest_makefile_deps.patch \
+ file://CVE-2015-3193-bn-asm-x86_64-mont5.pl-fix-carry-propagating-bug-CVE.patch \
+ file://CVE-2015-3194-1-Add-PSS-parameter-check.patch \
+ file://0001-Add-test-for-CVE-2015-3194.patch \
+ file://CVE-2015-3195-Fix-leak-with-ASN.1-combine.patch \
+ file://CVE-2015-3197.patch \
+ file://CVE-2016-0701_1.patch \
+ file://CVE-2016-0701_2.patch \
"
SRC_URI[md5sum] = "38dd619b2e77cbac69b99f52a053d25a"
@@ -55,3 +63,13 @@ PARALLEL_MAKEINST = ""
do_configure_prepend() {
cp ${WORKDIR}/find.pl ${S}/util/find.pl
}
+
+# The crypto_use_bigint patch means that perl's bignum module needs to be
+# installed, but some distributions (for example Fedora 23) don't ship it by
+# default. As the resulting error is very misleading, check for bignum before
+# building.
+do_configure_prepend() {
+ if ! perl -Mbigint -e true; then
+ bbfatal "The perl module 'bignum' was not found but this is required to build openssl. Please install this module (often packaged as perl-bignum) and re-run bitbake."
+ fi
+}
diff --git a/yocto-poky/meta/recipes-connectivity/socat/socat/CVE-2016-2217.patch b/yocto-poky/meta/recipes-connectivity/socat/socat/CVE-2016-2217.patch
new file mode 100644
index 000000000..0cd417944
--- /dev/null
+++ b/yocto-poky/meta/recipes-connectivity/socat/socat/CVE-2016-2217.patch
@@ -0,0 +1,372 @@
+Upstream-Status: Backport
+
+http://www.dest-unreach.org/socat/download/socat-1.7.3.1.patch
+
+CVE: CVE-2016-2217
+[Yocto # 9024]
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+Index: socat-1.7.3.0/CHANGES
+===================================================================
+--- socat-1.7.3.0.orig/CHANGES
++++ socat-1.7.3.0/CHANGES
+@@ -1,8 +1,39 @@
+
++####################### V 1.7.3.1:
++
++security:
++ Socat security advisory 8
++ A stack overflow vulnerability was found that can be triggered when
++ command line arguments (complete address specifications, host names,
++ file names) are longer than 512 bytes.
++ Successful exploitation might allow an attacker to execute arbitrary
++ code with the privileges of the socat process.
++ This vulnerability can only be exploited when an attacker is able to
++ inject data into socat's command line.
++ A vulnerable scenario would be a CGI script that reads data from clients
++ and uses (parts of) this data as hostname for a Socat invocation.
++ Test: NESTEDOVFL
++ Credits to Takumi Akiyama for finding and reporting this issue.
++
++ Socat security advisory 7
++ MSVR-1499
++ In the OpenSSL address implementation the hard coded 1024 bit DH p
++ parameter was not prime. The effective cryptographic strength of a key
++ exchange using these parameters was weaker than the one one could get by
++ using a prime p. Moreover, since there is no indication of how these
++ parameters were chosen, the existence of a trapdoor that makes possible
++ for an eavesdropper to recover the shared secret from a key exchange
++ that uses them cannot be ruled out.
++ Furthermore, 1024bit is not considered sufficiently secure.
++ Fix: generated a new 2048bit prime.
++ Thanks to Santiago Zanella-Beguelin and Microsoft Vulnerability
++ Research (MSVR) for finding and reporting this issue.
++
+ ####################### V 1.7.3.0:
+
+ security:
+- (CVE Id pending)
++ Socat security advisory 6
++ CVE-2015-1379: Possible DoS with fork
+ Fixed problems with signal handling caused by use of not async signal
+ safe functions in signal handlers that could freeze socat, allowing
+ denial of service attacks.
+@@ -240,6 +271,7 @@ docu:
+ ####################### V 1.7.2.3:
+
+ security:
++ Socat security advisory 5
+ CVE-2014-0019: socats PROXY-CONNECT address was vulnerable to a buffer
+ overflow with data from command line (see socat-secadv5.txt)
+ Credits to Florian Weimer of the Red Hat Product Security Team
+@@ -247,6 +279,7 @@ security:
+ ####################### V 1.7.2.2:
+
+ security:
++ Socat security advisory 4
+ CVE-2013-3571:
+ after refusing a client connection due to bad source address or source
+ port socat shutdown() the socket but did not close() it, resulting in
+@@ -258,6 +291,7 @@ security:
+ ####################### V 1.7.2.1:
+
+ security:
++ Socat security advisory 3
+ CVE-2012-0219:
+ fixed a possible heap buffer overflow in the readline address. This bug
+ could be exploited when all of the following conditions were met:
+@@ -391,6 +425,7 @@ docu:
+ ####################### V 1.7.1.3:
+
+ security:
++ Socat security advisory 2
+ CVE-2010-2799:
+ fixed a stack overflow vulnerability that occurred when command
+ line arguments (whole addresses, host names, file names) were longer
+@@ -892,6 +927,7 @@ further corrections:
+ ####################### V 1.4.0.3:
+
+ security:
++ Socat security advisory 1
+ CVE-2004-1484:
+ fix to a syslog() based format string vulnerability that can lead to
+ remote code execution. See advisory socat-adv-1.txt
+Index: socat-1.7.3.0/VERSION
+===================================================================
+--- socat-1.7.3.0.orig/VERSION
++++ socat-1.7.3.0/VERSION
+@@ -1 +1 @@
+-"1.7.3.0"
++"1.7.3.1"
+Index: socat-1.7.3.0/nestlex.c
+===================================================================
+--- socat-1.7.3.0.orig/nestlex.c
++++ socat-1.7.3.0/nestlex.c
+@@ -1,5 +1,5 @@
+ /* source: nestlex.c */
+-/* Copyright Gerhard Rieger 2006-2010 */
++/* Copyright Gerhard Rieger */
+ /* Published under the GNU General Public License V.2, see file COPYING */
+
+ /* a function for lexical scanning of nested character patterns */
+@@ -9,6 +9,17 @@
+
+ #include "sysincludes.h"
+
++static int _nestlex(const char **addr,
++ char **token,
++ ptrdiff_t *len,
++ const char *ends[],
++ const char *hquotes[],
++ const char *squotes[],
++ const char *nests[],
++ bool dropquotes,
++ bool c_esc,
++ bool html_esc
++ );
+
+ /* sub: scan a string and copy its value to output string
+ end scanning when an unescaped, unnested string from ends array is found
+@@ -33,6 +44,22 @@ int nestlex(const char **addr, /* input
+ bool c_esc, /* solve C char escapes: \n \t \0 etc */
+ bool html_esc /* solve HTML char escapes: %0d %08 etc */
+ ) {
++ return
++ _nestlex(addr, token, (ptrdiff_t *)len, ends, hquotes, squotes, nests,
++ dropquotes, c_esc, html_esc);
++}
++
++static int _nestlex(const char **addr,
++ char **token,
++ ptrdiff_t *len,
++ const char *ends[],
++ const char *hquotes[],
++ const char *squotes[],
++ const char *nests[],
++ bool dropquotes,
++ bool c_esc,
++ bool html_esc
++ ) {
+ const char *in = *addr; /* pointer into input string */
+ const char **endx; /* loops over end patterns */
+ const char **quotx; /* loops over quote patterns */
+@@ -77,16 +104,18 @@ int nestlex(const char **addr, /* input
+ if (--*len <= 0) { *addr = in; *token = out; return -1; }
+ }
+ }
+- /* we call nestlex recursively */
++ /* we call _nestlex recursively */
+ endnest[0] = *quotx;
+ endnest[1] = NULL;
+ result =
+- nestlex(&in, &out, len, endnest, NULL/*hquotes*/,
++ _nestlex(&in, &out, len, endnest, NULL/*hquotes*/,
+ NULL/*squotes*/, NULL/*nests*/,
+ false, c_esc, html_esc);
+ if (result == 0 && dropquotes) {
+ /* we strip this quote */
+ in += strlen(*quotx);
++ } else if (result < 0) {
++ *addr = in; *token = out; return result;
+ } else {
+ /* we copy the trailing quote */
+ for (i = strlen(*quotx); i > 0; --i) {
+@@ -110,7 +139,7 @@ int nestlex(const char **addr, /* input
+ if (!strncmp(in, *quotx, strlen(*quotx))) {
+ /* this quote pattern matches */
+ /* we strip this quote */
+- /* we call nestlex recursively */
++ /* we call _nestlex recursively */
+ const char *endnest[2];
+ if (dropquotes) {
+ /* we strip this quote */
+@@ -124,13 +153,15 @@ int nestlex(const char **addr, /* input
+ endnest[0] = *quotx;
+ endnest[1] = NULL;
+ result =
+- nestlex(&in, &out, len, endnest, hquotes,
++ _nestlex(&in, &out, len, endnest, hquotes,
+ squotes, nests,
+ false, c_esc, html_esc);
+
+ if (result == 0 && dropquotes) {
+ /* we strip the trailing quote */
+ in += strlen(*quotx);
++ } else if (result < 0) {
++ *addr = in; *token = out; return result;
+ } else {
+ /* we copy the trailing quote */
+ for (i = strlen(*quotx); i > 0; --i) {
+@@ -162,7 +193,7 @@ int nestlex(const char **addr, /* input
+ }
+
+ result =
+- nestlex(&in, &out, len, endnest, hquotes, squotes, nests,
++ _nestlex(&in, &out, len, endnest, hquotes, squotes, nests,
+ false, c_esc, html_esc);
+ if (result == 0) {
+ /* copy endnest */
+@@ -175,6 +206,8 @@ int nestlex(const char **addr, /* input
+ }
+ --i;
+ }
++ } else if (result < 0) {
++ *addr = in; *token = out; return result;
+ }
+ break;
+ }
+@@ -211,7 +244,7 @@ int nestlex(const char **addr, /* input
+ }
+ *out++ = c;
+ --*len;
+- if (*len == 0) {
++ if (*len <= 0) {
+ *addr = in;
+ *token = out;
+ return -1; /* output overflow */
+@@ -222,7 +255,7 @@ int nestlex(const char **addr, /* input
+ /* just a simple char */
+ *out++ = c;
+ --*len;
+- if (*len == 0) {
++ if (*len <= 0) {
+ *addr = in;
+ *token = out;
+ return -1; /* output overflow */
+Index: socat-1.7.3.0/nestlex.h
+===================================================================
+--- socat-1.7.3.0.orig/nestlex.h
++++ socat-1.7.3.0/nestlex.h
+@@ -1,5 +1,5 @@
+ /* source: nestlex.h */
+-/* Copyright Gerhard Rieger 2006 */
++/* Copyright Gerhard Rieger */
+ /* Published under the GNU General Public License V.2, see file COPYING */
+
+ #ifndef __nestlex_h_included
+Index: socat-1.7.3.0/socat.spec
+===================================================================
+--- socat-1.7.3.0.orig/socat.spec
++++ socat-1.7.3.0/socat.spec
+@@ -1,6 +1,6 @@
+
+ %define majorver 1.7
+-%define minorver 3.0
++%define minorver 3.1
+
+ Summary: socat - multipurpose relay
+ Name: socat
+Index: socat-1.7.3.0/test.sh
+===================================================================
+--- socat-1.7.3.0.orig/test.sh
++++ socat-1.7.3.0/test.sh
+@@ -2266,8 +2266,8 @@ gentestcert () {
+ gentestdsacert () {
+ local name="$1"
+ if [ -s $name.key -a -s $name.crt -a -s $name.pem ]; then return; fi
+- openssl dsaparam -out $name-dsa.pem 512 >/dev/null 2>&1
+- openssl dhparam -dsaparam -out $name-dh.pem 512 >/dev/null 2>&1
++ openssl dsaparam -out $name-dsa.pem 1024 >/dev/null 2>&1
++ openssl dhparam -dsaparam -out $name-dh.pem 1024 >/dev/null 2>&1
+ openssl req -newkey dsa:$name-dsa.pem -keyout $name.key -nodes -x509 -config $TESTCERT_CONF -out $name.crt -days 3653 >/dev/null 2>&1
+ cat $name-dsa.pem $name-dh.pem $name.key $name.crt >$name.pem
+ }
+@@ -10973,6 +10973,42 @@ CMD0="$TRACE $SOCAT $opts OPENSSL:localh
+ printf "test $F_n $TEST... " $N
+ $CMD0 </dev/null 1>&0 2>"${te}0"
+ rc0=$?
++if [ $rc0 -lt 128 ] || [ $rc0 -eq 255 ]; then
++ $PRINTF "$OK\n"
++ numOK=$((numOK+1))
++else
++ $PRINTF "$FAILED\n"
++ echo "$CMD0"
++ cat "${te}0"
++ numFAIL=$((numFAIL+1))
++ listFAIL="$listFAIL $N"
++fi
++fi # NUMCOND
++ ;;
++esac
++PORT=$((PORT+1))
++N=$((N+1))
++
++# socat up to 1.7.3.0 had a stack overflow vulnerability that occurred when
++# command line arguments (whole addresses, host names, file names) were longer
++# than 512 bytes and specially crafted.
++NAME=NESTEDOVFL
++case "$TESTS" in
++*%$N%*|*%functions%*|*%bugs%*|*%security%*|*%exec%*|*%$NAME%*)
++TEST="$NAME: stack overflow on overly long nested arg"
++# provide a long host name to TCP-CONNECT and check socats exit code
++if ! eval $NUMCOND; then :; else
++tf="$td/test$N.stdout"
++te="$td/test$N.stderr"
++tdiff="$td/test$N.diff"
++da="test$N $(date) $RANDOM"
++# prepare long data - perl might not be installed
++rm -f "$td/test$N.dat"
++i=0; while [ $i -lt 64 ]; do echo -n "AAAAAAAAAAAAAAAA" >>"$td/test$N.dat"; i=$((i+1)); done
++CMD0="$TRACE $SOCAT $opts EXEC:[$(cat "$td/test$N.dat")] STDIO"
++printf "test $F_n $TEST... " $N
++$CMD0 </dev/null 1>&0 2>"${te}0"
++rc0=$?
+ if [ $rc0 -lt 128 ] || [ $rc0 -eq 255 ]; then
+ $PRINTF "$OK\n"
+ numOK=$((numOK+1))
+Index: socat-1.7.3.0/xio-openssl.c
+===================================================================
+--- socat-1.7.3.0.orig/xio-openssl.c
++++ socat-1.7.3.0/xio-openssl.c
+@@ -912,20 +912,27 @@ int
+ }
+
+ {
+- static unsigned char dh1024_p[] = {
+- 0xCC,0x17,0xF2,0xDC,0x96,0xDF,0x59,0xA4,0x46,0xC5,0x3E,0x0E,
+- 0xB8,0x26,0x55,0x0C,0xE3,0x88,0xC1,0xCE,0xA7,0xBC,0xB3,0xBF,
+- 0x16,0x94,0xD8,0xA9,0x45,0xA2,0xCE,0xA9,0x5B,0x22,0x25,0x5F,
+- 0x92,0x59,0x94,0x1C,0x22,0xBF,0xCB,0xC8,0xC8,0x57,0xCB,0xBF,
+- 0xBC,0x0E,0xE8,0x40,0xF9,0x87,0x03,0xBF,0x60,0x9B,0x08,0xC6,
+- 0x8E,0x99,0xC6,0x05,0xFC,0x00,0xD6,0x6D,0x90,0xA8,0xF5,0xF8,
+- 0xD3,0x8D,0x43,0xC8,0x8F,0x7A,0xBD,0xBB,0x28,0xAC,0x04,0x69,
+- 0x4A,0x0B,0x86,0x73,0x37,0xF0,0x6D,0x4F,0x04,0xF6,0xF5,0xAF,
+- 0xBF,0xAB,0x8E,0xCE,0x75,0x53,0x4D,0x7F,0x7D,0x17,0x78,0x0E,
+- 0x12,0x46,0x4A,0xAF,0x95,0x99,0xEF,0xBC,0xA6,0xC5,0x41,0x77,
+- 0x43,0x7A,0xB9,0xEC,0x8E,0x07,0x3C,0x6D,
++ static unsigned char dh2048_p[] = {
++ 0x00,0xdc,0x21,0x64,0x56,0xbd,0x9c,0xb2,0xac,0xbe,0xc9,0x98,0xef,0x95,0x3e,
++ 0x26,0xfa,0xb5,0x57,0xbc,0xd9,0xe6,0x75,0xc0,0x43,0xa2,0x1c,0x7a,0x85,0xdf,
++ 0x34,0xab,0x57,0xa8,0xf6,0xbc,0xf6,0x84,0x7d,0x05,0x69,0x04,0x83,0x4c,0xd5,
++ 0x56,0xd3,0x85,0x09,0x0a,0x08,0xff,0xb5,0x37,0xa1,0xa3,0x8a,0x37,0x04,0x46,
++ 0xd2,0x93,0x31,0x96,0xf4,0xe4,0x0d,0x9f,0xbd,0x3e,0x7f,0x9e,0x4d,0xaf,0x08,
++ 0xe2,0xe8,0x03,0x94,0x73,0xc4,0xdc,0x06,0x87,0xbb,0x6d,0xae,0x66,0x2d,0x18,
++ 0x1f,0xd8,0x47,0x06,0x5c,0xcf,0x8a,0xb5,0x00,0x51,0x57,0x9b,0xea,0x1e,0xd8,
++ 0xdb,0x8e,0x3c,0x1f,0xd3,0x2f,0xba,0x1f,0x5f,0x3d,0x15,0xc1,0x3b,0x2c,0x82,
++ 0x42,0xc8,0x8c,0x87,0x79,0x5b,0x38,0x86,0x3a,0xeb,0xfd,0x81,0xa9,0xba,0xf7,
++ 0x26,0x5b,0x93,0xc5,0x3e,0x03,0x30,0x4b,0x00,0x5c,0xb6,0x23,0x3e,0xea,0x94,
++ 0xc3,0xb4,0x71,0xc7,0x6e,0x64,0x3b,0xf8,0x92,0x65,0xad,0x60,0x6c,0xd4,0x7b,
++ 0xa9,0x67,0x26,0x04,0xa8,0x0a,0xb2,0x06,0xeb,0xe0,0x7d,0x90,0xdd,0xdd,0xf5,
++ 0xcf,0xb4,0x11,0x7c,0xab,0xc1,0xa3,0x84,0xbe,0x27,0x77,0xc7,0xde,0x20,0x57,
++ 0x66,0x47,0xa7,0x35,0xfe,0x0d,0x6a,0x1c,0x52,0xb8,0x58,0xbf,0x26,0x33,0x81,
++ 0x5e,0xb7,0xa9,0xc0,0xee,0x58,0x11,0x74,0x86,0x19,0x08,0x89,0x1c,0x37,0x0d,
++ 0x52,0x47,0x70,0x75,0x8b,0xa8,0x8b,0x30,0x11,0x71,0x36,0x62,0xf0,0x73,0x41,
++ 0xee,0x34,0x9d,0x0a,0x2b,0x67,0x4e,0x6a,0xa3,0xe2,0x99,0x92,0x1b,0xf5,0x32,
++ 0x73,0x63
+ };
+- static unsigned char dh1024_g[] = {
++ static unsigned char dh2048_g[] = {
+ 0x02,
+ };
+ DH *dh;
+@@ -938,8 +945,8 @@ int
+ }
+ Error("DH_new() failed");
+ } else {
+- dh->p = BN_bin2bn(dh1024_p, sizeof(dh1024_p), NULL);
+- dh->g = BN_bin2bn(dh1024_g, sizeof(dh1024_g), NULL);
++ dh->p = BN_bin2bn(dh2048_p, sizeof(dh2048_p), NULL);
++ dh->g = BN_bin2bn(dh2048_g, sizeof(dh2048_g), NULL);
+ if ((dh->p == NULL) || (dh->g == NULL)) {
+ while (err = ERR_get_error()) {
+ Warn1("BN_bin2bn(): %s",
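
The nestlex fix above has two parts: recursive calls must propagate the overflow result (-1) instead of continuing to copy, and the remaining-length test must catch values that are already non-positive. A self-contained toy copier showing the same propagation pattern; the names are hypothetical and the parser is far simpler than socat's:

    #include <stdio.h>
    #include <string.h>

    /* Copy in -> out until ')' is seen, recursing on '(' for nesting.
     * Returns 0 on success, -1 on output overflow.  The crucial details:
     * the recursive result is checked, and the length test is <= 0. */
    static int copy_nested(const char **in, char **out, int *len)
    {
        while (**in != '\0') {
            char c = *(*in)++;
            if (c == ')')
                return 0;
            if (c == '(') {
                if (copy_nested(in, out, len) < 0)
                    return -1;          /* propagate overflow upward */
                continue;
            }
            *(*out)++ = c;
            if (--*len <= 0)            /* not == 0: stay safe either way */
                return -1;              /* output overflow */
        }
        return 0;
    }

    int main(void)
    {
        char buf[8];
        const char *input = "a(bbbbbbbbbbbbbbbb)c";
        const char *ip = input;
        char *op = buf;
        int len = (int)sizeof(buf);

        int rc = copy_nested(&ip, &op, &len);
        printf("rc=%d, copied %d byte(s)\n", rc, (int)(op - buf));
        return rc == -1 ? 0 : 1;
    }
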
diff --git a/yocto-poky/meta/recipes-connectivity/socat/socat_1.7.3.0.bb b/yocto-poky/meta/recipes-connectivity/socat/socat_1.7.3.0.bb
index b58e0a73e..6d76d0fd0 100644
--- a/yocto-poky/meta/recipes-connectivity/socat/socat_1.7.3.0.bb
+++ b/yocto-poky/meta/recipes-connectivity/socat/socat_1.7.3.0.bb
@@ -14,6 +14,7 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263 \
SRC_URI = "http://www.dest-unreach.org/socat/download/socat-${PV}.tar.bz2 \
file://Makefile.in-fix-for-parallel-build.patch \
+ file://CVE-2016-2217.patch \
"
SRC_URI[md5sum] = "b607edb65bc6c57f4a43f06247504274"
diff --git a/yocto-poky/meta/recipes-core/busybox/busybox.inc b/yocto-poky/meta/recipes-core/busybox/busybox.inc
index ed8f9fe76..4d4709a48 100644
--- a/yocto-poky/meta/recipes-core/busybox/busybox.inc
+++ b/yocto-poky/meta/recipes-core/busybox/busybox.inc
@@ -103,9 +103,8 @@ python () {
}
do_prepare_config () {
- sed -e 's#@DATADIR@#${datadir}#g' \
+ sed -e '/CONFIG_STATIC/d' \
< ${WORKDIR}/defconfig > ${S}/.config
- sed -i -e '/CONFIG_STATIC/d' .config
echo "# CONFIG_STATIC is not set" >> .config
for i in 'CROSS' 'DISTRO FEATURES'; do echo "### $i"; done >> \
${S}/.config
diff --git a/yocto-poky/meta/recipes-core/busybox/busybox/0001-Switch-to-POSIX-utmpx-API.patch b/yocto-poky/meta/recipes-core/busybox/busybox/0001-Switch-to-POSIX-utmpx-API.patch
new file mode 100644
index 000000000..1d299eec9
--- /dev/null
+++ b/yocto-poky/meta/recipes-core/busybox/busybox/0001-Switch-to-POSIX-utmpx-API.patch
@@ -0,0 +1,388 @@
+From 86a7f18f211af1abda5c855d2674b0fcb53de524 Mon Sep 17 00:00:00 2001
+From: Bernhard Reutner-Fischer <rep.dot.nop@gmail.com>
+Date: Thu, 2 Apr 2015 23:03:46 +0200
+Subject: [PATCH] *: Switch to POSIX utmpx API
+
+UTMP is SVID legacy, UTMPX is mandated by POSIX.
+
+Glibc and uClibc have identical layout of UTMP and UTMPX, both of these
+libc treat _PATH_UTMPX as _PATH_UTMP so from a user-perspective nothing
+changes except the names of the API entrypoints.
+
+Signed-off-by: Bernhard Reutner-Fischer <rep.dot.nop@gmail.com>
+---
+Upstream-Status: Backport
+
+ coreutils/who.c | 8 ++++----
+ include/libbb.h | 2 +-
+ init/halt.c | 4 ++--
+ libbb/utmp.c | 44 ++++++++++++++++++++++----------------------
+ miscutils/last.c | 8 ++++----
+ miscutils/last_fancy.c | 16 ++++++++++------
+ miscutils/runlevel.c | 12 ++++++------
+ miscutils/wall.c | 8 ++++----
+ procps/uptime.c | 6 +++---
+ 9 files changed, 56 insertions(+), 52 deletions(-)
+
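
For reference, the POSIX replacements used throughout this patch (setutxent/getutxent/endutxent and struct utmpx) can be exercised on their own; a minimal who(1)-style listing, independent of busybox internals:

    #include <stdio.h>
    #include <utmpx.h>

    /* List interactive sessions using only the POSIX utmpx API. */
    int main(void)
    {
        struct utmpx *ut;

        setutxent();                          /* rewind the utmpx database */
        while ((ut = getutxent()) != NULL) {
            if (ut->ut_type == USER_PROCESS)  /* login sessions only */
                printf("%-16.16s %-12.12s\n", ut->ut_user, ut->ut_line);
        }
        endutxent();                          /* close the database */
        return 0;
    }
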
+diff --git a/coreutils/who.c b/coreutils/who.c
+index f955ce6..8337212 100644
+--- a/coreutils/who.c
++++ b/coreutils/who.c
+@@ -73,7 +73,7 @@ static void idle_string(char *str6, time_t t)
+ int who_main(int argc, char **argv) MAIN_EXTERNALLY_VISIBLE;
+ int who_main(int argc UNUSED_PARAM, char **argv)
+ {
+- struct utmp *ut;
++ struct utmpx *ut;
+ unsigned opt;
+ int do_users = (ENABLE_USERS && (!ENABLE_WHO || applet_name[0] == 'u'));
+ const char *fmt = "%s";
+@@ -83,8 +83,8 @@ int who_main(int argc UNUSED_PARAM, char **argv)
+ if (opt & 2) // -H
+ printf("USER\t\tTTY\t\tIDLE\tTIME\t\t HOST\n");
+
+- setutent();
+- while ((ut = getutent()) != NULL) {
++ setutxent();
++ while ((ut = getutxent()) != NULL) {
+ if (ut->ut_user[0]
+ && ((opt & 1) || ut->ut_type == USER_PROCESS)
+ ) {
+@@ -126,6 +126,6 @@ int who_main(int argc UNUSED_PARAM, char **argv)
+ if (do_users)
+ bb_putchar('\n');
+ if (ENABLE_FEATURE_CLEAN_UP)
+- endutent();
++ endutxent();
+ return EXIT_SUCCESS;
+ }
+diff --git a/include/libbb.h b/include/libbb.h
+index 26b6868..0f8363b 100644
+--- a/include/libbb.h
++++ b/include/libbb.h
+@@ -84,7 +84,7 @@
+ # include <selinux/av_permissions.h>
+ #endif
+ #if ENABLE_FEATURE_UTMP
+-# include <utmp.h>
++# include <utmpx.h>
+ #endif
+ #if ENABLE_LOCALE_SUPPORT
+ # include <locale.h>
+diff --git a/init/halt.c b/init/halt.c
+index 7974adb..ad12d91 100644
+--- a/init/halt.c
++++ b/init/halt.c
+@@ -74,7 +74,7 @@
+
+ static void write_wtmp(void)
+ {
+- struct utmp utmp;
++ struct utmpx utmp;
+ struct utsname uts;
+ /* "man utmp" says wtmp file should *not* be created automagically */
+ /*if (access(bb_path_wtmp_file, R_OK|W_OK) == -1) {
+@@ -88,7 +88,7 @@ static void write_wtmp(void)
+ utmp.ut_line[0] = '~'; utmp.ut_line[1] = '~'; /* = strcpy(utmp.ut_line, "~~"); */
+ uname(&uts);
+ safe_strncpy(utmp.ut_host, uts.release, sizeof(utmp.ut_host));
+- updwtmp(bb_path_wtmp_file, &utmp);
++ updwtmpx(bb_path_wtmp_file, &utmp);
+ }
+ #else
+ #define write_wtmp() ((void)0)
+diff --git a/libbb/utmp.c b/libbb/utmp.c
+index 8ad9ba2..bd07670 100644
+--- a/libbb/utmp.c
++++ b/libbb/utmp.c
+@@ -16,7 +16,7 @@ static void touch(const char *filename)
+
+ void FAST_FUNC write_new_utmp(pid_t pid, int new_type, const char *tty_name, const char *username, const char *hostname)
+ {
+- struct utmp utent;
++ struct utmpx utent;
+ char *id;
+ unsigned width;
+
+@@ -45,17 +45,17 @@ void FAST_FUNC write_new_utmp(pid_t pid, int new_type, const char *tty_name, con
+ tty_name += 3;
+ strncpy(id, tty_name, width);
+
+- touch(_PATH_UTMP);
+- //utmpname(_PATH_UTMP);
+- setutent();
++ touch(_PATH_UTMPX);
++ //utmpxname(_PATH_UTMPX);
++ setutxent();
+ /* Append new one (hopefully, unless we collide on ut_id) */
+- pututline(&utent);
+- endutent();
++ pututxline(&utent);
++ endutxent();
+
+ #if ENABLE_FEATURE_WTMP
+ /* "man utmp" says wtmp file should *not* be created automagically */
+ /*touch(bb_path_wtmp_file);*/
+- updwtmp(bb_path_wtmp_file, &utent);
++ updwtmpx(bb_path_wtmp_file, &utent);
+ #endif
+ }
+
+@@ -64,17 +64,17 @@ void FAST_FUNC write_new_utmp(pid_t pid, int new_type, const char *tty_name, con
+ */
+ void FAST_FUNC update_utmp(pid_t pid, int new_type, const char *tty_name, const char *username, const char *hostname)
+ {
+- struct utmp utent;
+- struct utmp *utp;
++ struct utmpx utent;
++ struct utmpx *utp;
+
+- touch(_PATH_UTMP);
+- //utmpname(_PATH_UTMP);
+- setutent();
++ touch(_PATH_UTMPX);
++ //utmpxname(_PATH_UTMPX);
++ setutxent();
+
+ /* Did init/getty/telnetd/sshd/... create an entry for us?
+ * It should be (new_type-1), but we'd also reuse
+ * any other potentially stale xxx_PROCESS entry */
+- while ((utp = getutent()) != NULL) {
++ while ((utp = getutxent()) != NULL) {
+ if (utp->ut_pid == pid
+ // && ut->ut_line[0]
+ && utp->ut_id[0] /* must have nonzero id */
+@@ -88,25 +88,25 @@ void FAST_FUNC update_utmp(pid_t pid, int new_type, const char *tty_name, const
+ /* Stale record. Nuke hostname */
+ memset(utp->ut_host, 0, sizeof(utp->ut_host));
+ }
+- /* NB: pututline (see later) searches for matching utent
+- * using getutid(utent) - we must not change ut_id
++ /* NB: pututxline (see later) searches for matching utxent
++ * using getutxid(utent) - we must not change ut_id
+ * if we want *exactly this* record to be overwritten!
+ */
+ break;
+ }
+ }
+- //endutent(); - no need, pututline can deal with (and actually likes)
++ //endutxent(); - no need, pututxline can deal with (and actually likes)
+ //the situation when utmp file is positioned on found record
+
+ if (!utp) {
+ if (new_type != DEAD_PROCESS)
+ write_new_utmp(pid, new_type, tty_name, username, hostname);
+ else
+- endutent();
++ endutxent();
+ return;
+ }
+
+- /* Make a copy. We can't use *utp, pututline's internal getutid
++ /* Make a copy. We can't use *utp, pututxline's internal getutxid
+ * will overwrite it before it is used! */
+ utent = *utp;
+
+@@ -120,14 +120,14 @@ void FAST_FUNC update_utmp(pid_t pid, int new_type, const char *tty_name, const
+ utent.ut_tv.tv_sec = time(NULL);
+
+ /* Update, or append new one */
+- //setutent();
+- pututline(&utent);
+- endutent();
++ //setutxent();
++ pututxline(&utent);
++ endutxent();
+
+ #if ENABLE_FEATURE_WTMP
+ /* "man utmp" says wtmp file should *not* be created automagically */
+ /*touch(bb_path_wtmp_file);*/
+- updwtmp(bb_path_wtmp_file, &utent);
++ updwtmpx(bb_path_wtmp_file, &utent);
+ #endif
+ }
+
+diff --git a/miscutils/last.c b/miscutils/last.c
+index a144c7e..6d8b584 100644
+--- a/miscutils/last.c
++++ b/miscutils/last.c
+@@ -32,21 +32,21 @@
+
+ #if defined UT_LINESIZE \
+ && ((UT_LINESIZE != 32) || (UT_NAMESIZE != 32) || (UT_HOSTSIZE != 256))
+-#error struct utmp member char[] size(s) have changed!
++#error struct utmpx member char[] size(s) have changed!
+ #elif defined __UT_LINESIZE \
+ && ((__UT_LINESIZE != 32) || (__UT_NAMESIZE != 64) || (__UT_HOSTSIZE != 256))
+-#error struct utmp member char[] size(s) have changed!
++#error struct utmpx member char[] size(s) have changed!
+ #endif
+
+ #if EMPTY != 0 || RUN_LVL != 1 || BOOT_TIME != 2 || NEW_TIME != 3 || \
+ OLD_TIME != 4
+-#error Values for the ut_type field of struct utmp changed
++#error Values for the ut_type field of struct utmpx changed
+ #endif
+
+ int last_main(int argc, char **argv) MAIN_EXTERNALLY_VISIBLE;
+ int last_main(int argc UNUSED_PARAM, char **argv UNUSED_PARAM)
+ {
+- struct utmp ut;
++ struct utmpx ut;
+ int n, file = STDIN_FILENO;
+ time_t t_tmp;
+ off_t pos;
+diff --git a/miscutils/last_fancy.c b/miscutils/last_fancy.c
+index 16ed9e9..8194e31 100644
+--- a/miscutils/last_fancy.c
++++ b/miscutils/last_fancy.c
+@@ -22,6 +22,10 @@
+ #define HEADER_LINE_WIDE "USER", "TTY", \
+ INET6_ADDRSTRLEN, INET6_ADDRSTRLEN, "HOST", "LOGIN", " TIME", ""
+
++#if !defined __UT_LINESIZE && defined UT_LINESIZE
++# define __UT_LINESIZE UT_LINESIZE
++#endif
++
+ enum {
+ NORMAL,
+ LOGGED,
+@@ -39,7 +43,7 @@ enum {
+
+ #define show_wide (option_mask32 & LAST_OPT_W)
+
+-static void show_entry(struct utmp *ut, int state, time_t dur_secs)
++static void show_entry(struct utmpx *ut, int state, time_t dur_secs)
+ {
+ unsigned days, hours, mins;
+ char duration[sizeof("(%u+02:02)") + sizeof(int)*3];
+@@ -104,7 +108,7 @@ static void show_entry(struct utmp *ut, int state, time_t dur_secs)
+ duration_str);
+ }
+
+-static int get_ut_type(struct utmp *ut)
++static int get_ut_type(struct utmpx *ut)
+ {
+ if (ut->ut_line[0] == '~') {
+ if (strcmp(ut->ut_user, "shutdown") == 0) {
+@@ -142,7 +146,7 @@ static int get_ut_type(struct utmp *ut)
+ return ut->ut_type;
+ }
+
+-static int is_runlevel_shutdown(struct utmp *ut)
++static int is_runlevel_shutdown(struct utmpx *ut)
+ {
+ if (((ut->ut_pid & 255) == '0') || ((ut->ut_pid & 255) == '6')) {
+ return 1;
+@@ -154,7 +158,7 @@ static int is_runlevel_shutdown(struct utmp *ut)
+ int last_main(int argc, char **argv) MAIN_EXTERNALLY_VISIBLE;
+ int last_main(int argc UNUSED_PARAM, char **argv)
+ {
+- struct utmp ut;
++ struct utmpx ut;
+ const char *filename = _PATH_WTMP;
+ llist_t *zlist;
+ off_t pos;
+@@ -242,9 +246,9 @@ int last_main(int argc UNUSED_PARAM, char **argv)
+ {
+ llist_t *el, *next;
+ for (el = zlist; el; el = next) {
+- struct utmp *up = (struct utmp *)el->data;
++ struct utmpx *up = (struct utmpx *)el->data;
+ next = el->link;
+- if (strncmp(up->ut_line, ut.ut_line, UT_LINESIZE) == 0) {
++ if (strncmp(up->ut_line, ut.ut_line, __UT_LINESIZE) == 0) {
+ if (show) {
+ show_entry(&ut, NORMAL, up->ut_tv.tv_sec);
+ show = 0;
+diff --git a/miscutils/runlevel.c b/miscutils/runlevel.c
+index 76231df..8558db8 100644
+--- a/miscutils/runlevel.c
++++ b/miscutils/runlevel.c
+@@ -29,19 +29,19 @@
+ int runlevel_main(int argc, char **argv) MAIN_EXTERNALLY_VISIBLE;
+ int runlevel_main(int argc UNUSED_PARAM, char **argv)
+ {
+- struct utmp *ut;
++ struct utmpx *ut;
+ char prev;
+
+- if (argv[1]) utmpname(argv[1]);
++ if (argv[1]) utmpxname(argv[1]);
+
+- setutent();
+- while ((ut = getutent()) != NULL) {
++ setutxent();
++ while ((ut = getutxent()) != NULL) {
+ if (ut->ut_type == RUN_LVL) {
+ prev = ut->ut_pid / 256;
+ if (prev == 0) prev = 'N';
+ printf("%c %c\n", prev, ut->ut_pid % 256);
+ if (ENABLE_FEATURE_CLEAN_UP)
+- endutent();
++ endutxent();
+ return 0;
+ }
+ }
+@@ -49,6 +49,6 @@ int runlevel_main(int argc UNUSED_PARAM, char **argv)
+ puts("unknown");
+
+ if (ENABLE_FEATURE_CLEAN_UP)
+- endutent();
++ endutxent();
+ return 1;
+ }
+diff --git a/miscutils/wall.c b/miscutils/wall.c
+index bb709ee..50658f4 100644
+--- a/miscutils/wall.c
++++ b/miscutils/wall.c
+@@ -32,7 +32,7 @@
+ int wall_main(int argc, char **argv) MAIN_EXTERNALLY_VISIBLE;
+ int wall_main(int argc UNUSED_PARAM, char **argv)
+ {
+- struct utmp *ut;
++ struct utmpx *ut;
+ char *msg;
+ int fd;
+
+@@ -46,8 +46,8 @@ int wall_main(int argc UNUSED_PARAM, char **argv)
+ msg = xmalloc_read(fd, NULL);
+ if (ENABLE_FEATURE_CLEAN_UP && argv[1])
+ close(fd);
+- setutent();
+- while ((ut = getutent()) != NULL) {
++ setutxent();
++ while ((ut = getutxent()) != NULL) {
+ char *line;
+ if (ut->ut_type != USER_PROCESS)
+ continue;
+@@ -56,7 +56,7 @@ int wall_main(int argc UNUSED_PARAM, char **argv)
+ free(line);
+ }
+ if (ENABLE_FEATURE_CLEAN_UP) {
+- endutent();
++ endutxent();
+ free(msg);
+ }
+ return EXIT_SUCCESS;
+diff --git a/procps/uptime.c b/procps/uptime.c
+index 778812a..149bae6 100644
+--- a/procps/uptime.c
++++ b/procps/uptime.c
+@@ -81,10 +81,10 @@ int uptime_main(int argc UNUSED_PARAM, char **argv UNUSED_PARAM)
+
+ #if ENABLE_FEATURE_UPTIME_UTMP_SUPPORT
+ {
+- struct utmp *ut;
++ struct utmpx *ut;
+ unsigned users = 0;
+- while ((ut = getutent()) != NULL) {
+- if ((ut->ut_type == USER_PROCESS) && (ut->ut_name[0] != '\0'))
++ while ((ut = getutxent()) != NULL) {
++ if ((ut->ut_type == USER_PROCESS) && (ut->ut_user[0] != '\0'))
+ users++;
+ }
+ printf(", %u users", users);
+--
+2.5.1
+
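The patch above moves busybox from the legacy utmp calls to their POSIX utmpx
counterparts. As a rough illustration of the utmpx API in isolation (a minimal
stand-alone sketch, not busybox code; the USER_PROCESS filter simply mirrors
what who and wall do above), a reader of the current utmpx database could look
like this:

    #include <stdio.h>
    #include <utmpx.h>

    int main(void)
    {
        struct utmpx *ut;

        setutxent();                      /* rewind the utmpx database */
        while ((ut = getutxent()) != NULL) {
            /* only live login sessions, as in who/wall */
            if (ut->ut_type == USER_PROCESS && ut->ut_user[0] != '\0')
                printf("%-16s %-12s pid=%ld\n",
                       ut->ut_user, ut->ut_line, (long) ut->ut_pid);
        }
        endutxent();                      /* close the database */
        return 0;
    }

Compiled against glibc or uClibc this walks the same records the old
getutent() loop did, which is why the conversion in the patch is mechanical.
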
diff --git a/yocto-poky/meta/recipes-core/busybox/busybox/0001-Use-CC-when-linking-instead-of-LD-and-use-CFLAGS-and.patch b/yocto-poky/meta/recipes-core/busybox/busybox/0001-Use-CC-when-linking-instead-of-LD-and-use-CFLAGS-and.patch
new file mode 100644
index 000000000..2bf2b91c7
--- /dev/null
+++ b/yocto-poky/meta/recipes-core/busybox/busybox/0001-Use-CC-when-linking-instead-of-LD-and-use-CFLAGS-and.patch
@@ -0,0 +1,114 @@
+From a9333eb6a7b8dbda735947cd5bc981ff9352a2c9 Mon Sep 17 00:00:00 2001
+From: Nathan Phillip Brink <ohnobinki@ohnopublishing.net>
+Date: Thu, 10 Mar 2011 00:27:08 -0500
+Subject: [PATCH 1/2] Use $(CC) when linking instead of $(LD) and use $(CFLAGS)
+ and $(EXTRA_CFLAGS) when linking.
+
+This fixes the issue where LDFLAGS escaped with -Wl are ignored during
+compilation. It also simplifies using CFLAGS or EXTRA_CFLAGS (such as
+-m32 on x86_64 or -flto), which apply to both compilation and linking
+situations.
+
+Signed-off-by: Nathan Phillip Brink <ohnobinki@ohnopublishing.net>
+---
+Upstream-Status: Pending
+
+ Makefile | 7 ++++---
+ scripts/Makefile.build | 8 ++++----
+ scripts/Makefile.lib | 13 +++----------
+ 3 files changed, 11 insertions(+), 17 deletions(-)
+
+Index: busybox-1.23.2/Makefile
+===================================================================
+--- busybox-1.23.2.orig/Makefile
++++ busybox-1.23.2/Makefile
+@@ -309,7 +309,8 @@ CHECKFLAGS := -D__linux__ -Dlinux -D
+ MODFLAGS = -DMODULE
+ CFLAGS_MODULE = $(MODFLAGS)
+ AFLAGS_MODULE = $(MODFLAGS)
+-LDFLAGS_MODULE = -r
++LDFLAGS_RELOCATABLE = -r -nostdlib
++LDFLAGS_MODULE = $(LDFLAGS_RELOCATABLE)
+ CFLAGS_KERNEL =
+ AFLAGS_KERNEL =
+
+@@ -331,7 +332,7 @@ KERNELVERSION = $(VERSION).$(PATCHLEVEL)
+ export VERSION PATCHLEVEL SUBLEVEL KERNELRELEASE KERNELVERSION \
+ ARCH CONFIG_SHELL HOSTCC HOSTCFLAGS CROSS_COMPILE AS LD CC \
+ CPP AR NM STRIP OBJCOPY OBJDUMP MAKE AWK GENKSYMS PERL UTS_MACHINE \
+- HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS
++ HOSTCXX HOSTCXXFLAGS LDFLAGS_RELOCATABLE LDFLAGS_MODULE CHECK CHECKFLAGS
+
+ export CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS LDFLAGS
+ export CFLAGS CFLAGS_KERNEL CFLAGS_MODULE
+@@ -610,7 +611,7 @@ quiet_cmd_busybox__ ?= LINK $@
+ cmd_busybox__ ?= $(srctree)/scripts/trylink \
+ "$@" \
+ "$(CC)" \
+- "$(CFLAGS) $(CFLAGS_busybox)" \
++ "$(CFLAGS) $(CFLAGS_busybox) $(EXTRA_CFLAGS)" \
+ "$(LDFLAGS) $(EXTRA_LDFLAGS)" \
+ "$(core-y)" \
+ "$(libs-y)" \
+Index: busybox-1.23.2/scripts/Makefile.build
+===================================================================
+--- busybox-1.23.2.orig/scripts/Makefile.build
++++ busybox-1.23.2/scripts/Makefile.build
+@@ -174,7 +174,7 @@ cmd_modversions = \
+ | $(GENKSYMS) -a $(ARCH) \
+ > $(@D)/.tmp_$(@F:.o=.ver); \
+ \
+- $(LD) $(LDFLAGS) -r -o $@ $(@D)/.tmp_$(@F) \
++ $(CC) $(ld_flags_partial) $(LDFLAGS_RELOCATABLE) -o $@ $(@D)/.tmp_$(@F) \
+ -T $(@D)/.tmp_$(@F:.o=.ver); \
+ rm -f $(@D)/.tmp_$(@F) $(@D)/.tmp_$(@F:.o=.ver); \
+ else \
+@@ -257,7 +257,7 @@ quiet_cmd_link_o_target = LD $@
+ # If the list of objects to link is empty, just create an empty built-in.o
+ # -nostdlib is added to make "make LD=gcc ..." work (some people use that)
+ cmd_link_o_target = $(if $(strip $(obj-y)),\
+- $(LD) -nostdlib $(ld_flags) -r -o $@ $(filter $(obj-y), $^),\
++ $(CC) $(ld_flags_partial) $(LDFLAGS_RELOCATABLE) -o $@ $(filter $(obj-y), $^),\
+ rm -f $@; $(AR) rcs $@)
+
+ $(builtin-target): $(obj-y) FORCE
+@@ -292,10 +292,10 @@ $($(subst $(obj)/,,$(@:.o=-objs))) \
+ $($(subst $(obj)/,,$(@:.o=-y)))), $^)
+
+ quiet_cmd_link_multi-y = LD $@
+-cmd_link_multi-y = $(LD) $(ld_flags) -r -o $@ $(link_multi_deps)
++cmd_link_multi-y = $(CC) $(ld_flags_partial) $(LDFLAGS_RELOCATABLE) -o $@ $(link_multi_deps)
+
+ quiet_cmd_link_multi-m = LD [M] $@
+-cmd_link_multi-m = $(LD) $(ld_flags) $(LDFLAGS_MODULE) -o $@ $(link_multi_deps)
++cmd_link_multi-m = $(CC) $(ld_flags) $(LDFLAGS_MODULE) -o $@ $(link_multi_deps)
+
+ # We would rather have a list of rules like
+ # foo.o: $(foo-objs)
+Index: busybox-1.23.2/scripts/Makefile.lib
+===================================================================
+--- busybox-1.23.2.orig/scripts/Makefile.lib
++++ busybox-1.23.2/scripts/Makefile.lib
+@@ -121,7 +121,8 @@ cpp_flags = -Wp,-MD,$(depfile) $(NO
+ # yet ld_flags is fed to ld.
+ #ld_flags = $(LDFLAGS) $(EXTRA_LDFLAGS)
+ # Remove the -Wl, prefix from linker options normally passed through gcc
+-ld_flags = $(filter-out -Wl$(comma)%,$(LDFLAGS) $(EXTRA_LDFLAGS))
++ld_flags = $(filter-out -Wl$(comma)%,$(LDFLAGS) $(EXTRA_LDFLAGS) $(CFLAGS) $(EXTRA_CFLAGS))
++ld_flags_partial = $($(filter-out -shared%, $(filter-out -pie%,$(ld_flags))))
+
+
+ # Finds the multi-part object the current object will be linked into
+@@ -151,10 +152,8 @@ $(obj)/%:: $(src)/%_shipped
+ # Linking
+ # ---------------------------------------------------------------------------
+
+-# TODO: LDFLAGS usually is supposed to contain gcc's flags, not ld's.
+-# but here we feed them to ld!
+-quiet_cmd_ld = LD $@
+-cmd_ld = $(LD) $(LDFLAGS) $(EXTRA_LDFLAGS) $(LDFLAGS_$(@F)) \
++quiet_cmd_ld = CC $@
++cmd_ld = $(CC) $(ld_flags) $(LDFLAGS_$(@F)) \
+ $(filter-out FORCE,$^) -o $@
+
+ # Objcopy
diff --git a/yocto-poky/meta/recipes-core/busybox/busybox/0001-randconfig-fix.patch b/yocto-poky/meta/recipes-core/busybox/busybox/0001-randconfig-fix.patch
new file mode 100644
index 000000000..415ec3409
--- /dev/null
+++ b/yocto-poky/meta/recipes-core/busybox/busybox/0001-randconfig-fix.patch
@@ -0,0 +1,33 @@
+If CONFIG_FEATURE_LAST_SMALL is enabled, the build fails because of a broken
+__UT_NAMESIZE test.
+
+Upstream-Status: Backport
+Signed-off-by: Ross Burton <ross.burton@intel.com>
+
+From 932302666b0354ede63504d1bef8393cab28db8b Mon Sep 17 00:00:00 2001
+From: Denys Vlasenko <vda.linux@googlemail.com>
+Date: Sun, 11 Oct 2015 16:58:18 +0200
+Subject: [PATCH] randconfig fix
+
+Signed-off-by: Denys Vlasenko <vda.linux@googlemail.com>
+---
+ miscutils/last.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/miscutils/last.c b/miscutils/last.c
+index 6d8b584..f8f3437 100644
+--- a/miscutils/last.c
++++ b/miscutils/last.c
+@@ -34,7 +34,8 @@
+ && ((UT_LINESIZE != 32) || (UT_NAMESIZE != 32) || (UT_HOSTSIZE != 256))
+ #error struct utmpx member char[] size(s) have changed!
+ #elif defined __UT_LINESIZE \
+- && ((__UT_LINESIZE != 32) || (__UT_NAMESIZE != 64) || (__UT_HOSTSIZE != 256))
++ && ((__UT_LINESIZE != 32) || (__UT_NAMESIZE != 32) || (__UT_HOSTSIZE != 256))
++/* __UT_NAMESIZE was checked with 64 above, but glibc-2.11 definitely uses 32! */
+ #error struct utmpx member char[] size(s) have changed!
+ #endif
+
+--
+2.6.4
+
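The #error check being relaxed above guards, at compile time, against the libc
changing the sizes of the utmpx string fields. A roughly equivalent guard
written with C11 _Static_assert (a hypothetical alternative shown purely for
illustration; busybox itself keeps the preprocessor #error form seen above, and
the sizes asserted here are glibc's current ones):

    #include <utmpx.h>

    /* Abort the build if the libc's utmpx layout no longer matches the
     * sizes this code was written against (glibc values assumed). */
    _Static_assert(sizeof(((struct utmpx *)0)->ut_line) == 32,
                   "ut_line size changed");
    _Static_assert(sizeof(((struct utmpx *)0)->ut_user) == 32,
                   "ut_user size changed");
    _Static_assert(sizeof(((struct utmpx *)0)->ut_host) == 256,
                   "ut_host size changed");

The preprocessor cannot evaluate sizeof, which is why the original code tests
the UT_*SIZE/__UT_*SIZE macros instead.
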
diff --git a/yocto-poky/meta/recipes-core/busybox/busybox/0002-Passthrough-r-to-linker.patch b/yocto-poky/meta/recipes-core/busybox/busybox/0002-Passthrough-r-to-linker.patch
new file mode 100644
index 000000000..de286fb74
--- /dev/null
+++ b/yocto-poky/meta/recipes-core/busybox/busybox/0002-Passthrough-r-to-linker.patch
@@ -0,0 +1,32 @@
+From df2cc76cdebc4773361477f3db203790f6986e3b Mon Sep 17 00:00:00 2001
+From: Khem Raj <raj.khem@gmail.com>
+Date: Sat, 22 Aug 2015 23:42:40 -0700
+Subject: [PATCH 2/2] Passthrough -r to linker
+
+clang does not have an -r switch and does not pass it down to the linker
+either. LDFLAGS_RELOCATABLE is only used when CC is used in place of LD,
+so this should not cause side effects.
+
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+---
+Upstream-Status: Pending
+
+ Makefile | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/Makefile b/Makefile
+index 9da02cb..10dd4a9 100644
+--- a/Makefile
++++ b/Makefile
+@@ -309,7 +309,7 @@ CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ -Wbitwise $(C
+ MODFLAGS = -DMODULE
+ CFLAGS_MODULE = $(MODFLAGS)
+ AFLAGS_MODULE = $(MODFLAGS)
+-LDFLAGS_RELOCATABLE = -r -nostdlib
++LDFLAGS_RELOCATABLE = -Xlinker -r -nostdlib
+ LDFLAGS_MODULE = $(LDFLAGS_RELOCATABLE)
+ CFLAGS_KERNEL =
+ AFLAGS_KERNEL =
+--
+2.1.4
+
diff --git a/yocto-poky/meta/recipes-core/busybox/busybox_1.23.2.bb b/yocto-poky/meta/recipes-core/busybox/busybox_1.23.2.bb
index e4d9f9718..7258df022 100644
--- a/yocto-poky/meta/recipes-core/busybox/busybox_1.23.2.bb
+++ b/yocto-poky/meta/recipes-core/busybox/busybox_1.23.2.bb
@@ -30,8 +30,12 @@ SRC_URI = "http://www.busybox.net/downloads/busybox-${PV}.tar.bz2;name=tarball \
file://login-utilities.cfg \
file://recognize_connmand.patch \
file://busybox-cross-menuconfig.patch \
+ file://0001-Switch-to-POSIX-utmpx-API.patch \
file://0001-ifconfig-fix-double-free-fatal-error-in-INET_sprint.patch \
file://0001-chown-fix-help-text.patch \
+ file://0001-Use-CC-when-linking-instead-of-LD-and-use-CFLAGS-and.patch \
+ file://0002-Passthrough-r-to-linker.patch \
+ file://0001-randconfig-fix.patch \
file://mount-via-label.cfg \
file://sha1sum.cfg \
file://sha256sum.cfg \
diff --git a/yocto-poky/meta/recipes-core/coreutils/coreutils-6.9/loadavg.patch b/yocto-poky/meta/recipes-core/coreutils/coreutils-6.9/loadavg.patch
new file mode 100644
index 000000000..c72efd4d3
--- /dev/null
+++ b/yocto-poky/meta/recipes-core/coreutils/coreutils-6.9/loadavg.patch
@@ -0,0 +1,18 @@
+Remove the hardcoded path so OE's configure QA does not detect it and fail the build.
+When cross-compiling it makes no sense to search host paths for target libraries anyway.
+
+Upstream-Status: Inappropriate [OE Specific]
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+
+Index: coreutils-6.9/m4/getloadavg.m4
+===================================================================
+--- coreutils-6.9.orig/m4/getloadavg.m4
++++ coreutils-6.9/m4/getloadavg.m4
+@@ -49,7 +49,6 @@ if test $gl_have_func = no; then
+ # There is a commonly available library for RS/6000 AIX.
+ # Since it is not a standard part of AIX, it might be installed locally.
+ gl_getloadavg_LIBS=$LIBS
+- LIBS="-L/usr/local/lib $LIBS"
+ AC_CHECK_LIB(getloadavg, getloadavg,
+ [LIBS="-lgetloadavg $LIBS"], [LIBS=$gl_getloadavg_LIBS])
+ fi
diff --git a/yocto-poky/meta/recipes-core/coreutils/coreutils_6.9.bb b/yocto-poky/meta/recipes-core/coreutils/coreutils_6.9.bb
index 4ff1d50bd..e9f82abc8 100644
--- a/yocto-poky/meta/recipes-core/coreutils/coreutils_6.9.bb
+++ b/yocto-poky/meta/recipes-core/coreutils/coreutils_6.9.bb
@@ -9,6 +9,7 @@ LICENSE = "GPLv2+"
LIC_FILES_CHKSUM = "file://COPYING;md5=751419260aa954499f7abaabaa882bbe \
file://src/ls.c;beginline=4;endline=16;md5=15ed60f67b1db5fedd5dbc37cf8a9543"
PR = "r5"
+DEPENDS = "virtual/libiconv"
inherit autotools gettext texinfo
@@ -25,6 +26,7 @@ SRC_URI = "${GNU_MIRROR}/coreutils/${BP}.tar.bz2 \
file://coreutils-build-with-acl.patch \
file://coreutils-fix-texinfo.patch \
file://fix_for_manpage_building.patch \
+ file://loadavg.patch \
"
SRC_URI[md5sum] = "c9607d8495f16e98906e7ed2d9751a06"
diff --git a/yocto-poky/meta/recipes-core/coreutils/coreutils_8.24.bb b/yocto-poky/meta/recipes-core/coreutils/coreutils_8.24.bb
index 034ebcda9..f04234624 100644
--- a/yocto-poky/meta/recipes-core/coreutils/coreutils_8.24.bb
+++ b/yocto-poky/meta/recipes-core/coreutils/coreutils_8.24.bb
@@ -62,7 +62,7 @@ do_compile_prepend () {
mkdir -p ${B}/src
}
-do_install_append() {
+do_install_append_class-target() {
for i in df mktemp base64; do mv ${D}${bindir}/$i ${D}${bindir}/$i.${BPN}; done
install -d ${D}${base_bindir}
diff --git a/yocto-poky/meta/recipes-core/dropbear/dropbear/dropbear@.service b/yocto-poky/meta/recipes-core/dropbear/dropbear/dropbear@.service
index 6fe994209..b420bcddc 100644
--- a/yocto-poky/meta/recipes-core/dropbear/dropbear/dropbear@.service
+++ b/yocto-poky/meta/recipes-core/dropbear/dropbear/dropbear@.service
@@ -4,8 +4,9 @@ Wants=dropbearkey.service
After=syslog.target dropbearkey.service
[Service]
+Environment="DROPBEAR_RSAKEY_DIR=/etc/dropbear"
EnvironmentFile=-/etc/default/dropbear
-ExecStart=-@SBINDIR@/dropbear -i -r /etc/dropbear/dropbear_rsa_host_key $DROPBEAR_EXTRA_ARGS
+ExecStart=-@SBINDIR@/dropbear -i -r ${DROPBEAR_RSAKEY_DIR}/dropbear_rsa_host_key $DROPBEAR_EXTRA_ARGS
ExecReload=@BASE_BINDIR@/kill -HUP $MAINPID
StandardInput=socket
KillMode=process
diff --git a/yocto-poky/meta/recipes-core/dropbear/dropbear/dropbearkey.service b/yocto-poky/meta/recipes-core/dropbear/dropbear/dropbearkey.service
index ccc21d5cc..c49053d57 100644
--- a/yocto-poky/meta/recipes-core/dropbear/dropbear/dropbearkey.service
+++ b/yocto-poky/meta/recipes-core/dropbear/dropbear/dropbearkey.service
@@ -1,8 +1,13 @@
[Unit]
Description=SSH Key Generation
-ConditionPathExists=|!/etc/dropbear/dropbear_rsa_host_key
+RequiresMountsFor=/var /var/lib
+ConditionPathExists=!/etc/dropbear/dropbear_rsa_host_key
+ConditionPathExists=!/var/lib/dropbear/dropbear_rsa_host_key
[Service]
+Environment="DROPBEAR_RSAKEY_DIR=/etc/dropbear"
+EnvironmentFile=-/etc/default/dropbear
Type=oneshot
-ExecStart=@SBINDIR@/dropbearkey -t rsa -f /etc/dropbear/dropbear_rsa_host_key
+ExecStart=@BASE_BINDIR@/mkdir -p ${DROPBEAR_RSAKEY_DIR}
+ExecStart=@SBINDIR@/dropbearkey -t rsa -f ${DROPBEAR_RSAKEY_DIR}/dropbear_rsa_host_key
RemainAfterExit=yes
diff --git a/yocto-poky/meta/recipes-core/glibc/cross-localedef-native_2.22.bb b/yocto-poky/meta/recipes-core/glibc/cross-localedef-native_2.22.bb
index 2153ece0e..3aefe748f 100644
--- a/yocto-poky/meta/recipes-core/glibc/cross-localedef-native_2.22.bb
+++ b/yocto-poky/meta/recipes-core/glibc/cross-localedef-native_2.22.bb
@@ -14,12 +14,13 @@ inherit autotools
FILESEXTRAPATHS =. "${FILE_DIRNAME}/${PN}:${FILE_DIRNAME}/glibc:"
-BRANCH ?= "release/${PV}/master"
+SRCBRANCH ?= "release/${PV}/master"
GLIBC_GIT_URI ?= "git://sourceware.org/git/glibc.git"
-SRC_URI = "${GLIBC_GIT_URI};branch=${BRANCH};name=glibc \
+SRC_URI = "${GLIBC_GIT_URI};branch=${SRCBRANCH};name=glibc \
git://github.com/kraj/localedef;branch=master;name=localedef;destsuffix=git/localedef \
file://fix_for_centos_5.8.patch \
+ file://strcoll-Remove-incorrect-STRDIFF-based-optimization-.patch \
${EGLIBCPATCHES} \
"
EGLIBCPATCHES = "\
diff --git a/yocto-poky/meta/recipes-core/glibc/glibc-locale.inc b/yocto-poky/meta/recipes-core/glibc/glibc-locale.inc
index df6d07343..2352bd029 100644
--- a/yocto-poky/meta/recipes-core/glibc/glibc-locale.inc
+++ b/yocto-poky/meta/recipes-core/glibc/glibc-locale.inc
@@ -87,7 +87,7 @@ do_install () {
if [ -e ${LOCALETREESRC}/${datadir}/locale ]; then
cp -fpPR ${LOCALETREESRC}/${datadir}/locale ${D}${datadir}
fi
- chown root.root -R ${D}
+ chown root:root -R ${D}
cp -fpPR ${LOCALETREESRC}/SUPPORTED ${WORKDIR}
}
diff --git a/yocto-poky/meta/recipes-core/glibc/glibc/0028-Clear-ELF_RTYPE_CLASS_EXTERN_PROTECTED_DATA-for-prel.patch b/yocto-poky/meta/recipes-core/glibc/glibc/0028-Clear-ELF_RTYPE_CLASS_EXTERN_PROTECTED_DATA-for-prel.patch
new file mode 100644
index 000000000..3455df1cf
--- /dev/null
+++ b/yocto-poky/meta/recipes-core/glibc/glibc/0028-Clear-ELF_RTYPE_CLASS_EXTERN_PROTECTED_DATA-for-prel.patch
@@ -0,0 +1,84 @@
+From cadaf1336332ca7bcdfe4a400776e5782a20e26d Mon Sep 17 00:00:00 2001
+From: "H.J. Lu" <hjl.tools@gmail.com>
+Date: Wed, 28 Oct 2015 07:49:44 -0700
+Subject: [PATCH] Keep only ELF_RTYPE_CLASS_{PLT|COPY} bits for prelink
+
+prelink runs ld.so with the environment variable LD_TRACE_PRELINKING
+set to dump the relocation type class from _dl_debug_bindings. prelink
+has the following relocation type classes:
+
+ #define RTYPE_CLASS_VALID 8
+ #define RTYPE_CLASS_PLT (8|1)
+ #define RTYPE_CLASS_COPY (8|2)
+ #define RTYPE_CLASS_TLS (8|4)
+
+where ELF_RTYPE_CLASS_EXTERN_PROTECTED_DATA has a conflict with
+RTYPE_CLASS_TLS.
+
+Since prelink only uses ELF_RTYPE_CLASS_PLT and ELF_RTYPE_CLASS_COPY
+bits, we should clear the other bits when the DL_DEBUG_PRELINK bit is
+set.
+
+ [BZ #19178]
+ * elf/dl-lookup.c (RTYPE_CLASS_VALID): New.
+ (RTYPE_CLASS_PLT): Likewise.
+ (RTYPE_CLASS_COPY): Likewise.
+ (RTYPE_CLASS_TLS): Likewise.
+ (_dl_debug_bindings): Use RTYPE_CLASS_TLS and RTYPE_CLASS_VALID
+ to set relocation type class for DL_DEBUG_PRELINK. Keep only
+ ELF_RTYPE_CLASS_PLT and ELF_RTYPE_CLASS_COPY bits for
+ DL_DEBUG_PRELINK.
+
+Upstream-Status: Submitted (https://sourceware.org/bugzilla/show_bug.cgi?id=19178)
+
+Signed-off-by: Mark Hatle <mark.hatle@windriver.com>
+---
+ elf/dl-lookup.c | 21 +++++++++++++++++++--
+ 1 file changed, 19 insertions(+), 2 deletions(-)
+
+diff --git a/elf/dl-lookup.c b/elf/dl-lookup.c
+index 581fb20..6ae6cc3 100644
+--- a/elf/dl-lookup.c
++++ b/elf/dl-lookup.c
+@@ -1016,6 +1016,18 @@ _dl_debug_bindings (const char *undef_name, struct link_map *undef_map,
+ #ifdef SHARED
+ if (GLRO(dl_debug_mask) & DL_DEBUG_PRELINK)
+ {
++/* ELF_RTYPE_CLASS_XXX must match RTYPE_CLASS_XXX used by prelink with
++ LD_TRACE_PRELINKING. */
++#define RTYPE_CLASS_VALID 8
++#define RTYPE_CLASS_PLT (8|1)
++#define RTYPE_CLASS_COPY (8|2)
++#define RTYPE_CLASS_TLS (8|4)
++#if ELF_RTYPE_CLASS_PLT != 0 && ELF_RTYPE_CLASS_PLT != 1
++# error ELF_RTYPE_CLASS_PLT must be 0 or 1!
++#endif
++#if ELF_RTYPE_CLASS_COPY != 0 && ELF_RTYPE_CLASS_COPY != 2
++# error ELF_RTYPE_CLASS_COPY must be 0 or 2!
++#endif
+ int conflict = 0;
+ struct sym_val val = { NULL, NULL };
+
+@@ -1071,12 +1083,17 @@ _dl_debug_bindings (const char *undef_name, struct link_map *undef_map,
+
+ if (value->s)
+ {
++ /* Keep only ELF_RTYPE_CLASS_PLT and ELF_RTYPE_CLASS_COPY
++ bits since prelink only uses them. */
++ type_class &= ELF_RTYPE_CLASS_PLT | ELF_RTYPE_CLASS_COPY;
+ if (__glibc_unlikely (ELFW(ST_TYPE) (value->s->st_info)
+ == STT_TLS))
+- type_class = 4;
++ /* Clear the RTYPE_CLASS_VALID bit in RTYPE_CLASS_TLS. */
++ type_class = RTYPE_CLASS_TLS & ~RTYPE_CLASS_VALID;
+ else if (__glibc_unlikely (ELFW(ST_TYPE) (value->s->st_info)
+ == STT_GNU_IFUNC))
+- type_class |= 8;
++ /* Set the RTYPE_CLASS_VALID bit. */
++ type_class |= RTYPE_CLASS_VALID;
+ }
+
+ if (conflict
+--
+1.9.3
+
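To make the bit collision described in the commit message above concrete, here
is a small stand-alone sketch. The RTYPE_CLASS_* values are quoted from the
message; the ELF_RTYPE_CLASS_* values of 1, 2 and 4 are typical glibc
definitions and are assumed here only for illustration:

    #include <stdio.h>

    /* prelink's relocation type classes, as listed in the commit message. */
    #define RTYPE_CLASS_VALID 8
    #define RTYPE_CLASS_PLT   (8|1)
    #define RTYPE_CLASS_COPY  (8|2)
    #define RTYPE_CLASS_TLS   (8|4)

    /* glibc-side classes (assumed values). EXTERN_PROTECTED_DATA is the bit
     * that collides with the TLS bit once VALID (8) is OR'ed in. */
    #define ELF_RTYPE_CLASS_PLT                   1
    #define ELF_RTYPE_CLASS_COPY                  2
    #define ELF_RTYPE_CLASS_EXTERN_PROTECTED_DATA 4

    int main(void)
    {
        int type_class = ELF_RTYPE_CLASS_PLT
                         | ELF_RTYPE_CLASS_EXTERN_PROTECTED_DATA;

        /* Unmasked: 8|1|4 = 0xd, where the stray 0x4 looks like the TLS bit. */
        printf("reported without masking: %#x\n", RTYPE_CLASS_VALID | type_class);

        /* The fix keeps only the PLT and COPY bits before reporting. */
        type_class &= ELF_RTYPE_CLASS_PLT | ELF_RTYPE_CLASS_COPY;
        printf("reported with masking:    %#x\n", RTYPE_CLASS_VALID | type_class);
        return 0;
    }

With masking, prelink sees RTYPE_CLASS_PLT (0x9) as intended; without it the
extra bit makes the value look like it involves RTYPE_CLASS_TLS.
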
diff --git a/yocto-poky/meta/recipes-core/glibc/glibc/CVE-2015-7547.patch b/yocto-poky/meta/recipes-core/glibc/glibc/CVE-2015-7547.patch
new file mode 100644
index 000000000..4e539f849
--- /dev/null
+++ b/yocto-poky/meta/recipes-core/glibc/glibc/CVE-2015-7547.patch
@@ -0,0 +1,642 @@
+From e9db92d3acfe1822d56d11abcea5bfc4c41cf6ca Mon Sep 17 00:00:00 2001
+From: Carlos O'Donell <carlos@systemhalted.org>
+Date: Tue, 16 Feb 2016 21:26:37 -0500
+Subject: [PATCH] CVE-2015-7547: getaddrinfo() stack-based buffer overflow (Bug
+ 18665).
+
+* A stack-based buffer overflow was found in libresolv when invoked from
+ libnss_dns, allowing specially crafted DNS responses to seize control
+ of execution flow in the DNS client. The buffer overflow occurs in
+ the functions send_dg (send datagram) and send_vc (send TCP) for the
+ NSS module libnss_dns.so.2 when calling getaddrinfo with AF_UNSPEC
+ family. The use of AF_UNSPEC triggers the low-level resolver code to
+ send out two parallel queries for A and AAAA. A mismanagement of the
+ buffers used for those queries could result in the response of a query
+ writing beyond the alloca allocated buffer created by
+ _nss_dns_gethostbyname4_r. Buffer management is simplified to remove
+ the overflow. Thanks to the Google Security Team and Red Hat for
+ reporting the security impact of this issue, and Robert Holiday of
+ Ciena for reporting the related bug 18665. (CVE-2015-7547)
+
+See also:
+https://sourceware.org/ml/libc-alpha/2016-02/msg00416.html
+https://sourceware.org/ml/libc-alpha/2016-02/msg00418.html
+
+Upstream-Status: Backport
+CVE: CVE-2015-7547
+
+https://sourceware.org/git/?p=glibc.git;a=commit;h=e9db92d3acfe1822d56d11abcea5bfc4c41cf6ca
+Minor tweaking was needed to apply this to ChangeLog and res_send.c.
+
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ ChangeLog | 17 ++-
+ NEWS | 14 +++
+ resolv/nss_dns/dns-host.c | 111 +++++++++++++++++++-
+ resolv/res_query.c | 3 +
+ resolv/res_send.c | 260 +++++++++++++++++++++++++++++++++++-----------
+ 5 files changed, 339 insertions(+), 66 deletions(-)
+
+Index: git/NEWS
+===================================================================
+--- git.orig/NEWS
++++ git/NEWS
+@@ -105,6 +105,20 @@ Security related changes:
+ depending on the length of the string passed as an argument to the
+ functions. Reported by Joseph Myers.
+
++* A stack-based buffer overflow was found in libresolv when invoked from
++ libnss_dns, allowing specially crafted DNS responses to seize control
++ of execution flow in the DNS client. The buffer overflow occurs in
++ the functions send_dg (send datagram) and send_vc (send TCP) for the
++ NSS module libnss_dns.so.2 when calling getaddrinfo with AF_UNSPEC
++ family. The use of AF_UNSPEC triggers the low-level resolver code to
++ send out two parallel queries for A and AAAA. A mismanagement of the
++ buffers used for those queries could result in the response of a query
++ writing beyond the alloca allocated buffer created by
++ _nss_dns_gethostbyname4_r. Buffer management is simplified to remove
++ the overflow. Thanks to the Google Security Team and Red Hat for
++ reporting the security impact of this issue, and Robert Holiday of
++ Ciena for reporting the related bug 18665. (CVE-2015-7547)
++
+ * The following bugs are resolved with this release:
+
+ 6652, 10672, 12674, 12847, 12926, 13862, 14132, 14138, 14171, 14498,
+Index: git/resolv/nss_dns/dns-host.c
+===================================================================
+--- git.orig/resolv/nss_dns/dns-host.c
++++ git/resolv/nss_dns/dns-host.c
+@@ -1031,7 +1031,10 @@ gaih_getanswer_slice (const querybuf *an
+ int h_namelen = 0;
+
+ if (ancount == 0)
+- return NSS_STATUS_NOTFOUND;
++ {
++ *h_errnop = HOST_NOT_FOUND;
++ return NSS_STATUS_NOTFOUND;
++ }
+
+ while (ancount-- > 0 && cp < end_of_message && had_error == 0)
+ {
+@@ -1208,7 +1211,14 @@ gaih_getanswer_slice (const querybuf *an
+ /* Special case here: if the resolver sent a result but it only
+ contains a CNAME while we are looking for a T_A or T_AAAA record,
+ we fail with NOTFOUND instead of TRYAGAIN. */
+- return canon == NULL ? NSS_STATUS_TRYAGAIN : NSS_STATUS_NOTFOUND;
++ if (canon != NULL)
++ {
++ *h_errnop = HOST_NOT_FOUND;
++ return NSS_STATUS_NOTFOUND;
++ }
++
++ *h_errnop = NETDB_INTERNAL;
++ return NSS_STATUS_TRYAGAIN;
+ }
+
+
+@@ -1222,11 +1232,101 @@ gaih_getanswer (const querybuf *answer1,
+
+ enum nss_status status = NSS_STATUS_NOTFOUND;
+
++ /* Combining the NSS status of two distinct queries requires some
++ compromise and attention to symmetry (A or AAAA queries can be
++ returned in any order). What follows is a breakdown of how this
++ code is expected to work and why. We discuss only SUCCESS,
++ TRYAGAIN, NOTFOUND and UNAVAIL, since they are the only returns
++ that apply (though RETURN and MERGE exist). We make a distinction
++ between TRYAGAIN (recoverable) and TRYAGAIN' (not-recoverable).
++ A recoverable TRYAGAIN is almost always due to buffer size issues
++ and returns ERANGE in errno and the caller is expected to retry
++ with a larger buffer.
++
++ Lastly, you may be tempted to make significant changes to the
++ conditions in this code to bring about symmetry between responses.
++ Please don't change anything without due consideration for
++ expected application behaviour. Some of the synthesized responses
++ aren't very well thought out and sometimes appear to imply that
++ IPv4 responses are always answer 1, and IPv6 responses are always
++ answer 2, but that's not true (see the implementation of send_dg
++ and send_vc to see response can arrive in any order, particularly
++ for UDP). However, we expect it holds roughly enough of the time
++ that this code works, but certainly needs to be fixed to make this
++ a more robust implementation.
++
++ ----------------------------------------------
++ | Answer 1 Status / | Synthesized | Reason |
++ | Answer 2 Status | Status | |
++ |--------------------------------------------|
++ | SUCCESS/SUCCESS | SUCCESS | [1] |
++ | SUCCESS/TRYAGAIN | TRYAGAIN | [5] |
++ | SUCCESS/TRYAGAIN' | SUCCESS | [1] |
++ | SUCCESS/NOTFOUND | SUCCESS | [1] |
++ | SUCCESS/UNAVAIL | SUCCESS | [1] |
++ | TRYAGAIN/SUCCESS | TRYAGAIN | [2] |
++ | TRYAGAIN/TRYAGAIN | TRYAGAIN | [2] |
++ | TRYAGAIN/TRYAGAIN' | TRYAGAIN | [2] |
++ | TRYAGAIN/NOTFOUND | TRYAGAIN | [2] |
++ | TRYAGAIN/UNAVAIL | TRYAGAIN | [2] |
++ | TRYAGAIN'/SUCCESS | SUCCESS | [3] |
++ | TRYAGAIN'/TRYAGAIN | TRYAGAIN | [3] |
++ | TRYAGAIN'/TRYAGAIN' | TRYAGAIN' | [3] |
++ | TRYAGAIN'/NOTFOUND | TRYAGAIN' | [3] |
++ | TRYAGAIN'/UNAVAIL | UNAVAIL | [3] |
++ | NOTFOUND/SUCCESS | SUCCESS | [3] |
++ | NOTFOUND/TRYAGAIN | TRYAGAIN | [3] |
++ | NOTFOUND/TRYAGAIN' | TRYAGAIN' | [3] |
++ | NOTFOUND/NOTFOUND | NOTFOUND | [3] |
++ | NOTFOUND/UNAVAIL | UNAVAIL | [3] |
++ | UNAVAIL/SUCCESS | UNAVAIL | [4] |
++ | UNAVAIL/TRYAGAIN | UNAVAIL | [4] |
++ | UNAVAIL/TRYAGAIN' | UNAVAIL | [4] |
++ | UNAVAIL/NOTFOUND | UNAVAIL | [4] |
++ | UNAVAIL/UNAVAIL | UNAVAIL | [4] |
++ ----------------------------------------------
++
++ [1] If the first response is a success we return success.
++ This ignores the state of the second answer and in fact
++ incorrectly sets errno and h_errno to that of the second
++ answer. However because the response is a success we ignore
++ *errnop and *h_errnop (though that means you touched errno on
++ success). We are being conservative here and returning the
++ likely IPv4 response in the first answer as a success.
++
++ [2] If the first response is a recoverable TRYAGAIN we return
++ that instead of looking at the second response. The
++ expectation here is that we have failed to get an IPv4 response
++ and should retry both queries.
++
++ [3] If the first response was not a SUCCESS and the second
++ response is not NOTFOUND (had a SUCCESS, need to TRYAGAIN,
++ or failed entirely e.g. TRYAGAIN' and UNAVAIL) then use the
++ result from the second response, otherwise the first responses
++ status is used. Again we have some odd side-effects when the
++ second response is NOTFOUND because we overwrite *errnop and
++ *h_errnop that means that a first answer of NOTFOUND might see
++ its *errnop and *h_errnop values altered. Whether it matters
++ in practice that a first response NOTFOUND has the wrong
++ *errnop and *h_errnop is undecided.
++
++ [4] If the first response is UNAVAIL we return that instead of
++ looking at the second response. The expectation here is that
++ it will have failed similarly e.g. configuration failure.
++
++ [5] Testing this code is complicated by the fact that truncated
++ second response buffers might be returned as SUCCESS if the
++ first answer is a SUCCESS. To fix this we add symmetry to
++ TRYAGAIN with the second response. If the second response
++ is a recoverable error we now return TRYAGAIN even if the first
++ response was SUCCESS. */
++
+ if (anslen1 > 0)
+ status = gaih_getanswer_slice(answer1, anslen1, qname,
+ &pat, &buffer, &buflen,
+ errnop, h_errnop, ttlp,
+ &first);
++
+ if ((status == NSS_STATUS_SUCCESS || status == NSS_STATUS_NOTFOUND
+ || (status == NSS_STATUS_TRYAGAIN
+ /* We want to look at the second answer in case of an
+@@ -1242,8 +1342,15 @@ gaih_getanswer (const querybuf *answer1,
+ &pat, &buffer, &buflen,
+ errnop, h_errnop, ttlp,
+ &first);
++ /* Use the second response status in some cases. */
+ if (status != NSS_STATUS_SUCCESS && status2 != NSS_STATUS_NOTFOUND)
+ status = status2;
++ /* Do not return a truncated second response (unless it was
++ unavoidable e.g. unrecoverable TRYAGAIN). */
++ if (status == NSS_STATUS_SUCCESS
++ && (status2 == NSS_STATUS_TRYAGAIN
++ && *errnop == ERANGE && *h_errnop != NO_RECOVERY))
++ status = NSS_STATUS_TRYAGAIN;
+ }
+
+ return status;
+Index: git/resolv/res_query.c
+===================================================================
+--- git.orig/resolv/res_query.c
++++ git/resolv/res_query.c
+@@ -396,6 +396,7 @@ __libc_res_nsearch(res_state statp,
+ {
+ free (*answerp2);
+ *answerp2 = NULL;
++ *nanswerp2 = 0;
+ *answerp2_malloced = 0;
+ }
+ }
+@@ -447,6 +448,7 @@ __libc_res_nsearch(res_state statp,
+ {
+ free (*answerp2);
+ *answerp2 = NULL;
++ *nanswerp2 = 0;
+ *answerp2_malloced = 0;
+ }
+
+@@ -521,6 +523,7 @@ __libc_res_nsearch(res_state statp,
+ {
+ free (*answerp2);
+ *answerp2 = NULL;
++ *nanswerp2 = 0;
+ *answerp2_malloced = 0;
+ }
+ if (saved_herrno != -1)
+Index: git/resolv/res_send.c
+===================================================================
+--- git.orig/resolv/res_send.c
++++ git/resolv/res_send.c
+@@ -1,3 +1,20 @@
++/* Copyright (C) 2016 Free Software Foundation, Inc.
++ This file is part of the GNU C Library.
++
++ The GNU C Library is free software; you can redistribute it and/or
++ modify it under the terms of the GNU Lesser General Public
++ License as published by the Free Software Foundation; either
++ version 2.1 of the License, or (at your option) any later version.
++
++ The GNU C Library is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ Lesser General Public License for more details.
++
++ You should have received a copy of the GNU Lesser General Public
++ License along with the GNU C Library; if not, see
++ <http://www.gnu.org/licenses/>. */
++
+ /*
+ * Copyright (c) 1985, 1989, 1993
+ * The Regents of the University of California. All rights reserved.
+@@ -363,6 +380,8 @@ __libc_res_nsend(res_state statp, const
+ #ifdef USE_HOOKS
+ if (__glibc_unlikely (statp->qhook || statp->rhook)) {
+ if (anssiz < MAXPACKET && ansp) {
++ /* Always allocate MAXPACKET, callers expect
++ this specific size. */
+ u_char *buf = malloc (MAXPACKET);
+ if (buf == NULL)
+ return (-1);
+@@ -638,6 +657,77 @@ get_nsaddr (res_state statp, int n)
+ return (struct sockaddr *) (void *) &statp->nsaddr_list[n];
+ }
+
++/* The send_vc function is responsible for sending a DNS query over TCP
++ to the nameserver numbered NS from the res_state STATP i.e.
++ EXT(statp).nssocks[ns]. The function supports sending both IPv4 and
++ IPv6 queries serially on the same socket.
++
++ Please note that for TCP there is no way to disable sending both
++ queries, unlike UDP, which honours RES_SNGLKUP and RES_SNGLKUPREOP
++ and sends the queries serially and waits for the result after each
++ sent query. This implementation should be corrected to honour these
++ options.
++
++ Please also note that for TCP we send both queries over the same
++ socket one after another. This technically violates best practice
++ since the server is allowed to read the first query, respond, and
++ then close the socket (to service another client). If the server
++ does this, then the remaining second query in the socket data buffer
++ will cause the server to send the client an RST which will arrive
++ asynchronously and the client's OS will likely tear down the socket
++ receive buffer resulting in a potentially short read and lost
++ response data. This will force the client to retry the query again,
++ and this process may repeat until all servers and connection resets
++ are exhausted and then the query will fail. It's not known if this
++ happens with any frequency in real DNS server implementations. This
++ implementation should be corrected to use two sockets by default for
++ parallel queries.
++
++ The query stored in BUF of BUFLEN length is sent first followed by
++ the query stored in BUF2 of BUFLEN2 length. Queries are sent
++ serially on the same socket.
++
++ Answers to the query are stored firstly in *ANSP up to a max of
++ *ANSSIZP bytes. If more than *ANSSIZP bytes are needed and ANSCP
++ is non-NULL (to indicate that modifying the answer buffer is allowed)
++ then malloc is used to allocate a new response buffer and ANSCP and
++ ANSP will both point to the new buffer. If more than *ANSSIZP bytes
++ are needed but ANSCP is NULL, then as much of the response as
++ possible is read into the buffer, but the results will be truncated.
++ When truncation happens because of a small answer buffer the DNS
++ packet's header field TC will be set to 1, indicating a truncated
++ message and the rest of the socket data will be read and discarded.
++
++ Answers to the query are stored secondly in *ANSP2 up to a max of
++ *ANSSIZP2 bytes, with the actual response length stored in
++ *RESPLEN2. If more than *ANSSIZP bytes are needed and ANSP2
++ is non-NULL (required for a second query) then malloc is used to
++ allocate a new response buffer, *ANSSIZP2 is set to the new buffer
++ size and *ANSP2_MALLOCED is set to 1.
++
++ The ANSP2_MALLOCED argument will eventually be removed as the
++ change in buffer pointer can be used to detect the buffer has
++ changed and that the caller should use free on the new buffer.
++
++ Note that the answers may arrive in any order from the server and
++ therefore the first and second answer buffers may not correspond to
++ the first and second queries.
++
++ It is not supported to call this function with a non-NULL ANSP2
++ but a NULL ANSCP. Put another way, you can call send_vc with a
++ single unmodifiable buffer or two modifiable buffers, but no other
++ combination is supported.
++
++ It is the caller's responsibility to free the malloc allocated
++ buffers by detecting that the pointers have changed from their
++ original values i.e. *ANSCP or *ANSP2 has changed.
++
++ If errors are encountered then *TERRNO is set to an appropriate
++ errno value and a zero result is returned for a recoverable error,
++ and a less-than zero result is returned for a non-recoverable error.
++
++ If no errors are encountered then *TERRNO is left unmodified and
++ the length of the first response in bytes is returned. */
+ static int
+ send_vc(res_state statp,
+ const u_char *buf, int buflen, const u_char *buf2, int buflen2,
+@@ -647,11 +737,7 @@ send_vc(res_state statp,
+ {
+ const HEADER *hp = (HEADER *) buf;
+ const HEADER *hp2 = (HEADER *) buf2;
+- u_char *ans = *ansp;
+- int orig_anssizp = *anssizp;
+- // XXX REMOVE
+- // int anssiz = *anssizp;
+- HEADER *anhp = (HEADER *) ans;
++ HEADER *anhp = (HEADER *) *ansp;
+ struct sockaddr *nsap = get_nsaddr (statp, ns);
+ int truncating, connreset, n;
+ /* On some architectures compiler might emit a warning indicating
+@@ -743,6 +829,8 @@ send_vc(res_state statp,
+ * Receive length & response
+ */
+ int recvresp1 = 0;
++ /* Skip the second response if there is no second query.
++ To do that we mark the second response as received. */
+ int recvresp2 = buf2 == NULL;
+ uint16_t rlen16;
+ read_len:
+@@ -779,40 +867,14 @@ send_vc(res_state statp,
+ u_char **thisansp;
+ int *thisresplenp;
+ if ((recvresp1 | recvresp2) == 0 || buf2 == NULL) {
++ /* We have not received any responses
++ yet or we only have one response to
++ receive. */
+ thisanssizp = anssizp;
+ thisansp = anscp ?: ansp;
+ assert (anscp != NULL || ansp2 == NULL);
+ thisresplenp = &resplen;
+ } else {
+- if (*anssizp != MAXPACKET) {
+- /* No buffer allocated for the first
+- reply. We can try to use the rest
+- of the user-provided buffer. */
+-#if __GNUC_PREREQ (4, 7)
+- DIAG_PUSH_NEEDS_COMMENT;
+- DIAG_IGNORE_NEEDS_COMMENT (5, "-Wmaybe-uninitialized");
+-#endif
+-#if _STRING_ARCH_unaligned
+- *anssizp2 = orig_anssizp - resplen;
+- *ansp2 = *ansp + resplen;
+-#else
+- int aligned_resplen
+- = ((resplen + __alignof__ (HEADER) - 1)
+- & ~(__alignof__ (HEADER) - 1));
+- *anssizp2 = orig_anssizp - aligned_resplen;
+- *ansp2 = *ansp + aligned_resplen;
+-#endif
+-#if __GNUC_PREREQ (4, 7)
+- DIAG_POP_NEEDS_COMMENT;
+-#endif
+- } else {
+- /* The first reply did not fit into the
+- user-provided buffer. Maybe the second
+- answer will. */
+- *anssizp2 = orig_anssizp;
+- *ansp2 = *ansp;
+- }
+-
+ thisanssizp = anssizp2;
+ thisansp = ansp2;
+ thisresplenp = resplen2;
+@@ -820,10 +882,14 @@ send_vc(res_state statp,
+ anhp = (HEADER *) *thisansp;
+
+ *thisresplenp = rlen;
+- if (rlen > *thisanssizp) {
+- /* Yes, we test ANSCP here. If we have two buffers
+- both will be allocatable. */
+- if (__glibc_likely (anscp != NULL)) {
++ /* Is the answer buffer too small? */
++ if (*thisanssizp < rlen) {
++ /* If the current buffer is not the static
++ user-supplied buffer then we can reallocate
++ it. */
++ if (thisansp != NULL && thisansp != ansp) {
++ /* Always allocate MAXPACKET, callers expect
++ this specific size. */
+ u_char *newp = malloc (MAXPACKET);
+ if (newp == NULL) {
+ *terrno = ENOMEM;
+@@ -835,6 +901,9 @@ send_vc(res_state statp,
+ if (thisansp == ansp2)
+ *ansp2_malloced = 1;
+ anhp = (HEADER *) newp;
++ /* A uint16_t can't be larger than MAXPACKET
++ thus it's safe to allocate MAXPACKET but
++ read RLEN bytes instead. */
+ len = rlen;
+ } else {
+ Dprint(statp->options & RES_DEBUG,
+@@ -997,6 +1066,66 @@ reopen (res_state statp, int *terrno, in
+ return 1;
+ }
+
++/* The send_dg function is responsible for sending a DNS query over UDP
++ to the nameserver numbered NS from the res_state STATP i.e.
++ EXT(statp).nssocks[ns]. The function supports IPv4 and IPv6 queries
++ along with the ability to send the query in parallel for both stacks
++ (default) or serially (RES_SINGLKUP). It also supports serial lookup
++ with a close and reopen of the socket used to talk to the server
++ (RES_SNGLKUPREOP) to work around broken name servers.
++
++ The query stored in BUF of BUFLEN length is sent first followed by
++ the query stored in BUF2 of BUFLEN2 length. Queries are sent
++ in parallel (default) or serially (RES_SINGLKUP or RES_SNGLKUPREOP).
++
++ Answers to the query are stored firstly in *ANSP up to a max of
++ *ANSSIZP bytes. If more than *ANSSIZP bytes are needed and ANSCP
++ is non-NULL (to indicate that modifying the answer buffer is allowed)
++ then malloc is used to allocate a new response buffer and ANSCP and
++ ANSP will both point to the new buffer. If more than *ANSSIZP bytes
++ are needed but ANSCP is NULL, then as much of the response as
++ possible is read into the buffer, but the results will be truncated.
++ When truncation happens because of a small answer buffer the DNS
++ packet's header field TC will be set to 1, indicating a truncated
++ message, while the rest of the UDP packet is discarded.
++
++ Answers to the query are stored secondly in *ANSP2 up to a max of
++ *ANSSIZP2 bytes, with the actual response length stored in
++ *RESPLEN2. If more than *ANSSIZP bytes are needed and ANSP2
++ is non-NULL (required for a second query) then malloc is used to
++ allocate a new response buffer, *ANSSIZP2 is set to the new buffer
++ size and *ANSP2_MALLOCED is set to 1.
++
++ The ANSP2_MALLOCED argument will eventually be removed as the
++ change in buffer pointer can be used to detect the buffer has
++ changed and that the caller should use free on the new buffer.
++
++ Note that the answers may arrive in any order from the server and
++ therefore the first and second answer buffers may not correspond to
++ the first and second queries.
++
++ It is not supported to call this function with a non-NULL ANSP2
++ but a NULL ANSCP. Put another way, you can call send_vc with a
++ single unmodifiable buffer or two modifiable buffers, but no other
++ combination is supported.
++
++ It is the caller's responsibility to free the malloc allocated
++ buffers by detecting that the pointers have changed from their
++ original values i.e. *ANSCP or *ANSP2 has changed.
++
++ If an answer is truncated because of UDP datagram DNS limits then
++ *V_CIRCUIT is set to 1 and the return value non-zero to indicate to
++ the caller to retry with TCP. The value *GOTSOMEWHERE is set to 1
++ if any progress was made reading a response from the nameserver and
++ is used by the caller to distinguish between ECONNREFUSED and
++ ETIMEDOUT (the latter if *GOTSOMEWHERE is 1).
++
++ If errors are encountered then *TERRNO is set to an appropriate
++ errno value and a zero result is returned for a recoverable error,
++ and a less-than zero result is returned for a non-recoverable error.
++
++ If no errors are encountered then *TERRNO is left unmodified and
++ the length of the first response in bytes is returned. */
+ static int
+ send_dg(res_state statp,
+ const u_char *buf, int buflen, const u_char *buf2, int buflen2,
+@@ -1006,8 +1135,6 @@ send_dg(res_state statp,
+ {
+ const HEADER *hp = (HEADER *) buf;
+ const HEADER *hp2 = (HEADER *) buf2;
+- u_char *ans = *ansp;
+- int orig_anssizp = *anssizp;
+ struct timespec now, timeout, finish;
+ struct pollfd pfd[1];
+ int ptimeout;
+@@ -1040,6 +1167,8 @@ send_dg(res_state statp,
+ int need_recompute = 0;
+ int nwritten = 0;
+ int recvresp1 = 0;
++ /* Skip the second response if there is no second query.
++ To do that we mark the second response as received. */
+ int recvresp2 = buf2 == NULL;
+ pfd[0].fd = EXT(statp).nssocks[ns];
+ pfd[0].events = POLLOUT;
+@@ -1203,55 +1332,56 @@ send_dg(res_state statp,
+ int *thisresplenp;
+
+ if ((recvresp1 | recvresp2) == 0 || buf2 == NULL) {
++ /* We have not received any responses
++ yet or we only have one response to
++ receive. */
+ thisanssizp = anssizp;
+ thisansp = anscp ?: ansp;
+ assert (anscp != NULL || ansp2 == NULL);
+ thisresplenp = &resplen;
+ } else {
+- if (*anssizp != MAXPACKET) {
+- /* No buffer allocated for the first
+- reply. We can try to use the rest
+- of the user-provided buffer. */
+-#if _STRING_ARCH_unaligned
+- *anssizp2 = orig_anssizp - resplen;
+- *ansp2 = *ansp + resplen;
+-#else
+- int aligned_resplen
+- = ((resplen + __alignof__ (HEADER) - 1)
+- & ~(__alignof__ (HEADER) - 1));
+- *anssizp2 = orig_anssizp - aligned_resplen;
+- *ansp2 = *ansp + aligned_resplen;
+-#endif
+- } else {
+- /* The first reply did not fit into the
+- user-provided buffer. Maybe the second
+- answer will. */
+- *anssizp2 = orig_anssizp;
+- *ansp2 = *ansp;
+- }
+-
+ thisanssizp = anssizp2;
+ thisansp = ansp2;
+ thisresplenp = resplen2;
+ }
+
+ if (*thisanssizp < MAXPACKET
+- /* Yes, we test ANSCP here. If we have two buffers
+- both will be allocatable. */
+- && anscp
++ /* If the current buffer is not the static
++ user-supplied buffer then we can reallocate
++ it. */
++ && (thisansp != NULL && thisansp != ansp)
+ #ifdef FIONREAD
++ /* Is the size too small? */
+ && (ioctl (pfd[0].fd, FIONREAD, thisresplenp) < 0
+ || *thisanssizp < *thisresplenp)
+ #endif
+ ) {
++ /* Always allocate MAXPACKET, callers expect
++ this specific size. */
+ u_char *newp = malloc (MAXPACKET);
+ if (newp != NULL) {
+- *anssizp = MAXPACKET;
+- *thisansp = ans = newp;
++ *thisanssizp = MAXPACKET;
++ *thisansp = newp;
+ if (thisansp == ansp2)
+ *ansp2_malloced = 1;
+ }
+ }
++ /* We could end up with truncation if anscp was NULL
++ (not allowed to change caller's buffer) and the
++ response buffer size is too small. This isn't a
++ reliable way to detect truncation because the ioctl
++ may be an inaccurate report of the UDP message size.
++ Therefore we use this only to issue debug output.
++ To do truncation accurately with UDP we need
++ MSG_TRUNC which is only available on Linux. We
++ can abstract out the Linux-specific feature in the
++ future to detect truncation. */
++ if (__glibc_unlikely (*thisanssizp < *thisresplenp)) {
++ Dprint(statp->options & RES_DEBUG,
++ (stdout, ";; response may be truncated (UDP)\n")
++ );
++ }
++
+ HEADER *anhp = (HEADER *) *thisansp;
+ socklen_t fromlen = sizeof(struct sockaddr_in6);
+ assert (sizeof(from) <= fromlen);
+Index: git/ChangeLog
+===================================================================
+--- git.orig/ChangeLog
++++ git/ChangeLog
+@@ -1,3 +1,18 @@
++2016-02-15 Carlos O'Donell <carlos@redhat.com>
++
++ [BZ #18665]
++ * resolv/nss_dns/dns-host.c (gaih_getanswer_slice): Always set
++ *herrno_p.
++ (gaih_getanswer): Document functional behaviour. Return tryagain
++ if any result is tryagain.
++ * resolv/res_query.c (__libc_res_nsearch): Set buffer size to zero
++ when freed.
++ * resolv/res_send.c: Add copyright text.
++ (__libc_res_nsend): Document that MAXPACKET is expected.
++ (send_vc): Document. Remove buffer reuse.
++ (send_dg): Document. Remove buffer reuse. Set *thisanssizp to set the
++ size of the buffer. Add Dprint for truncated UDP buffer.
++
+ 2015-09-26 Paul Pluzhnikov <ppluzhnikov@google.com>
+
+ [BZ #18985]
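For context on the code path the patch above hardens: any resolver client that
asks for AF_UNSPEC drives the parallel A/AAAA machinery in send_dg/send_vc. A
minimal sketch of such a caller follows (ordinary, correct API use, not an
exploit and not taken from glibc's test suite; the host name and port are
arbitrary):

    #include <netdb.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <sys/types.h>

    int main(int argc, char **argv)
    {
        struct addrinfo hints, *res, *rp;
        int rc;

        memset(&hints, 0, sizeof(hints));
        hints.ai_family = AF_UNSPEC;      /* A and AAAA queried in parallel */
        hints.ai_socktype = SOCK_STREAM;

        rc = getaddrinfo(argc > 1 ? argv[1] : "example.org", "80", &hints, &res);
        if (rc != 0) {
            fprintf(stderr, "getaddrinfo: %s\n", gai_strerror(rc));
            return EXIT_FAILURE;
        }
        for (rp = res; rp != NULL; rp = rp->ai_next)
            printf("family=%d socktype=%d protocol=%d\n",
                   rp->ai_family, rp->ai_socktype, rp->ai_protocol);
        freeaddrinfo(res);
        return EXIT_SUCCESS;
    }

The overflow was triggered on the library side while handling the two
responses for such a lookup, which is why the fix lives entirely in resolv/
and callers like this one do not need to change.
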
diff --git a/yocto-poky/meta/recipes-core/glibc/glibc/CVE-2015-8776.patch b/yocto-poky/meta/recipes-core/glibc/glibc/CVE-2015-8776.patch
new file mode 100644
index 000000000..684f34417
--- /dev/null
+++ b/yocto-poky/meta/recipes-core/glibc/glibc/CVE-2015-8776.patch
@@ -0,0 +1,155 @@
+From d36c75fc0d44deec29635dd239b0fbd206ca49b7 Mon Sep 17 00:00:00 2001
+From: Paul Pluzhnikov <ppluzhnikov@google.com>
+Date: Sat, 26 Sep 2015 13:27:48 -0700
+Subject: [PATCH] Fix BZ #18985 -- out of range data to strftime() causes a
+ segfault
+
+Upstream-Status: Backport
+CVE: CVE-2015-8776
+[Yocto # 8980]
+
+https://sourceware.org/git/gitweb.cgi?p=glibc.git;h=d36c75fc0d44deec29635dd239b0fbd206ca49b7
+
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ ChangeLog | 8 ++++++++
+ NEWS | 2 +-
+ time/strftime_l.c | 20 +++++++++++++-------
+ time/tst-strftime.c | 52 +++++++++++++++++++++++++++++++++++++++++++++++++++-
+ 4 files changed, 73 insertions(+), 9 deletions(-)
+
+Index: git/ChangeLog
+===================================================================
+--- git.orig/ChangeLog
++++ git/ChangeLog
+@@ -1,3 +1,11 @@
++2015-09-26 Paul Pluzhnikov <ppluzhnikov@google.com>
++
++ [BZ #18985]
++ * time/strftime_l.c (a_wkday, f_wkday, a_month, f_month): Range check.
++ (__strftime_internal): Likewise.
++ * time/tst-strftime.c (do_bz18985): New test.
++ (do_test): Call it.
++
+ 2015-12-04 Joseph Myers <joseph@codesourcery.com>
+
+ [BZ #16961]
+Index: git/time/strftime_l.c
+===================================================================
+--- git.orig/time/strftime_l.c
++++ git/time/strftime_l.c
+@@ -514,13 +514,17 @@ __strftime_internal (s, maxsize, format,
+ only a few elements. Dereference the pointers only if the format
+ requires this. Then it is ok to fail if the pointers are invalid. */
+ # define a_wkday \
+- ((const CHAR_T *) _NL_CURRENT (LC_TIME, NLW(ABDAY_1) + tp->tm_wday))
++ ((const CHAR_T *) (tp->tm_wday < 0 || tp->tm_wday > 6 \
++ ? "?" : _NL_CURRENT (LC_TIME, NLW(ABDAY_1) + tp->tm_wday)))
+ # define f_wkday \
+- ((const CHAR_T *) _NL_CURRENT (LC_TIME, NLW(DAY_1) + tp->tm_wday))
++ ((const CHAR_T *) (tp->tm_wday < 0 || tp->tm_wday > 6 \
++ ? "?" : _NL_CURRENT (LC_TIME, NLW(DAY_1) + tp->tm_wday)))
+ # define a_month \
+- ((const CHAR_T *) _NL_CURRENT (LC_TIME, NLW(ABMON_1) + tp->tm_mon))
++ ((const CHAR_T *) (tp->tm_mon < 0 || tp->tm_mon > 11 \
++ ? "?" : _NL_CURRENT (LC_TIME, NLW(ABMON_1) + tp->tm_mon)))
+ # define f_month \
+- ((const CHAR_T *) _NL_CURRENT (LC_TIME, NLW(MON_1) + tp->tm_mon))
++ ((const CHAR_T *) (tp->tm_mon < 0 || tp->tm_mon > 11 \
++ ? "?" : _NL_CURRENT (LC_TIME, NLW(MON_1) + tp->tm_mon)))
+ # define ampm \
+ ((const CHAR_T *) _NL_CURRENT (LC_TIME, tp->tm_hour > 11 \
+ ? NLW(PM_STR) : NLW(AM_STR)))
+@@ -530,8 +534,10 @@ __strftime_internal (s, maxsize, format,
+ # define ap_len STRLEN (ampm)
+ #else
+ # if !HAVE_STRFTIME
+-# define f_wkday (weekday_name[tp->tm_wday])
+-# define f_month (month_name[tp->tm_mon])
++# define f_wkday (tp->tm_wday < 0 || tp->tm_wday > 6 \
++ ? "?" : weekday_name[tp->tm_wday])
++# define f_month (tp->tm_mon < 0 || tp->tm_mon > 11 \
++ ? "?" : month_name[tp->tm_mon])
+ # define a_wkday f_wkday
+ # define a_month f_month
+ # define ampm (L_("AMPM") + 2 * (tp->tm_hour > 11))
+@@ -1325,7 +1331,7 @@ __strftime_internal (s, maxsize, format,
+ *tzset_called = true;
+ }
+ # endif
+- zone = tzname[tp->tm_isdst];
++ zone = tp->tm_isdst <= 1 ? tzname[tp->tm_isdst] : "?";
+ }
+ #endif
+ if (! zone)
+Index: git/time/tst-strftime.c
+===================================================================
+--- git.orig/time/tst-strftime.c
++++ git/time/tst-strftime.c
+@@ -4,6 +4,56 @@
+ #include <time.h>
+
+
++static int
++do_bz18985 (void)
++{
++ char buf[1000];
++ struct tm ttm;
++ int rc, ret = 0;
++
++ memset (&ttm, 1, sizeof (ttm));
++ ttm.tm_zone = NULL; /* Dereferenced directly if non-NULL. */
++ rc = strftime (buf, sizeof (buf), "%a %A %b %B %c %z %Z", &ttm);
++
++ if (rc == 66)
++ {
++ const char expected[]
++ = "? ? ? ? ? ? 16843009 16843009:16843009:16843009 16844909 +467836 ?";
++ if (0 != strcmp (buf, expected))
++ {
++ printf ("expected:\n %s\ngot:\n %s\n", expected, buf);
++ ret += 1;
++ }
++ }
++ else
++ {
++ printf ("expected 66, got %d\n", rc);
++ ret += 1;
++ }
++
++ /* Check negative values as well. */
++ memset (&ttm, 0xFF, sizeof (ttm));
++ ttm.tm_zone = NULL; /* Dereferenced directly if non-NULL. */
++ rc = strftime (buf, sizeof (buf), "%a %A %b %B %c %z %Z", &ttm);
++
++ if (rc == 30)
++ {
++ const char expected[] = "? ? ? ? ? ? -1 -1:-1:-1 1899 ";
++ if (0 != strcmp (buf, expected))
++ {
++ printf ("expected:\n %s\ngot:\n %s\n", expected, buf);
++ ret += 1;
++ }
++ }
++ else
++ {
++ printf ("expected 30, got %d\n", rc);
++ ret += 1;
++ }
++
++ return ret;
++}
++
+ static struct
+ {
+ const char *fmt;
+@@ -104,7 +154,7 @@ do_test (void)
+ }
+ }
+
+- return result;
++ return result + do_bz18985 ();
+ }
+
+ #define TEST_FUNCTION do_test ()
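
Editor's note: the range checks added above substitute "?" whenever tm_wday falls outside 0..6, tm_mon outside 0..11, or tm_isdst outside 0..1, instead of indexing the locale arrays out of bounds. The following caller-side sketch only illustrates the class of input this guards against; it is not part of the patch, and the exact output depends on the locale:

    #include <stdio.h>
    #include <string.h>
    #include <time.h>

    int
    main (void)
    {
      char buf[64];
      struct tm tm;

      memset (&tm, 0, sizeof tm);
      tm.tm_wday = 9;   /* out of range: valid weekdays are 0..6 */
      tm.tm_mon = 42;   /* out of range: valid months are 0..11 */

      /* With the patched strftime this prints "? ?" rather than reading
         past the end of the abbreviated day/month locale arrays.  */
      if (strftime (buf, sizeof buf, "%a %b", &tm) != 0)
        printf ("%s\n", buf);
      return 0;
    }
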
diff --git a/yocto-poky/meta/recipes-core/glibc/glibc/CVE-2015-8777.patch b/yocto-poky/meta/recipes-core/glibc/glibc/CVE-2015-8777.patch
new file mode 100644
index 000000000..eeab72d65
--- /dev/null
+++ b/yocto-poky/meta/recipes-core/glibc/glibc/CVE-2015-8777.patch
@@ -0,0 +1,123 @@
+From a014cecd82b71b70a6a843e250e06b541ad524f7 Mon Sep 17 00:00:00 2001
+From: Florian Weimer <fweimer@redhat.com>
+Date: Thu, 15 Oct 2015 09:23:07 +0200
+Subject: [PATCH] Always enable pointer guard [BZ #18928]
+
+Honoring the LD_POINTER_GUARD environment variable in AT_SECURE mode
+has security implications. This commit enables pointer guard
+unconditionally, and the environment variable is now ignored.
+
+ [BZ #18928]
+ * sysdeps/generic/ldsodefs.h (struct rtld_global_ro): Remove
+ _dl_pointer_guard member.
+ * elf/rtld.c (_rtld_global_ro): Remove _dl_pointer_guard
+ initializer.
+ (security_init): Always set up pointer guard.
+ (process_envvars): Do not process LD_POINTER_GUARD.
+
+Upstream-Status: Backport
+CVE: CVE-2015-8777
+[Yocto # 8980]
+
+https://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commit;h=a014cecd82b71b70a6a843e250e06b541ad524f7
+
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ ChangeLog | 10 ++++++++++
+ NEWS | 13 ++++++++-----
+ elf/rtld.c | 15 ++++-----------
+ sysdeps/generic/ldsodefs.h | 3 ---
+ 4 files changed, 22 insertions(+), 19 deletions(-)
+
+Index: git/ChangeLog
+===================================================================
+--- git.orig/ChangeLog
++++ git/ChangeLog
+@@ -1,3 +1,14 @@
++2015-10-15 Florian Weimer <fweimer@redhat.com>
++
++ [BZ #18928]
++ * sysdeps/generic/ldsodefs.h (struct rtld_global_ro): Remove
++ _dl_pointer_guard member.
++ * elf/rtld.c (_rtld_global_ro): Remove _dl_pointer_guard
++ initializer.
++ (security_init): Always set up pointer guard.
++ (process_envvars): Do not process LD_POINTER_GUARD.
++
++
+ 2015-08-10 Maxim Ostapenko <m.ostapenko@partner.samsung.com>
+
+ [BZ #18778]
+Index: git/NEWS
+===================================================================
+--- git.orig/NEWS
++++ git/NEWS
+@@ -34,7 +34,10 @@ Version 2.22
+ 18533, 18534, 18536, 18539, 18540, 18542, 18544, 18545, 18546, 18547,
+ 18549, 18553, 18557, 18558, 18569, 18583, 18585, 18586, 18592, 18593,
+ 18594, 18602, 18612, 18613, 18619, 18633, 18635, 18641, 18643, 18648,
+- 18657, 18676, 18694, 18696.
++ 18657, 18676, 18694, 18696, 18928.
++
++* The LD_POINTER_GUARD environment variable can no longer be used to
++ disable the pointer guard feature. It is always enabled.
+
+ * Cache information can be queried via sysconf() function on s390 e.g. with
+ _SC_LEVEL1_ICACHE_SIZE as argument.
+Index: git/elf/rtld.c
+===================================================================
+--- git.orig/elf/rtld.c
++++ git/elf/rtld.c
+@@ -163,7 +163,6 @@ struct rtld_global_ro _rtld_global_ro at
+ ._dl_hwcap_mask = HWCAP_IMPORTANT,
+ ._dl_lazy = 1,
+ ._dl_fpu_control = _FPU_DEFAULT,
+- ._dl_pointer_guard = 1,
+ ._dl_pagesize = EXEC_PAGESIZE,
+ ._dl_inhibit_cache = 0,
+
+@@ -710,15 +709,12 @@ security_init (void)
+ #endif
+
+ /* Set up the pointer guard as well, if necessary. */
+- if (GLRO(dl_pointer_guard))
+- {
+- uintptr_t pointer_chk_guard = _dl_setup_pointer_guard (_dl_random,
+- stack_chk_guard);
++ uintptr_t pointer_chk_guard
++ = _dl_setup_pointer_guard (_dl_random, stack_chk_guard);
+ #ifdef THREAD_SET_POINTER_GUARD
+- THREAD_SET_POINTER_GUARD (pointer_chk_guard);
++ THREAD_SET_POINTER_GUARD (pointer_chk_guard);
+ #endif
+- __pointer_chk_guard_local = pointer_chk_guard;
+- }
++ __pointer_chk_guard_local = pointer_chk_guard;
+
+ /* We do not need the _dl_random value anymore. The less
+ information we leave behind, the better, so clear the
+@@ -2478,9 +2474,6 @@ process_envvars (enum mode *modep)
+ GLRO(dl_use_load_bias) = envline[14] == '1' ? -1 : 0;
+ break;
+ }
+-
+- if (memcmp (envline, "POINTER_GUARD", 13) == 0)
+- GLRO(dl_pointer_guard) = envline[14] != '0';
+ break;
+
+ case 14:
+Index: git/sysdeps/generic/ldsodefs.h
+===================================================================
+--- git.orig/sysdeps/generic/ldsodefs.h
++++ git/sysdeps/generic/ldsodefs.h
+@@ -600,9 +600,6 @@ struct rtld_global_ro
+ /* List of auditing interfaces. */
+ struct audit_ifaces *_dl_audit;
+ unsigned int _dl_naudit;
+-
+- /* 0 if internal pointer values should not be guarded, 1 if they should. */
+- EXTERN int _dl_pointer_guard;
+ };
+ # define __rtld_global_attribute__
+ # if IS_IN (rtld)
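
Editor's note: the rtld changes above make the pointer guard unconditional -- the secret used to mangle stored code pointers is always initialized, and LD_POINTER_GUARD is no longer consulted in process_envvars. The sketch below only illustrates the mangling idea; pointer_guard, mangle and demangle are made-up stand-ins, not glibc's PTR_MANGLE/PTR_DEMANGLE macros (which additionally rotate the bits and keep the guard in thread state):

    #include <stdint.h>
    #include <stdio.h>

    static uintptr_t pointer_guard;   /* glibc derives this from _dl_random */

    static uintptr_t
    mangle (uintptr_t p)
    {
      return p ^ pointer_guard;       /* forging p requires knowing the guard */
    }

    static uintptr_t
    demangle (uintptr_t p)
    {
      return p ^ pointer_guard;       /* XOR is its own inverse */
    }

    int
    main (void)
    {
      pointer_guard = (uintptr_t) 0x5a5aa5a5u;   /* stand-in for a random secret */

      uintptr_t fp = (uintptr_t) &main;
      uintptr_t stored = mangle (fp);

      printf ("raw %#lx stored %#lx demangled %#lx\n",
              (unsigned long) fp, (unsigned long) stored,
              (unsigned long) demangle (stored));
      return 0;
    }

Since the guard can no longer be switched off through the environment, stored pointers such as setjmp/longjmp targets stay protected even in AT_SECURE (setuid) processes.
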
diff --git a/yocto-poky/meta/recipes-core/glibc/glibc/CVE-2015-8779.patch b/yocto-poky/meta/recipes-core/glibc/glibc/CVE-2015-8779.patch
new file mode 100644
index 000000000..4dc93c769
--- /dev/null
+++ b/yocto-poky/meta/recipes-core/glibc/glibc/CVE-2015-8779.patch
@@ -0,0 +1,262 @@
+From 0f58539030e436449f79189b6edab17d7479796e Mon Sep 17 00:00:00 2001
+From: Paul Pluzhnikov <ppluzhnikov@google.com>
+Date: Sat, 8 Aug 2015 15:53:03 -0700
+Subject: [PATCH] Fix BZ #17905
+
+Upstream-Status: Backport
+CVE: CVE-2015-8779
+[Yocto # 8980]
+
+https://sourceware.org/git/gitweb.cgi?p=glibc.git;h=0f58539030e436449f79189b6edab17d7479796e
+
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ ChangeLog | 8 ++++++++
+ NEWS | 2 +-
+ catgets/Makefile | 9 ++++++++-
+ catgets/catgets.c | 19 ++++++++++++-------
+ catgets/open_catalog.c | 23 ++++++++++++++---------
+ catgets/tst-catgets.c | 31 +++++++++++++++++++++++++++++++
+ 6 files changed, 74 insertions(+), 18 deletions(-)
+
+Index: git/catgets/Makefile
+===================================================================
+--- git.orig/catgets/Makefile
++++ git/catgets/Makefile
+@@ -37,6 +37,7 @@ ifeq (y,$(OPTION_EGLIBC_CATGETS))
+ ifeq ($(run-built-tests),yes)
+ tests-special += $(objpfx)de/libc.cat $(objpfx)test1.cat $(objpfx)test2.cat \
+ $(objpfx)sample.SJIS.cat $(objpfx)test-gencat.out
++tests-special += $(objpfx)tst-catgets-mem.out
+ endif
+ endif
+ gencat-modules = xmalloc
+@@ -53,9 +54,11 @@ catgets-CPPFLAGS := -DNLSPATH='"$(msgcat
+
+ generated += de.msg test1.cat test1.h test2.cat test2.h sample.SJIS.cat \
+ test-gencat.h
++generated += tst-catgets.mtrace tst-catgets-mem.out
++
+ generated-dirs += de
+
+-tst-catgets-ENV = NLSPATH="$(objpfx)%l/%N.cat" LANG=de
++tst-catgets-ENV = NLSPATH="$(objpfx)%l/%N.cat" LANG=de MALLOC_TRACE=$(objpfx)tst-catgets.mtrace
+
+ ifeq ($(run-built-tests),yes)
+ # This test just checks whether the program produces any error or not.
+@@ -89,4 +92,8 @@ $(objpfx)test-gencat.out: test-gencat.sh
+ $(objpfx)sample.SJIS.cat: sample.SJIS $(objpfx)gencat
+ $(built-program-cmd) -H $(objpfx)test-gencat.h < $(word 1,$^) > $@; \
+ $(evaluate-test)
++
++$(objpfx)tst-catgets-mem.out: $(objpfx)tst-catgets.out
++ $(common-objpfx)malloc/mtrace $(objpfx)tst-catgets.mtrace > $@; \
++ $(evaluate-test)
+ endif
+Index: git/catgets/catgets.c
+===================================================================
+--- git.orig/catgets/catgets.c
++++ git/catgets/catgets.c
+@@ -16,7 +16,6 @@
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+-#include <alloca.h>
+ #include <errno.h>
+ #include <locale.h>
+ #include <nl_types.h>
+@@ -35,6 +34,7 @@ catopen (const char *cat_name, int flag)
+ __nl_catd result;
+ const char *env_var = NULL;
+ const char *nlspath = NULL;
++ char *tmp = NULL;
+
+ if (strchr (cat_name, '/') == NULL)
+ {
+@@ -54,7 +54,10 @@ catopen (const char *cat_name, int flag)
+ {
+ /* Append the system dependent directory. */
+ size_t len = strlen (nlspath) + 1 + sizeof NLSPATH;
+- char *tmp = alloca (len);
++ tmp = malloc (len);
++
++ if (__glibc_unlikely (tmp == NULL))
++ return (nl_catd) -1;
+
+ __stpcpy (__stpcpy (__stpcpy (tmp, nlspath), ":"), NLSPATH);
+ nlspath = tmp;
+@@ -65,16 +68,18 @@ catopen (const char *cat_name, int flag)
+
+ result = (__nl_catd) malloc (sizeof (*result));
+ if (result == NULL)
+- /* We cannot get enough memory. */
+- return (nl_catd) -1;
+-
+- if (__open_catalog (cat_name, nlspath, env_var, result) != 0)
++ {
++ /* We cannot get enough memory. */
++ result = (nl_catd) -1;
++ }
++ else if (__open_catalog (cat_name, nlspath, env_var, result) != 0)
+ {
+ /* Couldn't open the file. */
+ free ((void *) result);
+- return (nl_catd) -1;
++ result = (nl_catd) -1;
+ }
+
++ free (tmp);
+ return (nl_catd) result;
+ }
+
+Index: git/catgets/open_catalog.c
+===================================================================
+--- git.orig/catgets/open_catalog.c
++++ git/catgets/open_catalog.c
+@@ -47,6 +47,7 @@ __open_catalog (const char *cat_name, co
+ size_t tab_size;
+ const char *lastp;
+ int result = -1;
++ char *buf = NULL;
+
+ if (strchr (cat_name, '/') != NULL || nlspath == NULL)
+ fd = open_not_cancel_2 (cat_name, O_RDONLY);
+@@ -57,23 +58,23 @@ __open_catalog (const char *cat_name, co
+ if (__glibc_unlikely (bufact + (n) >= bufmax)) \
+ { \
+ char *old_buf = buf; \
+- bufmax += 256 + (n); \
+- buf = (char *) alloca (bufmax); \
+- memcpy (buf, old_buf, bufact); \
++ bufmax += (bufmax < 256 + (n)) ? 256 + (n) : bufmax; \
++ buf = realloc (buf, bufmax); \
++ if (__glibc_unlikely (buf == NULL)) \
++ { \
++ free (old_buf); \
++ return -1; \
++ } \
+ }
+
+ /* The RUN_NLSPATH variable contains a colon separated list of
+ descriptions where we expect to find catalogs. We have to
+ recognize certain % substitutions and stop when we found the
+ first existing file. */
+- char *buf;
+ size_t bufact;
+- size_t bufmax;
++ size_t bufmax = 0;
+ size_t len;
+
+- buf = NULL;
+- bufmax = 0;
+-
+ fd = -1;
+ while (*run_nlspath != '\0')
+ {
+@@ -188,7 +189,10 @@ __open_catalog (const char *cat_name, co
+
+ /* Avoid dealing with directories and block devices */
+ if (__builtin_expect (fd, 0) < 0)
+- return -1;
++ {
++ free (buf);
++ return -1;
++ }
+
+ if (__builtin_expect (__fxstat64 (_STAT_VER, fd, &st), 0) < 0)
+ goto close_unlock_return;
+@@ -325,6 +329,7 @@ __open_catalog (const char *cat_name, co
+ /* Release the lock again. */
+ close_unlock_return:
+ close_not_cancel_no_status (fd);
++ free (buf);
+
+ return result;
+ }
+Index: git/catgets/tst-catgets.c
+===================================================================
+--- git.orig/catgets/tst-catgets.c
++++ git/catgets/tst-catgets.c
+@@ -1,7 +1,10 @@
++#include <assert.h>
+ #include <mcheck.h>
+ #include <nl_types.h>
+ #include <stdio.h>
++#include <stdlib.h>
+ #include <string.h>
++#include <sys/resource.h>
+
+
+ static const char *msgs[] =
+@@ -12,6 +15,33 @@ static const char *msgs[] =
+ };
+ #define nmsgs (sizeof (msgs) / sizeof (msgs[0]))
+
++
++/* Test for unbounded alloca. */
++static int
++do_bz17905 (void)
++{
++ char *buf;
++ struct rlimit rl;
++ nl_catd result;
++
++ const int sz = 1024 * 1024;
++
++ getrlimit (RLIMIT_STACK, &rl);
++ rl.rlim_cur = sz;
++ setrlimit (RLIMIT_STACK, &rl);
++
++ buf = malloc (sz + 1);
++ memset (buf, 'A', sz);
++ buf[sz] = '\0';
++ setenv ("NLSPATH", buf, 1);
++
++ result = catopen (buf, NL_CAT_LOCALE);
++ assert (result == (nl_catd) -1);
++
++ free (buf);
++ return 0;
++}
++
+ #define ROUNDS 5
+
+ static int
+@@ -62,6 +92,7 @@ do_test (void)
+ }
+ }
+
++ result += do_bz17905 ();
+ return result;
+ }
+
+Index: git/ChangeLog
+===================================================================
+--- git.orig/ChangeLog
++++ git/ChangeLog
+@@ -1,3 +1,11 @@
++2015-08-08 Paul Pluzhnikov <ppluzhnikov@google.com>
++
++ [BZ #17905]
++ * catgets/Makefile (tst-catgets-mem): New test.
++ * catgets/catgets.c (catopen): Don't use unbounded alloca.
++ * catgets/open_catalog.c (__open_catalog): Likewise.
++ * catgets/tst-catgets.c (do_bz17905): Test unbounded alloca.
++
+ 2015-10-15 Florian Weimer <fweimer@redhat.com>
+
+ [BZ #18928]
+Index: git/NEWS
+===================================================================
+--- git.orig/NEWS
++++ git/NEWS
+@@ -9,7 +9,7 @@ Version 2.22.1
+
+ * The following bugs are resolved with this release:
+
+- 18778, 18781, 18787.
++ 18778, 18781, 18787, 17905.
+
+ Version 2.22
+
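
Editor's note: the catgets changes above replace the unbounded alloca of the NLSPATH string with heap allocation, so an oversized attacker-controlled NLSPATH now makes catopen fail cleanly instead of overflowing the stack. A small reproduction sketch along the lines of the new tst-catgets check; the catalog name "demo" and the 8 MiB size are arbitrary choices for illustration:

    #include <nl_types.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int
    main (void)
    {
      const size_t sz = 8 * 1024 * 1024;   /* larger than a typical stack limit */
      char *path = malloc (sz + 1);
      if (path == NULL)
        return 1;

      memset (path, 'A', sz);
      path[sz] = '\0';
      setenv ("NLSPATH", path, 1);   /* consulted for names without a '/' */

      nl_catd cat = catopen ("demo", NL_CAT_LOCALE);
      if (cat == (nl_catd) -1)
        puts ("catopen failed cleanly (expected with the malloc-based path)");
      else
        catclose (cat);

      free (path);
      return 0;
    }
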
diff --git a/yocto-poky/meta/recipes-core/glibc/glibc/CVE-2015-9761_1.patch b/yocto-poky/meta/recipes-core/glibc/glibc/CVE-2015-9761_1.patch
new file mode 100644
index 000000000..3aca91331
--- /dev/null
+++ b/yocto-poky/meta/recipes-core/glibc/glibc/CVE-2015-9761_1.patch
@@ -0,0 +1,1039 @@
+From e02cabecf0d025ec4f4ddee290bdf7aadb873bb3 Mon Sep 17 00:00:00 2001
+From: Joseph Myers <joseph@codesourcery.com>
+Date: Tue, 24 Nov 2015 22:24:52 +0000
+Subject: [PATCH] Refactor strtod parsing of NaN payloads.
+
+The nan* functions handle their string argument by constructing a
+NAN(...) string on the stack as a VLA and passing it to strtod
+functions.
+
+This approach has problems discussed in bug 16961 and bug 16962: the
+stack usage is unbounded, and it gives incorrect results in certain
+cases where the argument is not a valid n-char-sequence.
+
+The natural fix for both issues is to refactor the NaN payload parsing
+out of strtod into a separate function that the nan* functions can
+call directly, so that no temporary string needs constructing on the
+stack at all. This patch does that refactoring in preparation for
+fixing those bugs (but without actually using the new functions from
+nan* - which will also require exporting them from libc at version
+GLIBC_PRIVATE). This patch is not intended to change any user-visible
+behavior, so no tests are added (fixes for the above bugs will of
+course add tests for them).
+
+This patch builds on my recent fixes for strtol and strtod issues in
+Turkish locales. Given those fixes, the parsing of NaN payloads is
+locale-independent; thus, the new functions do not need to take a
+locale_t argument.
+
+Tested for x86_64, x86, mips64 and powerpc.
+
+ * stdlib/strtod_nan.c: New file.
+ * stdlib/strtod_nan_double.h: Likewise.
+ * stdlib/strtod_nan_float.h: Likewise.
+ * stdlib/strtod_nan_main.c: Likewise.
+ * stdlib/strtod_nan_narrow.h: Likewise.
+ * stdlib/strtod_nan_wide.h: Likewise.
+ * stdlib/strtof_nan.c: Likewise.
+ * stdlib/strtold_nan.c: Likewise.
+ * sysdeps/ieee754/ldbl-128/strtod_nan_ldouble.h: Likewise.
+ * sysdeps/ieee754/ldbl-128ibm/strtod_nan_ldouble.h: Likewise.
+ * sysdeps/ieee754/ldbl-96/strtod_nan_ldouble.h: Likewise.
+ * wcsmbs/wcstod_nan.c: Likewise.
+ * wcsmbs/wcstof_nan.c: Likewise.
+ * wcsmbs/wcstold_nan.c: Likewise.
+ * stdlib/Makefile (routines): Add strtof_nan, strtod_nan and
+ strtold_nan.
+ * wcsmbs/Makefile (routines): Add wcstod_nan, wcstold_nan and
+ wcstof_nan.
+ * include/stdlib.h (__strtof_nan): Declare and use
+ libc_hidden_proto.
+ (__strtod_nan): Likewise.
+ (__strtold_nan): Likewise.
+ (__wcstof_nan): Likewise.
+ (__wcstod_nan): Likewise.
+ (__wcstold_nan): Likewise.
+ * include/wchar.h (____wcstoull_l_internal): Declare.
+ * stdlib/strtod_l.c: Do not include <ieee754.h>.
+ (____strtoull_l_internal): Remove declaration.
+ (STRTOF_NAN): Define macro.
+ (SET_MANTISSA): Remove macro.
+ (STRTOULL): Likewise.
+ (____STRTOF_INTERNAL): Use STRTOF_NAN to parse NaN payload.
+ * stdlib/strtof_l.c (____strtoull_l_internal): Remove declaration.
+ (STRTOF_NAN): Define macro.
+ (SET_MANTISSA): Remove macro.
+ * sysdeps/ieee754/ldbl-128/strtold_l.c (STRTOF_NAN): Define macro.
+ (SET_MANTISSA): Remove macro.
+ * sysdeps/ieee754/ldbl-128ibm/strtold_l.c (STRTOF_NAN): Define
+ macro.
+ (SET_MANTISSA): Remove macro.
+ * sysdeps/ieee754/ldbl-64-128/strtold_l.c (STRTOF_NAN): Define
+ macro.
+ (SET_MANTISSA): Remove macro.
+ * sysdeps/ieee754/ldbl-96/strtold_l.c (STRTOF_NAN): Define macro.
+ (SET_MANTISSA): Remove macro.
+ * wcsmbs/wcstod_l.c (____wcstoull_l_internal): Remove declaration.
+ * wcsmbs/wcstof_l.c (____wcstoull_l_internal): Likewise.
+ * wcsmbs/wcstold_l.c (____wcstoull_l_internal): Likewise.
+
+Upstream-Status: Backport
+CVE: CVE-2015-9761 patch #1
+[Yocto # 8980]
+
+https://sourceware.org/git/gitweb.cgi?p=glibc.git;h=e02cabecf0d025ec4f4ddee290bdf7aadb873bb3
+
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ ChangeLog | 49 ++++++++++++++++++
+ include/stdlib.h | 18 +++++++
+ include/wchar.h | 3 ++
+ stdlib/Makefile | 1 +
+ stdlib/strtod_l.c | 48 ++++--------------
+ stdlib/strtod_nan.c | 24 +++++++++
+ stdlib/strtod_nan_double.h | 30 +++++++++++
+ stdlib/strtod_nan_float.h | 29 +++++++++++
+ stdlib/strtod_nan_main.c | 63 ++++++++++++++++++++++++
+ stdlib/strtod_nan_narrow.h | 22 +++++++++
+ stdlib/strtod_nan_wide.h | 22 +++++++++
+ stdlib/strtof_l.c | 11 +----
+ stdlib/strtof_nan.c | 24 +++++++++
+ stdlib/strtold_nan.c | 30 +++++++++++
+ sysdeps/ieee754/ldbl-128/strtod_nan_ldouble.h | 33 +++++++++++++
+ sysdeps/ieee754/ldbl-128/strtold_l.c | 13 +----
+ sysdeps/ieee754/ldbl-128ibm/strtod_nan_ldouble.h | 30 +++++++++++
+ sysdeps/ieee754/ldbl-128ibm/strtold_l.c | 10 +---
+ sysdeps/ieee754/ldbl-64-128/strtold_l.c | 13 +----
+ sysdeps/ieee754/ldbl-96/strtod_nan_ldouble.h | 30 +++++++++++
+ sysdeps/ieee754/ldbl-96/strtold_l.c | 10 +---
+ wcsmbs/Makefile | 1 +
+ wcsmbs/wcstod_l.c | 3 --
+ wcsmbs/wcstod_nan.c | 23 +++++++++
+ wcsmbs/wcstof_l.c | 3 --
+ wcsmbs/wcstof_nan.c | 23 +++++++++
+ wcsmbs/wcstold_l.c | 3 --
+ wcsmbs/wcstold_nan.c | 30 +++++++++++
+ 28 files changed, 504 insertions(+), 95 deletions(-)
+ create mode 100644 stdlib/strtod_nan.c
+ create mode 100644 stdlib/strtod_nan_double.h
+ create mode 100644 stdlib/strtod_nan_float.h
+ create mode 100644 stdlib/strtod_nan_main.c
+ create mode 100644 stdlib/strtod_nan_narrow.h
+ create mode 100644 stdlib/strtod_nan_wide.h
+ create mode 100644 stdlib/strtof_nan.c
+ create mode 100644 stdlib/strtold_nan.c
+ create mode 100644 sysdeps/ieee754/ldbl-128/strtod_nan_ldouble.h
+ create mode 100644 sysdeps/ieee754/ldbl-128ibm/strtod_nan_ldouble.h
+ create mode 100644 sysdeps/ieee754/ldbl-96/strtod_nan_ldouble.h
+ create mode 100644 wcsmbs/wcstod_nan.c
+ create mode 100644 wcsmbs/wcstof_nan.c
+ create mode 100644 wcsmbs/wcstold_nan.c
+
+Index: git/include/stdlib.h
+===================================================================
+--- git.orig/include/stdlib.h
++++ git/include/stdlib.h
+@@ -203,6 +203,24 @@ libc_hidden_proto (strtoll)
+ libc_hidden_proto (strtoul)
+ libc_hidden_proto (strtoull)
+
++extern float __strtof_nan (const char *, char **, char) internal_function;
++extern double __strtod_nan (const char *, char **, char) internal_function;
++extern long double __strtold_nan (const char *, char **, char)
++ internal_function;
++extern float __wcstof_nan (const wchar_t *, wchar_t **, wchar_t)
++ internal_function;
++extern double __wcstod_nan (const wchar_t *, wchar_t **, wchar_t)
++ internal_function;
++extern long double __wcstold_nan (const wchar_t *, wchar_t **, wchar_t)
++ internal_function;
++
++libc_hidden_proto (__strtof_nan)
++libc_hidden_proto (__strtod_nan)
++libc_hidden_proto (__strtold_nan)
++libc_hidden_proto (__wcstof_nan)
++libc_hidden_proto (__wcstod_nan)
++libc_hidden_proto (__wcstold_nan)
++
+ extern char *__ecvt (double __value, int __ndigit, int *__restrict __decpt,
+ int *__restrict __sign);
+ extern char *__fcvt (double __value, int __ndigit, int *__restrict __decpt,
+Index: git/include/wchar.h
+===================================================================
+--- git.orig/include/wchar.h
++++ git/include/wchar.h
+@@ -52,6 +52,9 @@ extern unsigned long long int __wcstoull
+ __restrict __endptr,
+ int __base,
+ int __group) __THROW;
++extern unsigned long long int ____wcstoull_l_internal (const wchar_t *,
++ wchar_t **, int, int,
++ __locale_t);
+ libc_hidden_proto (__wcstof_internal)
+ libc_hidden_proto (__wcstod_internal)
+ libc_hidden_proto (__wcstold_internal)
+Index: git/stdlib/Makefile
+===================================================================
+--- git.orig/stdlib/Makefile
++++ git/stdlib/Makefile
+@@ -51,6 +51,7 @@ routines-y := \
+ strtol_l strtoul_l strtoll_l strtoull_l \
+ strtof strtod strtold \
+ strtof_l strtod_l strtold_l \
++ strtof_nan strtod_nan strtold_nan \
+ system canonicalize \
+ a64l l64a \
+ getsubopt xpg_basename \
+Index: git/stdlib/strtod_l.c
+===================================================================
+--- git.orig/stdlib/strtod_l.c
++++ git/stdlib/strtod_l.c
+@@ -21,8 +21,6 @@
+ #include <xlocale.h>
+
+ extern double ____strtod_l_internal (const char *, char **, int, __locale_t);
+-extern unsigned long long int ____strtoull_l_internal (const char *, char **,
+- int, int, __locale_t);
+
+ /* Configuration part. These macros are defined by `strtold.c',
+ `strtof.c', `wcstod.c', `wcstold.c', and `wcstof.c' to produce the
+@@ -34,27 +32,20 @@ extern unsigned long long int ____strtou
+ # ifdef USE_WIDE_CHAR
+ # define STRTOF wcstod_l
+ # define __STRTOF __wcstod_l
++# define STRTOF_NAN __wcstod_nan
+ # else
+ # define STRTOF strtod_l
+ # define __STRTOF __strtod_l
++# define STRTOF_NAN __strtod_nan
+ # endif
+ # define MPN2FLOAT __mpn_construct_double
+ # define FLOAT_HUGE_VAL HUGE_VAL
+-# define SET_MANTISSA(flt, mant) \
+- do { union ieee754_double u; \
+- u.d = (flt); \
+- u.ieee_nan.mantissa0 = (mant) >> 32; \
+- u.ieee_nan.mantissa1 = (mant); \
+- if ((u.ieee.mantissa0 | u.ieee.mantissa1) != 0) \
+- (flt) = u.d; \
+- } while (0)
+ #endif
+ /* End of configuration part. */
+
+ #include <ctype.h>
+ #include <errno.h>
+ #include <float.h>
+-#include <ieee754.h>
+ #include "../locale/localeinfo.h"
+ #include <locale.h>
+ #include <math.h>
+@@ -105,7 +96,6 @@ extern unsigned long long int ____strtou
+ # define TOLOWER_C(Ch) __towlower_l ((Ch), _nl_C_locobj_ptr)
+ # define STRNCASECMP(S1, S2, N) \
+ __wcsncasecmp_l ((S1), (S2), (N), _nl_C_locobj_ptr)
+-# define STRTOULL(S, E, B) ____wcstoull_l_internal ((S), (E), (B), 0, loc)
+ #else
+ # define STRING_TYPE char
+ # define CHAR_TYPE char
+@@ -117,7 +107,6 @@ extern unsigned long long int ____strtou
+ # define TOLOWER_C(Ch) __tolower_l ((Ch), _nl_C_locobj_ptr)
+ # define STRNCASECMP(S1, S2, N) \
+ __strncasecmp_l ((S1), (S2), (N), _nl_C_locobj_ptr)
+-# define STRTOULL(S, E, B) ____strtoull_l_internal ((S), (E), (B), 0, loc)
+ #endif
+
+
+@@ -668,33 +657,14 @@ ____STRTOF_INTERNAL (nptr, endptr, group
+ if (*cp == L_('('))
+ {
+ const STRING_TYPE *startp = cp;
+- do
+- ++cp;
+- while ((*cp >= L_('0') && *cp <= L_('9'))
+- || ({ CHAR_TYPE lo = TOLOWER (*cp);
+- lo >= L_('a') && lo <= L_('z'); })
+- || *cp == L_('_'));
+-
+- if (*cp != L_(')'))
+- /* The closing brace is missing. Only match the NAN
+- part. */
+- cp = startp;
++ STRING_TYPE *endp;
++ retval = STRTOF_NAN (cp + 1, &endp, L_(')'));
++ if (*endp == L_(')'))
++ /* Consume the closing parenthesis. */
++ cp = endp + 1;
+ else
+- {
+- /* This is a system-dependent way to specify the
+- bitmask used for the NaN. We expect it to be
+- a number which is put in the mantissa of the
+- number. */
+- STRING_TYPE *endp;
+- unsigned long long int mant;
+-
+- mant = STRTOULL (startp + 1, &endp, 0);
+- if (endp == cp)
+- SET_MANTISSA (retval, mant);
+-
+- /* Consume the closing brace. */
+- ++cp;
+- }
++ /* Only match the NAN part. */
++ cp = startp;
+ }
+
+ if (endptr != NULL)
+Index: git/stdlib/strtod_nan.c
+===================================================================
+--- /dev/null
++++ git/stdlib/strtod_nan.c
+@@ -0,0 +1,24 @@
++/* Convert string for NaN payload to corresponding NaN. Narrow
++ strings, double.
++ Copyright (C) 2015 Free Software Foundation, Inc.
++ This file is part of the GNU C Library.
++
++ The GNU C Library is free software; you can redistribute it and/or
++ modify it under the terms of the GNU Lesser General Public
++ License as published by the Free Software Foundation; either
++ version 2.1 of the License, or (at your option) any later version.
++
++ The GNU C Library is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ Lesser General Public License for more details.
++
++ You should have received a copy of the GNU Lesser General Public
++ License along with the GNU C Library; if not, see
++ <http://www.gnu.org/licenses/>. */
++
++#include <strtod_nan_narrow.h>
++#include <strtod_nan_double.h>
++
++#define STRTOD_NAN __strtod_nan
++#include <strtod_nan_main.c>
+Index: git/stdlib/strtod_nan_double.h
+===================================================================
+--- /dev/null
++++ git/stdlib/strtod_nan_double.h
+@@ -0,0 +1,30 @@
++/* Convert string for NaN payload to corresponding NaN. For double.
++ Copyright (C) 1997-2015 Free Software Foundation, Inc.
++ This file is part of the GNU C Library.
++
++ The GNU C Library is free software; you can redistribute it and/or
++ modify it under the terms of the GNU Lesser General Public
++ License as published by the Free Software Foundation; either
++ version 2.1 of the License, or (at your option) any later version.
++
++ The GNU C Library is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ Lesser General Public License for more details.
++
++ You should have received a copy of the GNU Lesser General Public
++ License along with the GNU C Library; if not, see
++ <http://www.gnu.org/licenses/>. */
++
++#define FLOAT double
++#define SET_MANTISSA(flt, mant) \
++ do \
++ { \
++ union ieee754_double u; \
++ u.d = (flt); \
++ u.ieee_nan.mantissa0 = (mant) >> 32; \
++ u.ieee_nan.mantissa1 = (mant); \
++ if ((u.ieee.mantissa0 | u.ieee.mantissa1) != 0) \
++ (flt) = u.d; \
++ } \
++ while (0)
+Index: git/stdlib/strtod_nan_float.h
+===================================================================
+--- /dev/null
++++ git/stdlib/strtod_nan_float.h
+@@ -0,0 +1,29 @@
++/* Convert string for NaN payload to corresponding NaN. For float.
++ Copyright (C) 1997-2015 Free Software Foundation, Inc.
++ This file is part of the GNU C Library.
++
++ The GNU C Library is free software; you can redistribute it and/or
++ modify it under the terms of the GNU Lesser General Public
++ License as published by the Free Software Foundation; either
++ version 2.1 of the License, or (at your option) any later version.
++
++ The GNU C Library is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ Lesser General Public License for more details.
++
++ You should have received a copy of the GNU Lesser General Public
++ License along with the GNU C Library; if not, see
++ <http://www.gnu.org/licenses/>. */
++
++#define FLOAT float
++#define SET_MANTISSA(flt, mant) \
++ do \
++ { \
++ union ieee754_float u; \
++ u.f = (flt); \
++ u.ieee_nan.mantissa = (mant); \
++ if (u.ieee.mantissa != 0) \
++ (flt) = u.f; \
++ } \
++ while (0)
+Index: git/stdlib/strtod_nan_main.c
+===================================================================
+--- /dev/null
++++ git/stdlib/strtod_nan_main.c
+@@ -0,0 +1,63 @@
++/* Convert string for NaN payload to corresponding NaN.
++ Copyright (C) 1997-2015 Free Software Foundation, Inc.
++ This file is part of the GNU C Library.
++
++ The GNU C Library is free software; you can redistribute it and/or
++ modify it under the terms of the GNU Lesser General Public
++ License as published by the Free Software Foundation; either
++ version 2.1 of the License, or (at your option) any later version.
++
++ The GNU C Library is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ Lesser General Public License for more details.
++
++ You should have received a copy of the GNU Lesser General Public
++ License along with the GNU C Library; if not, see
++ <http://www.gnu.org/licenses/>. */
++
++#include <ieee754.h>
++#include <locale.h>
++#include <math.h>
++#include <stdlib.h>
++#include <wchar.h>
++
++
++/* If STR starts with an optional n-char-sequence as defined by ISO C
++ (a sequence of ASCII letters, digits and underscores), followed by
++ ENDC, return a NaN whose payload is set based on STR. Otherwise,
++ return a default NAN. If ENDPTR is not NULL, set *ENDPTR to point
++ to the character after the initial n-char-sequence. */
++
++internal_function
++FLOAT
++STRTOD_NAN (const STRING_TYPE *str, STRING_TYPE **endptr, STRING_TYPE endc)
++{
++ const STRING_TYPE *cp = str;
++
++ while ((*cp >= L_('0') && *cp <= L_('9'))
++ || (*cp >= L_('A') && *cp <= L_('Z'))
++ || (*cp >= L_('a') && *cp <= L_('z'))
++ || *cp == L_('_'))
++ ++cp;
++
++ FLOAT retval = NAN;
++ if (*cp != endc)
++ goto out;
++
++ /* This is a system-dependent way to specify the bitmask used for
++ the NaN. We expect it to be a number which is put in the
++ mantissa of the number. */
++ STRING_TYPE *endp;
++ unsigned long long int mant;
++
++ mant = STRTOULL (str, &endp, 0);
++ if (endp == cp)
++ SET_MANTISSA (retval, mant);
++
++ out:
++ if (endptr != NULL)
++ *endptr = (STRING_TYPE *) cp;
++ return retval;
++}
++libc_hidden_def (STRTOD_NAN)
+Index: git/stdlib/strtod_nan_narrow.h
+===================================================================
+--- /dev/null
++++ git/stdlib/strtod_nan_narrow.h
+@@ -0,0 +1,22 @@
++/* Convert string for NaN payload to corresponding NaN. Narrow strings.
++ Copyright (C) 1997-2015 Free Software Foundation, Inc.
++ This file is part of the GNU C Library.
++
++ The GNU C Library is free software; you can redistribute it and/or
++ modify it under the terms of the GNU Lesser General Public
++ License as published by the Free Software Foundation; either
++ version 2.1 of the License, or (at your option) any later version.
++
++ The GNU C Library is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ Lesser General Public License for more details.
++
++ You should have received a copy of the GNU Lesser General Public
++ License along with the GNU C Library; if not, see
++ <http://www.gnu.org/licenses/>. */
++
++#define STRING_TYPE char
++#define L_(Ch) Ch
++#define STRTOULL(S, E, B) ____strtoull_l_internal ((S), (E), (B), 0, \
++ _nl_C_locobj_ptr)
+Index: git/stdlib/strtod_nan_wide.h
+===================================================================
+--- /dev/null
++++ git/stdlib/strtod_nan_wide.h
+@@ -0,0 +1,22 @@
++/* Convert string for NaN payload to corresponding NaN. Wide strings.
++ Copyright (C) 1997-2015 Free Software Foundation, Inc.
++ This file is part of the GNU C Library.
++
++ The GNU C Library is free software; you can redistribute it and/or
++ modify it under the terms of the GNU Lesser General Public
++ License as published by the Free Software Foundation; either
++ version 2.1 of the License, or (at your option) any later version.
++
++ The GNU C Library is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ Lesser General Public License for more details.
++
++ You should have received a copy of the GNU Lesser General Public
++ License along with the GNU C Library; if not, see
++ <http://www.gnu.org/licenses/>. */
++
++#define STRING_TYPE wchar_t
++#define L_(Ch) L##Ch
++#define STRTOULL(S, E, B) ____wcstoull_l_internal ((S), (E), (B), 0, \
++ _nl_C_locobj_ptr)
+Index: git/stdlib/strtof_l.c
+===================================================================
+--- git.orig/stdlib/strtof_l.c
++++ git/stdlib/strtof_l.c
+@@ -20,26 +20,19 @@
+ #include <xlocale.h>
+
+ extern float ____strtof_l_internal (const char *, char **, int, __locale_t);
+-extern unsigned long long int ____strtoull_l_internal (const char *, char **,
+- int, int, __locale_t);
+
+ #define FLOAT float
+ #define FLT FLT
+ #ifdef USE_WIDE_CHAR
+ # define STRTOF wcstof_l
+ # define __STRTOF __wcstof_l
++# define STRTOF_NAN __wcstof_nan
+ #else
+ # define STRTOF strtof_l
+ # define __STRTOF __strtof_l
++# define STRTOF_NAN __strtof_nan
+ #endif
+ #define MPN2FLOAT __mpn_construct_float
+ #define FLOAT_HUGE_VAL HUGE_VALF
+-#define SET_MANTISSA(flt, mant) \
+- do { union ieee754_float u; \
+- u.f = (flt); \
+- u.ieee_nan.mantissa = (mant); \
+- if (u.ieee.mantissa != 0) \
+- (flt) = u.f; \
+- } while (0)
+
+ #include "strtod_l.c"
+Index: git/stdlib/strtof_nan.c
+===================================================================
+--- /dev/null
++++ git/stdlib/strtof_nan.c
+@@ -0,0 +1,24 @@
++/* Convert string for NaN payload to corresponding NaN. Narrow
++ strings, float.
++ Copyright (C) 2015 Free Software Foundation, Inc.
++ This file is part of the GNU C Library.
++
++ The GNU C Library is free software; you can redistribute it and/or
++ modify it under the terms of the GNU Lesser General Public
++ License as published by the Free Software Foundation; either
++ version 2.1 of the License, or (at your option) any later version.
++
++ The GNU C Library is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ Lesser General Public License for more details.
++
++ You should have received a copy of the GNU Lesser General Public
++ License along with the GNU C Library; if not, see
++ <http://www.gnu.org/licenses/>. */
++
++#include <strtod_nan_narrow.h>
++#include <strtod_nan_float.h>
++
++#define STRTOD_NAN __strtof_nan
++#include <strtod_nan_main.c>
+Index: git/stdlib/strtold_nan.c
+===================================================================
+--- /dev/null
++++ git/stdlib/strtold_nan.c
+@@ -0,0 +1,30 @@
++/* Convert string for NaN payload to corresponding NaN. Narrow
++ strings, long double.
++ Copyright (C) 2015 Free Software Foundation, Inc.
++ This file is part of the GNU C Library.
++
++ The GNU C Library is free software; you can redistribute it and/or
++ modify it under the terms of the GNU Lesser General Public
++ License as published by the Free Software Foundation; either
++ version 2.1 of the License, or (at your option) any later version.
++
++ The GNU C Library is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ Lesser General Public License for more details.
++
++ You should have received a copy of the GNU Lesser General Public
++ License along with the GNU C Library; if not, see
++ <http://www.gnu.org/licenses/>. */
++
++#include <math.h>
++
++/* This function is unused if long double and double have the same
++ representation. */
++#ifndef __NO_LONG_DOUBLE_MATH
++# include <strtod_nan_narrow.h>
++# include <strtod_nan_ldouble.h>
++
++# define STRTOD_NAN __strtold_nan
++# include <strtod_nan_main.c>
++#endif
+Index: git/sysdeps/ieee754/ldbl-128/strtod_nan_ldouble.h
+===================================================================
+--- /dev/null
++++ git/sysdeps/ieee754/ldbl-128/strtod_nan_ldouble.h
+@@ -0,0 +1,33 @@
++/* Convert string for NaN payload to corresponding NaN. For ldbl-128.
++ Copyright (C) 1997-2015 Free Software Foundation, Inc.
++ This file is part of the GNU C Library.
++
++ The GNU C Library is free software; you can redistribute it and/or
++ modify it under the terms of the GNU Lesser General Public
++ License as published by the Free Software Foundation; either
++ version 2.1 of the License, or (at your option) any later version.
++
++ The GNU C Library is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ Lesser General Public License for more details.
++
++ You should have received a copy of the GNU Lesser General Public
++ License along with the GNU C Library; if not, see
++ <http://www.gnu.org/licenses/>. */
++
++#define FLOAT long double
++#define SET_MANTISSA(flt, mant) \
++ do \
++ { \
++ union ieee854_long_double u; \
++ u.d = (flt); \
++ u.ieee_nan.mantissa0 = 0; \
++ u.ieee_nan.mantissa1 = 0; \
++ u.ieee_nan.mantissa2 = (mant) >> 32; \
++ u.ieee_nan.mantissa3 = (mant); \
++ if ((u.ieee.mantissa0 | u.ieee.mantissa1 \
++ | u.ieee.mantissa2 | u.ieee.mantissa3) != 0) \
++ (flt) = u.d; \
++ } \
++ while (0)
+Index: git/sysdeps/ieee754/ldbl-128/strtold_l.c
+===================================================================
+--- git.orig/sysdeps/ieee754/ldbl-128/strtold_l.c
++++ git/sysdeps/ieee754/ldbl-128/strtold_l.c
+@@ -25,22 +25,13 @@
+ #ifdef USE_WIDE_CHAR
+ # define STRTOF wcstold_l
+ # define __STRTOF __wcstold_l
++# define STRTOF_NAN __wcstold_nan
+ #else
+ # define STRTOF strtold_l
+ # define __STRTOF __strtold_l
++# define STRTOF_NAN __strtold_nan
+ #endif
+ #define MPN2FLOAT __mpn_construct_long_double
+ #define FLOAT_HUGE_VAL HUGE_VALL
+-#define SET_MANTISSA(flt, mant) \
+- do { union ieee854_long_double u; \
+- u.d = (flt); \
+- u.ieee_nan.mantissa0 = 0; \
+- u.ieee_nan.mantissa1 = 0; \
+- u.ieee_nan.mantissa2 = (mant) >> 32; \
+- u.ieee_nan.mantissa3 = (mant); \
+- if ((u.ieee.mantissa0 | u.ieee.mantissa1 \
+- | u.ieee.mantissa2 | u.ieee.mantissa3) != 0) \
+- (flt) = u.d; \
+- } while (0)
+
+ #include <strtod_l.c>
+Index: git/sysdeps/ieee754/ldbl-128ibm/strtod_nan_ldouble.h
+===================================================================
+--- /dev/null
++++ git/sysdeps/ieee754/ldbl-128ibm/strtod_nan_ldouble.h
+@@ -0,0 +1,30 @@
++/* Convert string for NaN payload to corresponding NaN. For ldbl-128ibm.
++ Copyright (C) 1997-2015 Free Software Foundation, Inc.
++ This file is part of the GNU C Library.
++
++ The GNU C Library is free software; you can redistribute it and/or
++ modify it under the terms of the GNU Lesser General Public
++ License as published by the Free Software Foundation; either
++ version 2.1 of the License, or (at your option) any later version.
++
++ The GNU C Library is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ Lesser General Public License for more details.
++
++ You should have received a copy of the GNU Lesser General Public
++ License along with the GNU C Library; if not, see
++ <http://www.gnu.org/licenses/>. */
++
++#define FLOAT long double
++#define SET_MANTISSA(flt, mant) \
++ do \
++ { \
++ union ibm_extended_long_double u; \
++ u.ld = (flt); \
++ u.d[0].ieee_nan.mantissa0 = (mant) >> 32; \
++ u.d[0].ieee_nan.mantissa1 = (mant); \
++ if ((u.d[0].ieee.mantissa0 | u.d[0].ieee.mantissa1) != 0) \
++ (flt) = u.ld; \
++ } \
++ while (0)
+Index: git/sysdeps/ieee754/ldbl-128ibm/strtold_l.c
+===================================================================
+--- git.orig/sysdeps/ieee754/ldbl-128ibm/strtold_l.c
++++ git/sysdeps/ieee754/ldbl-128ibm/strtold_l.c
+@@ -30,25 +30,19 @@ extern long double ____new_wcstold_l (co
+ # define STRTOF __new_wcstold_l
+ # define __STRTOF ____new_wcstold_l
+ # define ____STRTOF_INTERNAL ____wcstold_l_internal
++# define STRTOF_NAN __wcstold_nan
+ #else
+ extern long double ____new_strtold_l (const char *, char **, __locale_t);
+ # define STRTOF __new_strtold_l
+ # define __STRTOF ____new_strtold_l
+ # define ____STRTOF_INTERNAL ____strtold_l_internal
++# define STRTOF_NAN __strtold_nan
+ #endif
+ extern __typeof (__STRTOF) STRTOF;
+ libc_hidden_proto (__STRTOF)
+ libc_hidden_proto (STRTOF)
+ #define MPN2FLOAT __mpn_construct_long_double
+ #define FLOAT_HUGE_VAL HUGE_VALL
+-# define SET_MANTISSA(flt, mant) \
+- do { union ibm_extended_long_double u; \
+- u.ld = (flt); \
+- u.d[0].ieee_nan.mantissa0 = (mant) >> 32; \
+- u.d[0].ieee_nan.mantissa1 = (mant); \
+- if ((u.d[0].ieee.mantissa0 | u.d[0].ieee.mantissa1) != 0) \
+- (flt) = u.ld; \
+- } while (0)
+
+ #include <strtod_l.c>
+
+Index: git/sysdeps/ieee754/ldbl-64-128/strtold_l.c
+===================================================================
+--- git.orig/sysdeps/ieee754/ldbl-64-128/strtold_l.c
++++ git/sysdeps/ieee754/ldbl-64-128/strtold_l.c
+@@ -30,28 +30,19 @@ extern long double ____new_wcstold_l (co
+ # define STRTOF __new_wcstold_l
+ # define __STRTOF ____new_wcstold_l
+ # define ____STRTOF_INTERNAL ____wcstold_l_internal
++# define STRTOF_NAN __wcstold_nan
+ #else
+ extern long double ____new_strtold_l (const char *, char **, __locale_t);
+ # define STRTOF __new_strtold_l
+ # define __STRTOF ____new_strtold_l
+ # define ____STRTOF_INTERNAL ____strtold_l_internal
++# define STRTOF_NAN __strtold_nan
+ #endif
+ extern __typeof (__STRTOF) STRTOF;
+ libc_hidden_proto (__STRTOF)
+ libc_hidden_proto (STRTOF)
+ #define MPN2FLOAT __mpn_construct_long_double
+ #define FLOAT_HUGE_VAL HUGE_VALL
+-#define SET_MANTISSA(flt, mant) \
+- do { union ieee854_long_double u; \
+- u.d = (flt); \
+- u.ieee_nan.mantissa0 = 0; \
+- u.ieee_nan.mantissa1 = 0; \
+- u.ieee_nan.mantissa2 = (mant) >> 32; \
+- u.ieee_nan.mantissa3 = (mant); \
+- if ((u.ieee.mantissa0 | u.ieee.mantissa1 \
+- | u.ieee.mantissa2 | u.ieee.mantissa3) != 0) \
+- (flt) = u.d; \
+- } while (0)
+
+ #include <strtod_l.c>
+
+Index: git/sysdeps/ieee754/ldbl-96/strtod_nan_ldouble.h
+===================================================================
+--- /dev/null
++++ git/sysdeps/ieee754/ldbl-96/strtod_nan_ldouble.h
+@@ -0,0 +1,30 @@
++/* Convert string for NaN payload to corresponding NaN. For ldbl-96.
++ Copyright (C) 1997-2015 Free Software Foundation, Inc.
++ This file is part of the GNU C Library.
++
++ The GNU C Library is free software; you can redistribute it and/or
++ modify it under the terms of the GNU Lesser General Public
++ License as published by the Free Software Foundation; either
++ version 2.1 of the License, or (at your option) any later version.
++
++ The GNU C Library is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ Lesser General Public License for more details.
++
++ You should have received a copy of the GNU Lesser General Public
++ License along with the GNU C Library; if not, see
++ <http://www.gnu.org/licenses/>. */
++
++#define FLOAT long double
++#define SET_MANTISSA(flt, mant) \
++ do \
++ { \
++ union ieee854_long_double u; \
++ u.d = (flt); \
++ u.ieee_nan.mantissa0 = (mant) >> 32; \
++ u.ieee_nan.mantissa1 = (mant); \
++ if ((u.ieee.mantissa0 | u.ieee.mantissa1) != 0) \
++ (flt) = u.d; \
++ } \
++ while (0)
+Index: git/sysdeps/ieee754/ldbl-96/strtold_l.c
+===================================================================
+--- git.orig/sysdeps/ieee754/ldbl-96/strtold_l.c
++++ git/sysdeps/ieee754/ldbl-96/strtold_l.c
+@@ -25,19 +25,13 @@
+ #ifdef USE_WIDE_CHAR
+ # define STRTOF wcstold_l
+ # define __STRTOF __wcstold_l
++# define STRTOF_NAN __wcstold_nan
+ #else
+ # define STRTOF strtold_l
+ # define __STRTOF __strtold_l
++# define STRTOF_NAN __strtold_nan
+ #endif
+ #define MPN2FLOAT __mpn_construct_long_double
+ #define FLOAT_HUGE_VAL HUGE_VALL
+-#define SET_MANTISSA(flt, mant) \
+- do { union ieee854_long_double u; \
+- u.d = (flt); \
+- u.ieee_nan.mantissa0 = (mant) >> 32; \
+- u.ieee_nan.mantissa1 = (mant); \
+- if ((u.ieee.mantissa0 | u.ieee.mantissa1) != 0) \
+- (flt) = u.d; \
+- } while (0)
+
+ #include <stdlib/strtod_l.c>
+Index: git/wcsmbs/Makefile
+===================================================================
+--- git.orig/wcsmbs/Makefile
++++ git/wcsmbs/Makefile
+@@ -39,6 +39,7 @@ routines-$(OPTION_POSIX_C_LANG_WIDE_CHAR
+ wcstol wcstoul wcstoll wcstoull wcstod wcstold wcstof \
+ wcstol_l wcstoul_l wcstoll_l wcstoull_l \
+ wcstod_l wcstold_l wcstof_l \
++ wcstod_nan wcstold_nan wcstof_nan \
+ wcscoll wcsxfrm \
+ wcwidth wcswidth \
+ wcscoll_l wcsxfrm_l \
+Index: git/wcsmbs/wcstod_l.c
+===================================================================
+--- git.orig/wcsmbs/wcstod_l.c
++++ git/wcsmbs/wcstod_l.c
+@@ -23,9 +23,6 @@
+
+ extern double ____wcstod_l_internal (const wchar_t *, wchar_t **, int,
+ __locale_t);
+-extern unsigned long long int ____wcstoull_l_internal (const wchar_t *,
+- wchar_t **, int, int,
+- __locale_t);
+
+ #define USE_WIDE_CHAR 1
+
+Index: git/wcsmbs/wcstod_nan.c
+===================================================================
+--- /dev/null
++++ git/wcsmbs/wcstod_nan.c
+@@ -0,0 +1,23 @@
++/* Convert string for NaN payload to corresponding NaN. Wide strings, double.
++ Copyright (C) 2015 Free Software Foundation, Inc.
++ This file is part of the GNU C Library.
++
++ The GNU C Library is free software; you can redistribute it and/or
++ modify it under the terms of the GNU Lesser General Public
++ License as published by the Free Software Foundation; either
++ version 2.1 of the License, or (at your option) any later version.
++
++ The GNU C Library is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ Lesser General Public License for more details.
++
++ You should have received a copy of the GNU Lesser General Public
++ License along with the GNU C Library; if not, see
++ <http://www.gnu.org/licenses/>. */
++
++#include "../stdlib/strtod_nan_wide.h"
++#include "../stdlib/strtod_nan_double.h"
++
++#define STRTOD_NAN __wcstod_nan
++#include "../stdlib/strtod_nan_main.c"
+Index: git/wcsmbs/wcstof_l.c
+===================================================================
+--- git.orig/wcsmbs/wcstof_l.c
++++ git/wcsmbs/wcstof_l.c
+@@ -25,8 +25,5 @@
+
+ extern float ____wcstof_l_internal (const wchar_t *, wchar_t **, int,
+ __locale_t);
+-extern unsigned long long int ____wcstoull_l_internal (const wchar_t *,
+- wchar_t **, int, int,
+- __locale_t);
+
+ #include <stdlib/strtof_l.c>
+Index: git/wcsmbs/wcstof_nan.c
+===================================================================
+--- /dev/null
++++ git/wcsmbs/wcstof_nan.c
+@@ -0,0 +1,23 @@
++/* Convert string for NaN payload to corresponding NaN. Wide strings, float.
++ Copyright (C) 2015 Free Software Foundation, Inc.
++ This file is part of the GNU C Library.
++
++ The GNU C Library is free software; you can redistribute it and/or
++ modify it under the terms of the GNU Lesser General Public
++ License as published by the Free Software Foundation; either
++ version 2.1 of the License, or (at your option) any later version.
++
++ The GNU C Library is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ Lesser General Public License for more details.
++
++ You should have received a copy of the GNU Lesser General Public
++ License along with the GNU C Library; if not, see
++ <http://www.gnu.org/licenses/>. */
++
++#include "../stdlib/strtod_nan_wide.h"
++#include "../stdlib/strtod_nan_float.h"
++
++#define STRTOD_NAN __wcstof_nan
++#include "../stdlib/strtod_nan_main.c"
+Index: git/wcsmbs/wcstold_l.c
+===================================================================
+--- git.orig/wcsmbs/wcstold_l.c
++++ git/wcsmbs/wcstold_l.c
+@@ -24,8 +24,5 @@
+
+ extern long double ____wcstold_l_internal (const wchar_t *, wchar_t **, int,
+ __locale_t);
+-extern unsigned long long int ____wcstoull_l_internal (const wchar_t *,
+- wchar_t **, int, int,
+- __locale_t);
+
+ #include <strtold_l.c>
+Index: git/wcsmbs/wcstold_nan.c
+===================================================================
+--- /dev/null
++++ git/wcsmbs/wcstold_nan.c
+@@ -0,0 +1,30 @@
++/* Convert string for NaN payload to corresponding NaN. Wide strings,
++ long double.
++ Copyright (C) 2015 Free Software Foundation, Inc.
++ This file is part of the GNU C Library.
++
++ The GNU C Library is free software; you can redistribute it and/or
++ modify it under the terms of the GNU Lesser General Public
++ License as published by the Free Software Foundation; either
++ version 2.1 of the License, or (at your option) any later version.
++
++ The GNU C Library is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ Lesser General Public License for more details.
++
++ You should have received a copy of the GNU Lesser General Public
++ License along with the GNU C Library; if not, see
++ <http://www.gnu.org/licenses/>. */
++
++#include <math.h>
++
++/* This function is unused if long double and double have the same
++ representation. */
++#ifndef __NO_LONG_DOUBLE_MATH
++# include "../stdlib/strtod_nan_wide.h"
++# include <strtod_nan_ldouble.h>
++
++# define STRTOD_NAN __wcstold_nan
++# include "../stdlib/strtod_nan_main.c"
++#endif
+Index: git/ChangeLog
+===================================================================
+--- git.orig/ChangeLog
++++ git/ChangeLog
+@@ -1,3 +1,57 @@
++2015-11-24 Joseph Myers <joseph@codesourcery.com>
++
++ * stdlib/strtod_nan.c: New file.
++ * stdlib/strtod_nan_double.h: Likewise.
++ * stdlib/strtod_nan_float.h: Likewise.
++ * stdlib/strtod_nan_main.c: Likewise.
++ * stdlib/strtod_nan_narrow.h: Likewise.
++ * stdlib/strtod_nan_wide.h: Likewise.
++ * stdlib/strtof_nan.c: Likewise.
++ * stdlib/strtold_nan.c: Likewise.
++ * sysdeps/ieee754/ldbl-128/strtod_nan_ldouble.h: Likewise.
++ * sysdeps/ieee754/ldbl-128ibm/strtod_nan_ldouble.h: Likewise.
++ * sysdeps/ieee754/ldbl-96/strtod_nan_ldouble.h: Likewise.
++ * wcsmbs/wcstod_nan.c: Likewise.
++ * wcsmbs/wcstof_nan.c: Likewise.
++ * wcsmbs/wcstold_nan.c: Likewise.
++ * stdlib/Makefile (routines): Add strtof_nan, strtod_nan and
++ strtold_nan.
++ * wcsmbs/Makefile (routines): Add wcstod_nan, wcstold_nan and
++ wcstof_nan.
++ * include/stdlib.h (__strtof_nan): Declare and use
++ libc_hidden_proto.
++ (__strtod_nan): Likewise.
++ (__strtold_nan): Likewise.
++ (__wcstof_nan): Likewise.
++ (__wcstod_nan): Likewise.
++ (__wcstold_nan): Likewise.
++ * include/wchar.h (____wcstoull_l_internal): Declare.
++ * stdlib/strtod_l.c: Do not include <ieee754.h>.
++ (____strtoull_l_internal): Remove declaration.
++ (STRTOF_NAN): Define macro.
++ (SET_MANTISSA): Remove macro.
++ (STRTOULL): Likewise.
++ (____STRTOF_INTERNAL): Use STRTOF_NAN to parse NaN payload.
++ * stdlib/strtof_l.c (____strtoull_l_internal): Remove declaration.
++ (STRTOF_NAN): Define macro.
++ (SET_MANTISSA): Remove macro.
++ * sysdeps/ieee754/ldbl-128/strtold_l.c (STRTOF_NAN): Define macro.
++ (SET_MANTISSA): Remove macro.
++ * sysdeps/ieee754/ldbl-128ibm/strtold_l.c (STRTOF_NAN): Define
++ macro.
++ (SET_MANTISSA): Remove macro.
++ * sysdeps/ieee754/ldbl-64-128/strtold_l.c (STRTOF_NAN): Define
++ macro.
++ (SET_MANTISSA): Remove macro.
++ * sysdeps/ieee754/ldbl-96/strtold_l.c (STRTOF_NAN): Define macro.
++ (SET_MANTISSA): Remove macro.
++ * wcsmbs/wcstod_l.c (____wcstoull_l_internal): Remove declaration.
++ * wcsmbs/wcstof_l.c (____wcstoull_l_internal): Likewise.
++ * wcsmbs/wcstold_l.c (____wcstoull_l_internal): Likewise.
++
++ [BZ #19266]
++ * stdlib/strtod_l.c (____STRTOF_INTERNAL): Check directly for
++ upper case and lower case letters inside NAN(), not using TOLOWER.
+ 2015-08-08 Paul Pluzhnikov <ppluzhnikov@google.com>
+
+ [BZ #17905]
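
Editor's note: the refactoring above moves NaN payload parsing into dedicated __strtod_nan/__strtof_nan/__strtold_nan helpers so that nan() and friends can later call them without building a "NAN(...)" string on the stack. A short usage sketch of the user-visible strtod behaviour the helpers implement (how the payload is encoded in the NaN is implementation-defined):

    #include <math.h>
    #include <stdio.h>
    #include <stdlib.h>

    int
    main (void)
    {
      char *end;

      /* Valid n-char-sequence: the digits may be placed in the NaN mantissa. */
      double d = strtod ("NAN(0x7ff)", &end);
      printf ("isnan=%d rest=\"%s\"\n", isnan (d) != 0, end);

      /* Not a valid n-char-sequence before ')': only "NAN" is consumed and
         everything from the '(' onwards is left in *end.  */
      d = strtod ("NAN(not valid!)", &end);
      printf ("isnan=%d rest=\"%s\"\n", isnan (d) != 0, end);
      return 0;
    }
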
diff --git a/yocto-poky/meta/recipes-core/glibc/glibc/CVE-2015-9761_2.patch b/yocto-poky/meta/recipes-core/glibc/glibc/CVE-2015-9761_2.patch
new file mode 100644
index 000000000..e30307fbc
--- /dev/null
+++ b/yocto-poky/meta/recipes-core/glibc/glibc/CVE-2015-9761_2.patch
@@ -0,0 +1,385 @@
+From 8f5e8b01a1da2a207228f2072c934fa5918554b8 Mon Sep 17 00:00:00 2001
+From: Joseph Myers <joseph@codesourcery.com>
+Date: Fri, 4 Dec 2015 20:36:28 +0000
+Subject: [PATCH] Fix nan functions handling of payload strings (bug 16961, bug
+ 16962).
+
+The nan, nanf and nanl functions handle payload strings by doing e.g.:
+
+ if (tagp[0] != '\0')
+ {
+ char buf[6 + strlen (tagp)];
+ sprintf (buf, "NAN(%s)", tagp);
+ return strtod (buf, NULL);
+ }
+
+This is an unbounded stack allocation based on the length of the
+argument. Furthermore, if the argument starts with an n-char-sequence
+followed by ')', that n-char-sequence is wrongly treated as
+significant for determining the payload of the resulting NaN, when ISO
+C says the call should be equivalent to strtod ("NAN", NULL), without
+being affected by that initial n-char-sequence. This patch fixes both
+those problems by using the __strtod_nan etc. functions recently
+factored out of strtod etc. for that purpose, with those functions
+being exported from libc at version GLIBC_PRIVATE.
+
+Tested for x86_64, x86, mips64 and powerpc.
+
+ [BZ #16961]
+ [BZ #16962]
+ * math/s_nan.c (__nan): Use __strtod_nan instead of constructing a
+ string on the stack for strtod.
+ * math/s_nanf.c (__nanf): Use __strtof_nan instead of constructing
+ a string on the stack for strtof.
+ * math/s_nanl.c (__nanl): Use __strtold_nan instead of
+ constructing a string on the stack for strtold.
+ * stdlib/Versions (libc): Add __strtof_nan, __strtod_nan and
+ __strtold_nan to GLIBC_PRIVATE.
+ * math/test-nan-overflow.c: New file.
+ * math/test-nan-payload.c: Likewise.
+ * math/Makefile (tests): Add test-nan-overflow and
+ test-nan-payload.
+
+Upstream-Status: Backport
+CVE: CVE-2015-9761 patch #2
+[Yocto # 8980]
+
+https://sourceware.org/git/gitweb.cgi?p=glibc.git;h=8f5e8b01a1da2a207228f2072c934fa5918554b8
+
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ ChangeLog | 17 +++++++
+ NEWS | 6 +++
+ math/Makefile | 3 +-
+ math/s_nan.c | 9 +---
+ math/s_nanf.c | 9 +---
+ math/s_nanl.c | 9 +---
+ math/test-nan-overflow.c | 66 +++++++++++++++++++++++++
+ math/test-nan-payload.c | 122 +++++++++++++++++++++++++++++++++++++++++++++++
+ stdlib/Versions | 1 +
+ 9 files changed, 217 insertions(+), 25 deletions(-)
+ create mode 100644 math/test-nan-overflow.c
+ create mode 100644 math/test-nan-payload.c
+
+Index: git/ChangeLog
+===================================================================
+--- git.orig/ChangeLog
++++ git/ChangeLog
+@@ -1,3 +1,20 @@
++2015-12-04 Joseph Myers <joseph@codesourcery.com>
++
++ [BZ #16961]
++ [BZ #16962]
++ * math/s_nan.c (__nan): Use __strtod_nan instead of constructing a
++ string on the stack for strtod.
++ * math/s_nanf.c (__nanf): Use __strtof_nan instead of constructing
++ a string on the stack for strtof.
++ * math/s_nanl.c (__nanl): Use __strtold_nan instead of
++ constructing a string on the stack for strtold.
++ * stdlib/Versions (libc): Add __strtof_nan, __strtod_nan and
++ __strtold_nan to GLIBC_PRIVATE.
++ * math/test-nan-overflow.c: New file.
++ * math/test-nan-payload.c: Likewise.
++ * math/Makefile (tests): Add test-nan-overflow and
++ test-nan-payload.
++
+ 2015-11-24 Joseph Myers <joseph@codesourcery.com>
+
+ * stdlib/strtod_nan.c: New file.
+Index: git/NEWS
+===================================================================
+--- git.orig/NEWS
++++ git/NEWS
+@@ -99,6 +99,12 @@ Version 2.22
+
+ Version 2.21
+
++Security related changes:
++
++* The nan, nanf and nanl functions no longer have unbounded stack usage
++ depending on the length of the string passed as an argument to the
++ functions. Reported by Joseph Myers.
++
+ * The following bugs are resolved with this release:
+
+ 6652, 10672, 12674, 12847, 12926, 13862, 14132, 14138, 14171, 14498,
+Index: git/math/Makefile
+===================================================================
+--- git.orig/math/Makefile
++++ git/math/Makefile
+@@ -110,6 +110,7 @@ tests = test-matherr test-fenv atest-exp
+ test-tgmath-ret bug-nextafter bug-nexttoward bug-tgmath1 \
+ test-tgmath-int test-tgmath2 test-powl tst-CMPLX tst-CMPLX2 test-snan \
+ test-fenv-tls test-fenv-preserve test-fenv-return test-fenvinline \
++ test-nan-overflow test-nan-payload \
+ $(tests-static)
+ tests-static = test-fpucw-static test-fpucw-ieee-static
+ # We do the `long double' tests only if this data type is available and
+Index: git/math/s_nan.c
+===================================================================
+--- git.orig/math/s_nan.c
++++ git/math/s_nan.c
+@@ -28,14 +28,7 @@
+ double
+ __nan (const char *tagp)
+ {
+- if (tagp[0] != '\0')
+- {
+- char buf[6 + strlen (tagp)];
+- sprintf (buf, "NAN(%s)", tagp);
+- return strtod (buf, NULL);
+- }
+-
+- return NAN;
++ return __strtod_nan (tagp, NULL, 0);
+ }
+ weak_alias (__nan, nan)
+ #ifdef NO_LONG_DOUBLE
+Index: git/math/s_nanf.c
+===================================================================
+--- git.orig/math/s_nanf.c
++++ git/math/s_nanf.c
+@@ -28,13 +28,6 @@
+ float
+ __nanf (const char *tagp)
+ {
+- if (tagp[0] != '\0')
+- {
+- char buf[6 + strlen (tagp)];
+- sprintf (buf, "NAN(%s)", tagp);
+- return strtof (buf, NULL);
+- }
+-
+- return NAN;
++ return __strtof_nan (tagp, NULL, 0);
+ }
+ weak_alias (__nanf, nanf)
+Index: git/math/s_nanl.c
+===================================================================
+--- git.orig/math/s_nanl.c
++++ git/math/s_nanl.c
+@@ -28,13 +28,6 @@
+ long double
+ __nanl (const char *tagp)
+ {
+- if (tagp[0] != '\0')
+- {
+- char buf[6 + strlen (tagp)];
+- sprintf (buf, "NAN(%s)", tagp);
+- return strtold (buf, NULL);
+- }
+-
+- return NAN;
++ return __strtold_nan (tagp, NULL, 0);
+ }
+ weak_alias (__nanl, nanl)
+Index: git/math/test-nan-overflow.c
+===================================================================
+--- /dev/null
++++ git/math/test-nan-overflow.c
+@@ -0,0 +1,66 @@
++/* Test nan functions stack overflow (bug 16962).
++ Copyright (C) 2015 Free Software Foundation, Inc.
++ This file is part of the GNU C Library.
++
++ The GNU C Library is free software; you can redistribute it and/or
++ modify it under the terms of the GNU Lesser General Public
++ License as published by the Free Software Foundation; either
++ version 2.1 of the License, or (at your option) any later version.
++
++ The GNU C Library is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ Lesser General Public License for more details.
++
++ You should have received a copy of the GNU Lesser General Public
++ License along with the GNU C Library; if not, see
++ <http://www.gnu.org/licenses/>. */
++
++#include <math.h>
++#include <stdio.h>
++#include <string.h>
++#include <sys/resource.h>
++
++#define STACK_LIM 1048576
++#define STRING_SIZE (2 * STACK_LIM)
++
++static int
++do_test (void)
++{
++ int result = 0;
++ struct rlimit lim;
++ getrlimit (RLIMIT_STACK, &lim);
++ lim.rlim_cur = STACK_LIM;
++ setrlimit (RLIMIT_STACK, &lim);
++ char *nanstr = malloc (STRING_SIZE);
++ if (nanstr == NULL)
++ {
++ puts ("malloc failed, cannot test");
++ return 77;
++ }
++ memset (nanstr, '0', STRING_SIZE - 1);
++ nanstr[STRING_SIZE - 1] = 0;
++#define NAN_TEST(TYPE, FUNC) \
++ do \
++ { \
++ char *volatile p = nanstr; \
++ volatile TYPE v = FUNC (p); \
++ if (isnan (v)) \
++ puts ("PASS: " #FUNC); \
++ else \
++ { \
++ puts ("FAIL: " #FUNC); \
++ result = 1; \
++ } \
++ } \
++ while (0)
++ NAN_TEST (float, nanf);
++ NAN_TEST (double, nan);
++#ifndef NO_LONG_DOUBLE
++ NAN_TEST (long double, nanl);
++#endif
++ return result;
++}
++
++#define TEST_FUNCTION do_test ()
++#include "../test-skeleton.c"
+Index: git/math/test-nan-payload.c
+===================================================================
+--- /dev/null
++++ git/math/test-nan-payload.c
+@@ -0,0 +1,122 @@
++/* Test nan functions payload handling (bug 16961).
++ Copyright (C) 2015 Free Software Foundation, Inc.
++ This file is part of the GNU C Library.
++
++ The GNU C Library is free software; you can redistribute it and/or
++ modify it under the terms of the GNU Lesser General Public
++ License as published by the Free Software Foundation; either
++ version 2.1 of the License, or (at your option) any later version.
++
++ The GNU C Library is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ Lesser General Public License for more details.
++
++ You should have received a copy of the GNU Lesser General Public
++ License along with the GNU C Library; if not, see
++ <http://www.gnu.org/licenses/>. */
++
++#include <float.h>
++#include <math.h>
++#include <stdio.h>
++#include <stdlib.h>
++#include <string.h>
++
++/* Avoid built-in functions. */
++#define WRAP_NAN(FUNC, STR) \
++ ({ const char *volatile wns = (STR); FUNC (wns); })
++#define WRAP_STRTO(FUNC, STR) \
++ ({ const char *volatile wss = (STR); FUNC (wss, NULL); })
++
++#define CHECK_IS_NAN(TYPE, A) \
++ do \
++ { \
++ if (isnan (A)) \
++ puts ("PASS: " #TYPE " " #A); \
++ else \
++ { \
++ puts ("FAIL: " #TYPE " " #A); \
++ result = 1; \
++ } \
++ } \
++ while (0)
++
++#define CHECK_SAME_NAN(TYPE, A, B) \
++ do \
++ { \
++ if (memcmp (&(A), &(B), sizeof (A)) == 0) \
++ puts ("PASS: " #TYPE " " #A " = " #B); \
++ else \
++ { \
++ puts ("FAIL: " #TYPE " " #A " = " #B); \
++ result = 1; \
++ } \
++ } \
++ while (0)
++
++#define CHECK_DIFF_NAN(TYPE, A, B) \
++ do \
++ { \
++ if (memcmp (&(A), &(B), sizeof (A)) != 0) \
++ puts ("PASS: " #TYPE " " #A " != " #B); \
++ else \
++ { \
++ puts ("FAIL: " #TYPE " " #A " != " #B); \
++ result = 1; \
++ } \
++ } \
++ while (0)
++
++/* Cannot test payloads by memcmp for formats where NaNs have padding
++ bits. */
++#define CAN_TEST_EQ(MANT_DIG) ((MANT_DIG) != 64 && (MANT_DIG) != 106)
++
++#define RUN_TESTS(TYPE, SFUNC, FUNC, MANT_DIG) \
++ do \
++ { \
++ TYPE n123 = WRAP_NAN (FUNC, "123"); \
++ CHECK_IS_NAN (TYPE, n123); \
++ TYPE s123 = WRAP_STRTO (SFUNC, "NAN(123)"); \
++ CHECK_IS_NAN (TYPE, s123); \
++ TYPE n456 = WRAP_NAN (FUNC, "456"); \
++ CHECK_IS_NAN (TYPE, n456); \
++ TYPE s456 = WRAP_STRTO (SFUNC, "NAN(456)"); \
++ CHECK_IS_NAN (TYPE, s456); \
++ TYPE n123x = WRAP_NAN (FUNC, "123)"); \
++ CHECK_IS_NAN (TYPE, n123x); \
++ TYPE nemp = WRAP_NAN (FUNC, ""); \
++ CHECK_IS_NAN (TYPE, nemp); \
++ TYPE semp = WRAP_STRTO (SFUNC, "NAN()"); \
++ CHECK_IS_NAN (TYPE, semp); \
++ TYPE sx = WRAP_STRTO (SFUNC, "NAN"); \
++ CHECK_IS_NAN (TYPE, sx); \
++ if (CAN_TEST_EQ (MANT_DIG)) \
++ CHECK_SAME_NAN (TYPE, n123, s123); \
++ if (CAN_TEST_EQ (MANT_DIG)) \
++ CHECK_SAME_NAN (TYPE, n456, s456); \
++ if (CAN_TEST_EQ (MANT_DIG)) \
++ CHECK_SAME_NAN (TYPE, nemp, semp); \
++ if (CAN_TEST_EQ (MANT_DIG)) \
++ CHECK_SAME_NAN (TYPE, n123x, sx); \
++ CHECK_DIFF_NAN (TYPE, n123, n456); \
++ CHECK_DIFF_NAN (TYPE, n123, nemp); \
++ CHECK_DIFF_NAN (TYPE, n123, n123x); \
++ CHECK_DIFF_NAN (TYPE, n456, nemp); \
++ CHECK_DIFF_NAN (TYPE, n456, n123x); \
++ } \
++ while (0)
++
++static int
++do_test (void)
++{
++ int result = 0;
++ RUN_TESTS (float, strtof, nanf, FLT_MANT_DIG);
++ RUN_TESTS (double, strtod, nan, DBL_MANT_DIG);
++#ifndef NO_LONG_DOUBLE
++ RUN_TESTS (long double, strtold, nanl, LDBL_MANT_DIG);
++#endif
++ return result;
++}
++
++#define TEST_FUNCTION do_test ()
++#include "../test-skeleton.c"
+Index: git/stdlib/Versions
+===================================================================
+--- git.orig/stdlib/Versions
++++ git/stdlib/Versions
+@@ -118,5 +118,6 @@ libc {
+ # Used from other libraries
+ __libc_secure_getenv;
+ __call_tls_dtors;
++ __strtof_nan; __strtod_nan; __strtold_nan;
+ }
+ }
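
For reference, the user-visible guarantee the backport preserves is that nan("n") behaves like strtod("NAN(n)", NULL); the fix only reroutes nan/nanf/nanl through the internal __strtod_nan/__strtof_nan/__strtold_nan helpers instead of formatting "NAN(%s)" into a stack buffer whose size depended on strlen(tagp). A minimal standalone sketch of that property (illustration only, not part of the patch; it assumes an IEEE 754 double with no padding bits, the same assumption test-nan-payload.c makes via CAN_TEST_EQ):

  /* Build with: cc nan-demo.c -lm */
  #include <math.h>
  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

  int
  main (void)
  {
    double a = nan ("123");                /* payload set via nan() */
    double b = strtod ("NAN(123)", NULL);  /* same payload via strtod() */

    /* Both must be NaNs and, for IEEE double, carry identical bits.  */
    printf ("isnan(a)=%d isnan(b)=%d same bits=%d\n",
            isnan (a), isnan (b), memcmp (&a, &b, sizeof a) == 0);
    return 0;
  }
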
diff --git a/yocto-poky/meta/recipes-core/glibc/glibc/strcoll-Remove-incorrect-STRDIFF-based-optimization-.patch b/yocto-poky/meta/recipes-core/glibc/glibc/strcoll-Remove-incorrect-STRDIFF-based-optimization-.patch
new file mode 100644
index 000000000..8ce255f11
--- /dev/null
+++ b/yocto-poky/meta/recipes-core/glibc/glibc/strcoll-Remove-incorrect-STRDIFF-based-optimization-.patch
@@ -0,0 +1,323 @@
+Upstream-Status: Backport
+
+Signed-off-by: Li Xin <lixin.fnst@cn.fujitsu.com>
+
+From https://sourceware.org/git/gitweb.cgi?p=glibc.git;h=6c84109cfa26f35c3dfed3acb97d347361bd5849
+Author: Carlos O'Donell <carlos@systemhalted.org>
+Date: Thu Oct 8 16:34:53 2015 -0400
+
+ strcoll: Remove incorrect STRDIFF-based optimization (Bug 18589).
+
+ The optimization introduced in commit
+ f13c2a8dff2329c6692a80176262ceaaf8a6f74e, causes regressions in
+ sorting for languages that have digraphs that change sort order, like
+ cs_CZ which sorts ch between h and i.
+
+ My analysis shows the fast-forwarding optimization in STRCOLL advances
+ through a digraph while possibly stopping in the middle which results
+ in a subsequent skipping of the digraph and incorrect sorting. The
+ optimization is incorrect as implemented and because of that I'm
+ removing it for 2.23, and I will also commit this fix for 2.22 where
+ it was originally introduced.
+
+ This patch reverts the optimization, introduces a new bug-strcoll2.c
+ regression test that tests both cs_CZ.UTF-8 and da_DK.ISO-8859-1 and
+ ensures they sort one digraph each correctly. The optimization can't be
+ applied without regressing this test.
+
+ Checked on x86_64, bug-strcoll2.c fails without this patch and passes
+ after. This will also get a fix on 2.22 which has the same bug.
+
+ (cherry picked from commit 87701a58e291bd7ac3b407d10a829dac52c9c16e)
+---
+ locale/C-collate.c | 4 +-
+ locale/categories.def | 1 -
+ locale/langinfo.h | 1 -
+ locale/localeinfo.h | 7 ----
+ locale/programs/ld-collate.c | 9 -----
+ string/bug-strcoll2.c | 95 ++++++++++++++++++++++++++++++++++++++++++++
+ string/strcoll_l.c | 39 +-----------------
+ wcsmbs/wcscoll_l.c | 1 -
+ 8 files changed, 98 insertions(+), 59 deletions(-)
+ create mode 100644 string/bug-strcoll2.c
+
+diff --git a/locale/C-collate.c b/locale/C-collate.c
+index d7f3c55..06dfdfa 100644
+--- a/locale/C-collate.c
++++ b/locale/C-collate.c
+@@ -144,8 +144,6 @@ const struct __locale_data _nl_C_LC_COLLATE attribute_hidden =
+ /* _NL_COLLATE_COLLSEQWC */
+ { .string = (const char *) collseqwc },
+ /* _NL_COLLATE_CODESET */
+- { .string = _nl_C_codeset },
+- /* _NL_COLLATE_ENCODING_TYPE */
+- { .word = __cet_8bit }
++ { .string = _nl_C_codeset }
+ }
+ };
+diff --git a/locale/categories.def b/locale/categories.def
+index 045489d..a8dda53 100644
+--- a/locale/categories.def
++++ b/locale/categories.def
+@@ -58,7 +58,6 @@ DEFINE_CATEGORY
+ DEFINE_ELEMENT (_NL_COLLATE_COLLSEQMB, "collate-collseqmb", std, wstring)
+ DEFINE_ELEMENT (_NL_COLLATE_COLLSEQWC, "collate-collseqwc", std, wstring)
+ DEFINE_ELEMENT (_NL_COLLATE_CODESET, "collate-codeset", std, string)
+- DEFINE_ELEMENT (_NL_COLLATE_ENCODING_TYPE, "collate-encoding-type", std, word)
+ ), NO_POSTLOAD)
+
+
+diff --git a/locale/langinfo.h b/locale/langinfo.h
+index ffc5c7f..a565d9d 100644
+--- a/locale/langinfo.h
++++ b/locale/langinfo.h
+@@ -255,7 +255,6 @@ enum
+ _NL_COLLATE_COLLSEQMB,
+ _NL_COLLATE_COLLSEQWC,
+ _NL_COLLATE_CODESET,
+- _NL_COLLATE_ENCODING_TYPE,
+ _NL_NUM_LC_COLLATE,
+
+ /* LC_CTYPE category: character classification.
+diff --git a/locale/localeinfo.h b/locale/localeinfo.h
+index a7516c0..c076d8e 100644
+--- a/locale/localeinfo.h
++++ b/locale/localeinfo.h
+@@ -110,13 +110,6 @@ enum coll_sort_rule
+ sort_mask
+ };
+
+-/* Collation encoding type. */
+-enum collation_encoding_type
+-{
+- __cet_other,
+- __cet_8bit,
+- __cet_utf8
+-};
+
+ /* We can map the types of the entries into a few categories. */
+ enum value_type
+diff --git a/locale/programs/ld-collate.c b/locale/programs/ld-collate.c
+index 16e9039..3c88c6d 100644
+--- a/locale/programs/ld-collate.c
++++ b/locale/programs/ld-collate.c
+@@ -32,7 +32,6 @@
+ #include "linereader.h"
+ #include "locfile.h"
+ #include "elem-hash.h"
+-#include "../localeinfo.h"
+
+ /* Uncomment the following line in the production version. */
+ /* #define NDEBUG 1 */
+@@ -2130,8 +2129,6 @@ collate_output (struct localedef_t *locale, const struct charmap_t *charmap,
+ /* The words have to be handled specially. */
+ if (idx == _NL_ITEM_INDEX (_NL_COLLATE_SYMB_HASH_SIZEMB))
+ add_locale_uint32 (&file, 0);
+- else if (idx == _NL_ITEM_INDEX (_NL_COLLATE_ENCODING_TYPE))
+- add_locale_uint32 (&file, __cet_other);
+ else
+ add_locale_empty (&file);
+ }
+@@ -2495,12 +2492,6 @@ collate_output (struct localedef_t *locale, const struct charmap_t *charmap,
+ add_locale_raw_data (&file, collate->mbseqorder, 256);
+ add_locale_collseq_table (&file, &collate->wcseqorder);
+ add_locale_string (&file, charmap->code_set_name);
+- if (strcmp (charmap->code_set_name, "UTF-8") == 0)
+- add_locale_uint32 (&file, __cet_utf8);
+- else if (charmap->mb_cur_max == 1)
+- add_locale_uint32 (&file, __cet_8bit);
+- else
+- add_locale_uint32 (&file, __cet_other);
+ write_locale_data (output_path, LC_COLLATE, "LC_COLLATE", &file);
+
+ obstack_free (&weightpool, NULL);
+diff --git a/string/bug-strcoll2.c b/string/bug-strcoll2.c
+new file mode 100644
+index 0000000..950b090
+--- /dev/null
++++ b/string/bug-strcoll2.c
+@@ -0,0 +1,95 @@
++/* Bug 18589: sort-test.sh fails at random.
++ * Copyright (C) 1998-2015 Free Software Foundation, Inc.
++ * This file is part of the GNU C Library.
++ * Contributed by Ulrich Drepper <drepper@cygnus.com>, 1998.
++ *
++ * The GNU C Library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; either
++ * version 2.1 of the License, or (at your option) any later version.
++ *
++ * The GNU C Library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with the GNU C Library; if not, see
++ * <http://www.gnu.org/licenses/>. */
++
++#include <stdio.h>
++#include <string.h>
++#include <locale.h>
++
++/* An incorrect strcoll optimization resulted in incorrect
++ * results from strcoll for cs_CZ and da_DK. */
++
++int
++test_cs_CZ (void)
++{
++ const char t1[] = "config";
++ const char t2[] = "choose";
++ if (setlocale (LC_ALL, "cs_CZ.UTF-8") == NULL)
++ {
++ perror ("setlocale");
++ return 1;
++ }
++ /* In Czech the digraph ch sorts after c, therefore we expect
++ * config to sort before choose. */
++ int a = strcoll (t1, t2);
++ int b = strcoll (t2, t1);
++ printf ("strcoll (\"%s\", \"%s\") = %d\n", t1, t2, a);
++ printf ("strcoll (\"%s\", \"%s\") = %d\n", t2, t1, b);
++ if (a < 0 && b > 0)
++ {
++ puts ("PASS: config < choose");
++ return 0;
++ }
++ else
++ {
++ puts ("FAIL: Wrong sorting in cz_CZ.UTF-8.");
++ return 1;
++ }
++}
++
++int
++test_da_DK (void)
++{
++ const char t1[] = "AS";
++ const char t2[] = "AA";
++ if (setlocale (LC_ALL, "da_DK.ISO-8859-1") == NULL)
++ {
++ perror ("setlocale");
++ return 1;
++ }
++ /* AA should be treated as the last letter of the Danish alphabet,
++ * hence sorting after AS. */
++ int a = strcoll (t1, t2);
++ int b = strcoll (t2, t1);
++ printf ("strcoll (\"%s\", \"%s\") = %d\n", t1, t2, a);
++ printf ("strcoll (\"%s\", \"%s\") = %d\n", t2, t1, b);
++ if (a < 0 && b > 0)
++ {
++ puts ("PASS: AS < AA");
++ return 0;
++ }
++ else
++ {
++ puts ("FAIL: Wrong sorting in da_DK.ISO-8859-1");
++ return 1;
++ }
++}
++
++static int
++do_test (void)
++{
++ int err = 0;
++ err |= test_cs_CZ ();
++ err |= test_da_DK ();
++ return err;
++}
++
++#define TEST_FUNCTION do_test ()
++#include "../test-skeleton.c"
++
++
+diff --git a/string/strcoll_l.c b/string/strcoll_l.c
+index b36b18c..a18b65e 100644
+--- a/string/strcoll_l.c
++++ b/string/strcoll_l.c
+@@ -30,7 +30,6 @@
+ # define STRING_TYPE char
+ # define USTRING_TYPE unsigned char
+ # define STRCOLL __strcoll_l
+-# define STRDIFF __strdiff
+ # define STRCMP strcmp
+ # define WEIGHT_H "../locale/weight.h"
+ # define SUFFIX MB
+@@ -43,19 +42,6 @@
+ #include "../locale/localeinfo.h"
+ #include WEIGHT_H
+
+-#define MASK_UTF8_7BIT (1 << 7)
+-#define MASK_UTF8_START (3 << 6)
+-
+-size_t
+-STRDIFF (const STRING_TYPE *s, const STRING_TYPE *t)
+-{
+- size_t n;
+-
+- for (n = 0; *s != '\0' && *s++ == *t++; ++n)
+- continue;
+-
+- return n;
+-}
+
+ /* Track status while looking for sequences in a string. */
+ typedef struct
+@@ -274,29 +260,9 @@ STRCOLL (const STRING_TYPE *s1, const STRING_TYPE *s2, __locale_t l)
+ const USTRING_TYPE *extra;
+ const int32_t *indirect;
+
+- /* In case there is no locale specific sort order (C / POSIX). */
+ if (nrules == 0)
+ return STRCMP (s1, s2);
+
+- /* Fast forward to the position of the first difference. Needs to be
+- encoding aware as the byte-by-byte comparison can stop in the middle
+- of a char sequence for multibyte encodings like UTF-8. */
+- uint_fast32_t encoding =
+- current->values[_NL_ITEM_INDEX (_NL_COLLATE_ENCODING_TYPE)].word;
+- if (encoding != __cet_other)
+- {
+- size_t diff = STRDIFF (s1, s2);
+- if (diff > 0)
+- {
+- if (encoding == __cet_utf8 && (*(s1 + diff) & MASK_UTF8_7BIT) != 0)
+- do
+- diff--;
+- while (diff > 0 && (*(s1 + diff) & MASK_UTF8_START) != MASK_UTF8_START);
+- s1 += diff;
+- s2 += diff;
+- }
+- }
+-
+ /* Catch empty strings. */
+ if (__glibc_unlikely (*s1 == '\0') || __glibc_unlikely (*s2 == '\0'))
+ return (*s1 != '\0') - (*s2 != '\0');
+@@ -363,9 +329,8 @@ STRCOLL (const STRING_TYPE *s1, const STRING_TYPE *s2, __locale_t l)
+ byte-level comparison to ensure that we don't waste time
+ going through multiple passes for totally equal strings
+ before proceeding to subsequent passes. */
+- if (pass == 0 && encoding == __cet_other &&
+- STRCMP (s1, s2) == 0)
+- return result;
++ if (pass == 0 && STRCMP (s1, s2) == 0)
++ return result;
+ else
+ break;
+ }
+diff --git a/wcsmbs/wcscoll_l.c b/wcsmbs/wcscoll_l.c
+index 6d9384a..87f240d 100644
+--- a/wcsmbs/wcscoll_l.c
++++ b/wcsmbs/wcscoll_l.c
+@@ -23,7 +23,6 @@
+ #define STRING_TYPE wchar_t
+ #define USTRING_TYPE wint_t
+ #define STRCOLL __wcscoll_l
+-#define STRDIFF __wcsdiff
+ #define STRCMP __wcscmp
+ #define WEIGHT_H "../locale/weightwc.h"
+ #define SUFFIX WC
+--
+1.8.4.2
+
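
string/bug-strcoll2.c above is the authoritative regression test; the same digraph behaviour can also be checked by hand with a few lines of user code. This is an illustration only (not part of the patch) and it assumes the cs_CZ.UTF-8 locale has been generated on the host:

  /* In Czech the digraph "ch" collates after "h", so "config" must sort
     before "choose"; the reverted STRDIFF fast path broke exactly this.  */
  #include <locale.h>
  #include <stdio.h>
  #include <string.h>

  int
  main (void)
  {
    if (setlocale (LC_COLLATE, "cs_CZ.UTF-8") == NULL)
      {
        puts ("cs_CZ.UTF-8 locale not available, cannot check");
        return 77;
      }
    int cmp = strcoll ("config", "choose");
    printf ("strcoll (\"config\", \"choose\") = %d (%s)\n",
            cmp, cmp < 0 ? "expected" : "unexpected");
    return cmp < 0 ? 0 : 1;
  }
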
diff --git a/yocto-poky/meta/recipes-core/glibc/glibc/use_64bit_atomics.patch b/yocto-poky/meta/recipes-core/glibc/glibc/use_64bit_atomics.patch
new file mode 100644
index 000000000..eb7f2b29b
--- /dev/null
+++ b/yocto-poky/meta/recipes-core/glibc/glibc/use_64bit_atomics.patch
@@ -0,0 +1,24 @@
+This patch allows using 64 bit atomic instructions on a
+32 bit platform. This is safe provided the x86 CPU is Pentium or
+later (it would not work on i386 or i486). Using 64 bit atomic
+instructions bypasses code containing a bug as documented in
+https://bugzilla.yoctoproject.org/show_bug.cgi?id=8140
+
+Upstream-Status: TBD
+
+Signed-off-by: Juro Bystricky <juro.bystricky@intel.com>
+
+
+Index: libc/sysdeps/i386/i486/bits/atomic.h
+===================================================================
+--- libc.orig/sysdeps/i386/i486/bits/atomic.h
++++ libc/sysdeps/i386/i486/bits/atomic.h
+@@ -54,7 +54,7 @@ typedef uintmax_t uatomic_max_t;
+ # endif
+ #endif
+
+-#define __HAVE_64B_ATOMICS 0
++#define __HAVE_64B_ATOMICS 1
+ #define USE_ATOMIC_COMPILER_BUILTINS 0
+
+
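
The reason this is limited to Pentium-class and newer CPUs is that 64-bit atomics on 32-bit x86 rely on the LOCK CMPXCHG8B instruction, which i386 and i486 do not have. As a hedged, standalone illustration (using the generic GCC __atomic builtins rather than glibc's internal atomic macros):

  /* With -m32 -march=i586 or later, GCC emits LOCK CMPXCHG8B here, which
     is the capability that setting __HAVE_64B_ATOMICS to 1 relies on.  */
  #include <stdint.h>
  #include <stdio.h>

  static uint64_t counter;

  int
  main (void)
  {
    uint64_t expected = 0;
    /* Atomically replace 0 with 42 if counter still holds 0.  */
    int swapped = __atomic_compare_exchange_n (&counter, &expected, 42,
                                               0, __ATOMIC_SEQ_CST,
                                               __ATOMIC_SEQ_CST);
    printf ("swapped=%d counter=%llu\n", swapped,
            (unsigned long long) __atomic_load_n (&counter, __ATOMIC_SEQ_CST));
    return 0;
  }
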
diff --git a/yocto-poky/meta/recipes-core/glibc/glibc_2.22.bb b/yocto-poky/meta/recipes-core/glibc/glibc_2.22.bb
index 09f0428ea..a13b7f94b 100644
--- a/yocto-poky/meta/recipes-core/glibc/glibc_2.22.bb
+++ b/yocto-poky/meta/recipes-core/glibc/glibc_2.22.bb
@@ -9,11 +9,11 @@ DEPENDS += "gperf-native kconfig-frontends-native"
SRCREV ?= "a34d1c6afc86521d6ad17662a3b5362d8481514c"
-BRANCH ?= "release/${PV}/master"
+SRCBRANCH ?= "release/${PV}/master"
GLIBC_GIT_URI ?= "git://sourceware.org/git/glibc.git"
-SRC_URI = "${GLIBC_GIT_URI};branch=${BRANCH};name=glibc \
+SRC_URI = "${GLIBC_GIT_URI};branch=${SRCBRANCH};name=glibc \
file://0004-Backport-https-sourceware.org-ml-libc-ports-2007-12-.patch \
file://0005-fsl-e500-e5500-e6500-603e-fsqrt-implementation.patch \
file://0006-readlib-Add-OECORE_KNOWN_INTERPRETER_NAMES-to-known-.patch \
@@ -39,6 +39,14 @@ SRC_URI = "${GLIBC_GIT_URI};branch=${BRANCH};name=glibc \
file://0026-eglibc-dl_debug_mask-is-controlled-by-__OPTION_EGLIB.patch \
file://0027-eglibc-use-option-groups-Conditionally-exclude-c-tes.patch \
file://nscd-no-bash.patch \
+ file://strcoll-Remove-incorrect-STRDIFF-based-optimization-.patch \
+ file://0028-Clear-ELF_RTYPE_CLASS_EXTERN_PROTECTED_DATA-for-prel.patch \
+ file://CVE-2015-8777.patch \
+ file://CVE-2015-8779.patch \
+ file://CVE-2015-9761_1.patch \
+ file://CVE-2015-9761_2.patch \
+ file://CVE-2015-8776.patch \
+ file://CVE-2015-7547.patch \
"
SRC_URI += "\
@@ -50,6 +58,7 @@ SRC_URI_append_class-nativesdk = "\
file://0001-nativesdk-glibc-Look-for-host-system-ld.so.cache-as-.patch \
file://0002-nativesdk-glibc-Fix-buffer-overrun-with-a-relocated-.patch \
file://0003-nativesdk-glibc-Raise-the-size-of-arrays-containing-.patch \
+ file://use_64bit_atomics.patch \
"
S = "${WORKDIR}/git"
diff --git a/yocto-poky/meta/recipes-core/images/build-appliance-image_12.0.1.bb b/yocto-poky/meta/recipes-core/images/build-appliance-image_12.0.1.bb
index 0a86ba4b3..fdeadb63a 100644
--- a/yocto-poky/meta/recipes-core/images/build-appliance-image_12.0.1.bb
+++ b/yocto-poky/meta/recipes-core/images/build-appliance-image_12.0.1.bb
@@ -21,8 +21,8 @@ IMAGE_FSTYPES = "vmdk"
inherit core-image
-SRCREV ?= "d01cd53429b1c20f01dac97f1b9b659cb9dc9812"
-SRC_URI = "git://git.yoctoproject.org/poky \
+SRCREV ?= "7fe17a2942ff03e2ec47d566fd5393f52b2eb736"
+SRC_URI = "git://git.yoctoproject.org/poky;branch=jethro \
file://Yocto_Build_Appliance.vmx \
file://Yocto_Build_Appliance.vmxf \
"
diff --git a/yocto-poky/meta/recipes-core/initrdscripts/files/init-install-efi.sh b/yocto-poky/meta/recipes-core/initrdscripts/files/init-install-efi.sh
index fc4908ef9..0443a9d11 100644
--- a/yocto-poky/meta/recipes-core/initrdscripts/files/init-install-efi.sh
+++ b/yocto-poky/meta/recipes-core/initrdscripts/files/init-install-efi.sh
@@ -134,7 +134,7 @@ swap_start=$((rootfs_end))
# 2) they are detected asynchronously (need rootwait)
rootwait=""
part_prefix=""
-if [ ! "${device#mmcblk}" = "${device}" ]; then
+if [ ! "${device#/dev/mmcblk}" = "${device}" ]; then
part_prefix="p"
rootwait="rootwait"
fi
@@ -184,8 +184,8 @@ mount -o rw,loop,noatime,nodiratime /run/media/$1/$2 /src_root
echo "Copying rootfs files..."
cp -a /src_root/* /tgt_root
if [ -d /tgt_root/etc/ ] ; then
- boot_uuid=$(blkid -o value -s UUID ${device}1)
- swap_part_uuid=$(blkid -o value -s PARTUUID ${device}3)
+ boot_uuid=$(blkid -o value -s UUID ${bootfs})
+ swap_part_uuid=$(blkid -o value -s PARTUUID ${swap})
echo "/dev/disk/by-partuuid/$swap_part_uuid swap swap defaults 0 0" >> /tgt_root/etc/fstab
echo "UUID=$boot_uuid /boot vfat defaults 1 2" >> /tgt_root/etc/fstab
# We dont want udev to mount our root device while we're booting...
@@ -206,7 +206,7 @@ mkdir -p $EFIDIR
cp /run/media/$1/EFI/BOOT/*.efi $EFIDIR
if [ -f /run/media/$1/EFI/BOOT/grub.cfg ]; then
- root_part_uuid=$(blkid -o value -s PARTUUID ${device}2)
+ root_part_uuid=$(blkid -o value -s PARTUUID ${rootfs})
GRUBCFG="$EFIDIR/grub.cfg"
cp /run/media/$1/EFI/BOOT/grub.cfg $GRUBCFG
# Update grub config for the installed image
@@ -223,6 +223,7 @@ if [ -f /run/media/$1/EFI/BOOT/grub.cfg ]; then
fi
if [ -d /run/media/$1/loader ]; then
+ rootuuid=$(blkid -o value -s PARTUUID ${rootfs})
GUMMIBOOT_CFGS="/boot/loader/entries/*.conf"
# copy config files for gummiboot
cp -dr /run/media/$1/loader /boot
diff --git a/yocto-poky/meta/recipes-core/initrdscripts/initramfs-framework/finish b/yocto-poky/meta/recipes-core/initrdscripts/initramfs-framework/finish
index e712ff03c..d09bbb8be 100755
--- a/yocto-poky/meta/recipes-core/initrdscripts/initramfs-framework/finish
+++ b/yocto-poky/meta/recipes-core/initrdscripts/initramfs-framework/finish
@@ -37,7 +37,7 @@ finish_run() {
fi
mount $flags $bootparam_root $ROOTFS_DIR
else
- debug "root '$bootparam_root' doesn't exist."
+ msg "root '$bootparam_root' doesn't exist."
fi
fi
diff --git a/yocto-poky/meta/recipes-core/initrdscripts/initramfs-framework/init b/yocto-poky/meta/recipes-core/initrdscripts/initramfs-framework/init
index 9291ad5c2..204f2379a 100755
--- a/yocto-poky/meta/recipes-core/initrdscripts/initramfs-framework/init
+++ b/yocto-poky/meta/recipes-core/initrdscripts/initramfs-framework/init
@@ -58,7 +58,7 @@ fatal() {
echo $1 >/dev/console
echo >/dev/console
- if [ -n "bootparam_init_fatal_sh" ]; then
+ if [ -n "$bootparam_init_fatal_sh" ]; then
sh
else
while [ "true" ]; do
diff --git a/yocto-poky/meta/recipes-core/initscripts/initscripts-1.0/sysfs.sh b/yocto-poky/meta/recipes-core/initscripts/initscripts-1.0/sysfs.sh
index 0cfe76e23..0a52c90da 100644
--- a/yocto-poky/meta/recipes-core/initscripts/initscripts-1.0/sysfs.sh
+++ b/yocto-poky/meta/recipes-core/initscripts/initscripts-1.0/sysfs.sh
@@ -21,3 +21,7 @@ fi
if [ -e /sys/kernel/debug ] && grep -q debugfs /proc/filesystems; then
mount -t debugfs debugfs /sys/kernel/debug
fi
+
+if ! [ -e /dev/zero ] && [ -e /dev ] && grep -q devtmpfs /proc/filesystems; then
+ mount -n -t devtmpfs devtmpfs /dev
+fi
diff --git a/yocto-poky/meta/recipes-core/kbd/kbd_2.0.2.bb b/yocto-poky/meta/recipes-core/kbd/kbd_2.0.2.bb
index 136dc7ac0..49bb6c9a3 100644
--- a/yocto-poky/meta/recipes-core/kbd/kbd_2.0.2.bb
+++ b/yocto-poky/meta/recipes-core/kbd/kbd_2.0.2.bb
@@ -34,3 +34,4 @@ ALTERNATIVE_${PN} = "chvt deallocvt fgconsole openvt"
ALTERNATIVE_PRIORITY = "100"
BBCLASSEXTEND = "native"
+PARALLEL_MAKEINST = ""
diff --git a/yocto-poky/meta/recipes-core/libxml/libxml2.inc b/yocto-poky/meta/recipes-core/libxml/libxml2.inc
index 1c3c37d50..310d5bbc5 100644
--- a/yocto-poky/meta/recipes-core/libxml/libxml2.inc
+++ b/yocto-poky/meta/recipes-core/libxml/libxml2.inc
@@ -21,6 +21,22 @@ SRC_URI = "ftp://xmlsoft.org/libxml2/libxml2-${PV}.tar.gz;name=libtar \
file://libxml-m4-use-pkgconfig.patch \
file://configure.ac-fix-cross-compiling-warning.patch \
file://0001-CVE-2015-1819-Enforce-the-reader-to-run-in-constant-.patch \
+ file://CVE-2015-7941-1-Stop-parsing-on-entities-boundaries-errors.patch \
+ file://CVE-2015-7941-2-Cleanup-conditional-section-error-handling.patch \
+ file://CVE-2015-8317-Fail-parsing-early-on-if-encoding-conversion-failed.patch \
+ file://CVE-2015-7942-Another-variation-of-overflow-in-Conditional-section.patch \
+ file://CVE-2015-7942-2-Fix-an-error-in-previous-Conditional-section-patch.patch \
+ file://0001-CVE-2015-8035-Fix-XZ-compression-support-loop.patch \
+ file://CVE-2015-7498-Avoid-processing-entities-after-encoding-conversion-.patch \
+ file://0001-CVE-2015-7497-Avoid-an-heap-buffer-overflow-in-xmlDi.patch \
+ file://CVE-2015-7499-1-Add-xmlHaltParser-to-stop-the-parser.patch \
+ file://CVE-2015-7499-2-Detect-incoherency-on-GROW.patch \
+ file://0001-Fix-a-bug-on-name-parsing-at-the-end-of-current-inpu.patch \
+ file://0001-CVE-2015-7500-Fix-memory-access-error-due-to-incorre.patch \
+ file://0001-CVE-2015-8242-Buffer-overead-with-HTML-parser-in-pus.patch \
+ file://0001-CVE-2015-5312-Another-entity-expansion-issue.patch \
+ file://CVE-2015-8241.patch \
+ file://CVE-2015-8710.patch \
"
BINCONFIG = "${bindir}/xml2-config"
diff --git a/yocto-poky/meta/recipes-core/libxml/libxml2/0001-CVE-2015-5312-Another-entity-expansion-issue.patch b/yocto-poky/meta/recipes-core/libxml/libxml2/0001-CVE-2015-5312-Another-entity-expansion-issue.patch
new file mode 100644
index 000000000..979618d2c
--- /dev/null
+++ b/yocto-poky/meta/recipes-core/libxml/libxml2/0001-CVE-2015-5312-Another-entity-expansion-issue.patch
@@ -0,0 +1,39 @@
+From 69030714cde66d525a8884bda01b9e8f0abf8e1e Mon Sep 17 00:00:00 2001
+From: David Drysdale <drysdale@google.com>
+Date: Fri, 20 Nov 2015 11:13:45 +0800
+Subject: [PATCH] CVE-2015-5312 Another entity expansion issue
+
+For https://bugzilla.gnome.org/show_bug.cgi?id=756733
+It is one case where the code in place to detect entity expansions
+failed to exit when the situation was detected, leading to DoS
+Problem reported by Kostya Serebryany @ Google
+Patch provided by David Drysdale @ Google
+
+Upstream-Status: Backport
+
+CVE-2015-5312
+
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ parser.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/parser.c b/parser.c
+index b7b6668..da6e729 100644
+--- a/parser.c
++++ b/parser.c
+@@ -2806,6 +2806,10 @@ xmlStringLenDecodeEntities(xmlParserCtxtPtr ctxt, const xmlChar *str, int len,
+ 0, 0, 0);
+ ctxt->depth--;
+
++ if ((ctxt->lastError.code == XML_ERR_ENTITY_LOOP) ||
++ (ctxt->lastError.code == XML_ERR_INTERNAL_ERROR))
++ goto int_error;
++
+ if (rep != NULL) {
+ current = rep;
+ while (*current != 0) { /* non input consuming loop */
+--
+2.3.5
+
diff --git a/yocto-poky/meta/recipes-core/libxml/libxml2/0001-CVE-2015-7497-Avoid-an-heap-buffer-overflow-in-xmlDi.patch b/yocto-poky/meta/recipes-core/libxml/libxml2/0001-CVE-2015-7497-Avoid-an-heap-buffer-overflow-in-xmlDi.patch
new file mode 100644
index 000000000..955c96195
--- /dev/null
+++ b/yocto-poky/meta/recipes-core/libxml/libxml2/0001-CVE-2015-7497-Avoid-an-heap-buffer-overflow-in-xmlDi.patch
@@ -0,0 +1,40 @@
+From 6360a31a84efe69d155ed96306b9a931a40beab9 Mon Sep 17 00:00:00 2001
+From: David Drysdale <drysdale@google.com>
+Date: Fri, 20 Nov 2015 10:47:12 +0800
+Subject: [PATCH] CVE-2015-7497 Avoid an heap buffer overflow in
+ xmlDictComputeFastQKey
+
+For https://bugzilla.gnome.org/show_bug.cgi?id=756528
+It was possible to hit a negative offset in the name indexing
+used to randomize the dictionary key generation
+Reported and fix provided by David Drysdale @ Google
+
+Upstream-Status: Backport
+
+CVE-2015-7497
+
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ dict.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/dict.c b/dict.c
+index 5f71d55..8c8f931 100644
+--- a/dict.c
++++ b/dict.c
+@@ -486,7 +486,10 @@ xmlDictComputeFastQKey(const xmlChar *prefix, int plen,
+ value += 30 * (*prefix);
+
+ if (len > 10) {
+- value += name[len - (plen + 1 + 1)];
++ int offset = len - (plen + 1 + 1);
++ if (offset < 0)
++ offset = len - (10 + 1);
++ value += name[offset];
+ len = 10;
+ if (plen > 10)
+ plen = 10;
+--
+2.3.5
+
diff --git a/yocto-poky/meta/recipes-core/libxml/libxml2/0001-CVE-2015-7500-Fix-memory-access-error-due-to-incorre.patch b/yocto-poky/meta/recipes-core/libxml/libxml2/0001-CVE-2015-7500-Fix-memory-access-error-due-to-incorre.patch
new file mode 100644
index 000000000..b4860791b
--- /dev/null
+++ b/yocto-poky/meta/recipes-core/libxml/libxml2/0001-CVE-2015-7500-Fix-memory-access-error-due-to-incorre.patch
@@ -0,0 +1,131 @@
+From f1063fdbe7fa66332bbb76874101c2a7b51b519f Mon Sep 17 00:00:00 2001
+From: Daniel Veillard <veillard@redhat.com>
+Date: Fri, 20 Nov 2015 16:06:59 +0800
+Subject: [PATCH] CVE-2015-7500 Fix memory access error due to incorrect
+ entities boundaries
+
+For https://bugzilla.gnome.org/show_bug.cgi?id=756525
+handle properly the case where we popped out of the current entity
+while processing a start tag
+Reported by Kostya Serebryany @ Google
+
+This slightly modifies the output of 754946 in regression tests
+
+Upstream-Status: Backport
+
+CVE-2015-7500
+
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ parser.c | 28 ++++++++++++++++++++++------
+ result/errors/754946.xml.err | 7 +++++--
+ 2 files changed, 27 insertions(+), 8 deletions(-)
+
+diff --git a/parser.c b/parser.c
+index c7e4574..c5741e3 100644
+--- a/parser.c
++++ b/parser.c
+@@ -9348,7 +9348,7 @@ xmlParseStartTag2(xmlParserCtxtPtr ctxt, const xmlChar **pref,
+ const xmlChar **atts = ctxt->atts;
+ int maxatts = ctxt->maxatts;
+ int nratts, nbatts, nbdef;
+- int i, j, nbNs, attval, oldline, oldcol;
++ int i, j, nbNs, attval, oldline, oldcol, inputNr;
+ const xmlChar *base;
+ unsigned long cur;
+ int nsNr = ctxt->nsNr;
+@@ -9367,6 +9367,7 @@ reparse:
+ SHRINK;
+ base = ctxt->input->base;
+ cur = ctxt->input->cur - ctxt->input->base;
++ inputNr = ctxt->inputNr;
+ oldline = ctxt->input->line;
+ oldcol = ctxt->input->col;
+ nbatts = 0;
+@@ -9392,7 +9393,8 @@ reparse:
+ */
+ SKIP_BLANKS;
+ GROW;
+- if (ctxt->input->base != base) goto base_changed;
++ if ((ctxt->input->base != base) || (inputNr != ctxt->inputNr))
++ goto base_changed;
+
+ while (((RAW != '>') &&
+ ((RAW != '/') || (NXT(1) != '>')) &&
+@@ -9403,7 +9405,7 @@ reparse:
+
+ attname = xmlParseAttribute2(ctxt, prefix, localname,
+ &aprefix, &attvalue, &len, &alloc);
+- if (ctxt->input->base != base) {
++ if ((ctxt->input->base != base) || (inputNr != ctxt->inputNr)) {
+ if ((attvalue != NULL) && (alloc != 0))
+ xmlFree(attvalue);
+ attvalue = NULL;
+@@ -9552,7 +9554,8 @@ skip_ns:
+ break;
+ }
+ SKIP_BLANKS;
+- if (ctxt->input->base != base) goto base_changed;
++ if ((ctxt->input->base != base) || (inputNr != ctxt->inputNr))
++ goto base_changed;
+ continue;
+ }
+
+@@ -9589,7 +9592,8 @@ failed:
+ GROW
+ if (ctxt->instate == XML_PARSER_EOF)
+ break;
+- if (ctxt->input->base != base) goto base_changed;
++ if ((ctxt->input->base != base) || (inputNr != ctxt->inputNr))
++ goto base_changed;
+ if ((RAW == '>') || (((RAW == '/') && (NXT(1) == '>'))))
+ break;
+ if (!IS_BLANK_CH(RAW)) {
+@@ -9605,7 +9609,8 @@ failed:
+ break;
+ }
+ GROW;
+- if (ctxt->input->base != base) goto base_changed;
++ if ((ctxt->input->base != base) || (inputNr != ctxt->inputNr))
++ goto base_changed;
+ }
+
+ /*
+@@ -9772,6 +9777,17 @@ base_changed:
+ if ((ctxt->attallocs[j] != 0) && (atts[i] != NULL))
+ xmlFree((xmlChar *) atts[i]);
+ }
++
++ /*
++ * We can't switch from one entity to another in the middle
++ * of a start tag
++ */
++ if (inputNr != ctxt->inputNr) {
++ xmlFatalErrMsg(ctxt, XML_ERR_ENTITY_BOUNDARY,
++ "Start tag doesn't start and stop in the same entity\n");
++ return(NULL);
++ }
++
+ ctxt->input->cur = ctxt->input->base + cur;
+ ctxt->input->line = oldline;
+ ctxt->input->col = oldcol;
+diff --git a/result/errors/754946.xml.err b/result/errors/754946.xml.err
+index 423dff5..a75088b 100644
+--- a/result/errors/754946.xml.err
++++ b/result/errors/754946.xml.err
+@@ -11,6 +11,9 @@ Entity: line 1: parser error : DOCTYPE improperly terminated
+ Entity: line 1:
+ A<lbbbbbbbbbbbbbbbbbbb_
+ ^
++./test/errors/754946.xml:1: parser error : Start tag doesn't start and stop in the same entity
++>%SYSTEM;<![
++ ^
+ ./test/errors/754946.xml:1: parser error : Extra content at the end of the document
+-<!DOCTYPEA[<!ENTITY %
+- ^
++>%SYSTEM;<![
++ ^
+--
+2.3.5
+
diff --git a/yocto-poky/meta/recipes-core/libxml/libxml2/0001-CVE-2015-8035-Fix-XZ-compression-support-loop.patch b/yocto-poky/meta/recipes-core/libxml/libxml2/0001-CVE-2015-8035-Fix-XZ-compression-support-loop.patch
new file mode 100644
index 000000000..710735570
--- /dev/null
+++ b/yocto-poky/meta/recipes-core/libxml/libxml2/0001-CVE-2015-8035-Fix-XZ-compression-support-loop.patch
@@ -0,0 +1,38 @@
+From f0709e3ca8f8947f2d91ed34e92e38a4c23eae63 Mon Sep 17 00:00:00 2001
+From: Daniel Veillard <veillard@redhat.com>
+Date: Tue, 3 Nov 2015 15:31:25 +0800
+Subject: [PATCH] CVE-2015-8035 Fix XZ compression support loop
+
+For https://bugzilla.gnome.org/show_bug.cgi?id=757466
+DoS when parsing a specially crafted XML document if XZ support
+is compiled in (which wasn't the case for 2.9.2 and master since
+Nov 2013; fixed in the next commit!)
+
+Upstream-Status: Backport
+
+CVE-2015-8035
+
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ xzlib.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/xzlib.c b/xzlib.c
+index 0dcb9f4..1fab546 100644
+--- a/xzlib.c
++++ b/xzlib.c
+@@ -581,6 +581,10 @@ xz_decomp(xz_statep state)
+ xz_error(state, LZMA_DATA_ERROR, "compressed data error");
+ return -1;
+ }
++ if (ret == LZMA_PROG_ERROR) {
++ xz_error(state, LZMA_PROG_ERROR, "compression error");
++ return -1;
++ }
+ } while (strm->avail_out && ret != LZMA_STREAM_END);
+
+ /* update available output and crc check value */
+--
+2.3.5
+
diff --git a/yocto-poky/meta/recipes-core/libxml/libxml2/0001-CVE-2015-8242-Buffer-overead-with-HTML-parser-in-pus.patch b/yocto-poky/meta/recipes-core/libxml/libxml2/0001-CVE-2015-8242-Buffer-overead-with-HTML-parser-in-pus.patch
new file mode 100644
index 000000000..73531b3c1
--- /dev/null
+++ b/yocto-poky/meta/recipes-core/libxml/libxml2/0001-CVE-2015-8242-Buffer-overead-with-HTML-parser-in-pus.patch
@@ -0,0 +1,49 @@
+From 8fb4a770075628d6441fb17a1e435100e2f3b1a2 Mon Sep 17 00:00:00 2001
+From: Hugh Davenport <hugh@allthethings.co.nz>
+Date: Fri, 20 Nov 2015 17:16:06 +0800
+Subject: [PATCH] CVE-2015-8242 Buffer overead with HTML parser in push mode
+
+For https://bugzilla.gnome.org/show_bug.cgi?id=756372
+Error in the code pointing to the codepoint in the stack for the
+current char value instead of the pointer in the input that the SAX
+callback expects
+Reported and fixed by Hugh Davenport
+
+Upstream-Status: Backport
+
+CVE-2015-8242
+
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ HTMLparser.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/HTMLparser.c b/HTMLparser.c
+index bdf7807..b729197 100644
+--- a/HTMLparser.c
++++ b/HTMLparser.c
+@@ -5735,17 +5735,17 @@ htmlParseTryOrFinish(htmlParserCtxtPtr ctxt, int terminate) {
+ if (ctxt->keepBlanks) {
+ if (ctxt->sax->characters != NULL)
+ ctxt->sax->characters(
+- ctxt->userData, &cur, 1);
++ ctxt->userData, &in->cur[0], 1);
+ } else {
+ if (ctxt->sax->ignorableWhitespace != NULL)
+ ctxt->sax->ignorableWhitespace(
+- ctxt->userData, &cur, 1);
++ ctxt->userData, &in->cur[0], 1);
+ }
+ } else {
+ htmlCheckParagraph(ctxt);
+ if (ctxt->sax->characters != NULL)
+ ctxt->sax->characters(
+- ctxt->userData, &cur, 1);
++ ctxt->userData, &in->cur[0], 1);
+ }
+ }
+ ctxt->token = 0;
+--
+2.3.5
+
diff --git a/yocto-poky/meta/recipes-core/libxml/libxml2/0001-Fix-a-bug-on-name-parsing-at-the-end-of-current-inpu.patch b/yocto-poky/meta/recipes-core/libxml/libxml2/0001-Fix-a-bug-on-name-parsing-at-the-end-of-current-inpu.patch
new file mode 100644
index 000000000..a86b9ee86
--- /dev/null
+++ b/yocto-poky/meta/recipes-core/libxml/libxml2/0001-Fix-a-bug-on-name-parsing-at-the-end-of-current-inpu.patch
@@ -0,0 +1,138 @@
+From 51f02b0a03ea1fa6c65b3f9fd88cf60fb5803783 Mon Sep 17 00:00:00 2001
+From: Daniel Veillard <veillard@redhat.com>
+Date: Tue, 15 Sep 2015 16:50:32 +0800
+Subject: [PATCH] Fix a bug on name parsing at the end of current input buffer
+
+For https://bugzilla.gnome.org/show_bug.cgi?id=754946
+
+When hitting the end of the current input buffer while parsing
+a name, we could end up losing the beginning of the name, which
+led to various issues.
+
+Upstream-Status: backport
+
+Depend patch for CVE-2015-7500
+
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+---
+ parser.c | 29 ++++++++++++++++++++---------
+ result/errors/754946.xml | 0
+ result/errors/754946.xml.err | 16 ++++++++++++++++
+ result/errors/754946.xml.str | 4 ++++
+ test/errors/754946.xml | 1 +
+ 5 files changed, 41 insertions(+), 9 deletions(-)
+ create mode 100644 result/errors/754946.xml
+ create mode 100644 result/errors/754946.xml.err
+ create mode 100644 result/errors/754946.xml.str
+ create mode 100644 test/errors/754946.xml
+
+diff --git a/parser.c b/parser.c
+index 0edd53b..fd29a39 100644
+--- a/parser.c
++++ b/parser.c
+@@ -3491,7 +3491,14 @@ xmlParseNCNameComplex(xmlParserCtxtPtr ctxt) {
+ c = CUR_CHAR(l);
+ if (c == 0) {
+ count = 0;
++ /*
++ * when shrinking to extend the buffer we really need to preserve
++ * the part of the name we already parsed. Hence rolling back
++ * by current lenght.
++ */
++ ctxt->input->cur -= l;
+ GROW;
++ ctxt->input->cur += l;
+ if (ctxt->instate == XML_PARSER_EOF)
+ return(NULL);
+ end = ctxt->input->cur;
+@@ -3523,7 +3530,7 @@ xmlParseNCNameComplex(xmlParserCtxtPtr ctxt) {
+
+ static const xmlChar *
+ xmlParseNCName(xmlParserCtxtPtr ctxt) {
+- const xmlChar *in;
++ const xmlChar *in, *e;
+ const xmlChar *ret;
+ int count = 0;
+
+@@ -3535,16 +3542,19 @@ xmlParseNCName(xmlParserCtxtPtr ctxt) {
+ * Accelerator for simple ASCII names
+ */
+ in = ctxt->input->cur;
+- if (((*in >= 0x61) && (*in <= 0x7A)) ||
+- ((*in >= 0x41) && (*in <= 0x5A)) ||
+- (*in == '_')) {
++ e = ctxt->input->end;
++ if ((((*in >= 0x61) && (*in <= 0x7A)) ||
++ ((*in >= 0x41) && (*in <= 0x5A)) ||
++ (*in == '_')) && (in < e)) {
+ in++;
+- while (((*in >= 0x61) && (*in <= 0x7A)) ||
+- ((*in >= 0x41) && (*in <= 0x5A)) ||
+- ((*in >= 0x30) && (*in <= 0x39)) ||
+- (*in == '_') || (*in == '-') ||
+- (*in == '.'))
++ while ((((*in >= 0x61) && (*in <= 0x7A)) ||
++ ((*in >= 0x41) && (*in <= 0x5A)) ||
++ ((*in >= 0x30) && (*in <= 0x39)) ||
++ (*in == '_') || (*in == '-') ||
++ (*in == '.')) && (in < e))
+ in++;
++ if (in >= e)
++ goto complex;
+ if ((*in > 0) && (*in < 0x80)) {
+ count = in - ctxt->input->cur;
+ if ((count > XML_MAX_NAME_LENGTH) &&
+@@ -3562,6 +3572,7 @@ xmlParseNCName(xmlParserCtxtPtr ctxt) {
+ return(ret);
+ }
+ }
++complex:
+ return(xmlParseNCNameComplex(ctxt));
+ }
+
+diff --git a/result/errors/754946.xml b/result/errors/754946.xml
+new file mode 100644
+index 0000000..e69de29
+diff --git a/result/errors/754946.xml.err b/result/errors/754946.xml.err
+new file mode 100644
+index 0000000..423dff5
+--- /dev/null
++++ b/result/errors/754946.xml.err
+@@ -0,0 +1,16 @@
++Entity: line 1: parser error : internal error: xmlParseInternalSubset: error detected in Markup declaration
++
++ %SYSTEM;
++ ^
++Entity: line 1:
++A<lbbbbbbbbbbbbbbbbbbb_
++^
++Entity: line 1: parser error : DOCTYPE improperly terminated
++ %SYSTEM;
++ ^
++Entity: line 1:
++A<lbbbbbbbbbbbbbbbbbbb_
++^
++./test/errors/754946.xml:1: parser error : Extra content at the end of the document
++<!DOCTYPEA[<!ENTITY %
++ ^
+diff --git a/result/errors/754946.xml.str b/result/errors/754946.xml.str
+new file mode 100644
+index 0000000..3b748cc
+--- /dev/null
++++ b/result/errors/754946.xml.str
+@@ -0,0 +1,4 @@
++./test/errors/754946.xml:1: parser error : Extra content at the end of the document
++<!DOCTYPEA[<!ENTITY %
++ ^
++./test/errors/754946.xml : failed to parse
+diff --git a/test/errors/754946.xml b/test/errors/754946.xml
+new file mode 100644
+index 0000000..6b5f9b0
+--- /dev/null
++++ b/test/errors/754946.xml
+@@ -0,0 +1 @@
++<!DOCTYPEA[<!ENTITY % SYSTEM "A<lbbbbbbbbbbbbbbbbbbb_" >%SYSTEM;<![
+\ No newline at end of file
+--
+2.3.5
+
diff --git a/yocto-poky/meta/recipes-core/libxml/libxml2/CVE-2015-7498-Avoid-processing-entities-after-encoding-conversion-.patch b/yocto-poky/meta/recipes-core/libxml/libxml2/CVE-2015-7498-Avoid-processing-entities-after-encoding-conversion-.patch
new file mode 100644
index 000000000..47ba8970e
--- /dev/null
+++ b/yocto-poky/meta/recipes-core/libxml/libxml2/CVE-2015-7498-Avoid-processing-entities-after-encoding-conversion-.patch
@@ -0,0 +1,89 @@
+From afd27c21f6b36e22682b7da20d726bce2dcb2f43 Mon Sep 17 00:00:00 2001
+From: Daniel Veillard <veillard@redhat.com>
+Date: Mon, 9 Nov 2015 18:07:18 +0800
+Subject: [PATCH] Avoid processing entities after encoding conversion failures
+
+For https://bugzilla.gnome.org/show_bug.cgi?id=756527
+and was also raised by the Chromium team in the past
+
+When we hit a conversion failure when switching encoding,
+it is better to stop parsing there. This was treated as a
+fatal error, but the parser kept processing to extract
+more errors; that makes little sense, as the data
+is obviously corrupt and can potentially lead to unexpected behaviour.
+
+Upstream-Status: Backport
+
+CVE-2015-7498
+
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ parser.c | 7 +++++--
+ parserInternals.c | 11 ++++++++++-
+ 2 files changed, 15 insertions(+), 3 deletions(-)
+
+diff --git a/parser.c b/parser.c
+index 134afe7..c79b4e8 100644
+--- a/parser.c
++++ b/parser.c
+@@ -10665,7 +10665,8 @@ xmlParseXMLDecl(xmlParserCtxtPtr ctxt) {
+ xmlFatalErrMsg(ctxt, XML_ERR_SPACE_REQUIRED, "Blank needed here\n");
+ }
+ xmlParseEncodingDecl(ctxt);
+- if (ctxt->errNo == XML_ERR_UNSUPPORTED_ENCODING) {
++ if ((ctxt->errNo == XML_ERR_UNSUPPORTED_ENCODING) ||
++ (ctxt->instate == XML_PARSER_EOF)) {
+ /*
+ * The XML REC instructs us to stop parsing right here
+ */
+@@ -10789,6 +10790,7 @@ xmlParseDocument(xmlParserCtxtPtr ctxt) {
+
+ if (CUR == 0) {
+ xmlFatalErr(ctxt, XML_ERR_DOCUMENT_EMPTY, NULL);
++ return(-1);
+ }
+
+ /*
+@@ -10806,7 +10808,8 @@ xmlParseDocument(xmlParserCtxtPtr ctxt) {
+ * Note that we will switch encoding on the fly.
+ */
+ xmlParseXMLDecl(ctxt);
+- if (ctxt->errNo == XML_ERR_UNSUPPORTED_ENCODING) {
++ if ((ctxt->errNo == XML_ERR_UNSUPPORTED_ENCODING) ||
++ (ctxt->instate == XML_PARSER_EOF)) {
+ /*
+ * The XML REC instructs us to stop parsing right here
+ */
+diff --git a/parserInternals.c b/parserInternals.c
+index df204fd..c8230c1 100644
+--- a/parserInternals.c
++++ b/parserInternals.c
+@@ -937,6 +937,7 @@ xmlSwitchEncoding(xmlParserCtxtPtr ctxt, xmlCharEncoding enc)
+ {
+ xmlCharEncodingHandlerPtr handler;
+ int len = -1;
++ int ret;
+
+ if (ctxt == NULL) return(-1);
+ switch (enc) {
+@@ -1097,7 +1098,15 @@ xmlSwitchEncoding(xmlParserCtxtPtr ctxt, xmlCharEncoding enc)
+ if (handler == NULL)
+ return(-1);
+ ctxt->charset = XML_CHAR_ENCODING_UTF8;
+- return(xmlSwitchToEncodingInt(ctxt, handler, len));
++ ret = xmlSwitchToEncodingInt(ctxt, handler, len);
++ if ((ret < 0) || (ctxt->errNo == XML_I18N_CONV_FAILED)) {
++ /*
++ * on encoding conversion errors, stop the parser
++ */
++ xmlStopParser(ctxt);
++ ctxt->errNo = XML_I18N_CONV_FAILED;
++ }
++ return(ret);
+ }
+
+ /**
+--
+2.3.5
+
diff --git a/yocto-poky/meta/recipes-core/libxml/libxml2/CVE-2015-7499-1-Add-xmlHaltParser-to-stop-the-parser.patch b/yocto-poky/meta/recipes-core/libxml/libxml2/CVE-2015-7499-1-Add-xmlHaltParser-to-stop-the-parser.patch
new file mode 100644
index 000000000..e39ec65cd
--- /dev/null
+++ b/yocto-poky/meta/recipes-core/libxml/libxml2/CVE-2015-7499-1-Add-xmlHaltParser-to-stop-the-parser.patch
@@ -0,0 +1,88 @@
+From 28cd9cb747a94483f4aea7f0968d202c20bb4cfc Mon Sep 17 00:00:00 2001
+From: Daniel Veillard <veillard@redhat.com>
+Date: Fri, 20 Nov 2015 14:55:30 +0800
+Subject: [PATCH] Add xmlHaltParser() to stop the parser
+
+The problem is doing it in a consistent and safe fashion
+It's more complex than just setting ctxt->instate = XML_PARSER_EOF
+Update the public function to reuse that new internal routine
+
+Upstream-Status: Backport
+
+CVE-2015-7499-1
+
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ parser.c | 34 +++++++++++++++++++++++++++++-----
+ 1 file changed, 29 insertions(+), 5 deletions(-)
+
+diff --git a/parser.c b/parser.c
+index da6e729..b6e99b1 100644
+--- a/parser.c
++++ b/parser.c
+@@ -94,6 +94,8 @@ static xmlParserCtxtPtr
+ xmlCreateEntityParserCtxtInternal(const xmlChar *URL, const xmlChar *ID,
+ const xmlChar *base, xmlParserCtxtPtr pctx);
+
++static void xmlHaltParser(xmlParserCtxtPtr ctxt);
++
+ /************************************************************************
+ * *
+ * Arbitrary limits set in the parser. See XML_PARSE_HUGE *
+@@ -12625,25 +12627,47 @@ xmlCreatePushParserCtxt(xmlSAXHandlerPtr sax, void *user_data,
+ #endif /* LIBXML_PUSH_ENABLED */
+
+ /**
+- * xmlStopParser:
++ * xmlHaltParser:
+ * @ctxt: an XML parser context
+ *
+- * Blocks further parser processing
++ * Blocks further parser processing don't override error
++ * for internal use
+ */
+-void
+-xmlStopParser(xmlParserCtxtPtr ctxt) {
++static void
++xmlHaltParser(xmlParserCtxtPtr ctxt) {
+ if (ctxt == NULL)
+ return;
+ ctxt->instate = XML_PARSER_EOF;
+- ctxt->errNo = XML_ERR_USER_STOP;
+ ctxt->disableSAX = 1;
+ if (ctxt->input != NULL) {
++ /*
++ * in case there was a specific allocation deallocate before
++ * overriding base
++ */
++ if (ctxt->input->free != NULL) {
++ ctxt->input->free((xmlChar *) ctxt->input->base);
++ ctxt->input->free = NULL;
++ }
+ ctxt->input->cur = BAD_CAST"";
+ ctxt->input->base = ctxt->input->cur;
+ }
+ }
+
+ /**
++ * xmlStopParser:
++ * @ctxt: an XML parser context
++ *
++ * Blocks further parser processing
++ */
++void
++xmlStopParser(xmlParserCtxtPtr ctxt) {
++ if (ctxt == NULL)
++ return;
++ xmlHaltParser(ctxt);
++ ctxt->errNo = XML_ERR_USER_STOP;
++}
++
++/**
+ * xmlCreateIOParserCtxt:
+ * @sax: a SAX handler
+ * @user_data: The user data returned on SAX callbacks
+--
+2.3.5
+
diff --git a/yocto-poky/meta/recipes-core/libxml/libxml2/CVE-2015-7499-2-Detect-incoherency-on-GROW.patch b/yocto-poky/meta/recipes-core/libxml/libxml2/CVE-2015-7499-2-Detect-incoherency-on-GROW.patch
new file mode 100644
index 000000000..aff392095
--- /dev/null
+++ b/yocto-poky/meta/recipes-core/libxml/libxml2/CVE-2015-7499-2-Detect-incoherency-on-GROW.patch
@@ -0,0 +1,43 @@
+From 35bcb1d758ed70aa7b257c9c3b3ff55e54e3d0da Mon Sep 17 00:00:00 2001
+From: Daniel Veillard <veillard@redhat.com>
+Date: Fri, 20 Nov 2015 15:04:09 +0800
+Subject: [PATCH] Detect incoherency on GROW
+
+The current pointer into the input has to be between base and end;
+if not, stop everything: we have an internal state error.
+
+Upstream-Status: Backport
+
+CVE-2015-7499-2
+
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ parser.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+diff --git a/parser.c b/parser.c
+index 1810f99..ab007aa 100644
+--- a/parser.c
++++ b/parser.c
+@@ -2075,9 +2075,16 @@ static void xmlGROW (xmlParserCtxtPtr ctxt) {
+ ((ctxt->input->buf) && (ctxt->input->buf->readcallback != (xmlInputReadCallback) xmlNop)) &&
+ ((ctxt->options & XML_PARSE_HUGE) == 0)) {
+ xmlFatalErr(ctxt, XML_ERR_INTERNAL_ERROR, "Huge input lookup");
+- ctxt->instate = XML_PARSER_EOF;
++ xmlHaltParser(ctxt);
++ return;
+ }
+ xmlParserInputGrow(ctxt->input, INPUT_CHUNK);
++ if ((ctxt->input->cur > ctxt->input->end) ||
++ (ctxt->input->cur < ctxt->input->base)) {
++ xmlHaltParser(ctxt);
++ xmlFatalErr(ctxt, XML_ERR_INTERNAL_ERROR, "cur index out of bound");
++ return;
++ }
+ if ((ctxt->input->cur != NULL) && (*ctxt->input->cur == 0) &&
+ (xmlParserInputGrow(ctxt->input, INPUT_CHUNK) <= 0))
+ xmlPopInput(ctxt);
+--
+2.3.5
+
diff --git a/yocto-poky/meta/recipes-core/libxml/libxml2/CVE-2015-7941-1-Stop-parsing-on-entities-boundaries-errors.patch b/yocto-poky/meta/recipes-core/libxml/libxml2/CVE-2015-7941-1-Stop-parsing-on-entities-boundaries-errors.patch
new file mode 100644
index 000000000..11da9f9bd
--- /dev/null
+++ b/yocto-poky/meta/recipes-core/libxml/libxml2/CVE-2015-7941-1-Stop-parsing-on-entities-boundaries-errors.patch
@@ -0,0 +1,39 @@
+From a7dfab7411cbf545f359dd3157e5df1eb0e7ce31 Mon Sep 17 00:00:00 2001
+From: Daniel Veillard <veillard@redhat.com>
+Date: Mon, 23 Feb 2015 11:17:35 +0800
+Subject: [PATCH] Stop parsing on entities boundaries errors
+
+For https://bugzilla.gnome.org/show_bug.cgi?id=744980
+
+There are times, like on unterminated entities, when it's preferable to
+stop parsing, even if that means less error reporting. Entities are
+fed back into the parser for further processing, and if they are ill defined
+then it's possible to make the parser misbehave. Also do the same on
+Conditional Sections if the input is broken, as the structure of
+the document can't be guessed.
+
+Upstream-Status: Backport
+
+CVE-2015-7941-1
+
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ parser.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/parser.c b/parser.c
+index a8d1b67..bbe97eb 100644
+--- a/parser.c
++++ b/parser.c
+@@ -5658,6 +5658,7 @@ xmlParseEntityDecl(xmlParserCtxtPtr ctxt) {
+ if (RAW != '>') {
+ xmlFatalErrMsgStr(ctxt, XML_ERR_ENTITY_NOT_FINISHED,
+ "xmlParseEntityDecl: entity %s not terminated\n", name);
++ xmlStopParser(ctxt);
+ } else {
+ if (input != ctxt->input) {
+ xmlFatalErrMsg(ctxt, XML_ERR_ENTITY_BOUNDARY,
+--
+2.3.5
+
diff --git a/yocto-poky/meta/recipes-core/libxml/libxml2/CVE-2015-7941-2-Cleanup-conditional-section-error-handling.patch b/yocto-poky/meta/recipes-core/libxml/libxml2/CVE-2015-7941-2-Cleanup-conditional-section-error-handling.patch
new file mode 100644
index 000000000..b7bd96053
--- /dev/null
+++ b/yocto-poky/meta/recipes-core/libxml/libxml2/CVE-2015-7941-2-Cleanup-conditional-section-error-handling.patch
@@ -0,0 +1,56 @@
+From 9b8512337d14c8ddf662fcb98b0135f225a1c489 Mon Sep 17 00:00:00 2001
+From: Daniel Veillard <veillard@redhat.com>
+Date: Mon, 23 Feb 2015 11:29:20 +0800
+Subject: [PATCH] Cleanup conditional section error handling
+
+For https://bugzilla.gnome.org/show_bug.cgi?id=744980
+
+The error handling of Conditional Sections also needs to be
+straightened, as the structure of the document can't be
+guessed on a failure there and it's better to stop parsing,
+as further errors are likely to be irrelevant.
+
+Upstream-Status: Backport
+
+CVE-2015-7941-2
+
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ parser.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/parser.c b/parser.c
+index bbe97eb..fe603ac 100644
+--- a/parser.c
++++ b/parser.c
+@@ -6770,6 +6770,8 @@ xmlParseConditionalSections(xmlParserCtxtPtr ctxt) {
+ SKIP_BLANKS;
+ if (RAW != '[') {
+ xmlFatalErr(ctxt, XML_ERR_CONDSEC_INVALID, NULL);
++ xmlStopParser(ctxt);
++ return;
+ } else {
+ if (ctxt->input->id != id) {
+ xmlValidityError(ctxt, XML_ERR_ENTITY_BOUNDARY,
+@@ -6830,6 +6832,8 @@ xmlParseConditionalSections(xmlParserCtxtPtr ctxt) {
+ SKIP_BLANKS;
+ if (RAW != '[') {
+ xmlFatalErr(ctxt, XML_ERR_CONDSEC_INVALID, NULL);
++ xmlStopParser(ctxt);
++ return;
+ } else {
+ if (ctxt->input->id != id) {
+ xmlValidityError(ctxt, XML_ERR_ENTITY_BOUNDARY,
+@@ -6885,6 +6889,8 @@ xmlParseConditionalSections(xmlParserCtxtPtr ctxt) {
+
+ } else {
+ xmlFatalErr(ctxt, XML_ERR_CONDSEC_INVALID_KEYWORD, NULL);
++ xmlStopParser(ctxt);
++ return;
+ }
+
+ if (RAW == 0)
+--
+2.3.5
+
diff --git a/yocto-poky/meta/recipes-core/libxml/libxml2/CVE-2015-7942-2-Fix-an-error-in-previous-Conditional-section-patch.patch b/yocto-poky/meta/recipes-core/libxml/libxml2/CVE-2015-7942-2-Fix-an-error-in-previous-Conditional-section-patch.patch
new file mode 100644
index 000000000..34b60362c
--- /dev/null
+++ b/yocto-poky/meta/recipes-core/libxml/libxml2/CVE-2015-7942-2-Fix-an-error-in-previous-Conditional-section-patch.patch
@@ -0,0 +1,35 @@
+From 41ac9049a27f52e7a1f3b341f8714149fc88d450 Mon Sep 17 00:00:00 2001
+From: Daniel Veillard <veillard@redhat.com>
+Date: Tue, 27 Oct 2015 10:53:44 +0800
+Subject: [PATCH] Fix an error in previous Conditional section patch
+
+An off-by-one mistake in the change led to errors on correct
+documents where the end of the included entity was exactly
+the end of the conditional section, leading to regtest failures
+
+Upstream-Status: Backport
+
+CVE-2015-7942-2
+
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ parser.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/parser.c b/parser.c
+index b9217ff..d67b300 100644
+--- a/parser.c
++++ b/parser.c
+@@ -6916,7 +6916,7 @@ xmlParseConditionalSections(xmlParserCtxtPtr ctxt) {
+ NULL, NULL);
+ }
+ if ((ctxt-> instate != XML_PARSER_EOF) &&
+- ((ctxt->input->cur + 3) < ctxt->input->end))
++ ((ctxt->input->cur + 3) <= ctxt->input->end))
+ SKIP(3);
+ }
+ }
+--
+2.3.5
+
diff --git a/yocto-poky/meta/recipes-core/libxml/libxml2/CVE-2015-7942-Another-variation-of-overflow-in-Conditional-section.patch b/yocto-poky/meta/recipes-core/libxml/libxml2/CVE-2015-7942-Another-variation-of-overflow-in-Conditional-section.patch
new file mode 100644
index 000000000..40082ec07
--- /dev/null
+++ b/yocto-poky/meta/recipes-core/libxml/libxml2/CVE-2015-7942-Another-variation-of-overflow-in-Conditional-section.patch
@@ -0,0 +1,39 @@
+From bd0526e66a56e75a18da8c15c4750db8f801c52d Mon Sep 17 00:00:00 2001
+From: Daniel Veillard <veillard@redhat.com>
+Date: Fri, 23 Oct 2015 19:02:28 +0800
+Subject: [PATCH] Another variation of overflow in Conditional sections
+
+Which happens after the previous fix to
+https://bugzilla.gnome.org/show_bug.cgi?id=756456
+
+But when stopping the parser and exiting we didn't pop the intermediary entities,
+and doing the SKIP there applies to an input which may be too small
+
+Upstream-Status: Backport
+
+CVE-2015-7942
+
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ parser.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/parser.c b/parser.c
+index a65e4cc..b9217ff 100644
+--- a/parser.c
++++ b/parser.c
+@@ -6915,7 +6915,9 @@ xmlParseConditionalSections(xmlParserCtxtPtr ctxt) {
+ "All markup of the conditional section is not in the same entity\n",
+ NULL, NULL);
+ }
+- SKIP(3);
++ if ((ctxt-> instate != XML_PARSER_EOF) &&
++ ((ctxt->input->cur + 3) < ctxt->input->end))
++ SKIP(3);
+ }
+ }
+
+--
+2.3.5
+
diff --git a/yocto-poky/meta/recipes-core/libxml/libxml2/CVE-2015-8241.patch b/yocto-poky/meta/recipes-core/libxml/libxml2/CVE-2015-8241.patch
new file mode 100644
index 000000000..89a46ad17
--- /dev/null
+++ b/yocto-poky/meta/recipes-core/libxml/libxml2/CVE-2015-8241.patch
@@ -0,0 +1,40 @@
+From ab2b9a93ff19cedde7befbf2fcc48c6e352b6cbe Mon Sep 17 00:00:00 2001
+From: Hugh Davenport <hugh@allthethings.co.nz>
+Date: Tue, 3 Nov 2015 20:40:49 +0800
+Subject: [PATCH] Avoid extra processing of MarkupDecl when EOF
+
+For https://bugzilla.gnome.org/show_bug.cgi?id=756263
+
+One place where ctxt->instate == XML_PARSER_EOF, which was set up
+by entity detection issues, doesn't get noticed and is even overridden
+
+Upstream-Status: Backport
+
+https://git.gnome.org/browse/libxml2/commit/?id=ab2b9a93ff19cedde7befbf2fcc48c6e352b6cbe
+
+CVE: CVE-2015-8241
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ parser.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+Index: libxml2-2.9.2/parser.c
+===================================================================
+--- libxml2-2.9.2.orig/parser.c
++++ libxml2-2.9.2/parser.c
+@@ -6999,6 +6999,14 @@ xmlParseMarkupDecl(xmlParserCtxtPtr ctxt
+ xmlParsePI(ctxt);
+ }
+ }
++
++ /*
++ * detect requirement to exit there and act accordingly
++ * and avoid having instate overriden later on
++ */
++ if (ctxt->instate == XML_PARSER_EOF)
++ return;
++
+ /*
+ * This is only for internal subset. On external entities,
+ * the replacement is done before parsing stage
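The early return added above is a plain guard: once a sub-parser has put the
context into XML_PARSER_EOF, nothing later in xmlParseMarkupDecl may touch
instate again. A reduced, hedged illustration of the pattern (stand-in enum
and struct, not libxml2's types):

    #include <stdio.h>

    enum state { PARSING, PARSER_EOF };
    struct ctx { enum state instate; };

    static void parse_markup_decl(struct ctx *ctxt)
    {
        /* ...sub-parsers may have flagged a fatal condition already... */
        if (ctxt->instate == PARSER_EOF)
            return;                  /* bail out before the state is overwritten */
        ctxt->instate = PARSING;     /* would otherwise mask the EOF condition   */
    }

    int main(void)
    {
        struct ctx c = { PARSER_EOF };
        parse_markup_decl(&c);
        printf("EOF state preserved: %d\n", c.instate == PARSER_EOF);  /* 1 */
        return 0;
    }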
diff --git a/yocto-poky/meta/recipes-core/libxml/libxml2/CVE-2015-8317-Fail-parsing-early-on-if-encoding-conversion-failed.patch b/yocto-poky/meta/recipes-core/libxml/libxml2/CVE-2015-8317-Fail-parsing-early-on-if-encoding-conversion-failed.patch
new file mode 100644
index 000000000..59425cbfc
--- /dev/null
+++ b/yocto-poky/meta/recipes-core/libxml/libxml2/CVE-2015-8317-Fail-parsing-early-on-if-encoding-conversion-failed.patch
@@ -0,0 +1,42 @@
+From 709a952110e98621c9b78c4f26462a9d8333102e Mon Sep 17 00:00:00 2001
+From: Daniel Veillard <veillard@redhat.com>
+Date: Mon, 29 Jun 2015 16:10:26 +0800
+Subject: [PATCH] Fail parsing early on if encoding conversion failed
+
+For https://bugzilla.gnome.org/show_bug.cgi?id=751631
+
+If we fail converting the current input stream while
+processing the encoding declaration of the XMLDecl
+then it's safer to just abort there and not try to
+report further errors.
+
+Upstream-Status: Backport
+
+CVE-2015-8317
+
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ parser.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/parser.c b/parser.c
+index a3a9568..0edd53b 100644
+--- a/parser.c
++++ b/parser.c
+@@ -10471,7 +10471,11 @@ xmlParseEncodingDecl(xmlParserCtxtPtr ctxt) {
+
+ handler = xmlFindCharEncodingHandler((const char *) encoding);
+ if (handler != NULL) {
+- xmlSwitchToEncoding(ctxt, handler);
++ if (xmlSwitchToEncoding(ctxt, handler) < 0) {
++ /* failed to convert */
++ ctxt->errNo = XML_ERR_UNSUPPORTED_ENCODING;
++ return(NULL);
++ }
+ } else {
+ xmlFatalErrMsgStr(ctxt, XML_ERR_UNSUPPORTED_ENCODING,
+ "Unsupported encoding %s\n", encoding);
+--
+2.3.5
+
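The shape of the fix is simply to treat a failed encoding switch as fatal
instead of pressing on with a half-converted input. A hedged sketch of that
pattern; switch_to_encoding() below is a dummy standing in for
xmlSwitchToEncoding():

    #include <stdio.h>

    /* stand-in converter: returns < 0 when the encoding cannot be handled */
    static int switch_to_encoding(const char *enc)
    {
        (void)enc;
        return -1;
    }

    static const char *parse_encoding_decl(const char *enc)
    {
        if (switch_to_encoding(enc) < 0) {
            fprintf(stderr, "unsupported encoding %s, aborting parse\n", enc);
            return NULL;             /* abort early, report nothing further */
        }
        return enc;
    }

    int main(void)
    {
        return parse_encoding_decl("X-BOGUS-CODEC") == NULL ? 0 : 1;
    }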
diff --git a/yocto-poky/meta/recipes-core/libxml/libxml2/CVE-2015-8710.patch b/yocto-poky/meta/recipes-core/libxml/libxml2/CVE-2015-8710.patch
new file mode 100644
index 000000000..be06cc22c
--- /dev/null
+++ b/yocto-poky/meta/recipes-core/libxml/libxml2/CVE-2015-8710.patch
@@ -0,0 +1,71 @@
+From e724879d964d774df9b7969fc846605aa1bac54c Mon Sep 17 00:00:00 2001
+From: Daniel Veillard <veillard@redhat.com>
+Date: Fri, 30 Oct 2015 21:14:55 +0800
+Subject: [PATCH] Fix parsing short unclosed comment uninitialized access
+
+For https://bugzilla.gnome.org/show_bug.cgi?id=746048
+The HTML parser was too optimistic when processing comments and
+didn't check for the end of the stream on the first 2 characters
+
+Upstream-Status: Backport
+
+https://git.gnome.org/browse/libxml2/commit/?id=e724879d964d774df9b7969fc846605aa1bac54c
+
+CVE: CVE-2015-8710
+
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ HTMLparser.c | 21 ++++++++++++++-------
+ 1 file changed, 14 insertions(+), 7 deletions(-)
+
+Index: libxml2-2.9.2/HTMLparser.c
+===================================================================
+--- libxml2-2.9.2.orig/HTMLparser.c
++++ libxml2-2.9.2/HTMLparser.c
+@@ -3245,12 +3245,17 @@ htmlParseComment(htmlParserCtxtPtr ctxt)
+ ctxt->instate = state;
+ return;
+ }
++ len = 0;
++ buf[len] = 0;
+ q = CUR_CHAR(ql);
++ if (!IS_CHAR(q))
++ goto unfinished;
+ NEXTL(ql);
+ r = CUR_CHAR(rl);
++ if (!IS_CHAR(r))
++ goto unfinished;
+ NEXTL(rl);
+ cur = CUR_CHAR(l);
+- len = 0;
+ while (IS_CHAR(cur) &&
+ ((cur != '>') ||
+ (r != '-') || (q != '-'))) {
+@@ -3281,18 +3286,20 @@ htmlParseComment(htmlParserCtxtPtr ctxt)
+ }
+ }
+ buf[len] = 0;
+- if (!IS_CHAR(cur)) {
+- htmlParseErr(ctxt, XML_ERR_COMMENT_NOT_FINISHED,
+- "Comment not terminated \n<!--%.50s\n", buf, NULL);
+- xmlFree(buf);
+- } else {
++ if (IS_CHAR(cur)) {
+ NEXT;
+ if ((ctxt->sax != NULL) && (ctxt->sax->comment != NULL) &&
+ (!ctxt->disableSAX))
+ ctxt->sax->comment(ctxt->userData, buf);
+ xmlFree(buf);
++ ctxt->instate = state;
++ return;
+ }
+- ctxt->instate = state;
++
++unfinished:
++ htmlParseErr(ctxt, XML_ERR_COMMENT_NOT_FINISHED,
++ "Comment not terminated \n<!--%.50s\n", buf, NULL);
++ xmlFree(buf);
+ }
+
+ /**
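The reordering above amounts to: read the first two comment characters only
if the stream still has them, otherwise take the "unfinished" error path so q
and r are never consumed uninitialized. A reduced sketch of that control flow
with plain chars (no CUR_CHAR/NEXTL machinery):

    #include <stdio.h>

    /* returns 0 for a short, unclosed comment such as "<!--" at end of input */
    static int parse_comment(const char *p, const char *end)
    {
        char q, r;

        if (p >= end) goto unfinished;
        q = *p++;                    /* first character after "<!--"          */
        if (p >= end) goto unfinished;
        r = *p++;                    /* second character                      */
        (void)q; (void)r;            /* ...scan for the closing "-->" here... */
        return 1;

    unfinished:
        fprintf(stderr, "Comment not terminated\n");
        return 0;
    }

    int main(void)
    {
        const char *s = "";          /* stream ends right after "<!--" */
        return parse_comment(s, s) == 0 ? 0 : 1;
    }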
diff --git a/yocto-poky/meta/recipes-core/meta/meta-ide-support.bb b/yocto-poky/meta/recipes-core/meta/meta-ide-support.bb
index 2f9291281..86c57cda2 100644
--- a/yocto-poky/meta/recipes-core/meta/meta-ide-support.bb
+++ b/yocto-poky/meta/recipes-core/meta/meta-ide-support.bb
@@ -13,5 +13,4 @@ do_populate_ide_support () {
toolchain_create_tree_env_script
}
-do_populate_ide_support[nostamp] = "1"
addtask populate_ide_support before do_build after do_install
diff --git a/yocto-poky/meta/recipes-core/meta/nativesdk-buildtools-perl-dummy.bb b/yocto-poky/meta/recipes-core/meta/nativesdk-buildtools-perl-dummy.bb
index d971c3ca3..904173489 100644
--- a/yocto-poky/meta/recipes-core/meta/nativesdk-buildtools-perl-dummy.bb
+++ b/yocto-poky/meta/recipes-core/meta/nativesdk-buildtools-perl-dummy.bb
@@ -2,10 +2,17 @@ SUMMARY = "Dummy package which ensures perl is excluded from buildtools"
LICENSE = "MIT"
LIC_FILES_CHKSUM = "file://${COMMON_LICENSE_DIR}/MIT;md5=0835ade698e0bcf8506ecda2f7b4f302"
-inherit nativesdk
+inherit allarch
-# Put it somewhere separate to ensure it's never used except when we want it
-PACKAGE_ARCH = "buildtools-dummy-${SDKPKGSUFFIX}"
+PR = "r1"
+
+python() {
+ # Put the package somewhere separate to ensure it's never used except
+ # when we want it
+ # (note that we have to do this in anonymous python here to avoid
+ # allarch.bbclass disabling itself)
+ d.setVar('PACKAGE_ARCH', 'buildtools-dummy-${SDKPKGSUFFIX}')
+}
PERLPACKAGES = "nativesdk-perl \
nativesdk-perl-module-file-path"
diff --git a/yocto-poky/meta/recipes-core/meta/signing-keys.bb b/yocto-poky/meta/recipes-core/meta/signing-keys.bb
new file mode 100644
index 000000000..cc401f3b6
--- /dev/null
+++ b/yocto-poky/meta/recipes-core/meta/signing-keys.bb
@@ -0,0 +1,45 @@
+# Copyright (C) 2015 Intel Corporation
+# Released under the MIT license (see COPYING.MIT for the terms)
+
+DESCRIPTION = "Make public keys of the signing keys available"
+LICENSE = "MIT"
+PACKAGES = ""
+
+do_fetch[noexec] = "1"
+do_unpack[noexec] = "1"
+do_patch[noexec] = "1"
+do_configure[noexec] = "1"
+do_compile[noexec] = "1"
+do_install[noexec] = "1"
+do_package[noexec] = "1"
+do_packagedata[noexec] = "1"
+do_package_write_ipk[noexec] = "1"
+do_package_write_rpm[noexec] = "1"
+do_package_write_deb[noexec] = "1"
+do_populate_sysroot[noexec] = "1"
+
+EXCLUDE_FROM_WORLD = "1"
+
+def export_gpg_pubkey(d, keyid, path):
+ import bb
+ gpg_bin = d.getVar('GPG_BIN', True) or \
+ bb.utils.which(os.getenv('PATH'), "gpg")
+ cmd = '%s --batch --yes --export --armor -o %s %s' % \
+ (gpg_bin, path, keyid)
+ status, output = oe.utils.getstatusoutput(cmd)
+ if status:
+ raise bb.build.FuncFailed('Failed to export gpg public key (%s): %s' %
+ (keyid, output))
+
+python do_export_public_keys () {
+ if d.getVar("RPM_SIGN_PACKAGES", True):
+ # Export public key of the rpm signing key
+ export_gpg_pubkey(d, d.getVar("RPM_GPG_NAME", True),
+ d.getVar('RPM_GPG_PUBKEY', True))
+
+ if d.getVar('PACKAGE_FEED_SIGN', True) == '1':
+ # Export public key of the feed signing key
+ export_gpg_pubkey(d, d.getVar("PACKAGE_FEED_GPG_NAME", True),
+ d.getVar('PACKAGE_FEED_GPG_PUBKEY', True))
+}
+addtask do_export_public_keys before do_build
diff --git a/yocto-poky/meta/recipes-core/meta/uninative-tarball.bb b/yocto-poky/meta/recipes-core/meta/uninative-tarball.bb
index 41f7927e1..21f3bd985 100644
--- a/yocto-poky/meta/recipes-core/meta/uninative-tarball.bb
+++ b/yocto-poky/meta/recipes-core/meta/uninative-tarball.bb
@@ -7,6 +7,7 @@ TOOLCHAIN_TARGET_TASK = ""
TOOLCHAIN_HOST_TASK = "\
nativesdk-glibc \
+ nativesdk-glibc-gconv-ibm850 \
nativesdk-patchelf \
"
diff --git a/yocto-poky/meta/recipes-core/os-release/os-release.bb b/yocto-poky/meta/recipes-core/os-release/os-release.bb
index cc431d2cd..c690b82b2 100644
--- a/yocto-poky/meta/recipes-core/os-release/os-release.bb
+++ b/yocto-poky/meta/recipes-core/os-release/os-release.bb
@@ -32,11 +32,12 @@ python do_compile () {
f.write('{0}={1}\n'.format(field, value))
if d.getVar('RPM_SIGN_PACKAGES', True) == '1':
rpm_gpg_pubkey = d.getVar('RPM_GPG_PUBKEY', True)
- os.mkdir('${B}/rpm-gpg')
- distro_version = self.d.getVar('DISTRO_VERSION', True) or "oe.0"
+ bb.utils.mkdirhier('${B}/rpm-gpg')
+ distro_version = d.getVar('DISTRO_VERSION', True) or "oe.0"
shutil.copy2(rpm_gpg_pubkey, d.expand('${B}/rpm-gpg/RPM-GPG-KEY-%s' % distro_version))
}
do_compile[vardeps] += "${OS_RELEASE_FIELDS}"
+do_compile[depends] += "signing-keys:do_export_public_keys"
do_install () {
install -d ${D}${sysconfdir}
diff --git a/yocto-poky/meta/recipes-core/packagegroups/packagegroup-core-standalone-sdk-target.bb b/yocto-poky/meta/recipes-core/packagegroups/packagegroup-core-standalone-sdk-target.bb
index 37f5e43a7..6997f396b 100644
--- a/yocto-poky/meta/recipes-core/packagegroups/packagegroup-core-standalone-sdk-target.bb
+++ b/yocto-poky/meta/recipes-core/packagegroups/packagegroup-core-standalone-sdk-target.bb
@@ -7,6 +7,8 @@ inherit packagegroup
RDEPENDS_${PN} = "\
libgcc \
libgcc-dev \
+ libatomic \
+ libatomic-dev \
libstdc++ \
libstdc++-dev \
${LIBC_DEPENDENCIES} \
diff --git a/yocto-poky/meta/recipes-core/readline/readline-6.3/readline63-003 b/yocto-poky/meta/recipes-core/readline/readline-6.3/readline-cve-2014-2524.patch
index 98a9d810b..98a9d810b 100644
--- a/yocto-poky/meta/recipes-core/readline/readline-6.3/readline63-003
+++ b/yocto-poky/meta/recipes-core/readline/readline-6.3/readline-cve-2014-2524.patch
diff --git a/yocto-poky/meta/recipes-core/readline/readline_6.3.bb b/yocto-poky/meta/recipes-core/readline/readline_6.3.bb
index 55964a6cf..fc362ae5a 100644
--- a/yocto-poky/meta/recipes-core/readline/readline_6.3.bb
+++ b/yocto-poky/meta/recipes-core/readline/readline_6.3.bb
@@ -1,6 +1,6 @@
require readline.inc
-SRC_URI += "file://readline63-003 \
+SRC_URI += "file://readline-cve-2014-2524.patch;striplevel=0 \
file://readline-dispatch-multikey.patch"
SRC_URI[archive.md5sum] = "33c8fb279e981274f485fd91da77e94a"
diff --git a/yocto-poky/meta/recipes-core/systemd/systemd/0001-fix-build-on-uClibc-exp10.patch b/yocto-poky/meta/recipes-core/systemd/systemd/0001-fix-build-on-uClibc-exp10.patch
new file mode 100644
index 000000000..76ce4b781
--- /dev/null
+++ b/yocto-poky/meta/recipes-core/systemd/systemd/0001-fix-build-on-uClibc-exp10.patch
@@ -0,0 +1,22 @@
+Inspired by: http://peter.korsgaard.com/patches/alsa-utils/alsamixer-fix-build-on-uClibc-exp10.patch
+
+The exp10 extension is not part of uClibc, so compute it from exp() and log().
+
+
+Signed-off-by: Samuel Martin <s.martin49@gmail.com>
+
+Upstream-Status: Pending
+
+Index: git/src/basic/missing.h
+===================================================================
+--- git.orig/src/basic/missing.h
++++ git/src/basic/missing.h
+@@ -1036,3 +1036,8 @@ static inline int kcmp(pid_t pid1, pid_t
+ #ifndef INPUT_PROP_ACCELEROMETER
+ #define INPUT_PROP_ACCELEROMETER 0x06
+ #endif
++
++#ifdef __UCLIBC__
++/* 10^x = 10^(log e^x) = (e^x)^log10 = e^(x * log 10) */
++#define exp10(x) (exp((x) * log(10)))
++#endif /* __UCLIBC__ */
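The macro leans on the identity 10^x = e^(x * ln 10), so only exp() and log()
from the standard math library are needed. A quick standalone check of the
substitution (link with -lm):

    #include <math.h>
    #include <stdio.h>

    /* same fallback as the patch: 10^x = e^(x * ln 10) */
    #define my_exp10(x) (exp((x) * log(10)))

    int main(void)
    {
        printf("my_exp10(3)    = %f\n", my_exp10(3));     /* ~1000.000000 */
        printf("my_exp10(-1.5) = %f\n", my_exp10(-1.5));  /* ~0.031623    */
        return 0;
    }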
diff --git a/yocto-poky/meta/recipes-core/systemd/systemd/0022-Use-getenv-when-secure-versions-are-not-available.patch b/yocto-poky/meta/recipes-core/systemd/systemd/0022-Use-getenv-when-secure-versions-are-not-available.patch
new file mode 100644
index 000000000..30e38173e
--- /dev/null
+++ b/yocto-poky/meta/recipes-core/systemd/systemd/0022-Use-getenv-when-secure-versions-are-not-available.patch
@@ -0,0 +1,39 @@
+From cb71e4beea3b3b11e5951f95c829cd2eee9fcf7b Mon Sep 17 00:00:00 2001
+From: Khem Raj <raj.khem@gmail.com>
+Date: Sat, 12 Sep 2015 19:10:04 +0000
+Subject: [PATCH 22/31] Use getenv when secure versions are not available
+
+musl doesn't implement the secure version, so we default
+to plain getenv() if configure does not detect a secure implementation.
+
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+---
+Upstream-Status: Rejected
+
+ src/basic/missing.h | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/src/basic/missing.h b/src/basic/missing.h
+index bf9b490..d6dbc7d 100644
+--- a/src/basic/missing.h
++++ b/src/basic/missing.h
+@@ -584,13 +584,14 @@ static inline int name_to_handle_at(int fd, const char *name, struct file_handle
+ return syscall(__NR_name_to_handle_at, fd, name, handle, mnt_id, flags);
+ }
+ #endif
+-
+-#ifndef HAVE_SECURE_GETENV
++#ifdef HAVE_SECURE_GETENV
+ # ifdef HAVE___SECURE_GETENV
+ # define secure_getenv __secure_getenv
+ # else
+ # error "neither secure_getenv nor __secure_getenv are available"
+ # endif
++#else
++# define secure_getenv getenv
+ #endif
+
+ #ifndef CIFS_MAGIC_NUMBER
+--
+2.5.2
+
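The intent is: if the libc provides neither secure_getenv() nor
__secure_getenv() (musl, uClibc), fall back to plain getenv(). A simplified,
hedged sketch of such a fallback ladder; the HAVE_* guards are placeholders
for whatever configure would define, and the structure is deliberately simpler
than the patch above:

    #include <stdio.h>
    #include <stdlib.h>

    /* Placeholders: a real build would get these from configure. With neither
     * defined, the plain getenv() fallback is selected, as on musl. */
    #if defined(HAVE_SECURE_GETENV)
      /* libc already provides secure_getenv(); nothing to do */
    #elif defined(HAVE___SECURE_GETENV)
    # define secure_getenv __secure_getenv
    #else
    # define secure_getenv getenv
    #endif

    int main(void)
    {
        const char *path = secure_getenv("PATH");
        printf("PATH is %s\n", path ? "set" : "unset");
        return 0;
    }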
diff --git a/yocto-poky/meta/recipes-core/systemd/systemd/rules-whitelist-hd-devices.patch b/yocto-poky/meta/recipes-core/systemd/systemd/rules-whitelist-hd-devices.patch
new file mode 100644
index 000000000..8975b05e0
--- /dev/null
+++ b/yocto-poky/meta/recipes-core/systemd/systemd/rules-whitelist-hd-devices.patch
@@ -0,0 +1,32 @@
+From f77b7e5626e70c3a775e993816a33af5a61dea42 Mon Sep 17 00:00:00 2001
+From: Patrick Ohly <patrick.ohly@intel.com>
+Date: Wed, 16 Sep 2015 13:55:58 +0200
+Subject: [PATCH] rules: whitelist hd* devices
+
+qemu by default emulates IDE and the linux-yocto kernel(s) use
+CONFIG_IDE instead of the more modern libata, so disks appear as
+/dev/hd*. Patch rejected upstream because CONFIG_IDE is deprecated.
+
+Upstream-Status: Denied [https://github.com/systemd/systemd/pull/1276]
+
+Signed-off-by: Patrick Ohly <patrick.ohly@intel.com>
+---
+ rules/60-persistent-storage.rules | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/rules/60-persistent-storage.rules b/rules/60-persistent-storage.rules
+index 0b14bb4..1c4d97a 100644
+--- a/rules/60-persistent-storage.rules
++++ b/rules/60-persistent-storage.rules
+@@ -6,7 +6,7 @@
+ ACTION=="remove", GOTO="persistent_storage_end"
+
+ SUBSYSTEM!="block", GOTO="persistent_storage_end"
+-KERNEL!="loop*|mmcblk*[0-9]|msblk*[0-9]|mspblk*[0-9]|nvme*|sd*|sr*|vd*|xvd*|bcache*|cciss*|dasd*|ubd*", GOTO="persistent_storage_end"
++KERNEL!="loop*|mmcblk*[0-9]|msblk*[0-9]|mspblk*[0-9]|nvme*|hd*|sd*|sr*|vd*|xvd*|bcache*|cciss*|dasd*|ubd*", GOTO="persistent_storage_end"
+
+ # ignore partitions that span the entire disk
+ TEST=="whole_disk", GOTO="persistent_storage_end"
+--
+2.1.4
+
diff --git a/yocto-poky/meta/recipes-core/systemd/systemd_225.bb b/yocto-poky/meta/recipes-core/systemd/systemd_225.bb
index f7d4c7df4..18c244812 100644
--- a/yocto-poky/meta/recipes-core/systemd/systemd_225.bb
+++ b/yocto-poky/meta/recipes-core/systemd/systemd_225.bb
@@ -18,7 +18,7 @@ PROVIDES = "udev"
PE = "1"
-DEPENDS = "kmod docbook-sgml-dtd-4.1-native intltool-native gperf-native acl readline dbus libcap libcgroup glib-2.0 qemu-native util-linux"
+DEPENDS = "kmod docbook-sgml-dtd-4.1-native intltool-native gperf-native acl readline dbus libcap libcgroup qemu-native util-linux"
SECTION = "base/shell"
@@ -45,6 +45,7 @@ SRC_URI = "git://github.com/systemd/systemd.git;protocol=git \
file://00-create-volatile.conf \
file://init \
file://run-ptest \
+ file://rules-whitelist-hd-devices.patch \
"
SRC_URI_append_qemuall = " file://qemuall_io_latency-core-device.c-Change-the-default-device-timeout-to-2.patch"
@@ -52,6 +53,8 @@ S = "${WORKDIR}/git"
SRC_URI_append_libc-uclibc = "\
file://0001-units-Prefer-getty-to-agetty-in-console-setup-system.patch \
+ file://0022-Use-getenv-when-secure-versions-are-not-available.patch \
+ file://0001-fix-build-on-uClibc-exp10.patch \
"
LDFLAGS_append_libc-uclibc = " -lrt"
@@ -87,6 +90,7 @@ PACKAGECONFIG[iptc] = "--enable-libiptc,--disable-libiptc,iptables"
PACKAGECONFIG[ldconfig] = "--enable-ldconfig,--disable-ldconfig,,"
PACKAGECONFIG[selinux] = "--enable-selinux,--disable-selinux,libselinux"
PACKAGECONFIG[valgrind] = "ac_cv_header_valgrind_memcheck_h=yes ac_cv_header_valgrind_valgrind_h=yes ,ac_cv_header_valgrind_memcheck_h=no ac_cv_header_valgrind_valgrind_h=no ,valgrind"
+PACKAGECONFIG[qrencode] = "--enable-qrencode,--disable-qrencode,qrencode"
CACHED_CONFIGUREVARS += "ac_cv_path_KILL=${base_bindir}/kill"
CACHED_CONFIGUREVARS += "ac_cv_path_KMOD=${base_bindir}/kmod"
@@ -123,6 +127,9 @@ EXTRA_OECONF = " --with-rootprefix=${rootprefix} \
# uclibc does not have NSS
EXTRA_OECONF_append_libc-uclibc = " --disable-myhostname "
+# disable problematic GCC 5.2 optimizations [YOCTO #8291]
+FULL_OPTIMIZATION_append_arm = " -fno-schedule-insns -fno-schedule-insns2"
+
do_configure_prepend() {
export NM="${HOST_PREFIX}gcc-nm"
export AR="${HOST_PREFIX}gcc-ar"
@@ -186,8 +193,8 @@ do_install() {
sed -i -e 's/.*ForwardToSyslog.*/ForwardToSyslog=yes/' ${D}${sysconfdir}/systemd/journald.conf
# this file is needed to exist if networkd is disabled but timesyncd is still in use since timesyncd checks it
# for existence else it fails
- if [ -s ${D}${libdir}/tmpfiles.d/systemd.conf ]; then
- ${@bb.utils.contains('PACKAGECONFIG', 'networkd', ':', 'sed -i -e "\$ad /run/systemd/netif/links 0755 root root -" ${D}${libdir}/tmpfiles.d/systemd.conf', d)}
+ if [ -s ${D}${exec_prefix}/lib/tmpfiles.d/systemd.conf ]; then
+ ${@bb.utils.contains('PACKAGECONFIG', 'networkd', ':', 'sed -i -e "\$ad /run/systemd/netif/links 0755 root root -" ${D}${exec_prefix}/lib/tmpfiles.d/systemd.conf', d)}
fi
install -Dm 0755 ${S}/src/systemctl/systemd-sysv-install.SKELETON ${D}${systemd_unitdir}/systemd-sysv-install
}
diff --git a/yocto-poky/meta/recipes-core/uclibc/uclibc-git.inc b/yocto-poky/meta/recipes-core/uclibc/uclibc-git.inc
index 14a577f43..b7184794d 100644
--- a/yocto-poky/meta/recipes-core/uclibc/uclibc-git.inc
+++ b/yocto-poky/meta/recipes-core/uclibc/uclibc-git.inc
@@ -16,5 +16,10 @@ SRC_URI = "git://uclibc.org/uClibc.git;branch=master \
file://0005-Always-use-O2-for-compiling-fork.c.patch \
file://0006-ldso-limited-support-for-ORIGIN-in-rpath.patch \
file://0007-nptl-atfork-Hide-pthread_atfork-in-shared-versions.patch \
+ file://0001-gcc5-optimizes-away-the-write-only-static-functions-.patch \
+ file://0001-fcntl-Add-AT_EMPTY_PATH-for-all-and-O_PATH-for-arm.patch \
+ file://0001-wire-in-syncfs.patch \
+ file://CVE-2016-2224.patch \
+ file://CVE-2016-2225.patch \
"
S = "${WORKDIR}/git"
diff --git a/yocto-poky/meta/recipes-core/uclibc/uclibc-git/0001-fcntl-Add-AT_EMPTY_PATH-for-all-and-O_PATH-for-arm.patch b/yocto-poky/meta/recipes-core/uclibc/uclibc-git/0001-fcntl-Add-AT_EMPTY_PATH-for-all-and-O_PATH-for-arm.patch
new file mode 100644
index 000000000..6942db462
--- /dev/null
+++ b/yocto-poky/meta/recipes-core/uclibc/uclibc-git/0001-fcntl-Add-AT_EMPTY_PATH-for-all-and-O_PATH-for-arm.patch
@@ -0,0 +1,42 @@
+From 4c8f5fe7d41493e8e181941ae5a01713155f44d1 Mon Sep 17 00:00:00 2001
+From: Khem Raj <raj.khem@gmail.com>
+Date: Thu, 15 Oct 2015 15:34:39 +0000
+Subject: [PATCH] fcntl: Add AT_EMPTY_PATH for all and O_PATH for arm
+
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+---
+Upstream-Status: Pending
+
+ include/fcntl.h | 3 +++
+ libc/sysdeps/linux/arm/bits/fcntl.h | 1 +
+ 2 files changed, 4 insertions(+)
+
+diff --git a/include/fcntl.h b/include/fcntl.h
+index 11000dd..8a7ad9b 100644
+--- a/include/fcntl.h
++++ b/include/fcntl.h
+@@ -65,6 +65,9 @@ __BEGIN_DECLS
+ # define AT_SYMLINK_FOLLOW 0x400 /* Follow symbolic links. */
+ # define AT_EACCESS 0x200 /* Test access permitted for
+ effective IDs, not real IDs. */
++# ifdef __USE_GNU
++# define AT_EMPTY_PATH 0x1000 /* Allow empty relative pathname. */
++# endif
+ #endif
+
+ /* Do the file control operation described by CMD on FD.
+diff --git a/libc/sysdeps/linux/arm/bits/fcntl.h b/libc/sysdeps/linux/arm/bits/fcntl.h
+index aedc154..c6ba958 100644
+--- a/libc/sysdeps/linux/arm/bits/fcntl.h
++++ b/libc/sysdeps/linux/arm/bits/fcntl.h
+@@ -50,6 +50,7 @@
+ # define O_DIRECT 0200000 /* Direct disk access. */
+ # define O_NOATIME 01000000 /* Do not set atime. */
+ # define O_CLOEXEC 02000000 /* Set close_on_exec. */
++# define O_PATH 010000000 /* Resolve pathname but do not open file. */
+ #endif
+
+ /* For now Linux has synchronisity options for data and read operations.
+--
+2.6.1
+
diff --git a/yocto-poky/meta/recipes-core/uclibc/uclibc-git/0001-gcc5-optimizes-away-the-write-only-static-functions-.patch b/yocto-poky/meta/recipes-core/uclibc/uclibc-git/0001-gcc5-optimizes-away-the-write-only-static-functions-.patch
new file mode 100644
index 000000000..e622f87ba
--- /dev/null
+++ b/yocto-poky/meta/recipes-core/uclibc/uclibc-git/0001-gcc5-optimizes-away-the-write-only-static-functions-.patch
@@ -0,0 +1,51 @@
+From 2659fb25d32f4b29c1c96aa5730fe40e19d53ab0 Mon Sep 17 00:00:00 2001
+From: Khem Raj <raj.khem@gmail.com>
+Date: Wed, 14 Oct 2015 17:38:37 -0700
+Subject: [PATCH] gcc5 optimizes away the write only static functions and we
+ end up with
+
+ librt/librt_so.a(rt-unwind-resume.oS): In function `_Unwind_Resume':
+ rt-unwind-resume.c:(.text+0x3c): undefined reference to `libgcc_s_resume'
+ collect2: error: ld returned 1 exit status
+ make[2]: *** [lib/librt.so] Error 1
+
+marking these functions explicitly used with __attribute_used__ avoids
+that optimization.
+
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+---
+Upstream-Status: Pending
+
+ libpthread/nptl/sysdeps/unix/sysv/linux/arm/unwind-forcedunwind.c | 2 +-
+ libpthread/nptl/sysdeps/unix/sysv/linux/arm/unwind-resume.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/arm/unwind-forcedunwind.c b/libpthread/nptl/sysdeps/unix/sysv/linux/arm/unwind-forcedunwind.c
+index f4d6f41..0c2edd7 100644
+--- a/libpthread/nptl/sysdeps/unix/sysv/linux/arm/unwind-forcedunwind.c
++++ b/libpthread/nptl/sysdeps/unix/sysv/linux/arm/unwind-forcedunwind.c
+@@ -27,7 +27,7 @@
+ #define __libc_fatal(x) {/*write(STDERR_FILENO, x, strlen(x));*/ abort();}
+
+ static void *libgcc_s_handle;
+-static void (*libgcc_s_resume) (struct _Unwind_Exception *exc);
++static void (*libgcc_s_resume) (struct _Unwind_Exception *exc) __attribute_used__;
+ static _Unwind_Reason_Code (*libgcc_s_personality)
+ (_Unwind_State, struct _Unwind_Exception *, struct _Unwind_Context *);
+ static _Unwind_Reason_Code (*libgcc_s_forcedunwind)
+diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/arm/unwind-resume.c b/libpthread/nptl/sysdeps/unix/sysv/linux/arm/unwind-resume.c
+index f9a4ffb..f0c3047 100644
+--- a/libpthread/nptl/sysdeps/unix/sysv/linux/arm/unwind-resume.c
++++ b/libpthread/nptl/sysdeps/unix/sysv/linux/arm/unwind-resume.c
+@@ -25,7 +25,7 @@
+ #define __libc_dlclose dlclose
+ #define __libc_fatal(x) {/*write(STDERR_FILENO, x, strlen(x));*/ abort();}
+
+-static void (*libgcc_s_resume) (struct _Unwind_Exception *exc);
++static void (*libgcc_s_resume) (struct _Unwind_Exception *exc) __attribute_used__;
+ static _Unwind_Reason_Code (*libgcc_s_personality)
+ (_Unwind_State, struct _Unwind_Exception *, struct _Unwind_Context *);
+
+--
+2.6.1
+
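__attribute_used__ normally expands to __attribute__((used)), which forces GCC
to emit a static object even when the compiler only ever sees writes to it in
that translation unit; that keeps the function pointers visible to the unwinder
code that binds them behind the optimizer's back. A small hedged illustration
of the attribute (build with gcc -O2 -c and compare the symbols with nm):

    #include <stdio.h>

    static void handler(int sig) { printf("signal %d\n", sig); }

    /* Only ever written from C: GCC 5 at -O2 may discard it entirely.    */
    static void (*resume_plain)(int);

    /* The attribute forces the definition to be kept, so references from */
    /* outside the optimizer's view (asm, the unwinder) still resolve.    */
    static void (*resume_kept)(int) __attribute__((used));

    int main(void)
    {
        resume_plain = handler;   /* write-only use                */
        resume_kept  = handler;   /* same, but the symbol survives */
        return 0;
    }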
diff --git a/yocto-poky/meta/recipes-core/uclibc/uclibc-git/0001-wire-in-syncfs.patch b/yocto-poky/meta/recipes-core/uclibc/uclibc-git/0001-wire-in-syncfs.patch
new file mode 100644
index 000000000..079ad6b3b
--- /dev/null
+++ b/yocto-poky/meta/recipes-core/uclibc/uclibc-git/0001-wire-in-syncfs.patch
@@ -0,0 +1,49 @@
+From 4f2db1b46bda5e376245ec36198b137709f069e8 Mon Sep 17 00:00:00 2001
+From: Khem Raj <raj.khem@gmail.com>
+Date: Thu, 15 Oct 2015 17:03:37 +0000
+Subject: [PATCH] wire in syncfs
+
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+---
+Upstream-Status: Pending
+
+ include/unistd.h | 2 +-
+ libc/sysdeps/linux/common/syncfs.c | 13 +++++++++++++
+ 2 files changed, 14 insertions(+), 1 deletion(-)
+ create mode 100644 libc/sysdeps/linux/common/syncfs.c
+
+diff --git a/include/unistd.h b/include/unistd.h
+index 3793d2d..d01bb08 100644
+--- a/include/unistd.h
++++ b/include/unistd.h
+@@ -1073,7 +1073,7 @@ extern char *getpass (const char *__prompt) __nonnull ((1));
+ extern int fsync (int __fd);
+ #endif /* Use BSD || X/Open || Unix98. */
+
+-#if 0 /*def __USE_GNU */
++#ifdef __USE_GNU
+ /* Make all changes done to all files on the file system associated
+ * with FD actually appear on disk. */
+ extern int syncfs (int __fd) __THROW;
+diff --git a/libc/sysdeps/linux/common/syncfs.c b/libc/sysdeps/linux/common/syncfs.c
+new file mode 100644
+index 0000000..d2eed05
+--- /dev/null
++++ b/libc/sysdeps/linux/common/syncfs.c
+@@ -0,0 +1,13 @@
++/* vi: set sw=4 ts=4: */
++/*
++ * syncfs() for uClibc
++ *
++ * Copyright (C) 2000-2006 Erik Andersen <andersen@uclibc.org>
++ *
++ * Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball.
++ */
++
++#include <sys/syscall.h>
++#include <unistd.h>
++
++_syscall1(int, syncfs, int, fd)
+--
+2.6.1
+
diff --git a/yocto-poky/meta/recipes-core/uclibc/uclibc-git/CVE-2016-2224.patch b/yocto-poky/meta/recipes-core/uclibc/uclibc-git/CVE-2016-2224.patch
new file mode 100644
index 000000000..218b60a85
--- /dev/null
+++ b/yocto-poky/meta/recipes-core/uclibc/uclibc-git/CVE-2016-2224.patch
@@ -0,0 +1,49 @@
+From 16719c1a7078421928e6d31dd1dec574825ef515 Mon Sep 17 00:00:00 2001
+From: Waldemar Brodkorb <wbx@openadk.org>
+Date: Sun, 17 Jan 2016 15:47:22 +0100
+Subject: [PATCH] Do not follow compressed items forever.
+
+It is possible to get stuck in an infinite loop when receiving a
+specially crafted DNS reply. Exit the loop after a fixed number of iterations
+and consider the packet invalid.
+
+Signed-off-by: Daniel Fahlgren <daniel@fahlgren.se>
+Signed-off-by: Waldemar Brodkorb <wbx@uclibc-ng.org>
+
+Upstream-Status: Backport
+http://repo.or.cz/uclibc-ng.git/commit/16719c1a7078421928e6d31dd1dec574825ef515
+
+CVE: CVE-2016-2224
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ libc/inet/resolv.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+Index: git/libc/inet/resolv.c
+===================================================================
+--- git.orig/libc/inet/resolv.c
++++ git/libc/inet/resolv.c
+@@ -666,11 +666,12 @@ int __decode_dotted(const unsigned char
+ bool measure = 1;
+ unsigned total = 0;
+ unsigned used = 0;
++ unsigned maxiter = 256;
+
+ if (!packet)
+ return -1;
+
+- while (1) {
++ while (--maxiter) {
+ if (offset >= packet_len)
+ return -1;
+ b = packet[offset++];
+@@ -707,6 +708,8 @@ int __decode_dotted(const unsigned char
+ else
+ dest[used++] = '\0';
+ }
++ if (!maxiter)
++ return -1;
+
+ /* The null byte must be counted too */
+ if (measure)
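Bounding the loop is the standard defence against DNS name-compression pointer
loops: a malicious reply can contain pointers that chase each other forever,
so the decoder gives up after a fixed number of label hops. A simplified,
hedged sketch of the idea (not the resolv.c decoder itself):

    #include <stdio.h>

    /* Follow compression pointers in a DNS name, but never more than
     * MAXITER hops; return -1 on a loop or truncated packet. */
    #define MAXITER 256

    static int walk_name(const unsigned char *pkt, int len, int off)
    {
        int iter = MAXITER;

        while (--iter) {
            if (off >= len)
                return -1;                    /* ran off the packet      */
            unsigned b = pkt[off++];
            if (b == 0)
                return 0;                     /* end of name             */
            if ((b & 0xc0) == 0xc0) {         /* compression pointer     */
                if (off >= len)
                    return -1;
                off = ((b & 0x3f) << 8) | pkt[off];
                continue;                     /* may point backwards ... */
            }
            off += b;                         /* skip a plain label      */
        }
        return -1;                            /* too many hops: reject   */
    }

    int main(void)
    {
        /* two pointers that point at each other: 0xc0 0x02 <-> 0xc0 0x00 */
        unsigned char evil[] = { 0xc0, 0x02, 0xc0, 0x00 };
        printf("looping name rejected: %d\n", walk_name(evil, sizeof evil, 0));
        return 0;
    }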
diff --git a/yocto-poky/meta/recipes-core/uclibc/uclibc-git/CVE-2016-2225.patch b/yocto-poky/meta/recipes-core/uclibc/uclibc-git/CVE-2016-2225.patch
new file mode 100644
index 000000000..0217e4bf5
--- /dev/null
+++ b/yocto-poky/meta/recipes-core/uclibc/uclibc-git/CVE-2016-2225.patch
@@ -0,0 +1,32 @@
+From bb01edff0377f2585ce304ecbadcb7b6cde372ac Mon Sep 17 00:00:00 2001
+From: Waldemar Brodkorb <wbx@openadk.org>
+Date: Mon, 25 Jan 2016 21:11:34 +0100
+Subject: [PATCH] Make sure to always terminate decoded string
+
+Write a terminating '\0' to dest when the first byte of the encoded data
+is 0. This corner case was previously missed.
+
+Signed-off-by: Daniel Fahlgren <daniel@fahlgren.se>
+Signed-off-by: Waldemar Brodkorb <wbx@uclibc-ng.org>
+
+Upstream-Status: Backport
+http://repo.or.cz/uclibc-ng.git/commit/bb01edff0377f2585ce304ecbadcb7b6cde372ac
+CVE: CVE-2016-2225
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ libc/inet/resolv.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+Index: git/libc/inet/resolv.c
+===================================================================
+--- git.orig/libc/inet/resolv.c
++++ git/libc/inet/resolv.c
+@@ -671,6 +671,7 @@ int __decode_dotted(const unsigned char
+ if (!packet)
+ return -1;
+
++ dest[0] = '\0';
+ while (--maxiter) {
+ if (offset >= packet_len)
+ return -1;
diff --git a/yocto-poky/meta/recipes-core/uclibc/uclibc-git/uClibc.distro b/yocto-poky/meta/recipes-core/uclibc/uclibc-git/uClibc.distro
index 6575482b3..3827b66e9 100644
--- a/yocto-poky/meta/recipes-core/uclibc/uclibc-git/uClibc.distro
+++ b/yocto-poky/meta/recipes-core/uclibc/uclibc-git/uClibc.distro
@@ -182,6 +182,8 @@ UCLIBC_HAS_FLOATS=y
# COMPILE_IN_THUMB_MODE is not set
+# needed by shadow
+UCLIBC_HAS_UTMP=y
# needed by systemd
UCLIBC_HAS_UTMPX=y
UCLIBC_LINUX_MODULE_26=y
diff --git a/yocto-poky/meta/recipes-core/udev/udev.inc b/yocto-poky/meta/recipes-core/udev/udev.inc
index a00dad5db..c378ae3cd 100644
--- a/yocto-poky/meta/recipes-core/udev/udev.inc
+++ b/yocto-poky/meta/recipes-core/udev/udev.inc
@@ -15,6 +15,8 @@ LDFLAGS += "-lrt"
DEPENDS = "glib-2.0 libusb usbutils pciutils glib-2.0-native gperf-native libxslt-native util-linux"
RPROVIDES_${PN} = "hotplug"
+PROVIDES = "libgudev"
+
SRC_URI = "${KERNELORG_MIRROR}/linux/utils/kernel/hotplug/udev-${PV}.tar.gz \
file://0001-Fixing-keyboard_force_release.sh-shell-script-path.patch \
file://avoid-mouse-autosuspend.patch \
diff --git a/yocto-poky/meta/recipes-core/util-linux/util-linux.inc b/yocto-poky/meta/recipes-core/util-linux/util-linux.inc
index a4072bc6c..594108f68 100644
--- a/yocto-poky/meta/recipes-core/util-linux/util-linux.inc
+++ b/yocto-poky/meta/recipes-core/util-linux/util-linux.inc
@@ -163,6 +163,12 @@ do_install () {
echo 'MOUNTALL="-t nonfs,nosmbfs,noncpfs"' > ${D}${sysconfdir}/default/mountall
rm -f ${D}${bindir}/chkdupexe
+
+ if [ "${@bb.utils.contains('DISTRO_FEATURES', 'pam', 'pam', '', d)}" = "pam" ]; then
+ install -d ${D}${sysconfdir}/pam.d
+ install -m 0644 ${WORKDIR}/runuser.pamd ${D}${sysconfdir}/pam.d/runuser
+ install -m 0644 ${WORKDIR}/runuser-l.pamd ${D}${sysconfdir}/pam.d/runuser-l
+ fi
}
# reset and nologin causes a conflict with ncurses-native and shadow-native
diff --git a/yocto-poky/meta/recipes-core/util-linux/util-linux/runuser-l.pamd b/yocto-poky/meta/recipes-core/util-linux/util-linux/runuser-l.pamd
new file mode 100644
index 000000000..4b368ccf5
--- /dev/null
+++ b/yocto-poky/meta/recipes-core/util-linux/util-linux/runuser-l.pamd
@@ -0,0 +1,3 @@
+auth include runuser
+session optional pam_keyinit.so force revoke
+session include runuser
diff --git a/yocto-poky/meta/recipes-core/util-linux/util-linux/runuser.pamd b/yocto-poky/meta/recipes-core/util-linux/util-linux/runuser.pamd
new file mode 100644
index 000000000..48d133b9e
--- /dev/null
+++ b/yocto-poky/meta/recipes-core/util-linux/util-linux/runuser.pamd
@@ -0,0 +1,4 @@
+auth sufficient pam_rootok.so
+session optional pam_keyinit.so revoke
+session required pam_limits.so
+session required pam_unix.so
diff --git a/yocto-poky/meta/recipes-core/util-linux/util-linux_2.26.2.bb b/yocto-poky/meta/recipes-core/util-linux/util-linux_2.26.2.bb
index fc7dc6e09..e09fdfaf5 100644
--- a/yocto-poky/meta/recipes-core/util-linux/util-linux_2.26.2.bb
+++ b/yocto-poky/meta/recipes-core/util-linux/util-linux_2.26.2.bb
@@ -13,6 +13,8 @@ SRC_URI += "file://util-linux-ng-replace-siginterrupt.patch \
file://uclibc-__progname-conflict.patch \
file://configure-sbindir.patch \
file://fix-parallel-build.patch \
+ file://runuser.pamd \
+ file://runuser-l.pamd \
${OLDHOST} \
"
SRC_URI[md5sum] = "9bdf368c395f1b70325d0eb22c7f48fb"
diff --git a/yocto-poky/meta/recipes-devtools/binutils/binutils/binutils-octeon3.patch b/yocto-poky/meta/recipes-devtools/binutils/binutils/binutils-octeon3.patch
index 6108c0d5c..4e8c69f3e 100644
--- a/yocto-poky/meta/recipes-devtools/binutils/binutils/binutils-octeon3.patch
+++ b/yocto-poky/meta/recipes-devtools/binutils/binutils/binutils-octeon3.patch
@@ -229,7 +229,7 @@ Index: git/opcodes/mips-dis.c
+ { "octeon3", 1, bfd_mach_mips_octeon3, CPU_OCTEON3,
+ ISA_MIPS64R2 | INSN_OCTEON3, ASE_VIRT | ASE_VIRT64,
+ mips_cp0_names_numeric,
-+ NULL, 0, mips_hwr_names_numeric },
++ NULL, 0, mips_cp1_names_mips3264, mips_hwr_names_numeric },
+
{ "xlr", 1, bfd_mach_mips_xlr, CPU_XLR,
ISA_MIPS64 | INSN_XLR, 0,
diff --git a/yocto-poky/meta/recipes-devtools/build-compare/build-compare_git.bb b/yocto-poky/meta/recipes-devtools/build-compare/build-compare_git.bb
index 7ac37846b..676f11dd8 100644
--- a/yocto-poky/meta/recipes-devtools/build-compare/build-compare_git.bb
+++ b/yocto-poky/meta/recipes-devtools/build-compare/build-compare_git.bb
@@ -10,7 +10,6 @@ SRC_URI = "git://github.com/openSUSE/build-compare.git \
file://Ignore-DWARF-sections.patch;striplevel=1 \
file://0001-Add-support-for-deb-and-ipk-packaging.patch \
"
-PATCHTOOL = "git"
SRCREV = "c5352c054c6ef15735da31b76d6d88620f4aff0a"
diff --git a/yocto-poky/meta/recipes-devtools/ccache/ccache_3.2.3.bb b/yocto-poky/meta/recipes-devtools/ccache/ccache_3.2.3.bb
index 357df750b..97f557a9b 100644
--- a/yocto-poky/meta/recipes-devtools/ccache/ccache_3.2.3.bb
+++ b/yocto-poky/meta/recipes-devtools/ccache/ccache_3.2.3.bb
@@ -5,4 +5,6 @@ LIC_FILES_CHKSUM = "file://LICENSE.txt;md5=b3c337e7664559a789d9f7a93e5283c1"
SRCREV = "4cad46e8ee0053144bb00919f0dadd20c1f87013"
-SRC_URI += "file://0001-Fix-regression-in-recent-change-related-to-zlib-in-n.patch"
+SRC_URI += "file://0001-Fix-regression-in-recent-change-related-to-zlib-in-n.patch \
+ file://0002-dev.mk.in-fix-file-name-too-long.patch \
+"
diff --git a/yocto-poky/meta/recipes-devtools/ccache/files/0002-dev.mk.in-fix-file-name-too-long.patch b/yocto-poky/meta/recipes-devtools/ccache/files/0002-dev.mk.in-fix-file-name-too-long.patch
new file mode 100644
index 000000000..837cfadf6
--- /dev/null
+++ b/yocto-poky/meta/recipes-devtools/ccache/files/0002-dev.mk.in-fix-file-name-too-long.patch
@@ -0,0 +1,32 @@
+From 71bd0082c6edcf73f054a8a4fa34bd8dd4de7cd7 Mon Sep 17 00:00:00 2001
+From: Robert Yang <liezhi.yang@windriver.com>
+Date: Wed, 16 Sep 2015 19:45:40 -0700
+Subject: [PATCH] dev.mk.in: fix file name too long
+
+The all_cppflags rule turns paths into file names, which causes a "file name
+too long" error when the path is longer than NAME_MAX (usually 255). Strip
+srcdir to fix the problem.
+
+Upstream-Status: Pending
+
+Signed-off-by: Robert Yang <liezhi.yang@windriver.com>
+---
+ dev.mk.in | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/dev.mk.in b/dev.mk.in
+index 1261ad3..ec55ac4 100644
+--- a/dev.mk.in
++++ b/dev.mk.in
+@@ -1,7 +1,7 @@
+ # GNU make syntax reigns in this file.
+
+ all_cflags += -Werror
+-all_cppflags += -MD -MP -MF .deps/$(subst .._,,$(subst /,_,$<)).d
++all_cppflags += -MD -MP -MF .deps/$(subst .._,,$(subst /,_,$(subst $(srcdir)/,,$<))).d
+
+ ASCIIDOC = asciidoc
+ GPERF = gperf
+--
+1.7.9.5
+
diff --git a/yocto-poky/meta/recipes-devtools/dpkg/dpkg/CVE-2015-0860.patch b/yocto-poky/meta/recipes-devtools/dpkg/dpkg/CVE-2015-0860.patch
new file mode 100644
index 000000000..1f259d34d
--- /dev/null
+++ b/yocto-poky/meta/recipes-devtools/dpkg/dpkg/CVE-2015-0860.patch
@@ -0,0 +1,52 @@
+From f1aac7d933819569bf6f347c3c0d5a64a90bbce0 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Hanno=20B=C3=B6ck?= <hanno@hboeck.de>
+Date: Thu, 19 Nov 2015 20:03:10 +0100
+Subject: [PATCH] dpkg-deb: Fix off-by-one write access on ctrllenbuf variable
+
+This affects old format .deb packages.
+
+Fixes: CVE-2015-0860
+Warned-by: afl
+Signed-off-by: Guillem Jover <guillem@debian.org>
+
+Upstream-Status: Backport
+
+https://anonscm.debian.org/cgit/dpkg/dpkg.git/commit/?h=wheezy&id=f1aac7d933819569bf6f347c3c0d5a64a90bbce0
+
+CVE: CVE-2015-0860
+
+Hand-merged the ChangeLog entry.
+
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ debian/changelog | 3 +++
+ dpkg-deb/extract.c | 2 +-
+ 2 files changed, 4 insertions(+), 1 deletion(-)
+
+Index: dpkg-1.18.2/dpkg-deb/extract.c
+===================================================================
+--- dpkg-1.18.2.orig/dpkg-deb/extract.c
++++ dpkg-1.18.2/dpkg-deb/extract.c
+@@ -247,7 +247,7 @@ extracthalf(const char *debar, const cha
+ if (errstr)
+ ohshit(_("archive has invalid format version: %s"), errstr);
+
+- r = read_line(arfd, ctrllenbuf, 1, sizeof(ctrllenbuf));
++ r = read_line(arfd, ctrllenbuf, 1, sizeof(ctrllenbuf) - 1);
+ if (r < 0)
+ read_fail(r, debar, _("archive control member size"));
+ if (sscanf(ctrllenbuf, "%jd%c%d", &ctrllennum, &nlc, &dummy) != 2 ||
+Index: dpkg-1.18.2/ChangeLog
+===================================================================
+--- dpkg-1.18.2.orig/ChangeLog
++++ dpkg-1.18.2/ChangeLog
+@@ -1,3 +1,8 @@
++[ Guillem Jover ]
++ * Fix an off-by-one write access in dpkg-deb when parsing the old format
++ .deb control member size. Thanks to Hanno Böck <hanno@hboeck.de>.
++ Fixes CVE-2015-0860.
++
+ commit 5459d330c73cdcfd1327bc93c0ebddc2da4a3a3a (HEAD -> master, tag: 1.18.2)
+ Author: Guillem Jover <guillem@debian.org>
+ Date: Mon Aug 3 15:41:05 2015 +0200
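read_line() fills the buffer and the caller then treats it as a C string, so
the size passed in must leave room for the terminating NUL; passing
sizeof(buf) allows a write one byte past the end. A generic sketch of the
off-by-one and its fix (hypothetical read_field helper, not dpkg's read_line):

    #include <stdio.h>
    #include <string.h>

    /* Copies up to max bytes of src into dst and NUL-terminates it,
     * mimicking a read-into-fixed-buffer helper. */
    static size_t read_field(char *dst, size_t max, const char *src)
    {
        size_t n = strlen(src);
        if (n > max)
            n = max;
        memcpy(dst, src, n);
        dst[n] = '\0';            /* writes dst[max] when n == max! */
        return n;
    }

    int main(void)
    {
        char buf[16];

        /* Correct: reserve one byte for the terminator, as the fix does. */
        read_field(buf, sizeof(buf) - 1, "1234567890123456789");
        printf("%s\n", buf);      /* at most 15 chars + NUL, stays in bounds */
        return 0;
    }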
diff --git a/yocto-poky/meta/recipes-devtools/dpkg/dpkg_1.18.2.bb b/yocto-poky/meta/recipes-devtools/dpkg/dpkg_1.18.2.bb
index 4c3fa4f39..2fc096db4 100644
--- a/yocto-poky/meta/recipes-devtools/dpkg/dpkg_1.18.2.bb
+++ b/yocto-poky/meta/recipes-devtools/dpkg/dpkg_1.18.2.bb
@@ -12,6 +12,7 @@ SRC_URI += "file://noman.patch \
file://0003-Our-pre-postinsts-expect-D-to-be-set-when-running-in.patch \
file://0004-The-lutimes-function-doesn-t-work-properly-for-all-s.patch \
file://0005-dpkg-compiler.m4-remove-Wvla.patch \
+ file://CVE-2015-0860.patch \
"
SRC_URI[md5sum] = "63b9d869081ec49adeef6c5ff62d6576"
diff --git a/yocto-poky/meta/recipes-devtools/e2fsprogs/e2fsprogs/copy-in-create-hardlinks-with-the-correct-directory-.patch b/yocto-poky/meta/recipes-devtools/e2fsprogs/e2fsprogs/copy-in-create-hardlinks-with-the-correct-directory-.patch
new file mode 100644
index 000000000..f54969357
--- /dev/null
+++ b/yocto-poky/meta/recipes-devtools/e2fsprogs/e2fsprogs/copy-in-create-hardlinks-with-the-correct-directory-.patch
@@ -0,0 +1,81 @@
+From 2dcf8e92bc39e05b3c799f53fe911c024aee4375 Mon Sep 17 00:00:00 2001
+From: Robert Yang <liezhi.yang@windriver.com>
+Date: Fri, 23 Oct 2015 03:21:05 -0700
+Subject: [PATCH] copy-in: create hardlinks with the correct directory
+ filetype
+
+When we're creating hard links via ext2fs_link, the (misnamed?) flags
+argument specifies the filetype for the directory entry. This is
+*derived* from i_mode, so provide a translator. Otherwise, fsck will
+complain about unset file types.
+
+Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+
+Upstream-Status: Backport
+
+Signed-off-by: Robert Yang <liezhi.yang@windriver.com>
+---
+ misc/create_inode.c | 33 +++++++++++++++++++++++++++++++--
+ 1 file changed, 31 insertions(+), 2 deletions(-)
+
+diff --git a/misc/create_inode.c b/misc/create_inode.c
+index fcec5aa..b8565da 100644
+--- a/misc/create_inode.c
++++ b/misc/create_inode.c
+@@ -22,6 +22,33 @@
+ /* For saving the hard links */
+ int hdlink_cnt = HDLINK_CNT;
+
++static int ext2_file_type(unsigned int mode)
++{
++ if (LINUX_S_ISREG(mode))
++ return EXT2_FT_REG_FILE;
++
++ if (LINUX_S_ISDIR(mode))
++ return EXT2_FT_DIR;
++
++ if (LINUX_S_ISCHR(mode))
++ return EXT2_FT_CHRDEV;
++
++ if (LINUX_S_ISBLK(mode))
++ return EXT2_FT_BLKDEV;
++
++ if (LINUX_S_ISLNK(mode))
++ return EXT2_FT_SYMLINK;
++
++ if (LINUX_S_ISFIFO(mode))
++ return EXT2_FT_FIFO;
++
++ if (LINUX_S_ISSOCK(mode))
++ return EXT2_FT_SOCK;
++
++ return 0;
++}
++
++
+ /* Link an inode number to a directory */
+ static errcode_t add_link(ext2_ino_t parent_ino, ext2_ino_t ino, const char *name)
+ {
+@@ -34,14 +61,16 @@ static errcode_t add_link(ext2_ino_t parent_ino, ext2_ino_t ino, const char *nam
+ return retval;
+ }
+
+- retval = ext2fs_link(current_fs, parent_ino, name, ino, inode.i_flags);
++ retval = ext2fs_link(current_fs, parent_ino, name, ino,
++ ext2_file_type(inode.i_mode));
+ if (retval == EXT2_ET_DIR_NO_SPACE) {
+ retval = ext2fs_expand_dir(current_fs, parent_ino);
+ if (retval) {
+ com_err(__func__, retval, "while expanding directory");
+ return retval;
+ }
+- retval = ext2fs_link(current_fs, parent_ino, name, ino, inode.i_flags);
++ retval = ext2fs_link(current_fs, parent_ino, name, ino,
++ ext2_file_type(inode.i_mode));
+ }
+ if (retval) {
+ com_err(__func__, retval, "while linking %s", name);
+--
+1.7.9.5
+
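ext2fs_link() wants one of the EXT2_FT_* directory-entry type codes, not the
raw i_mode bits, which is why the translator above switches on the file-type
class of the mode. The same mapping can be sketched with the standard
<sys/stat.h> macros (plain POSIX S_IS* names here, not the LINUX_S_* ones used
inside e2fsprogs):

    #include <stdio.h>
    #include <sys/stat.h>

    /* Directory-entry filetype codes as used by ext2/3/4. */
    enum { FT_UNKNOWN, FT_REG_FILE, FT_DIR, FT_CHRDEV,
           FT_BLKDEV, FT_FIFO, FT_SOCK, FT_SYMLINK };

    static int file_type_from_mode(mode_t mode)
    {
        if (S_ISREG(mode))  return FT_REG_FILE;
        if (S_ISDIR(mode))  return FT_DIR;
        if (S_ISCHR(mode))  return FT_CHRDEV;
        if (S_ISBLK(mode))  return FT_BLKDEV;
        if (S_ISLNK(mode))  return FT_SYMLINK;
        if (S_ISFIFO(mode)) return FT_FIFO;
        if (S_ISSOCK(mode)) return FT_SOCK;
        return FT_UNKNOWN;
    }

    int main(void)
    {
        printf("regular file -> %d\n", file_type_from_mode(S_IFREG | 0644)); /* 1 */
        printf("directory    -> %d\n", file_type_from_mode(S_IFDIR | 0755)); /* 2 */
        return 0;
    }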
diff --git a/yocto-poky/meta/recipes-devtools/e2fsprogs/e2fsprogs_1.42.9.bb b/yocto-poky/meta/recipes-devtools/e2fsprogs/e2fsprogs_1.42.9.bb
index 97e29c891..a8edeefc9 100644
--- a/yocto-poky/meta/recipes-devtools/e2fsprogs/e2fsprogs_1.42.9.bb
+++ b/yocto-poky/meta/recipes-devtools/e2fsprogs/e2fsprogs_1.42.9.bb
@@ -23,6 +23,7 @@ SRC_URI += "file://acinclude.m4 \
file://cache_inode.patch \
file://CVE-2015-0247.patch \
file://0001-libext2fs-fix-potential-buffer-overflow-in-closefs.patch \
+ file://copy-in-create-hardlinks-with-the-correct-directory-.patch \
"
SRC_URI[md5sum] = "3f8e41e63b432ba114b33f58674563f7"
@@ -60,12 +61,27 @@ do_install () {
install -v -m 755 ${S}/contrib/populate-extfs.sh ${D}${base_sbindir}/
}
+# Need to find the right mke2fs.conf file
+e2fsprogs_conf_fixup () {
+ for i in mke2fs mkfs.ext2 mkfs.ext3 mkfs.ext4 mkfs.ext4dev; do
+ create_wrapper ${D}${base_sbindir}/$i MKE2FS_CONFIG=${sysconfdir}/mke2fs.conf
+ done
+}
+
do_install_append_class-target() {
# Clean host path in compile_et, mk_cmds
sed -i -e "s,ET_DIR=\"${S}/lib/et\",ET_DIR=\"${datadir}/et\",g" ${D}${bindir}/compile_et
sed -i -e "s,SS_DIR=\"${S}/lib/ss\",SS_DIR=\"${datadir}/ss\",g" ${D}${bindir}/mk_cmds
}
+do_install_append_class-native() {
+ e2fsprogs_conf_fixup
+}
+
+do_install_append_class-nativesdk() {
+ e2fsprogs_conf_fixup
+}
+
RDEPENDS_e2fsprogs = "e2fsprogs-badblocks"
RRECOMMENDS_e2fsprogs = "e2fsprogs-mke2fs e2fsprogs-e2fsck"
diff --git a/yocto-poky/meta/recipes-devtools/file/file/host-file.patch b/yocto-poky/meta/recipes-devtools/file/file/host-file.patch
new file mode 100644
index 000000000..a7efbdcbc
--- /dev/null
+++ b/yocto-poky/meta/recipes-devtools/file/file/host-file.patch
@@ -0,0 +1,32 @@
+Upstream-Status: Submitted (http://bugs.gw.com/view.php?id=485)
+Signed-off-by: Ross Burton <ross.burton@intel.com>
+
+From 3cde199d03b39632361c275cd30fa0612a03138b Mon Sep 17 00:00:00 2001
+From: Ross Burton <ross.burton@intel.com>
+Date: Mon, 19 Oct 2015 10:30:57 +0100
+Subject: [PATCH 2/2] When using the host file, respect FILE_COMPILE
+
+If we're cross-compiling and not using the file binary that was just built,
+execute the binary that we've been told to use (via FILE_COMPILE) when checking
+the version instead of assuming that "file" is correct, since the actual
+compile uses FILE_COMPILE and so a different binary may be used.
+---
+ magic/Makefile.am | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/magic/Makefile.am b/magic/Makefile.am
+index 89ac844..67067fe 100644
+--- a/magic/Makefile.am
++++ b/magic/Makefile.am
+@@ -293,7 +293,7 @@ ${MAGIC}: $(EXTRA_DIST) $(FILE_COMPILE_DEP)
+ @(if expr "${FILE_COMPILE}" : '.*/.*' > /dev/null; then \
+ echo "Using ${FILE_COMPILE} to generate ${MAGIC}" > /dev/null; \
+ else \
+- v=$$(file --version | sed -e s/file-// -e q); \
++ v=$$(${FILE_COMPILE} --version | sed -e s/file-// -e q); \
+ if [ "$$v" != "${PACKAGE_VERSION}" ]; then \
+ echo "Cannot use the installed version of file ($$v) to"; \
+ echo "cross-compile file ${PACKAGE_VERSION}"; \
+--
+2.1.4
+
diff --git a/yocto-poky/meta/recipes-devtools/file/file_5.24.bb b/yocto-poky/meta/recipes-devtools/file/file_5.24.bb
index 08b95d7b6..d04f1218a 100644
--- a/yocto-poky/meta/recipes-devtools/file/file_5.24.bb
+++ b/yocto-poky/meta/recipes-devtools/file/file_5.24.bb
@@ -8,12 +8,13 @@ SECTION = "console/utils"
LICENSE = "BSD"
LIC_FILES_CHKSUM = "file://COPYING;beginline=2;md5=6a7382872edb68d33e1a9398b6e03188"
-DEPENDS = "zlib file-native"
+DEPENDS = "zlib file-replacement-native"
DEPENDS_class-native = "zlib-native"
SRC_URI = "git://github.com/file/file.git \
file://debian-742262.patch \
file://0001-Add-P-prompt-into-Usage-info.patch \
+ file://host-file.patch \
"
SRCREV = "3c0874be4d3232d672b20f513451a39cfd7c585a"
@@ -21,6 +22,9 @@ S = "${WORKDIR}/git"
inherit autotools
+EXTRA_OEMAKE_append_class-target = "-e FILE_COMPILE=${STAGING_BINDIR_NATIVE}/file-native/file"
+EXTRA_OEMAKE_append_class-nativesdk = "-e FILE_COMPILE=${STAGING_BINDIR_NATIVE}/file-native/file"
+
FILES_${PN} += "${datadir}/misc/*.mgc"
do_install_append_class-native() {
@@ -34,3 +38,7 @@ do_install_append_class-nativesdk() {
}
BBCLASSEXTEND = "native nativesdk"
+PROVIDES_append_class-native = " file-replacement-native"
+# Don't use NATIVE_PACKAGE_PATH_SUFFIX as that hides libmagic from anyone who
+# depends on file-replacement-native.
+bindir_append_class-native = "/file-native"
diff --git a/yocto-poky/meta/recipes-devtools/gcc/gcc-4.8.inc b/yocto-poky/meta/recipes-devtools/gcc/gcc-4.8.inc
index 6a2454d86..b3e1c332e 100644
--- a/yocto-poky/meta/recipes-devtools/gcc/gcc-4.8.inc
+++ b/yocto-poky/meta/recipes-devtools/gcc/gcc-4.8.inc
@@ -69,6 +69,7 @@ SRC_URI = "\
file://0047-repomembug.patch \
file://0049-Enable-SPE-AltiVec-generation-on-powepc-linux-target.patch \
file://target-gcc-includedir.patch \
+ file://0051-gcc-483-universal-initializer-no-warning.patch \
"
SRC_URI[md5sum] = "5a84a30839b2aca22a2d723de2a626ec"
SRC_URI[sha256sum] = "4a80aa23798b8e9b5793494b8c976b39b8d9aa2e53cd5ed5534aff662a7f8695"
diff --git a/yocto-poky/meta/recipes-devtools/gcc/gcc-4.8/0051-gcc-483-universal-initializer-no-warning.patch b/yocto-poky/meta/recipes-devtools/gcc/gcc-4.8/0051-gcc-483-universal-initializer-no-warning.patch
new file mode 100644
index 000000000..fde227b8a
--- /dev/null
+++ b/yocto-poky/meta/recipes-devtools/gcc/gcc-4.8/0051-gcc-483-universal-initializer-no-warning.patch
@@ -0,0 +1,107 @@
+Upstream-Status: Backport
+
+Signed-off-by: Kai Kang <kai.kang@windriver.com>
+---
+Fix for https://gcc.gnu.org/bugzilla/show_bug.cgi?id=53119
+(wrong warning when using the universal zero initializer {0}).
+
+Backported to GCC 4.8.3
+
+Subject: 2014-06-05 S. Gilles <sgilles@terpmail.umd.edu>
+X-Git-Url: http://repo.or.cz/w/official-gcc.git/commitdiff_plain/95cdf3fdf2d440eb7775def8e35ab970651c33d9?hp=14a3093e9943937cbc63dfbf4d51ca60f8325b29
+git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@211289 138bc75d-0d04-0410-961f-82ee72b054a4
+
+--- gcc-4.8.3.org/gcc/c/c-typeck.c 2014-08-03 20:52:09.257042137 +0200
++++ gcc-4.8.3/gcc/c/c-typeck.c 2014-08-03 20:57:10.645042614 +0200
+@@ -62,9 +62,9 @@
+ if expr.original_code == SIZEOF_EXPR. */
+ tree c_last_sizeof_arg;
+
+-/* Nonzero if we've already printed a "missing braces around initializer"
+- message within this initializer. */
+-static int missing_braces_mentioned;
++/* Nonzero if we might need to print a "missing braces around
++ initializer" message within this initializer. */
++static int found_missing_braces;
+
+ static int require_constant_value;
+ static int require_constant_elements;
+@@ -6379,6 +6379,9 @@
+ /* 1 if this constructor is erroneous so far. */
+ static int constructor_erroneous;
+
++/* 1 if this constructor is the universal zero initializer { 0 }. */
++static int constructor_zeroinit;
++
+ /* Structure for managing pending initializer elements, organized as an
+ AVL tree. */
+
+@@ -6540,7 +6543,7 @@
+ constructor_stack = 0;
+ constructor_range_stack = 0;
+
+- missing_braces_mentioned = 0;
++ found_missing_braces = 0;
+
+ spelling_base = 0;
+ spelling_size = 0;
+@@ -6635,6 +6638,7 @@
+ constructor_type = type;
+ constructor_incremental = 1;
+ constructor_designated = 0;
++ constructor_zeroinit = 1;
+ designator_depth = 0;
+ designator_erroneous = 0;
+
+@@ -6832,11 +6836,8 @@
+ set_nonincremental_init (braced_init_obstack);
+ }
+
+- if (implicit == 1 && warn_missing_braces && !missing_braces_mentioned)
+- {
+- missing_braces_mentioned = 1;
+- warning_init (OPT_Wmissing_braces, "missing braces around initializer");
+- }
++ if (implicit == 1)
++ found_missing_braces = 1;
+
+ if (TREE_CODE (constructor_type) == RECORD_TYPE
+ || TREE_CODE (constructor_type) == UNION_TYPE)
+@@ -6969,16 +6970,23 @@
+ }
+ }
+
++ if (vec_safe_length (constructor_elements) != 1)
++ constructor_zeroinit = 0;
++
++ /* Warn when some structs are initialized with direct aggregation. */
++ if (!implicit && found_missing_braces && warn_missing_braces
++ && !constructor_zeroinit)
++ {
++ warning_init (OPT_Wmissing_braces,
++ "missing braces around initializer");
++ }
++
+ /* Warn when some struct elements are implicitly initialized to zero. */
+ if (warn_missing_field_initializers
+ && constructor_type
+ && TREE_CODE (constructor_type) == RECORD_TYPE
+ && constructor_unfilled_fields)
+ {
+- bool constructor_zeroinit =
+- (vec_safe_length (constructor_elements) == 1
+- && integer_zerop ((*constructor_elements)[0].value));
+-
+ /* Do not warn for flexible array members or zero-length arrays. */
+ while (constructor_unfilled_fields
+ && (!DECL_SIZE (constructor_unfilled_fields)
+@@ -8093,6 +8101,9 @@
+ designator_depth = 0;
+ designator_erroneous = 0;
+
++ if (!implicit && value.value && !integer_zerop (value.value))
++ constructor_zeroinit = 0;
++
+ /* Handle superfluous braces around string cst as in
+ char x[] = {"foo"}; */
+ if (string_flag
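The point of the patch is that { 0 } is the idiomatic way to zero any
aggregate, so -Wmissing-braces should stay quiet for it while still firing for
genuinely brace-less nested initializers. Hedged example inputs for exercising
the changed warning (hypothetical file name init_warn.c; compile with
gcc -Wmissing-braces -c init_warn.c):

    struct point { int x, y; };
    struct line  { struct point a, b; };

    /* Universal zero initializer: no warning after the patch. */
    struct line l1 = { 0 };

    /* Nested struct initialized without inner braces: still warns. */
    struct line l2 = { 1, 2, 3, 4 };

    int main(void) { return 0; }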
diff --git a/yocto-poky/meta/recipes-devtools/gcc/gcc-4.9.inc b/yocto-poky/meta/recipes-devtools/gcc/gcc-4.9.inc
index 691ba5fbc..95b553cb8 100644
--- a/yocto-poky/meta/recipes-devtools/gcc/gcc-4.9.inc
+++ b/yocto-poky/meta/recipes-devtools/gcc/gcc-4.9.inc
@@ -80,6 +80,7 @@ SRC_URI = "\
file://0062-Use-SYSTEMLIBS_DIR-replacement-instead-of-hardcoding.patch \
file://0063-nativesdk-gcc-support.patch \
file://0064-handle-target-sysroot-multilib.patch \
+ file://0065-gcc-483-universal-initializer-no-warning.patch \
"
SRC_URI[md5sum] = "6f831b4d251872736e8e9cc09746f327"
SRC_URI[sha256sum] = "2332b2a5a321b57508b9031354a8503af6fdfb868b8c1748d33028d100a8b67e"
diff --git a/yocto-poky/meta/recipes-devtools/gcc/gcc-4.9/0065-gcc-483-universal-initializer-no-warning.patch b/yocto-poky/meta/recipes-devtools/gcc/gcc-4.9/0065-gcc-483-universal-initializer-no-warning.patch
new file mode 100644
index 000000000..fde227b8a
--- /dev/null
+++ b/yocto-poky/meta/recipes-devtools/gcc/gcc-4.9/0065-gcc-483-universal-initializer-no-warning.patch
@@ -0,0 +1,107 @@
+Upstream-Status: Backport
+
+Signed-off-by: Kai Kang <kai.kang@windriver.com>
+---
+Fix for https://gcc.gnu.org/bugzilla/show_bug.cgi?id=53119
+(wrong warning when using the universal zero initializer {0}).
+
+Backported to GCC 4.8.3
+
+Subject: 2014-06-05 S. Gilles <sgilles@terpmail.umd.edu>
+X-Git-Url: http://repo.or.cz/w/official-gcc.git/commitdiff_plain/95cdf3fdf2d440eb7775def8e35ab970651c33d9?hp=14a3093e9943937cbc63dfbf4d51ca60f8325b29
+git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@211289 138bc75d-0d04-0410-961f-82ee72b054a4
+
+--- gcc-4.8.3.org/gcc/c/c-typeck.c 2014-08-03 20:52:09.257042137 +0200
++++ gcc-4.8.3/gcc/c/c-typeck.c 2014-08-03 20:57:10.645042614 +0200
+@@ -62,9 +62,9 @@
+ if expr.original_code == SIZEOF_EXPR. */
+ tree c_last_sizeof_arg;
+
+-/* Nonzero if we've already printed a "missing braces around initializer"
+- message within this initializer. */
+-static int missing_braces_mentioned;
++/* Nonzero if we might need to print a "missing braces around
++ initializer" message within this initializer. */
++static int found_missing_braces;
+
+ static int require_constant_value;
+ static int require_constant_elements;
+@@ -6379,6 +6379,9 @@
+ /* 1 if this constructor is erroneous so far. */
+ static int constructor_erroneous;
+
++/* 1 if this constructor is the universal zero initializer { 0 }. */
++static int constructor_zeroinit;
++
+ /* Structure for managing pending initializer elements, organized as an
+ AVL tree. */
+
+@@ -6540,7 +6543,7 @@
+ constructor_stack = 0;
+ constructor_range_stack = 0;
+
+- missing_braces_mentioned = 0;
++ found_missing_braces = 0;
+
+ spelling_base = 0;
+ spelling_size = 0;
+@@ -6635,6 +6638,7 @@
+ constructor_type = type;
+ constructor_incremental = 1;
+ constructor_designated = 0;
++ constructor_zeroinit = 1;
+ designator_depth = 0;
+ designator_erroneous = 0;
+
+@@ -6832,11 +6836,8 @@
+ set_nonincremental_init (braced_init_obstack);
+ }
+
+- if (implicit == 1 && warn_missing_braces && !missing_braces_mentioned)
+- {
+- missing_braces_mentioned = 1;
+- warning_init (OPT_Wmissing_braces, "missing braces around initializer");
+- }
++ if (implicit == 1)
++ found_missing_braces = 1;
+
+ if (TREE_CODE (constructor_type) == RECORD_TYPE
+ || TREE_CODE (constructor_type) == UNION_TYPE)
+@@ -6969,16 +6970,23 @@
+ }
+ }
+
++ if (vec_safe_length (constructor_elements) != 1)
++ constructor_zeroinit = 0;
++
++ /* Warn when some structs are initialized with direct aggregation. */
++ if (!implicit && found_missing_braces && warn_missing_braces
++ && !constructor_zeroinit)
++ {
++ warning_init (OPT_Wmissing_braces,
++ "missing braces around initializer");
++ }
++
+ /* Warn when some struct elements are implicitly initialized to zero. */
+ if (warn_missing_field_initializers
+ && constructor_type
+ && TREE_CODE (constructor_type) == RECORD_TYPE
+ && constructor_unfilled_fields)
+ {
+- bool constructor_zeroinit =
+- (vec_safe_length (constructor_elements) == 1
+- && integer_zerop ((*constructor_elements)[0].value));
+-
+ /* Do not warn for flexible array members or zero-length arrays. */
+ while (constructor_unfilled_fields
+ && (!DECL_SIZE (constructor_unfilled_fields)
+@@ -8093,6 +8101,9 @@
+ designator_depth = 0;
+ designator_erroneous = 0;
+
++ if (!implicit && value.value && !integer_zerop (value.value))
++ constructor_zeroinit = 0;
++
+ /* Handle superfluous braces around string cst as in
+ char x[] = {"foo"}; */
+ if (string_flag
diff --git a/yocto-poky/meta/recipes-devtools/gcc/gcc-5.2.inc b/yocto-poky/meta/recipes-devtools/gcc/gcc-5.2.inc
index f691f582d..a6b385ac6 100644
--- a/yocto-poky/meta/recipes-devtools/gcc/gcc-5.2.inc
+++ b/yocto-poky/meta/recipes-devtools/gcc/gcc-5.2.inc
@@ -73,6 +73,7 @@ SRC_URI = "\
file://0039-libcc1-fix-libcc1-s-install-path-and-rpath.patch \
file://0040-nativesdk-gcc-support.patch \
file://0041-handle-target-sysroot-multilib.patch \
+ file://0042-cxxflags-for-build.patch \
"
BACKPORTS = ""
@@ -98,6 +99,7 @@ EXTRA_OECONF_BASE = "\
--with-cloog=no \
--enable-checking=release \
--enable-cheaders=c_global \
+ --without-isl \
"
EXTRA_OECONF_INITIAL = "\
@@ -109,6 +111,7 @@ EXTRA_OECONF_INITIAL = "\
--disable-lto \
--disable-plugin \
--enable-decimal-float=no \
+ --without-isl \
"
EXTRA_OECONF_append_libc-uclibc = " --disable-decimal-float "
diff --git a/yocto-poky/meta/recipes-devtools/gcc/gcc-5.2/0042-cxxflags-for-build.patch b/yocto-poky/meta/recipes-devtools/gcc/gcc-5.2/0042-cxxflags-for-build.patch
new file mode 100644
index 000000000..1105e29f6
--- /dev/null
+++ b/yocto-poky/meta/recipes-devtools/gcc/gcc-5.2/0042-cxxflags-for-build.patch
@@ -0,0 +1,123 @@
+Fix various _FOR_BUILD and related variables
+
+When building something that runs on the build machine (the *_FOR_BUILD
+case), you have to override CFLAGS with CFLAGS_FOR_BUILD. And if you use
+C++, you also have to override CXXFLAGS with CXXFLAGS_FOR_BUILD.
+Without this, when building for mingw, you end up trying to use
+the mingw headers for a host build.
+
+The same goes for other variables as well, such as CPPFLAGS,
+CPP, and GMPINC.
+
+Upstream-Status: Pending
+
+Signed-off-by: Peter Seebach <peter.seebach@windriver.com>
+Signed-off-by: Mark Hatle <mark.hatle@windriver.com>
+
+diff --git a/Makefile.in b/Makefile.in
+index 9370174..011c29a 100644
+--- a/Makefile.in
++++ b/Makefile.in
+@@ -152,6 +152,7 @@ BUILD_EXPORTS = \
+ CPP="$(CC_FOR_BUILD) -E"; export CPP; \
+ CFLAGS="$(CFLAGS_FOR_BUILD)"; export CFLAGS; \
+ CONFIG_SHELL="$(SHELL)"; export CONFIG_SHELL; \
++ CPPFLAGS="$(CPPFLAGS_FOR_BUILD)"; export CPPFLAGS; \
+ CXX="$(CXX_FOR_BUILD)"; export CXX; \
+ CXXFLAGS="$(CXXFLAGS_FOR_BUILD)"; export CXXFLAGS; \
+ GCJ="$(GCJ_FOR_BUILD)"; export GCJ; \
+@@ -170,6 +171,9 @@ BUILD_EXPORTS = \
+ # built for the build system to override those in BASE_FLAGS_TO_PASS.
+ EXTRA_BUILD_FLAGS = \
+ CFLAGS="$(CFLAGS_FOR_BUILD)" \
++ CXXFLAGS="$(CXXFLAGS_FOR_BUILD)" \
++ CPP="$(CC_FOR_BUILD) -E" \
++ CPPFLAGS="$(CPPFLAGS_FOR_BUILD)" \
+ LDFLAGS="$(LDFLAGS_FOR_BUILD)"
+
+ # This is the list of directories to built for the host system.
+@@ -187,6 +191,7 @@ HOST_SUBDIR = @host_subdir@
+ HOST_EXPORTS = \
+ $(BASE_EXPORTS) \
+ CC="$(CC)"; export CC; \
++ CPP="$(CC) -E"; export CPP; \
+ ADA_CFLAGS="$(ADA_CFLAGS)"; export ADA_CFLAGS; \
+ CFLAGS="$(CFLAGS)"; export CFLAGS; \
+ CONFIG_SHELL="$(SHELL)"; export CONFIG_SHELL; \
+@@ -711,6 +715,7 @@ BASE_FLAGS_TO_PASS = \
+ "CC_FOR_BUILD=$(CC_FOR_BUILD)" \
+ "CFLAGS_FOR_BUILD=$(CFLAGS_FOR_BUILD)" \
+ "CXX_FOR_BUILD=$(CXX_FOR_BUILD)" \
++ "CXXFLAGS_FOR_BUILD=$(CXXFLAGS_FOR_BUILD)" \
+ "EXPECT=$(EXPECT)" \
+ "FLEX=$(FLEX)" \
+ "INSTALL=$(INSTALL)" \
+diff --git a/Makefile.tpl b/Makefile.tpl
+index 1ea1954..78a59c3 100644
+--- a/Makefile.tpl
++++ b/Makefile.tpl
+@@ -154,6 +154,7 @@ BUILD_EXPORTS = \
+ CC="$(CC_FOR_BUILD)"; export CC; \
+ CFLAGS="$(CFLAGS_FOR_BUILD)"; export CFLAGS; \
+ CONFIG_SHELL="$(SHELL)"; export CONFIG_SHELL; \
++ CPPFLAGS="$(CPPFLAGS_FOR_BUILD)"; export CPPFLAGS; \
+ CXX="$(CXX_FOR_BUILD)"; export CXX; \
+ CXXFLAGS="$(CXXFLAGS_FOR_BUILD)"; export CXXFLAGS; \
+ GCJ="$(GCJ_FOR_BUILD)"; export GCJ; \
+@@ -172,6 +173,9 @@ BUILD_EXPORTS = \
+ # built for the build system to override those in BASE_FLAGS_TO_PASS.
+ EXTRA_BUILD_FLAGS = \
+ CFLAGS="$(CFLAGS_FOR_BUILD)" \
++ CXXFLAGS="$(CXXFLAGS_FOR_BUILD)" \
++ CPP="$(CC_FOR_BUILD) -E" \
++ CPPFLAGS="$(CPPFLAGS_FOR_BUILD)" \
+ LDFLAGS="$(LDFLAGS_FOR_BUILD)"
+
+ # This is the list of directories to built for the host system.
+@@ -189,6 +193,7 @@ HOST_SUBDIR = @host_subdir@
+ HOST_EXPORTS = \
+ $(BASE_EXPORTS) \
+ CC="$(CC)"; export CC; \
++ CPP="$(CC) -E"; export CPP; \
+ ADA_CFLAGS="$(ADA_CFLAGS)"; export ADA_CFLAGS; \
+ CFLAGS="$(CFLAGS)"; export CFLAGS; \
+ CONFIG_SHELL="$(SHELL)"; export CONFIG_SHELL; \
+diff --git a/gcc/Makefile.in b/gcc/Makefile.in
+index cd5bc4a..98ae4f4 100644
+--- a/gcc/Makefile.in
++++ b/gcc/Makefile.in
+@@ -762,7 +762,7 @@ BUILD_LINKERFLAGS = $(BUILD_CXXFLAGS)
+ # Native linker and preprocessor flags. For x-fragment overrides.
+ BUILD_LDFLAGS=@BUILD_LDFLAGS@
+ BUILD_CPPFLAGS= -I. -I$(@D) -I$(srcdir) -I$(srcdir)/$(@D) \
+- -I$(srcdir)/../include @INCINTL@ $(CPPINC) $(CPPFLAGS)
++ -I$(srcdir)/../include @INCINTL@ $(CPPINC) $(CPPFLAGS_FOR_BUILD)
+
+ # Actual name to use when installing a native compiler.
+ GCC_INSTALL_NAME := $(shell echo gcc|sed '$(program_transform_name)')
+diff --git a/gcc/configure b/gcc/configure
+index c7ac14b..5ac63e4 100755
+--- a/gcc/configure
++++ b/gcc/configure
+@@ -11521,7 +11521,7 @@ else
+ CC="${CC_FOR_BUILD}" CFLAGS="${CFLAGS_FOR_BUILD}" \
+ CXX="${CXX_FOR_BUILD}" CXXFLAGS="${CXXFLAGS_FOR_BUILD}" \
+ LD="${LD_FOR_BUILD}" LDFLAGS="${LDFLAGS_FOR_BUILD}" \
+- GMPINC="" CPPFLAGS="${CPPFLAGS} -DGENERATOR_FILE" \
++ GMPINC="" CPPFLAGS="${CPPFLAGS_FOR_BUILD} -DGENERATOR_FILE" \
+ ${realsrcdir}/configure \
+ --enable-languages=${enable_languages-all} \
+ --target=$target_alias --host=$build_alias --build=$build_alias
+diff --git a/gcc/configure.ac b/gcc/configure.ac
+index 50856e6..17a4dfd 100644
+--- a/gcc/configure.ac
++++ b/gcc/configure.ac
+@@ -1633,7 +1633,7 @@ else
+ CC="${CC_FOR_BUILD}" CFLAGS="${CFLAGS_FOR_BUILD}" \
+ CXX="${CXX_FOR_BUILD}" CXXFLAGS="${CXXFLAGS_FOR_BUILD}" \
+ LD="${LD_FOR_BUILD}" LDFLAGS="${LDFLAGS_FOR_BUILD}" \
+- GMPINC="" CPPFLAGS="${CPPFLAGS} -DGENERATOR_FILE" \
++ GMPINC="" CPPFLAGS="${CPPFLAGS_FOR_BUILD} -DGENERATOR_FILE" \
+ ${realsrcdir}/configure \
+ --enable-languages=${enable_languages-all} \
+ --target=$target_alias --host=$build_alias --build=$build_alias
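For reference, the *_FOR_BUILD variables the patch threads through are the ones a caller conventionally exports when the tools that must run on the build machine need different flags from the host compiler; a minimal sketch of that convention (triplets and flag values are hypothetical):

    # Flags for generator programs that run on the build machine.
    export CC_FOR_BUILD=gcc
    export CXX_FOR_BUILD=g++
    export CFLAGS_FOR_BUILD="-O2"
    export CXXFLAGS_FOR_BUILD="-O2"
    export CPPFLAGS_FOR_BUILD=""
    export LDFLAGS_FOR_BUILD=""

    # The host toolchain keeps its own flags, so a mingw-hosted build no
    # longer leaks mingw headers into build-machine compiles.
    ./configure --build=x86_64-linux-gnu --host=x86_64-w64-mingw32
    make
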
diff --git a/yocto-poky/meta/recipes-devtools/gcc/gcc-common.inc b/yocto-poky/meta/recipes-devtools/gcc/gcc-common.inc
index d63c07f68..6f2f224a1 100644
--- a/yocto-poky/meta/recipes-devtools/gcc/gcc-common.inc
+++ b/yocto-poky/meta/recipes-devtools/gcc/gcc-common.inc
@@ -25,6 +25,11 @@ def get_gcc_mips_plt_setting(bb, d):
return "--with-mips-plt"
return ""
+def get_gcc_ppc_plt_settings(bb, d):
+ if d.getVar('TRANSLATED_TARGET_ARCH', True) in [ 'powerpc' ] and not bb.utils.contains('DISTRO_FEATURES', 'bssplt', True, False, d):
+ return "--enable-secureplt"
+ return ""
+
def get_long_double_setting(bb, d):
if d.getVar('TRANSLATED_TARGET_ARCH', True) in [ 'powerpc', 'powerpc64' ] and d.getVar('TCLIBC', True) in [ 'uclibc', 'glibc' ]:
return "--with-long-double-128"
diff --git a/yocto-poky/meta/recipes-devtools/gcc/gcc-configure-common.inc b/yocto-poky/meta/recipes-devtools/gcc/gcc-configure-common.inc
index a14be738c..cee6f4a58 100644
--- a/yocto-poky/meta/recipes-devtools/gcc/gcc-configure-common.inc
+++ b/yocto-poky/meta/recipes-devtools/gcc/gcc-configure-common.inc
@@ -47,6 +47,7 @@ EXTRA_OECONF = "\
${EXTRA_OECONF_GCC_FLOAT} \
${EXTRA_OECONF_PATHS} \
${@get_gcc_mips_plt_setting(bb, d)} \
+ ${@get_gcc_ppc_plt_settings(bb, d)} \
${@get_long_double_setting(bb, d)} \
${@get_gcc_multiarch_setting(bb, d)} \
"
diff --git a/yocto-poky/meta/recipes-devtools/gcc/gcc-cross-initial.inc b/yocto-poky/meta/recipes-devtools/gcc/gcc-cross-initial.inc
index 719744708..c0fa139a8 100644
--- a/yocto-poky/meta/recipes-devtools/gcc/gcc-cross-initial.inc
+++ b/yocto-poky/meta/recipes-devtools/gcc/gcc-cross-initial.inc
@@ -26,6 +26,7 @@ EXTRA_OECONF = "\
${EXTRA_OECONF_INITIAL} \
${@bb.utils.contains('DISTRO_FEATURES', 'ld-is-gold', '--with-ld=${STAGING_BINDIR_TOOLCHAIN}/${TARGET_PREFIX}ld.bfd', '', d)} \
${EXTRA_OECONF_GCC_FLOAT} \
+ ${@get_gcc_ppc_plt_settings(bb, d)} \
"
EXTRA_OECONF += "--with-native-system-header-dir=${SYSTEMHEADERS}"
diff --git a/yocto-poky/meta/recipes-devtools/gcc/gcc-multilib-config.inc b/yocto-poky/meta/recipes-devtools/gcc/gcc-multilib-config.inc
index f7f9f557d..1c0a45a36 100644
--- a/yocto-poky/meta/recipes-devtools/gcc/gcc-multilib-config.inc
+++ b/yocto-poky/meta/recipes-devtools/gcc/gcc-multilib-config.inc
@@ -206,7 +206,7 @@ python gcc_multilib_setup() {
# take out '-' mcpu='s and march='s from parameters
opts = []
whitelist = (d.getVar("MULTILIB_OPTION_WHITELIST", True) or "").split()
- for i in tune_parameters['ccargs'].split():
+ for i in d.expand(tune_parameters['ccargs']).split():
if i in whitelist:
# Need to strip '-' from option
opts.append(i[1:])
@@ -223,5 +223,6 @@ python gcc_multilib_setup() {
}
gcc_multilib_setup[cleandirs] = "${B}/gcc/config"
+gcc_multilib_setup[vardepsexclude] = "SDK_ARCH"
EXTRACONFFUNCS += "gcc_multilib_setup"
diff --git a/yocto-poky/meta/recipes-devtools/gcc/gcc-runtime.inc b/yocto-poky/meta/recipes-devtools/gcc/gcc-runtime.inc
index 09757e6cc..690d78012 100644
--- a/yocto-poky/meta/recipes-devtools/gcc/gcc-runtime.inc
+++ b/yocto-poky/meta/recipes-devtools/gcc/gcc-runtime.inc
@@ -53,6 +53,9 @@ do_install () {
if [ -d ${D}${infodir} ]; then
rmdir --ignore-fail-on-non-empty -p ${D}${infodir}
fi
+ if [ "${TARGET_VENDOR_MULTILIB_ORIGINAL}" != "" -a "${TARGET_VENDOR}" != "${TARGET_VENDOR_MULTILIB_ORIGINAL}" ]; then
+ ln -s ${TARGET_SYS} ${D}${includedir}/c++/${BINV}/${TARGET_ARCH}${TARGET_VENDOR_MULTILIB_ORIGINAL}-${TARGET_OS}
+ fi
if [ "${TARGET_OS}" = "linux-gnuspe" ]; then
ln -s ${TARGET_SYS} ${D}${includedir}/c++/${BINV}/${TARGET_ARCH}${TARGET_VENDOR}-linux
fi
diff --git a/yocto-poky/meta/recipes-devtools/gcc/gcc-shared-source.inc b/yocto-poky/meta/recipes-devtools/gcc/gcc-shared-source.inc
index 9acffb1da..aac4b4931 100644
--- a/yocto-poky/meta/recipes-devtools/gcc/gcc-shared-source.inc
+++ b/yocto-poky/meta/recipes-devtools/gcc/gcc-shared-source.inc
@@ -5,5 +5,7 @@ do_fetch[noexec] = "1"
deltask do_unpack
deltask do_patch
+SRC_URI = ""
+
do_configure[depends] += "gcc-source-${PV}:do_preconfigure"
do_populate_lic[depends] += "gcc-source-${PV}:do_unpack"
diff --git a/yocto-poky/meta/recipes-devtools/gcc/gcc-target.inc b/yocto-poky/meta/recipes-devtools/gcc/gcc-target.inc
index 6e160c0d1..d62c15afd 100644
--- a/yocto-poky/meta/recipes-devtools/gcc/gcc-target.inc
+++ b/yocto-poky/meta/recipes-devtools/gcc/gcc-target.inc
@@ -31,7 +31,7 @@ PACKAGES = "\
FILES_${PN} = "\
${bindir}/${TARGET_PREFIX}gcc* \
- ${libexecdir}/gcc/${TARGET_SYS}/${BINV}/collect2 \
+ ${libexecdir}/gcc/${TARGET_SYS}/${BINV}/collect2* \
${libexecdir}/gcc/${TARGET_SYS}/${BINV}/cc* \
${libexecdir}/gcc/${TARGET_SYS}/${BINV}/lto* \
${libexecdir}/gcc/${TARGET_SYS}/${BINV}/lib*${SOLIBS} \
@@ -83,20 +83,20 @@ FILES_gfortran-symlinks = "\
${bindir}/f95"
FILES_cpp = "\
- ${bindir}/${TARGET_PREFIX}cpp \
+ ${bindir}/${TARGET_PREFIX}cpp* \
${base_libdir}/cpp \
${libexecdir}/gcc/${TARGET_SYS}/${BINV}/cc1"
FILES_cpp-symlinks = "${bindir}/cpp"
-FILES_gcov = "${bindir}/${TARGET_PREFIX}gcov \
- ${bindir}/${TARGET_PREFIX}gcov-tool \
+FILES_gcov = "${bindir}/${TARGET_PREFIX}gcov* \
+ ${bindir}/${TARGET_PREFIX}gcov-tool* \
"
FILES_gcov-symlinks = "${bindir}/gcov \
${bindir}/gcov-tool \
"
FILES_g++ = "\
- ${bindir}/${TARGET_PREFIX}g++ \
+ ${bindir}/${TARGET_PREFIX}g++* \
${libexecdir}/gcc/${TARGET_SYS}/${BINV}/cc1plus \
"
FILES_g++-symlinks = "\
@@ -141,7 +141,7 @@ do_install () {
cd ${D}${bindir}
# We care about g++ not c++
- rm -f *c++
+ rm -f *c++*
# We don't care about the gcc-<version> ones for this
rm -f *gcc-?.?*
diff --git a/yocto-poky/meta/recipes-devtools/gcc/gcc_4.9.bb b/yocto-poky/meta/recipes-devtools/gcc/gcc_4.9.bb
index b84baae6d..a9dc612fb 100644
--- a/yocto-poky/meta/recipes-devtools/gcc/gcc_4.9.bb
+++ b/yocto-poky/meta/recipes-devtools/gcc/gcc_4.9.bb
@@ -1,10 +1,9 @@
require recipes-devtools/gcc/gcc-${PV}.inc
require gcc-target.inc
-# Building with thumb enabled on armv4t fails with
-# | gcc-4.8.1-r0/gcc-4.8.1/gcc/cp/decl.c:7438:(.text.unlikely+0x2fa): relocation truncated to fit: R_ARM_THM_CALL against symbol `fancy_abort(char const*, int, char const*)' defined in .glue_7 section in linker stubs
-# | gcc-4.8.1-r0/gcc-4.8.1/gcc/cp/decl.c:7442:(.text.unlikely+0x318): additional relocation overflows omitted from the output
+# http://errors.yoctoproject.org/Errors/Details/20497/
ARM_INSTRUCTION_SET_armv4 = "arm"
+ARM_INSTRUCTION_SET_armv5 = "arm"
BBCLASSEXTEND = "nativesdk"
diff --git a/yocto-poky/meta/recipes-devtools/gcc/libgcc.inc b/yocto-poky/meta/recipes-devtools/gcc/libgcc.inc
index 739adbd96..95fa3f40b 100644
--- a/yocto-poky/meta/recipes-devtools/gcc/libgcc.inc
+++ b/yocto-poky/meta/recipes-devtools/gcc/libgcc.inc
@@ -15,17 +15,10 @@ LICENSE_${PN}-dev = "GPL-3.0-with-GCC-exception"
LICENSE_${PN}-dbg = "GPL-3.0-with-GCC-exception"
-FILES_${PN} = "${base_libdir}/libgcc*.so.*"
FILES_${PN}-dev = "\
${base_libdir}/libgcc*.so \
- ${libdir}/${TARGET_SYS}/${BINV}/*crt* \
- ${libdir}/${TARGET_SYS}/${BINV}/64 \
- ${libdir}/${TARGET_SYS}/${BINV}/32 \
- ${libdir}/${TARGET_SYS}/${BINV}/x32 \
- ${libdir}/${TARGET_SYS}/${BINV}/n32 \
- ${libdir}/${TARGET_SYS}/${BINV}/libgcc* \
${@base_conditional('BASETARGET_SYS', '${TARGET_SYS}', '', '${libdir}/${BASETARGET_SYS}', d)} \
- ${libdir}/${TARGET_SYS}/${BINV}/libgcov.a \
+ ${libdir}/${TARGET_SYS}/${BINV}* \
"
FILES_${PN}-dbg += "${base_libdir}/.debug/"
diff --git a/yocto-poky/meta/recipes-devtools/git/git-2.5.0/0008-CVE-2015-7545-1.patch b/yocto-poky/meta/recipes-devtools/git/git-2.5.0/0008-CVE-2015-7545-1.patch
new file mode 100644
index 000000000..b552c099f
--- /dev/null
+++ b/yocto-poky/meta/recipes-devtools/git/git-2.5.0/0008-CVE-2015-7545-1.patch
@@ -0,0 +1,446 @@
+From a5adaced2e13c135d5d9cc65be9eb95aa3bacedf Mon Sep 17 00:00:00 2001
+From: Jeff King <peff@peff.net>
+Date: Wed, 16 Sep 2015 13:12:52 -0400
+Subject: [PATCH] transport: add a protocol-whitelist environment variable
+
+If we are cloning an untrusted remote repository into a
+sandbox, we may also want to fetch remote submodules in
+order to get the complete view as intended by the other
+side. However, that opens us up to attacks where a malicious
+user gets us to clone something they would not otherwise
+have access to (this is not necessarily a problem by itself,
+but we may then act on the cloned contents in a way that
+exposes them to the attacker).
+
+Ideally such a setup would sandbox git entirely away from
+high-value items, but this is not always practical or easy
+to set up (e.g., OS network controls may block multiple
+protocols, and we would want to enable some but not others).
+
+We can help this case by providing a way to restrict
+particular protocols. We use a whitelist in the environment.
+This is more annoying to set up than a blacklist, but
+defaults to safety if the set of protocols git supports
+grows. If no whitelist is specified, we continue to default
+to allowing all protocols (this is an "unsafe" default, but
+since the minority of users will want this sandboxing
+effect, it is the only sensible one).
+
+A note on the tests: ideally these would all be in a single
+test file, but the git-daemon and httpd test infrastructure
+is an all-or-nothing proposition rather than a test-by-test
+prerequisite. By putting them all together, we would be
+unable to test the file-local code on machines without
+apache.
+
+Signed-off-by: Jeff King <peff@peff.net>
+Signed-off-by: Junio C Hamano <gitster@pobox.com>
+
+Upstream-Status: Backport
+
+http://archive.ubuntu.com/ubuntu/pool/main/g/git/git_2.5.0-1ubuntu0.1.debian.tar.xz
+
+CVE: CVE-2015-7545 #1
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ Documentation/git.txt | 32 ++++++++++++++
+ connect.c | 5 +++
+ t/lib-proto-disable.sh | 96 ++++++++++++++++++++++++++++++++++++++++++
+ t/t5810-proto-disable-local.sh | 14 ++++++
+ t/t5811-proto-disable-git.sh | 20 +++++++++
+ t/t5812-proto-disable-http.sh | 20 +++++++++
+ t/t5813-proto-disable-ssh.sh | 20 +++++++++
+ t/t5814-proto-disable-ext.sh | 18 ++++++++
+ transport-helper.c | 2 +
+ transport.c | 21 ++++++++-
+ transport.h | 7 +++
+ 11 files changed, 254 insertions(+), 1 deletion(-)
+ create mode 100644 t/lib-proto-disable.sh
+ create mode 100755 t/t5810-proto-disable-local.sh
+ create mode 100755 t/t5811-proto-disable-git.sh
+ create mode 100755 t/t5812-proto-disable-http.sh
+ create mode 100755 t/t5813-proto-disable-ssh.sh
+ create mode 100755 t/t5814-proto-disable-ext.sh
+
+Index: git-2.5.0/Documentation/git.txt
+===================================================================
+--- git-2.5.0.orig/Documentation/git.txt 2015-12-11 12:46:48.975637719 -0500
++++ git-2.5.0/Documentation/git.txt 2015-12-11 12:46:48.967637661 -0500
+@@ -1069,6 +1069,38 @@
+ an operation has touched every ref (e.g., because you are
+ cloning a repository to make a backup).
+
++`GIT_ALLOW_PROTOCOL`::
++ If set, provide a colon-separated list of protocols which are
++ allowed to be used with fetch/push/clone. This is useful to
++ restrict recursive submodule initialization from an untrusted
++ repository. Any protocol not mentioned will be disallowed (i.e.,
++ this is a whitelist, not a blacklist). If the variable is not
++ set at all, all protocols are enabled. The protocol names
++ currently used by git are:
++
++ - `file`: any local file-based path (including `file://` URLs,
++ or local paths)
++
++ - `git`: the anonymous git protocol over a direct TCP
++ connection (or proxy, if configured)
++
++ - `ssh`: git over ssh (including `host:path` syntax,
++ `git+ssh://`, etc).
++
++ - `rsync`: git over rsync
++
++ - `http`: git over http, both "smart http" and "dumb http".
++ Note that this does _not_ include `https`; if you want both,
++ you should specify both as `http:https`.
++
++ - any external helpers are named by their protocol (e.g., use
++ `hg` to allow the `git-remote-hg` helper)
+++
++Note that this controls only git's internal protocol selection.
++If libcurl is used (e.g., by the `http` transport), it may
++redirect to other protocols. There is not currently any way to
++restrict this.
++
+
+ Discussion[[Discussion]]
+ ------------------------
+Index: git-2.5.0/connect.c
+===================================================================
+--- git-2.5.0.orig/connect.c 2015-12-11 12:46:48.975637719 -0500
++++ git-2.5.0/connect.c 2015-12-11 12:46:48.967637661 -0500
+@@ -9,6 +9,7 @@
+ #include "url.h"
+ #include "string-list.h"
+ #include "sha1-array.h"
++#include "transport.h"
+
+ static char *server_capabilities;
+ static const char *parse_feature_value(const char *, const char *, int *);
+@@ -694,6 +695,8 @@
+ else
+ target_host = xstrdup(hostandport);
+
++ transport_check_allowed("git");
++
+ /* These underlying connection commands die() if they
+ * cannot connect.
+ */
+@@ -727,6 +730,7 @@
+ int putty, tortoiseplink = 0;
+ char *ssh_host = hostandport;
+ const char *port = NULL;
++ transport_check_allowed("ssh");
+ get_host_and_port(&ssh_host, &port);
+
+ if (!port)
+@@ -781,6 +785,7 @@
+ /* remove repo-local variables from the environment */
+ conn->env = local_repo_env;
+ conn->use_shell = 1;
++ transport_check_allowed("file");
+ }
+ argv_array_push(&conn->args, cmd.buf);
+
+Index: git-2.5.0/t/lib-proto-disable.sh
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ git-2.5.0/t/lib-proto-disable.sh 2015-12-11 12:46:48.967637661 -0500
+@@ -0,0 +1,96 @@
++# Test routines for checking protocol disabling.
++
++# test cloning a particular protocol
++# $1 - description of the protocol
++# $2 - machine-readable name of the protocol
++# $3 - the URL to try cloning
++test_proto () {
++ desc=$1
++ proto=$2
++ url=$3
++
++ test_expect_success "clone $1 (enabled)" '
++ rm -rf tmp.git &&
++ (
++ GIT_ALLOW_PROTOCOL=$proto &&
++ export GIT_ALLOW_PROTOCOL &&
++ git clone --bare "$url" tmp.git
++ )
++ '
++
++ test_expect_success "fetch $1 (enabled)" '
++ (
++ cd tmp.git &&
++ GIT_ALLOW_PROTOCOL=$proto &&
++ export GIT_ALLOW_PROTOCOL &&
++ git fetch
++ )
++ '
++
++ test_expect_success "push $1 (enabled)" '
++ (
++ cd tmp.git &&
++ GIT_ALLOW_PROTOCOL=$proto &&
++ export GIT_ALLOW_PROTOCOL &&
++ git push origin HEAD:pushed
++ )
++ '
++
++ test_expect_success "push $1 (disabled)" '
++ (
++ cd tmp.git &&
++ GIT_ALLOW_PROTOCOL=none &&
++ export GIT_ALLOW_PROTOCOL &&
++ test_must_fail git push origin HEAD:pushed
++ )
++ '
++
++ test_expect_success "fetch $1 (disabled)" '
++ (
++ cd tmp.git &&
++ GIT_ALLOW_PROTOCOL=none &&
++ export GIT_ALLOW_PROTOCOL &&
++ test_must_fail git fetch
++ )
++ '
++
++ test_expect_success "clone $1 (disabled)" '
++ rm -rf tmp.git &&
++ (
++ GIT_ALLOW_PROTOCOL=none &&
++ export GIT_ALLOW_PROTOCOL &&
++ test_must_fail git clone --bare "$url" tmp.git
++ )
++ '
++}
++
++# set up an ssh wrapper that will access $host/$repo in the
++# trash directory, and enable it for subsequent tests.
++setup_ssh_wrapper () {
++ test_expect_success 'setup ssh wrapper' '
++ write_script ssh-wrapper <<-\EOF &&
++ echo >&2 "ssh: $*"
++ host=$1; shift
++ cd "$TRASH_DIRECTORY/$host" &&
++ eval "$*"
++ EOF
++ GIT_SSH="$PWD/ssh-wrapper" &&
++ export GIT_SSH &&
++ export TRASH_DIRECTORY
++ '
++}
++
++# set up a wrapper that can be used with remote-ext to
++# access repositories in the "remote" directory of trash-dir,
++# like "ext::fake-remote %S repo.git"
++setup_ext_wrapper () {
++ test_expect_success 'setup ext wrapper' '
++ write_script fake-remote <<-\EOF &&
++ echo >&2 "fake-remote: $*"
++ cd "$TRASH_DIRECTORY/remote" &&
++ eval "$*"
++ EOF
++ PATH=$TRASH_DIRECTORY:$PATH &&
++ export TRASH_DIRECTORY
++ '
++}
+Index: git-2.5.0/t/t5810-proto-disable-local.sh
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ git-2.5.0/t/t5810-proto-disable-local.sh 2015-12-11 12:46:48.967637661 -0500
+@@ -0,0 +1,14 @@
++#!/bin/sh
++
++test_description='test disabling of local paths in clone/fetch'
++. ./test-lib.sh
++. "$TEST_DIRECTORY/lib-proto-disable.sh"
++
++test_expect_success 'setup repository to clone' '
++ test_commit one
++'
++
++test_proto "file://" file "file://$PWD"
++test_proto "path" file .
++
++test_done
+Index: git-2.5.0/t/t5811-proto-disable-git.sh
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ git-2.5.0/t/t5811-proto-disable-git.sh 2015-12-11 12:46:48.967637661 -0500
+@@ -0,0 +1,20 @@
++#!/bin/sh
++
++test_description='test disabling of git-over-tcp in clone/fetch'
++. ./test-lib.sh
++. "$TEST_DIRECTORY/lib-proto-disable.sh"
++. "$TEST_DIRECTORY/lib-git-daemon.sh"
++start_git_daemon
++
++test_expect_success 'create git-accessible repo' '
++ bare="$GIT_DAEMON_DOCUMENT_ROOT_PATH/repo.git" &&
++ test_commit one &&
++ git --bare init "$bare" &&
++ git push "$bare" HEAD &&
++ >"$bare/git-daemon-export-ok" &&
++ git -C "$bare" config daemon.receivepack true
++'
++
++test_proto "git://" git "$GIT_DAEMON_URL/repo.git"
++
++test_done
+Index: git-2.5.0/t/t5812-proto-disable-http.sh
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ git-2.5.0/t/t5812-proto-disable-http.sh 2015-12-11 12:46:48.967637661 -0500
+@@ -0,0 +1,20 @@
++#!/bin/sh
++
++test_description='test disabling of git-over-http in clone/fetch'
++. ./test-lib.sh
++. "$TEST_DIRECTORY/lib-proto-disable.sh"
++. "$TEST_DIRECTORY/lib-httpd.sh"
++start_httpd
++
++test_expect_success 'create git-accessible repo' '
++ bare="$HTTPD_DOCUMENT_ROOT_PATH/repo.git" &&
++ test_commit one &&
++ git --bare init "$bare" &&
++ git push "$bare" HEAD &&
++ git -C "$bare" config http.receivepack true
++'
++
++test_proto "smart http" http "$HTTPD_URL/smart/repo.git"
++
++stop_httpd
++test_done
+Index: git-2.5.0/t/t5813-proto-disable-ssh.sh
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ git-2.5.0/t/t5813-proto-disable-ssh.sh 2015-12-11 12:46:48.967637661 -0500
+@@ -0,0 +1,20 @@
++#!/bin/sh
++
++test_description='test disabling of git-over-ssh in clone/fetch'
++. ./test-lib.sh
++. "$TEST_DIRECTORY/lib-proto-disable.sh"
++
++setup_ssh_wrapper
++
++test_expect_success 'setup repository to clone' '
++ test_commit one &&
++ mkdir remote &&
++ git init --bare remote/repo.git &&
++ git push remote/repo.git HEAD
++'
++
++test_proto "host:path" ssh "remote:repo.git"
++test_proto "ssh://" ssh "ssh://remote/$PWD/remote/repo.git"
++test_proto "git+ssh://" ssh "git+ssh://remote/$PWD/remote/repo.git"
++
++test_done
+Index: git-2.5.0/t/t5814-proto-disable-ext.sh
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ git-2.5.0/t/t5814-proto-disable-ext.sh 2015-12-11 12:46:48.967637661 -0500
+@@ -0,0 +1,18 @@
++#!/bin/sh
++
++test_description='test disabling of remote-helper paths in clone/fetch'
++. ./test-lib.sh
++. "$TEST_DIRECTORY/lib-proto-disable.sh"
++
++setup_ext_wrapper
++
++test_expect_success 'setup repository to clone' '
++ test_commit one &&
++ mkdir remote &&
++ git init --bare remote/repo.git &&
++ git push remote/repo.git HEAD
++'
++
++test_proto "remote-helper" ext "ext::fake-remote %S repo.git"
++
++test_done
+Index: git-2.5.0/transport-helper.c
+===================================================================
+--- git-2.5.0.orig/transport-helper.c 2015-12-11 12:46:48.975637719 -0500
++++ git-2.5.0/transport-helper.c 2015-12-11 12:46:48.967637661 -0500
+@@ -1039,6 +1039,8 @@
+ struct helper_data *data = xcalloc(1, sizeof(*data));
+ data->name = name;
+
++ transport_check_allowed(name);
++
+ if (getenv("GIT_TRANSPORT_HELPER_DEBUG"))
+ debug = 1;
+
+Index: git-2.5.0/transport.c
+===================================================================
+--- git-2.5.0.orig/transport.c 2015-12-11 12:46:48.975637719 -0500
++++ git-2.5.0/transport.c 2015-12-11 12:46:48.967637661 -0500
+@@ -912,6 +912,20 @@
+ return strchr(url, ':') - url;
+ }
+
++void transport_check_allowed(const char *type)
++{
++ struct string_list allowed = STRING_LIST_INIT_DUP;
++ const char *v = getenv("GIT_ALLOW_PROTOCOL");
++
++ if (!v)
++ return;
++
++ string_list_split(&allowed, v, ':', -1);
++ if (!unsorted_string_list_has_string(&allowed, type))
++ die("transport '%s' not allowed", type);
++ string_list_clear(&allowed, 0);
++}
++
+ struct transport *transport_get(struct remote *remote, const char *url)
+ {
+ const char *helper;
+@@ -943,12 +957,14 @@
+ if (helper) {
+ transport_helper_init(ret, helper);
+ } else if (starts_with(url, "rsync:")) {
++ transport_check_allowed("rsync");
+ ret->get_refs_list = get_refs_via_rsync;
+ ret->fetch = fetch_objs_via_rsync;
+ ret->push = rsync_transport_push;
+ ret->smart_options = NULL;
+ } else if (url_is_local_not_ssh(url) && is_file(url) && is_bundle(url, 1)) {
+ struct bundle_transport_data *data = xcalloc(1, sizeof(*data));
++ transport_check_allowed("file");
+ ret->data = data;
+ ret->get_refs_list = get_refs_from_bundle;
+ ret->fetch = fetch_refs_from_bundle;
+@@ -960,7 +976,10 @@
+ || starts_with(url, "ssh://")
+ || starts_with(url, "git+ssh://")
+ || starts_with(url, "ssh+git://")) {
+- /* These are builtin smart transports. */
++ /*
++ * These are builtin smart transports; "allowed" transports
++ * will be checked individually in git_connect.
++ */
+ struct git_transport_data *data = xcalloc(1, sizeof(*data));
+ ret->data = data;
+ ret->set_option = NULL;
+Index: git-2.5.0/transport.h
+===================================================================
+--- git-2.5.0.orig/transport.h 2015-12-11 12:46:48.975637719 -0500
++++ git-2.5.0/transport.h 2015-12-11 12:46:48.971637690 -0500
+@@ -133,6 +133,13 @@
+ /* Returns a transport suitable for the url */
+ struct transport *transport_get(struct remote *, const char *);
+
++/*
++ * Check whether a transport is allowed by the environment,
++ * and die otherwise. type should generally be the URL scheme,
++ * as described in Documentation/git.txt
++ */
++void transport_check_allowed(const char *type);
++
+ /* Transport options which apply to git:// and scp-style URLs */
+
+ /* The program to use on the remote side to send a pack */
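The whitelist introduced above is driven purely by the GIT_ALLOW_PROTOCOL environment variable documented in the Documentation/git.txt hunk; a minimal usage sketch (URL hypothetical):

    # Allow only https and ssh for this clone; git://, local paths and
    # remote helpers such as ext:: make transport_check_allowed() die().
    GIT_ALLOW_PROTOCOL=https:ssh \
        git clone https://example.com/repo.git

    # Leaving the variable unset keeps every protocol enabled, which is
    # the old behaviour.
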
diff --git a/yocto-poky/meta/recipes-devtools/git/git-2.5.0/0009-CVE-2015-7545-2.patch b/yocto-poky/meta/recipes-devtools/git/git-2.5.0/0009-CVE-2015-7545-2.patch
new file mode 100644
index 000000000..8000e26d7
--- /dev/null
+++ b/yocto-poky/meta/recipes-devtools/git/git-2.5.0/0009-CVE-2015-7545-2.patch
@@ -0,0 +1,112 @@
+From 33cfccbbf35a56e190b79bdec5c85457c952a021 Mon Sep 17 00:00:00 2001
+From: Jeff King <peff@peff.net>
+Date: Wed, 16 Sep 2015 13:13:12 -0400
+Subject: [PATCH] submodule: allow only certain protocols for submodule fetches
+
+Some protocols (like git-remote-ext) can execute arbitrary
+code found in the URL. The URLs that submodules use may come
+from arbitrary sources (e.g., .gitmodules files in a remote
+repository). Let's restrict submodules to fetching from a
+known-good subset of protocols.
+
+Note that we apply this restriction to all submodule
+commands, whether the URL comes from .gitmodules or not.
+This is more restrictive than we need to be; for example, in
+the tests we run:
+
+ git submodule add ext::...
+
+which should be trusted, as the URL comes directly from the
+command line provided by the user. But doing it this way is
+simpler, and makes it much less likely that we would miss a
+case. And since such protocols should be an exception
+(especially because nobody who clones from them will be able
+to update the submodules!), it's not likely to inconvenience
+anyone in practice.
+
+Reported-by: Blake Burkhart <bburky@bburky.com>
+Signed-off-by: Jeff King <peff@peff.net>
+Signed-off-by: Junio C Hamano <gitster@pobox.com>
+
+Upstream-Status: Backport
+
+http://archive.ubuntu.com/ubuntu/pool/main/g/git/git_2.5.0-1ubuntu0.1.debian.tar.xz
+
+CVE: CVE-2015-7545 #2
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ git-submodule.sh | 9 +++++++++
+ t/t5815-submodule-protos.sh | 43 +++++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 52 insertions(+)
+ create mode 100755 t/t5815-submodule-protos.sh
+
+diff --git a/git-submodule.sh b/git-submodule.sh
+index 36797c3..78c2740 100755
+--- a/git-submodule.sh
++++ b/git-submodule.sh
+@@ -22,6 +22,15 @@ require_work_tree
+ wt_prefix=$(git rev-parse --show-prefix)
+ cd_to_toplevel
+
++# Restrict ourselves to a vanilla subset of protocols; the URLs
++# we get are under control of a remote repository, and we do not
++# want them kicking off arbitrary git-remote-* programs.
++#
++# If the user has already specified a set of allowed protocols,
++# we assume they know what they're doing and use that instead.
++: ${GIT_ALLOW_PROTOCOL=file:git:http:https:ssh}
++export GIT_ALLOW_PROTOCOL
++
+ command=
+ branch=
+ force=
+diff --git a/t/t5815-submodule-protos.sh b/t/t5815-submodule-protos.sh
+new file mode 100755
+index 0000000..06f55a1
+--- /dev/null
++++ b/t/t5815-submodule-protos.sh
+@@ -0,0 +1,43 @@
++#!/bin/sh
++
++test_description='test protocol whitelisting with submodules'
++. ./test-lib.sh
++. "$TEST_DIRECTORY"/lib-proto-disable.sh
++
++setup_ext_wrapper
++setup_ssh_wrapper
++
++test_expect_success 'setup repository with submodules' '
++ mkdir remote &&
++ git init remote/repo.git &&
++ (cd remote/repo.git && test_commit one) &&
++ # submodule-add should probably trust what we feed it on the cmdline,
++ # but its implementation is overly conservative.
++ GIT_ALLOW_PROTOCOL=ssh git submodule add remote:repo.git ssh-module &&
++ GIT_ALLOW_PROTOCOL=ext git submodule add "ext::fake-remote %S repo.git" ext-module &&
++ git commit -m "add submodules"
++'
++
++test_expect_success 'clone with recurse-submodules fails' '
++ test_must_fail git clone --recurse-submodules . dst
++'
++
++test_expect_success 'setup individual updates' '
++ rm -rf dst &&
++ git clone . dst &&
++ git -C dst submodule init
++'
++
++test_expect_success 'update of ssh allowed' '
++ git -C dst submodule update ssh-module
++'
++
++test_expect_success 'update of ext not allowed' '
++ test_must_fail git -C dst submodule update ext-module
++'
++
++test_expect_success 'user can override whitelist' '
++ GIT_ALLOW_PROTOCOL=ext git -C dst submodule update ext-module
++'
++
++test_done
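As the git-submodule.sh hunk shows, submodule fetches now default to the fixed whitelist file:git:http:https:ssh unless the user has already set one; a short sketch of the resulting behaviour, mirroring the tests above:

    # An ext:: submodule URL is refused under the default whitelist...
    git -C dst submodule update ext-module        # fails

    # ...while a user who trusts the remote can widen it explicitly:
    GIT_ALLOW_PROTOCOL=ext git -C dst submodule update ext-module
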
diff --git a/yocto-poky/meta/recipes-devtools/git/git-2.5.0/0010-CVE-2015-7545-3.patch b/yocto-poky/meta/recipes-devtools/git/git-2.5.0/0010-CVE-2015-7545-3.patch
new file mode 100644
index 000000000..b6edc9d7c
--- /dev/null
+++ b/yocto-poky/meta/recipes-devtools/git/git-2.5.0/0010-CVE-2015-7545-3.patch
@@ -0,0 +1,112 @@
+From 5088d3b38775f8ac12d7f77636775b16059b67ef Mon Sep 17 00:00:00 2001
+From: Jeff King <peff@peff.net>
+Date: Tue, 22 Sep 2015 18:03:49 -0400
+Subject: [PATCH] transport: refactor protocol whitelist code
+
+The current callers only want to die when their transport is
+prohibited. But future callers want to query the mechanism
+without dying.
+
+Let's break out a few query functions, and also save the
+results in a static list so we don't have to re-parse for
+each query.
+
+Based-on-a-patch-by: Blake Burkhart <bburky@bburky.com>
+Signed-off-by: Jeff King <peff@peff.net>
+Signed-off-by: Junio C Hamano <gitster@pobox.com>
+
+Upstream-Status: Backport
+
+http://archive.ubuntu.com/ubuntu/pool/main/g/git/git_2.5.0-1ubuntu0.1.debian.tar.xz
+
+CVE: CVE-2015-7545 #3
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ transport.c | 38 ++++++++++++++++++++++++++++++--------
+ transport.h | 15 +++++++++++++--
+ 2 files changed, 43 insertions(+), 10 deletions(-)
+
+Index: git-2.5.0/transport.c
+===================================================================
+--- git-2.5.0.orig/transport.c 2015-12-11 12:47:09.547784038 -0500
++++ git-2.5.0/transport.c 2015-12-11 12:47:09.543784009 -0500
+@@ -912,18 +912,40 @@
+ return strchr(url, ':') - url;
+ }
+
+-void transport_check_allowed(const char *type)
++static const struct string_list *protocol_whitelist(void)
+ {
+- struct string_list allowed = STRING_LIST_INIT_DUP;
+- const char *v = getenv("GIT_ALLOW_PROTOCOL");
++ static int enabled = -1;
++ static struct string_list allowed = STRING_LIST_INIT_DUP;
++
++ if (enabled < 0) {
++ const char *v = getenv("GIT_ALLOW_PROTOCOL");
++ if (v) {
++ string_list_split(&allowed, v, ':', -1);
++ string_list_sort(&allowed);
++ enabled = 1;
++ } else {
++ enabled = 0;
++ }
++ }
++
++ return enabled ? &allowed : NULL;
++}
+
+- if (!v)
+- return;
++int is_transport_allowed(const char *type)
++{
++ const struct string_list *allowed = protocol_whitelist();
++ return !allowed || string_list_has_string(allowed, type);
++}
+
+- string_list_split(&allowed, v, ':', -1);
+- if (!unsorted_string_list_has_string(&allowed, type))
++void transport_check_allowed(const char *type)
++{
++ if (!is_transport_allowed(type))
+ die("transport '%s' not allowed", type);
+- string_list_clear(&allowed, 0);
++}
++
++int transport_restrict_protocols(void)
++{
++ return !!protocol_whitelist();
+ }
+
+ struct transport *transport_get(struct remote *remote, const char *url)
+Index: git-2.5.0/transport.h
+===================================================================
+--- git-2.5.0.orig/transport.h 2015-12-11 12:47:09.547784038 -0500
++++ git-2.5.0/transport.h 2015-12-11 12:47:09.543784009 -0500
+@@ -134,12 +134,23 @@
+ struct transport *transport_get(struct remote *, const char *);
+
+ /*
++ * Check whether a transport is allowed by the environment. Type should
++ * generally be the URL scheme, as described in Documentation/git.txt
++ */
++int is_transport_allowed(const char *type);
++
++/*
+ * Check whether a transport is allowed by the environment,
+- * and die otherwise. type should generally be the URL scheme,
+- * as described in Documentation/git.txt
++ * and die otherwise.
+ */
+ void transport_check_allowed(const char *type);
+
++/*
++ * Returns true if the user has attempted to turn on protocol
++ * restrictions at all.
++ */
++int transport_restrict_protocols(void);
++
+ /* Transport options which apply to git:// and scp-style URLs */
+
+ /* The program to use on the remote side to send a pack */
diff --git a/yocto-poky/meta/recipes-devtools/git/git-2.5.0/0011-CVE-2015-7545-4.patch b/yocto-poky/meta/recipes-devtools/git/git-2.5.0/0011-CVE-2015-7545-4.patch
new file mode 100644
index 000000000..44dcd1e33
--- /dev/null
+++ b/yocto-poky/meta/recipes-devtools/git/git-2.5.0/0011-CVE-2015-7545-4.patch
@@ -0,0 +1,150 @@
+Backport of:
+
+From f4113cac0c88b4f36ee6f3abf3218034440a68e3 Mon Sep 17 00:00:00 2001
+From: Blake Burkhart <bburky@bburky.com>
+Date: Tue, 22 Sep 2015 18:06:04 -0400
+Subject: [PATCH] http: limit redirection to protocol-whitelist
+
+Previously, libcurl would follow redirection to any protocol
+it was compiled for support with. This is desirable to allow
+redirection from HTTP to HTTPS. However, it would even
+successfully allow redirection from HTTP to SFTP, a protocol
+that git does not otherwise support at all. Furthermore
+git's new protocol-whitelisting could be bypassed by
+following a redirect within the remote helper, as it was
+only enforced at transport selection time.
+
+This patch limits redirects within libcurl to HTTP, HTTPS,
+FTP and FTPS. If there is a protocol-whitelist present, this
+list is limited to those also allowed by the whitelist. As
+redirection happens from within libcurl, it is impossible
+for an HTTP redirect to a protocol implemented within
+another remote helper.
+
+When the curl version git was compiled with is too old to
+support restrictions on protocol redirection, we warn the
+user if GIT_ALLOW_PROTOCOL restrictions were requested. This
+is a little inaccurate, as even without that variable in the
+environment, we would still restrict SFTP, etc, and we do
+not warn in that case. But anything else means we would
+literally warn every time git accesses an http remote.
+
+This commit includes a test, but it is not as robust as we
+would hope. It redirects an http request to ftp, and checks
+that curl complained about the protocol, which means that we
+are relying on curl's specific error message to know what
+happened. Ideally we would redirect to a working ftp server
+and confirm that we can clone without protocol restrictions,
+and not with them. But we do not have a portable way of
+providing an ftp server, nor any other protocol that curl
+supports (https is the closest, but we would have to deal
+with certificates).
+
+[jk: added test and version warning]
+
+Signed-off-by: Jeff King <peff@peff.net>
+Signed-off-by: Junio C Hamano <gitster@pobox.com>
+
+Upstream-Status: Backport
+
+http://archive.ubuntu.com/ubuntu/pool/main/g/git/git_2.5.0-1ubuntu0.1.debian.tar.xz
+
+CVE: CVE-2015-7545 #4
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ Documentation/git.txt | 5 -----
+ http.c | 17 +++++++++++++++++
+ t/lib-httpd/apache.conf | 1 +
+ t/t5812-proto-disable-http.sh | 9 +++++++++
+ 4 files changed, 27 insertions(+), 5 deletions(-)
+
+Index: git-2.5.0/Documentation/git.txt
+===================================================================
+--- git-2.5.0.orig/Documentation/git.txt 2015-12-11 12:47:18.707849212 -0500
++++ git-2.5.0/Documentation/git.txt 2015-12-11 12:47:18.703849183 -0500
+@@ -1095,11 +1095,6 @@
+
+ - any external helpers are named by their protocol (e.g., use
+ `hg` to allow the `git-remote-hg` helper)
+-+
+-Note that this controls only git's internal protocol selection.
+-If libcurl is used (e.g., by the `http` transport), it may
+-redirect to other protocols. There is not currently any way to
+-restrict this.
+
+
+ Discussion[[Discussion]]
+Index: git-2.5.0/http.c
+===================================================================
+--- git-2.5.0.orig/http.c 2015-12-11 12:47:18.707849212 -0500
++++ git-2.5.0/http.c 2015-12-11 12:47:34.171959268 -0500
+@@ -8,6 +8,7 @@
+ #include "credential.h"
+ #include "version.h"
+ #include "pkt-line.h"
++#include "transport.h"
+ #include "gettext.h"
+
+ int active_requests;
+@@ -340,6 +341,7 @@
+ static CURL *get_curl_handle(void)
+ {
+ CURL *result = curl_easy_init();
++ long allowed_protocols = 0;
+
+ if (!result)
+ die("curl_easy_init failed");
+@@ -399,6 +401,21 @@
+ #elif LIBCURL_VERSION_NUM >= 0x071101
+ curl_easy_setopt(result, CURLOPT_POST301, 1);
+ #endif
++#if LIBCURL_VERSION_NUM >= 0x071304
++ if (is_transport_allowed("http"))
++ allowed_protocols |= CURLPROTO_HTTP;
++ if (is_transport_allowed("https"))
++ allowed_protocols |= CURLPROTO_HTTPS;
++ if (is_transport_allowed("ftp"))
++ allowed_protocols |= CURLPROTO_FTP;
++ if (is_transport_allowed("ftps"))
++ allowed_protocols |= CURLPROTO_FTPS;
++ curl_easy_setopt(result, CURLOPT_REDIR_PROTOCOLS, allowed_protocols);
++#else
++ if (transport_restrict_protocols())
++ warning("protocol restrictions not applied to curl redirects because\n"
++ "your curl version is too old (>= 7.19.4)");
++#endif
+
+ if (getenv("GIT_CURL_VERBOSE"))
+ curl_easy_setopt(result, CURLOPT_VERBOSE, 1);
+Index: git-2.5.0/t/lib-httpd/apache.conf
+===================================================================
+--- git-2.5.0.orig/t/lib-httpd/apache.conf 2015-12-11 12:47:18.707849212 -0500
++++ git-2.5.0/t/lib-httpd/apache.conf 2015-12-11 12:47:18.703849183 -0500
+@@ -119,6 +119,7 @@
+ RewriteRule ^/smart-redir-temp/(.*)$ /smart/$1 [R=302]
+ RewriteRule ^/smart-redir-auth/(.*)$ /auth/smart/$1 [R=301]
+ RewriteRule ^/smart-redir-limited/(.*)/info/refs$ /smart/$1/info/refs [R=301]
++RewriteRule ^/ftp-redir/(.*)$ ftp://localhost:1000/$1 [R=302]
+
+ <IfDefine SSL>
+ LoadModule ssl_module modules/mod_ssl.so
+Index: git-2.5.0/t/t5812-proto-disable-http.sh
+===================================================================
+--- git-2.5.0.orig/t/t5812-proto-disable-http.sh 2015-12-11 12:47:18.707849212 -0500
++++ git-2.5.0/t/t5812-proto-disable-http.sh 2015-12-11 12:47:18.703849183 -0500
+@@ -16,5 +16,14 @@
+
+ test_proto "smart http" http "$HTTPD_URL/smart/repo.git"
+
++test_expect_success 'curl redirects respect whitelist' '
++ test_must_fail env GIT_ALLOW_PROTOCOL=http:https \
++ git clone "$HTTPD_URL/ftp-redir/repo.git" 2>stderr &&
++ {
++ test_i18ngrep "ftp.*disabled" stderr ||
++ test_i18ngrep "your curl version is too old"
++ }
++'
++
+ stop_httpd
+ test_done
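With a new enough libcurl (>= 7.19.4) the same whitelist now also bounds HTTP redirects, so a smart-http remote cannot quietly hand the fetch to another protocol; a sketch of the effect (URL hypothetical):

    # The server 302-redirects to an ftp:// URL; curl refuses to follow it,
    # and the test above greps stderr for "ftp.*disabled" (or for the
    # "your curl version is too old" warning on ancient curl).
    GIT_ALLOW_PROTOCOL=http:https \
        git clone http://example.com/ftp-redir/repo.git
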
diff --git a/yocto-poky/meta/recipes-devtools/git/git-2.5.0/0012-CVE-2015-7545-5.patch b/yocto-poky/meta/recipes-devtools/git/git-2.5.0/0012-CVE-2015-7545-5.patch
new file mode 100644
index 000000000..76d66bad9
--- /dev/null
+++ b/yocto-poky/meta/recipes-devtools/git/git-2.5.0/0012-CVE-2015-7545-5.patch
@@ -0,0 +1,69 @@
+From b258116462399b318c86165c61a5c7123043cfd4 Mon Sep 17 00:00:00 2001
+From: Blake Burkhart <bburky@bburky.com>
+Date: Tue, 22 Sep 2015 18:06:20 -0400
+Subject: [PATCH] http: limit redirection depth
+
+By default, libcurl will follow circular http redirects
+forever. Let's put a cap on this so that somebody who can
+trigger an automated fetch of an arbitrary repository (e.g.,
+for CI) cannot convince git to loop infinitely.
+
+The value chosen is 20, which is the same default that
+Firefox uses.
+
+Signed-off-by: Jeff King <peff@peff.net>
+Signed-off-by: Junio C Hamano <gitster@pobox.com>
+
+Upstream-Status: Backport
+
+http://archive.ubuntu.com/ubuntu/pool/main/g/git/git_2.5.0-1ubuntu0.1.debian.tar.xz
+
+CVE: CVE-2015-7545 #5
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ http.c | 1 +
+ t/lib-httpd/apache.conf | 3 +++
+ t/t5812-proto-disable-http.sh | 4 ++++
+ 3 files changed, 8 insertions(+)
+
+Index: git-2.5.0/http.c
+===================================================================
+--- git-2.5.0.orig/http.c 2015-12-11 12:48:02.900163824 -0500
++++ git-2.5.0/http.c 2015-12-11 12:48:02.896163796 -0500
+@@ -396,6 +396,7 @@
+ }
+
+ curl_easy_setopt(result, CURLOPT_FOLLOWLOCATION, 1);
++ curl_easy_setopt(result, CURLOPT_MAXREDIRS, 20);
+ #if LIBCURL_VERSION_NUM >= 0x071301
+ curl_easy_setopt(result, CURLOPT_POSTREDIR, CURL_REDIR_POST_ALL);
+ #elif LIBCURL_VERSION_NUM >= 0x071101
+Index: git-2.5.0/t/lib-httpd/apache.conf
+===================================================================
+--- git-2.5.0.orig/t/lib-httpd/apache.conf 2015-12-11 12:48:02.900163824 -0500
++++ git-2.5.0/t/lib-httpd/apache.conf 2015-12-11 12:48:02.896163796 -0500
+@@ -121,6 +121,9 @@
+ RewriteRule ^/smart-redir-limited/(.*)/info/refs$ /smart/$1/info/refs [R=301]
+ RewriteRule ^/ftp-redir/(.*)$ ftp://localhost:1000/$1 [R=302]
+
++RewriteRule ^/loop-redir/x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-(.*) /$1 [R=302]
++RewriteRule ^/loop-redir/(.*)$ /loop-redir/x-$1 [R=302]
++
+ <IfDefine SSL>
+ LoadModule ssl_module modules/mod_ssl.so
+
+Index: git-2.5.0/t/t5812-proto-disable-http.sh
+===================================================================
+--- git-2.5.0.orig/t/t5812-proto-disable-http.sh 2015-12-11 12:48:02.900163824 -0500
++++ git-2.5.0/t/t5812-proto-disable-http.sh 2015-12-11 12:48:02.896163796 -0500
+@@ -25,5 +25,9 @@
+ }
+ '
+
++test_expect_success 'curl limits redirects' '
++ test_must_fail git clone "$HTTPD_URL/loop-redir/smart/repo.git"
++'
++
+ stop_httpd
+ test_done
diff --git a/yocto-poky/meta/recipes-devtools/git/git_2.5.0.bb b/yocto-poky/meta/recipes-devtools/git/git_2.5.0.bb
index de686c2b2..792f25838 100644
--- a/yocto-poky/meta/recipes-devtools/git/git_2.5.0.bb
+++ b/yocto-poky/meta/recipes-devtools/git/git_2.5.0.bb
@@ -9,3 +9,11 @@ SRC_URI[tarball.md5sum] = "3bc9b0a803ae8ec6c5316cc64f0b7f78"
SRC_URI[tarball.sha256sum] = "8fa13ba8434ff83d24f57f831d55dbb9046434c266641180a37744facfce72ac"
SRC_URI[manpages.md5sum] = "134b049e51420a336049aac21c88a75a"
SRC_URI[manpages.sha256sum] = "745e4e797fe5061e781c880d370b1beb480199127da5acaf4e376e0b09d4d685"
+
+SRC_URI += "\
+ file://0008-CVE-2015-7545-1.patch \
+ file://0009-CVE-2015-7545-2.patch \
+ file://0010-CVE-2015-7545-3.patch \
+ file://0011-CVE-2015-7545-4.patch \
+ file://0012-CVE-2015-7545-5.patch \
+ "
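After extending SRC_URI like this, one quick way to confirm the five backports still apply cleanly is to re-run the recipe's patch task; a sketch, assuming an initialised build environment:

    # Drop any cached state for the recipe, then re-fetch and re-patch.
    bitbake -c cleansstate git
    bitbake -c patch git
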
diff --git a/yocto-poky/meta/recipes-devtools/guile/guile_2.0.11.bb b/yocto-poky/meta/recipes-devtools/guile/guile_2.0.11.bb
index 486969422..98b465bfd 100644
--- a/yocto-poky/meta/recipes-devtools/guile/guile_2.0.11.bb
+++ b/yocto-poky/meta/recipes-devtools/guile/guile_2.0.11.bb
@@ -39,7 +39,11 @@ DEPENDS = "libunistring bdwgc gmp libtool libffi ncurses readline"
# add guile-native only to the target recipe's DEPENDS
DEPENDS_append_class-target = " guile-native libatomic-ops"
-RDEPENDS_${PN}_append_libc-glibc_class-target = "glibc-gconv-iso8859-1"
+# The guile-config script's own comments say it is deprecated, but since we still
+# ship it we should at least add the dependency it needs in order to work.
+RDEPENDS_${PN} = "pkgconfig"
+
+RDEPENDS_${PN}_append_libc-glibc_class-target = " glibc-gconv-iso8859-1"
EXTRA_OECONF += "${@['--without-libltdl-prefix --without-libgmp-prefix --without-libreadline-prefix', ''][bb.data.inherits_class('native',d)]}"
@@ -77,6 +81,12 @@ do_install_append_class-native() {
GUILE_LOAD_COMPILED_PATH=${STAGING_LIBDIR_NATIVE}/guile/2.0/ccache
}
+do_install_append_class-target() {
+ # cleanup buildpaths in scripts
+ sed -i -e 's:${STAGING_DIR_NATIVE}::' ${D}/usr/bin/guile-config
+ sed -i -e 's:${STAGING_DIR_HOST}::' ${D}/usr/bin/guile-snarf
+}
+
SYSROOT_PREPROCESS_FUNCS = "guile_cross_config"
guile_cross_config() {
@@ -109,3 +119,7 @@ guile_sstate_postinst() {
find ${STAGING_DIR_TARGET}/${libdir}/guile/2.0/ccache -type f | xargs touch
fi
}
+
+# http://errors.yoctoproject.org/Errors/Details/20491/
+ARM_INSTRUCTION_SET_armv4 = "arm"
+ARM_INSTRUCTION_SET_armv5 = "arm"
diff --git a/yocto-poky/meta/recipes-devtools/i2c-tools/i2c-tools_3.1.2.bb b/yocto-poky/meta/recipes-devtools/i2c-tools/i2c-tools_3.1.2.bb
index 92f4d69d3..042695bdc 100644
--- a/yocto-poky/meta/recipes-devtools/i2c-tools/i2c-tools_3.1.2.bb
+++ b/yocto-poky/meta/recipes-devtools/i2c-tools/i2c-tools_3.1.2.bb
@@ -30,5 +30,4 @@ FILES_${PN}-misc = "${sbindir}/i2c-stub-from-dump \
${bindir}/decode-dimms \
${bindir}/decode-vaio \
"
-RDEPENDS_${PN} += "${PN}-misc"
-RDEPENDS_${PN}-misc += "perl"
+RDEPENDS_${PN}-misc = "${PN} perl"
diff --git a/yocto-poky/meta/recipes-devtools/installer/adt-installer_1.0.bb b/yocto-poky/meta/recipes-devtools/installer/adt-installer_1.0.bb
index 22e385070..4c2f0971a 100644
--- a/yocto-poky/meta/recipes-devtools/installer/adt-installer_1.0.bb
+++ b/yocto-poky/meta/recipes-devtools/installer/adt-installer_1.0.bb
@@ -82,7 +82,6 @@ fakeroot do_populate_adt () {
cp ${WORKDIR}/adt_installer.tar.bz2 ${ADT_DEPLOY}
}
-do_populate_adt[nostamp] = "1"
do_configure[noexec] = "1"
do_compile[noexec] = "1"
do_package[noexec] = "1"
diff --git a/yocto-poky/meta/recipes-devtools/libtool/libtool-2.4.6.inc b/yocto-poky/meta/recipes-devtools/libtool/libtool-2.4.6.inc
index a977c7370..de06ccb25 100644
--- a/yocto-poky/meta/recipes-devtools/libtool/libtool-2.4.6.inc
+++ b/yocto-poky/meta/recipes-devtools/libtool/libtool-2.4.6.inc
@@ -19,6 +19,7 @@ SRC_URI = "${GNU_MIRROR}/libtool/libtool-${PV}.tar.gz \
file://fix-resolve-lt-sysroot.patch \
file://nohardcodepaths.patch \
file://unwind-opt-parsing.patch \
+ file://0001-libtool-Fix-support-for-NIOS2-processor.patch \
"
SRC_URI[md5sum] = "addf44b646ddb4e3919805aa88fa7c5e"
diff --git a/yocto-poky/meta/recipes-devtools/libtool/libtool/0001-libtool-Fix-support-for-NIOS2-processor.patch b/yocto-poky/meta/recipes-devtools/libtool/libtool/0001-libtool-Fix-support-for-NIOS2-processor.patch
new file mode 100644
index 000000000..bbd36d8dc
--- /dev/null
+++ b/yocto-poky/meta/recipes-devtools/libtool/libtool/0001-libtool-Fix-support-for-NIOS2-processor.patch
@@ -0,0 +1,68 @@
+From df2cd898e48208f26320d40c3ed6b19c75c27142 Mon Sep 17 00:00:00 2001
+From: Marek Vasut <marex@denx.de>
+Date: Thu, 17 Sep 2015 00:43:15 +0200
+Subject: [PATCH] libtool: Fix support for NIOS2 processor
+
+The name of the system contains the string "nios2". This string
+is caught by some of the greedy checks for OS/2 in libtool,
+in particular the *os2* branches of switch statements match for
+the nios2 string, which results in incorrect behavior of libtool.
+
+This patch adds an explicit check for *nios2* before the *os2*
+checks to prevent the OS/2 check incorrectly trapping the nios2
+as well.
+
+Signed-off-by: Marek Vasut <marex@denx.de>
+Upstream-Status: Submitted
+---
+ build-aux/ltmain.in | 20 ++++++++++++++++++++
+ 1 file changed, 20 insertions(+)
+
+diff --git a/build-aux/ltmain.in b/build-aux/ltmain.in
+index d5cf07a..4164284 100644
+--- a/build-aux/ltmain.in
++++ b/build-aux/ltmain.in
+@@ -504,6 +504,12 @@ libtool_validate_options ()
+ test : = "$debug_cmd" || func_append preserve_args " --debug"
+
+ case $host in
++ # For NIOS2, we want to make sure that it's not caught by the
++ # more general OS/2 check below. Otherwise, NIOS2 is the same
++ # as the default option.
++ *nios2*)
++ opt_duplicate_compiler_generated_deps=$opt_preserve_dup_deps
++ ;;
+ # Solaris2 added to fix http://debbugs.gnu.org/cgi/bugreport.cgi?bug=16452
+ # see also: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=59788
+ *cygwin* | *mingw* | *pw32* | *cegcc* | *solaris2* | *os2*)
+@@ -6220,6 +6226,15 @@ func_mode_link ()
+ if test -n "$library_names" &&
+ { test no = "$use_static_libs" || test -z "$old_library"; }; then
+ case $host in
++ *nios2*)
++ # For NIOS2, we want to make sure that it's not caught by the
++ # more general OS/2 check below. Otherwise, NIOS2 is the same
++ # as the default option.
++ if test no = "$installed"; then
++ func_append notinst_deplibs " $lib"
++ need_relink=yes
++ fi
++ ;;
+ *cygwin* | *mingw* | *cegcc* | *os2*)
+ # No point in relinking DLLs because paths are not encoded
+ func_append notinst_deplibs " $lib"
+@@ -6290,6 +6305,11 @@ func_mode_link ()
+ elif test -n "$soname_spec"; then
+ # bleh windows
+ case $host in
++ *nios2*)
++ # For NIOS2, we want to make sure that it's not caught by the
++ # more general OS/2 check below. Otherwise, NIOS2 is the same
++ # as the default option.
++ ;;
+ *cygwin* | mingw* | *cegcc* | *os2*)
+ func_arith $current - $age
+ major=$func_arith_result
+--
+2.5.1
+
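The mismatch the patch describes is easy to reproduce with a plain case statement, because the *os2* glob also matches a nios2 host triplet; a minimal sketch:

    host=nios2-poky-linux
    case $host in
        *nios2*) echo "handled as nios2 (default behaviour)" ;;   # added branch
        *cygwin* | *mingw* | *pw32* | *cegcc* | *os2*)
            echo "without the branch above, nios2 lands here" ;;
        *) echo "default" ;;
    esac
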
diff --git a/yocto-poky/meta/recipes-devtools/mmc/mmc-utils_git.bb b/yocto-poky/meta/recipes-devtools/mmc/mmc-utils_git.bb
index 895036093..546f7f216 100644
--- a/yocto-poky/meta/recipes-devtools/mmc/mmc-utils_git.bb
+++ b/yocto-poky/meta/recipes-devtools/mmc/mmc-utils_git.bb
@@ -3,12 +3,12 @@ HOMEPAGE = "http://git.kernel.org/cgit/linux/kernel/git/cjb/mmc-utils.git/"
LICENSE = "GPLv2"
LIC_FILES_CHKSUM = "file://mmc.c;beginline=1;endline=17;md5=d7747fc87f1eb22b946ef819969503f0"
-BRANCH ?= "master"
+SRCBRANCH ?= "master"
SRCREV = "f4eb241519f8d500ce6068a70d2389be39ac5189"
PV = "0.1"
-SRC_URI = "git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc-utils.git;branch=${BRANCH} \
+SRC_URI = "git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc-utils.git;branch=${SRCBRANCH} \
file://0001-mmc.h-don-t-include-asm-generic-int-ll64.h.patch"
S = "${WORKDIR}/git"
diff --git a/yocto-poky/meta/recipes-devtools/mtools/mtools_4.0.18.bb b/yocto-poky/meta/recipes-devtools/mtools/mtools_4.0.18.bb
index 52decfdf6..24c9d4931 100644
--- a/yocto-poky/meta/recipes-devtools/mtools/mtools_4.0.18.bb
+++ b/yocto-poky/meta/recipes-devtools/mtools/mtools_4.0.18.bb
@@ -45,3 +45,8 @@ do_install_prepend () {
mkdir -p ${D}/${bindir}
mkdir -p ${D}/${datadir}
}
+
+do_install_append_class-native () {
+ create_wrapper ${D}${bindir}/mcopy \
+ GCONV_PATH=${libdir}/gconv
+}
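create_wrapper (from base.bbclass) replaces the installed native mcopy with a small shell wrapper that exports the listed variables before exec'ing the real binary; roughly, a sketch of what the generated wrapper amounts to (exact paths depend on the native sysroot):

    #!/bin/sh
    # mcopy itself is renamed to mcopy.real; the wrapper restores the
    # glibc charset-conversion module path before running it.
    export GCONV_PATH="<native-libdir>/gconv"
    exec "$(dirname "$0")/mcopy.real" "$@"
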
diff --git a/yocto-poky/meta/recipes-devtools/opensp/opensp_1.5.2.bb b/yocto-poky/meta/recipes-devtools/opensp/opensp_1.5.2.bb
index a1f115c00..60a7d2e47 100644
--- a/yocto-poky/meta/recipes-devtools/opensp/opensp_1.5.2.bb
+++ b/yocto-poky/meta/recipes-devtools/opensp/opensp_1.5.2.bb
@@ -53,3 +53,7 @@ do_install_append_class-native() {
FILES_${PN} += "${datadir}/OpenSP/"
BBCLASSEXTEND = "native"
+
+# http://errors.yoctoproject.org/Errors/Details/20489/
+ARM_INSTRUCTION_SET_armv4 = "arm"
+ARM_INSTRUCTION_SET_armv5 = "arm"
diff --git a/yocto-poky/meta/recipes-devtools/opkg/opkg/0001-libopkg-include-stdio.h-for-getting-FILE-defined.patch b/yocto-poky/meta/recipes-devtools/opkg/opkg/0001-libopkg-include-stdio.h-for-getting-FILE-defined.patch
new file mode 100644
index 000000000..acc133864
--- /dev/null
+++ b/yocto-poky/meta/recipes-devtools/opkg/opkg/0001-libopkg-include-stdio.h-for-getting-FILE-defined.patch
@@ -0,0 +1,45 @@
+From 58f4d3d63cd6097154205ea7ee042005036659b3 Mon Sep 17 00:00:00 2001
+From: Khem Raj <raj.khem@gmail.com>
+Date: Thu, 10 Sep 2015 21:43:32 -0700
+Subject: [PATCH] libopkg: include stdio.h for getting FILE defined
+To: opkg-devel@googlegroups.com
+Cc: paul@paulbarker.me.uk
+
+For some libc implementations (e.g. musl), stdio.h may not get included
+indirectly, which means we need to mention it in the explicit include list.
+
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+---
+Upstream-Status: Submitted
+
+ libopkg/opkg_verify.c | 1 +
+ libopkg/pkg_src.c | 1 +
+ 2 files changed, 2 insertions(+)
+
+diff --git a/libopkg/opkg_verify.c b/libopkg/opkg_verify.c
+index 41dc3f4..a71591d 100644
+--- a/libopkg/opkg_verify.c
++++ b/libopkg/opkg_verify.c
+@@ -18,6 +18,7 @@
+
+ #include <malloc.h>
+ #include <string.h>
++#include <stdio.h>
+
+ #include "file_util.h"
+ #include "opkg_conf.h"
+diff --git a/libopkg/pkg_src.c b/libopkg/pkg_src.c
+index e31ec21..6b49a00 100644
+--- a/libopkg/pkg_src.c
++++ b/libopkg/pkg_src.c
+@@ -20,6 +20,7 @@
+
+ #include <malloc.h>
+ #include <unistd.h>
++#include <stdio.h>
+
+ #include "file_util.h"
+ #include "opkg_conf.h"
+--
+2.5.1
+
diff --git a/yocto-poky/meta/recipes-devtools/opkg/opkg/0001-opkg_conf-create-opkg.lock-in-run-instead-of-var-run.patch b/yocto-poky/meta/recipes-devtools/opkg/opkg/0001-opkg_conf-create-opkg.lock-in-run-instead-of-var-run.patch
new file mode 100644
index 000000000..255021b4b
--- /dev/null
+++ b/yocto-poky/meta/recipes-devtools/opkg/opkg/0001-opkg_conf-create-opkg.lock-in-run-instead-of-var-run.patch
@@ -0,0 +1,34 @@
+From a4628a6171f393add9a2b287483ca39bb72b4dd6 Mon Sep 17 00:00:00 2001
+From: Jonathan Liu <net147@gmail.com>
+Date: Mon, 21 Sep 2015 20:23:23 +1000
+Subject: [PATCH] opkg_conf: create opkg.lock in /run instead of /var/run
+
+This avoids a "Could not unlink" warning when extracting a /var/run
+symbolic link pointing to /run from a package as it is unable to
+unlink the /var/run directory when it contains opkg.lock.
+
+This also fixes an issue where /var/run is created as a directory
+instead of a symbolic link to /run.
+
+Upstream-Status: Inappropriate [OE-Specific]
+Signed-off-by: Jonathan Liu <net147@gmail.com>
+---
+ libopkg/opkg_conf.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/libopkg/opkg_conf.h b/libopkg/opkg_conf.h
+index 7bca948..5a1bc44 100644
+--- a/libopkg/opkg_conf.h
++++ b/libopkg/opkg_conf.h
+@@ -40,7 +40,7 @@ extern "C" {
+ #define OPKG_CONF_DEFAULT_STATUS_FILE "/var/lib/opkg/status"
+ #define OPKG_CONF_DEFAULT_CACHE_DIR "/var/cache/opkg"
+ #define OPKG_CONF_DEFAULT_CONF_FILE_DIR "/etc/opkg"
+-#define OPKG_CONF_DEFAULT_LOCK_FILE "/var/run/opkg.lock"
++#define OPKG_CONF_DEFAULT_LOCK_FILE "/run/opkg.lock"
+
+ /* In case the config file defines no dest */
+ #define OPKG_CONF_DEFAULT_DEST_NAME "root"
+--
+2.5.0
+
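The conflict the patch sidesteps is simply that /var/run cannot be swapped for a symlink while opkg's own lock file sits inside it; a sketch of the old failure mode:

    # opkg holds /var/run/opkg.lock for the duration of the install.
    mkdir -p /var/run && touch /var/run/opkg.lock
    # A package shipping "/var/run -> /run" first needs the directory gone,
    # which cannot happen while the lock is inside it:
    rmdir /var/run      # "Directory not empty" -> opkg's "Could not unlink"
    # Keeping the lock at /run/opkg.lock removes the collision entirely.
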
diff --git a/yocto-poky/meta/recipes-devtools/opkg/opkg/0001-string_util-New-file-with-bin_to_hex-function.patch b/yocto-poky/meta/recipes-devtools/opkg/opkg/0001-string_util-New-file-with-bin_to_hex-function.patch
new file mode 100644
index 000000000..fb3ac462d
--- /dev/null
+++ b/yocto-poky/meta/recipes-devtools/opkg/opkg/0001-string_util-New-file-with-bin_to_hex-function.patch
@@ -0,0 +1,122 @@
+From 646b80024567a6245c598be3374653fa1fa09a12 Mon Sep 17 00:00:00 2001
+From: Paul Barker <paul@paulbarker.me.uk>
+Date: Sat, 7 Nov 2015 10:23:49 +0000
+Subject: [PATCH 1/4] string_util: New file with bin_to_hex function
+
+This function does very simple conversion from binary data to a hex string.
+
+Signed-off-by: Paul Barker <paul@paulbarker.me.uk>
+Signed-off-by: Alejandro del Castillo <alejandro.delcastillo@ni.com>
+
+Upstream-Status: Accepted
+---
+ libopkg/Makefile.am | 4 ++--
+ libopkg/string_util.c | 42 ++++++++++++++++++++++++++++++++++++++++++
+ libopkg/string_util.h | 24 ++++++++++++++++++++++++
+ 3 files changed, 68 insertions(+), 2 deletions(-)
+ create mode 100644 libopkg/string_util.c
+ create mode 100644 libopkg/string_util.h
+
+diff --git a/libopkg/Makefile.am b/libopkg/Makefile.am
+index ee3fbee..3e62c24 100644
+--- a/libopkg/Makefile.am
++++ b/libopkg/Makefile.am
+@@ -13,7 +13,7 @@ opkg_headers = active_list.h cksum_list.h conffile.h conffile_list.h \
+ pkg_depends.h pkg_dest.h pkg_dest_list.h pkg_extract.h pkg_hash.h \
+ pkg_parse.h pkg_src.h pkg_src_list.h pkg_vec.h release.h \
+ release_parse.h sha256.h sprintf_alloc.h str_list.h void_list.h \
+- xregex.h xsystem.h xfuncs.h opkg_verify.h
++ xregex.h xsystem.h xfuncs.h opkg_verify.h string_util.h
+
+ opkg_sources = opkg_cmd.c opkg_configure.c opkg_download.c \
+ opkg_install.c opkg_remove.c opkg_conf.c release.c \
+@@ -23,7 +23,7 @@ opkg_sources = opkg_cmd.c opkg_configure.c opkg_download.c \
+ pkg_src.c pkg_src_list.c str_list.c void_list.c active_list.c \
+ file_util.c opkg_message.c md5.c parse_util.c cksum_list.c \
+ sprintf_alloc.c xregex.c xsystem.c xfuncs.c opkg_archive.c \
+- opkg_verify.c
++ opkg_verify.c string_util.c
+
+ if HAVE_CURL
+ opkg_sources += opkg_download_curl.c
+diff --git a/libopkg/string_util.c b/libopkg/string_util.c
+new file mode 100644
+index 0000000..822cab6
+--- /dev/null
++++ b/libopkg/string_util.c
+@@ -0,0 +1,42 @@
++/* vi: set expandtab sw=4 sts=4: */
++/* string_util.c - convenience routines for common string operations
++
++ Copyright (C) 2015 Paul Barker
++
++ This program is free software; you can redistribute it and/or
++ modify it under the terms of the GNU General Public License as
++ published by the Free Software Foundation; either version 2, or (at
++ your option) any later version.
++
++ This program is distributed in the hope that it will be useful, but
++ WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ General Public License for more details.
++*/
++
++#include "config.h"
++
++#include "string_util.h"
++#include "xfuncs.h"
++
++char *bin_to_hex(const void *bin_data, size_t len)
++{
++ const unsigned char *src = (const unsigned char *)bin_data;
++ char *buf = xmalloc(2 * len + 1);
++ int i;
++
++ static const unsigned char bin2hex[16] = {
++ '0', '1', '2', '3',
++ '4', '5', '6', '7',
++ '8', '9', 'a', 'b',
++ 'c', 'd', 'e', 'f'
++ };
++
++ for (i = 0; i < len; i++) {
++ buf[i * 2] = bin2hex[src[i] >> 4];
++ buf[i * 2 + 1] = bin2hex[src[i] & 0xf];
++ }
++
++ buf[len * 2] = '\0';
++ return buf;
++}
+diff --git a/libopkg/string_util.h b/libopkg/string_util.h
+new file mode 100644
+index 0000000..a920e2a
+--- /dev/null
++++ b/libopkg/string_util.h
+@@ -0,0 +1,24 @@
++/* vi: set expandtab sw=4 sts=4: */
++/* string_util.h - convenience routines for common string operations
++
++ Copyright (C) 2015 Paul Barker
++
++ This program is free software; you can redistribute it and/or
++ modify it under the terms of the GNU General Public License as
++ published by the Free Software Foundation; either version 2, or (at
++ your option) any later version.
++
++ This program is distributed in the hope that it will be useful, but
++ WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ General Public License for more details.
++*/
++
++#ifndef STRING_UTIL_H
++#define STRING_UTIL_H
++
++#include <stddef.h>
++
++char *bin_to_hex(const void *bin_data, size_t len);
++
++#endif /* STRING_UTIL_H */
+--
+1.9.1
+
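For illustration only (not part of the diff above): a minimal, self-contained C sketch of the same table-driven nibble conversion that the new bin_to_hex() uses. The helper name to_hex and the sample digest are made up, and plain malloc stands in for opkg's xmalloc.

    #include <stdio.h>
    #include <stdlib.h>

    /* Convert len bytes of binary data into a heap-allocated lowercase hex
     * string, one table lookup per nibble (same idea as bin_to_hex above). */
    static char *to_hex(const void *bin_data, size_t len)
    {
        static const char tab[] = "0123456789abcdef";
        const unsigned char *src = bin_data;
        char *buf = malloc(2 * len + 1);
        size_t i;

        if (!buf)
            return NULL;
        for (i = 0; i < len; i++) {
            buf[i * 2] = tab[src[i] >> 4];
            buf[i * 2 + 1] = tab[src[i] & 0xf];
        }
        buf[len * 2] = '\0';
        return buf;
    }

    int main(void)
    {
        const unsigned char digest[4] = { 0xde, 0xad, 0xbe, 0xef };
        char *hex = to_hex(digest, sizeof(digest));

        printf("%s\n", hex);   /* prints "deadbeef" */
        free(hex);
        return 0;
    }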
diff --git a/yocto-poky/meta/recipes-devtools/opkg/opkg/0002-md5-Add-md5_to_string-function.patch b/yocto-poky/meta/recipes-devtools/opkg/opkg/0002-md5-Add-md5_to_string-function.patch
new file mode 100644
index 000000000..3b823c693
--- /dev/null
+++ b/yocto-poky/meta/recipes-devtools/opkg/opkg/0002-md5-Add-md5_to_string-function.patch
@@ -0,0 +1,110 @@
+From ecad8afab377d8be95eeaafc08afa228c8e030c3 Mon Sep 17 00:00:00 2001
+From: Paul Barker <paul@paulbarker.me.uk>
+Date: Sat, 7 Nov 2015 10:23:50 +0000
+Subject: [PATCH 2/4] md5: Add md5_to_string function
+
+Signed-off-by: Paul Barker <paul@paulbarker.me.uk>
+Signed-off-by: Alejandro del Castillo <alejandro.delcastillo@ni.com>
+
+Upstream-Status: Accepted
+---
+ libopkg/file_util.c | 28 +++-------------------------
+ libopkg/md5.c | 7 +++++++
+ libopkg/md5.h | 3 +++
+ 3 files changed, 13 insertions(+), 25 deletions(-)
+
+diff --git a/libopkg/file_util.c b/libopkg/file_util.c
+index 5eff469..cb3dbf0 100644
+--- a/libopkg/file_util.c
++++ b/libopkg/file_util.c
+@@ -349,27 +349,13 @@ int file_mkdir_hier(const char *path, long mode)
+
+ char *file_md5sum_alloc(const char *file_name)
+ {
+- static const int md5sum_bin_len = 16;
+- static const int md5sum_hex_len = 32;
+-
+- static const unsigned char bin2hex[16] = {
+- '0', '1', '2', '3',
+- '4', '5', '6', '7',
+- '8', '9', 'a', 'b',
+- 'c', 'd', 'e', 'f'
+- };
+-
+- int i, err;
++ int err;
+ FILE *file;
+- char *md5sum_hex;
+- unsigned char md5sum_bin[md5sum_bin_len];
+-
+- md5sum_hex = xcalloc(1, md5sum_hex_len + 1);
++ unsigned char md5sum_bin[16];
+
+ file = fopen(file_name, "r");
+ if (file == NULL) {
+ opkg_perror(ERROR, "Failed to open file %s", file_name);
+- free(md5sum_hex);
+ return NULL;
+ }
+
+@@ -377,20 +363,12 @@ char *file_md5sum_alloc(const char *file_name)
+ if (err) {
+ opkg_msg(ERROR, "Could't compute md5sum for %s.\n", file_name);
+ fclose(file);
+- free(md5sum_hex);
+ return NULL;
+ }
+
+ fclose(file);
+
+- for (i = 0; i < md5sum_bin_len; i++) {
+- md5sum_hex[i * 2] = bin2hex[md5sum_bin[i] >> 4];
+- md5sum_hex[i * 2 + 1] = bin2hex[md5sum_bin[i] & 0xf];
+- }
+-
+- md5sum_hex[md5sum_hex_len] = '\0';
+-
+- return md5sum_hex;
++ return md5_to_string(md5sum_bin);
+ }
+
+ #ifdef HAVE_SHA256
+diff --git a/libopkg/md5.c b/libopkg/md5.c
+index d476b8b..bc2b229 100644
+--- a/libopkg/md5.c
++++ b/libopkg/md5.c
+@@ -30,6 +30,8 @@
+ #include <string.h>
+ #include <sys/types.h>
+
++#include "string_util.h"
++
+ #if USE_UNLOCKED_IO
+ #include "unlocked-io.h"
+ #endif
+@@ -431,3 +433,8 @@ void md5_process_block(const void *buffer, size_t len, struct md5_ctx *ctx)
+ ctx->C = C;
+ ctx->D = D;
+ }
++
++char *md5_to_string(const void *md5sum_bin)
++{
++ return bin_to_hex(md5sum_bin, 16);
++}
+diff --git a/libopkg/md5.h b/libopkg/md5.h
+index 01320f5..2a7274d 100644
+--- a/libopkg/md5.h
++++ b/libopkg/md5.h
+@@ -118,6 +118,9 @@ extern int __md5_stream(FILE * stream, void *resblock) __THROW;
+ extern void *__md5_buffer(const char *buffer, size_t len,
+ void *resblock) __THROW;
+
++/* Convert a binary md5sum value to an ASCII string. */
++char *md5_to_string(const void *md5sum_bin);
++
+ #ifdef __cplusplus
+ }
+ #endif
+--
+1.9.1
+
diff --git a/yocto-poky/meta/recipes-devtools/opkg/opkg/0003-sha256-Add-sha256_to_string-function.patch b/yocto-poky/meta/recipes-devtools/opkg/opkg/0003-sha256-Add-sha256_to_string-function.patch
new file mode 100644
index 000000000..16e82d741
--- /dev/null
+++ b/yocto-poky/meta/recipes-devtools/opkg/opkg/0003-sha256-Add-sha256_to_string-function.patch
@@ -0,0 +1,110 @@
+From 92e8378103bba3b91f2dec4e6fda3e1755a7c0fd Mon Sep 17 00:00:00 2001
+From: Paul Barker <paul@paulbarker.me.uk>
+Date: Sat, 7 Nov 2015 10:23:51 +0000
+Subject: [PATCH 3/4] sha256: Add sha256_to_string function
+
+Signed-off-by: Paul Barker <paul@paulbarker.me.uk>
+Signed-off-by: Alejandro del Castillo <alejandro.delcastillo@ni.com>
+
+Upstream-Status: Accepted
+---
+ libopkg/file_util.c | 28 +++-------------------------
+ libopkg/sha256.c | 7 +++++++
+ libopkg/sha256.h | 3 +++
+ 3 files changed, 13 insertions(+), 25 deletions(-)
+
+diff --git a/libopkg/file_util.c b/libopkg/file_util.c
+index cb3dbf0..864aedb 100644
+--- a/libopkg/file_util.c
++++ b/libopkg/file_util.c
+@@ -374,27 +374,13 @@ char *file_md5sum_alloc(const char *file_name)
+ #ifdef HAVE_SHA256
+ char *file_sha256sum_alloc(const char *file_name)
+ {
+- static const int sha256sum_bin_len = 32;
+- static const int sha256sum_hex_len = 64;
+-
+- static const unsigned char bin2hex[16] = {
+- '0', '1', '2', '3',
+- '4', '5', '6', '7',
+- '8', '9', 'a', 'b',
+- 'c', 'd', 'e', 'f'
+- };
+-
+- int i, err;
++ int err;
+ FILE *file;
+- char *sha256sum_hex;
+- unsigned char sha256sum_bin[sha256sum_bin_len];
+-
+- sha256sum_hex = xcalloc(1, sha256sum_hex_len + 1);
++ unsigned char sha256sum_bin[32];
+
+ file = fopen(file_name, "r");
+ if (file == NULL) {
+ opkg_perror(ERROR, "Failed to open file %s", file_name);
+- free(sha256sum_hex);
+ return NULL;
+ }
+
+@@ -402,20 +388,12 @@ char *file_sha256sum_alloc(const char *file_name)
+ if (err) {
+ opkg_msg(ERROR, "Could't compute sha256sum for %s.\n", file_name);
+ fclose(file);
+- free(sha256sum_hex);
+ return NULL;
+ }
+
+ fclose(file);
+
+- for (i = 0; i < sha256sum_bin_len; i++) {
+- sha256sum_hex[i * 2] = bin2hex[sha256sum_bin[i] >> 4];
+- sha256sum_hex[i * 2 + 1] = bin2hex[sha256sum_bin[i] & 0xf];
+- }
+-
+- sha256sum_hex[sha256sum_hex_len] = '\0';
+-
+- return sha256sum_hex;
++ return sha256_to_string(sha256sum_bin);
+ }
+
+ #endif
+diff --git a/libopkg/sha256.c b/libopkg/sha256.c
+index 0816858..bceed72 100644
+--- a/libopkg/sha256.c
++++ b/libopkg/sha256.c
+@@ -29,6 +29,8 @@
+ #include <stddef.h>
+ #include <string.h>
+
++#include "string_util.h"
++
+ #if USE_UNLOCKED_IO
+ #include "unlocked-io.h"
+ #endif
+@@ -517,3 +519,8 @@ void sha256_process_block(const void *buffer, size_t len,
+ h = ctx->state[7] += h;
+ }
+ }
++
++char *sha256_to_string(const void *sha256sum_bin)
++{
++ return bin_to_hex(sha256sum_bin, 32);
++}
+diff --git a/libopkg/sha256.h b/libopkg/sha256.h
+index 734ab54..0d1e9e5 100644
+--- a/libopkg/sha256.h
++++ b/libopkg/sha256.h
+@@ -85,6 +85,9 @@ extern int sha224_stream(FILE * stream, void *resblock);
+ extern void *sha256_buffer(const char *buffer, size_t len, void *resblock);
+ extern void *sha224_buffer(const char *buffer, size_t len, void *resblock);
+
++/* Convert a binary sha256sum value to an ASCII string. */
++char *sha256_to_string(const void *sha256sum_bin);
++
+ #ifdef __cplusplus
+ }
+ #endif
+--
+1.9.1
+
diff --git a/yocto-poky/meta/recipes-devtools/opkg/opkg/0004-opkg_download-Use-short-cache-file-name.patch b/yocto-poky/meta/recipes-devtools/opkg/opkg/0004-opkg_download-Use-short-cache-file-name.patch
new file mode 100644
index 000000000..7ea661dcf
--- /dev/null
+++ b/yocto-poky/meta/recipes-devtools/opkg/opkg/0004-opkg_download-Use-short-cache-file-name.patch
@@ -0,0 +1,85 @@
+From 61636f15718edc7ea17b91f22f1d97b905eaf951 Mon Sep 17 00:00:00 2001
+From: Paul Barker <paul@paulbarker.me.uk>
+Date: Sat, 7 Nov 2015 10:23:52 +0000
+Subject: [PATCH 4/4] opkg_download: Use short cache file name
+
+Source URIs can be very long. The cache directory itself may already have a very
+long path, especially if we're installing packages into an offline rootfs.
+Therefore it's not a good idea to simply tag the source URI onto the cache
+directory path to create a cache file name.
+
+To create shorter cache file names which are deterministic and very likely to be
+unique, we use the md5sum of the source URI along with the basename of the
+source URI. The basename is length-limited to ensure that the resulting
+filename length is always reasonable.
+
+Signed-off-by: Paul Barker <paul@paulbarker.me.uk>
+Signed-off-by: Alejandro del Castillo <alejandro.delcastillo@ni.com>
+
+Upstream-Status: Accepted
+---
+ libopkg/opkg_download.c | 35 ++++++++++++++++++++++++++++-------
+ 1 file changed, 28 insertions(+), 7 deletions(-)
+
+diff --git a/libopkg/opkg_download.c b/libopkg/opkg_download.c
+index e9b86a5..a37b10d 100644
+--- a/libopkg/opkg_download.c
++++ b/libopkg/opkg_download.c
+@@ -29,10 +29,18 @@
+ #include "opkg_verify.h"
+ #include "opkg_utils.h"
+
++#include "md5.h"
+ #include "sprintf_alloc.h"
+ #include "file_util.h"
+ #include "xfuncs.h"
+
++/* Limit the short file name used to generate cache file names to 90 characters
++ * so that when added to the md5sum (32 characters) and an underscore, the
++ * resulting length is below 128 characters. The maximum file name length
++ * differs between platforms but 128 characters should be reasonable.
++ */
++#define MAX_SHORT_FILE_NAME_LENGTH 90
++
+ static int opkg_download_set_env()
+ {
+ int r;
+@@ -135,15 +143,28 @@ int opkg_download_internal(const char *src, const char *dest,
+ */
+ char *get_cache_location(const char *src)
+ {
+- char *cache_name = xstrdup(src);
+- char *cache_location, *p;
++ unsigned char md5sum_bin[16];
++ char *md5sum_hex;
++ char *cache_location;
++ char *short_file_name;
++ char *tmp = xstrdup(src);
+
+- for (p = cache_name; *p; p++)
+- if (*p == '/')
+- *p = '_';
++ md5_buffer(src, strlen(src), md5sum_bin);
++ md5sum_hex = md5_to_string(md5sum_bin);
+
+- sprintf_alloc(&cache_location, "%s/%s", opkg_config->cache_dir, cache_name);
+- free(cache_name);
++ /* Generate a short file name which will be used along with an md5sum of the
++ * full src URI in the cache file name. This short file name is limited to
++ * MAX_SHORT_FILE_NAME_LENGTH to ensure that the total cache file name
++ * length is reasonable.
++ */
++ short_file_name = basename(tmp);
++ if (strlen(short_file_name) > MAX_SHORT_FILE_NAME_LENGTH)
++ short_file_name[MAX_SHORT_FILE_NAME_LENGTH] = '\0';
++
++ sprintf_alloc(&cache_location, "%s/%s_%s", opkg_config->cache_dir,
++ md5sum_hex, short_file_name);
++ free(md5sum_hex);
++ free(tmp);
+ return cache_location;
+ }
+
+--
+1.9.1
+
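For illustration only (not part of the diff above): a sketch of the cache-file naming scheme this patch describes, "<cache_dir>/<md5-of-URI>_<basename limited to 90 chars>". The md5 hex digest is passed in precomputed (a placeholder value is used below) so the sketch does not need opkg's md5 code; the feed URL is likewise invented.

    #include <libgen.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define MAX_SHORT_FILE_NAME_LENGTH 90

    /* Build "<cache_dir>/<md5_hex>_<truncated basename of src_uri>". */
    static char *cache_name(const char *cache_dir, const char *md5_hex,
                            const char *src_uri)
    {
        char *tmp = strdup(src_uri);   /* basename() may modify its argument */
        char *short_name;
        char *result;
        size_t n;

        if (!tmp)
            return NULL;
        short_name = basename(tmp);
        if (strlen(short_name) > MAX_SHORT_FILE_NAME_LENGTH)
            short_name[MAX_SHORT_FILE_NAME_LENGTH] = '\0';

        n = strlen(cache_dir) + strlen(md5_hex) + strlen(short_name) + 3;
        result = malloc(n);
        if (result)
            snprintf(result, n, "%s/%s_%s", cache_dir, md5_hex, short_name);
        free(tmp);
        return result;
    }

    int main(void)
    {
        char *name = cache_name("/var/cache/opkg",
                                "0123456789abcdef0123456789abcdef",
                                "http://feeds.example.com/base/busybox_1.23.2-r0_mips.ipk");

        puts(name);   /* /var/cache/opkg/0123..._busybox_1.23.2-r0_mips.ipk */
        free(name);
        return 0;
    }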
diff --git a/yocto-poky/meta/recipes-devtools/opkg/opkg_0.3.0.bb b/yocto-poky/meta/recipes-devtools/opkg/opkg_0.3.0.bb
index f4dbb2dd4..5ad3e92cf 100644
--- a/yocto-poky/meta/recipes-devtools/opkg/opkg_0.3.0.bb
+++ b/yocto-poky/meta/recipes-devtools/opkg/opkg_0.3.0.bb
@@ -15,6 +15,12 @@ SRC_URI = "http://downloads.yoctoproject.org/releases/${BPN}/${BPN}-${PV}.tar.gz
file://opkg-configure.service \
file://opkg.conf \
file://0001-opkg_archive-add-support-for-empty-compressed-files.patch \
+ file://0001-libopkg-include-stdio.h-for-getting-FILE-defined.patch \
+ file://0001-opkg_conf-create-opkg.lock-in-run-instead-of-var-run.patch \
+ file://0001-string_util-New-file-with-bin_to_hex-function.patch \
+ file://0002-md5-Add-md5_to_string-function.patch \
+ file://0003-sha256-Add-sha256_to_string-function.patch \
+ file://0004-opkg_download-Use-short-cache-file-name.patch \
"
SRC_URI[md5sum] = "3412cdc71d78b98facc84b19331ec64e"
diff --git a/yocto-poky/meta/recipes-devtools/perl/perl-native_5.22.0.bb b/yocto-poky/meta/recipes-devtools/perl/perl-native_5.22.0.bb
index a9a1cab94..b4dda3128 100644
--- a/yocto-poky/meta/recipes-devtools/perl/perl-native_5.22.0.bb
+++ b/yocto-poky/meta/recipes-devtools/perl/perl-native_5.22.0.bb
@@ -28,16 +28,15 @@ do_configure () {
-Dcf_by="Open Embedded" \
-Dprefix=${prefix} \
-Dvendorprefix=${prefix} \
- -Dvendorprefix=${prefix} \
-Dsiteprefix=${prefix} \
\
-Dbin=${STAGING_BINDIR}/${PN} \
-Dprivlib=${STAGING_LIBDIR}/perl/${PV} \
-Darchlib=${STAGING_LIBDIR}/perl/${PV} \
- -Dvendorlib=${STAGING_LIBDIR}/perl/${PV} \
- -Dvendorarch=${STAGING_LIBDIR}/perl/${PV} \
- -Dsitelib=${STAGING_LIBDIR}/perl/${PV} \
- -Dsitearch=${STAGING_LIBDIR}/perl/${PV} \
+ -Dvendorlib=${STAGING_LIBDIR}/perl/vendor_perl/${PV} \
+ -Dvendorarch=${STAGING_LIBDIR}/perl/vendor_perl/${PV} \
+ -Dsitelib=${STAGING_LIBDIR}/perl/site_perl/${PV} \
+ -Dsitearch=${STAGING_LIBDIR}/perl/site_perl/${PV} \
\
-Duseshrplib \
-Dusethreads \
@@ -95,8 +94,11 @@ do_install () {
install $i ${D}${libdir}/perl/${PV}/CORE
done
- create_wrapper ${D}${bindir}/perl PERL5LIB='$PERL5LIB:${STAGING_LIBDIR}/perl/${PV}:${STAGING_LIBDIR}/perl:${STAGING_LIBDIR}/perl/site_perl/${PV}:${STAGING_LIBDIR}/perl/vendor_perl/${PV}'
- create_wrapper ${D}${bindir}/perl${PV} PERL5LIB='$PERL5LIB:${STAGING_LIBDIR}/perl/${PV}:${STAGING_LIBDIR}/perl${STAGING_LIBDIR}/perl:${STAGING_LIBDIR}/perl/site_perl/${PV}:${STAGING_LIBDIR}/perl/vendor_perl/${PV}'
+ # Those wrappers mean that perl installed from sstate (which may change
+ # path location) works and that in the nativesdk case, the SDK can be
+ # installed to a different location from the one it was built for.
+ create_wrapper ${D}${bindir}/perl PERL5LIB='$PERL5LIB:${STAGING_LIBDIR}/perl/site_perl/${PV}:${STAGING_LIBDIR}/perl/vendor_perl/${PV}:${STAGING_LIBDIR}/perl/${PV}'
+ create_wrapper ${D}${bindir}/perl${PV} PERL5LIB='$PERL5LIB:${STAGING_LIBDIR}/perl/site_perl/${PV}:${STAGING_LIBDIR}/perl/vendor_perl/${PV}:${STAGING_LIBDIR}/perl/${PV}'
# Use /usr/bin/env nativeperl for the perl script.
for f in `grep -Il '#! *${bindir}/perl' ${D}/${bindir}/*`; do
diff --git a/yocto-poky/meta/recipes-devtools/perl/perl/perl-errno-generation-gcc5.patch b/yocto-poky/meta/recipes-devtools/perl/perl/perl-errno-generation-gcc5.patch
new file mode 100644
index 000000000..efbc55df2
--- /dev/null
+++ b/yocto-poky/meta/recipes-devtools/perl/perl/perl-errno-generation-gcc5.patch
@@ -0,0 +1,23 @@
+Upstream-Status: Inappropriate [embedded specific]
+
+The upstream code assumes that the compiler version used to compile miniperl/perl-native
+is the same as the one being used to build the perl binary. Since most build hosts are not running
+gcc 5, that assumption is unlikely to hold on any supported host. Switch out gccversion
+for the version extracted from $CC --version.
+
+--- perl-5.22.0/ext/Errno/Errno_pm.PL 2015-10-19 18:01:20.622143786 -0400
++++ perl-5.22.0-fixed/ext/Errno/Errno_pm.PL 2015-10-19 17:50:35.662137367 -0400
+@@ -224,9 +224,12 @@
+
+ { # BeOS (support now removed) did not enter this block
+ # invoke CPP and read the output
++ my $compiler = $ENV{'CC'};
++ my $compiler_out = `$compiler --version`;
++ my @compiler_version = split / /,$compiler_out;
+
+ my $inhibit_linemarkers = '';
+- if ($Config{gccversion} =~ /\A(\d+)\./ and $1 >= 5) {
++ if (@compiler_version[2] =~ /\A(\d+)\./ and $1 >= 5) {
+ # GCC 5.0 interleaves expanded macros with line numbers breaking
+ # each line into multiple lines. RT#123784
+ $inhibit_linemarkers = ' -P';
diff --git a/yocto-poky/meta/recipes-devtools/perl/perl_5.22.0.bb b/yocto-poky/meta/recipes-devtools/perl/perl_5.22.0.bb
index 3ce7849f9..9df8d043e 100644
--- a/yocto-poky/meta/recipes-devtools/perl/perl_5.22.0.bb
+++ b/yocto-poky/meta/recipes-devtools/perl/perl_5.22.0.bb
@@ -62,6 +62,7 @@ SRC_URI += " \
file://ext-ODBM_File-hints-linux.pl-link-libgdbm_compat.patch \
file://ext-ODBM_File-t-odbm.t-fix-the-path-of-dbmt_common.p.patch \
file://perl-PathTools-don-t-filter-out-blib-from-INC.patch \
+ file://perl-errno-generation-gcc5.patch \
"
# Fix test case issues
@@ -245,7 +246,7 @@ do_install() {
do_install_append_class-nativesdk () {
create_wrapper ${D}${bindir}/perl \
- PERL5LIB='$PERL5LIB:$OECORE_NATIVE_SYSROOT/${libdir_nativesdk}/perl:$OECORE_NATIVE_SYSROOT/${libdir_nativesdk}/perl/${PV}:$OECORE_NATIVE_SYSROOT/${libdir_nativesdk}/perl/site_perl/${PV}:$OECORE_NATIVE_SYSROOT/${libdir_nativesdk}/perl/vendor_perl/${PV}'
+ PERL5LIB='$PERL5LIB:$OECORE_NATIVE_SYSROOT/${libdir_nativesdk}/perl/site_perl/${PV}:$OECORE_NATIVE_SYSROOT/${libdir_nativesdk}/perl/vendor_perl/${PV}:$OECORE_NATIVE_SYSROOT/${libdir_nativesdk}/perl/${PV}'
}
PACKAGE_PREPROCESS_FUNCS += "perl_package_preprocess"
diff --git a/yocto-poky/meta/recipes-devtools/prelink/prelink_git.bb b/yocto-poky/meta/recipes-devtools/prelink/prelink_git.bb
index 79a5f5011..e223ef689 100644
--- a/yocto-poky/meta/recipes-devtools/prelink/prelink_git.bb
+++ b/yocto-poky/meta/recipes-devtools/prelink/prelink_git.bb
@@ -8,7 +8,7 @@ and executables, so that far fewer relocations need to be resolved at \
runtime and thus programs come up faster."
LICENSE = "GPLv2"
LIC_FILES_CHKSUM = "file://COPYING;md5=c93c0550bd3173f4504b2cbd8991e50b"
-SRCREV = "cdee5a4dd226cc5e9f30f370067a9031f398ef3c"
+SRCREV = "927979bbd115eeb8a75db3231906ef6aca4c4eb6"
PV = "1.0+git${SRCPV}"
#
@@ -35,7 +35,7 @@ SRC_URI = "git://git.yoctoproject.org/prelink-cross.git;branch=cross_prelink \
TARGET_OS_ORIG := "${TARGET_OS}"
OVERRIDES_append = ":${TARGET_OS_ORIG}"
-S = "${WORKDIR}/git/trunk"
+S = "${WORKDIR}/git"
inherit autotools
diff --git a/yocto-poky/meta/recipes-devtools/pseudo/pseudo_1.7.3.bb b/yocto-poky/meta/recipes-devtools/pseudo/pseudo_1.7.4.bb
index 1e9ef3bb9..d68e0af7d 100644
--- a/yocto-poky/meta/recipes-devtools/pseudo/pseudo_1.7.3.bb
+++ b/yocto-poky/meta/recipes-devtools/pseudo/pseudo_1.7.4.bb
@@ -6,8 +6,8 @@ SRC_URI = " \
file://fallback-group \
"
-SRC_URI[md5sum] = "2bd0a44eadd4713e90ad8c152eea77aa"
-SRC_URI[sha256sum] = "e9fc3922f8feb97839b50d14eb1987afdc8f22cdcac93119323cccd5f8444652"
+SRC_URI[md5sum] = "6e4b59a346d08d4a29133c335ea12052"
+SRC_URI[sha256sum] = "f33ff84da328f943155f22cfd49030ef4ad85ad35fc2d9419a203521b65c384c"
PSEUDO_EXTRA_OPTS ?= "--enable-force-async --without-passwd-fallback"
diff --git a/yocto-poky/meta/recipes-devtools/pseudo/pseudo_git.bb b/yocto-poky/meta/recipes-devtools/pseudo/pseudo_git.bb
index 31e1223a4..eb666c064 100644
--- a/yocto-poky/meta/recipes-devtools/pseudo/pseudo_git.bb
+++ b/yocto-poky/meta/recipes-devtools/pseudo/pseudo_git.bb
@@ -1,7 +1,7 @@
require pseudo.inc
-SRCREV = "e795df44a90a426a76b790f1b2774f3046a8fc31"
-PV = "1.7.2+git${SRCPV}"
+SRCREV = "3bc3909fa70535c2ef876009dc58e577b10a7e0e"
+PV = "1.7.4+git${SRCPV}"
DEFAULT_PREFERENCE = "-1"
diff --git a/yocto-poky/meta/recipes-devtools/python/python-3.4-manifest.inc b/yocto-poky/meta/recipes-devtools/python/python-3.4-manifest.inc
index 07e149027..97070b6fa 100644
--- a/yocto-poky/meta/recipes-devtools/python/python-3.4-manifest.inc
+++ b/yocto-poky/meta/recipes-devtools/python/python-3.4-manifest.inc
@@ -58,7 +58,7 @@ RDEPENDS_${PN}-db="${PN}-core"
FILES_${PN}-db="${libdir}/python3.4/anydbm.* ${libdir}/python3.4/dumbdbm.* ${libdir}/python3.4/whichdb.* ${libdir}/python3.4/dbm ${libdir}/python3.4/lib-dynload/_dbm.*.so "
SUMMARY_${PN}-debugger="Python debugger"
-RDEPENDS_${PN}-debugger="${PN}-core ${PN}-io ${PN}-lang ${PN}-re ${PN}-stringold ${PN}-shell ${PN}-pprint"
+RDEPENDS_${PN}-debugger="${PN}-core ${PN}-io ${PN}-lang ${PN}-re ${PN}-stringold ${PN}-shell ${PN}-pprint ${PN}-importlib ${PN}-pkgutil"
FILES_${PN}-debugger="${libdir}/python3.4/bdb.* ${libdir}/python3.4/pdb.* "
SUMMARY_${PN}-dev="Python development package"
diff --git a/yocto-poky/meta/recipes-devtools/python/python-async_0.6.2.bb b/yocto-poky/meta/recipes-devtools/python/python-async_0.6.2.bb
index 8ed0b0393..5a17a1a00 100644
--- a/yocto-poky/meta/recipes-devtools/python/python-async_0.6.2.bb
+++ b/yocto-poky/meta/recipes-devtools/python/python-async_0.6.2.bb
@@ -10,7 +10,7 @@ SRC_URI[sha256sum] = "ac6894d876e45878faae493b0cf61d0e28ec417334448ac0a6ea2229d8
S = "${WORKDIR}/async-${PV}"
-inherit distutils
+inherit setuptools
RDEPENDS_${PN} += "python-threading python-lang"
diff --git a/yocto-poky/meta/recipes-devtools/python/python-pygtk_2.24.0.bb b/yocto-poky/meta/recipes-devtools/python/python-pygtk_2.24.0.bb
index e4c33a803..79b3110e3 100644
--- a/yocto-poky/meta/recipes-devtools/python/python-pygtk_2.24.0.bb
+++ b/yocto-poky/meta/recipes-devtools/python/python-pygtk_2.24.0.bb
@@ -26,7 +26,9 @@ S = "${WORKDIR}/${SRCNAME}-${PV}"
EXTRA_OECONF = "--disable-docs --with-python-includes=${STAGING_INCDIR}/../"
-inherit autotools pkgconfig distutils-base
+inherit autotools pkgconfig distutils-base distro_features_check
+
+ANY_OF_DISTRO_FEATURES = "${GTK2DISTROFEATURES}"
do_configure_prepend() {
install -m 0644 ${WORKDIR}/acinclude.m4 ${S}/
diff --git a/yocto-poky/meta/recipes-devtools/python/python-smartpm/smart-cache.py-getPackages-matches-name-version.patch b/yocto-poky/meta/recipes-devtools/python/python-smartpm/smart-cache.py-getPackages-matches-name-version.patch
new file mode 100644
index 000000000..225b02f96
--- /dev/null
+++ b/yocto-poky/meta/recipes-devtools/python/python-smartpm/smart-cache.py-getPackages-matches-name-version.patch
@@ -0,0 +1,43 @@
+From ee05e55e84b53f4bb0d0baba13ca47a8f84b7cb4 Mon Sep 17 00:00:00 2001
+From: Robert Yang <liezhi.yang@windriver.com>
+Date: Wed, 30 Sep 2015 01:12:52 -0700
+Subject: [PATCH] smart:cache.py: getPackages() matches name + arch
+
+In the past it only matched the name, for example:
+smart install busybox (matched)
+but:
+smart install busybox@core2_64 (didn't match)
+
+The installation is very slow when there is no match, since it would search all
+the packages in the repo.
+This patch makes it match both.
+
+Upstream-Status: Pending
+
+Signed-off-by: Robert Yang <liezhi.yang@windriver.com>
+---
+ smart/cache.py | 3 ++-
+ smart/ccache.c | 9 ++++++++-
+ 2 files changed, 10 insertions(+), 2 deletions(-)
+
+diff --git a/smart/control.py b/smart/control.py
+index d44abe7..f23a604 100644
+--- a/smart/control.py
++++ b/smart/control.py
+@@ -876,9 +876,13 @@ class Control(object):
+ objects = []
+
+ # If we find packages with exactly the given
+- # name or name-version, use them.
+- for pkg in self._cache.getPackages(s):
+- if pkg.name == s or "%s-%s" % (pkg.name, pkg.version) == s:
++ # name, name-version, or name@arch, use them.
++ s_name = s
++ if "@" in s:
++ s_name = s.split("@")[0]
++ for pkg in self._cache.getPackages(s_name):
++ if pkg.name == s or "%s-%s" % (pkg.name, pkg.version) == s \
++ or "%s@%s" % (pkg.name, pkg.version.split('@')[1]) == s:
+ objects.append((1.0, pkg))
+
+ if not objects:
diff --git a/yocto-poky/meta/recipes-devtools/python/python-smartpm_git.bb b/yocto-poky/meta/recipes-devtools/python/python-smartpm_git.bb
index 8b974b0c3..d6c378bcf 100644
--- a/yocto-poky/meta/recipes-devtools/python/python-smartpm_git.bb
+++ b/yocto-poky/meta/recipes-devtools/python/python-smartpm_git.bb
@@ -23,6 +23,7 @@ SRC_URI = "\
file://smart-add-for-rpm-ignoresize-check.patch \
file://smart-already-installed-message.patch \
file://smart-set-noprogress-for-pycurl.patch \
+ file://smart-cache.py-getPackages-matches-name-version.patch \
"
SRCREV = "407a7eca766431257dcd1da15175cc36a1bb22d0"
diff --git a/yocto-poky/meta/recipes-devtools/qemu/qemu.inc b/yocto-poky/meta/recipes-devtools/qemu/qemu.inc
index f6c0ae304..b17da2f23 100644
--- a/yocto-poky/meta/recipes-devtools/qemu/qemu.inc
+++ b/yocto-poky/meta/recipes-devtools/qemu/qemu.inc
@@ -31,9 +31,6 @@ SRC_URI_append_class-native = "\
EXTRA_OECONF += "--target-list=${@get_qemu_target_list(d)} --disable-werror --disable-bluez --disable-libiscsi --with-system-pixman --extra-cflags='${CFLAGS}'"
-EXTRA_OECONF_append_class-native = " --enable-debug --enable-debug-info"
-INHIBIT_SYSROOT_STRIP = "1"
-
EXTRA_OECONF_class-nativesdk = "--target-list=${@get_qemu_target_list(d)} --disable-werror \
"
export LIBTOOL="${HOST_SYS}-libtool"
@@ -135,9 +132,4 @@ PACKAGECONFIG[gnutls] = "--enable-gnutls,--disable-gnutls,gnutls"
EXTRA_OECONF += "${@bb.utils.contains('PACKAGECONFIG', 'alsa', '--audio-drv-list=oss,alsa', '', d)}"
-# Qemu target will not build in world build for ARM or Mips
-BROKEN_qemuarm = "1"
-BROKEN_qemumips64 = "1"
-BROKEN_qemumips = "1"
-
INSANE_SKIP_${PN} = "arch"
diff --git a/yocto-poky/meta/recipes-devtools/qemu/qemu/CVE-2015-7295_1.patch b/yocto-poky/meta/recipes-devtools/qemu/qemu/CVE-2015-7295_1.patch
new file mode 100644
index 000000000..d7ae8713c
--- /dev/null
+++ b/yocto-poky/meta/recipes-devtools/qemu/qemu/CVE-2015-7295_1.patch
@@ -0,0 +1,63 @@
+From ce317461573bac12b10d67699b4ddf1f97cf066c Mon Sep 17 00:00:00 2001
+From: Jason Wang <jasowang@redhat.com>
+Date: Fri, 25 Sep 2015 13:21:28 +0800
+Subject: [PATCH] virtio: introduce virtqueue_unmap_sg()
+
+Factor out sg unmapping logic. This will be reused by the patch that
+can discard descriptor.
+
+Cc: Michael S. Tsirkin <mst@redhat.com>
+Cc: Andrew James <andrew.james@hpe.com>
+Signed-off-by: Jason Wang <jasowang@redhat.com>
+Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+
+Upstream-Status: Backport
+
+git.qemu.org/?p=qemu.git;a=commit;h=ce317461573bac12b10d67699b4ddf1f97cf066c
+
+CVE: CVE-2015-7295 patch #1
+[Yocto # 9013]
+
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ hw/virtio/virtio.c | 14 ++++++++++----
+ 1 file changed, 10 insertions(+), 4 deletions(-)
+
+Index: qemu-2.4.0/hw/virtio/virtio.c
+===================================================================
+--- qemu-2.4.0.orig/hw/virtio/virtio.c
++++ qemu-2.4.0/hw/virtio/virtio.c
+@@ -243,14 +243,12 @@ int virtio_queue_empty(VirtQueue *vq)
+ return vring_avail_idx(vq) == vq->last_avail_idx;
+ }
+
+-void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
+- unsigned int len, unsigned int idx)
++static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem,
++ unsigned int len)
+ {
+ unsigned int offset;
+ int i;
+
+- trace_virtqueue_fill(vq, elem, len, idx);
+-
+ offset = 0;
+ for (i = 0; i < elem->in_num; i++) {
+ size_t size = MIN(len - offset, elem->in_sg[i].iov_len);
+@@ -266,6 +264,14 @@ void virtqueue_fill(VirtQueue *vq, const
+ cpu_physical_memory_unmap(elem->out_sg[i].iov_base,
+ elem->out_sg[i].iov_len,
+ 0, elem->out_sg[i].iov_len);
++}
++
++void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
++ unsigned int len, unsigned int idx)
++{
++ trace_virtqueue_fill(vq, elem, len, idx);
++
++ virtqueue_unmap_sg(vq, elem, len);
+
+ idx = (idx + vring_used_idx(vq)) % vq->vring.num;
+
diff --git a/yocto-poky/meta/recipes-devtools/qemu/qemu/CVE-2015-7295_2.patch b/yocto-poky/meta/recipes-devtools/qemu/qemu/CVE-2015-7295_2.patch
new file mode 100644
index 000000000..45dfab36e
--- /dev/null
+++ b/yocto-poky/meta/recipes-devtools/qemu/qemu/CVE-2015-7295_2.patch
@@ -0,0 +1,58 @@
+From 29b9f5efd78ae0f9cc02dd169b6e80d2c404bade Mon Sep 17 00:00:00 2001
+From: Jason Wang <jasowang@redhat.com>
+Date: Fri, 25 Sep 2015 13:21:29 +0800
+Subject: [PATCH] virtio: introduce virtqueue_discard()
+
+This patch introduces virtqueue_discard() to discard a descriptor and
+unmap the sgs. This will be used by the patch that will discard
+descriptor when packet is truncated.
+
+Cc: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: Jason Wang <jasowang@redhat.com>
+Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Upstream-Status: Backport
+
+git.qemu.org/?p=qemu.git;a=commit;h=29b9f5efd78ae0f9cc02dd169b6e80d2c404bade
+
+CVE: CVE-2015-7295 patch #2
+[Yocto # 9013]
+
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ hw/virtio/virtio.c | 7 +++++++
+ include/hw/virtio/virtio.h | 2 ++
+ 2 files changed, 9 insertions(+)
+
+Index: qemu-2.4.0/hw/virtio/virtio.c
+===================================================================
+--- qemu-2.4.0.orig/hw/virtio/virtio.c
++++ qemu-2.4.0/hw/virtio/virtio.c
+@@ -266,6 +266,13 @@ static void virtqueue_unmap_sg(VirtQueue
+ 0, elem->out_sg[i].iov_len);
+ }
+
++void virtqueue_discard(VirtQueue *vq, const VirtQueueElement *elem,
++ unsigned int len)
++{
++ vq->last_avail_idx--;
++ virtqueue_unmap_sg(vq, elem, len);
++}
++
+ void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
+ unsigned int len, unsigned int idx)
+ {
+Index: qemu-2.4.0/include/hw/virtio/virtio.h
+===================================================================
+--- qemu-2.4.0.orig/include/hw/virtio/virtio.h
++++ qemu-2.4.0/include/hw/virtio/virtio.h
+@@ -146,6 +146,8 @@ void virtio_del_queue(VirtIODevice *vdev
+ void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
+ unsigned int len);
+ void virtqueue_flush(VirtQueue *vq, unsigned int count);
++void virtqueue_discard(VirtQueue *vq, const VirtQueueElement *elem,
++ unsigned int len);
+ void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
+ unsigned int len, unsigned int idx);
+
diff --git a/yocto-poky/meta/recipes-devtools/qemu/qemu/CVE-2015-7295_3.patch b/yocto-poky/meta/recipes-devtools/qemu/qemu/CVE-2015-7295_3.patch
new file mode 100644
index 000000000..74442e32f
--- /dev/null
+++ b/yocto-poky/meta/recipes-devtools/qemu/qemu/CVE-2015-7295_3.patch
@@ -0,0 +1,52 @@
+From 0cf33fb6b49a19de32859e2cdc6021334f448fb3 Mon Sep 17 00:00:00 2001
+From: Jason Wang <jasowang@redhat.com>
+Date: Fri, 25 Sep 2015 13:21:30 +0800
+Subject: [PATCH] virtio-net: correctly drop truncated packets
+
+When packet is truncated during receiving, we drop the packets but
+neither discard the descriptor nor add and signal used
+descriptor. This will lead several issues:
+
+- sg mappings are leaked
+- rx will be stalled if a lots of packets were truncated
+
+In order to be consistent with vhost, fix by discarding the descriptor
+in this case.
+
+Cc: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: Jason Wang <jasowang@redhat.com>
+Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+
+Upstream-Status: Backport
+
+git.qemu.org/?p=qemu.git;a=commit;h=0cf33fb6b49a19de32859e2cdc6021334f448fb3
+
+CVE: CVE-2015-7295 patch #3
+[Yocto # 9013]
+
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ hw/net/virtio-net.c | 8 +-------
+ 1 file changed, 1 insertion(+), 7 deletions(-)
+
+Index: qemu-2.4.0/hw/net/virtio-net.c
+===================================================================
+--- qemu-2.4.0.orig/hw/net/virtio-net.c
++++ qemu-2.4.0/hw/net/virtio-net.c
+@@ -1086,13 +1086,7 @@ static ssize_t virtio_net_receive(NetCli
+ * must have consumed the complete packet.
+ * Otherwise, drop it. */
+ if (!n->mergeable_rx_bufs && offset < size) {
+-#if 0
+- error_report("virtio-net truncated non-mergeable packet: "
+- "i %zd mergeable %d offset %zd, size %zd, "
+- "guest hdr len %zd, host hdr len %zd",
+- i, n->mergeable_rx_bufs,
+- offset, size, n->guest_hdr_len, n->host_hdr_len);
+-#endif
++ virtqueue_discard(q->rx_vq, &elem, total);
+ return size;
+ }
+
diff --git a/yocto-poky/meta/recipes-devtools/qemu/qemu/CVE-2015-7504.patch b/yocto-poky/meta/recipes-devtools/qemu/qemu/CVE-2015-7504.patch
new file mode 100644
index 000000000..90a7947ab
--- /dev/null
+++ b/yocto-poky/meta/recipes-devtools/qemu/qemu/CVE-2015-7504.patch
@@ -0,0 +1,56 @@
+From 837f21aacf5a714c23ddaadbbc5212f9b661e3f7 Mon Sep 17 00:00:00 2001
+From: Prasad J Pandit <pjp@fedoraproject.org>
+Date: Fri, 20 Nov 2015 11:50:31 +0530
+Subject: [PATCH] net: pcnet: add check to validate receive data
+ size(CVE-2015-7504)
+
+In loopback mode, pcnet_receive routine appends CRC code to the
+receive buffer. If the data size given is same as the buffer size,
+the appended CRC code overwrites 4 bytes after s->buffer. Added a
+check to avoid that.
+
+Reported by: Qinghao Tang <luodalongde@gmail.com>
+Cc: qemu-stable@nongnu.org
+Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: Prasad J Pandit <pjp@fedoraproject.org>
+Signed-off-by: Jason Wang <jasowang@redhat.com>
+
+Upstream-Status: Backport
+
+http://git.qemu.org/?p=qemu.git;a=commit;h=837f21aacf5a714c23ddaadbbc5212f9b661e3f7
+
+CVE: CVE-2015-7504
+[Yocto # 9013]
+
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ hw/net/pcnet.c | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+Index: qemu-2.4.0/hw/net/pcnet.c
+===================================================================
+--- qemu-2.4.0.orig/hw/net/pcnet.c
++++ qemu-2.4.0/hw/net/pcnet.c
+@@ -1085,7 +1085,7 @@ ssize_t pcnet_receive(NetClientState *nc
+ uint32_t fcs = ~0;
+ uint8_t *p = src;
+
+- while (p != &src[size-4])
++ while (p != &src[size])
+ CRC(fcs, *p++);
+ crc_err = (*(uint32_t *)p != htonl(fcs));
+ }
+@@ -1234,8 +1234,10 @@ static void pcnet_transmit(PCNetState *s
+ bcnt = 4096 - GET_FIELD(tmd.length, TMDL, BCNT);
+
+ /* if multi-tmd packet outsizes s->buffer then skip it silently.
+- Note: this is not what real hw does */
+- if (s->xmit_pos + bcnt > sizeof(s->buffer)) {
++ * Note: this is not what real hw does.
++ * Last four bytes of s->buffer are used to store CRC FCS code.
++ */
++ if (s->xmit_pos + bcnt > sizeof(s->buffer) - 4) {
+ s->xmit_pos = -1;
+ goto txdone;
+ }
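For illustration only (not part of the diff above): the invariant the fix enforces is that a payload copied into a fixed buffer must leave room for the 4-byte CRC/FCS trailer appended afterwards, i.e. it may be at most sizeof(buffer) - 4 bytes. A standalone sketch of that check; the helper name and sample sizes are made up.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define CRC_LEN 4

    /* Copy a payload into buf and append a 4-byte FCS; refuse payloads that
     * would leave no room for the trailer (the overflow fixed above). */
    static int append_with_crc(uint8_t *buf, size_t buf_size,
                               const uint8_t *data, size_t len, uint32_t fcs)
    {
        if (len + CRC_LEN > buf_size)
            return -1;                     /* would overrun buf: reject */
        memcpy(buf, data, len);
        memcpy(buf + len, &fcs, CRC_LEN);  /* trailer stays inside buf */
        return 0;
    }

    int main(void)
    {
        uint8_t frame[64];
        uint8_t payload[64] = { 0 };

        /* Payload fills the whole buffer: rejected, the CRC would spill
         * 4 bytes past the end (the CVE-2015-7504 situation). */
        printf("%d\n", append_with_crc(frame, sizeof(frame), payload, 64, 0x1234abcd));

        /* A 60-byte payload leaves room for the trailer: accepted. */
        printf("%d\n", append_with_crc(frame, sizeof(frame), payload, 60, 0x1234abcd));
        return 0;
    }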
diff --git a/yocto-poky/meta/recipes-devtools/qemu/qemu/CVE-2015-7512.patch b/yocto-poky/meta/recipes-devtools/qemu/qemu/CVE-2015-7512.patch
new file mode 100644
index 000000000..50b8a6cee
--- /dev/null
+++ b/yocto-poky/meta/recipes-devtools/qemu/qemu/CVE-2015-7512.patch
@@ -0,0 +1,44 @@
+From 8b98a2f07175d46c3f7217639bd5e03f2ec56343 Mon Sep 17 00:00:00 2001
+From: Jason Wang <jasowang@redhat.com>
+Date: Mon, 30 Nov 2015 15:00:06 +0800
+Subject: [PATCH] pcnet: fix rx buffer overflow(CVE-2015-7512)
+
+Backends could provide a packet whose length is greater than buffer
+size. Check for this and truncate the packet to avoid rx buffer
+overflow in this case.
+
+Cc: Prasad J Pandit <pjp@fedoraproject.org>
+Cc: qemu-stable@nongnu.org
+Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: Jason Wang <jasowang@redhat.com>
+
+Upstream-Status: Backport
+
+http://git.qemu.org/?p=qemu.git;a=commit;h=8b98a2f07175d46c3f7217639bd5e03f2ec56343
+
+CVE: CVE-2015-7512
+[Yocto # 9013]
+
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ hw/net/pcnet.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+Index: qemu-2.4.0/hw/net/pcnet.c
+===================================================================
+--- qemu-2.4.0.orig/hw/net/pcnet.c
++++ qemu-2.4.0/hw/net/pcnet.c
+@@ -1065,6 +1065,12 @@ ssize_t pcnet_receive(NetClientState *nc
+ int pktcount = 0;
+
+ if (!s->looptest) {
++ if (size > 4092) {
++#ifdef PCNET_DEBUG_RMD
++ fprintf(stderr, "pcnet: truncates rx packet.\n");
++#endif
++ size = 4092;
++ }
+ memcpy(src, buf, size);
+ /* no need to compute the CRC */
+ src[size] = 0;
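For illustration only (not part of the diff above): a sketch of the receive-side clamp this fix adds, where an oversized packet from the backend is truncated to the buffer capacity before the copy (the real pcnet buffer is 4096 bytes, and clamping to 4092 leaves room for the CRC appended later). Names and sizes below are illustrative.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define RX_BUF_SIZE 4092

    /* Copy an incoming packet into the fixed receive buffer, truncating
     * anything larger than the buffer can hold (as the patch above does). */
    static size_t receive(uint8_t *rx_buf, const uint8_t *pkt, size_t size)
    {
        if (size > RX_BUF_SIZE) {
            fprintf(stderr, "truncating rx packet from %zu to %d bytes\n",
                    size, RX_BUF_SIZE);
            size = RX_BUF_SIZE;
        }
        memcpy(rx_buf, pkt, size);
        return size;
    }

    int main(void)
    {
        /* 4096-byte buffer; the clamp to 4092 leaves 4 bytes for the CRC. */
        static uint8_t rx_buf[RX_BUF_SIZE + 4];
        static uint8_t oversized[5000];

        printf("copied %zu bytes\n", receive(rx_buf, oversized, sizeof(oversized)));
        return 0;
    }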
diff --git a/yocto-poky/meta/recipes-devtools/qemu/qemu/CVE-2015-8345.patch b/yocto-poky/meta/recipes-devtools/qemu/qemu/CVE-2015-8345.patch
new file mode 100644
index 000000000..310b458a0
--- /dev/null
+++ b/yocto-poky/meta/recipes-devtools/qemu/qemu/CVE-2015-8345.patch
@@ -0,0 +1,73 @@
+From 00837731d254908a841d69298a4f9f077babaf24 Mon Sep 17 00:00:00 2001
+From: Stefan Weil <sw@weilnetz.de>
+Date: Fri, 20 Nov 2015 08:42:33 +0100
+Subject: [PATCH] eepro100: Prevent two endless loops
+
+http://lists.nongnu.org/archive/html/qemu-devel/2015-11/msg04592.html
+shows an example of how an endless loop in function action_command can
+be achieved.
+
+During my code review, I noticed a 2nd case which can result in an
+endless loop.
+
+Reported-by: Qinghao Tang <luodalongde@gmail.com>
+Signed-off-by: Stefan Weil <sw@weilnetz.de>
+Signed-off-by: Jason Wang <jasowang@redhat.com>
+
+Upstream-Status: Backport
+
+http://git.qemu.org/?p=qemu.git;a=commit;h=00837731d254908a841d69298a4f9f077babaf24
+
+CVE: CVE-2015-8345
+[Yocto # 9013]
+
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ hw/net/eepro100.c | 16 ++++++++++++++++
+ 1 file changed, 16 insertions(+)
+
+diff --git a/hw/net/eepro100.c b/hw/net/eepro100.c
+index 60333b7..685a478 100644
+--- a/hw/net/eepro100.c
++++ b/hw/net/eepro100.c
+@@ -774,6 +774,11 @@ static void tx_command(EEPRO100State *s)
+ #if 0
+ uint16_t tx_buffer_el = lduw_le_pci_dma(&s->dev, tbd_address + 6);
+ #endif
++ if (tx_buffer_size == 0) {
++ /* Prevent an endless loop. */
++ logout("loop in %s:%u\n", __FILE__, __LINE__);
++ break;
++ }
+ tbd_address += 8;
+ TRACE(RXTX, logout
+ ("TBD (simplified mode): buffer address 0x%08x, size 0x%04x\n",
+@@ -855,6 +860,10 @@ static void set_multicast_list(EEPRO100State *s)
+
+ static void action_command(EEPRO100State *s)
+ {
++ /* The loop below won't stop if it gets special handcrafted data.
++ Therefore we limit the number of iterations. */
++ unsigned max_loop_count = 16;
++
+ for (;;) {
+ bool bit_el;
+ bool bit_s;
+@@ -870,6 +879,13 @@ static void action_command(EEPRO100State *s)
+ #if 0
+ bool bit_sf = ((s->tx.command & COMMAND_SF) != 0);
+ #endif
++
++ if (max_loop_count-- == 0) {
++ /* Prevent an endless loop. */
++ logout("loop in %s:%u\n", __FILE__, __LINE__);
++ break;
++ }
++
+ s->cu_offset = s->tx.link;
+ TRACE(OTHER,
+ logout("val=(cu start), status=0x%04x, command=0x%04x, link=0x%08x\n",
+--
+2.3.5
+
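For illustration only (not part of the diff above): a sketch of the mitigation pattern used here, capping a walk over guest-controlled link pointers at a fixed iteration count so handcrafted data cannot spin it forever. The structure and limit below are invented for the example; only the bail-out idea mirrors the patch.

    #include <stdio.h>

    #define MAX_LOOP_COUNT 16

    struct cmd {
        int link;      /* index of the next command; guest controlled */
        int is_last;
    };

    /* Walk a command chain but give up after MAX_LOOP_COUNT steps so a
     * self-referencing chain cannot hang the emulator. */
    static int run_commands(const struct cmd *ring, int start)
    {
        unsigned max_loop_count = MAX_LOOP_COUNT;
        int idx = start;

        for (;;) {
            if (max_loop_count-- == 0)
                return -1;             /* endless chain detected: bail out */
            if (ring[idx].is_last)
                return 0;
            idx = ring[idx].link;
        }
    }

    int main(void)
    {
        /* Two commands that point at each other: an endless chain. */
        struct cmd looped[2] = { { 1, 0 }, { 0, 0 } };

        printf("%d\n", run_commands(looped, 0));   /* prints -1 */
        return 0;
    }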
diff --git a/yocto-poky/meta/recipes-devtools/qemu/qemu/CVE-2015-8504.patch b/yocto-poky/meta/recipes-devtools/qemu/qemu/CVE-2015-8504.patch
new file mode 100644
index 000000000..9e660217f
--- /dev/null
+++ b/yocto-poky/meta/recipes-devtools/qemu/qemu/CVE-2015-8504.patch
@@ -0,0 +1,51 @@
+From 4c65fed8bdf96780735dbdb92a8bd0d6b6526cc3 Mon Sep 17 00:00:00 2001
+From: Prasad J Pandit <pjp@fedoraproject.org>
+Date: Thu, 3 Dec 2015 18:54:17 +0530
+Subject: [PATCH] ui: vnc: avoid floating point exception
+
+While sending 'SetPixelFormat' messages to a VNC server,
+the client could set the 'red-max', 'green-max' and 'blue-max'
+values to be zero. This leads to a floating point exception in
+write_png_palette while doing frame buffer updates.
+
+Reported-by: Lian Yihan <lianyihan@360.cn>
+Signed-off-by: Prasad J Pandit <pjp@fedoraproject.org>
+Reviewed-by: Gerd Hoffmann <kraxel@redhat.com>
+Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
+
+Upstream-Status: Backport
+
+http://git.qemu.org/?p=qemu.git;a=commitdiff;h=4c65fed8bdf96780735dbdb92a8
+
+CVE: CVE-2015-8504
+[Yocto # 9013]
+
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ ui/vnc.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+Index: qemu-2.4.0/ui/vnc.c
+===================================================================
+--- qemu-2.4.0.orig/ui/vnc.c
++++ qemu-2.4.0/ui/vnc.c
+@@ -2189,15 +2189,15 @@ static void set_pixel_format(VncState *v
+ return;
+ }
+
+- vs->client_pf.rmax = red_max;
++ vs->client_pf.rmax = red_max ? red_max : 0xFF;
+ vs->client_pf.rbits = hweight_long(red_max);
+ vs->client_pf.rshift = red_shift;
+ vs->client_pf.rmask = red_max << red_shift;
+- vs->client_pf.gmax = green_max;
++ vs->client_pf.gmax = green_max ? green_max : 0xFF;
+ vs->client_pf.gbits = hweight_long(green_max);
+ vs->client_pf.gshift = green_shift;
+ vs->client_pf.gmask = green_max << green_shift;
+- vs->client_pf.bmax = blue_max;
++ vs->client_pf.bmax = blue_max ? blue_max : 0xFF;
+ vs->client_pf.bbits = hweight_long(blue_max);
+ vs->client_pf.bshift = blue_shift;
+ vs->client_pf.bmask = blue_max << blue_shift;
diff --git a/yocto-poky/meta/recipes-devtools/qemu/qemu/CVE-2016-1568.patch b/yocto-poky/meta/recipes-devtools/qemu/qemu/CVE-2016-1568.patch
new file mode 100644
index 000000000..9c40ffb5f
--- /dev/null
+++ b/yocto-poky/meta/recipes-devtools/qemu/qemu/CVE-2016-1568.patch
@@ -0,0 +1,46 @@
+From 4ab0359a8ae182a7ac5c99609667273167703fab Mon Sep 17 00:00:00 2001
+From: Prasad J Pandit <pjp@fedoraproject.org>
+Date: Mon, 11 Jan 2016 14:10:42 -0500
+Subject: [PATCH] ide: ahci: reset ncq object to unused on error
+
+When processing NCQ commands, the AHCI device emulation prepares an
+NCQ transfer object, to which an aio control block (aiocb) object
+is assigned in 'execute_ncq_command'. In case the NCQ
+command is invalid, the 'aiocb' object is not assigned, and the NCQ
+transfer object is left as 'used'. This leads to a use-after-free
+kind of error in 'bdrv_aio_cancel_async' via 'ahci_reset_port'.
+Reset NCQ transfer object to 'unused' to avoid it.
+
+[Maintainer edit: s/ACHI/AHCI/ in the commit message. --js]
+
+Reported-by: Qinghao Tang <luodalongde@gmail.com>
+Signed-off-by: Prasad J Pandit <pjp@fedoraproject.org>
+Reviewed-by: John Snow <jsnow@redhat.com>
+Message-id: 1452282511-4116-1-git-send-email-ppandit@redhat.com
+Signed-off-by: John Snow <jsnow@redhat.com>
+
+Upstream-Status: Backport
+
+http://git.qemu.org/?p=qemu.git;a=commit;h=4ab0359a8ae182a7ac5c99609667273167703fab
+
+CVE: CVE-2016-1568
+[Yocto # 9013]
+
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ hw/ide/ahci.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+Index: qemu-2.4.0/hw/ide/ahci.c
+===================================================================
+--- qemu-2.4.0.orig/hw/ide/ahci.c
++++ qemu-2.4.0/hw/ide/ahci.c
+@@ -898,6 +898,7 @@ static void ncq_err(NCQTransferState *nc
+ ide_state->error = ABRT_ERR;
+ ide_state->status = READY_STAT | ERR_STAT;
+ ncq_tfs->drive->port_regs.scr_err |= (1 << ncq_tfs->tag);
++ ncq_tfs->used = 0;
+ }
+
+ static void ncq_finish(NCQTransferState *ncq_tfs)
diff --git a/yocto-poky/meta/recipes-devtools/qemu/qemu/CVE-2016-2197.patch b/yocto-poky/meta/recipes-devtools/qemu/qemu/CVE-2016-2197.patch
new file mode 100644
index 000000000..946435c43
--- /dev/null
+++ b/yocto-poky/meta/recipes-devtools/qemu/qemu/CVE-2016-2197.patch
@@ -0,0 +1,59 @@
+From: Prasad J Pandit <address@hidden>
+
+When IDE AHCI emulation uses Frame Information Structures(FIS)
+engine for data transfer, the mapped FIS buffer address is stored
+in a static 'bounce.buffer'. When a request is made to map another
+memory region, address_space_map() returns NULL because
+'bounce.buffer' is in_use. It leads to a null pointer dereference
+error while doing 'dma_memory_unmap'. Add a check to avoid it.
+
+Reported-by: Zuozhi fzz <address@hidden>
+Signed-off-by: Prasad J Pandit <address@hidden>
+
+Upstream-Status: Backport
+https://lists.gnu.org/archive/html/qemu-devel/2016-01/msg05740.html
+
+CVE: CVE-2016-2197
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ hw/ide/ahci.c | 16 ++++++++++------
+ 1 file changed, 10 insertions(+), 6 deletions(-)
+
+ Update as per review
+ -> https://lists.gnu.org/archive/html/qemu-devel/2016-01/msg05715.html
+
+Index: qemu-2.5.0/hw/ide/ahci.c
+===================================================================
+--- qemu-2.5.0.orig/hw/ide/ahci.c
++++ qemu-2.5.0/hw/ide/ahci.c
+@@ -661,9 +661,11 @@ static bool ahci_map_fis_address(AHCIDev
+
+ static void ahci_unmap_fis_address(AHCIDevice *ad)
+ {
+- dma_memory_unmap(ad->hba->as, ad->res_fis, 256,
+- DMA_DIRECTION_FROM_DEVICE, 256);
+- ad->res_fis = NULL;
++ if (ad->res_fis) {
++ dma_memory_unmap(ad->hba->as, ad->res_fis, 256,
++ DMA_DIRECTION_FROM_DEVICE, 256);
++ ad->res_fis = NULL;
++ }
+ }
+
+ static bool ahci_map_clb_address(AHCIDevice *ad)
+@@ -677,9 +679,11 @@ static bool ahci_map_clb_address(AHCIDev
+
+ static void ahci_unmap_clb_address(AHCIDevice *ad)
+ {
+- dma_memory_unmap(ad->hba->as, ad->lst, 1024,
+- DMA_DIRECTION_FROM_DEVICE, 1024);
+- ad->lst = NULL;
++ if (ad->lst) {
++ dma_memory_unmap(ad->hba->as, ad->lst, 1024,
++ DMA_DIRECTION_FROM_DEVICE, 1024);
++ ad->lst = NULL;
++ }
+ }
+
+ static void ahci_write_fis_sdb(AHCIState *s, NCQTransferState *ncq_tfs)
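For illustration only (not part of the diff above): the same defensive pattern in miniature, where a release helper that can be reached twice (e.g. from a reset path) checks that the resource is still mapped and clears the pointer after releasing it. malloc/free stand in for dma_memory_map/unmap here.

    #include <stdio.h>
    #include <stdlib.h>

    struct dev {
        void *res_fis;    /* NULL when nothing is mapped */
    };

    /* Release the mapping only if it exists, then clear the pointer so a
     * second call is a harmless no-op. */
    static void unmap_fis(struct dev *d)
    {
        if (d->res_fis) {
            free(d->res_fis);
            d->res_fis = NULL;
        }
    }

    int main(void)
    {
        struct dev d = { malloc(256) };

        unmap_fis(&d);    /* releases the mapping */
        unmap_fis(&d);    /* no-op instead of a double free / NULL deref */
        printf("res_fis = %p\n", d.res_fis);
        return 0;
    }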
diff --git a/yocto-poky/meta/recipes-devtools/qemu/qemu/CVE-2016-2198.patch b/yocto-poky/meta/recipes-devtools/qemu/qemu/CVE-2016-2198.patch
new file mode 100644
index 000000000..f1201f061
--- /dev/null
+++ b/yocto-poky/meta/recipes-devtools/qemu/qemu/CVE-2016-2198.patch
@@ -0,0 +1,45 @@
+From: Prasad J Pandit <address@hidden>
+
+The USB EHCI emulation supports host controller capability registers,
+but its mmio '.write' function was missing, which led to a null
+pointer dereference issue. Add a do-nothing 'ehci_caps_write'
+definition to avoid it; it does nothing because capability registers
+are Read Only (RO).
+
+Reported-by: Zuozhi Fzz <address@hidden>
+Signed-off-by: Prasad J Pandit <address@hidden>
+
+Upstream-Status: Backport
+https://lists.gnu.org/archive/html/qemu-devel/2016-01/msg05899.html
+
+CVE: CVE-2016-2198
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ hw/usb/hcd-ehci.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+Index: qemu-2.5.0/hw/usb/hcd-ehci.c
+===================================================================
+--- qemu-2.5.0.orig/hw/usb/hcd-ehci.c
++++ qemu-2.5.0/hw/usb/hcd-ehci.c
+@@ -893,6 +893,11 @@ static uint64_t ehci_caps_read(void *ptr
+ return s->caps[addr];
+ }
+
++static void ehci_caps_write(void *ptr, hwaddr addr,
++ uint64_t val, unsigned size)
++{
++}
++
+ static uint64_t ehci_opreg_read(void *ptr, hwaddr addr,
+ unsigned size)
+ {
+@@ -2310,6 +2315,7 @@ static void ehci_frame_timer(void *opaqu
+
+ static const MemoryRegionOps ehci_mmio_caps_ops = {
+ .read = ehci_caps_read,
++ .write = ehci_caps_write,
+ .valid.min_access_size = 1,
+ .valid.max_access_size = 4,
+ .impl.min_access_size = 1,
diff --git a/yocto-poky/meta/recipes-devtools/qemu/qemu/no-valgrind.patch b/yocto-poky/meta/recipes-devtools/qemu/qemu/no-valgrind.patch
new file mode 100644
index 000000000..91f728042
--- /dev/null
+++ b/yocto-poky/meta/recipes-devtools/qemu/qemu/no-valgrind.patch
@@ -0,0 +1,19 @@
+There isn't an option to enable or disable valgrind support, so disable it to avoid non-deterministic builds.
+
+Upstream-Status: Inappropriate
+Signed-off-by: Ross Burton <ross.burton@intel.com>
+
+diff --git a/configure b/configure
+index b3c4f51..4d3929e 100755
+--- a/configure
++++ b/configure
+@@ -4193,9 +4192,0 @@ valgrind_h=no
+-cat > $TMPC << EOF
+-#include <valgrind/valgrind.h>
+-int main(void) {
+- return 0;
+-}
+-EOF
+-if compile_prog "" "" ; then
+- valgrind_h=yes
+-fi
diff --git a/yocto-poky/meta/recipes-devtools/qemu/qemu_2.4.0.bb b/yocto-poky/meta/recipes-devtools/qemu/qemu_2.4.0.bb
index 59b178800..8d47b16e6 100644
--- a/yocto-poky/meta/recipes-devtools/qemu/qemu_2.4.0.bb
+++ b/yocto-poky/meta/recipes-devtools/qemu/qemu_2.4.0.bb
@@ -9,6 +9,17 @@ SRC_URI += "file://configure-fix-Darwin-target-detection.patch \
file://smc91c111_fix1.patch \
file://smc91c111_fix2.patch \
file://smc91c111_fix3.patch \
+ file://no-valgrind.patch \
+ file://CVE-2015-8504.patch \
+ file://CVE-2015-7504.patch \
+ file://CVE-2015-7512.patch \
+ file://CVE-2015-8345.patch \
+ file://CVE-2016-1568.patch \
+ file://CVE-2015-7295_1.patch \
+ file://CVE-2015-7295_2.patch \
+ file://CVE-2015-7295_3.patch \
+ file://CVE-2016-2197.patch \
+ file://CVE-2016-2198.patch \
"
SRC_URI_prepend = "http://wiki.qemu-project.org/download/${BP}.tar.bz2"
SRC_URI[md5sum] = "186ee8194140a484a455f8e3c74589f4"
diff --git a/yocto-poky/meta/recipes-devtools/rpm/rpm/configure.ac-check-for-both-gpg2-and-gpg.patch b/yocto-poky/meta/recipes-devtools/rpm/rpm/configure.ac-check-for-both-gpg2-and-gpg.patch
new file mode 100644
index 000000000..7894a4263
--- /dev/null
+++ b/yocto-poky/meta/recipes-devtools/rpm/rpm/configure.ac-check-for-both-gpg2-and-gpg.patch
@@ -0,0 +1,29 @@
+configure.ac: search for both gpg2 and gpg
+
+On some platforms the GnuPG binary is named 'gpg2' whereas others have 'gpg'.
+This patch increases compatibility by searching for 'gpg' in addition to
+'gpg2'.
+
+Upstream-Status: Pending
+
+Signed-off-by: Markus Lehtonen <markus.lehtonen@linux.intel.com>
+---
+ configure.ac | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/configure.ac b/configure.ac
+index 6746b4c..f6922ae 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -562,7 +562,7 @@ AC_PATH_PROG(__DIFF, diff, /bin/diff, $MYPATH)
+ AC_PATH_PROG(__DITTO, ditto, %{_bindir}/ditto, $MYPATH)
+ AC_PATH_PROG(__FILE, file, %{_bindir}/file, $MYPATH)
+ AC_PATH_PROG(__GIT, git, %{_bindir}/git, $MYPATH)
+-AC_PATH_PROG(__GPG, gpg2, %{_bindir}/gpg2, $MYPATH)
++AC_PATH_PROGS(__GPG, [gpg2 gpg], %{_bindir}/gpg2, $MYPATH)
+ AC_PATH_PROG(__GSR, gsr, %{_bindir}/gsr, $MYPATH)
+ AC_PATH_PROG(__GST_INSPECT, gst-inspect-0.10, %{_bindir}/gst-inspect-0.10, $MYPATH)
+ AC_PATH_PROG(__GZIP, gzip, /bin/gzip, $MYPATH)
+--
+2.1.4
+
diff --git a/yocto-poky/meta/recipes-devtools/rpm/rpm/rpm-check-rootpath-reasonableness.patch b/yocto-poky/meta/recipes-devtools/rpm/rpm/rpm-check-rootpath-reasonableness.patch
index 398603066..3d8d645a7 100644
--- a/yocto-poky/meta/recipes-devtools/rpm/rpm/rpm-check-rootpath-reasonableness.patch
+++ b/yocto-poky/meta/recipes-devtools/rpm/rpm/rpm-check-rootpath-reasonableness.patch
@@ -37,7 +37,7 @@ index 40c42bd..88d85ab 100644
+ int ret,rootdir_len;
+
+ if(rootdir == NULL) {
-+ return;
++ return -1;
+ }
+
+ rootdir_len = strlen(rootdir);
diff --git a/yocto-poky/meta/recipes-devtools/rpm/rpm_4.11.2.bb b/yocto-poky/meta/recipes-devtools/rpm/rpm_4.11.2.bb
index 210c9433d..f4a2110ae 100644
--- a/yocto-poky/meta/recipes-devtools/rpm/rpm_4.11.2.bb
+++ b/yocto-poky/meta/recipes-devtools/rpm/rpm_4.11.2.bb
@@ -22,7 +22,8 @@ HOMEPAGE = "http://www.rpm.org"
LICENSE = "GPL-2.0+"
LIC_FILES_CHKSUM ??= "file://${COMMON_LICENSE_DIR}/GPL-2.0;md5=801f80980d171dd6425610833a22dbe6"
-DEPENDS = "db libxml2 xz findutils file popt nss bzip2 elfutils patch attr zlib acl gzip make binutils python"
+DEPENDS = "db libxml2 xz findutils file popt nss bzip2 elfutils attr zlib acl gzip python"
+DEPENDS_append_class-native = " file-replacement-native"
SRC_URI += "http://rpm.org/releases/rpm-4.11.x/${BP}.tar.bz2 \
file://use-pkgconfig-for-python.patch \
diff --git a/yocto-poky/meta/recipes-devtools/rpm/rpm_5.4+cvs.bb b/yocto-poky/meta/recipes-devtools/rpm/rpm_5.4+cvs.bb
index 8903f3bc3..951b2517f 100644
--- a/yocto-poky/meta/recipes-devtools/rpm/rpm_5.4+cvs.bb
+++ b/yocto-poky/meta/recipes-devtools/rpm/rpm_5.4+cvs.bb
@@ -43,6 +43,7 @@ LICENSE = "LGPLv2.1"
LIC_FILES_CHKSUM = "file://COPYING.LIB;md5=2d5025d4aa3495befef8f17206a5b0a1"
DEPENDS = "libpcre attr acl popt ossp-uuid file byacc-native"
+DEPENDS_append_class-native = " file-replacement-native"
S = "${WORKDIR}/rpm"
diff --git a/yocto-poky/meta/recipes-devtools/rpm/rpm_5.4.14.bb b/yocto-poky/meta/recipes-devtools/rpm/rpm_5.4.14.bb
index 1f9a4bd97..73b3734d5 100644
--- a/yocto-poky/meta/recipes-devtools/rpm/rpm_5.4.14.bb
+++ b/yocto-poky/meta/recipes-devtools/rpm/rpm_5.4.14.bb
@@ -41,6 +41,7 @@ LICENSE = "LGPLv2.1"
LIC_FILES_CHKSUM = "file://COPYING.LIB;md5=2d5025d4aa3495befef8f17206a5b0a1"
DEPENDS = "libpcre attr acl popt ossp-uuid file byacc-native"
+DEPENDS_append_class-native = " file-replacement-native"
# rpm2cpio is a shell script, which is part of the rpm src.rpm. It is needed
# in order to extract the distribution SRPM into a format we can extract...
@@ -98,6 +99,7 @@ SRC_URI = "http://www.rpm5.org/files/rpm/rpm-5.4/rpm-5.4.14-0.20131024.src.rpm;e
file://rpm-check-rootpath-reasonableness.patch \
file://rpm-macros.in-disable-external-key-server.patch \
file://rpm-opendb-before-verifyscript-to-avoid-null-point.patch \
+ file://configure.ac-check-for-both-gpg2-and-gpg.patch \
"
# Uncomment the following line to enable platform score debugging
diff --git a/yocto-poky/meta/recipes-devtools/rpm/rpmresolve/rpmresolve.c b/yocto-poky/meta/recipes-devtools/rpm/rpmresolve/rpmresolve.c
index 7f4caf988..c0b4d567f 100644
--- a/yocto-poky/meta/recipes-devtools/rpm/rpmresolve/rpmresolve.c
+++ b/yocto-poky/meta/recipes-devtools/rpm/rpmresolve/rpmresolve.c
@@ -42,7 +42,7 @@ FILE *outf;
int getPackageStr(rpmts ts, const char *NVRA, rpmTag tag, char **value)
{
int rc = -1;
- rpmmi mi = rpmtsInitIterator(ts, RPMTAG_NVRA, NVRA, 0);
+ rpmmi mi = rpmmiInit(rpmtsGetRdb(ts), RPMTAG_NVRA, NVRA, 0);
Header h;
if ((h = rpmmiNext(mi)) != NULL) {
HE_t he = (HE_t) memset(alloca(sizeof(*he)), 0, sizeof(*he));
@@ -225,7 +225,7 @@ int processPackages(rpmts *ts, int tscount, const char *packagelistfn, int ignor
int lookupProvider(rpmts ts, const char *req, char **provider)
{
int rc = 0;
- rpmmi provmi = rpmtsInitIterator(ts, RPMTAG_PROVIDENAME, req, 0);
+ rpmmi provmi = rpmmiInit(rpmtsGetRdb(ts), RPMTAG_PROVIDENAME, req, 0);
if(provmi) {
Header h;
if ((h = rpmmiNext(provmi)) != NULL) {
@@ -266,7 +266,7 @@ int printDepList(rpmts *ts, int tscount)
HE_t he = (HE_t) memset(alloca(sizeof(*he)), 0, sizeof(*he));
int nkeys = argvCount(keys);
for(i=0; i<nkeys; i++) {
- rpmmi mi = rpmtsInitIterator(ts[0], RPMTAG_NVRA, keys[i], 0);
+ rpmmi mi = rpmmiInit(db, RPMTAG_NVRA, keys[i], 0);
Header h;
if ((h = rpmmiNext(mi)) != NULL) {
/* Get name of package */
@@ -280,6 +280,8 @@ int printDepList(rpmts *ts, int tscount)
printf("DEBUG: %s requires null\n", name);
}
rc = 0;
+ free(name);
+ (void)rpmmiFree(mi);
continue;
}
ARGV_t reqs = (ARGV_t)he->p.ptr;
@@ -412,7 +414,7 @@ int main(int argc, char **argv)
}
for(i=0; i<tscount; i++)
- (void) rpmtsCloseDB(ts[i]);
+ (void)rpmtsFree(ts[i]);
free(ts);
if( outfile ) {
diff --git a/yocto-poky/meta/recipes-devtools/subversion/subversion-1.8.13/subversion-CVE-2015-3184.patch b/yocto-poky/meta/recipes-devtools/subversion/subversion-1.8.13/subversion-CVE-2015-3184.patch
new file mode 100644
index 000000000..0663bd271
--- /dev/null
+++ b/yocto-poky/meta/recipes-devtools/subversion/subversion-1.8.13/subversion-CVE-2015-3184.patch
@@ -0,0 +1,2094 @@
+Fix CVE-2015-3184
+
+Patch is from:
+http://subversion.apache.org/security/CVE-2015-3184-advisory.txt
+
+Upstream-Status: Backport
+
+Signed-off-by: Wenzong Fan <wenzong.fan@windriver.com>
+
+Index: Makefile.in
+===================================================================
+--- a/Makefile.in (revision 1691883)
++++ b/Makefile.in (working copy)
+@@ -357,6 +357,7 @@ TEST_SHLIB_VAR_SWIG_RB=\
+ fi;
+
+ APXS = @APXS@
++HTTPD_VERSION = @HTTPD_VERSION@
+
+ PYTHON = @PYTHON@
+ PERL = @PERL@
+@@ -509,6 +510,9 @@ check: bin @TRANSFORM_LIBTOOL_SCRIPTS@ $(TEST_DEPS
+ if test "$(HTTP_LIBRARY)" != ""; then \
+ flags="--http-library $(HTTP_LIBRARY) $$flags"; \
+ fi; \
++ if test "$(HTTPD_VERSION)" != ""; then \
++ flags="--httpd-version $(HTTPD_VERSION) $$flags"; \
++ fi; \
+ if test "$(SERVER_MINOR_VERSION)" != ""; then \
+ flags="--server-minor-version $(SERVER_MINOR_VERSION) $$flags"; \
+ fi; \
+Index: build/ac-macros/apache.m4
+===================================================================
+--- a/build/ac-macros/apache.m4 (revision 1691883)
++++ b/build/ac-macros/apache.m4 (working copy)
+@@ -160,6 +160,20 @@ if test -n "$APXS" && test "$APXS" != "no"; then
+ BUILD_APACHE_RULE=apache-mod
+ INSTALL_APACHE_RULE=install-mods-shared
+ INSTALL_APACHE_MODS=true
++ HTTPD="`$APXS -q sbindir`/`$APXS -q PROGNAME`"
++ if ! test -e $HTTPD ; then
++ HTTPD="`$APXS -q bindir`/`$APXS -q PROGNAME`"
++ fi
++ HTTPD_VERSION=["`$HTTPD -v | $SED -e 's@^.*/\([0-9.]*\)\(.*$\)@\1@ ; 1q'`"]
++ AC_ARG_ENABLE(broken-httpd-auth,
++ AS_HELP_STRING([--enable-broken-httpd-auth],
++ [Allow building against httpd 2.4 with broken auth]),
++ [broken_httpd_auth=$enableval],[broken_httpd_auth=no])
++ if test "$enable_broken_httpd_auth" = "yes"; then
++ AC_MSG_NOTICE([Building with broken httpd auth])
++ AC_DEFINE(SVN_ALLOW_BROKEN_HTTPD_AUTH, 1,
++ [Defined to allow building against httpd 2.4 with broken auth])
++ fi
+
+ case $host in
+ *-*-cygwin*)
+@@ -178,6 +192,7 @@ AC_SUBST(APACHE_LDFLAGS)
+ AC_SUBST(APACHE_INCLUDES)
+ AC_SUBST(APACHE_LIBEXECDIR)
+ AC_SUBST(INSTALL_APACHE_MODS)
++AC_SUBST(HTTPD_VERSION)
+
+ # there aren't any flags that interest us ...
+ #if test -n "$APXS" && test "$APXS" != "no"; then
+Index: build/run_tests.py
+===================================================================
+--- a/build/run_tests.py (revision 1691883)
++++ b/build/run_tests.py (working copy)
+@@ -29,6 +29,7 @@
+ [--fs-type=<fs-type>] [--fsfs-packing] [--fsfs-sharding=<n>]
+ [--list] [--milestone-filter=<regex>] [--mode-filter=<type>]
+ [--server-minor-version=<version>] [--http-proxy=<host>:<port>]
++ [--httpd-version=<version>]
+ [--config-file=<file>] [--ssl-cert=<file>]
+ <abs_srcdir> <abs_builddir>
+ <prog ...>
+@@ -125,7 +126,7 @@ class TestHarness:
+ fsfs_sharding=None, fsfs_packing=None,
+ list_tests=None, svn_bin=None, mode_filter=None,
+ milestone_filter=None, set_log_level=None, ssl_cert=None,
+- http_proxy=None):
++ http_proxy=None, httpd_version=None):
+ '''Construct a TestHarness instance.
+
+ ABS_SRCDIR and ABS_BUILDDIR are the source and build directories.
+@@ -178,6 +179,7 @@ class TestHarness:
+ self.log = None
+ self.ssl_cert = ssl_cert
+ self.http_proxy = http_proxy
++ self.httpd_version = httpd_version
+ if not sys.stdout.isatty() or sys.platform == 'win32':
+ TextColors.disable()
+
+@@ -481,6 +483,8 @@ class TestHarness:
+ svntest.main.options.ssl_cert = self.ssl_cert
+ if self.http_proxy is not None:
+ svntest.main.options.http_proxy = self.http_proxy
++ if self.httpd_version is not None:
++ svntest.main.options.httpd_version = self.httpd_version
+
+ svntest.main.options.srcdir = self.srcdir
+
+@@ -645,7 +649,7 @@ def main():
+ 'enable-sasl', 'parallel', 'config-file=',
+ 'log-to-stdout', 'list', 'milestone-filter=',
+ 'mode-filter=', 'set-log-level=', 'ssl-cert=',
+- 'http-proxy='])
++ 'http-proxy=', 'httpd-version='])
+ except getopt.GetoptError:
+ args = []
+
+@@ -656,9 +660,9 @@ def main():
+ base_url, fs_type, verbose, cleanup, enable_sasl, http_library, \
+ server_minor_version, fsfs_sharding, fsfs_packing, parallel, \
+ config_file, log_to_stdout, list_tests, mode_filter, milestone_filter, \
+- set_log_level, ssl_cert, http_proxy = \
++ set_log_level, ssl_cert, http_proxy, httpd_version = \
+ None, None, None, None, None, None, None, None, None, None, None, \
+- None, None, None, None, None, None, None
++ None, None, None, None, None, None, None, None
+ for opt, val in opts:
+ if opt in ['-u', '--url']:
+ base_url = val
+@@ -696,6 +700,8 @@ def main():
+ ssl_cert = val
+ elif opt in ['--http-proxy']:
+ http_proxy = val
++ elif opt in ['--httpd-version']:
++ httpd_version = val
+ else:
+ raise getopt.GetoptError
+
+@@ -712,7 +718,7 @@ def main():
+ fsfs_sharding, fsfs_packing, list_tests,
+ mode_filter=mode_filter, milestone_filter=milestone_filter,
+ set_log_level=set_log_level, ssl_cert=ssl_cert,
+- http_proxy=http_proxy)
++ http_proxy=http_proxy, httpd_version=httpd_version)
+
+ failed = th.run(args[2:])
+ if failed:
+Index: subversion/mod_authz_svn/mod_authz_svn.c
+===================================================================
+--- a/subversion/mod_authz_svn/mod_authz_svn.c (revision 1691883)
++++ b/subversion/mod_authz_svn/mod_authz_svn.c (working copy)
+@@ -48,6 +48,23 @@
+ #include "svn_dirent_uri.h"
+ #include "private/svn_fspath.h"
+
++/* The apache headers define these and they conflict with our definitions. */
++#ifdef PACKAGE_BUGREPORT
++#undef PACKAGE_BUGREPORT
++#endif
++#ifdef PACKAGE_NAME
++#undef PACKAGE_NAME
++#endif
++#ifdef PACKAGE_STRING
++#undef PACKAGE_STRING
++#endif
++#ifdef PACKAGE_TARNAME
++#undef PACKAGE_TARNAME
++#endif
++#ifdef PACKAGE_VERSION
++#undef PACKAGE_VERSION
++#endif
++#include "svn_private_config.h"
+
+ #ifdef APLOG_USE_MODULE
+ APLOG_USE_MODULE(authz_svn);
+@@ -67,6 +84,30 @@ typedef struct authz_svn_config_rec {
+ const char *force_username_case;
+ } authz_svn_config_rec;
+
++#if AP_MODULE_MAGIC_AT_LEAST(20060110,0) /* version where
++ ap_some_auth_required breaks */
++# if AP_MODULE_MAGIC_AT_LEAST(20120211,47) /* first version with
++ force_authn hook and
++ ap_some_authn_required() which
++ allows us to work without
++ ap_some_auth_required() */
++# define USE_FORCE_AUTHN 1
++# define IN_SOME_AUTHN_NOTE "authz_svn-in-some-authn"
++# define FORCE_AUTHN_NOTE "authz_svn-force-authn"
++# else
++ /* ap_some_auth_required() is busted and no viable alternative exists */
++# ifndef SVN_ALLOW_BROKEN_HTTPD_AUTH
++# error This version of httpd has a security hole with mod_authz_svn
++# else
++ /* user wants to build anyway */
++# define USE_FORCE_AUTHN 0
++# endif
++# endif
++#else
++ /* old enough that ap_some_auth_required() still works */
++# define USE_FORCE_AUTHN 0
++#endif
++
+ /*
+ * Configuration
+ */
+@@ -819,9 +860,51 @@ access_checker(request_rec *r)
+ &authz_svn_module);
+ const char *repos_path = NULL;
+ const char *dest_repos_path = NULL;
+- int status;
++ int status, authn_required;
+
++#if USE_FORCE_AUTHN
++ /* Use the force_authn() hook available in 2.4.x to work securely
++ * given that ap_some_auth_required() is no longer functional for our
++ * purposes in 2.4.x.
++ */
++ int authn_configured;
++
+ /* We are not configured to run */
++ if (!conf->anonymous || apr_table_get(r->notes, IN_SOME_AUTHN_NOTE)
++ || (! (conf->access_file || conf->repo_relative_access_file)))
++ return DECLINED;
++
++ /* Authentication is configured */
++ authn_configured = ap_auth_type(r) != NULL;
++ if (authn_configured)
++ {
++ /* If the user is trying to authenticate, let him. It doesn't
++ * make much sense to grant anonymous access but deny authenticated
++ * users access, even though you can do that with '$anon' in the
++ * access file.
++ */
++ if (apr_table_get(r->headers_in,
++ (PROXYREQ_PROXY == r->proxyreq)
++ ? "Proxy-Authorization" : "Authorization"))
++ {
++ /* Set the note to force authn regardless of what access_checker_ex
++ hook requires */
++ apr_table_setn(r->notes, FORCE_AUTHN_NOTE, (const char*)1);
++
++ /* provide the proper return so the access_checker hook doesn't
++ * prevent the code from continuing on to the other auth hooks */
++ if (ap_satisfies(r) != SATISFY_ANY)
++ return OK;
++ else
++ return HTTP_FORBIDDEN;
++ }
++ }
++
++#else
++ /* Support for older versions of httpd that have a working
++ * ap_some_auth_required() */
++
++ /* We are not configured to run */
+ if (!conf->anonymous
+ || (! (conf->access_file || conf->repo_relative_access_file)))
+ return DECLINED;
+@@ -834,9 +917,10 @@ access_checker(request_rec *r)
+ if (ap_satisfies(r) != SATISFY_ANY)
+ return DECLINED;
+
+- /* If the user is trying to authenticate, let him. If anonymous
+- * access is allowed, so is authenticated access, by definition
+- * of the meaning of '*' in the access file.
++ /* If the user is trying to authenticate, let him. It doesn't
++ * make much sense to grant anonymous access but deny authenticated
++ * users access, even though you can do that with '$anon' in the
++ * access file.
+ */
+ if (apr_table_get(r->headers_in,
+ (PROXYREQ_PROXY == r->proxyreq)
+@@ -848,6 +932,7 @@ access_checker(request_rec *r)
+ return HTTP_FORBIDDEN;
+ }
+ }
++#endif
+
+ /* If anon access is allowed, return OK */
+ status = req_check_access(r, conf, &repos_path, &dest_repos_path);
+@@ -856,7 +941,26 @@ access_checker(request_rec *r)
+ if (!conf->authoritative)
+ return DECLINED;
+
++#if USE_FORCE_AUTHN
++ if (authn_configured) {
++ /* We have to check to see if authn is required because if so we must
++ * return UNAUTHORIZED (401) rather than FORBIDDEN (403) since returning
++ * the 403 leaks information about what paths may exist to
++ * unauthenticated users. We must set a note here in order
++ * to use ap_some_authn_rquired() without triggering an infinite
++ * loop since the call will trigger this function to be called again. */
++ apr_table_setn(r->notes, IN_SOME_AUTHN_NOTE, (const char*)1);
++ authn_required = ap_some_authn_required(r);
++ apr_table_unset(r->notes, IN_SOME_AUTHN_NOTE);
++ if (authn_required)
++ {
++ ap_note_auth_failure(r);
++ return HTTP_UNAUTHORIZED;
++ }
++ }
++#else
+ if (!ap_some_auth_required(r))
++#endif
+ log_access_verdict(APLOG_MARK, r, 0, repos_path, dest_repos_path);
+
+ return HTTP_FORBIDDEN;
+@@ -937,6 +1041,17 @@ auth_checker(request_rec *r)
+ return OK;
+ }
+
++#if USE_FORCE_AUTHN
++static int
++force_authn(request_rec *r)
++{
++ if (apr_table_get(r->notes, FORCE_AUTHN_NOTE))
++ return OK;
++
++ return DECLINED;
++}
++#endif
++
+ /*
+ * Module flesh
+ */
+@@ -953,6 +1068,9 @@ register_hooks(apr_pool_t *p)
+ * give SSLOptions +FakeBasicAuth a chance to work. */
+ ap_hook_check_user_id(check_user_id, mod_ssl, NULL, APR_HOOK_FIRST);
+ ap_hook_auth_checker(auth_checker, NULL, NULL, APR_HOOK_FIRST);
++#if USE_FORCE_AUTHN
++ ap_hook_force_authn(force_authn, NULL, NULL, APR_HOOK_FIRST);
++#endif
+ ap_register_provider(p,
+ AUTHZ_SVN__SUBREQ_BYPASS_PROV_GRP,
+ AUTHZ_SVN__SUBREQ_BYPASS_PROV_NAME,
+Index: subversion/tests/cmdline/README
+===================================================================
+--- a/subversion/tests/cmdline/README (revision 1691883)
++++ b/subversion/tests/cmdline/README (working copy)
+@@ -83,6 +83,133 @@ paths adjusted appropriately:
+ Require valid-user
+ </Location>
+
++ <Location /authz-test-work/anon>
++ DAV svn
++ SVNParentPath /home/yourusernamehere/projects/svn/subversion/tests/cmdline/svn-test-work/local_tmp
++ AuthzSVNAccessFile /home/yourusernamehere/projects/svn/subversion/tests/cmdline/svn-test-work/authz
++ SVNListParentPath On
++ # This may seem unnecessary but granting access to everyone here is necessary
++ # to exercise a bug with httpd 2.3.x+. The "Require all granted" syntax is
++ # new to 2.3.x+ which we can detect with the mod_authz_core.c module
++ # signature. Use the "Allow from all" syntax with older versions for symmetry.
++ <IfModule mod_authz_core.c>
++ Require all granted
++ </IfModule>
++ <IfModule !mod_authz_core.c>
++ Allow from all
++    </IfModule>
++ </Location>
++ <Location /authz-test-work/mixed>
++ DAV svn
++ SVNParentPath /home/yourusernamehere/projects/svn/subversion/tests/cmdline/svn-test-work/local_tmp
++ AuthzSVNAccessFile /home/yourusernamehere/projects/svn/subversion/tests/cmdline/svn-test-work/authz
++ SVNListParentPath On
++ AuthType Basic
++ AuthName "Subversion Repository"
++ AuthUserFile /usr/local/apache2/conf/users
++ Require valid-user
++ Satisfy Any
++ </Location>
++ <Location /authz-test-work/mixed-noauthwhenanon>
++ DAV svn
++ SVNParentPath /home/yourusernamehere/projects/svn/subversion/tests/cmdline/svn-test-work/local_tmp
++ AuthzSVNAccessFile /home/yourusernamehere/projects/svn/subversion/tests/cmdline/svn-test-work/authz
++ SVNListParentPath On
++ AuthType Basic
++ AuthName "Subversion Repository"
++ AuthUserFile /usr/local/apache2/conf/users
++ Require valid-user
++ AuthzSVNNoAuthWhenAnonymousAllowed On
++ </Location>
++ <Location /authz-test-work/authn>
++ DAV svn
++ SVNParentPath /home/yourusernamehere/projects/svn/subversion/tests/cmdline/svn-test-work/local_tmp
++ AuthzSVNAccessFile /home/yourusernamehere/projects/svn/subversion/tests/cmdline/svn-test-work/authz
++ SVNListParentPath On
++ AuthType Basic
++ AuthName "Subversion Repository"
++ AuthUserFile /usr/local/apache2/conf/users
++ Require valid-user
++ </Location>
++ <Location /authz-test-work/authn-anonoff>
++ DAV svn
++ SVNParentPath /home/yourusernamehere/projects/svn/subversion/tests/cmdline/svn-test-work/local_tmp
++ AuthzSVNAccessFile /home/yourusernamehere/projects/svn/subversion/tests/cmdline/svn-test-work/authz
++ SVNListParentPath On
++ AuthType Basic
++ AuthName "Subversion Repository"
++ AuthUserFile /usr/local/apache2/conf/users
++ Require valid-user
++ AuthzSVNAnonymous Off
++ </Location>
++ <Location /authz-test-work/authn-lcuser>
++ DAV svn
++ SVNParentPath /home/yourusernamehere/projects/svn/subversion/tests/cmdline/svn-test-work/local_tmp
++ AuthzSVNAccessFile /home/yourusernamehere/projects/svn/subversion/tests/cmdline/svn-test-work/authz
++ SVNListParentPath On
++ AuthType Basic
++ AuthName "Subversion Repository"
++ AuthUserFile /usr/local/apache2/conf/users
++ Require valid-user
++ AuthzForceUsernameCase Lower
++ </Location>
++ <Location /authz-test-work/authn-lcuser>
++ DAV svn
++ SVNParentPath /home/yourusernamehere/projects/svn/subversion/tests/cmdline/svn-test-work/local_tmp
++ AuthzSVNAccessFile /home/yourusernamehere/projects/svn/subversion/tests/cmdline/svn-test-work/authz
++ SVNListParentPath On
++ AuthType Basic
++ AuthName "Subversion Repository"
++ AuthUserFile /usr/local/apache2/conf/users
++ Require valid-user
++ AuthzForceUsernameCase Lower
++ </Location>
++ <Location /authz-test-work/authn-group>
++ DAV svn
++ SVNParentPath /home/yourusernamehere/projects/svn/subversion/tests/cmdline/svn-test-work/local_tmp
++ AuthzSVNAccessFile /home/yourusernamehere/projects/svn/subversion/tests/cmdline/svn-test-work/authz
++ SVNListParentPath On
++ AuthType Basic
++ AuthName "Subversion Repository"
++ AuthUserFile /usr/local/apache2/conf/users
++ AuthGroupFile /usr/local/apache2/conf/groups
++ Require group random
++ AuthzSVNAuthoritative Off
++ </Location>
++ <IfModule mod_authz_core.c>
++ <Location /authz-test-work/sallrany>
++ DAV svn
++ SVNParentPath /home/yourusernamehere/projects/svn/subversion/tests/cmdline/svn-test-work/local_tmp
++ AuthzSVNAccessFile /home/yourusernamehere/projects/svn/subversion/tests/cmdline/svn-test-work/authz
++ SVNListParentPath On
++ AuthType Basic
++ AuthName "Subversion Repository"
++ AuthUserFile /usr/local/apache2/conf/users
++ AuthzSendForbiddenOnFailure On
++ Satisfy All
++ <RequireAny>
++ Require valid-user
++ Require expr req('ALLOW') == '1'
++ </RequireAny>
++ </Location>
++ <Location /authz-test-work/sallrall>
++ DAV svn
++ SVNParentPath /home/yourusernamehere/projects/svn/subversion/tests/cmdline/svn-test-work/local_tmp
++ AuthzSVNAccessFile /home/yourusernamehere/projects/svn/subversion/tests/cmdline/svn-test-work/authz
++ SVNListParentPath On
++ AuthType Basic
++ AuthName "Subversion Repository"
++ AuthUserFile /usr/local/apache2/conf/users
++ AuthzSendForbiddenOnFailure On
++ Satisfy All
++ <RequireAll>
++ Require valid-user
++ Require expr req('ALLOW') == '1'
++ </RequireAll>
++ </Location>
++ </IfModule>
++
++
+ RedirectMatch permanent ^/svn-test-work/repositories/REDIRECT-PERM-(.*)$ /svn-test-work/repositories/$1
+ RedirectMatch ^/svn-test-work/repositories/REDIRECT-TEMP-(.*)$ /svn-test-work/repositories/$1
+
+@@ -101,8 +228,17 @@ just drop the following 2-line snippet into the
+ ----------------------------
+ jrandom:xCGl35kV9oWCY
+ jconstant:xCGl35kV9oWCY
++JRANDOM:xCGl35kV9oWCY
++JCONSTANT:xCGl35kV9oWCY
+ ----------------------------
+
++and these lines into the
++/usr/local/apache/conf/groups file:
++----------------------------
++random: jrandom
++constant: jconstant
++----------------------------
++
+ Now, (re)start Apache and run the tests over mod_dav_svn.
+
+ You can run a test script over DAV:
+@@ -138,6 +274,8 @@ Note [1]: It would be quite too much to expect tho
+ ----------------------------
+ jrandom:$apr1$3p1.....$FQW6RceW5QhJ2blWDQgKn0
+ jconstant:$apr1$jp1.....$Usrqji1c9H6AbOxOGAzzb0
++ JRANDOM:$apr1$3p1.....$FQW6RceW5QhJ2blWDQgKn0
++ JCONSTANT:$apr1$jp1.....$Usrqji1c9H6AbOxOGAzzb0
+ ----------------------------
+
+
+Index: subversion/tests/cmdline/davautocheck.sh
+===================================================================
+--- a/subversion/tests/cmdline/davautocheck.sh (revision 1691883)
++++ b/subversion/tests/cmdline/davautocheck.sh (working copy)
+@@ -289,8 +289,6 @@ LOAD_MOD_AUTHN_CORE="$(get_loadmodule_config mod_a
+ || fail "Authn_Core module not found."
+ LOAD_MOD_AUTHZ_CORE="$(get_loadmodule_config mod_authz_core)" \
+ || fail "Authz_Core module not found."
+-LOAD_MOD_AUTHZ_HOST="$(get_loadmodule_config mod_authz_host)" \
+- || fail "Authz_Host module not found."
+ LOAD_MOD_UNIXD=$(get_loadmodule_config mod_unixd) \
+ || fail "UnixD module not found"
+ }
+@@ -298,6 +296,10 @@ LOAD_MOD_AUTHN_FILE="$(get_loadmodule_config mod_a
+ || fail "Authn_File module not found."
+ LOAD_MOD_AUTHZ_USER="$(get_loadmodule_config mod_authz_user)" \
+ || fail "Authz_User module not found."
++LOAD_MOD_AUTHZ_GROUPFILE="$(get_loadmodule_config mod_authz_groupfile)" \
++ || fail "Authz_GroupFile module not found."
++LOAD_MOD_AUTHZ_HOST="$(get_loadmodule_config mod_authz_host)" \
++ || fail "Authz_Host module not found."
+ }
+ if [ ${APACHE_MPM:+set} ]; then
+ LOAD_MOD_MPM=$(get_loadmodule_config mod_mpm_$APACHE_MPM) \
+@@ -328,6 +330,7 @@ HTTPD_ERROR_LOG="$HTTPD_ROOT/error_log"
+ HTTPD_MIME_TYPES="$HTTPD_ROOT/mime.types"
+ BASE_URL="http://localhost:$HTTPD_PORT"
+ HTTPD_USERS="$HTTPD_ROOT/users"
++HTTPD_GROUPS="$HTTPD_ROOT/groups"
+
+ mkdir "$HTTPD_ROOT" \
+ || fail "couldn't create temporary directory '$HTTPD_ROOT'"
+@@ -388,6 +391,14 @@ fi
+ say "Adding users for lock authentication"
+ $HTPASSWD -bc $HTTPD_USERS jrandom rayjandom
+ $HTPASSWD -b $HTTPD_USERS jconstant rayjandom
++$HTPASSWD -b $HTTPD_USERS JRANDOM rayjandom
++$HTPASSWD -b $HTTPD_USERS JCONSTANT rayjandom
++
++say "Adding groups for mod_authz_svn tests"
++cat > "$HTTPD_GROUPS" <<__EOF__
++random: jrandom
++constant: jconstant
++__EOF__
+
+ touch $HTTPD_MIME_TYPES
+
+@@ -405,7 +416,9 @@ $LOAD_MOD_AUTHN_CORE
+ $LOAD_MOD_AUTHN_FILE
+ $LOAD_MOD_AUTHZ_CORE
+ $LOAD_MOD_AUTHZ_USER
++$LOAD_MOD_AUTHZ_GROUPFILE
+ $LOAD_MOD_AUTHZ_HOST
++$LOAD_MOD_ACCESS_COMPAT
+ LoadModule authz_svn_module "$MOD_AUTHZ_SVN"
+
+ __EOF__
+@@ -497,6 +510,161 @@ CustomLog "$HTTPD_ROOT/ops" "%t %u %{SVN
+ SVNAdvertiseV2Protocol ${ADVERTISE_V2_PROTOCOL}
+ ${SVN_PATH_AUTHZ_LINE}
+ </Location>
++<Location /authz-test-work/anon>
++ DAV svn
++ SVNParentPath "$ABS_BUILDDIR/subversion/tests/cmdline/svn-test-work/local_tmp"
++ AuthzSVNAccessFile "$ABS_BUILDDIR/subversion/tests/cmdline/svn-test-work/authz"
++ SVNAdvertiseV2Protocol ${ADVERTISE_V2_PROTOCOL}
++ SVNCacheRevProps ${CACHE_REVPROPS_SETTING}
++ SVNListParentPath On
++ # This may seem unnecessary but granting access to everyone here is necessary
++ # to exercise a bug with httpd 2.3.x+. The "Require all granted" syntax is
++ # new to 2.3.x+ which we can detect with the mod_authz_core.c module
++ # signature. Use the "Allow from all" syntax with older versions for symmetry.
++ <IfModule mod_authz_core.c>
++ Require all granted
++ </IfModule>
++ <IfModule !mod_authz_core.c>
++ Allow from all
++  </IfModule>
++ ${SVN_PATH_AUTHZ_LINE}
++</Location>
++<Location /authz-test-work/mixed>
++ DAV svn
++ SVNParentPath "$ABS_BUILDDIR/subversion/tests/cmdline/svn-test-work/local_tmp"
++ AuthzSVNAccessFile "$ABS_BUILDDIR/subversion/tests/cmdline/svn-test-work/authz"
++ SVNAdvertiseV2Protocol ${ADVERTISE_V2_PROTOCOL}
++ SVNCacheRevProps ${CACHE_REVPROPS_SETTING}
++ SVNListParentPath On
++ AuthType Basic
++ AuthName "Subversion Repository"
++ AuthUserFile $HTTPD_USERS
++ Require valid-user
++ Satisfy Any
++ ${SVN_PATH_AUTHZ_LINE}
++</Location>
++<Location /authz-test-work/mixed-noauthwhenanon>
++ DAV svn
++ SVNParentPath "$ABS_BUILDDIR/subversion/tests/cmdline/svn-test-work/local_tmp"
++ AuthzSVNAccessFile "$ABS_BUILDDIR/subversion/tests/cmdline/svn-test-work/authz"
++ SVNAdvertiseV2Protocol ${ADVERTISE_V2_PROTOCOL}
++ SVNCacheRevProps ${CACHE_REVPROPS_SETTING}
++ SVNListParentPath On
++ AuthType Basic
++ AuthName "Subversion Repository"
++ AuthUserFile $HTTPD_USERS
++ Require valid-user
++ AuthzSVNNoAuthWhenAnonymousAllowed On
++ SVNPathAuthz On
++</Location>
++<Location /authz-test-work/authn>
++ DAV svn
++ SVNParentPath "$ABS_BUILDDIR/subversion/tests/cmdline/svn-test-work/local_tmp"
++ AuthzSVNAccessFile "$ABS_BUILDDIR/subversion/tests/cmdline/svn-test-work/authz"
++ SVNAdvertiseV2Protocol ${ADVERTISE_V2_PROTOCOL}
++ SVNCacheRevProps ${CACHE_REVPROPS_SETTING}
++ SVNListParentPath On
++ AuthType Basic
++ AuthName "Subversion Repository"
++ AuthUserFile $HTTPD_USERS
++ Require valid-user
++ ${SVN_PATH_AUTHZ_LINE}
++</Location>
++<Location /authz-test-work/authn-anonoff>
++ DAV svn
++ SVNParentPath "$ABS_BUILDDIR/subversion/tests/cmdline/svn-test-work/local_tmp"
++ AuthzSVNAccessFile "$ABS_BUILDDIR/subversion/tests/cmdline/svn-test-work/authz"
++ SVNAdvertiseV2Protocol ${ADVERTISE_V2_PROTOCOL}
++ SVNCacheRevProps ${CACHE_REVPROPS_SETTING}
++ SVNListParentPath On
++ AuthType Basic
++ AuthName "Subversion Repository"
++ AuthUserFile $HTTPD_USERS
++ Require valid-user
++ AuthzSVNAnonymous Off
++ SVNPathAuthz On
++</Location>
++<Location /authz-test-work/authn-lcuser>
++ DAV svn
++ SVNParentPath "$ABS_BUILDDIR/subversion/tests/cmdline/svn-test-work/local_tmp"
++ AuthzSVNAccessFile "$ABS_BUILDDIR/subversion/tests/cmdline/svn-test-work/authz"
++ SVNAdvertiseV2Protocol ${ADVERTISE_V2_PROTOCOL}
++ SVNCacheRevProps ${CACHE_REVPROPS_SETTING}
++ SVNListParentPath On
++ AuthType Basic
++ AuthName "Subversion Repository"
++ AuthUserFile $HTTPD_USERS
++ Require valid-user
++ AuthzForceUsernameCase Lower
++ ${SVN_PATH_AUTHZ_LINE}
++</Location>
++<Location /authz-test-work/authn-lcuser>
++ DAV svn
++ SVNParentPath "$ABS_BUILDDIR/subversion/tests/cmdline/svn-test-work/local_tmp"
++ AuthzSVNAccessFile "$ABS_BUILDDIR/subversion/tests/cmdline/svn-test-work/authz"
++ SVNAdvertiseV2Protocol ${ADVERTISE_V2_PROTOCOL}
++ SVNCacheRevProps ${CACHE_REVPROPS_SETTING}
++ SVNListParentPath On
++ AuthType Basic
++ AuthName "Subversion Repository"
++ AuthUserFile $HTTPD_USERS
++ Require valid-user
++ AuthzForceUsernameCase Lower
++ ${SVN_PATH_AUTHZ_LINE}
++</Location>
++<Location /authz-test-work/authn-group>
++ DAV svn
++ SVNParentPath "$ABS_BUILDDIR/subversion/tests/cmdline/svn-test-work/local_tmp"
++ AuthzSVNAccessFile "$ABS_BUILDDIR/subversion/tests/cmdline/svn-test-work/authz"
++ SVNAdvertiseV2Protocol ${ADVERTISE_V2_PROTOCOL}
++ SVNCacheRevProps ${CACHE_REVPROPS_SETTING}
++ SVNListParentPath On
++ AuthType Basic
++ AuthName "Subversion Repository"
++ AuthUserFile $HTTPD_USERS
++ AuthGroupFile $HTTPD_GROUPS
++ Require group random
++ AuthzSVNAuthoritative Off
++ SVNPathAuthz On
++</Location>
++<IfModule mod_authz_core.c>
++ <Location /authz-test-work/sallrany>
++ DAV svn
++ SVNParentPath "$ABS_BUILDDIR/subversion/tests/cmdline/svn-test-work/local_tmp"
++ AuthzSVNAccessFile "$ABS_BUILDDIR/subversion/tests/cmdline/svn-test-work/authz"
++ SVNAdvertiseV2Protocol ${ADVERTISE_V2_PROTOCOL}
++ SVNCacheRevProps ${CACHE_REVPROPS_SETTING}
++ SVNListParentPath On
++ AuthType Basic
++ AuthName "Subversion Repository"
++ AuthUserFile $HTTPD_USERS
++ AuthzSendForbiddenOnFailure On
++ Satisfy All
++ <RequireAny>
++ Require valid-user
++ Require expr req('ALLOW') == '1'
++ </RequireAny>
++ ${SVN_PATH_AUTHZ_LINE}
++ </Location>
++ <Location /authz-test-work/sallrall>
++ DAV svn
++ SVNParentPath "$ABS_BUILDDIR/subversion/tests/cmdline/svn-test-work/local_tmp"
++ AuthzSVNAccessFile "$ABS_BUILDDIR/subversion/tests/cmdline/svn-test-work/authz"
++ SVNAdvertiseV2Protocol ${ADVERTISE_V2_PROTOCOL}
++ SVNCacheRevProps ${CACHE_REVPROPS_SETTING}
++ SVNListParentPath On
++ AuthType Basic
++ AuthName "Subversion Repository"
++ AuthUserFile $HTTPD_USERS
++ AuthzSendForbiddenOnFailure On
++ Satisfy All
++ <RequireAll>
++ Require valid-user
++ Require expr req('ALLOW') == '1'
++ </RequireAll>
++ ${SVN_PATH_AUTHZ_LINE}
++ </Location>
++</IfModule>
+ RedirectMatch permanent ^/svn-test-work/repositories/REDIRECT-PERM-(.*)\$ /svn-test-work/repositories/\$1
+ RedirectMatch ^/svn-test-work/repositories/REDIRECT-TEMP-(.*)\$ /svn-test-work/repositories/\$1
+ __EOF__
+Index: subversion/tests/cmdline/mod_authz_svn_tests.py
+===================================================================
+--- a/subversion/tests/cmdline/mod_authz_svn_tests.py (nonexistent)
++++ b/subversion/tests/cmdline/mod_authz_svn_tests.py (working copy)
+@@ -0,0 +1,1073 @@
++#!/usr/bin/env python
++#
++# mod_authz_svn_tests.py: testing mod_authz_svn
++#
++# Subversion is a tool for revision control.
++# See http://subversion.apache.org for more information.
++#
++# ====================================================================
++# Licensed to the Apache Software Foundation (ASF) under one
++# or more contributor license agreements. See the NOTICE file
++# distributed with this work for additional information
++# regarding copyright ownership. The ASF licenses this file
++# to you under the Apache License, Version 2.0 (the
++# "License"); you may not use this file except in compliance
++# with the License. You may obtain a copy of the License at
++#
++# http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing,
++# software distributed under the License is distributed on an
++# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++# KIND, either express or implied. See the License for the
++# specific language governing permissions and limitations
++# under the License.
++######################################################################
++
++# General modules
++import os, re, logging
++
++logger = logging.getLogger()
++
++# Our testing module
++import svntest
++
++# (abbreviation)
++Skip = svntest.testcase.Skip_deco
++SkipUnless = svntest.testcase.SkipUnless_deco
++XFail = svntest.testcase.XFail_deco
++Issues = svntest.testcase.Issues_deco
++Issue = svntest.testcase.Issue_deco
++Wimp = svntest.testcase.Wimp_deco
++
++ls_of_D_no_H = '''<html><head><title>repos - Revision 1: /A/D</title></head>
++<body>
++ <h2>repos - Revision 1: /A/D</h2>
++ <ul>
++ <li><a href="../">..</a></li>
++ <li><a href="G/">G/</a></li>
++ <li><a href="gamma">gamma</a></li>
++ </ul>
++</body></html>'''
++
++ls_of_D_H = '''<html><head><title>repos - Revision 1: /A/D</title></head>
++<body>
++ <h2>repos - Revision 1: /A/D</h2>
++ <ul>
++ <li><a href="../">..</a></li>
++ <li><a href="G/">G/</a></li>
++ <li><a href="H/">H/</a></li>
++ <li><a href="gamma">gamma</a></li>
++ </ul>
++</body></html>'''
++
++ls_of_H = '''<html><head><title>repos - Revision 1: /A/D/H</title></head>
++<body>
++ <h2>repos - Revision 1: /A/D/H</h2>
++ <ul>
++ <li><a href="../">..</a></li>
++ <li><a href="chi">chi</a></li>
++ <li><a href="omega">omega</a></li>
++ <li><a href="psi">psi</a></li>
++ </ul>
++</body></html>'''
++
++user1 = svntest.main.wc_author
++user1_upper = user1.upper()
++user1_pass = svntest.main.wc_passwd
++user1_badpass = 'XXX'
++assert user1_pass != user1_badpass, "Passwords can't match"
++user2 = svntest.main.wc_author2
++user2_upper = user2.upper()
++user2_pass = svntest.main.wc_passwd
++user2_badpass = 'XXX'
++assert user2_pass != user2_badpass, "Passwords can't match"
++
++def write_authz_file(sbox):
++ svntest.main.write_authz_file(sbox, {
++ '/': '$anonymous = r\n' +
++ 'jrandom = rw\n' +
++ 'jconstant = rw',
++ '/A/D/H': '$anonymous =\n' +
++ '$authenticated =\n' +
++ 'jrandom = rw'
++ })
++
++def write_authz_file_groups(sbox):
++ authz_name = sbox.authz_name()
++ svntest.main.write_authz_file(sbox,{
++ '/': '* =',
++ })
++
++def verify_get(test_area_url, path, user, pw,
++ expected_status, expected_body, headers):
++ import httplib
++ from urlparse import urlparse
++ import base64
++
++ req_url = test_area_url + path
++
++ loc = urlparse(req_url)
++
++ if loc.scheme == 'http':
++ h = httplib.HTTPConnection(loc.hostname, loc.port)
++ else:
++ h = httplib.HTTPSConnection(loc.hostname, loc.port)
++
++ if headers is None:
++ headers = {}
++
++ if user and pw:
++ auth_info = user + ':' + pw
++ headers['Authorization'] = 'Basic ' + base64.b64encode(auth_info)
++ else:
++ auth_info = "anonymous"
++
++ h.request('GET', req_url, None, headers)
++
++ r = h.getresponse()
++
++ actual_status = r.status
++ if expected_status and expected_status != actual_status:
++
++ logger.warn("Expected status '" + str(expected_status) +
++ "' but got '" + str(actual_status) +
++ "' on url '" + req_url + "' (" +
++ auth_info + ").")
++ raise svntest.Failure
++
++ if expected_body:
++ actual_body = r.read()
++ if expected_body != actual_body:
++ logger.warn("Expected body:")
++ logger.warn(expected_body)
++ logger.warn("But got:")
++ logger.warn(actual_body)
++ logger.warn("on url '" + req_url + "' (" + auth_info + ").")
++ raise svntest.Failure
++
++def verify_gets(test_area_url, tests):
++ for test in tests:
++ verify_get(test_area_url, test['path'], test.get('user'), test.get('pw'),
++ test['status'], test.get('body'), test.get('headers'))
++
++
++######################################################################
++# Tests
++#
++# Each test must return on success or raise on failure.
++
++
++#----------------------------------------------------------------------
++
++
++@SkipUnless(svntest.main.is_ra_type_dav)
++def anon(sbox):
++ "test anonymous access"
++ sbox.build(read_only = True, create_wc = False)
++
++ test_area_url = sbox.repo_url.replace('/svn-test-work/local_tmp/repos',
++ '/authz-test-work/anon')
++
++ write_authz_file(sbox)
++
++ anon_tests = (
++ { 'path': '', 'status': 301 },
++ { 'path': '/', 'status': 200 },
++ { 'path': '/repos', 'status': 301 },
++ { 'path': '/repos/', 'status': 200 },
++ { 'path': '/repos/A', 'status': 301 },
++ { 'path': '/repos/A/', 'status': 200 },
++ { 'path': '/repos/A/D', 'status': 301 },
++ { 'path': '/repos/A/D/', 'status': 200, 'body': ls_of_D_no_H },
++ { 'path': '/repos/A/D/gamma', 'status': 200 },
++ { 'path': '/repos/A/D/H', 'status': 403 },
++ { 'path': '/repos/A/D/H/', 'status': 403 },
++ { 'path': '/repos/A/D/H/chi', 'status': 403 },
++ # auth isn't configured so nothing should change when passing
++ # authn details
++ { 'path': '', 'status': 301, 'user': user1, 'pw': user1_pass},
++ { 'path': '/', 'status': 200, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos', 'status': 301, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/', 'status': 200, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A', 'status': 301, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A/', 'status': 200, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A/D', 'status': 301, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A/D/', 'status': 200, 'body': ls_of_D_no_H,
++ 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A/D/gamma', 'status': 200, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A/D/H', 'status': 403, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A/D/H/', 'status': 403, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A/D/H/chi', 'status': 403, 'user': user1, 'pw': user1_pass},
++ { 'path': '', 'status': 301, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/', 'status': 200, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos', 'status': 301, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/', 'status': 200, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/A', 'status': 301, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/A/', 'status': 200, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/A/D', 'status': 301, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/A/D/', 'status': 200, 'body': ls_of_D_no_H,
++ 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/A/D/gamma', 'status': 200, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/A/D/H', 'status': 403, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/A/D/H/', 'status': 403, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/A/D/H/chi', 'status': 403, 'user': user1, 'pw': user1_badpass},
++ { 'path': '', 'status': 301, 'user': user2, 'pw': user1_pass},
++ { 'path': '/', 'status': 200, 'user': user2, 'pw': user1_pass},
++ { 'path': '/repos', 'status': 301, 'user': user2, 'pw': user1_pass},
++ { 'path': '/repos/', 'status': 200, 'user': user2, 'pw': user1_pass},
++ { 'path': '/repos/A', 'status': 301, 'user': user2, 'pw': user1_pass},
++ { 'path': '/repos/A/', 'status': 200, 'user': user2, 'pw': user1_pass},
++ { 'path': '/repos/A/D', 'status': 301, 'user': user2, 'pw': user1_pass},
++ { 'path': '/repos/A/D/', 'status': 200, 'body': ls_of_D_no_H,
++ 'user': user2, 'pw': user1_pass},
++ { 'path': '/repos/A/D/gamma', 'status': 200, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos/A/D/H', 'status': 403, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos/A/D/H/', 'status': 403, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos/A/D/H/chi', 'status': 403, 'user': user2, 'pw': user2_pass},
++ { 'path': '', 'status': 301, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/', 'status': 200, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos', 'status': 301, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/', 'status': 200, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/A', 'status': 301, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/A/', 'status': 200, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/A/D', 'status': 301, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/A/D/', 'status': 200, 'body': ls_of_D_no_H,
++ 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/A/D/gamma', 'status': 200, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/A/D/H', 'status': 403, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/A/D/H/', 'status': 403, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/A/D/H/chi', 'status': 403, 'user': user2, 'pw': user2_badpass},
++ )
++
++ verify_gets(test_area_url, anon_tests)
++
++
++@SkipUnless(svntest.main.is_ra_type_dav)
++def mixed(sbox):
++ "test mixed anonymous and authenticated access"
++ sbox.build(read_only = True, create_wc = False)
++
++ test_area_url = sbox.repo_url.replace('/svn-test-work/local_tmp/repos',
++ '/authz-test-work/mixed')
++
++ write_authz_file(sbox)
++
++ mixed_tests = (
++ { 'path': '', 'status': 301, },
++ { 'path': '/', 'status': 200, },
++ { 'path': '/repos', 'status': 301, },
++ { 'path': '/repos/', 'status': 200, },
++ { 'path': '/repos/A', 'status': 301, },
++ { 'path': '/repos/A/', 'status': 200, },
++ { 'path': '/repos/A/D', 'status': 301, },
++ { 'path': '/repos/A/D/', 'status': 200, 'body': ls_of_D_no_H,
++ },
++ { 'path': '/repos/A/D/gamma', 'status': 200, },
++ { 'path': '/repos/A/D/H', 'status': 401, },
++ { 'path': '/repos/A/D/H/', 'status': 401, },
++ { 'path': '/repos/A/D/H/chi', 'status': 401, },
++ # auth is configured and user1 is allowed access to H
++ { 'path': '', 'status': 301, 'user': user1, 'pw': user1_pass},
++ { 'path': '/', 'status': 200, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos', 'status': 301, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/', 'status': 200, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A', 'status': 301, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A/', 'status': 200, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A/D', 'status': 301, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A/D/', 'status': 200, 'body': ls_of_D_H,
++ 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A/D/gamma', 'status': 200, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A/D/H', 'status': 301, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A/D/H/', 'status': 200, 'body': ls_of_H, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A/D/H/chi', 'status': 200, 'user': user1, 'pw': user1_pass},
++ # try with the wrong password for user1
++ { 'path': '', 'status': 401, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/', 'status': 401, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos', 'status': 401, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/', 'status': 401, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/A', 'status': 401, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/A/', 'status': 401, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/A/D', 'status': 401, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/A/D/', 'status': 401, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/A/D/gamma', 'status': 401, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/A/D/H', 'status': 401, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/A/D/H/', 'status': 401, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/A/D/H/chi', 'status': 401, 'user': user1, 'pw': user1_badpass},
++ # auth is configured and user2 is not allowed access to H
++ { 'path': '', 'status': 301, 'user': user2, 'pw': user2_pass},
++ { 'path': '/', 'status': 200, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos', 'status': 301, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos/', 'status': 200, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos/A', 'status': 301, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos/A/', 'status': 200, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos/A/D', 'status': 301, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos/A/D/', 'status': 200, 'body': ls_of_D_no_H,
++ 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos/A/D/gamma', 'status': 200, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos/A/D/H', 'status': 403, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos/A/D/H/', 'status': 403, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos/A/D/H/chi', 'status': 403, 'user': user2, 'pw': user2_pass},
++ # try with the wrong password for user2
++ { 'path': '', 'status': 401, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/', 'status': 401, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos', 'status': 401, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/', 'status': 401, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/A', 'status': 401, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/A/', 'status': 401, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/A/D', 'status': 401, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/A/D/', 'status': 401, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/A/D/gamma', 'status': 401, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/A/D/H', 'status': 401, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/A/D/H/', 'status': 401, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/A/D/H/chi', 'status': 401, 'user': user2, 'pw': user2_badpass},
++ )
++
++ verify_gets(test_area_url, mixed_tests)
++
++@SkipUnless(svntest.main.is_ra_type_dav)
++@XFail(svntest.main.is_httpd_authz_provider_enabled)
++# uses the AuthzSVNNoAuthWhenAnonymousAllowed On directive
++# this is broken with httpd 2.3.x+ since it requires the auth system to accept
++# r->user == NULL and there is a test for this in server/request.c now. It
++# was intended as a workaround for the lack of Satisfy Any in 2.3.x+ which
++# was resolved by httpd with mod_access_compat in 2.3.x+.
++def mixed_noauthwhenanon(sbox):
++ "test mixed with noauthwhenanon directive"
++ sbox.build(read_only = True, create_wc = False)
++
++ test_area_url = sbox.repo_url.replace('/svn-test-work/local_tmp/repos',
++ '/authz-test-work/mixed-noauthwhenanon')
++
++ write_authz_file(sbox)
++
++ noauthwhenanon_tests = (
++ { 'path': '', 'status': 301, },
++ { 'path': '/', 'status': 200, },
++ { 'path': '/repos', 'status': 301, },
++ { 'path': '/repos/', 'status': 200, },
++ { 'path': '/repos/A', 'status': 301, },
++ { 'path': '/repos/A/', 'status': 200, },
++ { 'path': '/repos/A/D', 'status': 301, },
++ { 'path': '/repos/A/D/', 'status': 200, 'body': ls_of_D_no_H,
++ },
++ { 'path': '/repos/A/D/gamma', 'status': 200, },
++ { 'path': '/repos/A/D/H', 'status': 401, },
++ { 'path': '/repos/A/D/H/', 'status': 401, },
++ { 'path': '/repos/A/D/H/chi', 'status': 401, },
++ # auth is configured and user1 is allowed access to H
++ { 'path': '', 'status': 301, 'user': user1, 'pw': user1_pass},
++ { 'path': '/', 'status': 200, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos', 'status': 301, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/', 'status': 200, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A', 'status': 301, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A/', 'status': 200, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A/D', 'status': 301, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A/D/', 'status': 200, 'body': ls_of_D_H,
++ 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A/D/gamma', 'status': 200, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A/D/H', 'status': 301, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A/D/H/', 'status': 200, 'body': ls_of_H, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A/D/H/chi', 'status': 200, 'user': user1, 'pw': user1_pass},
++ # try with the wrong password for user1
++ # note that unlike doing this with Satisfy Any this case
++ # actually provides anon access when provided with an invalid
++ # password
++ { 'path': '', 'status': 301, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/', 'status': 200, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos', 'status': 301, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/', 'status': 200, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/A', 'status': 301, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/A/', 'status': 200, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/A/D', 'status': 301, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/A/D/', 'status': 200, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/A/D/gamma', 'status': 200, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/A/D/H', 'status': 401, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/A/D/H/', 'status': 401, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/A/D/H/chi', 'status': 401, 'user': user1, 'pw': user1_badpass},
++ # auth is configured and user2 is not allowed access to H
++ { 'path': '', 'status': 301, 'user': user2, 'pw': user2_pass},
++ { 'path': '/', 'status': 200, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos', 'status': 301, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos/', 'status': 200, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos/A', 'status': 301, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos/A/', 'status': 200, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos/A/D', 'status': 301, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos/A/D/', 'status': 200, 'body': ls_of_D_no_H,
++ 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos/A/D/gamma', 'status': 200, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos/A/D/H', 'status': 403, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos/A/D/H/', 'status': 403, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos/A/D/H/chi', 'status': 403, 'user': user2, 'pw': user2_pass},
++ # try with the wrong password for user2
++ { 'path': '', 'status': 301, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/', 'status': 200, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos', 'status': 301, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/', 'status': 200, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/A', 'status': 301, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/A/', 'status': 200, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/A/D', 'status': 301, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/A/D/', 'status': 200, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/A/D/gamma', 'status': 200, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/A/D/H', 'status': 401, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/A/D/H/', 'status': 401, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/A/D/H/chi', 'status': 401, 'user': user2, 'pw': user2_badpass},
++ )
++
++ verify_gets(test_area_url, noauthwhenanon_tests)
++
++
++@SkipUnless(svntest.main.is_ra_type_dav)
++def authn(sbox):
++ "test authenticated only access"
++ sbox.build(read_only = True, create_wc = False)
++
++ test_area_url = sbox.repo_url.replace('/svn-test-work/local_tmp/repos',
++ '/authz-test-work/authn')
++
++ write_authz_file(sbox)
++
++ authn_tests = (
++ { 'path': '', 'status': 401, },
++ { 'path': '/', 'status': 401, },
++ { 'path': '/repos', 'status': 401, },
++ { 'path': '/repos/', 'status': 401, },
++ { 'path': '/repos/A', 'status': 401, },
++ { 'path': '/repos/A/', 'status': 401, },
++ { 'path': '/repos/A/D', 'status': 401, },
++ { 'path': '/repos/A/D/', 'status': 401, },
++ { 'path': '/repos/A/D/gamma', 'status': 401, },
++ { 'path': '/repos/A/D/H', 'status': 401, },
++ { 'path': '/repos/A/D/H/', 'status': 401, },
++ { 'path': '/repos/A/D/H/chi', 'status': 401, },
++ # auth is configured and user1 is allowed access to H
++ { 'path': '', 'status': 301, 'user': user1, 'pw': user1_pass},
++ { 'path': '/', 'status': 200, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos', 'status': 301, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/', 'status': 200, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A', 'status': 301, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A/', 'status': 200, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A/D', 'status': 301, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A/D/', 'status': 200, 'body': ls_of_D_H,
++ 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A/D/gamma', 'status': 200, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A/D/H', 'status': 301, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A/D/H/', 'status': 200, 'body': ls_of_H, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A/D/H/chi', 'status': 200, 'user': user1, 'pw': user1_pass},
++ # try with upper case username for user1
++ { 'path': '', 'status': 301, 'user': user1_upper, 'pw': user1_pass},
++ { 'path': '/', 'status': 200, 'user': user1_upper, 'pw': user1_pass},
++ { 'path': '/repos', 'status': 403, 'user': user1_upper, 'pw': user1_pass},
++ { 'path': '/repos/', 'status': 403, 'user': user1_upper, 'pw': user1_pass},
++ { 'path': '/repos/A', 'status': 403, 'user': user1_upper, 'pw': user1_pass},
++ { 'path': '/repos/A/', 'status': 403, 'user': user1_upper, 'pw': user1_pass},
++ { 'path': '/repos/A/D', 'status': 403, 'user': user1_upper, 'pw': user1_pass},
++ { 'path': '/repos/A/D/', 'status': 403, 'user': user1_upper, 'pw': user1_pass},
++ { 'path': '/repos/A/D/gamma', 'status': 403, 'user': user1_upper, 'pw': user1_pass},
++ { 'path': '/repos/A/D/H', 'status': 403, 'user': user1_upper, 'pw': user1_pass},
++ { 'path': '/repos/A/D/H/', 'status': 403, 'user': user1_upper, 'pw': user1_pass},
++ { 'path': '/repos/A/D/H/chi', 'status': 403, 'user': user1_upper, 'pw': user1_pass},
++ # try with the wrong password for user1
++ { 'path': '', 'status': 401, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/', 'status': 401, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos', 'status': 401, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/', 'status': 401, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/A', 'status': 401, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/A/', 'status': 401, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/A/D', 'status': 401, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/A/D/', 'status': 401, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/A/D/gamma', 'status': 401, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/A/D/H', 'status': 401, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/A/D/H/', 'status': 401, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/A/D/H/chi', 'status': 401, 'user': user1, 'pw': user1_badpass},
++ # auth is configured and user2 is not allowed access to H
++ { 'path': '', 'status': 301, 'user': user2, 'pw': user2_pass},
++ { 'path': '/', 'status': 200, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos', 'status': 301, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos/', 'status': 200, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos/A', 'status': 301, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos/A/', 'status': 200, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos/A/D', 'status': 301, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos/A/D/', 'status': 200, 'body': ls_of_D_no_H,
++ 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos/A/D/gamma', 'status': 200, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos/A/D/H', 'status': 403, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos/A/D/H/', 'status': 403, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos/A/D/H/chi', 'status': 403, 'user': user2, 'pw': user2_pass},
++ # try with upper case username for user2
++ { 'path': '', 'status': 301, 'user': user2_upper, 'pw': user2_pass},
++ { 'path': '/', 'status': 200, 'user': user2_upper, 'pw': user2_pass},
++ { 'path': '/repos', 'status': 403, 'user': user2_upper, 'pw': user2_pass},
++ { 'path': '/repos/', 'status': 403, 'user': user2_upper, 'pw': user2_pass},
++ { 'path': '/repos/A', 'status': 403, 'user': user2_upper, 'pw': user2_pass},
++ { 'path': '/repos/A/', 'status': 403, 'user': user2_upper, 'pw': user2_pass},
++ { 'path': '/repos/A/D', 'status': 403, 'user': user2_upper, 'pw': user2_pass},
++ { 'path': '/repos/A/D/', 'status': 403, 'user': user2_upper, 'pw': user2_pass},
++ { 'path': '/repos/A/D/gamma', 'status': 403, 'user': user2_upper, 'pw': user2_pass},
++ { 'path': '/repos/A/D/H', 'status': 403, 'user': user2_upper, 'pw': user2_pass},
++ { 'path': '/repos/A/D/H/', 'status': 403, 'user': user2_upper, 'pw': user2_pass},
++ { 'path': '/repos/A/D/H/chi', 'status': 403, 'user': user2_upper, 'pw': user2_pass},
++ # try with the wrong password for user2
++ { 'path': '', 'status': 401, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/', 'status': 401, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos', 'status': 401, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/', 'status': 401, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/A', 'status': 401, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/A/', 'status': 401, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/A/D', 'status': 401, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/A/D/', 'status': 401, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/A/D/gamma', 'status': 401, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/A/D/H', 'status': 401, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/A/D/H/', 'status': 401, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/A/D/H/chi', 'status': 401, 'user': user2, 'pw': user2_badpass},
++ )
++
++ verify_gets(test_area_url, authn_tests)
++
++@SkipUnless(svntest.main.is_ra_type_dav)
++def authn_anonoff(sbox):
++ "test authenticated only access with anonoff"
++ sbox.build(read_only = True, create_wc = False)
++
++ test_area_url = sbox.repo_url.replace('/svn-test-work/local_tmp/repos',
++ '/authz-test-work/authn-anonoff')
++
++ write_authz_file(sbox)
++
++ anonoff_tests = (
++ { 'path': '', 'status': 401, },
++ { 'path': '/', 'status': 401, },
++ { 'path': '/repos', 'status': 401, },
++ { 'path': '/repos/', 'status': 401, },
++ { 'path': '/repos/A', 'status': 401, },
++ { 'path': '/repos/A/', 'status': 401, },
++ { 'path': '/repos/A/D', 'status': 401, },
++ { 'path': '/repos/A/D/', 'status': 401, },
++ { 'path': '/repos/A/D/gamma', 'status': 401, },
++ { 'path': '/repos/A/D/H', 'status': 401, },
++ { 'path': '/repos/A/D/H/', 'status': 401, },
++ { 'path': '/repos/A/D/H/chi', 'status': 401, },
++ # auth is configured and user1 is allowed access to H
++ { 'path': '', 'status': 301, 'user': user1, 'pw': user1_pass},
++ { 'path': '/', 'status': 200, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos', 'status': 301, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/', 'status': 200, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A', 'status': 301, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A/', 'status': 200, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A/D', 'status': 301, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A/D/', 'status': 200, 'body': ls_of_D_H,
++ 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A/D/gamma', 'status': 200, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A/D/H', 'status': 301, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A/D/H/', 'status': 200, 'body': ls_of_H, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A/D/H/chi', 'status': 200, 'user': user1, 'pw': user1_pass},
++ # try with upper case username for user1
++ { 'path': '', 'status': 301, 'user': user1_upper, 'pw': user1_pass},
++ { 'path': '/', 'status': 200, 'user': user1_upper, 'pw': user1_pass},
++ { 'path': '/repos', 'status': 403, 'user': user1_upper, 'pw': user1_pass},
++ { 'path': '/repos/', 'status': 403, 'user': user1_upper, 'pw': user1_pass},
++ { 'path': '/repos/A', 'status': 403, 'user': user1_upper, 'pw': user1_pass},
++ { 'path': '/repos/A/', 'status': 403, 'user': user1_upper, 'pw': user1_pass},
++ { 'path': '/repos/A/D', 'status': 403, 'user': user1_upper, 'pw': user1_pass},
++ { 'path': '/repos/A/D/', 'status': 403, 'user': user1_upper, 'pw': user1_pass},
++ { 'path': '/repos/A/D/gamma', 'status': 403, 'user': user1_upper, 'pw': user1_pass},
++ { 'path': '/repos/A/D/H', 'status': 403, 'user': user1_upper, 'pw': user1_pass},
++ { 'path': '/repos/A/D/H/', 'status': 403, 'user': user1_upper, 'pw': user1_pass},
++ { 'path': '/repos/A/D/H/chi', 'status': 403, 'user': user1_upper, 'pw': user1_pass},
++ # try with the wrong password for user1
++ { 'path': '', 'status': 401, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/', 'status': 401, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos', 'status': 401, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/', 'status': 401, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/A', 'status': 401, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/A/', 'status': 401, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/A/D', 'status': 401, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/A/D/', 'status': 401, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/A/D/gamma', 'status': 401, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/A/D/H', 'status': 401, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/A/D/H/', 'status': 401, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/A/D/H/chi', 'status': 401, 'user': user1, 'pw': user1_badpass},
++ # auth is configured and user2 is not allowed access to H
++ { 'path': '', 'status': 301, 'user': user2, 'pw': user2_pass},
++ { 'path': '/', 'status': 200, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos', 'status': 301, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos/', 'status': 200, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos/A', 'status': 301, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos/A/', 'status': 200, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos/A/D', 'status': 301, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos/A/D/', 'status': 200, 'body': ls_of_D_no_H,
++ 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos/A/D/gamma', 'status': 200, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos/A/D/H', 'status': 403, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos/A/D/H/', 'status': 403, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos/A/D/H/chi', 'status': 403, 'user': user2, 'pw': user2_pass},
++ # try with upper case username for user2
++ { 'path': '', 'status': 301, 'user': user2_upper, 'pw': user2_pass},
++ { 'path': '/', 'status': 200, 'user': user2_upper, 'pw': user2_pass},
++ { 'path': '/repos', 'status': 403, 'user': user2_upper, 'pw': user2_pass},
++ { 'path': '/repos/', 'status': 403, 'user': user2_upper, 'pw': user2_pass},
++ { 'path': '/repos/A', 'status': 403, 'user': user2_upper, 'pw': user2_pass},
++ { 'path': '/repos/A/', 'status': 403, 'user': user2_upper, 'pw': user2_pass},
++ { 'path': '/repos/A/D', 'status': 403, 'user': user2_upper, 'pw': user2_pass},
++ { 'path': '/repos/A/D/', 'status': 403, 'user': user2_upper, 'pw': user2_pass},
++ { 'path': '/repos/A/D/gamma', 'status': 403, 'user': user2_upper, 'pw': user2_pass},
++ { 'path': '/repos/A/D/H', 'status': 403, 'user': user2_upper, 'pw': user2_pass},
++ { 'path': '/repos/A/D/H/', 'status': 403, 'user': user2_upper, 'pw': user2_pass},
++ { 'path': '/repos/A/D/H/chi', 'status': 403, 'user': user2_upper, 'pw': user2_pass},
++ # try with the wrong password for user2
++ { 'path': '', 'status': 401, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/', 'status': 401, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos', 'status': 401, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/', 'status': 401, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/A', 'status': 401, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/A/', 'status': 401, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/A/D', 'status': 401, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/A/D/', 'status': 401, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/A/D/gamma', 'status': 401, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/A/D/H', 'status': 401, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/A/D/H/', 'status': 401, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/A/D/H/chi', 'status': 401, 'user': user2, 'pw': user2_badpass},
++ )
++
++ verify_gets(test_area_url, anonoff_tests)
++
++@SkipUnless(svntest.main.is_ra_type_dav)
++def authn_lcuser(sbox):
++ "test authenticated only access with lcuser"
++ sbox.build(read_only = True, create_wc = False)
++
++ test_area_url = sbox.repo_url.replace('/svn-test-work/local_tmp/repos',
++ '/authz-test-work/authn-lcuser')
++
++ write_authz_file(sbox)
++
++ lcuser_tests = (
++ # try with upper case username for user1 (works due to lcuser option)
++ { 'path': '', 'status': 301, 'user': user1_upper, 'pw': user1_pass},
++ { 'path': '/', 'status': 200, 'user': user1_upper, 'pw': user1_pass},
++ { 'path': '/repos', 'status': 301, 'user': user1_upper, 'pw': user1_pass},
++ { 'path': '/repos/', 'status': 200, 'user': user1_upper, 'pw': user1_pass},
++ { 'path': '/repos/A', 'status': 301, 'user': user1_upper, 'pw': user1_pass},
++ { 'path': '/repos/A/', 'status': 200, 'user': user1_upper, 'pw': user1_pass},
++ { 'path': '/repos/A/D', 'status': 301, 'user': user1_upper, 'pw': user1_pass},
++ { 'path': '/repos/A/D/', 'status': 200, 'body': ls_of_D_H,
++ 'user': user1_upper, 'pw': user1_pass},
++ { 'path': '/repos/A/D/gamma', 'status': 200, 'user': user1_upper, 'pw': user1_pass},
++ { 'path': '/repos/A/D/H', 'status': 301, 'user': user1_upper, 'pw': user1_pass},
++ { 'path': '/repos/A/D/H/', 'status': 200, 'body': ls_of_H, 'user': user1_upper, 'pw': user1_pass},
++ { 'path': '/repos/A/D/H/chi', 'status': 200, 'user': user1_upper, 'pw': user1_pass},
++ # try with upper case username for user2 (works due to lcuser option)
++ { 'path': '', 'status': 301, 'user': user2_upper, 'pw': user2_pass},
++ { 'path': '/', 'status': 200, 'user': user2_upper, 'pw': user2_pass},
++ { 'path': '/repos', 'status': 301, 'user': user2_upper, 'pw': user2_pass},
++ { 'path': '/repos/', 'status': 200, 'user': user2_upper, 'pw': user2_pass},
++ { 'path': '/repos/A', 'status': 301, 'user': user2_upper, 'pw': user2_pass},
++ { 'path': '/repos/A/', 'status': 200, 'user': user2_upper, 'pw': user2_pass},
++ { 'path': '/repos/A/D', 'status': 301, 'user': user2_upper, 'pw': user2_pass},
++ { 'path': '/repos/A/D/', 'status': 200, 'body': ls_of_D_no_H,
++ 'user': user2_upper, 'pw': user2_pass},
++ { 'path': '/repos/A/D/gamma', 'status': 200, 'user': user2_upper, 'pw': user2_pass},
++ { 'path': '/repos/A/D/H', 'status': 403, 'user': user2_upper, 'pw': user2_pass},
++ { 'path': '/repos/A/D/H/', 'status': 403, 'user': user2_upper, 'pw': user2_pass},
++ { 'path': '/repos/A/D/H/chi', 'status': 403, 'user': user2_upper, 'pw': user2_pass},
++ )
++
++ verify_gets(test_area_url, lcuser_tests)
++
++# authenticated access only by group - an excuse to use AuthzSVNAuthoritative Off.
++# This is terribly messed up: Require group runs after mod_authz_svn,
++# so if mod_authz_svn grants the access then it doesn't matter what the group
++# requirement says. If mod_authz_svn rejects the access, AuthzSVNAuthoritative Off
++# lets the request fall through to the group check. Overall the behavior of setups like
++# this is almost guaranteed not to be what users expect (config sketch follows this test).
++@SkipUnless(svntest.main.is_ra_type_dav)
++def authn_group(sbox):
++ "test authenticated only access via groups"
++ sbox.build(read_only = True, create_wc = False)
++
++ test_area_url = sbox.repo_url.replace('/svn-test-work/local_tmp/repos',
++ '/authz-test-work/authn-group')
++
++ # Can't use write_authz_file() as most tests do because we want to deny all
++ # access with mod_authz_svn so that the tests fall through to the group handling
++ authz_name = sbox.authz_name()
++ svntest.main.write_authz_file(sbox, {
++ '/': '* =',
++ })
++
++ group_tests = (
++ { 'path': '', 'status': 401, },
++ { 'path': '/', 'status': 401, },
++ { 'path': '/repos', 'status': 401, },
++ { 'path': '/repos/', 'status': 401, },
++ { 'path': '/repos/A', 'status': 401, },
++ { 'path': '/repos/A/', 'status': 401, },
++ { 'path': '/repos/A/D', 'status': 401, },
++ { 'path': '/repos/A/D/', 'status': 401, },
++ { 'path': '/repos/A/D/gamma', 'status': 401, },
++ { 'path': '/repos/A/D/H', 'status': 401, },
++ { 'path': '/repos/A/D/H/', 'status': 401, },
++ { 'path': '/repos/A/D/H/chi', 'status': 401, },
++ # auth is configured and user1 is allowed access to the repo including H
++ { 'path': '', 'status': 301, 'user': user1, 'pw': user1_pass},
++ { 'path': '/', 'status': 200, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos', 'status': 301, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/', 'status': 200, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A', 'status': 301, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A/', 'status': 200, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A/D', 'status': 301, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A/D/', 'status': 200, 'body': ls_of_D_H,
++ 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A/D/gamma', 'status': 200, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A/D/H', 'status': 301, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A/D/H/', 'status': 200, 'body': ls_of_H, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A/D/H/chi', 'status': 200, 'user': user1, 'pw': user1_pass},
++ )
++
++ verify_gets(test_area_url, group_tests)
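
For reference, the httpd configuration behind this test is the /authz-test-work/authn-group
<Location> block that the win-tests.py change later in this patch generates (AuthGroupFile,
"Require group random", AuthzSVNAuthoritative Off). The Python sketch below only illustrates
that shape in the same string-building style; the function name and its arguments are
placeholders, not values used by the harness.

def example_authn_group_location(parent_path, authz_file, users_file, groups_file):
    # Illustrative sketch only: mirrors the authn-group block built by
    # win-tests.py further down in this patch; all arguments are placeholders.
    return (
        '<Location /authz-test-work/authn-group>\n'
        '  DAV svn\n'
        '  SVNParentPath ' + parent_path + '\n'
        '  AuthzSVNAccessFile ' + authz_file + '\n'
        '  AuthType Basic\n'
        '  AuthName "Subversion Repository"\n'
        '  AuthUserFile ' + users_file + '\n'
        '  AuthGroupFile ' + groups_file + '\n'
        '  Require group random\n'       # group check runs after mod_authz_svn
        '  AuthzSVNAuthoritative Off\n'  # denials fall through to the group check
        '  SVNPathAuthz On\n'
        '</Location>\n'
    )
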
++
++# This test exists to validate our behavior when used with the new authz
++# provider system introduced in httpd 2.3.x. The Satisfy directive
++# determines how older authz hooks are combined, and the RequireA(ll|ny)
++# blocks handle how new authz providers are combined. The overall results of
++# all the authz providers (combined per the Require* blocks) are then
++# combined with the other authz hooks via the Satisfy directive.
++# This means the test requires that mod_authz_svn says yes and that there is
++# either a valid user or the ALLOW header is 1. The header may seem
++# like a silly test, but it's easier to exercise than, say, a host directive
++# in a repeatable test. (A sketch of the corresponding config follows this test.)
++@SkipUnless(svntest.main.is_httpd_authz_provider_enabled)
++def authn_sallrany(sbox):
++ "test satisfy all require any config"
++ sbox.build(read_only = True, create_wc = False)
++
++ test_area_url = sbox.repo_url.replace('/svn-test-work/local_tmp/repos',
++ '/authz-test-work/sallrany')
++
++ write_authz_file(sbox)
++
++ allow_header = { 'ALLOW': '1' }
++
++ sallrany_tests = (
++ #anon access isn't allowed without ALLOW header
++ { 'path': '', 'status': 401, },
++ { 'path': '/', 'status': 401, },
++ { 'path': '/repos', 'status': 401, },
++ { 'path': '/repos/', 'status': 401, },
++ { 'path': '/repos/A', 'status': 401, },
++ { 'path': '/repos/A/', 'status': 401, },
++ { 'path': '/repos/A/D', 'status': 401, },
++ { 'path': '/repos/A/D/', 'status': 401, },
++ { 'path': '/repos/A/D/gamma', 'status': 401, },
++ { 'path': '/repos/A/D/H', 'status': 401, },
++ { 'path': '/repos/A/D/H/', 'status': 401, },
++ { 'path': '/repos/A/D/H/chi', 'status': 401, },
++ # auth is configured and user1 is allowed access to the repo including H
++ { 'path': '', 'status': 301, 'user': user1, 'pw': user1_pass},
++ { 'path': '/', 'status': 200, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos', 'status': 301, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/', 'status': 200, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A', 'status': 301, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A/', 'status': 200, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A/D', 'status': 301, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A/D/', 'status': 200, 'body': ls_of_D_H,
++ 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A/D/gamma', 'status': 200, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A/D/H', 'status': 301, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A/D/H/', 'status': 200, 'body': ls_of_H, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A/D/H/chi', 'status': 200, 'user': user1, 'pw': user1_pass},
++ # try with the wrong password for user1
++ { 'path': '', 'status': 401, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/', 'status': 401, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos', 'status': 401, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/', 'status': 401, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/A', 'status': 401, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/A/', 'status': 401, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/A/D', 'status': 401, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/A/D/', 'status': 401, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/A/D/gamma', 'status': 401, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/A/D/H', 'status': 401, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/A/D/H/', 'status': 401, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/A/D/H/chi', 'status': 401, 'user': user1, 'pw': user1_badpass},
++ # auth is configured and user2 is not allowed access to H
++ { 'path': '', 'status': 301, 'user': user2, 'pw': user2_pass},
++ { 'path': '/', 'status': 200, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos', 'status': 301, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos/', 'status': 200, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos/A', 'status': 301, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos/A/', 'status': 200, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos/A/D', 'status': 301, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos/A/D/', 'status': 200, 'body': ls_of_D_no_H,
++ 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos/A/D/gamma', 'status': 200, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos/A/D/H', 'status': 403, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos/A/D/H/', 'status': 403, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos/A/D/H/chi', 'status': 403, 'user': user2, 'pw': user2_pass},
++ # try with the wrong password for user2
++ { 'path': '', 'status': 401, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/', 'status': 401, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos', 'status': 401, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/', 'status': 401, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/A', 'status': 401, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/A/', 'status': 401, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/A/D', 'status': 401, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/A/D/', 'status': 401, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/A/D/gamma', 'status': 401, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/A/D/H', 'status': 401, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/A/D/H/', 'status': 401, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/A/D/H/chi', 'status': 401, 'user': user2, 'pw': user2_badpass},
++ # anon is allowed with the ALLOW header
++ { 'path': '', 'status': 301, 'headers': allow_header },
++ { 'path': '/', 'status': 200, 'headers': allow_header },
++ { 'path': '/repos', 'status': 301, 'headers': allow_header },
++ { 'path': '/repos/', 'status': 200, 'headers': allow_header },
++ { 'path': '/repos/A', 'status': 301, 'headers': allow_header },
++ { 'path': '/repos/A/', 'status': 200, 'headers': allow_header },
++ { 'path': '/repos/A/D', 'status': 301, 'headers': allow_header },
++ { 'path': '/repos/A/D/', 'status': 200, 'body': ls_of_D_no_H, 'headers': allow_header },
++ { 'path': '/repos/A/D/gamma', 'status': 200, 'headers': allow_header },
++ # these 3 tests return 403 instead of 401 because the config allows
++ # the anon user with the ALLOW header without any auth, and the old hook
++ # system has no way of knowing it should return 401 since authentication is
++ # configured and can change the behavior. It could decide to return 401 just on
++ # the basis of authentication being configured, but then that leaks info in other
++ # cases, so it's better for this case to be "broken".
++ { 'path': '/repos/A/D/H', 'status': 403, 'headers': allow_header },
++ { 'path': '/repos/A/D/H/', 'status': 403, 'headers': allow_header },
++ { 'path': '/repos/A/D/H/chi', 'status': 403, 'headers': allow_header },
++ # auth is configured and user1 is allowed access to the repo including H
++ { 'path': '', 'status': 301, 'user': user1, 'pw': user1_pass, 'headers': allow_header },
++ { 'path': '/', 'status': 200, 'user': user1, 'pw': user1_pass, 'headers': allow_header },
++ { 'path': '/repos', 'status': 301, 'user': user1, 'pw': user1_pass, 'headers': allow_header },
++ { 'path': '/repos/', 'status': 200, 'user': user1, 'pw': user1_pass, 'headers': allow_header },
++ { 'path': '/repos/A', 'status': 301, 'user': user1, 'pw': user1_pass, 'headers': allow_header },
++ { 'path': '/repos/A/', 'status': 200, 'user': user1, 'pw': user1_pass, 'headers': allow_header },
++ { 'path': '/repos/A/D', 'status': 301, 'user': user1, 'pw': user1_pass, 'headers': allow_header },
++ { 'path': '/repos/A/D/', 'status': 200, 'body': ls_of_D_H,
++ 'user': user1, 'pw': user1_pass, 'headers': allow_header },
++ { 'path': '/repos/A/D/gamma', 'status': 200, 'user': user1, 'pw': user1_pass, 'headers': allow_header },
++ { 'path': '/repos/A/D/H', 'status': 301, 'user': user1, 'pw': user1_pass, 'headers': allow_header },
++ { 'path': '/repos/A/D/H/', 'status': 200, 'body': ls_of_H, 'user': user1, 'pw': user1_pass, 'headers': allow_header },
++ { 'path': '/repos/A/D/H/chi', 'status': 200, 'user': user1, 'pw': user1_pass, 'headers': allow_header },
++ # try with the wrong password for user1
++ { 'path': '', 'status': 401, 'user': user1, 'pw': user1_badpass, 'headers': allow_header },
++ { 'path': '/', 'status': 401, 'user': user1, 'pw': user1_badpass, 'headers': allow_header },
++ { 'path': '/repos', 'status': 401, 'user': user1, 'pw': user1_badpass, 'headers': allow_header },
++ { 'path': '/repos/', 'status': 401, 'user': user1, 'pw': user1_badpass, 'headers': allow_header },
++ { 'path': '/repos/A', 'status': 401, 'user': user1, 'pw': user1_badpass, 'headers': allow_header },
++ { 'path': '/repos/A/', 'status': 401, 'user': user1, 'pw': user1_badpass, 'headers': allow_header },
++ { 'path': '/repos/A/D', 'status': 401, 'user': user1, 'pw': user1_badpass, 'headers': allow_header },
++ { 'path': '/repos/A/D/', 'status': 401, 'user': user1, 'pw': user1_badpass, 'headers': allow_header },
++ { 'path': '/repos/A/D/gamma', 'status': 401, 'user': user1, 'pw': user1_badpass, 'headers': allow_header },
++ { 'path': '/repos/A/D/H', 'status': 401, 'user': user1, 'pw': user1_badpass, 'headers': allow_header },
++ { 'path': '/repos/A/D/H/', 'status': 401, 'user': user1, 'pw': user1_badpass, 'headers': allow_header },
++ { 'path': '/repos/A/D/H/chi', 'status': 401, 'user': user1, 'pw': user1_badpass, 'headers': allow_header },
++ # auth is configured and user2 is not allowed access to H
++ { 'path': '', 'status': 301, 'user': user2, 'pw': user2_pass, 'headers': allow_header },
++ { 'path': '/', 'status': 200, 'user': user2, 'pw': user2_pass, 'headers': allow_header },
++ { 'path': '/repos', 'status': 301, 'user': user2, 'pw': user2_pass, 'headers': allow_header },
++ { 'path': '/repos/', 'status': 200, 'user': user2, 'pw': user2_pass, 'headers': allow_header },
++ { 'path': '/repos/A', 'status': 301, 'user': user2, 'pw': user2_pass, 'headers': allow_header },
++ { 'path': '/repos/A/', 'status': 200, 'user': user2, 'pw': user2_pass, 'headers': allow_header },
++ { 'path': '/repos/A/D', 'status': 301, 'user': user2, 'pw': user2_pass, 'headers': allow_header },
++ { 'path': '/repos/A/D/', 'status': 200, 'body': ls_of_D_no_H,
++ 'user': user2, 'pw': user2_pass, 'headers': allow_header },
++ { 'path': '/repos/A/D/gamma', 'status': 200, 'user': user2, 'pw': user2_pass, 'headers': allow_header },
++ { 'path': '/repos/A/D/H', 'status': 403, 'user': user2, 'pw': user2_pass, 'headers': allow_header },
++ { 'path': '/repos/A/D/H/', 'status': 403, 'user': user2, 'pw': user2_pass, 'headers': allow_header },
++ { 'path': '/repos/A/D/H/chi', 'status': 403, 'user': user2, 'pw': user2_pass, 'headers': allow_header },
++ # try with the wrong password for user2
++ { 'path': '', 'status': 401, 'user': user2, 'pw': user2_badpass, 'headers': allow_header },
++ { 'path': '/', 'status': 401, 'user': user2, 'pw': user2_badpass, 'headers': allow_header },
++ { 'path': '/repos', 'status': 401, 'user': user2, 'pw': user2_badpass, 'headers': allow_header },
++ { 'path': '/repos/', 'status': 401, 'user': user2, 'pw': user2_badpass, 'headers': allow_header },
++ { 'path': '/repos/A', 'status': 401, 'user': user2, 'pw': user2_badpass, 'headers': allow_header },
++ { 'path': '/repos/A/', 'status': 401, 'user': user2, 'pw': user2_badpass, 'headers': allow_header },
++ { 'path': '/repos/A/D', 'status': 401, 'user': user2, 'pw': user2_badpass, 'headers': allow_header },
++ { 'path': '/repos/A/D/', 'status': 401, 'user': user2, 'pw': user2_badpass, 'headers': allow_header },
++ { 'path': '/repos/A/D/gamma', 'status': 401, 'user': user2, 'pw': user2_badpass, 'headers': allow_header },
++ { 'path': '/repos/A/D/H', 'status': 401, 'user': user2, 'pw': user2_badpass, 'headers': allow_header },
++ { 'path': '/repos/A/D/H/', 'status': 401, 'user': user2, 'pw': user2_badpass, 'headers': allow_header },
++ { 'path': '/repos/A/D/H/chi', 'status': 401, 'user': user2, 'pw': user2_badpass, 'headers': allow_header },
++
++ )
++
++ verify_gets(test_area_url, sallrany_tests)
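
As a companion to the comment above, the sketch below condenses the /authz-test-work/sallrany
<Location> block that win-tests.py generates later in this patch: Satisfy All keeps the old
mod_authz_svn hook mandatory, while the <RequireAny> group accepts either a valid user or an
ALLOW=1 request header. This is only an illustration in the same string-building style; the
function name and its arguments are placeholders.

def example_sallrany_location(parent_path, authz_file, users_file):
    # Illustrative sketch only: condenses the sallrany block built by
    # win-tests.py further down in this patch; all arguments are placeholders.
    return (
        '<Location /authz-test-work/sallrany>\n'
        '  DAV svn\n'
        '  SVNParentPath ' + parent_path + '\n'
        '  AuthzSVNAccessFile ' + authz_file + '\n'
        '  AuthType Basic\n'
        '  AuthName "Subversion Repository"\n'
        '  AuthUserFile ' + users_file + '\n'
        '  AuthzSendForbiddenOnFailure On\n'
        '  Satisfy All\n'                          # old-style hooks must also pass
        '  <RequireAny>\n'
        '    Require valid-user\n'
        "    Require expr req('ALLOW') == '1'\n"   # the ALLOW header these tests send
        '  </RequireAny>\n'
        '  SVNPathAuthz On\n'
        '</Location>\n'
    )
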
++
++# See comments on authn_sallrany test for some background on the interaction
++# of Satisfy Any and the newer Require blocks.
++@SkipUnless(svntest.main.is_httpd_authz_provider_enabled)
++def authn_sallrall(sbox):
++ "test satisfy all require all config"
++ sbox.build(read_only = True, create_wc = False)
++
++ test_area_url = sbox.repo_url.replace('/svn-test-work/local_tmp/repos',
++ '/authz-test-work/sallrall')
++
++ write_authz_file(sbox)
++
++ allow_header = { 'ALLOW': '1' }
++
++ sallrall_tests = (
++ #anon access isn't allowed without ALLOW header
++ { 'path': '', 'status': 403, },
++ { 'path': '/', 'status': 403, },
++ { 'path': '/repos', 'status': 403, },
++ { 'path': '/repos/', 'status': 403, },
++ { 'path': '/repos/A', 'status': 403, },
++ { 'path': '/repos/A/', 'status': 403, },
++ { 'path': '/repos/A/D', 'status': 403, },
++ { 'path': '/repos/A/D/', 'status': 403, },
++ { 'path': '/repos/A/D/gamma', 'status': 403, },
++ { 'path': '/repos/A/D/H', 'status': 403, },
++ { 'path': '/repos/A/D/H/', 'status': 403, },
++ { 'path': '/repos/A/D/H/chi', 'status': 403, },
++ # auth is configured but no access is allowed without the ALLOW header
++ { 'path': '', 'status': 403, 'user': user1, 'pw': user1_pass},
++ { 'path': '/', 'status': 403, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos', 'status': 403, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/', 'status': 403, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A', 'status': 403, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A/', 'status': 403, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A/D', 'status': 403, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A/D/', 'status': 403, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A/D/gamma', 'status': 403, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A/D/H', 'status': 403, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A/D/H/', 'status': 403, 'user': user1, 'pw': user1_pass},
++ { 'path': '/repos/A/D/H/chi', 'status': 403, 'user': user1, 'pw': user1_pass},
++ # try with the wrong password for user1
++ { 'path': '', 'status': 403, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/', 'status': 403, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos', 'status': 403, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/', 'status': 403, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/A', 'status': 403, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/A/', 'status': 403, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/A/D', 'status': 403, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/A/D/', 'status': 403, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/A/D/gamma', 'status': 403, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/A/D/H', 'status': 403, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/A/D/H/', 'status': 403, 'user': user1, 'pw': user1_badpass},
++ { 'path': '/repos/A/D/H/chi', 'status': 403, 'user': user1, 'pw': user1_badpass},
++ # auth is configured but no access is allowed without the ALLOW header
++ { 'path': '', 'status': 403, 'user': user2, 'pw': user2_pass},
++ { 'path': '/', 'status': 403, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos', 'status': 403, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos/', 'status': 403, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos/A', 'status': 403, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos/A/', 'status': 403, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos/A/D', 'status': 403, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos/A/D/', 'status': 403, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos/A/D/gamma', 'status': 403, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos/A/D/H', 'status': 403, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos/A/D/H/', 'status': 403, 'user': user2, 'pw': user2_pass},
++ { 'path': '/repos/A/D/H/chi', 'status': 403, 'user': user2, 'pw': user2_pass},
++ # try with the wrong password for user2
++ { 'path': '', 'status': 403, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/', 'status': 403, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos', 'status': 403, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/', 'status': 403, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/A', 'status': 403, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/A/', 'status': 403, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/A/D', 'status': 403, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/A/D/', 'status': 403, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/A/D/gamma', 'status': 403, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/A/D/H', 'status': 403, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/A/D/H/', 'status': 403, 'user': user2, 'pw': user2_badpass},
++ { 'path': '/repos/A/D/H/chi', 'status': 403, 'user': user2, 'pw': user2_badpass},
++ # anon is not allowed even with ALLOW header
++ { 'path': '', 'status': 401, 'headers': allow_header },
++ { 'path': '/', 'status': 401, 'headers': allow_header },
++ { 'path': '/repos', 'status': 401, 'headers': allow_header },
++ { 'path': '/repos/', 'status': 401, 'headers': allow_header },
++ { 'path': '/repos/A', 'status': 401, 'headers': allow_header },
++ { 'path': '/repos/A/', 'status': 401, 'headers': allow_header },
++ { 'path': '/repos/A/D', 'status': 401, 'headers': allow_header },
++ { 'path': '/repos/A/D/', 'status': 401, 'headers': allow_header },
++ { 'path': '/repos/A/D/gamma', 'status': 401, 'headers': allow_header },
++ { 'path': '/repos/A/D/H', 'status': 401, 'headers': allow_header },
++ { 'path': '/repos/A/D/H/', 'status': 401, 'headers': allow_header },
++ { 'path': '/repos/A/D/H/chi', 'status': 401, 'headers': allow_header },
++ # auth is configured and user1 is allowed access to the repo including H
++ { 'path': '', 'status': 301, 'user': user1, 'pw': user1_pass, 'headers': allow_header },
++ { 'path': '/', 'status': 200, 'user': user1, 'pw': user1_pass, 'headers': allow_header },
++ { 'path': '/repos', 'status': 301, 'user': user1, 'pw': user1_pass, 'headers': allow_header },
++ { 'path': '/repos/', 'status': 200, 'user': user1, 'pw': user1_pass, 'headers': allow_header },
++ { 'path': '/repos/A', 'status': 301, 'user': user1, 'pw': user1_pass, 'headers': allow_header },
++ { 'path': '/repos/A/', 'status': 200, 'user': user1, 'pw': user1_pass, 'headers': allow_header },
++ { 'path': '/repos/A/D', 'status': 301, 'user': user1, 'pw': user1_pass, 'headers': allow_header },
++ { 'path': '/repos/A/D/', 'status': 200, 'body': ls_of_D_H,
++ 'user': user1, 'pw': user1_pass, 'headers': allow_header },
++ { 'path': '/repos/A/D/gamma', 'status': 200, 'user': user1, 'pw': user1_pass, 'headers': allow_header },
++ { 'path': '/repos/A/D/H', 'status': 301, 'user': user1, 'pw': user1_pass, 'headers': allow_header },
++ { 'path': '/repos/A/D/H/', 'status': 200, 'body': ls_of_H, 'user': user1, 'pw': user1_pass, 'headers': allow_header },
++ { 'path': '/repos/A/D/H/chi', 'status': 200, 'user': user1, 'pw': user1_pass, 'headers': allow_header },
++ # try with the wrong password for user1
++ { 'path': '', 'status': 401, 'user': user1, 'pw': user1_badpass, 'headers': allow_header },
++ { 'path': '/', 'status': 401, 'user': user1, 'pw': user1_badpass, 'headers': allow_header },
++ { 'path': '/repos', 'status': 401, 'user': user1, 'pw': user1_badpass, 'headers': allow_header },
++ { 'path': '/repos/', 'status': 401, 'user': user1, 'pw': user1_badpass, 'headers': allow_header },
++ { 'path': '/repos/A', 'status': 401, 'user': user1, 'pw': user1_badpass, 'headers': allow_header },
++ { 'path': '/repos/A/', 'status': 401, 'user': user1, 'pw': user1_badpass, 'headers': allow_header },
++ { 'path': '/repos/A/D', 'status': 401, 'user': user1, 'pw': user1_badpass, 'headers': allow_header },
++ { 'path': '/repos/A/D/', 'status': 401, 'user': user1, 'pw': user1_badpass, 'headers': allow_header },
++ { 'path': '/repos/A/D/gamma', 'status': 401, 'user': user1, 'pw': user1_badpass, 'headers': allow_header },
++ { 'path': '/repos/A/D/H', 'status': 401, 'user': user1, 'pw': user1_badpass, 'headers': allow_header },
++ { 'path': '/repos/A/D/H/', 'status': 401, 'user': user1, 'pw': user1_badpass, 'headers': allow_header },
++ { 'path': '/repos/A/D/H/chi', 'status': 401, 'user': user1, 'pw': user1_badpass, 'headers': allow_header },
++ # auth is configured and user2 is not allowed access to H
++ { 'path': '', 'status': 301, 'user': user2, 'pw': user2_pass, 'headers': allow_header },
++ { 'path': '/', 'status': 200, 'user': user2, 'pw': user2_pass, 'headers': allow_header },
++ { 'path': '/repos', 'status': 301, 'user': user2, 'pw': user2_pass, 'headers': allow_header },
++ { 'path': '/repos/', 'status': 200, 'user': user2, 'pw': user2_pass, 'headers': allow_header },
++ { 'path': '/repos/A', 'status': 301, 'user': user2, 'pw': user2_pass, 'headers': allow_header },
++ { 'path': '/repos/A/', 'status': 200, 'user': user2, 'pw': user2_pass, 'headers': allow_header },
++ { 'path': '/repos/A/D', 'status': 301, 'user': user2, 'pw': user2_pass, 'headers': allow_header },
++ { 'path': '/repos/A/D/', 'status': 200, 'body': ls_of_D_no_H,
++ 'user': user2, 'pw': user2_pass, 'headers': allow_header },
++ { 'path': '/repos/A/D/gamma', 'status': 200, 'user': user2, 'pw': user2_pass, 'headers': allow_header },
++ { 'path': '/repos/A/D/H', 'status': 403, 'user': user2, 'pw': user2_pass, 'headers': allow_header },
++ { 'path': '/repos/A/D/H/', 'status': 403, 'user': user2, 'pw': user2_pass, 'headers': allow_header },
++ { 'path': '/repos/A/D/H/chi', 'status': 403, 'user': user2, 'pw': user2_pass, 'headers': allow_header },
++ # try with the wrong password for user2
++ { 'path': '', 'status': 401, 'user': user2, 'pw': user2_badpass, 'headers': allow_header },
++ { 'path': '/', 'status': 401, 'user': user2, 'pw': user2_badpass, 'headers': allow_header },
++ { 'path': '/repos', 'status': 401, 'user': user2, 'pw': user2_badpass, 'headers': allow_header },
++ { 'path': '/repos/', 'status': 401, 'user': user2, 'pw': user2_badpass, 'headers': allow_header },
++ { 'path': '/repos/A', 'status': 401, 'user': user2, 'pw': user2_badpass, 'headers': allow_header },
++ { 'path': '/repos/A/', 'status': 401, 'user': user2, 'pw': user2_badpass, 'headers': allow_header },
++ { 'path': '/repos/A/D', 'status': 401, 'user': user2, 'pw': user2_badpass, 'headers': allow_header },
++ { 'path': '/repos/A/D/', 'status': 401, 'user': user2, 'pw': user2_badpass, 'headers': allow_header },
++ { 'path': '/repos/A/D/gamma', 'status': 401, 'user': user2, 'pw': user2_badpass, 'headers': allow_header },
++ { 'path': '/repos/A/D/H', 'status': 401, 'user': user2, 'pw': user2_badpass, 'headers': allow_header },
++ { 'path': '/repos/A/D/H/', 'status': 401, 'user': user2, 'pw': user2_badpass, 'headers': allow_header },
++ { 'path': '/repos/A/D/H/chi', 'status': 401, 'user': user2, 'pw': user2_badpass, 'headers': allow_header },
++
++ )
++
++ verify_gets(test_area_url, sallrall_tests)
++
++
++########################################################################
++# Run the tests
++
++
++# list all tests here, starting with None:
++test_list = [ None,
++ anon,
++ mixed,
++ mixed_noauthwhenanon,
++ authn,
++ authn_anonoff,
++ authn_lcuser,
++ authn_group,
++ authn_sallrany,
++ authn_sallrall,
++ ]
++serial_only = True
++
++if __name__ == '__main__':
++ svntest.main.run_tests(test_list)
++ # NOTREACHED
++
++
++### End of file.
+
+Property changes on: subversion/tests/cmdline/mod_authz_svn_tests.py
+___________________________________________________________________
+Added: svn:eol-style
+## -0,0 +1 ##
++native
+\ No newline at end of property
+Index: subversion/tests/cmdline/svntest/main.py
+===================================================================
+--- a/subversion/tests/cmdline/svntest/main.py (revision 1691883)
++++ b/subversion/tests/cmdline/svntest/main.py (working copy)
+@@ -1378,6 +1378,30 @@ def is_plaintext_password_storage_disabled():
+ return False
+ return True
+
++
++# https://issues.apache.org/bugzilla/show_bug.cgi?id=56480
++# https://issues.apache.org/bugzilla/show_bug.cgi?id=55397
++__mod_dav_url_quoting_broken_versions = frozenset([
++ '2.2.27',
++ '2.2.26',
++ '2.2.25',
++ '2.4.9',
++ '2.4.8',
++ '2.4.7',
++ '2.4.6',
++ '2.4.5',
++])
++def is_mod_dav_url_quoting_broken():
++ if is_ra_type_dav():
++ return (options.httpd_version in __mod_dav_url_quoting_broken_versions)
++ return None
++
++def is_httpd_authz_provider_enabled():
++ if is_ra_type_dav():
++ v = options.httpd_version.split('.')
++ return (v[0] == '2' and int(v[1]) >= 3) or int(v[0]) > 2
++ return None
++
+ ######################################################################
+
+
+@@ -1435,6 +1459,8 @@ class TestSpawningThread(threading.Thread):
+ args.append('--ssl-cert=' + options.ssl_cert)
+ if options.http_proxy:
+ args.append('--http-proxy=' + options.http_proxy)
++ if options.httpd_version:
++ args.append('--httpd-version=' + options.httpd_version)
+
+ result, stdout_lines, stderr_lines = spawn_process(command, 0, False, None,
+ *args)
+@@ -1600,6 +1626,12 @@ class TestRunner:
+ sandbox.cleanup_test_paths()
+ return exit_code
+
++def is_httpd_authz_provider_enabled():
++ if is_ra_type_dav():
++ v = options.httpd_version.split('.')
++ return (v[0] == '2' and int(v[1]) >= 3) or int(v[0]) > 2
++ return None
++
+ ######################################################################
+ # Main testing functions
+
+@@ -1780,6 +1812,8 @@ def _create_parser():
+ help='Path to SSL server certificate.')
+ parser.add_option('--http-proxy', action='store',
+ help='Use the HTTP Proxy at hostname:port.')
++ parser.add_option('--httpd-version', action='store',
++ help='Assume HTTPD is this version.')
+ parser.add_option('--tools-bin', action='store', dest='tools_bin',
+ help='Use the svn tools installed in this path')
+
+Index: win-tests.py
+===================================================================
+--- a/win-tests.py (revision 1691883)
++++ b/win-tests.py (working copy)
+@@ -481,6 +481,7 @@ class Httpd:
+ self.httpd_config = os.path.join(self.root, 'httpd.conf')
+ self.httpd_users = os.path.join(self.root, 'users')
+ self.httpd_mime_types = os.path.join(self.root, 'mime.types')
++ self.httpd_groups = os.path.join(self.root, 'groups')
+ self.abs_builddir = abs_builddir
+ self.abs_objdir = abs_objdir
+ self.service_name = 'svn-test-httpd-' + str(httpd_port)
+@@ -494,6 +495,7 @@ class Httpd:
+ create_target_dir(self.root_dir)
+
+ self._create_users_file()
++ self._create_groups_file()
+ self._create_mime_types_file()
+ self._create_dontdothat_file()
+
+@@ -540,6 +542,8 @@ class Httpd:
+ if self.httpd_ver >= 2.2:
+ fp.write(self._sys_module('auth_basic_module', 'mod_auth_basic.so'))
+ fp.write(self._sys_module('authn_file_module', 'mod_authn_file.so'))
++ fp.write(self._sys_module('authz_groupfile_module', 'mod_authz_groupfile.so'))
++ fp.write(self._sys_module('authz_host_module', 'mod_authz_host.so'))
+ else:
+ fp.write(self._sys_module('auth_module', 'mod_auth.so'))
+ fp.write(self._sys_module('alias_module', 'mod_alias.so'))
+@@ -562,6 +566,7 @@ class Httpd:
+ # Define two locations for repositories
+ fp.write(self._svn_repo('repositories'))
+ fp.write(self._svn_repo('local_tmp'))
++ fp.write(self._svn_authz_repo())
+
+ # And two redirects for the redirect tests
+ fp.write('RedirectMatch permanent ^/svn-test-work/repositories/'
+@@ -592,7 +597,18 @@ class Httpd:
+ 'jrandom', 'rayjandom'])
+ os.spawnv(os.P_WAIT, htpasswd, ['htpasswd.exe', '-bp', self.httpd_users,
+ 'jconstant', 'rayjandom'])
++ os.spawnv(os.P_WAIT, htpasswd, ['htpasswd.exe', '-bp', self.httpd_users,
++ 'JRANDOM', 'rayjandom'])
++ os.spawnv(os.P_WAIT, htpasswd, ['htpasswd.exe', '-bp', self.httpd_users,
++ 'JCONSTANT', 'rayjandom'])
+
++ def _create_groups_file(self):
++ "Create groups for mod_authz_svn tests"
++ fp = open(self.httpd_groups, 'w')
++ fp.write('random: jrandom\n')
++ fp.write('constant: jconstant\n')
++ fp.close()
++
+ def _create_mime_types_file(self):
+ "Create empty mime.types file"
+ fp = open(self.httpd_mime_types, 'w')
+@@ -652,6 +668,153 @@ class Httpd:
+ ' DontDoThatConfigFile ' + self._quote(self.dontdothat_file) + '\n' \
+ '</Location>\n'
+
++ def _svn_authz_repo(self):
++ local_tmp = os.path.join(self.abs_builddir,
++ CMDLINE_TEST_SCRIPT_NATIVE_PATH,
++ 'svn-test-work', 'local_tmp')
++ return \
++ '<Location /authz-test-work/anon>' + '\n' \
++ ' DAV svn' + '\n' \
++ ' SVNParentPath ' + local_tmp + '\n' \
++ ' AuthzSVNAccessFile ' + self._quote(self.authz_file) + '\n' \
++ ' SVNAdvertiseV2Protocol ' + self.httpv2_option + '\n' \
++ ' SVNListParentPath On' + '\n' \
++ ' <IfModule mod_authz_core.c>' + '\n' \
++ ' Require all granted' + '\n' \
++ ' </IfModule>' + '\n' \
++ ' <IfModule !mod_authz_core.c>' + '\n' \
++ ' Allow from all' + '\n' \
++ ' </IfModule>' + '\n' \
++ ' SVNPathAuthz ' + self.path_authz_option + '\n' \
++ '</Location>' + '\n' \
++ '<Location /authz-test-work/mixed>' + '\n' \
++ ' DAV svn' + '\n' \
++ ' SVNParentPath ' + local_tmp + '\n' \
++ ' AuthzSVNAccessFile ' + self._quote(self.authz_file) + '\n' \
++ ' SVNAdvertiseV2Protocol ' + self.httpv2_option + '\n' \
++ ' SVNListParentPath On' + '\n' \
++ ' AuthType Basic' + '\n' \
++ ' AuthName "Subversion Repository"' + '\n' \
++ ' AuthUserFile ' + self._quote(self.httpd_users) + '\n' \
++ ' Require valid-user' + '\n' \
++ ' Satisfy Any' + '\n' \
++ ' SVNPathAuthz ' + self.path_authz_option + '\n' \
++ '</Location>' + '\n' \
++ '<Location /authz-test-work/mixed-noauthwhenanon>' + '\n' \
++ ' DAV svn' + '\n' \
++ ' SVNParentPath ' + local_tmp + '\n' \
++ ' AuthzSVNAccessFile ' + self._quote(self.authz_file) + '\n' \
++ ' SVNAdvertiseV2Protocol ' + self.httpv2_option + '\n' \
++ ' SVNListParentPath On' + '\n' \
++ ' AuthType Basic' + '\n' \
++ ' AuthName "Subversion Repository"' + '\n' \
++ ' AuthUserFile ' + self._quote(self.httpd_users) + '\n' \
++ ' Require valid-user' + '\n' \
++ ' AuthzSVNNoAuthWhenAnonymousAllowed On' + '\n' \
++ ' SVNPathAuthz On' + '\n' \
++ '</Location>' + '\n' \
++ '<Location /authz-test-work/authn>' + '\n' \
++ ' DAV svn' + '\n' \
++ ' SVNParentPath ' + local_tmp + '\n' \
++ ' AuthzSVNAccessFile ' + self._quote(self.authz_file) + '\n' \
++ ' SVNAdvertiseV2Protocol ' + self.httpv2_option + '\n' \
++ ' SVNListParentPath On' + '\n' \
++ ' AuthType Basic' + '\n' \
++ ' AuthName "Subversion Repository"' + '\n' \
++ ' AuthUserFile ' + self._quote(self.httpd_users) + '\n' \
++ ' Require valid-user' + '\n' \
++ ' SVNPathAuthz ' + self.path_authz_option + '\n' \
++ '</Location>' + '\n' \
++ '<Location /authz-test-work/authn-anonoff>' + '\n' \
++ ' DAV svn' + '\n' \
++ ' SVNParentPath ' + local_tmp + '\n' \
++ ' AuthzSVNAccessFile ' + self._quote(self.authz_file) + '\n' \
++ ' SVNAdvertiseV2Protocol ' + self.httpv2_option + '\n' \
++ ' SVNListParentPath On' + '\n' \
++ ' AuthType Basic' + '\n' \
++ ' AuthName "Subversion Repository"' + '\n' \
++ ' AuthUserFile ' + self._quote(self.httpd_users) + '\n' \
++ ' Require valid-user' + '\n' \
++ ' AuthzSVNAnonymous Off' + '\n' \
++ ' SVNPathAuthz On' + '\n' \
++ '</Location>' + '\n' \
++ '<Location /authz-test-work/authn-lcuser>' + '\n' \
++ ' DAV svn' + '\n' \
++ ' SVNParentPath ' + local_tmp + '\n' \
++ ' AuthzSVNAccessFile ' + self._quote(self.authz_file) + '\n' \
++ ' SVNAdvertiseV2Protocol ' + self.httpv2_option + '\n' \
++ ' SVNListParentPath On' + '\n' \
++ ' AuthType Basic' + '\n' \
++ ' AuthName "Subversion Repository"' + '\n' \
++ ' AuthUserFile ' + self._quote(self.httpd_users) + '\n' \
++ ' Require valid-user' + '\n' \
++ ' AuthzForceUsernameCase Lower' + '\n' \
++ ' SVNPathAuthz ' + self.path_authz_option + '\n' \
++ '</Location>' + '\n' \
++ '<Location /authz-test-work/authn-lcuser>' + '\n' \
++ ' DAV svn' + '\n' \
++ ' SVNParentPath ' + local_tmp + '\n' \
++ ' AuthzSVNAccessFile ' + self._quote(self.authz_file) + '\n' \
++ ' SVNAdvertiseV2Protocol ' + self.httpv2_option + '\n' \
++ ' SVNListParentPath On' + '\n' \
++ ' AuthType Basic' + '\n' \
++ ' AuthName "Subversion Repository"' + '\n' \
++ ' AuthUserFile ' + self._quote(self.httpd_users) + '\n' \
++ ' Require valid-user' + '\n' \
++ ' AuthzForceUsernameCase Lower' + '\n' \
++ ' SVNPathAuthz ' + self.path_authz_option + '\n' \
++ '</Location>' + '\n' \
++ '<Location /authz-test-work/authn-group>' + '\n' \
++ ' DAV svn' + '\n' \
++ ' SVNParentPath ' + local_tmp + '\n' \
++ ' AuthzSVNAccessFile ' + self._quote(self.authz_file) + '\n' \
++ ' SVNAdvertiseV2Protocol ' + self.httpv2_option + '\n' \
++ ' SVNListParentPath On' + '\n' \
++ ' AuthType Basic' + '\n' \
++ ' AuthName "Subversion Repository"' + '\n' \
++ ' AuthUserFile ' + self._quote(self.httpd_users) + '\n' \
++ ' AuthGroupFile ' + self._quote(self.httpd_groups) + '\n' \
++ ' Require group random' + '\n' \
++ ' AuthzSVNAuthoritative Off' + '\n' \
++ ' SVNPathAuthz On' + '\n' \
++ '</Location>' + '\n' \
++ '<IfModule mod_authz_core.c>' + '\n' \
++ '<Location /authz-test-work/sallrany>' + '\n' \
++ ' DAV svn' + '\n' \
++ ' SVNParentPath ' + local_tmp + '\n' \
++ ' AuthzSVNAccessFile ' + self._quote(self.authz_file) + '\n' \
++ ' SVNAdvertiseV2Protocol ' + self.httpv2_option + '\n' \
++ ' SVNListParentPath On' + '\n' \
++ ' AuthType Basic' + '\n' \
++ ' AuthName "Subversion Repository"' + '\n' \
++ ' AuthUserFile ' + self._quote(self.httpd_users) + '\n' \
++ ' AuthzSendForbiddenOnFailure On' + '\n' \
++ ' Satisfy All' + '\n' \
++ ' <RequireAny>' + '\n' \
++ ' Require valid-user' + '\n' \
++ ' Require expr req(\'ALLOW\') == \'1\'' + '\n' \
++ ' </RequireAny>' + '\n' \
++ ' SVNPathAuthz ' + self.path_authz_option + '\n' \
++ '</Location>' + '\n' \
++ '<Location /authz-test-work/sallrall>'+ '\n' \
++ ' DAV svn' + '\n' \
++ ' SVNParentPath ' + local_tmp + '\n' \
++ ' AuthzSVNAccessFile ' + self._quote(self.authz_file) + '\n' \
++ ' SVNAdvertiseV2Protocol ' + self.httpv2_option + '\n' \
++ ' SVNListParentPath On' + '\n' \
++ ' AuthType Basic' + '\n' \
++ ' AuthName "Subversion Repository"' + '\n' \
++ ' AuthUserFile ' + self._quote(self.httpd_users) + '\n' \
++ ' AuthzSendForbiddenOnFailure On' + '\n' \
++ ' Satisfy All' + '\n' \
++ ' <RequireAll>' + '\n' \
++ ' Require valid-user' + '\n' \
++ ' Require expr req(\'ALLOW\') == \'1\'' + '\n' \
++ ' </RequireAll>' + '\n' \
++ ' SVNPathAuthz ' + self.path_authz_option + '\n' \
++ '</Location>' + '\n' \
++ '</IfModule>' + '\n' \
++
+ def start(self):
+ if self.service:
+ self._start_service()
+@@ -786,6 +949,10 @@ if not test_javahl:
+ log_file = os.path.join(abs_builddir, log)
+ fail_log_file = os.path.join(abs_builddir, faillog)
+
++ if run_httpd:
++ httpd_version = "%.1f" % daemon.httpd_ver
++ else:
++ httpd_version = None
+ th = run_tests.TestHarness(abs_srcdir, abs_builddir,
+ log_file,
+ fail_log_file,
+@@ -795,6 +962,7 @@ if not test_javahl:
+ fsfs_sharding, fsfs_packing,
+ list_tests, svn_bin, mode_filter,
+ milestone_filter,
++ httpd_version=httpd_version,
+ set_log_level=log_level, ssl_cert=ssl_cert)
+ old_cwd = os.getcwd()
+ try:
diff --git a/yocto-poky/meta/recipes-devtools/subversion/subversion-1.8.13/subversion-CVE-2015-3187.patch b/yocto-poky/meta/recipes-devtools/subversion/subversion-1.8.13/subversion-CVE-2015-3187.patch
new file mode 100644
index 000000000..494e11c6c
--- /dev/null
+++ b/yocto-poky/meta/recipes-devtools/subversion/subversion-1.8.13/subversion-CVE-2015-3187.patch
@@ -0,0 +1,346 @@
+Fix CVE-2015-3187
+
+Patch is from:
+http://subversion.apache.org/security/CVE-2015-3187-advisory.txt
+
+Upstream-Status: Backport
+
+Signed-off-by: Wenzong Fan <wenzong.fan@windriver.com>
+
+Index: subversion/libsvn_repos/rev_hunt.c
+===================================================================
+--- a/subversion/libsvn_repos/rev_hunt.c (revision 1685077)
++++ b/subversion/libsvn_repos/rev_hunt.c (working copy)
+@@ -726,23 +726,6 @@ svn_repos_trace_node_locations(svn_fs_t *fs,
+ if (! prev_path)
+ break;
+
+- if (authz_read_func)
+- {
+- svn_boolean_t readable;
+- svn_fs_root_t *tmp_root;
+-
+- SVN_ERR(svn_fs_revision_root(&tmp_root, fs, revision, currpool));
+- SVN_ERR(authz_read_func(&readable, tmp_root, path,
+- authz_read_baton, currpool));
+- if (! readable)
+- {
+- svn_pool_destroy(lastpool);
+- svn_pool_destroy(currpool);
+-
+- return SVN_NO_ERROR;
+- }
+- }
+-
+ /* Assign the current path to all younger revisions until we reach
+ the copy target rev. */
+ while ((revision_ptr < revision_ptr_end)
+@@ -765,6 +748,20 @@ svn_repos_trace_node_locations(svn_fs_t *fs,
+ path = prev_path;
+ revision = prev_rev;
+
++ if (authz_read_func)
++ {
++ svn_boolean_t readable;
++ SVN_ERR(svn_fs_revision_root(&root, fs, revision, currpool));
++ SVN_ERR(authz_read_func(&readable, root, path,
++ authz_read_baton, currpool));
++ if (!readable)
++ {
++ svn_pool_destroy(lastpool);
++ svn_pool_destroy(currpool);
++ return SVN_NO_ERROR;
++ }
++ }
++
+ /* Clear last pool and switch. */
+ svn_pool_clear(lastpool);
+ tmppool = lastpool;
+Index: subversion/tests/cmdline/authz_tests.py
+===================================================================
+--- a/subversion/tests/cmdline/authz_tests.py (revision 1685077)
++++ b/subversion/tests/cmdline/authz_tests.py (working copy)
+@@ -609,8 +609,10 @@ def authz_log_and_tracing_test(sbox):
+
+ ## cat
+
++ expected_err2 = ".*svn: E195012: Unable to find repository location.*"
++
+ # now see if we can look at the older version of rho
+- svntest.actions.run_and_verify_svn(None, None, expected_err,
++ svntest.actions.run_and_verify_svn(None, None, expected_err2,
+ 'cat', '-r', '2', D_url+'/rho')
+
+ if sbox.repo_url.startswith('http'):
+@@ -627,10 +629,11 @@ def authz_log_and_tracing_test(sbox):
+ svntest.actions.run_and_verify_svn(None, None, expected_err,
+ 'diff', '-r', 'HEAD', G_url+'/rho')
+
+- svntest.actions.run_and_verify_svn(None, None, expected_err,
++ # diff treats the unreadable path as indicating an add so no error
++ svntest.actions.run_and_verify_svn(None, None, [],
+ 'diff', '-r', '2', D_url+'/rho')
+
+- svntest.actions.run_and_verify_svn(None, None, expected_err,
++ svntest.actions.run_and_verify_svn(None, None, [],
+ 'diff', '-r', '2:4', D_url+'/rho')
+
+ # test whether read access is correctly granted and denied
+Index: subversion/tests/libsvn_repos/repos-test.c
+===================================================================
+--- a/subversion/tests/libsvn_repos/repos-test.c (revision 1685077)
++++ b/subversion/tests/libsvn_repos/repos-test.c (working copy)
+@@ -3524,6 +3524,245 @@ test_load_r0_mergeinfo(const svn_test_opts_t *opts
+ return SVN_NO_ERROR;
+ }
+
++static svn_error_t *
++mkdir_delete_copy(svn_repos_t *repos,
++ const char *src,
++ const char *dst,
++ apr_pool_t *pool)
++{
++ svn_fs_t *fs = svn_repos_fs(repos);
++ svn_revnum_t youngest_rev;
++ svn_fs_txn_t *txn;
++ svn_fs_root_t *txn_root, *rev_root;
++
++ SVN_ERR(svn_fs_youngest_rev(&youngest_rev, fs, pool));
++
++ SVN_ERR(svn_fs_begin_txn(&txn, fs, youngest_rev, pool));
++ SVN_ERR(svn_fs_txn_root(&txn_root, txn, pool));
++ SVN_ERR(svn_fs_make_dir(txn_root, "A/T", pool));
++ SVN_ERR(svn_repos_fs_commit_txn(NULL, repos, &youngest_rev, txn, pool));
++
++ SVN_ERR(svn_fs_begin_txn(&txn, fs, youngest_rev, pool));
++ SVN_ERR(svn_fs_txn_root(&txn_root, txn, pool));
++ SVN_ERR(svn_fs_delete(txn_root, "A/T", pool));
++ SVN_ERR(svn_repos_fs_commit_txn(NULL, repos, &youngest_rev, txn, pool));
++
++ SVN_ERR(svn_fs_begin_txn(&txn, fs, youngest_rev, pool));
++ SVN_ERR(svn_fs_txn_root(&txn_root, txn, pool));
++ SVN_ERR(svn_fs_revision_root(&rev_root, fs, youngest_rev - 1, pool));
++ SVN_ERR(svn_fs_copy(rev_root, src, txn_root, dst, pool));
++ SVN_ERR(svn_repos_fs_commit_txn(NULL, repos, &youngest_rev, txn, pool));
++
++ return SVN_NO_ERROR;
++}
++
++struct authz_read_baton_t {
++ apr_hash_t *paths;
++ apr_pool_t *pool;
++ const char *deny;
++};
++
++static svn_error_t *
++authz_read_func(svn_boolean_t *allowed,
++ svn_fs_root_t *root,
++ const char *path,
++ void *baton,
++ apr_pool_t *pool)
++{
++ struct authz_read_baton_t *b = baton;
++
++ if (b->deny && !strcmp(b->deny, path))
++ *allowed = FALSE;
++ else
++ *allowed = TRUE;
++
++ svn_hash_sets(b->paths, apr_pstrdup(b->pool, path), (void*)1);
++
++ return SVN_NO_ERROR;
++}
++
++static svn_error_t *
++verify_locations(apr_hash_t *actual,
++ apr_hash_t *expected,
++ apr_hash_t *checked,
++ apr_pool_t *pool)
++{
++ apr_hash_index_t *hi;
++
++ for (hi = apr_hash_first(pool, expected); hi; hi = apr_hash_next(hi))
++ {
++ const svn_revnum_t *rev = svn__apr_hash_index_key(hi);
++ const char *path = apr_hash_get(actual, rev, sizeof(svn_revnum_t));
++
++ if (!path)
++ return svn_error_createf(SVN_ERR_TEST_FAILED, NULL,
++ "expected %s for %d found (null)",
++ (char*)svn__apr_hash_index_val(hi),
++ (int)*rev);
++ else if (strcmp(path, svn__apr_hash_index_val(hi)))
++ return svn_error_createf(SVN_ERR_TEST_FAILED, NULL,
++ "expected %s for %d found %s",
++ (char*)svn__apr_hash_index_val(hi),
++ (int)*rev, path);
++
++ }
++
++ for (hi = apr_hash_first(pool, actual); hi; hi = apr_hash_next(hi))
++ {
++ const svn_revnum_t *rev = svn__apr_hash_index_key(hi);
++ const char *path = apr_hash_get(expected, rev, sizeof(svn_revnum_t));
++
++ if (!path)
++ return svn_error_createf(SVN_ERR_TEST_FAILED, NULL,
++ "found %s for %d expected (null)",
++ (char*)svn__apr_hash_index_val(hi),
++ (int)*rev);
++ else if (strcmp(path, svn__apr_hash_index_val(hi)))
++ return svn_error_createf(SVN_ERR_TEST_FAILED, NULL,
++ "found %s for %d expected %s",
++ (char*)svn__apr_hash_index_val(hi),
++ (int)*rev, path);
++
++ if (!svn_hash_gets(checked, path))
++ return svn_error_createf(SVN_ERR_TEST_FAILED, NULL,
++ "did not check %s", path);
++ }
++
++ return SVN_NO_ERROR;
++}
++
++static void
++set_expected(apr_hash_t *expected,
++ svn_revnum_t rev,
++ const char *path,
++ apr_pool_t *pool)
++{
++ svn_revnum_t *rp = apr_palloc(pool, sizeof(svn_revnum_t));
++ *rp = rev;
++ apr_hash_set(expected, rp, sizeof(svn_revnum_t), path);
++}
++
++static svn_error_t *
++trace_node_locations_authz(const svn_test_opts_t *opts,
++ apr_pool_t *pool)
++{
++ svn_repos_t *repos;
++ svn_fs_t *fs;
++ svn_revnum_t youngest_rev = 0;
++ svn_fs_txn_t *txn;
++ svn_fs_root_t *txn_root;
++ struct authz_read_baton_t arb;
++ apr_array_header_t *revs = apr_array_make(pool, 10, sizeof(svn_revnum_t));
++ apr_hash_t *locations;
++ apr_hash_t *expected = apr_hash_make(pool);
++ int i;
++
++ /* Create test repository. */
++ SVN_ERR(svn_test__create_repos(&repos, "test-repo-trace-node-locations-authz",
++ opts, pool));
++ fs = svn_repos_fs(repos);
++
++ /* r1 create A */
++ SVN_ERR(svn_fs_begin_txn(&txn, fs, youngest_rev, pool));
++ SVN_ERR(svn_fs_txn_root(&txn_root, txn, pool));
++ SVN_ERR(svn_fs_make_dir(txn_root, "A", pool));
++ SVN_ERR(svn_fs_make_file(txn_root, "A/f", pool));
++ SVN_ERR(svn_test__set_file_contents(txn_root, "A/f", "foobar", pool));
++ SVN_ERR(svn_repos_fs_commit_txn(NULL, repos, &youngest_rev, txn, pool));
++
++ /* r4 copy A to B */
++ SVN_ERR(mkdir_delete_copy(repos, "A", "B", pool));
++
++ /* r7 copy B to C */
++ SVN_ERR(mkdir_delete_copy(repos, "B", "C", pool));
++
++ /* r10 copy C to D */
++ SVN_ERR(mkdir_delete_copy(repos, "C", "D", pool));
++
++ SVN_ERR(svn_fs_youngest_rev(&youngest_rev, fs, pool));
++ SVN_ERR_ASSERT(youngest_rev == 10);
++
++ arb.paths = apr_hash_make(pool);
++ arb.pool = pool;
++ arb.deny = NULL;
++
++ apr_array_clear(revs);
++ for (i = 0; i <= youngest_rev; ++i)
++ APR_ARRAY_PUSH(revs, svn_revnum_t) = i;
++ set_expected(expected, 10, "/D/f", pool);
++ set_expected(expected, 8, "/C/f", pool);
++ set_expected(expected, 7, "/C/f", pool);
++ set_expected(expected, 5, "/B/f", pool);
++ set_expected(expected, 4, "/B/f", pool);
++ set_expected(expected, 2, "/A/f", pool);
++ set_expected(expected, 1, "/A/f", pool);
++ apr_hash_clear(arb.paths);
++ SVN_ERR(svn_repos_trace_node_locations(fs, &locations, "D/f", 10, revs,
++ authz_read_func, &arb, pool));
++ SVN_ERR(verify_locations(locations, expected, arb.paths, pool));
++
++ apr_array_clear(revs);
++ for (i = 1; i <= youngest_rev; ++i)
++ APR_ARRAY_PUSH(revs, svn_revnum_t) = i;
++ apr_hash_clear(arb.paths);
++ SVN_ERR(svn_repos_trace_node_locations(fs, &locations, "D/f", 10, revs,
++ authz_read_func, &arb, pool));
++ SVN_ERR(verify_locations(locations, expected, arb.paths, pool));
++
++ apr_array_clear(revs);
++ for (i = 2; i <= youngest_rev; ++i)
++ APR_ARRAY_PUSH(revs, svn_revnum_t) = i;
++ set_expected(expected, 1, NULL, pool);
++ apr_hash_clear(arb.paths);
++ SVN_ERR(svn_repos_trace_node_locations(fs, &locations, "D/f", 10, revs,
++ authz_read_func, &arb, pool));
++ SVN_ERR(verify_locations(locations, expected, arb.paths, pool));
++
++ apr_array_clear(revs);
++ for (i = 3; i <= youngest_rev; ++i)
++ APR_ARRAY_PUSH(revs, svn_revnum_t) = i;
++ set_expected(expected, 2, NULL, pool);
++ apr_hash_clear(arb.paths);
++ SVN_ERR(svn_repos_trace_node_locations(fs, &locations, "D/f", 10, revs,
++ authz_read_func, &arb, pool));
++ SVN_ERR(verify_locations(locations, expected, arb.paths, pool));
++
++ apr_array_clear(revs);
++ for (i = 6; i <= youngest_rev; ++i)
++ APR_ARRAY_PUSH(revs, svn_revnum_t) = i;
++ set_expected(expected, 5, NULL, pool);
++ set_expected(expected, 4, NULL, pool);
++ apr_hash_clear(arb.paths);
++ SVN_ERR(svn_repos_trace_node_locations(fs, &locations, "D/f", 10, revs,
++ authz_read_func, &arb, pool));
++ SVN_ERR(verify_locations(locations, expected, arb.paths, pool));
++
++ arb.deny = "/B/f";
++ apr_array_clear(revs);
++ for (i = 0; i <= youngest_rev; ++i)
++ APR_ARRAY_PUSH(revs, svn_revnum_t) = i;
++ apr_hash_clear(arb.paths);
++ SVN_ERR(svn_repos_trace_node_locations(fs, &locations, "D/f", 10, revs,
++ authz_read_func, &arb, pool));
++ SVN_ERR(verify_locations(locations, expected, arb.paths, pool));
++
++ apr_array_clear(revs);
++ for (i = 6; i <= youngest_rev; ++i)
++ APR_ARRAY_PUSH(revs, svn_revnum_t) = i;
++ apr_hash_clear(arb.paths);
++ SVN_ERR(svn_repos_trace_node_locations(fs, &locations, "D/f", 10, revs,
++ authz_read_func, &arb, pool));
++ SVN_ERR(verify_locations(locations, expected, arb.paths, pool));
++
++ APR_ARRAY_PUSH(revs, svn_revnum_t) = 0;
++ apr_hash_clear(arb.paths);
++ SVN_ERR(svn_repos_trace_node_locations(fs, &locations, "D/f", 10, revs,
++ authz_read_func, &arb, pool));
++ SVN_ERR(verify_locations(locations, expected, arb.paths, pool));
++
++ return SVN_NO_ERROR;
++}
++
+ /* The test table. */
+
+ struct svn_test_descriptor_t test_funcs[] =
+@@ -3573,5 +3812,7 @@ struct svn_test_descriptor_t test_funcs[] =
+ "test dumping with r0 mergeinfo"),
+ SVN_TEST_OPTS_PASS(test_load_r0_mergeinfo,
+ "test loading with r0 mergeinfo"),
++ SVN_TEST_OPTS_PASS(trace_node_locations_authz,
++ "authz for svn_repos_trace_node_locations"),
+ SVN_TEST_NULL
+ };
diff --git a/yocto-poky/meta/recipes-devtools/subversion/subversion_1.8.13.bb b/yocto-poky/meta/recipes-devtools/subversion/subversion_1.8.13.bb
index f843b9513..68934b7e0 100644
--- a/yocto-poky/meta/recipes-devtools/subversion/subversion_1.8.13.bb
+++ b/yocto-poky/meta/recipes-devtools/subversion/subversion_1.8.13.bb
@@ -1,6 +1,7 @@
SUMMARY = "Subversion (svn) version control system client"
SECTION = "console/network"
DEPENDS = "apr-util serf sqlite3 file"
+DEPENDS_append_class-native = " file-replacement-native"
RDEPENDS_${PN} = "serf"
LICENSE = "Apache-2"
HOMEPAGE = "http://subversion.tigris.org"
@@ -13,6 +14,8 @@ SRC_URI = "${APACHE_MIRROR}/${BPN}/${BPN}-${PV}.tar.bz2 \
file://libtool2.patch \
file://disable_macos.patch \
file://serf.m4-Regex-modified-to-allow-D-in-paths.patch \
+ file://subversion-CVE-2015-3184.patch \
+ file://subversion-CVE-2015-3187.patch \
"
SRC_URI[md5sum] = "4413417b529d7bdf82f74e50df02e88b"
SRC_URI[sha256sum] = "1099cc68840753b48aedb3a27ebd1e2afbcc84ddb871412e5d500e843d607579"
diff --git a/yocto-poky/meta/recipes-devtools/syslinux/syslinux/0010-gcc46-compatibility.patch b/yocto-poky/meta/recipes-devtools/syslinux/syslinux/0010-gcc46-compatibility.patch
new file mode 100644
index 000000000..6279258c2
--- /dev/null
+++ b/yocto-poky/meta/recipes-devtools/syslinux/syslinux/0010-gcc46-compatibility.patch
@@ -0,0 +1,37 @@
+don't break with old compilers and -DGNU_EFI_USE_MS_ABI
+It's entirely legitimate to request GNU_EFI_USE_MS_ABI even if the current
+compiler doesn't support it, and gnu-efi should transparently fall back to
+using legacy techniques to set the calling convention. We don't get type
+checking, but at least it will still compile.
+
+Adapted from gnu-efi
+
+Author: Steve Langasek <steve.langasek@ubuntu.com>
+Upstream-Status: Pending
+
+Index: syslinux-6.03/efi64/include/efi/x86_64/efibind.h
+===================================================================
+--- syslinux-6.03.orig/efi64/include/efi/x86_64/efibind.h
++++ syslinux-6.03/efi64/include/efi/x86_64/efibind.h
+@@ -25,8 +25,6 @@ Revision History
+ #if defined(GNU_EFI_USE_MS_ABI)
+ #if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7))
+ #define HAVE_USE_MS_ABI 1
+- #else
+- #error Compiler is too old for GNU_EFI_USE_MS_ABI
+ #endif
+ #endif
+
+Index: syslinux-6.03/gnu-efi/gnu-efi-3.0/inc/x86_64/efibind.h
+===================================================================
+--- syslinux-6.03.orig/gnu-efi/gnu-efi-3.0/inc/x86_64/efibind.h
++++ syslinux-6.03/gnu-efi/gnu-efi-3.0/inc/x86_64/efibind.h
+@@ -25,8 +25,6 @@ Revision History
+ #if defined(GNU_EFI_USE_MS_ABI)
+ #if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7))
+ #define HAVE_USE_MS_ABI 1
+- #else
+- #error Compiler is too old for GNU_EFI_USE_MS_ABI
+ #endif
+ #endif
+
diff --git a/yocto-poky/meta/recipes-devtools/syslinux/syslinux/0011-mk-MMD-does-not-take-any-arguments.patch b/yocto-poky/meta/recipes-devtools/syslinux/syslinux/0011-mk-MMD-does-not-take-any-arguments.patch
new file mode 100644
index 000000000..443c1ccc5
--- /dev/null
+++ b/yocto-poky/meta/recipes-devtools/syslinux/syslinux/0011-mk-MMD-does-not-take-any-arguments.patch
@@ -0,0 +1,33 @@
+From 0f3d83c25491951f1fa84c7957358ef3d1bcd8a9 Mon Sep 17 00:00:00 2001
+From: Khem Raj <raj.khem@gmail.com>
+Date: Wed, 9 Sep 2015 17:39:22 +0000
+Subject: [PATCH] mk: -MMD does not take any arguments
+
+Specify -Wp for each option; clang does not seem to accept
+-Wp,-x,y,-a,b
+
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+---
+Upstream-Status: Pending
+
+ mk/syslinux.mk | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+Index: syslinux-6.03/mk/syslinux.mk
+===================================================================
+--- syslinux-6.03.orig/mk/syslinux.mk
++++ syslinux-6.03/mk/syslinux.mk
+@@ -82,11 +82,11 @@ ARCH ?= $(strip $(SUBARCH))
+ GCCWARN = -W -Wall -Wstrict-prototypes $(DEBUGOPT)
+
+ # Common stanza to make gcc generate .*.d dependency files
+-MAKEDEPS = -Wp,-MT,$@,-MD,$(dir $@).$(notdir $@).d
++MAKEDEPS = -MT $@ -MD
+
+ # Dependencies that exclude system headers; use whenever we use
+ # header files from the platform.
+-UMAKEDEPS = -Wp,-MT,$@,-MMD,$(dir $@).$(notdir $@).d
++UMAKEDEPS = -MT $@ -MMD
+
+ # Items that are only appropriate during development; this file is
+ # removed when tarballs are generated.
diff --git a/yocto-poky/meta/recipes-devtools/syslinux/syslinux_6.03.bb b/yocto-poky/meta/recipes-devtools/syslinux/syslinux_6.03.bb
index ef9ae2fbe..8534528d7 100644
--- a/yocto-poky/meta/recipes-devtools/syslinux/syslinux_6.03.bb
+++ b/yocto-poky/meta/recipes-devtools/syslinux/syslinux_6.03.bb
@@ -21,6 +21,8 @@ SRC_URI = "${KERNELORG_MIRROR}/linux/utils/boot/syslinux/syslinux-${PV}.tar.xz \
file://0007-linux-syslinux-implement-ext_construct_sectmap_fs.patch \
file://0008-libinstaller-syslinuxext-implement-syslinux_patch_bo.patch \
file://0009-linux-syslinux-implement-install_bootblock.patch \
+ file://0010-gcc46-compatibility.patch \
+ file://0011-mk-MMD-does-not-take-any-arguments.patch \
"
SRC_URI[md5sum] = "92a253df9211e9c20172796ecf388f13"
diff --git a/yocto-poky/meta/recipes-devtools/unfs3/unfs3_0.9.22.r490.bb b/yocto-poky/meta/recipes-devtools/unfs3/unfs3_0.9.22.r490.bb
index 45cf54591..51308955d 100644
--- a/yocto-poky/meta/recipes-devtools/unfs3/unfs3_0.9.22.r490.bb
+++ b/yocto-poky/meta/recipes-devtools/unfs3/unfs3_0.9.22.r490.bb
@@ -29,6 +29,7 @@ SRC_URI = "svn://svn.code.sf.net/p/unfs3/code;module=trunk;rev=${MOD_PV} \
BBCLASSEXTEND = "native nativesdk"
inherit autotools
+EXTRA_OECONF_append_class-native = " --sbindir=${bindir}"
# Turn off these header detects else the inode search
# will walk entire file systems and this is a real problem
diff --git a/yocto-poky/meta/recipes-extended/bash/bash.inc b/yocto-poky/meta/recipes-extended/bash/bash.inc
index c06f157b8..020409fb6 100644
--- a/yocto-poky/meta/recipes-extended/bash/bash.inc
+++ b/yocto-poky/meta/recipes-extended/bash/bash.inc
@@ -7,7 +7,7 @@ DEPENDS = "ncurses bison-native"
inherit autotools gettext texinfo update-alternatives ptest
EXTRA_AUTORECONF += "--exclude=autoheader"
-EXTRA_OECONF = "--enable-job-control"
+EXTRA_OECONF = "--enable-job-control --without-bash-malloc"
# If NON_INTERACTIVE_LOGIN_SHELLS is defined, all login shells read the
# startup files, even if they are not interactive.
diff --git a/yocto-poky/meta/recipes-extended/byacc/byacc/byacc-open.patch b/yocto-poky/meta/recipes-extended/byacc/byacc/byacc-open.patch
index 916054340..005831130 100644
--- a/yocto-poky/meta/recipes-extended/byacc/byacc/byacc-open.patch
+++ b/yocto-poky/meta/recipes-extended/byacc/byacc/byacc-open.patch
@@ -1,3 +1,15 @@
+Ubuntu defaults to passing _FORTIFY_SOURCE=2 which breaks byacc as it doesn't
+pass enough arguments to open():
+
+ inlined from 'open_tmpfile' at byacc-20150711/main.c:588:5:
+ /usr/include/x86_64-linux-gnu/bits/fcntl2.h:50:24: error: call to '__open_missing_mode' declared with attribute error:
+ open with O_CREAT in second argument needs 3 arguments
+
+Add a mode of 0666 to fix this.
+
+Upstream-Status: Pending
+Signed-off-by: Ross Burton <ross.burton@intel.com>
+
diff --git a/main.c b/main.c
index 620ce3f..82071a4 100644
--- a/main.c
diff --git a/yocto-poky/meta/recipes-extended/bzip2/bzip2-1.0.6/fix-bunzip2-qt-returns-0-for-corrupt-archives.patch b/yocto-poky/meta/recipes-extended/bzip2/bzip2-1.0.6/fix-bunzip2-qt-returns-0-for-corrupt-archives.patch
new file mode 100644
index 000000000..ece90d94e
--- /dev/null
+++ b/yocto-poky/meta/recipes-extended/bzip2/bzip2-1.0.6/fix-bunzip2-qt-returns-0-for-corrupt-archives.patch
@@ -0,0 +1,55 @@
+From 8068659388127e8e63f2d2297ba2348c72b20705 Mon Sep 17 00:00:00 2001
+From: Wenzong Fan <wenzong.fan@windriver.com>
+Date: Mon, 12 Oct 2015 03:19:51 -0400
+Subject: [PATCH] bzip2: fix bunzip2 -qt returns 0 for corrupt archives
+
+"bzip2 -t FILE" returns 2 if FILE exists, but is not a valid bzip2 file.
+"bzip2 -qt FILE" returns 0 when this happens, although it does print out
+an error message as is does so.
+
+This has been fix by Debian, just port changes from Debian patch file
+"20-legacy.patch".
+
+Debian defect:
+https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=279025
+
+Fix item from changelog:
+http://archive.debian.net/changelogs/pool/main/b/bzip2/bzip2_1.0.2-7/changelog
+
+ * Fixed "bunzip2 -qt returns 0 for corrupt archives" (Closes: #279025).
+
+Upstream-Status: Pending
+
+Signed-off-by: Wenzong Fan <wenzong.fan@windriver.com>
+---
+ bzip2.c | 14 ++++++++------
+ 1 file changed, 8 insertions(+), 6 deletions(-)
+
+diff --git a/bzip2.c b/bzip2.c
+index 6de9d1d..f2ce668 100644
+--- a/bzip2.c
++++ b/bzip2.c
+@@ -2003,12 +2003,14 @@ IntNative main ( IntNative argc, Char *argv[] )
+ testf ( aa->name );
+ }
+ }
+- if (testFailsExist && noisy) {
+- fprintf ( stderr,
+- "\n"
+- "You can use the `bzip2recover' program to attempt to recover\n"
+- "data from undamaged sections of corrupted files.\n\n"
+- );
++ if (testFailsExist) {
++ if (noisy) {
++ fprintf ( stderr,
++ "\n"
++ "You can use the `bzip2recover' program to attempt to recover\n"
++ "data from undamaged sections of corrupted files.\n\n"
++ );
++ }
+ setExit(2);
+ exit(exitValue);
+ }
+--
+1.9.1
+
diff --git a/yocto-poky/meta/recipes-extended/bzip2/bzip2_1.0.6.bb b/yocto-poky/meta/recipes-extended/bzip2/bzip2_1.0.6.bb
index 233fe4c33..d7b8c0655 100644
--- a/yocto-poky/meta/recipes-extended/bzip2/bzip2_1.0.6.bb
+++ b/yocto-poky/meta/recipes-extended/bzip2/bzip2_1.0.6.bb
@@ -9,6 +9,7 @@ LIC_FILES_CHKSUM = "file://LICENSE;beginline=8;endline=37;md5=40d9d1eb05736d1bfc
PR = "r5"
SRC_URI = "http://www.bzip.org/${PV}/${BP}.tar.gz \
+ file://fix-bunzip2-qt-returns-0-for-corrupt-archives.patch \
file://configure.ac;subdir=${BP} \
file://Makefile.am;subdir=${BP} \
file://run-ptest"
diff --git a/yocto-poky/meta/recipes-extended/cpio/cpio_v2.inc b/yocto-poky/meta/recipes-extended/cpio/cpio_v2.inc
index 93de4bb92..8520ff267 100644
--- a/yocto-poky/meta/recipes-extended/cpio/cpio_v2.inc
+++ b/yocto-poky/meta/recipes-extended/cpio/cpio_v2.inc
@@ -18,9 +18,11 @@ EXTRA_OECONF += "DEFAULT_RMT_DIR=${base_sbindir}"
do_install () {
autotools_do_install
- install -d ${D}${base_bindir}/
- mv "${D}${bindir}/cpio" "${D}${base_bindir}/cpio"
- rmdir ${D}${bindir}/
+ if [ "${base_bindir}" != "${bindir}" ]; then
+ install -d ${D}${base_bindir}/
+ mv "${D}${bindir}/cpio" "${D}${base_bindir}/cpio"
+ rmdir ${D}${bindir}/
+ fi
}
PACKAGES =+ "${PN}-rmt"
diff --git a/yocto-poky/meta/recipes-extended/cronie/cronie_1.5.0.bb b/yocto-poky/meta/recipes-extended/cronie/cronie_1.5.0.bb
index 38bd5935c..697501ac1 100644
--- a/yocto-poky/meta/recipes-extended/cronie/cronie_1.5.0.bb
+++ b/yocto-poky/meta/recipes-extended/cronie/cronie_1.5.0.bb
@@ -4,7 +4,7 @@ specified programs at scheduled times and related tools. It is based on the \
original cron and has security and configuration enhancements like the \
ability to use pam and SELinux."
HOMEPAGE = "https://fedorahosted.org/cronie/"
-BUGTRACKER = "mmaslano@redhat.com"
+BUGTRACKER = "https://bugzilla.redhat.com"
# Internet Systems Consortium License
LICENSE = "ISC & BSD-3-Clause & BSD-2-Clause & GPLv2+"
diff --git a/yocto-poky/meta/recipes-extended/cups/cups.inc b/yocto-poky/meta/recipes-extended/cups/cups.inc
index 57cdf2650..2c34da98d 100644
--- a/yocto-poky/meta/recipes-extended/cups/cups.inc
+++ b/yocto-poky/meta/recipes-extended/cups/cups.inc
@@ -28,6 +28,7 @@ PACKAGECONFIG ??= "${@bb.utils.contains('DISTRO_FEATURES', 'zeroconf', 'avahi',
PACKAGECONFIG[avahi] = "--enable-avahi,--disable-avahi,avahi"
PACKAGECONFIG[acl] = "--enable-acl,--disable-acl,acl"
PACKAGECONFIG[pam] = "--enable-pam, --disable-pam, libpam"
+PACKAGECONFIG[xinetd] = "--with-xinetd=${sysconfdir}/xinetd.d,--without-xinetd,xinetd"
EXTRA_OECONF = " \
--enable-gnutls \
@@ -64,6 +65,11 @@ do_install () {
rm -fr ${D}/${localstatedir}/run
rmdir ${D}/${libdir}/${BPN}/driver
+ # Fix the pam configuration file permissions
+ if ${@bb.utils.contains('DISTRO_FEATURES', 'pam', 'true', 'false', d)}; then
+ chmod 0644 ${D}${sysconfdir}/pam.d/cups
+ fi
+
# Remove sysinit script and symlinks if sysvinit is not in DISTRO_FEATURES
if ${@bb.utils.contains('DISTRO_FEATURES','sysvinit','false','true',d)}; then
rm -rf ${D}${sysconfdir}/init.d/
diff --git a/yocto-poky/meta/recipes-extended/cwautomacros/cwautomacros_20110201.bb b/yocto-poky/meta/recipes-extended/cwautomacros/cwautomacros_20110201.bb
index 43ea3ce38..65a99fc28 100644
--- a/yocto-poky/meta/recipes-extended/cwautomacros/cwautomacros_20110201.bb
+++ b/yocto-poky/meta/recipes-extended/cwautomacros/cwautomacros_20110201.bb
@@ -14,6 +14,9 @@ do_configure() {
do_install() {
oe_runmake CWAUTOMACROSPREFIX=${D}${prefix} install
+
+ # cleanup buildpaths in autogen.sh
+ sed -i -e 's,${D},,g' ${D}${prefix}/share/cwautomacros/scripts/autogen.sh
}
BBCLASSEXTEND = "native"
diff --git a/yocto-poky/meta/recipes-extended/foomatic/foomatic-filters-4.0.17/CVE-2015-8327.patch b/yocto-poky/meta/recipes-extended/foomatic/foomatic-filters-4.0.17/CVE-2015-8327.patch
new file mode 100644
index 000000000..aaedc88aa
--- /dev/null
+++ b/yocto-poky/meta/recipes-extended/foomatic/foomatic-filters-4.0.17/CVE-2015-8327.patch
@@ -0,0 +1,23 @@
+Upstream-Status: Backport
+
+
+http://bzr.linuxfoundation.org/loggerhead/openprinting/cups-filters/revision/7406
+
+Hand applied change to util.c. Fix was for cups-filters but also applied to foomatic-filters.
+
+CVE: CVE-2015-8327
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+Index: util.c
+===================================================================
+--- a/util.c
++++ b/util.c
+@@ -31,7 +31,7 @@
+ #include <assert.h>
+
+
+-const char* shellescapes = "|;<>&!$\'\"#*?()[]{}";
++const char* shellescapes = "|;<>&!$\'\"`#*?()[]{}";
+
+ const char * temp_dir()
+ {
diff --git a/yocto-poky/meta/recipes-extended/foomatic/foomatic-filters-4.0.17/CVE-2015-8560.patch b/yocto-poky/meta/recipes-extended/foomatic/foomatic-filters-4.0.17/CVE-2015-8560.patch
new file mode 100644
index 000000000..dc973c459
--- /dev/null
+++ b/yocto-poky/meta/recipes-extended/foomatic/foomatic-filters-4.0.17/CVE-2015-8560.patch
@@ -0,0 +1,23 @@
+Upstream-Status: Backport
+
+
+http://bzr.linuxfoundation.org/loggerhead/openprinting/cups-filters/revision/7419
+
+Hand applied change to util.c. Fix was for cups-filters but also applied to foomatic-filters.
+
+CVE: CVE-2015-8560
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+Index: util.c
+===================================================================
+--- a/util.c
++++ b/util.c
+@@ -31,7 +31,7 @@
+ #include <assert.h>
+
+
+-const char* shellescapes = "|<>&!$\'\"#*?()[]{}";
++const char* shellescapes = "|;<>&!$\'\"#*?()[]{}";
+
+ const char * temp_dir()
+ {
diff --git a/yocto-poky/meta/recipes-extended/foomatic/foomatic-filters_4.0.17.bb b/yocto-poky/meta/recipes-extended/foomatic/foomatic-filters_4.0.17.bb
index 790c98138..58ef1f5b0 100644
--- a/yocto-poky/meta/recipes-extended/foomatic/foomatic-filters_4.0.17.bb
+++ b/yocto-poky/meta/recipes-extended/foomatic/foomatic-filters_4.0.17.bb
@@ -17,6 +17,10 @@ LIC_FILES_CHKSUM = "file://${WORKDIR}/foomatic-filters-${PV}/COPYING;md5=393a5ca
SRC_URI = "http://www.openprinting.org/download/foomatic/foomatic-filters-${PV}.tar.gz"
+SRC_URI += "file://CVE-2015-8560.patch \
+ file://CVE-2015-8327.patch \
+ "
+
SRC_URI[md5sum] = "b05f5dcbfe359f198eef3df5b283d896"
SRC_URI[sha256sum] = "a2e2e53e502571e88eeb9010c45a0d54671f15707ee104f5c9c22b59ea7a33e3"
diff --git a/yocto-poky/meta/recipes-extended/ghostscript/ghostscript/png_mak.patch b/yocto-poky/meta/recipes-extended/ghostscript/ghostscript/png_mak.patch
new file mode 100644
index 000000000..da900ead3
--- /dev/null
+++ b/yocto-poky/meta/recipes-extended/ghostscript/ghostscript/png_mak.patch
@@ -0,0 +1,21 @@
+ghostscript: add dependency for pnglibconf.h
+
+When using parallel make jobs, we need to be sure that
+pnglibconf.h is created before we try to reference it,
+so add a rule to png.mak.
+
+Upstream-Status: Pending
+
+Signed-off-by: Joe Slater <jslater@windriver.com>
+
+--- a/base/png.mak
++++ b/base/png.mak
+@@ -81,6 +81,8 @@ png.config-clean :
+ $(pnglibconf_h) : $(PNGSRC)scripts$(D)pnglibconf.h.prebuilt
+ $(CP_) $(PNGSRC)scripts$(D)pnglibconf.h.prebuilt $(pnglibconf_h)
+
++$(MAKEDIRS) : $(pnglibconf_h)
++
+ PDEP=$(AK) $(pnglibconf_h) $(MAKEDIRS)
+
+ png_1=$(PNGOBJ)png.$(OBJ) $(PNGOBJ)pngmem.$(OBJ) $(PNGOBJ)pngerror.$(OBJ) $(PNGOBJ)pngset.$(OBJ)
diff --git a/yocto-poky/meta/recipes-extended/ghostscript/ghostscript_9.16.bb b/yocto-poky/meta/recipes-extended/ghostscript/ghostscript_9.16.bb
index ec4acc666..d584c49b0 100644
--- a/yocto-poky/meta/recipes-extended/ghostscript/ghostscript_9.16.bb
+++ b/yocto-poky/meta/recipes-extended/ghostscript/ghostscript_9.16.bb
@@ -19,6 +19,7 @@ DEPENDS_class-native = ""
SRC_URI_BASE = "http://downloads.ghostscript.com/public/ghostscript-${PV}.tar.gz \
file://ghostscript-9.15-parallel-make.patch \
file://ghostscript-9.16-Werror-return-type.patch \
+ file://png_mak.patch \
"
SRC_URI = "${SRC_URI_BASE} \
diff --git a/yocto-poky/meta/recipes-extended/grep/grep_2.21.bb b/yocto-poky/meta/recipes-extended/grep/grep_2.21.bb
index 3661098c5..c51147b57 100644
--- a/yocto-poky/meta/recipes-extended/grep/grep_2.21.bb
+++ b/yocto-poky/meta/recipes-extended/grep/grep_2.21.bb
@@ -23,11 +23,13 @@ do_configure_prepend () {
do_install () {
autotools_do_install
- install -d ${D}${base_bindir}
- mv ${D}${bindir}/grep ${D}${base_bindir}/grep
- mv ${D}${bindir}/egrep ${D}${base_bindir}/egrep
- mv ${D}${bindir}/fgrep ${D}${base_bindir}/fgrep
- rmdir ${D}${bindir}/
+ if [ "${base_bindir}" != "${bindir}" ]; then
+ install -d ${D}${base_bindir}
+ mv ${D}${bindir}/grep ${D}${base_bindir}/grep
+ mv ${D}${bindir}/egrep ${D}${base_bindir}/egrep
+ mv ${D}${bindir}/fgrep ${D}${base_bindir}/fgrep
+ rmdir ${D}${bindir}/
+ fi
}
inherit update-alternatives
diff --git a/yocto-poky/meta/recipes-extended/gzip/gzip.inc b/yocto-poky/meta/recipes-extended/gzip/gzip.inc
index 94480ec53..58e5e0c53 100644
--- a/yocto-poky/meta/recipes-extended/gzip/gzip.inc
+++ b/yocto-poky/meta/recipes-extended/gzip/gzip.inc
@@ -10,12 +10,14 @@ inherit autotools texinfo
EXTRA_OEMAKE_class-target = "GREP=${base_bindir}/grep"
do_install_append () {
- # Rename and move files into /bin (FHS), which is typical place for gzip
- install -d ${D}${base_bindir}
- mv ${D}${bindir}/gunzip ${D}${base_bindir}/gunzip
- mv ${D}${bindir}/gzip ${D}${base_bindir}/gzip
- mv ${D}${bindir}/zcat ${D}${base_bindir}/zcat
- mv ${D}${bindir}/uncompress ${D}${base_bindir}/uncompress
+ if [ "${base_bindir}" != "${bindir}" ]; then
+ # Rename and move files into /bin (FHS), which is typical place for gzip
+ install -d ${D}${base_bindir}
+ mv ${D}${bindir}/gunzip ${D}${base_bindir}/gunzip
+ mv ${D}${bindir}/gzip ${D}${base_bindir}/gzip
+ mv ${D}${bindir}/zcat ${D}${base_bindir}/zcat
+ mv ${D}${bindir}/uncompress ${D}${base_bindir}/uncompress
+ fi
}
inherit update-alternatives
diff --git a/yocto-poky/meta/recipes-extended/images/wic-image-minimal.bb b/yocto-poky/meta/recipes-extended/images/wic-image-minimal.bb
deleted file mode 100644
index 073c569fe..000000000
--- a/yocto-poky/meta/recipes-extended/images/wic-image-minimal.bb
+++ /dev/null
@@ -1,14 +0,0 @@
-SUMMARY = "An example of partitioned image."
-
-IMAGE_INSTALL = "packagegroup-core-boot ${ROOTFS_PKGMANAGE_BOOTSTRAP}"
-
-IMAGE_FSTYPES = "wic.bz2"
-RM_OLD_IMAGE = "1"
-
-# core-image-minimal is referenced in .wks, so we need its rootfs
-# to be ready before our rootfs
-do_rootfs[depends] += "core-image-minimal:do_rootfs"
-
-IMAGE_ROOTFS_EXTRA_SPACE = "2000"
-
-inherit image
diff --git a/yocto-poky/meta/recipes-extended/images/wic-image-minimal.wks b/yocto-poky/meta/recipes-extended/images/wic-image-minimal.wks
deleted file mode 100644
index 29cd8f2c8..000000000
--- a/yocto-poky/meta/recipes-extended/images/wic-image-minimal.wks
+++ /dev/null
@@ -1,10 +0,0 @@
-# short-description: Example of partitioned image with complex layout
-# long-description: This image contains boot partition and 3 rootfs partitions
-# created from core-image-minimal and wic-image-minimal image recipes.
-
-part /boot --source bootimg-pcbios --ondisk sda --label boot --active --align 1024
-part / --source rootfs --ondisk sda --fstype=ext2 --label platform --align 1024
-part /core --source rootfs --rootfs-dir=core-image-minimal --ondisk sda --fstype=ext2 --label core --align 1024
-part /backup --source rootfs --rootfs-dir=wic-image-minimal --ondisk sda --fstype=ext2 --label backup --align 1024
-
-bootloader --timeout=0 --append="rootwait console=tty0"
diff --git a/yocto-poky/meta/recipes-extended/iptables/iptables/0002-configure.ac-only-check-conntrack-when-libnfnetlink-enabled.patch b/yocto-poky/meta/recipes-extended/iptables/iptables/0002-configure.ac-only-check-conntrack-when-libnfnetlink-enabled.patch
new file mode 100644
index 000000000..89ad8f666
--- /dev/null
+++ b/yocto-poky/meta/recipes-extended/iptables/iptables/0002-configure.ac-only-check-conntrack-when-libnfnetlink-enabled.patch
@@ -0,0 +1,34 @@
+Package libnetfilter-conntrack depends on package libnfnetlink. iptables
+checks for package libnetfilter-conntrack whether or not its package
+config libnfnetlink is enabled. When libnfnetlink is disabled but
+package libnetfilter-conntrack exists, it fails randomly with:
+
+| In file included from .../iptables/1.4.21-r0/iptables-1.4.21/extensions/libxt_connlabel.c:8:0:
+| .../tmp/sysroots/qemumips/usr/include/libnetfilter_conntrack/libnetfilter_conntrack.h:14:42: fatal error: libnfnetlink/linux_nfnetlink.h: No such file or directory
+| compilation terminated.
+| GNUmakefile:96: recipe for target 'libxt_connlabel.oo' failed
+
+Only check libnetfilter-conntrack when libnfnetlink is enabled to fix it.
+
+Upstream-Status: Pending
+
+Signed-off-by: Kai Kang <kai.kang@windriver.com>
+
+diff --git a/configure.ac b/configure.ac
+index 5d7e62b..e331ee7 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -88,8 +88,12 @@ if test "$ac_cv_header_linux_ip_vs_h" != "yes"; then
+ blacklist_modules="$blacklist_modules ipvs";
+ fi;
+
+-PKG_CHECK_MODULES([libnetfilter_conntrack], [libnetfilter_conntrack >= 1.0.4],
++nfconntrack=0
++AS_IF([test "x$enable_libnfnetlink" = "xyes"], [
++ PKG_CHECK_MODULES([libnetfilter_conntrack], [libnetfilter_conntrack >= 1.0.4],
+ [nfconntrack=1], [nfconntrack=0])
++ ])
++
+ AM_CONDITIONAL([HAVE_LIBNETFILTER_CONNTRACK], [test "$nfconntrack" = 1])
+
+ if test "$nfconntrack" -ne 1; then
diff --git a/yocto-poky/meta/recipes-extended/iptables/iptables_1.4.21.bb b/yocto-poky/meta/recipes-extended/iptables/iptables_1.4.21.bb
index 31c017b2c..deea5e514 100644
--- a/yocto-poky/meta/recipes-extended/iptables/iptables_1.4.21.bb
+++ b/yocto-poky/meta/recipes-extended/iptables/iptables_1.4.21.bb
@@ -23,6 +23,7 @@ SRC_URI = "http://netfilter.org/projects/iptables/files/iptables-${PV}.tar.bz2 \
file://types.h-add-defines-that-are-required-for-if_packet.patch \
file://0001-configure-Add-option-to-enable-disable-libnfnetlink.patch \
file://0001-fix-build-with-musl.patch \
+ file://0002-configure.ac-only-check-conntrack-when-libnfnetlink-enabled.patch \
"
SRC_URI[md5sum] = "536d048c8e8eeebcd9757d0863ebb0c0"
@@ -38,7 +39,7 @@ PACKAGECONFIG ?= "${@bb.utils.contains('DISTRO_FEATURES', 'ipv6', 'ipv6', '', d)
PACKAGECONFIG[ipv6] = "--enable-ipv6,--disable-ipv6,"
# libnfnetlink recipe is in meta-networking layer
-PACKAGECONFIG[libnfnetlink] = "--enable-libnfnetlink,--disable-libnfnetlink,libnfnetlink"
+PACKAGECONFIG[libnfnetlink] = "--enable-libnfnetlink,--disable-libnfnetlink,libnfnetlink libnetfilter-conntrack"
do_configure_prepend() {
# Remove some libtool m4 files
diff --git a/yocto-poky/meta/recipes-extended/libaio/libaio/system-linkage.patch b/yocto-poky/meta/recipes-extended/libaio/libaio/system-linkage.patch
new file mode 100644
index 000000000..0b1f47569
--- /dev/null
+++ b/yocto-poky/meta/recipes-extended/libaio/libaio/system-linkage.patch
@@ -0,0 +1,37 @@
+From 94bba6880b1f10c6b3bf33a17ac40935d65a81ae Mon Sep 17 00:00:00 2001
+From: Ross Burton <ross.burton@intel.com>
+Date: Fri, 6 Nov 2015 15:19:46 +0000
+Subject: [PATCH] Don't remove the system libraries and startup files from
+ libaio, as in some build configurations these are required. For example,
+ including conf/include/security_flags.inc on PPC results in:
+
+io_queue_init.os: In function `io_queue_init':
+tmp/work/ppce300c3-poky-linux/libaio/0.3.110-r0/libaio-0.3.110/src/io_queue_init.c:33:
+undefined reference to `__stack_chk_fail_local'
+
+Upstream-Status: Pending
+Signed-off-by: Ross Burton <ross.burton@intel.com>
+---
+ src/Makefile | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/src/Makefile b/src/Makefile
+index eadb336..56ab701 100644
+--- a/src/Makefile
++++ b/src/Makefile
+@@ -3,10 +3,10 @@ includedir=$(prefix)/include
+ libdir=$(prefix)/lib
+
+ CFLAGS ?= -g -fomit-frame-pointer -O2
+-CFLAGS += -nostdlib -nostartfiles -Wall -I. -fPIC
++CFLAGS += -Wall -I. -fPIC
+ SO_CFLAGS=-shared $(CFLAGS)
+ L_CFLAGS=$(CFLAGS)
+-LINK_FLAGS=
++LINK_FLAGS=$(LDFLAGS)
+ LINK_FLAGS+=$(LDFLAGS)
+
+ soname=libaio.so.1
+--
+2.1.4
+
diff --git a/yocto-poky/meta/recipes-extended/libaio/libaio_0.3.110.bb b/yocto-poky/meta/recipes-extended/libaio/libaio_0.3.110.bb
index cbe29ce2a..2adfa0aa4 100644
--- a/yocto-poky/meta/recipes-extended/libaio/libaio_0.3.110.bb
+++ b/yocto-poky/meta/recipes-extended/libaio/libaio_0.3.110.bb
@@ -11,18 +11,13 @@ SRC_URI = "${DEBIAN_MIRROR}/main/liba/libaio/libaio_${PV}.orig.tar.gz \
file://destdir.patch \
file://libaio_fix_for_x32.patch \
file://libaio_fix_for_mips_syscalls.patch \
-"
+ file://system-linkage.patch \
+ "
SRC_URI[md5sum] = "2a35602e43778383e2f4907a4ca39ab8"
SRC_URI[sha256sum] = "e019028e631725729376250e32b473012f7cb68e1f7275bfc1bbcdd0f8745f7e"
EXTRA_OEMAKE =+ "prefix=${prefix} includedir=${includedir} libdir=${libdir}"
-# Need libc for stack-protector's __stack_chk_fail_local() bounce function
-LDFLAGS_append_x86 = " -lc"
-
-do_configure () {
- sed -i 's#LINK_FLAGS=.*#LINK_FLAGS=$(LDFLAGS)#' src/Makefile
-}
do_install () {
oe_runmake install DESTDIR=${D}
diff --git a/yocto-poky/meta/recipes-extended/libarchive/libarchive/0001-Add-ARCHIVE_EXTRACT_SECURE_NOABSOLUTEPATHS-option.patch b/yocto-poky/meta/recipes-extended/libarchive/libarchive/libarchive-CVE-2015-2304.patch
index 4ca779c40..4ca779c40 100644
--- a/yocto-poky/meta/recipes-extended/libarchive/libarchive/0001-Add-ARCHIVE_EXTRACT_SECURE_NOABSOLUTEPATHS-option.patch
+++ b/yocto-poky/meta/recipes-extended/libarchive/libarchive/libarchive-CVE-2015-2304.patch
diff --git a/yocto-poky/meta/recipes-extended/libarchive/libarchive_3.1.2.bb b/yocto-poky/meta/recipes-extended/libarchive/libarchive_3.1.2.bb
index aaa325535..716db9aff 100644
--- a/yocto-poky/meta/recipes-extended/libarchive/libarchive_3.1.2.bb
+++ b/yocto-poky/meta/recipes-extended/libarchive/libarchive_3.1.2.bb
@@ -32,7 +32,7 @@ PACKAGECONFIG[nettle] = "--with-nettle,--without-nettle,nettle,"
SRC_URI = "http://libarchive.org/downloads/libarchive-${PV}.tar.gz \
file://libarchive-CVE-2013-0211.patch \
file://pkgconfig.patch \
- file://0001-Add-ARCHIVE_EXTRACT_SECURE_NOABSOLUTEPATHS-option.patch \
+ file://libarchive-CVE-2015-2304.patch \
file://mkdir.patch \
"
diff --git a/yocto-poky/meta/recipes-extended/libtirpc/libtirpc/remove-des-uclibc.patch b/yocto-poky/meta/recipes-extended/libtirpc/libtirpc/remove-des-uclibc.patch
index d003348af..553b1ffb8 100644
--- a/yocto-poky/meta/recipes-extended/libtirpc/libtirpc/remove-des-uclibc.patch
+++ b/yocto-poky/meta/recipes-extended/libtirpc/libtirpc/remove-des-uclibc.patch
@@ -3,11 +3,11 @@ uclibc does not provide des functionality unlike eglibc so lets disable ssl supp
Upstream-Status: Inappropriate [uclibc specific]
Signed-off-by: Khem Raj <raj.khem@gmail.com>
-Index: libtirpc-0.2.3/src/rpc_soc.c
+Index: libtirpc-0.2.5/src/rpc_soc.c
===================================================================
---- libtirpc-0.2.3.orig/src/rpc_soc.c 2013-03-10 16:00:51.355282153 -0700
-+++ libtirpc-0.2.3/src/rpc_soc.c 2013-03-10 16:00:51.703282148 -0700
-@@ -520,6 +520,7 @@
+--- libtirpc-0.2.5.orig/src/rpc_soc.c
++++ libtirpc-0.2.5/src/rpc_soc.c
+@@ -520,6 +520,7 @@ clnt_broadcast(prog, vers, proc, xargs,
(resultproc_t) rpc_wrap_bcast, "udp");
}
@@ -15,7 +15,7 @@ Index: libtirpc-0.2.3/src/rpc_soc.c
/*
* Create the client des authentication object. Obsoleted by
* authdes_seccreate().
-@@ -551,6 +552,7 @@
+@@ -551,6 +552,7 @@ fallback:
dummy = authdes_seccreate(servername, window, NULL, ckey);
return (dummy);
}
@@ -23,16 +23,16 @@ Index: libtirpc-0.2.3/src/rpc_soc.c
/*
* Create a client handle for a unix connection. Obsoleted by clnt_vc_create()
-Index: libtirpc-0.2.3/src/Makefile.am
+Index: libtirpc-0.2.5/src/Makefile.am
===================================================================
---- libtirpc-0.2.3.orig/src/Makefile.am 2013-03-10 16:00:51.355282153 -0700
-+++ libtirpc-0.2.3/src/Makefile.am 2013-03-10 16:00:51.703282148 -0700
-@@ -50,7 +50,7 @@
+--- libtirpc-0.2.5.orig/src/Makefile.am
++++ libtirpc-0.2.5/src/Makefile.am
+@@ -51,7 +51,7 @@ libtirpc_la_SOURCES = auth_none.c auth_u
rpc_callmsg.c rpc_generic.c rpc_soc.c rpcb_clnt.c rpcb_prot.c \
rpcb_st_xdr.c svc.c svc_auth.c svc_dg.c svc_auth_unix.c svc_auth_none.c \
svc_generic.c svc_raw.c svc_run.c svc_simple.c svc_vc.c getpeereid.c \
-- auth_time.c auth_des.c authdes_prot.c
-+ auth_time.c
+- auth_time.c auth_des.c authdes_prot.c debug.c
++ auth_time.c debug.c
## XDR
libtirpc_la_SOURCES += xdr.c xdr_rec.c xdr_array.c xdr_float.c xdr_mem.c xdr_reference.c xdr_stdio.c
diff --git a/yocto-poky/meta/recipes-extended/libtirpc/libtirpc/va_list.patch b/yocto-poky/meta/recipes-extended/libtirpc/libtirpc/va_list.patch
new file mode 100644
index 000000000..855d15b58
--- /dev/null
+++ b/yocto-poky/meta/recipes-extended/libtirpc/libtirpc/va_list.patch
@@ -0,0 +1,18 @@
+This patch fixes the build with uclibc, where the compiler (gcc 5) cannot find va_list.
+The patch is also suitable for upstreaming.
+
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+Upstream-Status: Pending
+
+Index: libtirpc-0.2.5/src/debug.h
+===================================================================
+--- libtirpc-0.2.5.orig/src/debug.h
++++ libtirpc-0.2.5/src/debug.h
+@@ -22,6 +22,7 @@
+ #ifndef _DEBUG_H
+ #define _DEBUG_H
+ #include <syslog.h>
++#include <stdarg.h>
+
+ extern int libtirpc_debug_level;
+ extern int log_stderr;
diff --git a/yocto-poky/meta/recipes-extended/libtirpc/libtirpc_0.2.5.bb b/yocto-poky/meta/recipes-extended/libtirpc/libtirpc_0.2.5.bb
index 3edf00249..330b82991 100644
--- a/yocto-poky/meta/recipes-extended/libtirpc/libtirpc_0.2.5.bb
+++ b/yocto-poky/meta/recipes-extended/libtirpc/libtirpc_0.2.5.bb
@@ -15,7 +15,9 @@ SRC_URI = "${SOURCEFORGE_MIRROR}/${BPN}/${BP}.tar.bz2;name=libtirpc \
file://libtirpc-0.2.1-fortify.patch \
"
-SRC_URI_append_libc-uclibc = " file://remove-des-uclibc.patch"
+SRC_URI_append_libc-uclibc = " file://remove-des-uclibc.patch \
+ file://va_list.patch \
+ "
SRC_URI[libtirpc.md5sum] = "8cd41a5ef5a9b50d0fb6abb98af15368"
SRC_URI[libtirpc.sha256sum] = "62f9de7c2c8686c568757730e1fef66502a0e00d6cacf33546d0267984e002db"
diff --git a/yocto-poky/meta/recipes-extended/logrotate/logrotate_3.9.1.bb b/yocto-poky/meta/recipes-extended/logrotate/logrotate_3.9.1.bb
index 7d0a15961..5f1a601ae 100644
--- a/yocto-poky/meta/recipes-extended/logrotate/logrotate_3.9.1.bb
+++ b/yocto-poky/meta/recipes-extended/logrotate/logrotate_3.9.1.bb
@@ -53,7 +53,7 @@ do_compile_prepend() {
}
do_install(){
- oe_runmake install DESTDIR=${D} PREFIX=${D} MANDIR=${mandir} BINDIR=${bindir}
+ oe_runmake install DESTDIR=${D} PREFIX=${D} MANDIR=${mandir}
mkdir -p ${D}${sysconfdir}/logrotate.d
mkdir -p ${D}${sysconfdir}/cron.daily
mkdir -p ${D}${localstatedir}/lib
diff --git a/yocto-poky/meta/recipes-extended/lsb/lsb_4.1.bb b/yocto-poky/meta/recipes-extended/lsb/lsb_4.1.bb
index 6215b62d8..c9f6a8bd9 100644
--- a/yocto-poky/meta/recipes-extended/lsb/lsb_4.1.bb
+++ b/yocto-poky/meta/recipes-extended/lsb/lsb_4.1.bb
@@ -9,8 +9,8 @@ LSB_CORE_x86 = "lsb-core-ia32"
LSB_CORE_x86-64 = "lsb-core-amd64"
RPROVIDES_${PN} += "${LSB_CORE}"
-# lsb_release needs getopt
-RDEPENDS_${PN} += "${VIRTUAL-RUNTIME_getopt}"
+# lsb_release needs getopt, lsbinitscripts
+RDEPENDS_${PN} += "${VIRTUAL-RUNTIME_getopt} lsbinitscripts"
LIC_FILES_CHKSUM = "file://README;md5=12da544b1a3a5a1795a21160b49471cf"
diff --git a/yocto-poky/meta/recipes-extended/lsb/lsbinitscripts_9.64.bb b/yocto-poky/meta/recipes-extended/lsb/lsbinitscripts_9.64.bb
index 6db667c13..150f6f230 100644
--- a/yocto-poky/meta/recipes-extended/lsb/lsbinitscripts_9.64.bb
+++ b/yocto-poky/meta/recipes-extended/lsb/lsbinitscripts_9.64.bb
@@ -3,6 +3,8 @@ SECTION = "base"
LICENSE = "GPLv2"
DEPENDS = "popt glib-2.0"
+RDEPENDS_${PN} += "util-linux"
+
LIC_FILES_CHKSUM = "file://COPYING;md5=ebf4e8b49780ab187d51bd26aaa022c6"
S="${WORKDIR}/initscripts-${PV}"
diff --git a/yocto-poky/meta/recipes-extended/ltp/ltp/0001-replace-inline-with-static-inline-for-gcc-5.x.patch b/yocto-poky/meta/recipes-extended/ltp/ltp/0001-replace-inline-with-static-inline-for-gcc-5.x.patch
new file mode 100644
index 000000000..0b594dc55
--- /dev/null
+++ b/yocto-poky/meta/recipes-extended/ltp/ltp/0001-replace-inline-with-static-inline-for-gcc-5.x.patch
@@ -0,0 +1,69 @@
+Upstream-Status: Backport [From https://github.com/linux-test-project/ltp/commit/40a2457cb8ec42a05a2f96b0810057efdb2a55f5]
+
+gcc 5.x defaults to -std=gnu11 instead of -std=gnu89, which changes
+the semantics of inline functions.
+
+The standalone 'inline' causes errors with gcc 5 such as:
+
+git/testcases/kernel/syscalls/kill/kill10.c:355: undefined reference to `k_sigaction'
+
+Replace inline with static inline to be compatible with both gcc 4 and 5.
+
+Signed-off-by: Kai Kang <kai.kang@windriver.com>
+---
+ testcases/kernel/controllers/libcontrollers/libcontrollers.c | 2 +-
+ testcases/kernel/controllers/libcontrollers/libcontrollers.h | 2 +-
+ testcases/kernel/syscalls/kill/kill10.c | 4 ++--
+ 3 files changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/testcases/kernel/controllers/libcontrollers/libcontrollers.c b/testcases/kernel/controllers/libcontrollers/libcontrollers.c
+index b01e1b8..8857bc9 100644
+--- a/testcases/kernel/controllers/libcontrollers/libcontrollers.c
++++ b/testcases/kernel/controllers/libcontrollers/libcontrollers.c
+@@ -146,7 +146,7 @@ int read_file(char *filepath, int action, unsigned int *value)
+ * Prints error message and returns -1
+ */
+
+-inline int error_function(char *msg1, char *msg2)
++static inline int error_function(char *msg1, char *msg2)
+ {
+ fprintf(stdout, "ERROR: %s ", msg1);
+ fprintf(stdout, "%s\n", msg2);
+diff --git a/testcases/kernel/controllers/libcontrollers/libcontrollers.h b/testcases/kernel/controllers/libcontrollers/libcontrollers.h
+index 4001555..a1a0dfa 100644
+--- a/testcases/kernel/controllers/libcontrollers/libcontrollers.h
++++ b/testcases/kernel/controllers/libcontrollers/libcontrollers.h
+@@ -70,7 +70,7 @@ enum{
+ GET_TASKS
+ };
+
+-inline int error_function(char *msg1, char *msg2);
++static inline int error_function(char *msg1, char *msg2);
+
+ unsigned int read_shares_file (char *filepath);
+
+diff --git a/testcases/kernel/syscalls/kill/kill10.c b/testcases/kernel/syscalls/kill/kill10.c
+index 982d9da..33dbcd3 100644
+--- a/testcases/kernel/syscalls/kill/kill10.c
++++ b/testcases/kernel/syscalls/kill/kill10.c
+@@ -185,7 +185,7 @@ int child_checklist_total = 0;
+ int checklist_cmp(const void *a, const void *b);
+ void checklist_reset(int bit);
+
+-inline int k_sigaction(int sig, struct sigaction *sa, struct sigaction *osa);
++static inline int k_sigaction(int sig, struct sigaction *sa, struct sigaction *osa);
+
+ char *TCID = "kill10";
+ int TST_TOTAL = 1;
+@@ -756,7 +756,7 @@ void checklist_reset(int bit)
+
+ }
+
+-inline int k_sigaction(int sig, struct sigaction *sa, struct sigaction *osa)
++static inline int k_sigaction(int sig, struct sigaction *sa, struct sigaction *osa)
+ {
+ int ret;
+ if ((ret = sigaction(sig, sa, osa)) == -1) {
+--
+1.9.1
+
diff --git a/yocto-poky/meta/recipes-extended/ltp/ltp_20150420.bb b/yocto-poky/meta/recipes-extended/ltp/ltp_20150420.bb
index 108ebf1e6..ed46b5e09 100644
--- a/yocto-poky/meta/recipes-extended/ltp/ltp_20150420.bb
+++ b/yocto-poky/meta/recipes-extended/ltp/ltp_20150420.bb
@@ -29,6 +29,7 @@ SRC_URI = "git://github.com/linux-test-project/ltp.git \
file://add-knob-for-numa.patch \
file://add-knob-for-tirpc.patch \
file://0001-ltp-vma03-fix-the-alginment-of-page-size.patch \
+ file://0001-replace-inline-with-static-inline-for-gcc-5.x.patch \
"
S = "${WORKDIR}/git"
diff --git a/yocto-poky/meta/recipes-extended/mailx/mailx_12.5-5.bb b/yocto-poky/meta/recipes-extended/mailx/mailx_12.5-5.bb
index ffa90498b..c87c58258 100644
--- a/yocto-poky/meta/recipes-extended/mailx/mailx_12.5-5.bb
+++ b/yocto-poky/meta/recipes-extended/mailx/mailx_12.5-5.bb
@@ -41,3 +41,8 @@ EXTRA_OEMAKE = "SENDMAIL=${sbindir}/sendmail IPv6=-DHAVE_IPv6_FUNCS PREFIX=/usr
# fio.c:56:17: fatal error: ssl.h: No such file or directory
# #include <ssl.h>
PARALLEL_MAKE = ""
+
+# Causes gcc to get stuck and eat all available memory in qemuarm builds
+# http://errors.yoctoproject.org/Errors/Details/20488/
+ARM_INSTRUCTION_SET_armv4 = "arm"
+ARM_INSTRUCTION_SET_armv5 = "arm"
diff --git a/yocto-poky/meta/recipes-extended/pam/libpam/use-utmpx.patch b/yocto-poky/meta/recipes-extended/pam/libpam/use-utmpx.patch
new file mode 100644
index 000000000..dd04bbb84
--- /dev/null
+++ b/yocto-poky/meta/recipes-extended/pam/libpam/use-utmpx.patch
@@ -0,0 +1,233 @@
+utmp() may not be configured in, so always use the POSIX-compliant utmpx.
+UTMP is SVID legacy; UTMPX is mandated by POSIX.
+
+Upstream-Status: Pending
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+Index: Linux-PAM-1.2.1/libpam/pam_modutil_getlogin.c
+===================================================================
+--- Linux-PAM-1.2.1.orig/libpam/pam_modutil_getlogin.c
++++ Linux-PAM-1.2.1/libpam/pam_modutil_getlogin.c
+@@ -10,8 +10,7 @@
+
+ #include <stdlib.h>
+ #include <unistd.h>
+-#include <utmp.h>
+-
++#include <utmpx.h>
+ #define _PAMMODUTIL_GETLOGIN "_pammodutil_getlogin"
+
+ const char *
+@@ -22,7 +21,7 @@ pam_modutil_getlogin(pam_handle_t *pamh)
+ const void *void_curr_tty;
+ const char *curr_tty;
+ char *curr_user;
+- struct utmp *ut, line;
++ struct utmpx *ut, line;
+
+ status = pam_get_data(pamh, _PAMMODUTIL_GETLOGIN, &logname);
+ if (status == PAM_SUCCESS) {
+@@ -48,10 +47,10 @@ pam_modutil_getlogin(pam_handle_t *pamh)
+ }
+ logname = NULL;
+
+- setutent();
++ setutxent();
+ strncpy(line.ut_line, curr_tty, sizeof(line.ut_line));
+
+- if ((ut = getutline(&line)) == NULL) {
++ if ((ut = getutxline(&line)) == NULL) {
+ goto clean_up_and_go_home;
+ }
+
+@@ -74,7 +73,7 @@ pam_modutil_getlogin(pam_handle_t *pamh)
+
+ clean_up_and_go_home:
+
+- endutent();
++ endutxent();
+
+ return logname;
+ }
+Index: Linux-PAM-1.2.1/modules/pam_issue/pam_issue.c
+===================================================================
+--- Linux-PAM-1.2.1.orig/modules/pam_issue/pam_issue.c
++++ Linux-PAM-1.2.1/modules/pam_issue/pam_issue.c
+@@ -25,7 +25,7 @@
+ #include <string.h>
+ #include <unistd.h>
+ #include <sys/utsname.h>
+-#include <utmp.h>
++#include <utmpx.h>
+ #include <time.h>
+ #include <syslog.h>
+
+@@ -246,13 +246,13 @@ read_issue_quoted(pam_handle_t *pamh, FI
+ case 'U':
+ {
+ unsigned int users = 0;
+- struct utmp *ut;
+- setutent();
+- while ((ut = getutent())) {
++ struct utmpx *ut;
++ setutxent();
++ while ((ut = getutxent())) {
+ if (ut->ut_type == USER_PROCESS)
+ ++users;
+ }
+- endutent();
++ endutxent();
+ if (c == 'U')
+ snprintf (buf, sizeof buf, "%u %s", users,
+ (users == 1) ? "user" : "users");
+Index: Linux-PAM-1.2.1/modules/pam_lastlog/pam_lastlog.c
+===================================================================
+--- Linux-PAM-1.2.1.orig/modules/pam_lastlog/pam_lastlog.c
++++ Linux-PAM-1.2.1/modules/pam_lastlog/pam_lastlog.c
+@@ -15,8 +15,9 @@
+ #include <errno.h>
+ #ifdef HAVE_UTMP_H
+ # include <utmp.h>
+-#else
+-# include <lastlog.h>
++#endif
++#ifdef HAVE_UTMPX_H
++# include <utmpx.h>
+ #endif
+ #include <pwd.h>
+ #include <stdlib.h>
+@@ -27,6 +28,12 @@
+ #include <syslog.h>
+ #include <unistd.h>
+
++#ifndef HAVE_UTMP_H
++#define UT_LINESIZE 32
++#define UT_HOSTSIZE 32
++#define UT_NAMESIZE 256
++#endif
++
+ #if defined(hpux) || defined(sunos) || defined(solaris)
+ # ifndef _PATH_LASTLOG
+ # define _PATH_LASTLOG "/usr/adm/lastlog"
+@@ -38,7 +45,7 @@
+ # define UT_LINESIZE 12
+ # endif /* UT_LINESIZE */
+ #endif
+-#if defined(hpux)
++#if defined(hpux) || !defined HAVE_UTMP_H
+ struct lastlog {
+ time_t ll_time;
+ char ll_line[UT_LINESIZE];
+@@ -447,8 +454,8 @@ last_login_failed(pam_handle_t *pamh, in
+ {
+ int retval;
+ int fd;
+- struct utmp ut;
+- struct utmp utuser;
++ struct utmpx ut;
++ struct utmpx utuser;
+ int failed = 0;
+ char the_time[256];
+ char *date = NULL;
+Index: Linux-PAM-1.2.1/modules/pam_limits/pam_limits.c
+===================================================================
+--- Linux-PAM-1.2.1.orig/modules/pam_limits/pam_limits.c
++++ Linux-PAM-1.2.1/modules/pam_limits/pam_limits.c
+@@ -33,7 +33,7 @@
+ #include <sys/resource.h>
+ #include <limits.h>
+ #include <glob.h>
+-#include <utmp.h>
++#include <utmpx.h>
+ #ifndef UT_USER /* some systems have ut_name instead of ut_user */
+ #define UT_USER ut_user
+ #endif
+@@ -227,7 +227,7 @@ static int
+ check_logins (pam_handle_t *pamh, const char *name, int limit, int ctrl,
+ struct pam_limit_s *pl)
+ {
+- struct utmp *ut;
++ struct utmpx *ut;
+ int count;
+
+ if (ctrl & PAM_DEBUG_ARG) {
+@@ -242,7 +242,7 @@ check_logins (pam_handle_t *pamh, const
+ return LOGIN_ERR;
+ }
+
+- setutent();
++ setutxent();
+
+ /* Because there is no definition about when an application
+ actually adds a utmp entry, some applications bizarrely do the
+@@ -260,7 +260,7 @@ check_logins (pam_handle_t *pamh, const
+ count = 1;
+ }
+
+- while((ut = getutent())) {
++ while((ut = getutxent())) {
+ #ifdef USER_PROCESS
+ if (ut->ut_type != USER_PROCESS) {
+ continue;
+@@ -296,7 +296,7 @@ check_logins (pam_handle_t *pamh, const
+ break;
+ }
+ }
+- endutent();
++ endutxent();
+ if (count > limit) {
+ if (name) {
+ pam_syslog(pamh, LOG_WARNING,
+Index: Linux-PAM-1.2.1/modules/pam_timestamp/pam_timestamp.c
+===================================================================
+--- Linux-PAM-1.2.1.orig/modules/pam_timestamp/pam_timestamp.c
++++ Linux-PAM-1.2.1/modules/pam_timestamp/pam_timestamp.c
+@@ -56,7 +56,7 @@
+ #include <time.h>
+ #include <sys/time.h>
+ #include <unistd.h>
+-#include <utmp.h>
++#include <utmpx.h>
+ #include <syslog.h>
+ #include <paths.h>
+ #include "hmacsha1.h"
+@@ -197,15 +197,15 @@ timestamp_good(time_t then, time_t now,
+ static int
+ check_login_time(const char *ruser, time_t timestamp)
+ {
+- struct utmp utbuf, *ut;
++ struct utmpx utbuf, *ut;
+ time_t oldest_login = 0;
+
+- setutent();
++ setutxent();
+ while(
+ #ifdef HAVE_GETUTENT_R
+- !getutent_r(&utbuf, &ut)
++ !getutxent_r(&utbuf, &ut)
+ #else
+- (ut = getutent()) != NULL
++ (ut = getutxent()) != NULL
+ #endif
+ ) {
+ if (ut->ut_type != USER_PROCESS) {
+@@ -218,7 +218,7 @@ check_login_time(const char *ruser, time
+ oldest_login = ut->ut_tv.tv_sec;
+ }
+ }
+- endutent();
++ endutxent();
+ if(oldest_login == 0 || timestamp < oldest_login) {
+ return PAM_AUTH_ERR;
+ }
+Index: Linux-PAM-1.2.1/modules/pam_unix/support.c
+===================================================================
+--- Linux-PAM-1.2.1.orig/modules/pam_unix/support.c
++++ Linux-PAM-1.2.1/modules/pam_unix/support.c
+@@ -13,7 +13,6 @@
+ #include <pwd.h>
+ #include <shadow.h>
+ #include <limits.h>
+-#include <utmp.h>
+ #include <errno.h>
+ #include <signal.h>
+ #include <ctype.h>
diff --git a/yocto-poky/meta/recipes-extended/pam/libpam_1.2.1.bb b/yocto-poky/meta/recipes-extended/pam/libpam_1.2.1.bb
index ac3097ef7..035335656 100644
--- a/yocto-poky/meta/recipes-extended/pam/libpam_1.2.1.bb
+++ b/yocto-poky/meta/recipes-extended/pam/libpam_1.2.1.bb
@@ -28,7 +28,9 @@ SRC_URI = "http://linux-pam.org/library/Linux-PAM-${PV}.tar.bz2 \
SRC_URI[md5sum] = "9dc53067556d2dd567808fd509519dd6"
SRC_URI[sha256sum] = "342b1211c0d3b203a7df2540a5b03a428a087bd8a48c17e49ae268f992b334d9"
-SRC_URI_append_libc-uclibc = " file://pam-no-innetgr.patch"
+SRC_URI_append_libc-uclibc = " file://pam-no-innetgr.patch \
+ file://use-utmpx.patch"
+
SRC_URI_append_libc-musl = " file://pam-no-innetgr.patch"
DEPENDS = "bison flex flex-native cracklib"
diff --git a/yocto-poky/meta/recipes-extended/quota/quota/remove_non_posix_types.patch b/yocto-poky/meta/recipes-extended/quota/quota/remove_non_posix_types.patch
index 5442d9854..06ff13cb9 100644
--- a/yocto-poky/meta/recipes-extended/quota/quota/remove_non_posix_types.patch
+++ b/yocto-poky/meta/recipes-extended/quota/quota/remove_non_posix_types.patch
@@ -183,3 +183,16 @@ Index: quota-tools/quot.h
} du_t;
#define NDU 60000
+Index: quota-tools/rquota_server.c
+===================================================================
+--- quota-tools.orig/rquota_server.c
++++ quota-tools/rquota_server.c
+@@ -60,7 +60,7 @@ extern char nfs_pseudoroot[PATH_MAX];
+ */
+ extern struct authunix_parms *unix_cred;
+
+-int in_group(gid_t * gids, u_int len, gid_t gid)
++int in_group(gid_t * gids, uint32_t len, gid_t gid)
+ {
+ gid_t *gidsp = gids + len;
+
diff --git a/yocto-poky/meta/recipes-extended/quota/quota_4.02.bb b/yocto-poky/meta/recipes-extended/quota/quota_4.02.bb
index 124b0a369..673d58428 100644
--- a/yocto-poky/meta/recipes-extended/quota/quota_4.02.bb
+++ b/yocto-poky/meta/recipes-extended/quota/quota_4.02.bb
@@ -23,7 +23,7 @@ DEPENDS = "gettext-native e2fsprogs"
inherit autotools-brokensep gettext pkgconfig
-CFLAGS += "-I=${includedir}/tirpc"
+CFLAGS += "-I${STAGING_INCDIR}/tirpc"
LDFLAGS += "-ltirpc"
ASNEEDED = ""
EXTRA_OEMAKE += 'STRIP=""'
diff --git a/yocto-poky/meta/recipes-extended/rpcbind/rpcbind/0001-uclibc-nss.patch b/yocto-poky/meta/recipes-extended/rpcbind/rpcbind/0001-uclibc-nss.patch
deleted file mode 100644
index afa55f3b7..000000000
--- a/yocto-poky/meta/recipes-extended/rpcbind/rpcbind/0001-uclibc-nss.patch
+++ /dev/null
@@ -1,30 +0,0 @@
-Upstream-Status: Pending
-
-From b8f0d7b7318ba344c25785d6f5cf3f8de98012d4 Mon Sep 17 00:00:00 2001
-From: Natanael Copa <ncopa@alpinelinux.org>
-Date: Tue, 2 Feb 2010 09:36:03 +0000
-Subject: [PATCH 1/2] uclibc-nss
-
----
- src/rpcbind.c | 4 ++++
- 1 files changed, 4 insertions(+), 0 deletions(-)
-
-diff --git a/src/rpcbind.c b/src/rpcbind.c
-index 525ffba..1fe1a60 100644
---- a/src/rpcbind.c
-+++ b/src/rpcbind.c
-@@ -67,7 +67,11 @@
- #include <pwd.h>
- #include <string.h>
- #include <errno.h>
-+#if defined(__UCLIBC__)
-+#define __nss_configure_lookup(x,y)
-+#else
- #include <nss.h>
-+#endif
- #include "config.h"
- #include "rpcbind.h"
-
---
-1.6.6.1
-
diff --git a/yocto-poky/meta/recipes-extended/rpcbind/rpcbind/cve-2015-7236.patch b/yocto-poky/meta/recipes-extended/rpcbind/rpcbind/cve-2015-7236.patch
new file mode 100644
index 000000000..f156290bf
--- /dev/null
+++ b/yocto-poky/meta/recipes-extended/rpcbind/rpcbind/cve-2015-7236.patch
@@ -0,0 +1,83 @@
+commit 06f7ebb1dade2f0dbf872ea2bedf17cff4734bdd
+Author: Olaf Kirch <okir@...e.de>
+Date: Thu Aug 6 16:27:20 2015 +0200
+
+ Fix memory corruption in PMAP_CALLIT code
+
+ - A PMAP_CALLIT call comes in on IPv4 UDP
+ - rpcbind duplicates the caller's address to a netbuf and stores it in
+ FINFO[0].caller_addr. caller_addr->buf now points to a memory region A
+ with a size of 16 bytes
+ - rpcbind forwards the call to the local service, receives a reply
+ - when processing the reply, it does this in xprt_set_caller:
+ xprt->xp_rtaddr = *FINFO[0].caller_addr
+ It sends out the reply, and then frees the netbuf caller_addr and
+ caller_addr.buf.
+ However, it does not clear xp_rtaddr, so xp_rtaddr.buf now refers
+ to memory region A, which is free.
+ - When the next call comes in on the UDP/IPv4 socket, svc_dg_recv will
+ be called, which will set xp_rtaddr to the client's address.
+ It will reuse the buffer inside xp_rtaddr, ie it will write a
+ sockaddr_in to region A
+
+ Some time down the road, an incoming TCP connection is accepted,
+ allocating a fresh SVCXPRT. The memory region A is inside the
+ new SVCXPRT
+
+ - While processing the TCP call, another UDP call comes in, again
+ overwriting region A with the client's address
+ - TCP client closes connection. In svc_destroy, we now trip over
+ the garbage left in region A
+
+ We ran into the case where a commercial scanner was triggering
+ occasional rpcbind segfaults. The core file that was captured showed
+ a corrupted xprt->xp_netid pointer that was really a sockaddr_in.
+
+ Signed-off-by: Olaf Kirch <okir@...e.de>
+
+ Upstream-Status: Backport
+
+ Signed-off-by: Li Zhou <li.zhou@windriver.com>
+---
+ src/rpcb_svc_com.c | 23 ++++++++++++++++++++++-
+ 1 file changed, 22 insertions(+), 1 deletion(-)
+
+Index: rpcbind-0.1.6+git20080930/src/rpcb_svc_com.c
+===================================================================
+--- rpcbind-0.1.6+git20080930.orig/src/rpcb_svc_com.c
++++ rpcbind-0.1.6+git20080930/src/rpcb_svc_com.c
+@@ -1298,12 +1298,33 @@ check_rmtcalls(struct pollfd *pfds, int
+ return (ncallbacks_found);
+ }
+
++/*
++ * This is really a helper function defined in libtirpc, but unfortunately, it hasn't
++ * been exported yet.
++ */
++static struct netbuf *
++__rpc_set_netbuf(struct netbuf *nb, const void *ptr, size_t len)
++{
++ if (nb->len != len) {
++ if (nb->len)
++ mem_free(nb->buf, nb->len);
++ nb->buf = mem_alloc(len);
++ if (nb->buf == NULL)
++ return NULL;
++
++ nb->maxlen = nb->len = len;
++ }
++ memcpy(nb->buf, ptr, len);
++ return nb;
++}
++
+ static void
+ xprt_set_caller(SVCXPRT *xprt, struct finfo *fi)
+ {
++ const struct netbuf *caller = fi->caller_addr;
+ u_int32_t *xidp;
+
+- *(svc_getrpccaller(xprt)) = *(fi->caller_addr);
++ __rpc_set_netbuf(svc_getrpccaller(xprt), caller->buf, caller->len);
+ xidp = __rpcb_get_dg_xidp(xprt);
+ *xidp = fi->caller_xid;
+ }
diff --git a/yocto-poky/meta/recipes-extended/rpcbind/rpcbind_0.2.3.bb b/yocto-poky/meta/recipes-extended/rpcbind/rpcbind_0.2.3.bb
index 333602185..ecd3ba8a5 100644
--- a/yocto-poky/meta/recipes-extended/rpcbind/rpcbind_0.2.3.bb
+++ b/yocto-poky/meta/recipes-extended/rpcbind/rpcbind_0.2.3.bb
@@ -19,11 +19,10 @@ SRC_URI = "${SOURCEFORGE_MIRROR}/rpcbind/rpcbind-${PV}.tar.bz2 \
file://rpcbind.conf \
file://rpcbind.socket \
file://rpcbind.service \
+ file://cve-2015-7236.patch \
"
MUSLPATCHES_libc-musl = "file://musl-sunrpc.patch"
-UCLIBCPATCHES_libc-uclibc = "file://0001-uclibc-nss.patch \
- "
UCLIBCPATCHES ?= ""
MUSLPATCHES ?= ""
diff --git a/yocto-poky/meta/recipes-extended/screen/screen/0001-Fix-stack-overflow-due-to-too-deep-recursion.patch b/yocto-poky/meta/recipes-extended/screen/screen/0001-Fix-stack-overflow-due-to-too-deep-recursion.patch
new file mode 100644
index 000000000..2bc9a59be
--- /dev/null
+++ b/yocto-poky/meta/recipes-extended/screen/screen/0001-Fix-stack-overflow-due-to-too-deep-recursion.patch
@@ -0,0 +1,57 @@
+Bug: 45713
+
+How to reproduce:
+Run this command inside screen
+$ printf '\x1b[10000000T'
+
+screen will recursively call MScrollV to depth n/256.
+This is time-consuming and will overflow the stack if n is huge.
+
+Fixes CVE-2015-6806
+
+Upstream-Status: Backport
+
+Signed-off-by: Kuang-che Wu <kcwu@csie.org>
+Signed-off-by: Amadeusz Sławiński <amade@asmblr.net>
+Signed-off-by: Maxin B. John <maxin.john@intel.com>
+---
+diff -Naur screen-4.3.1-orig/ansi.c screen-4.3.1/ansi.c
+--- screen-4.3.1-orig/ansi.c 2015-06-29 00:22:55.000000000 +0300
++++ screen-4.3.1/ansi.c 2015-10-06 13:13:58.297648039 +0300
+@@ -2502,13 +2502,13 @@
+ return;
+ if (n > 0)
+ {
++ if (ye - ys + 1 < n)
++ n = ye - ys + 1;
+ if (n > 256)
+ {
+ MScrollV(p, n - 256, ys, ye, bce);
+ n = 256;
+ }
+- if (ye - ys + 1 < n)
+- n = ye - ys + 1;
+ #ifdef COPY_PASTE
+ if (compacthist)
+ {
+@@ -2562,15 +2562,15 @@
+ }
+ else
+ {
+- if (n < -256)
+- {
+- MScrollV(p, n + 256, ys, ye, bce);
+- n = -256;
+- }
+ n = -n;
+ if (ye - ys + 1 < n)
+ n = ye - ys + 1;
+
++ if (n > 256)
++ {
++ MScrollV(p, - (n - 256), ys, ye, bce);
++ n = 256;
++ }
+ ml = p->w_mlines + ye;
+ /* Clear lines */
+ for (i = ye; i > ye - n; i--, ml--)
diff --git a/yocto-poky/meta/recipes-extended/screen/screen_4.3.1.bb b/yocto-poky/meta/recipes-extended/screen/screen_4.3.1.bb
index 92457af17..00d878b2c 100644
--- a/yocto-poky/meta/recipes-extended/screen/screen_4.3.1.bb
+++ b/yocto-poky/meta/recipes-extended/screen/screen_4.3.1.bb
@@ -24,6 +24,7 @@ SRC_URI = "${GNU_MIRROR}/screen/screen-${PV}.tar.gz \
file://Avoid-mis-identifying-systems-as-SVR4.patch \
file://0001-fix-for-multijob-build.patch \
file://0002-comm.h-now-depends-on-term.h.patch \
+ file://0001-Fix-stack-overflow-due-to-too-deep-recursion.patch \
"
SRC_URI[md5sum] = "5bb3b0ff2674e29378c31ad3411170ad"
diff --git a/yocto-poky/meta/recipes-extended/sudo/sudo_1.8.14p3.bb b/yocto-poky/meta/recipes-extended/sudo/sudo_1.8.14p3.bb
index 6b3cd6dbf..b93112fa3 100644
--- a/yocto-poky/meta/recipes-extended/sudo/sudo_1.8.14p3.bb
+++ b/yocto-poky/meta/recipes-extended/sudo/sudo_1.8.14p3.bb
@@ -22,7 +22,7 @@ EXTRA_OECONF += " \
do_install_append () {
if [ "${@bb.utils.contains('DISTRO_FEATURES', 'pam', 'pam', '', d)}" = "pam" ]; then
- install -D -m 664 ${WORKDIR}/sudo.pam ${D}/${sysconfdir}/pam.d/sudo
+ install -D -m 644 ${WORKDIR}/sudo.pam ${D}/${sysconfdir}/pam.d/sudo
fi
chmod 4111 ${D}${bindir}/sudo
diff --git a/yocto-poky/meta/recipes-extended/sysstat/sysstat/0001-Include-needed-headers-explicitly.patch b/yocto-poky/meta/recipes-extended/sysstat/sysstat/0001-Include-needed-headers-explicitly.patch
new file mode 100644
index 000000000..c12652307
--- /dev/null
+++ b/yocto-poky/meta/recipes-extended/sysstat/sysstat/0001-Include-needed-headers-explicitly.patch
@@ -0,0 +1,62 @@
+From 42325faa88d64cce799977d611b2792beb154643 Mon Sep 17 00:00:00 2001
+From: Khem Raj <raj.khem@gmail.com>
+Date: Mon, 14 Sep 2015 08:36:59 +0000
+Subject: [PATCH] Include needed headers explicitly
+
+on glibc these headers get pulled in indirectly via other .h files,
+but the right fix is to include them directly where they are used
+
+fixes
+
+error: use of undeclared identifier 'PATH_MAX'
+error: called object type 'unsigned int' is not a function or function pointer
+dm_major = major(aux.st_rdev);
+
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+---
+Upstream-Status: Pending
+
+ common.c | 1 +
+ ioconf.c | 1 +
+ sa_common.c | 1 +
+ 3 files changed, 3 insertions(+)
+
+diff --git a/common.c b/common.c
+index a23155b..ad86446 100644
+--- a/common.c
++++ b/common.c
+@@ -20,6 +20,7 @@
+ */
+
+ #include <stdio.h>
++#include <limits.h>
+ #include <string.h>
+ #include <stdlib.h>
+ #include <time.h>
+diff --git a/ioconf.c b/ioconf.c
+index 7d88c5d..6d67691 100644
+--- a/ioconf.c
++++ b/ioconf.c
+@@ -27,6 +27,7 @@
+ #include <errno.h>
+ #include <dirent.h>
+ #include <sys/stat.h>
++#include <sys/types.h>
+
+ #include "ioconf.h"
+ #include "common.h"
+diff --git a/sa_common.c b/sa_common.c
+index b7351d9..c9e3299 100644
+--- a/sa_common.c
++++ b/sa_common.c
+@@ -20,6 +20,7 @@
+ */
+
+ #include <stdio.h>
++#include <limits.h>
+ #include <string.h>
+ #include <stdlib.h>
+ #include <time.h>
+--
+2.5.2
+
diff --git a/yocto-poky/meta/recipes-extended/sysstat/sysstat_11.1.5.bb b/yocto-poky/meta/recipes-extended/sysstat/sysstat_11.1.5.bb
index 69d2ec26b..bff861617 100644
--- a/yocto-poky/meta/recipes-extended/sysstat/sysstat_11.1.5.bb
+++ b/yocto-poky/meta/recipes-extended/sysstat/sysstat_11.1.5.bb
@@ -2,6 +2,8 @@ require sysstat.inc
LIC_FILES_CHKSUM = "file://COPYING;md5=8ca43cbc842c2336e835926c2166c28b"
+SRC_URI += "file://0001-Include-needed-headers-explicitly.patch"
+
SRC_URI[md5sum] = "4d8e6e72d057189a1660462a678d9ada"
SRC_URI[sha256sum] = "feb3a90d86ffd69cf5b88144a8876ae05bd42384f559676f08100671589fa2bb"
diff --git a/yocto-poky/meta/recipes-extended/tar/tar.inc b/yocto-poky/meta/recipes-extended/tar/tar.inc
index b339c4338..93e4da114 100644
--- a/yocto-poky/meta/recipes-extended/tar/tar.inc
+++ b/yocto-poky/meta/recipes-extended/tar/tar.inc
@@ -22,10 +22,12 @@ do_install () {
}
do_install_append_class-target() {
- install -d ${D}${base_bindir}
- mv ${D}${bindir}/tar ${D}${base_bindir}/tar
- mv ${D}${bindir}/gtar ${D}${base_bindir}/gtar
- rmdir ${D}${bindir}/
+ if [ "${base_bindir}" != "${bindir}" ]; then
+ install -d ${D}${base_bindir}
+ mv ${D}${bindir}/tar ${D}${base_bindir}/tar
+ mv ${D}${bindir}/gtar ${D}${base_bindir}/gtar
+ rmdir ${D}${bindir}/
+ fi
}
PACKAGES =+ "${PN}-rmt"
diff --git a/yocto-poky/meta/recipes-extended/texinfo/texinfo_6.0.bb b/yocto-poky/meta/recipes-extended/texinfo/texinfo_6.0.bb
index 8fb715a11..a8702cfca 100644
--- a/yocto-poky/meta/recipes-extended/texinfo/texinfo_6.0.bb
+++ b/yocto-poky/meta/recipes-extended/texinfo/texinfo_6.0.bb
@@ -10,7 +10,7 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=d32239bcb673463ab874e80d47fae504"
PROVIDES_append_class-native = " texinfo-replacement-native"
def compress_pkg(d):
- if "compress_doc" in (d.getVar("INHERIT", True) or "").split():
+ if bb.data.inherits_class('compress_doc', d):
compress = d.getVar("DOC_COMPRESS", True)
if compress == "gz":
return "gzip"
diff --git a/yocto-poky/meta/recipes-extended/tzcode/tzcode-native_2015f.bb b/yocto-poky/meta/recipes-extended/tzcode/tzcode-native_2015f.bb
deleted file mode 100644
index a8865a316..000000000
--- a/yocto-poky/meta/recipes-extended/tzcode/tzcode-native_2015f.bb
+++ /dev/null
@@ -1,25 +0,0 @@
-# note that we allow for us to use data later than our code version
-#
-DESCRIPTION = "tzcode, timezone zoneinfo utils -- zic, zdump, tzselect"
-LICENSE = "PD & BSD"
-
-LIC_FILES_CHKSUM = "file://${WORKDIR}/README;md5=d0ff93a73dd5bc3c6e724bb4343760f6"
-
-SRC_URI =" ftp://ftp.iana.org/tz/releases/tzcode${PV}.tar.gz;name=tzcode \
- ftp://ftp.iana.org/tz/releases/tzdata2015f.tar.gz;name=tzdata"
-
-SRC_URI[tzcode.md5sum] = "19578d432ba8b92f73406a17a9bc268d"
-SRC_URI[tzcode.sha256sum] = "0c95e0a42bb61141f790f4f5f204b954d7654c894aa54a594a215d6f38de84ae"
-SRC_URI[tzdata.md5sum] = "e3b82732d20e973e48af1c6f13df9a1d"
-SRC_URI[tzdata.sha256sum] = "959f81b541e042ecb13c50097d264ae92ff03a57979c478dbcf24d5da242531d"
-
-S = "${WORKDIR}"
-
-inherit native
-
-do_install () {
- install -d ${D}${bindir}/
- install -m 755 zic ${D}${bindir}/
- install -m 755 zdump ${D}${bindir}/
- install -m 755 tzselect ${D}${bindir}/
-}
diff --git a/yocto-poky/meta/recipes-extended/tzcode/tzcode-native_2016a.bb b/yocto-poky/meta/recipes-extended/tzcode/tzcode-native_2016a.bb
new file mode 100644
index 000000000..76f97f0b5
--- /dev/null
+++ b/yocto-poky/meta/recipes-extended/tzcode/tzcode-native_2016a.bb
@@ -0,0 +1,25 @@
+# note that we allow the data to be newer than our code version
+#
+SUMMARY = "tzcode, timezone zoneinfo utils -- zic, zdump, tzselect"
+LICENSE = "PD & BSD & BSD-3-Clause"
+
+LIC_FILES_CHKSUM = "file://LICENSE;md5=76ae2becfcb9a685041c6f166b44c2c2"
+
+SRC_URI =" ftp://ftp.iana.org/tz/releases/tzcode${PV}.tar.gz;name=tzcode \
+ ftp://ftp.iana.org/tz/releases/tzdata${PV}.tar.gz;name=tzdata"
+
+SRC_URI[tzcode.md5sum] = "f5e0299925631da7cf82d8ce1205111d"
+SRC_URI[tzcode.sha256sum] = "11ae66d59b844e8c6c81914c9dd73b666627bd7792855ba9de195eee4520c28d"
+SRC_URI[tzdata.md5sum] = "0d3123eb1b453ec0620822bd65be4c42"
+SRC_URI[tzdata.sha256sum] = "5efa6b324e64ef921ef700ac3273a51895f672684a30e342f68e47871c6a8cd1"
+
+S = "${WORKDIR}"
+
+inherit native
+
+do_install () {
+ install -d ${D}${bindir}/
+ install -m 755 zic ${D}${bindir}/
+ install -m 755 zdump ${D}${bindir}/
+ install -m 755 tzselect ${D}${bindir}/
+}
diff --git a/yocto-poky/meta/recipes-extended/tzdata/tzdata_2015f.bb b/yocto-poky/meta/recipes-extended/tzdata/tzdata_2016a.bb
index 7cda40daf..6ba5f81b1 100644
--- a/yocto-poky/meta/recipes-extended/tzdata/tzdata_2015f.bb
+++ b/yocto-poky/meta/recipes-extended/tzdata/tzdata_2016a.bb
@@ -1,14 +1,15 @@
-DESCRIPTION = "Timezone data"
+SUMMARY = "Timezone data"
HOMEPAGE = "http://www.iana.org/time-zones"
SECTION = "base"
-LICENSE = "PD & BSD"
-LIC_FILES_CHKSUM = "file://asia;beginline=2;endline=3;md5=996a9811747aa48db91ed239e5b355a1"
+LICENSE = "PD & BSD & BSD-3-Clause"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=76ae2becfcb9a685041c6f166b44c2c2"
+
DEPENDS = "tzcode-native"
SRC_URI = "ftp://ftp.iana.org/tz/releases/tzdata${PV}.tar.gz;name=tzdata"
-SRC_URI[tzdata.md5sum] = "e3b82732d20e973e48af1c6f13df9a1d"
-SRC_URI[tzdata.sha256sum] = "959f81b541e042ecb13c50097d264ae92ff03a57979c478dbcf24d5da242531d"
+SRC_URI[tzdata.md5sum] = "0d3123eb1b453ec0620822bd65be4c42"
+SRC_URI[tzdata.sha256sum] = "5efa6b324e64ef921ef700ac3273a51895f672684a30e342f68e47871c6a8cd1"
inherit allarch
diff --git a/yocto-poky/meta/recipes-extended/unzip/unzip/CVE-2015-7696.patch b/yocto-poky/meta/recipes-extended/unzip/unzip/CVE-2015-7696.patch
new file mode 100644
index 000000000..ea93823cb
--- /dev/null
+++ b/yocto-poky/meta/recipes-extended/unzip/unzip/CVE-2015-7696.patch
@@ -0,0 +1,38 @@
+Upstream-Status: Backport
+Signed-off-by: Tudor Florea <tudor.flore@enea.com>
+
+From 68efed87fabddd450c08f3112f62a73f61d493c9 Mon Sep 17 00:00:00 2001
+From: Petr Stodulka <pstodulk@redhat.com>
+Date: Mon, 14 Sep 2015 18:23:17 +0200
+Subject: [PATCH 1/2] upstream fix for heap overflow
+
+https://bugzilla.redhat.com/attachment.cgi?id=1073002
+---
+ crypt.c | 12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+diff --git a/crypt.c b/crypt.c
+index 784e411..a8975f2 100644
+--- a/crypt.c
++++ b/crypt.c
+@@ -465,7 +465,17 @@ int decrypt(__G__ passwrd)
+ GLOBAL(pInfo->encrypted) = FALSE;
+ defer_leftover_input(__G);
+ for (n = 0; n < RAND_HEAD_LEN; n++) {
+- b = NEXTBYTE;
++ /* 2012-11-23 SMS. (OUSPG report.)
++ * Quit early if compressed size < HEAD_LEN. The resulting
++ * error message ("unable to get password") could be improved,
++ * but it's better than trying to read nonexistent data, and
++ * then continuing with a negative G.csize. (See
++ * fileio.c:readbyte()).
++ */
++ if ((b = NEXTBYTE) == (ush)EOF)
++ {
++ return PK_ERR;
++ }
+ h[n] = (uch)b;
+ Trace((stdout, " (%02x)", h[n]));
+ }
+--
+2.4.6
diff --git a/yocto-poky/meta/recipes-extended/unzip/unzip/CVE-2015-7697.patch b/yocto-poky/meta/recipes-extended/unzip/unzip/CVE-2015-7697.patch
new file mode 100644
index 000000000..da6898833
--- /dev/null
+++ b/yocto-poky/meta/recipes-extended/unzip/unzip/CVE-2015-7697.patch
@@ -0,0 +1,31 @@
+Upstream-Status: Backport
+Signed-off-by: Tudor Florea <tudor.flore@enea.com>
+
+From bd8a743ee0a77e65ad07ef4196c4cd366add3f26 Mon Sep 17 00:00:00 2001
+From: Kamil Dudka <kdudka@redhat.com>
+Date: Mon, 14 Sep 2015 18:24:56 +0200
+Subject: [PATCH 2/2] fix infinite loop when extracting empty bzip2 data
+
+---
+ extract.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/extract.c b/extract.c
+index 7134bfe..29db027 100644
+--- a/extract.c
++++ b/extract.c
+@@ -2733,6 +2733,12 @@ __GDEF
+ int repeated_buf_err;
+ bz_stream bstrm;
+
++ if (G.incnt <= 0 && G.csize <= 0L) {
++ /* avoid an infinite loop */
++ Trace((stderr, "UZbunzip2() got empty input\n"));
++ return 2;
++ }
++
+ #if (defined(DLL) && !defined(NO_SLIDE_REDIR))
+ if (G.redirect_slide)
+ wsize = G.redirect_size, redirSlide = G.redirect_buffer;
+--
+2.4.6
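
Similarly, a small self-contained sketch of the guard the hunk above adds: refuse to enter the decompression loop when there is no buffered input and no compressed bytes left. Illustrative only; the struct and names are invented and merely mimic unzip's G.incnt/G.csize globals.

#include <stdio.h>

struct stream_state {
    long incnt;    /* bytes currently buffered, like G.incnt */
    long csize;    /* compressed bytes still expected, like G.csize */
};

static int bunzip2_stream(struct stream_state *s)
{
    if (s->incnt <= 0 && s->csize <= 0L) {
        /* nothing to decode: returning here avoids the infinite loop */
        fprintf(stderr, "empty compressed stream\n");
        return 2;
    }
    /* ... the real decompression loop would run here ... */
    return 0;
}

int main(void)
{
    struct stream_state empty = { 0, 0 };
    return bunzip2_stream(&empty);
}
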
diff --git a/yocto-poky/meta/recipes-extended/unzip/unzip/unzip-6.0_overflow3.diff b/yocto-poky/meta/recipes-extended/unzip/unzip/cve-2014-9636.patch
index 0a0bfbbb1..0a0bfbbb1 100644
--- a/yocto-poky/meta/recipes-extended/unzip/unzip/unzip-6.0_overflow3.diff
+++ b/yocto-poky/meta/recipes-extended/unzip/unzip/cve-2014-9636.patch
diff --git a/yocto-poky/meta/recipes-extended/unzip/unzip_6.0.bb b/yocto-poky/meta/recipes-extended/unzip/unzip_6.0.bb
index 4a0a713a6..b38632378 100644
--- a/yocto-poky/meta/recipes-extended/unzip/unzip_6.0.bb
+++ b/yocto-poky/meta/recipes-extended/unzip/unzip_6.0.bb
@@ -10,10 +10,12 @@ SRC_URI = "ftp://ftp.info-zip.org/pub/infozip/src/unzip60.tgz \
file://avoid-strip.patch \
file://define-ldflags.patch \
file://06-unzip60-alt-iconv-utf8_CVE-2015-1315.patch \
- file://unzip-6.0_overflow3.diff \
+ file://cve-2014-9636.patch \
file://09-cve-2014-8139-crc-overflow.patch \
file://10-cve-2014-8140-test-compr-eb.patch \
file://11-cve-2014-8141-getzip64data.patch \
+ file://CVE-2015-7696.patch \
+ file://CVE-2015-7697.patch \
"
SRC_URI[md5sum] = "62b490407489521db863b523a7f86375"
diff --git a/yocto-poky/meta/recipes-extended/xz/xz_5.2.1.bb b/yocto-poky/meta/recipes-extended/xz/xz_5.2.1.bb
index e0ae48fe4..cf7fba656 100644
--- a/yocto-poky/meta/recipes-extended/xz/xz_5.2.1.bb
+++ b/yocto-poky/meta/recipes-extended/xz/xz_5.2.1.bb
@@ -6,7 +6,7 @@ SECTION = "base"
# which is GPLv3 is an m4 macro which isn't shipped in any of our packages,
# and the LGPL bits are under lib/, which appears to be used for libgnu, which
# appears to be used for DOS builds. So we're left with GPLv2+ and PD.
-LICENSE = "GPLv2+ & GPLv3+ & LGPLv2.1+ & PD"
+LICENSE = "GPLv2+ & GPL-3.0-with-autoconf-exception & LGPLv2.1+ & PD"
LICENSE_${PN} = "GPLv2+"
LICENSE_${PN}-dev = "GPLv2+"
LICENSE_${PN}-staticdev = "GPLv2+"
diff --git a/yocto-poky/meta/recipes-gnome/epiphany/epiphany_3.16.3.bb b/yocto-poky/meta/recipes-gnome/epiphany/epiphany_3.16.3.bb
index 506fb25dd..c3745c0ed 100644
--- a/yocto-poky/meta/recipes-gnome/epiphany/epiphany_3.16.3.bb
+++ b/yocto-poky/meta/recipes-gnome/epiphany/epiphany_3.16.3.bb
@@ -5,7 +5,10 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=751419260aa954499f7abaabaa882bbe"
DEPENDS = "libsoup-2.4 webkitgtk gtk+3 iso-codes ca-certificates avahi libnotify gcr libwnck3 \
gsettings-desktop-schemas gnome-desktop3"
-inherit gnomebase gsettings
+inherit gnomebase gsettings distro_features_check
+# libwnck3 is x11 only
+REQUIRED_DISTRO_FEATURES = "x11"
+
SRC_URI += "file://0001-yelp.m4-drop-the-check-for-itstool.patch"
SRC_URI[archive.md5sum] = "3296af4532b8019775f4b40d21a341ae"
SRC_URI[archive.sha256sum] = "d527f1770779ec22d955aeb13b148a846a26144e433ff0480c981af80e2390b1"
diff --git a/yocto-poky/meta/recipes-gnome/gcr/gcr_3.16.0.bb b/yocto-poky/meta/recipes-gnome/gcr/gcr_3.16.0.bb
index 8b5b6e446..e50b3a815 100644
--- a/yocto-poky/meta/recipes-gnome/gcr/gcr_3.16.0.bb
+++ b/yocto-poky/meta/recipes-gnome/gcr/gcr_3.16.0.bb
@@ -7,7 +7,9 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=55ca817ccb7d5b5b66355690e9abc605"
DEPENDS = "gtk+3 p11-kit glib-2.0 libgcrypt vala"
-inherit autotools gnomebase gtk-icon-cache gtk-doc
+inherit autotools gnomebase gtk-icon-cache gtk-doc distro_features_check
+# depends on gtk+3, but also x11 through gtk+-x11
+REQUIRED_DISTRO_FEATURES = "x11"
SRC_URI[archive.md5sum] = "d5835680be0b6a838e02a528d5378d9c"
SRC_URI[archive.sha256sum] = "ecfe8df41cc88158364bb15addc670b11e539fe844742983629ba2323888d075"
@@ -16,3 +18,6 @@ FILES_${PN} += " \
${datadir}/dbus-1 \
${datadir}/gcr-3 \
"
+
+# http://errors.yoctoproject.org/Errors/Details/20229/
+ARM_INSTRUCTION_SET = "arm"
diff --git a/yocto-poky/meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf/CVE-2015-7674.patch b/yocto-poky/meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf/CVE-2015-7674.patch
new file mode 100644
index 000000000..d516e88ab
--- /dev/null
+++ b/yocto-poky/meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf/CVE-2015-7674.patch
@@ -0,0 +1,39 @@
+From e9a5704edaa9aee9498f1fbf6e1b70fcce2e55aa Mon Sep 17 00:00:00 2001
+From: Benjamin Otte <otte@redhat.com>
+Date: Tue, 22 Sep 2015 22:44:51 +0200
+Subject: [PATCH] pixops: Don't overflow variables when shifting them
+
+If we shift by 16 bits we need to be sure those 16 bits actually exist.
+They do now.
+
+Upstream-status: Backport
+https://git.gnome.org/browse/gdk-pixbuf/commit/?id=e9a5704edaa9aee9498f1fbf6e1b70fcce2e55aa
+
+CVE: CVE-2015-7674
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ gdk-pixbuf/pixops/pixops.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+Index: gdk-pixbuf-2.30.8/gdk-pixbuf/pixops/pixops.c
+===================================================================
+--- gdk-pixbuf-2.30.8.orig/gdk-pixbuf/pixops/pixops.c
++++ gdk-pixbuf-2.30.8/gdk-pixbuf/pixops/pixops.c
+@@ -264,11 +264,11 @@ pixops_scale_nearest (guchar *des
+ double scale_x,
+ double scale_y)
+ {
+- int i;
+- int x;
+- int x_step = (1 << SCALE_SHIFT) / scale_x;
+- int y_step = (1 << SCALE_SHIFT) / scale_y;
+- int xmax, xstart, xstop, x_pos, y_pos;
++ gint64 i;
++ gint64 x;
++ gint64 x_step = (1 << SCALE_SHIFT) / scale_x;
++ gint64 y_step = (1 << SCALE_SHIFT) / scale_y;
++ gint64 xmax, xstart, xstop, x_pos, y_pos;
+ const guchar *p;
+
+ #define INNER_LOOP(SRC_CHANNELS,DEST_CHANNELS,ASSIGN_PIXEL) \
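
To see why the widening above matters, here is a tiny standalone C check (not gdk-pixbuf code): with an extreme downscale factor the step value (1 << 16) / scale no longer fits in a 32-bit int, so the old int variables could overflow, while a 64-bit type still holds it.

#include <stdio.h>
#include <stdint.h>

#define SCALE_SHIFT 16

int main(void)
{
    double scale_x = 1e-5;                        /* pathological downscale */
    double step = (1 << SCALE_SHIFT) / scale_x;   /* about 6.5e9 */

    printf("step value        : %.0f\n", step);
    printf("fits in 32-bit int: %s\n", step <= (double) INT32_MAX ? "yes" : "no");
    printf("fits in 64-bit int: %s\n", step <= (double) INT64_MAX ? "yes" : "no");
    return 0;
}
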
diff --git a/yocto-poky/meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf_2.30.8.bb b/yocto-poky/meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf_2.30.8.bb
index 07c2dcec1..dcd01b14e 100644
--- a/yocto-poky/meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf_2.30.8.bb
+++ b/yocto-poky/meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf_2.30.8.bb
@@ -9,7 +9,6 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=3bf50002aefd002f49e7bb854063f7e7 \
SECTION = "libs"
DEPENDS = "glib-2.0"
-DEPENDS_append_linuxstdbase = " virtual/libx11"
MAJ_VER = "${@oe.utils.trim_version("${PV}", 2)}"
@@ -19,6 +18,7 @@ SRC_URI = "${GNOME_MIRROR}/${BPN}/${MAJ_VER}/${BPN}-${PV}.tar.xz \
file://run-ptest \
file://fatal-loader.patch \
file://0001-pixops-Be-more-careful-about-integer-overflow.patch \
+ file://CVE-2015-7674.patch \
"
SRC_URI[md5sum] = "4fed0d54432f1b69fc6e66e608bd5542"
@@ -50,18 +50,19 @@ PACKAGES =+ "${PN}-xlib"
FILES_${PN}-xlib = "${libdir}/*pixbuf_xlib*${SOLIBS}"
ALLOW_EMPTY_${PN}-xlib = "1"
-FILES_${PN} = "${bindir}/gdk-pixbuf-query-loaders \
- ${bindir}/gdk-pixbuf-pixdata \
+FILES_${PN} = "${libdir}/gdk-pixbuf-2.0/gdk-pixbuf-query-loaders \
${libdir}/lib*.so.*"
FILES_${PN}-dev += " \
${bindir}/gdk-pixbuf-csource \
+ ${bindir}/gdk-pixbuf-pixdata \
${includedir}/* \
${libdir}/gdk-pixbuf-2.0/${LIBV}/loaders/*.la \
"
FILES_${PN}-dbg += " \
- ${libdir}/.debug/* \
+ ${libdir}/.debug/* \
+ ${libdir}/gdk-pixbuf-2.0/.debug/* \
${libdir}/gdk-pixbuf-2.0/${LIBV}/loaders/.debug/* \
"
@@ -81,6 +82,12 @@ python populate_packages_prepend () {
d.appendVar("RDEPENDS_gdk-pixbuf-ptest", " " + packages)
}
+do_install_append() {
+ # Move gdk-pixbuf-query-loaders into libdir so it is always available
+ # in multilib builds.
+ mv ${D}/${bindir}/gdk-pixbuf-query-loaders ${D}/${libdir}/gdk-pixbuf-2.0/
+}
+
do_install_append_class-native() {
find ${D}${libdir} -name "libpixbufloader-*.la" -exec rm \{\} \;
@@ -90,8 +97,17 @@ do_install_append_class-native() {
create_wrapper ${D}/${bindir}/gdk-pixbuf-pixdata \
GDK_PIXBUF_MODULE_FILE=${STAGING_LIBDIR_NATIVE}/gdk-pixbuf-2.0/${LIBV}/loaders.cache
- create_wrapper ${D}/${bindir}/gdk-pixbuf-query-loaders \
+ create_wrapper ${D}/${libdir}/gdk-pixbuf-2.0/gdk-pixbuf-query-loaders \
GDK_PIXBUF_MODULE_FILE=${STAGING_LIBDIR_NATIVE}/gdk-pixbuf-2.0/${LIBV}/loaders.cache \
GDK_PIXBUF_MODULEDIR=${STAGING_LIBDIR_NATIVE}/gdk-pixbuf-2.0/${LIBV}/loaders
}
BBCLASSEXTEND = "native"
+
+SSTATEPREINSTFUNCS_append_class-native = " gdkpixbuf_sstate_preinst"
+SYSROOT_PREPROCESS_FUNCS_append_class-native = " gdkpixbuf_sstate_preinst"
+
+gdkpixbuf_sstate_preinst() {
+ if [ "${BB_CURRENTTASK}" = "populate_sysroot" ]; then
+ rm -rf ${STAGING_LIBDIR_NATIVE}/gdk-pixbuf-2.0/${LIBV}/*
+ fi
+}
diff --git a/yocto-poky/meta/recipes-gnome/gnome-desktop/gnome-desktop3_3.16.2.bb b/yocto-poky/meta/recipes-gnome/gnome-desktop/gnome-desktop3_3.16.2.bb
index 1f2f06c84..3765697f2 100644
--- a/yocto-poky/meta/recipes-gnome/gnome-desktop/gnome-desktop3_3.16.2.bb
+++ b/yocto-poky/meta/recipes-gnome/gnome-desktop/gnome-desktop3_3.16.2.bb
@@ -10,7 +10,10 @@ inherit gnome pkgconfig
SRC_URI[archive.md5sum] = "ab5bf4cc94ad63639f42adcc1542b1f0"
SRC_URI[archive.sha256sum] = "3a8f196b46eb9dbd3ba2afb8fb5fef6a8825539d449a02181311242e22227bd0"
-DEPENDS += "gsettings-desktop-schemas gconf libxrandr virtual/libx11 gtk+3 glib-2.0 gnome-doc-utils gnome-common startup-notification iso-codes"
+DEPENDS += "gsettings-desktop-schemas gconf libxrandr virtual/libx11 gtk+3 glib-2.0 gnome-doc-utils gnome-common startup-notification xkeyboard-config iso-codes"
+
+inherit distro_features_check
+REQUIRED_DISTRO_FEATURES = "x11"
EXTRA_OECONF = "--disable-desktop-docs"
diff --git a/yocto-poky/meta/recipes-gnome/gnome/gnome-doc-utils.inc b/yocto-poky/meta/recipes-gnome/gnome/gnome-doc-utils.inc
index 958750690..8adfac7d7 100644
--- a/yocto-poky/meta/recipes-gnome/gnome/gnome-doc-utils.inc
+++ b/yocto-poky/meta/recipes-gnome/gnome/gnome-doc-utils.inc
@@ -15,9 +15,6 @@ CLEANBROKEN = "1"
EXTRA_OECONF += "--disable-scrollkeeper"
do_install_append() {
- mkdir -p ${D}${datadir}/xml/gnome/xslt/
- cp -pPr ${S}/xslt/* ${D}${datadir}/xml/gnome/xslt/
-
chown -R root:root ${D}
}
diff --git a/yocto-poky/meta/recipes-gnome/gtk+/gtk+.inc b/yocto-poky/meta/recipes-gnome/gtk+/gtk+.inc
index be5273d62..a197b9d96 100644
--- a/yocto-poky/meta/recipes-gnome/gtk+/gtk+.inc
+++ b/yocto-poky/meta/recipes-gnome/gtk+/gtk+.inc
@@ -11,7 +11,7 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=3bf50002aefd002f49e7bb854063f7e7"
SECTION = "libs"
inherit distro_features_check
-ANY_OF_DISTRO_FEATURES = "directfb x11"
+ANY_OF_DISTRO_FEATURES = "${GTK2DISTROFEATURES}"
X11DEPENDS = "virtual/libx11 libxext libxcursor libxrandr libxdamage libxrender libxcomposite"
DEPENDS = "glib-2.0 pango atk jpeg libpng gdk-pixbuf-native docbook-utils-native \
diff --git a/yocto-poky/meta/recipes-gnome/gtk+/gtk+3.inc b/yocto-poky/meta/recipes-gnome/gtk+/gtk+3.inc
index f29f0d303..22a40d8f0 100644
--- a/yocto-poky/meta/recipes-gnome/gtk+/gtk+3.inc
+++ b/yocto-poky/meta/recipes-gnome/gtk+/gtk+3.inc
@@ -10,7 +10,8 @@ DEPENDS = "glib-2.0 cairo pango atk jpeg libpng gdk-pixbuf \
LICENSE = "LGPLv2 & LGPLv2+ & LGPLv2.1+"
-inherit autotools pkgconfig gtk-doc update-alternatives gtk-immodules-cache gsettings
+inherit autotools pkgconfig gtk-doc update-alternatives gtk-immodules-cache gsettings distro_features_check
+ANY_OF_DISTRO_FEATURES = "${GTK3DISTROFEATURES}"
# This should be in autotools.bbclass, but until something elses uses it putting
# it here avoids rebuilding everything.
@@ -31,9 +32,11 @@ EXTRA_OECONF += " \
"
PACKAGECONFIG ??= "${@bb.utils.contains("DISTRO_FEATURES", "x11", "x11", "", d)} \
+ ${@bb.utils.contains("DISTRO_FEATURES", "opengl x11", "glx", "", d)} \
${@bb.utils.contains("DISTRO_FEATURES", "wayland", "wayland", "", d)}"
PACKAGECONFIG[x11] = "--enable-x11-backend,--disable-x11-backend,at-spi2-atk fontconfig libx11 libxext libxcursor libxi libxdamage libxrandr libxrender libxcomposite libxfixes"
+PACKAGECONFIG[glx] = "--enable-glx,--disable-glx,,libgl"
PACKAGECONFIG[wayland] = "--enable-wayland-backend,--disable-wayland-backend,wayland libxkbcommon virtual/mesa"
do_install_append() {
diff --git a/yocto-poky/meta/recipes-gnome/gtk+/gtk+3/Do-not-try-to-initialize-GL-without-libGL.patch b/yocto-poky/meta/recipes-gnome/gtk+/gtk+3/Do-not-try-to-initialize-GL-without-libGL.patch
new file mode 100644
index 000000000..c8c480c5e
--- /dev/null
+++ b/yocto-poky/meta/recipes-gnome/gtk+/gtk+3/Do-not-try-to-initialize-GL-without-libGL.patch
@@ -0,0 +1,60 @@
+From fc22058a10db913534f11348f86681fe9e1838e5 Mon Sep 17 00:00:00 2001
+From: Jussi Kukkonen <jussi.kukkonen@intel.com>
+Date: Fri, 16 Oct 2015 16:35:16 +0300
+Subject: [PATCH] Do not try to initialize GL without libGL
+
+_gdk_x11_screen_update_visuals_for_gl() will end up calling epoxy's
+GLX API, which will exit() if libGL.so.1 is not present. We do not
+want that to happen, and we don't want every app to have to set the
+"GDK_GL=disabled" environment variable: so use an #ifdef set based on
+the opengl distro feature.
+
+Upstream is not interested in the fix as it is: Either epoxy should be
+fixed (to not exit) or GTK+ possibly could do some additional probing
+before calling epoxy APIs.
+
+Upstream-Status: Denied
+Signed-off-by: Jussi Kukkonen <jussi.kukkonen@intel.com>
+---
+ configure.ac | 7 +++++++
+ gdk/x11/gdkvisual-x11.c | 5 +++++
+ 2 files changed, 12 insertions(+)
+
+diff --git a/configure.ac b/configure.ac
+index 729a62e..58cc1ac 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -328,6 +328,13 @@ AC_ARG_ENABLE(mir-backend,
+ [enable the Mir gdk backend])],
+ [backend_set=yes])
+
++AC_ARG_ENABLE(glx,
++ [AS_HELP_STRING([--enable-glx],
++ [When enabled Gdk will try to initialize GLX])])
++AS_IF([test "x$enable_glx" != "xno"], [
++ AC_DEFINE([HAVE_GLX], [], [GLX will be available at runtime])
++])
++
+ if test -z "$backend_set"; then
+ if test "$platform_win32" = yes; then
+ enable_win32_backend=yes
+diff --git a/gdk/x11/gdkvisual-x11.c b/gdk/x11/gdkvisual-x11.c
+index f3b062d..c8243f4 100644
+--- a/gdk/x11/gdkvisual-x11.c
++++ b/gdk/x11/gdkvisual-x11.c
+@@ -345,7 +345,12 @@ _gdk_x11_screen_init_visuals (GdkScreen *screen)
+ /* If GL is available we want to pick better default/rgba visuals,
+ as we care about glx details such as alpha/depth/stencil depth,
+ stereo and double buffering */
++ /* update_visuals_for_gl() will end up calling epoxy GLX api which
++ will exit if libgl is not there: so only do this if we know GL
++ is available */
++#ifdef HAVE_GLX
+ _gdk_x11_screen_update_visuals_for_gl (screen);
++#endif
+ }
+
+ gint
+--
+2.1.4
+
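
The commit message above notes that GTK+ could instead probe before calling epoxy. Purely as an illustration of that mentioned alternative (this is not the patch that was applied, and the helper name is made up), a runtime check could look like the following; link with -ldl on older glibc.

#include <dlfcn.h>
#include <stdio.h>

/* Return non-zero if libGL.so.1 can actually be loaded. */
static int have_libgl(void)
{
    void *handle = dlopen("libGL.so.1", RTLD_LAZY | RTLD_LOCAL);
    if (!handle)
        return 0;
    dlclose(handle);
    return 1;
}

int main(void)
{
    if (!have_libgl()) {
        fprintf(stderr, "libGL not present, skipping GL visual setup\n");
        return 0;
    }
    /* safe to call GLX/epoxy-using code from here */
    return 0;
}
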
diff --git a/yocto-poky/meta/recipes-gnome/gtk+/gtk+3_3.16.6.bb b/yocto-poky/meta/recipes-gnome/gtk+/gtk+3_3.16.6.bb
index 1d736a432..381e60750 100644
--- a/yocto-poky/meta/recipes-gnome/gtk+/gtk+3_3.16.6.bb
+++ b/yocto-poky/meta/recipes-gnome/gtk+/gtk+3_3.16.6.bb
@@ -5,6 +5,7 @@ MAJ_VER = "${@oe.utils.trim_version("${PV}", 2)}"
SRC_URI = "http://ftp.gnome.org/pub/gnome/sources/gtk+/${MAJ_VER}/gtk+-${PV}.tar.xz \
file://hardcoded_libtool.patch \
file://Dont-force-csd.patch \
+ file://Do-not-try-to-initialize-GL-without-libGL.patch \
"
SRC_URI[md5sum] = "fc59e5c8b5a4585b60623dd708df400b"
diff --git a/yocto-poky/meta/recipes-gnome/gtk-engines/gtk-engines_2.20.2.bb b/yocto-poky/meta/recipes-gnome/gtk-engines/gtk-engines_2.20.2.bb
index c30454cab..7c3a87e87 100644
--- a/yocto-poky/meta/recipes-gnome/gtk-engines/gtk-engines_2.20.2.bb
+++ b/yocto-poky/meta/recipes-gnome/gtk-engines/gtk-engines_2.20.2.bb
@@ -6,7 +6,7 @@ LICENSE = "LGPLv2.1"
LIC_FILES_CHKSUM = "file://COPYING;md5=2d5025d4aa3495befef8f17206a5b0a1"
SECTION = "x11/base"
-DEPENDS = "intltool-native gtk+"
+DEPENDS = "intltool-native gtk+ gettext-native"
PR = "r3"
@@ -34,6 +34,9 @@ RDEPENDS_${PN}-dev = ""
inherit gnomebase
GNOME_COMPRESS_TYPE="bz2"
+inherit distro_features_check
+ANY_OF_DISTRO_FEATURES = "${GTK2DISTROFEATURES}"
+
python populate_packages_prepend() {
engines_root = os.path.join(d.getVar('libdir', True), "gtk-2.0/2.10.0/engines")
themes_root = os.path.join(d.getVar('datadir', True), "themes")
diff --git a/yocto-poky/meta/recipes-gnome/gtk-theme-torturer/gtk-theme-torturer_git.bb b/yocto-poky/meta/recipes-gnome/gtk-theme-torturer/gtk-theme-torturer_git.bb
index 889fd8941..b67806def 100644
--- a/yocto-poky/meta/recipes-gnome/gtk-theme-torturer/gtk-theme-torturer_git.bb
+++ b/yocto-poky/meta/recipes-gnome/gtk-theme-torturer/gtk-theme-torturer_git.bb
@@ -13,6 +13,9 @@ S = "${WORKDIR}/git/gtk-theme-torturer"
CFLAGS += "-Wl,-rpath-link,${STAGING_LIBDIR}"
+inherit distro_features_check
+ANY_OF_DISTRO_FEATURES = "${GTK2DISTROFEATURES}"
+
do_install() {
install -d ${D}${bindir}
install -m 0755 torturer ${D}${bindir}
diff --git a/yocto-poky/meta/recipes-gnome/libglade/libglade_2.6.4.bb b/yocto-poky/meta/recipes-gnome/libglade/libglade_2.6.4.bb
index 15267cad8..553e19c60 100644
--- a/yocto-poky/meta/recipes-gnome/libglade/libglade_2.6.4.bb
+++ b/yocto-poky/meta/recipes-gnome/libglade/libglade_2.6.4.bb
@@ -11,7 +11,8 @@ SECTION = "libs"
PR = "r5"
DEPENDS = "zlib gdk-pixbuf gtk+"
-inherit autotools pkgconfig gnomebase gtk-doc
+inherit autotools pkgconfig gnomebase gtk-doc distro_features_check
+ANY_OF_DISTRO_FEATURES = "${GTK2DISTROFEATURES}"
GNOME_COMPRESS_TYPE="bz2"
SRC_URI += "file://glade-cruft.patch file://no-xml2.patch file://python_environment.patch"
diff --git a/yocto-poky/meta/recipes-gnome/librsvg/librsvg/CVE-2015-7558_1.patch b/yocto-poky/meta/recipes-gnome/librsvg/librsvg/CVE-2015-7558_1.patch
new file mode 100644
index 000000000..a3ba41f50
--- /dev/null
+++ b/yocto-poky/meta/recipes-gnome/librsvg/librsvg/CVE-2015-7558_1.patch
@@ -0,0 +1,139 @@
+From d1c9191949747f6dcfd207831d15dd4ba00e31f2 Mon Sep 17 00:00:00 2001
+From: Benjamin Otte <otte@redhat.com>
+Date: Wed, 7 Oct 2015 05:31:08 +0200
+Subject: [PATCH] state: Store mask as reference
+
+Instead of immediately looking up the mask, store the reference and look
+it up on use.
+
+Upstream-status: Backport
+
+supporting patch
+https://git.gnome.org/browse/librsvg/commit/rsvg-styles.c?id=d1c9191949747f6dcfd207831d15dd4ba00e31f2
+
+CVE: CVE-2015-7558
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ rsvg-cairo-draw.c | 6 +++++-
+ rsvg-mask.c | 17 -----------------
+ rsvg-mask.h | 2 --
+ rsvg-styles.c | 12 ++++++++----
+ rsvg-styles.h | 2 +-
+ 5 files changed, 14 insertions(+), 25 deletions(-)
+
+Index: librsvg-2.40.10/rsvg-cairo-draw.c
+===================================================================
+--- librsvg-2.40.10.orig/rsvg-cairo-draw.c
++++ librsvg-2.40.10/rsvg-cairo-draw.c
+@@ -825,7 +825,11 @@ rsvg_cairo_pop_render_stack (RsvgDrawing
+ cairo_set_operator (render->cr, state->comp_op);
+
+ if (state->mask) {
+- rsvg_cairo_generate_mask (render->cr, state->mask, ctx, &render->bbox);
++ RsvgNode *mask;
++
++ mask = rsvg_defs_lookup (ctx->defs, state->mask);
++ if (mask && RSVG_NODE_TYPE (mask) == RSVG_NODE_TYPE_MASK)
++ rsvg_cairo_generate_mask (render->cr, (RsvgMask *) mask, ctx, &render->bbox);
+ } else if (state->opacity != 0xFF)
+ cairo_paint_with_alpha (render->cr, (double) state->opacity / 255.0);
+ else
+Index: librsvg-2.40.10/rsvg-mask.c
+===================================================================
+--- librsvg-2.40.10.orig/rsvg-mask.c
++++ librsvg-2.40.10/rsvg-mask.c
+@@ -103,23 +103,6 @@ rsvg_get_url_string (const char *str)
+ }
+
+ RsvgNode *
+-rsvg_mask_parse (const RsvgDefs * defs, const char *str)
+-{
+- char *name;
+-
+- name = rsvg_get_url_string (str);
+- if (name) {
+- RsvgNode *val;
+- val = rsvg_defs_lookup (defs, name);
+- g_free (name);
+-
+- if (val && RSVG_NODE_TYPE (val) == RSVG_NODE_TYPE_MASK)
+- return val;
+- }
+- return NULL;
+-}
+-
+-RsvgNode *
+ rsvg_clip_path_parse (const RsvgDefs * defs, const char *str)
+ {
+ char *name;
+Index: librsvg-2.40.10/rsvg-mask.h
+===================================================================
+--- librsvg-2.40.10.orig/rsvg-mask.h
++++ librsvg-2.40.10/rsvg-mask.h
+@@ -48,8 +48,6 @@ struct _RsvgMask {
+
+ G_GNUC_INTERNAL
+ RsvgNode *rsvg_new_mask (void);
+-G_GNUC_INTERNAL
+-RsvgNode *rsvg_mask_parse (const RsvgDefs * defs, const char *str);
+
+ typedef struct _RsvgClipPath RsvgClipPath;
+
+Index: librsvg-2.40.10/rsvg-styles.c
+===================================================================
+--- librsvg-2.40.10.orig/rsvg-styles.c
++++ librsvg-2.40.10/rsvg-styles.c
+@@ -221,6 +221,7 @@ rsvg_state_clone (RsvgState * dst, const
+
+ *dst = *src;
+ dst->parent = parent;
++ dst->mask = g_strdup (src->mask);
+ dst->font_family = g_strdup (src->font_family);
+ dst->lang = g_strdup (src->lang);
+ rsvg_paint_server_ref (dst->fill);
+@@ -356,7 +357,8 @@ rsvg_state_inherit_run (RsvgState * dst,
+
+ if (inherituninheritables) {
+ dst->clip_path_ref = src->clip_path_ref;
+- dst->mask = src->mask;
++ g_free (dst->mask);
++ dst->mask = g_strdup (src->mask);
+ dst->enable_background = src->enable_background;
+ dst->adobe_blend = src->adobe_blend;
+ dst->opacity = src->opacity;
+@@ -444,6 +446,7 @@ rsvg_state_inherit (RsvgState * dst, con
+ void
+ rsvg_state_finalize (RsvgState * state)
+ {
++ g_free (state->mask);
+ g_free (state->font_family);
+ g_free (state->lang);
+ rsvg_paint_server_unref (state->fill);
+@@ -517,9 +520,10 @@ rsvg_parse_style_pair (RsvgHandle * ctx,
+ state->adobe_blend = 11;
+ else
+ state->adobe_blend = 0;
+- } else if (g_str_equal (name, "mask"))
+- state->mask = rsvg_mask_parse (ctx->priv->defs, value);
+- else if (g_str_equal (name, "clip-path")) {
++ } else if (g_str_equal (name, "mask")) {
++ g_free (state->mask);
++ state->mask = rsvg_get_url_string (value);
++ } else if (g_str_equal (name, "clip-path")) {
+ state->clip_path_ref = rsvg_clip_path_parse (ctx->priv->defs, value);
+ } else if (g_str_equal (name, "overflow")) {
+ if (!g_str_equal (value, "inherit")) {
+Index: librsvg-2.40.10/rsvg-styles.h
+===================================================================
+--- librsvg-2.40.10.orig/rsvg-styles.h
++++ librsvg-2.40.10/rsvg-styles.h
+@@ -80,7 +80,7 @@ struct _RsvgState {
+ cairo_matrix_t personal_affine;
+
+ RsvgFilter *filter;
+- void *mask;
++ char *mask;
+ void *clip_path_ref;
+ guint8 adobe_blend; /* 0..11 */
+ guint8 opacity; /* 0..255 */
diff --git a/yocto-poky/meta/recipes-gnome/librsvg/librsvg/CVE-2015-7558_2.patch b/yocto-poky/meta/recipes-gnome/librsvg/librsvg/CVE-2015-7558_2.patch
new file mode 100644
index 000000000..9f6820ef9
--- /dev/null
+++ b/yocto-poky/meta/recipes-gnome/librsvg/librsvg/CVE-2015-7558_2.patch
@@ -0,0 +1,230 @@
+From 6cfaab12c70cd4a34c4730837f1ecdf792593c90 Mon Sep 17 00:00:00 2001
+From: Benjamin Otte <otte@redhat.com>
+Date: Wed, 7 Oct 2015 07:57:39 +0200
+Subject: [PATCH] state: Look up clip path lazily
+
+Upstream-status: Backport
+
+supporting patch
+https://git.gnome.org/browse/librsvg/commit/rsvg-styles.c?id=6cfaab12c70cd4a34c4730837f1ecdf792593c90
+
+CVE: CVE-2015-7558
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ rsvg-cairo-draw.c | 56 +++++++++++++++++++++++++++++++++----------------------
+ rsvg-mask.c | 17 -----------------
+ rsvg-mask.h | 2 --
+ rsvg-styles.c | 10 +++++++---
+ rsvg-styles.h | 2 +-
+ 5 files changed, 42 insertions(+), 45 deletions(-)
+
+Index: librsvg-2.40.10/rsvg-cairo-draw.c
+===================================================================
+--- librsvg-2.40.10.orig/rsvg-cairo-draw.c
++++ librsvg-2.40.10/rsvg-cairo-draw.c
+@@ -461,7 +461,7 @@ rsvg_cairo_render_path (RsvgDrawingCtx *
+ return;
+
+ need_tmpbuf = ((state->fill != NULL) && (state->stroke != NULL) && state->opacity != 0xff)
+- || state->clip_path_ref || state->mask || state->filter
++ || state->clip_path || state->mask || state->filter
+ || (state->comp_op != CAIRO_OPERATOR_OVER);
+
+ if (need_tmpbuf)
+@@ -708,18 +708,6 @@ rsvg_cairo_generate_mask (cairo_t * cr,
+ }
+
+ static void
+-rsvg_cairo_push_early_clips (RsvgDrawingCtx * ctx)
+-{
+- RsvgCairoRender *render = RSVG_CAIRO_RENDER (ctx->render);
+-
+- cairo_save (render->cr);
+- if (rsvg_current_state (ctx)->clip_path_ref)
+- if (((RsvgClipPath *) rsvg_current_state (ctx)->clip_path_ref)->units == userSpaceOnUse)
+- rsvg_cairo_clip (ctx, rsvg_current_state (ctx)->clip_path_ref, NULL);
+-
+-}
+-
+-static void
+ rsvg_cairo_push_render_stack (RsvgDrawingCtx * ctx)
+ {
+ /* XXX: Untested, probably needs help wrt filters */
+@@ -731,9 +719,27 @@ rsvg_cairo_push_render_stack (RsvgDrawin
+ RsvgState *state = rsvg_current_state (ctx);
+ gboolean lateclip = FALSE;
+
+- if (rsvg_current_state (ctx)->clip_path_ref)
+- if (((RsvgClipPath *) rsvg_current_state (ctx)->clip_path_ref)->units == objectBoundingBox)
+- lateclip = TRUE;
++ if (rsvg_current_state (ctx)->clip_path) {
++ RsvgNode *node;
++ node = rsvg_defs_lookup (ctx->defs, rsvg_current_state (ctx)->clip_path);
++ if (node && RSVG_NODE_TYPE (node) == RSVG_NODE_TYPE_CLIP_PATH) {
++ RsvgClipPath *clip_path = (RsvgClipPath *) node;
++
++ switch (clip_path->units) {
++ case userSpaceOnUse:
++ rsvg_cairo_clip (ctx, clip_path, NULL);
++ break;
++ case objectBoundingBox:
++ lateclip = TRUE;
++ break;
++
++ default:
++ g_assert_not_reached ();
++ break;
++ }
++
++ }
++ }
+
+ if (state->opacity == 0xFF
+ && !state->filter && !state->mask && !lateclip && (state->comp_op == CAIRO_OPERATOR_OVER)
+@@ -774,7 +780,9 @@ rsvg_cairo_push_render_stack (RsvgDrawin
+ void
+ rsvg_cairo_push_discrete_layer (RsvgDrawingCtx * ctx)
+ {
+- rsvg_cairo_push_early_clips (ctx);
++ RsvgCairoRender *render = RSVG_CAIRO_RENDER (ctx->render);
++
++ cairo_save (render->cr);
+ rsvg_cairo_push_render_stack (ctx);
+ }
+
+@@ -783,14 +791,18 @@ rsvg_cairo_pop_render_stack (RsvgDrawing
+ {
+ RsvgCairoRender *render = RSVG_CAIRO_RENDER (ctx->render);
+ cairo_t *child_cr = render->cr;
+- gboolean lateclip = FALSE;
++ RsvgClipPath *lateclip = NULL;
+ cairo_surface_t *surface = NULL;
+ RsvgState *state = rsvg_current_state (ctx);
+ gboolean nest;
+
+- if (rsvg_current_state (ctx)->clip_path_ref)
+- if (((RsvgClipPath *) rsvg_current_state (ctx)->clip_path_ref)->units == objectBoundingBox)
+- lateclip = TRUE;
++ if (rsvg_current_state (ctx)->clip_path) {
++ RsvgNode *node;
++ node = rsvg_defs_lookup (ctx->defs, rsvg_current_state (ctx)->clip_path);
++ if (node && RSVG_NODE_TYPE (node) == RSVG_NODE_TYPE_CLIP_PATH
++ && ((RsvgClipPath *) node)->units == objectBoundingBox)
++ lateclip = (RsvgClipPath *) node;
++ }
+
+ if (state->opacity == 0xFF
+ && !state->filter && !state->mask && !lateclip && (state->comp_op == CAIRO_OPERATOR_OVER)
+@@ -820,7 +832,7 @@ rsvg_cairo_pop_render_stack (RsvgDrawing
+ nest ? 0 : render->offset_y);
+
+ if (lateclip)
+- rsvg_cairo_clip (ctx, rsvg_current_state (ctx)->clip_path_ref, &render->bbox);
++ rsvg_cairo_clip (ctx, lateclip, &render->bbox);
+
+ cairo_set_operator (render->cr, state->comp_op);
+
+Index: librsvg-2.40.10/rsvg-mask.c
+===================================================================
+--- librsvg-2.40.10.orig/rsvg-mask.c
++++ librsvg-2.40.10/rsvg-mask.c
+@@ -102,23 +102,6 @@ rsvg_get_url_string (const char *str)
+ return NULL;
+ }
+
+-RsvgNode *
+-rsvg_clip_path_parse (const RsvgDefs * defs, const char *str)
+-{
+- char *name;
+-
+- name = rsvg_get_url_string (str);
+- if (name) {
+- RsvgNode *val;
+- val = rsvg_defs_lookup (defs, name);
+- g_free (name);
+-
+- if (val && RSVG_NODE_TYPE (val) == RSVG_NODE_TYPE_CLIP_PATH)
+- return val;
+- }
+- return NULL;
+-}
+-
+ static void
+ rsvg_clip_path_set_atts (RsvgNode * self, RsvgHandle * ctx, RsvgPropertyBag * atts)
+ {
+Index: librsvg-2.40.10/rsvg-mask.h
+===================================================================
+--- librsvg-2.40.10.orig/rsvg-mask.h
++++ librsvg-2.40.10/rsvg-mask.h
+@@ -58,8 +58,6 @@ struct _RsvgClipPath {
+
+ G_GNUC_INTERNAL
+ RsvgNode *rsvg_new_clip_path (void);
+-G_GNUC_INTERNAL
+-RsvgNode *rsvg_clip_path_parse (const RsvgDefs * defs, const char *str);
+
+ G_END_DECLS
+ #endif
+Index: librsvg-2.40.10/rsvg-styles.c
+===================================================================
+--- librsvg-2.40.10.orig/rsvg-styles.c
++++ librsvg-2.40.10/rsvg-styles.c
+@@ -149,7 +149,7 @@ rsvg_state_init (RsvgState * state)
+ state->visible = TRUE;
+ state->cond_true = TRUE;
+ state->filter = NULL;
+- state->clip_path_ref = NULL;
++ state->clip_path = NULL;
+ state->startMarker = NULL;
+ state->middleMarker = NULL;
+ state->endMarker = NULL;
+@@ -222,6 +222,7 @@ rsvg_state_clone (RsvgState * dst, const
+ *dst = *src;
+ dst->parent = parent;
+ dst->mask = g_strdup (src->mask);
++ dst->clip_path = g_strdup (src->clip_path);
+ dst->font_family = g_strdup (src->font_family);
+ dst->lang = g_strdup (src->lang);
+ rsvg_paint_server_ref (dst->fill);
+@@ -356,7 +357,8 @@ rsvg_state_inherit_run (RsvgState * dst,
+ }
+
+ if (inherituninheritables) {
+- dst->clip_path_ref = src->clip_path_ref;
++ g_free (dst->clip_path);
++ dst->clip_path = g_strdup (src->clip_path);
+ g_free (dst->mask);
+ dst->mask = g_strdup (src->mask);
+ dst->enable_background = src->enable_background;
+@@ -447,6 +449,7 @@ void
+ rsvg_state_finalize (RsvgState * state)
+ {
+ g_free (state->mask);
++ g_free (state->clip_path);
+ g_free (state->font_family);
+ g_free (state->lang);
+ rsvg_paint_server_unref (state->fill);
+@@ -524,7 +527,8 @@ rsvg_parse_style_pair (RsvgHandle * ctx,
+ g_free (state->mask);
+ state->mask = rsvg_get_url_string (value);
+ } else if (g_str_equal (name, "clip-path")) {
+- state->clip_path_ref = rsvg_clip_path_parse (ctx->priv->defs, value);
++ g_free (state->clip_path);
++ state->clip_path = rsvg_get_url_string (value);
+ } else if (g_str_equal (name, "overflow")) {
+ if (!g_str_equal (value, "inherit")) {
+ state->overflow = rsvg_css_parse_overflow (value, &state->has_overflow);
+Index: librsvg-2.40.10/rsvg-styles.h
+===================================================================
+--- librsvg-2.40.10.orig/rsvg-styles.h
++++ librsvg-2.40.10/rsvg-styles.h
+@@ -81,7 +81,7 @@ struct _RsvgState {
+
+ RsvgFilter *filter;
+ char *mask;
+- void *clip_path_ref;
++ char *clip_path;
+ guint8 adobe_blend; /* 0..11 */
+ guint8 opacity; /* 0..255 */
+
diff --git a/yocto-poky/meta/recipes-gnome/librsvg/librsvg/CVE-2015-7558_3.patch b/yocto-poky/meta/recipes-gnome/librsvg/librsvg/CVE-2015-7558_3.patch
new file mode 100644
index 000000000..dd67ab768
--- /dev/null
+++ b/yocto-poky/meta/recipes-gnome/librsvg/librsvg/CVE-2015-7558_3.patch
@@ -0,0 +1,223 @@
+From a51919f7e1ca9c535390a746fbf6e28c8402dc61 Mon Sep 17 00:00:00 2001
+From: Benjamin Otte <otte@redhat.com>
+Date: Wed, 7 Oct 2015 08:45:37 +0200
+Subject: [PATCH] rsvg: Add rsvg_acquire_node()
+
+This function does proper recursion checks when looking up resources
+from URLs and thereby helps avoid infinite loops when cyclic
+references span multiple types of elements.
+
+Upstream-status: Backport
+
+https://git.gnome.org/browse/librsvg/commit/rsvg-styles.c?id=a51919f7e1ca9c535390a746fbf6e28c8402dc61
+
+CVE: CVE-2015-7558
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ rsvg-base.c | 55 +++++++++++++++++++++++++++++++++++++++++++++++++++++
+ rsvg-cairo-draw.c | 15 +++++++++++----
+ rsvg-cairo-render.c | 1 +
+ rsvg-filter.c | 9 +++++++--
+ rsvg-private.h | 5 +++++
+ 5 files changed, 79 insertions(+), 6 deletions(-)
+
+Index: librsvg-2.40.10/rsvg-base.c
+===================================================================
+--- librsvg-2.40.10.orig/rsvg-base.c
++++ librsvg-2.40.10/rsvg-base.c
+@@ -1236,6 +1236,8 @@ rsvg_drawing_ctx_free (RsvgDrawingCtx *
+ g_slist_free (handle->drawsub_stack);
+
+ g_slist_free (handle->ptrs);
++ g_warn_if_fail (handle->acquired_nodes == NULL);
++ g_slist_free (handle->acquired_nodes);
+
+ if (handle->base_uri)
+ g_free (handle->base_uri);
+@@ -2018,6 +2020,59 @@ rsvg_push_discrete_layer (RsvgDrawingCtx
+ ctx->render->push_discrete_layer (ctx);
+ }
+
++/*
++ * rsvg_acquire_node:
++ * @ctx: The drawing context in use
++ * @url: The IRI to lookup
++ *
++ * Use this function when looking up urls to other nodes. This
++ * function does proper recursion checking and thereby avoids
++ * infinite loops.
++ *
++ * Nodes acquired by this function must be released using
++ * rsvg_release_node() in reverse acquiring order.
++ *
++ * Returns: The node referenced by @url or %NULL if the @url
++ * does not reference a node.
++ */
++RsvgNode *
++rsvg_acquire_node (RsvgDrawingCtx * ctx, const char *url)
++{
++ RsvgNode *node;
++
++ node = rsvg_defs_lookup (ctx->defs, url);
++ if (node == NULL)
++ return NULL;
++
++ if (g_slist_find (ctx->acquired_nodes, node))
++ return NULL;
++
++ ctx->acquired_nodes = g_slist_prepend (ctx->acquired_nodes, node);
++
++ return node;
++}
++
++/*
++ * rsvg_release_node:
++ * @ctx: The drawing context the node was acquired from
++ * @node: Node to release
++ *
++ * Releases a node previously acquired via rsvg_acquire_node().
++ *
++ * if @node is %NULL, this function does nothing.
++ */
++void
++rsvg_release_node (RsvgDrawingCtx * ctx, RsvgNode *node)
++{
++ if (node == NULL)
++ return;
++
++ g_return_if_fail (ctx->acquired_nodes != NULL);
++ g_return_if_fail (ctx->acquired_nodes->data == node);
++
++ ctx->acquired_nodes = g_slist_remove (ctx->acquired_nodes, node);
++}
++
+ void
+ rsvg_render_path (RsvgDrawingCtx * ctx, const cairo_path_t *path)
+ {
+Index: librsvg-2.40.10/rsvg-cairo-draw.c
+===================================================================
+--- librsvg-2.40.10.orig/rsvg-cairo-draw.c
++++ librsvg-2.40.10/rsvg-cairo-draw.c
+@@ -721,7 +721,7 @@ rsvg_cairo_push_render_stack (RsvgDrawin
+
+ if (rsvg_current_state (ctx)->clip_path) {
+ RsvgNode *node;
+- node = rsvg_defs_lookup (ctx->defs, rsvg_current_state (ctx)->clip_path);
++ node = rsvg_acquire_node (ctx, rsvg_current_state (ctx)->clip_path);
+ if (node && RSVG_NODE_TYPE (node) == RSVG_NODE_TYPE_CLIP_PATH) {
+ RsvgClipPath *clip_path = (RsvgClipPath *) node;
+
+@@ -739,6 +739,8 @@ rsvg_cairo_push_render_stack (RsvgDrawin
+ }
+
+ }
++
++ rsvg_release_node (ctx, node);
+ }
+
+ if (state->opacity == 0xFF
+@@ -798,10 +800,12 @@ rsvg_cairo_pop_render_stack (RsvgDrawing
+
+ if (rsvg_current_state (ctx)->clip_path) {
+ RsvgNode *node;
+- node = rsvg_defs_lookup (ctx->defs, rsvg_current_state (ctx)->clip_path);
++ node = rsvg_acquire_node (ctx, rsvg_current_state (ctx)->clip_path);
+ if (node && RSVG_NODE_TYPE (node) == RSVG_NODE_TYPE_CLIP_PATH
+ && ((RsvgClipPath *) node)->units == objectBoundingBox)
+ lateclip = (RsvgClipPath *) node;
++ else
++ rsvg_release_node (ctx, node);
+ }
+
+ if (state->opacity == 0xFF
+@@ -831,17 +835,20 @@ rsvg_cairo_pop_render_stack (RsvgDrawing
+ nest ? 0 : render->offset_x,
+ nest ? 0 : render->offset_y);
+
+- if (lateclip)
++ if (lateclip) {
+ rsvg_cairo_clip (ctx, lateclip, &render->bbox);
++ rsvg_release_node (ctx, (RsvgNode *) lateclip);
++ }
+
+ cairo_set_operator (render->cr, state->comp_op);
+
+ if (state->mask) {
+ RsvgNode *mask;
+
+- mask = rsvg_defs_lookup (ctx->defs, state->mask);
++ mask = rsvg_acquire_node (ctx, state->mask);
+ if (mask && RSVG_NODE_TYPE (mask) == RSVG_NODE_TYPE_MASK)
+ rsvg_cairo_generate_mask (render->cr, (RsvgMask *) mask, ctx, &render->bbox);
++ rsvg_release_node (ctx, mask);
+ } else if (state->opacity != 0xFF)
+ cairo_paint_with_alpha (render->cr, (double) state->opacity / 255.0);
+ else
+Index: librsvg-2.40.10/rsvg-cairo-render.c
+===================================================================
+--- librsvg-2.40.10.orig/rsvg-cairo-render.c
++++ librsvg-2.40.10/rsvg-cairo-render.c
+@@ -155,6 +155,7 @@ rsvg_cairo_new_drawing_ctx (cairo_t * cr
+ draw->pango_context = NULL;
+ draw->drawsub_stack = NULL;
+ draw->ptrs = NULL;
++ draw->acquired_nodes = NULL;
+
+ rsvg_state_push (draw);
+ state = rsvg_current_state (draw);
+Index: librsvg-2.40.10/rsvg-filter.c
+===================================================================
+--- librsvg-2.40.10.orig/rsvg-filter.c
++++ librsvg-2.40.10/rsvg-filter.c
+@@ -3921,6 +3921,7 @@ rsvg_filter_primitive_image_render_in (R
+ RsvgDrawingCtx *ctx;
+ RsvgFilterPrimitiveImage *upself;
+ RsvgNode *drawable;
++ cairo_surface_t *result;
+
+ ctx = context->ctx;
+
+@@ -3929,13 +3930,17 @@ rsvg_filter_primitive_image_render_in (R
+ if (!upself->href)
+ return NULL;
+
+- drawable = rsvg_defs_lookup (ctx->defs, upself->href->str);
++ drawable = rsvg_acquire_node (ctx, upself->href->str);
+ if (!drawable)
+ return NULL;
+
+ rsvg_current_state (ctx)->affine = context->paffine;
+
+- return rsvg_get_surface_of_node (ctx, drawable, context->width, context->height);
++ result = rsvg_get_surface_of_node (ctx, drawable, context->width, context->height);
++
++ rsvg_release_node (ctx, drawable);
++
++ return result;
+ }
+
+ static cairo_surface_t *
+Index: librsvg-2.40.10/rsvg-private.h
+===================================================================
+--- librsvg-2.40.10.orig/rsvg-private.h
++++ librsvg-2.40.10/rsvg-private.h
+@@ -200,6 +200,7 @@ struct RsvgDrawingCtx {
+ GSList *vb_stack;
+ GSList *drawsub_stack;
+ GSList *ptrs;
++ GSList *acquired_nodes;
+ };
+
+ /*Abstract base class for context for our backends (one as yet)*/
+@@ -360,6 +361,10 @@ void rsvg_pop_discrete_layer (RsvgDra
+ G_GNUC_INTERNAL
+ void rsvg_push_discrete_layer (RsvgDrawingCtx * ctx);
+ G_GNUC_INTERNAL
++RsvgNode *rsvg_acquire_node (RsvgDrawingCtx * ctx, const char *url);
++G_GNUC_INTERNAL
++void rsvg_release_node (RsvgDrawingCtx * ctx, RsvgNode *node);
++G_GNUC_INTERNAL
+ void rsvg_render_path (RsvgDrawingCtx * ctx, const cairo_path_t *path);
+ G_GNUC_INTERNAL
+ void rsvg_render_surface (RsvgDrawingCtx * ctx, cairo_surface_t *surface,
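
A compact, GLib-free sketch of the cycle guard the patch describes (the names and the fixed-size stack are illustrative, not librsvg's actual implementation): nodes currently being resolved sit on a stack, and acquiring a node that is already on it fails, which breaks A -> B -> A reference loops.

#include <stdio.h>
#include <string.h>

#define MAX_DEPTH 64

static const char *stack[MAX_DEPTH];
static int depth;

static const char *acquire(const char *url)
{
    for (int i = 0; i < depth; i++)
        if (strcmp(stack[i], url) == 0)
            return NULL;                 /* already being resolved: cycle */
    if (depth == MAX_DEPTH)
        return NULL;
    stack[depth++] = url;
    return url;                          /* stands in for the looked-up node */
}

static void release(const char *node)
{
    if (node && depth > 0 && stack[depth - 1] == node)
        depth--;                         /* release in reverse acquiring order */
}

int main(void)
{
    const char *a = acquire("#mask1");
    const char *b = acquire("#clip1");
    const char *again = acquire("#mask1");   /* cyclic reference */

    printf("first acquire : %s\n", a ? a : "(refused)");
    printf("second acquire: %s\n", b ? b : "(refused)");
    printf("cycle acquire : %s\n", again ? again : "(refused)");

    release(b);
    release(a);
    return 0;
}
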
diff --git a/yocto-poky/meta/recipes-gnome/librsvg/librsvg_2.40.10.bb b/yocto-poky/meta/recipes-gnome/librsvg/librsvg_2.40.10.bb
index a8b0e4f9c..cb8a73c40 100644
--- a/yocto-poky/meta/recipes-gnome/librsvg/librsvg_2.40.10.bb
+++ b/yocto-poky/meta/recipes-gnome/librsvg/librsvg_2.40.10.bb
@@ -12,11 +12,17 @@ BBCLASSEXTEND = "native"
inherit autotools pkgconfig gnomebase gtk-doc pixbufcache
-SRC_URI += "file://gtk-option.patch"
+SRC_URI += "file://gtk-option.patch \
+ file://CVE-2015-7558_1.patch \
+ file://CVE-2015-7558_2.patch \
+ file://CVE-2015-7558_3.patch \
+ "
SRC_URI[archive.md5sum] = "fadebe2e799ab159169ee3198415ff85"
SRC_URI[archive.sha256sum] = "965c807438ce90b204e930ff80c92eba1606a2f6fd5ccfd09335c99896dd3479"
+CACHED_CONFIGUREVARS = "ac_cv_path_GDK_PIXBUF_QUERYLOADERS=${STAGING_LIBDIR_NATIVE}/gdk-pixbuf-2.0/gdk-pixbuf-query-loaders"
+
EXTRA_OECONF = "--disable-introspection --disable-vala"
# The older ld (2.22) on the host (Centos 6.5) doesn't have the
diff --git a/yocto-poky/meta/recipes-gnome/libsecret/libsecret_0.18.2.bb b/yocto-poky/meta/recipes-gnome/libsecret/libsecret_0.18.2.bb
index cebc83b57..8fc00181f 100644
--- a/yocto-poky/meta/recipes-gnome/libsecret/libsecret_0.18.2.bb
+++ b/yocto-poky/meta/recipes-gnome/libsecret/libsecret_0.18.2.bb
@@ -4,9 +4,12 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=23c2a5e0106b99d75238986559bb5fc6"
inherit gnomebase gtk-doc
-DEPENDS = "glib-2.0 libgcrypt"
+DEPENDS = "glib-2.0 libgcrypt gettext-native"
EXTRA_OECONF += "--disable-manpages"
SRC_URI[archive.md5sum] = "23cdf8267d11a26f88f0dbec1e2022ad"
SRC_URI[archive.sha256sum] = "12fd288b012e1b2b1b54d586cd4c6507885715534644b4534b7ef7d7079ba443"
+
+# http://errors.yoctoproject.org/Errors/Details/20228/
+ARM_INSTRUCTION_SET = "arm"
diff --git a/yocto-poky/meta/recipes-gnome/libwnck/libwnck3_3.14.0.bb b/yocto-poky/meta/recipes-gnome/libwnck/libwnck3_3.14.0.bb
index d0f5175e5..3ee1ae98a 100644
--- a/yocto-poky/meta/recipes-gnome/libwnck/libwnck3_3.14.0.bb
+++ b/yocto-poky/meta/recipes-gnome/libwnck/libwnck3_3.14.0.bb
@@ -13,3 +13,8 @@ PACKAGECONFIG[startup-notification] = "--enable-startup-notification,--disable-s
inherit gnomebase
SRC_URI[archive.md5sum] = "4538672e0d775fadedf10abeb8020047"
SRC_URI[archive.sha256sum] = "f5080076346609b4c36394b879f3a86b92ced3b90a37cb54c8e9a14f00e7921c"
+
+inherit distro_features_check
+# libxres means x11 only
+REQUIRED_DISTRO_FEATURES = "x11"
+
diff --git a/yocto-poky/meta/recipes-graphics/cairo/cairo.inc b/yocto-poky/meta/recipes-graphics/cairo/cairo.inc
index 1e45318dd..45651bad2 100644
--- a/yocto-poky/meta/recipes-graphics/cairo/cairo.inc
+++ b/yocto-poky/meta/recipes-graphics/cairo/cairo.inc
@@ -9,11 +9,14 @@ Extension."
HOMEPAGE = "http://cairographics.org"
BUGTRACKER = "http://bugs.freedesktop.org"
SECTION = "libs"
+
LICENSE = "MPL-1 & LGPLv2.1 & GPLv3+"
LICENSE_${PN} = "MPL-1 & LGPLv2.1"
LICENSE_${PN}-dev = "MPL-1 & LGPLv2.1"
LICENSE_${PN}-gobject = "MPL-1 & LGPLv2.1"
+LICENSE_${PN}-script-interpreter = "MPL-1 & LGPLv2.1"
LICENSE_${PN}-perf-utils = "GPLv3+"
+
X11DEPENDS = "virtual/libx11 libsm libxrender libxext"
DEPENDS = "libpng fontconfig pixman glib-2.0 zlib"
diff --git a/yocto-poky/meta/recipes-graphics/cairo/cairo/Manually-transpose-the-matrix-in-_cairo_gl_shader_bi.patch b/yocto-poky/meta/recipes-graphics/cairo/cairo/Manually-transpose-the-matrix-in-_cairo_gl_shader_bi.patch
new file mode 100644
index 000000000..955b7d4ef
--- /dev/null
+++ b/yocto-poky/meta/recipes-graphics/cairo/cairo/Manually-transpose-the-matrix-in-_cairo_gl_shader_bi.patch
@@ -0,0 +1,49 @@
+Upstream-Status: Backport
+
+ http://lists.cairographics.org/archives/cairo/2015-May/026253.html
+ http://cgit.freedesktop.org/cairo/commit/?id=f52f0e2feb1ad0a4de23c475a8c020d41a1764a8
+
+Signed-off-by: Andre McCurdy <armccurdy@gmail.com>
+
+
+From f52f0e2feb1ad0a4de23c475a8c020d41a1764a8 Mon Sep 17 00:00:00 2001
+From: Zan Dobersek <zdobersek@igalia.com>
+Date: Fri, 8 May 2015 01:50:25 -0700
+Subject: [PATCH] Manually transpose the matrix in _cairo_gl_shader_bind_matrix()
+
+To maintain compatibility with OpenGL ES 2.0, the matrix in
+_cairo_gl_shader_bind_matrix() should be manually transposed,
+and GL_FALSE passed as the transpose argument to the
+glUniformMatrix3fv() call as it is the only valid value for
+that parameter in OpenGL ES 2.0.
+
+Reviewed-by: Bryce Harrington <bryce@osg.samsung.com>
+Acked-by: "Henry (Yu) Song" <henry.song@samsung.com>
+---
+ src/cairo-gl-shaders.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/src/cairo-gl-shaders.c b/src/cairo-gl-shaders.c
+index 2710606..fe975d2 100644
+--- a/src/cairo-gl-shaders.c
++++ b/src/cairo-gl-shaders.c
+@@ -973,12 +973,12 @@ _cairo_gl_shader_bind_matrix (cairo_gl_context_t *ctx,
+ {
+ cairo_gl_dispatch_t *dispatch = &ctx->dispatch;
+ float gl_m[9] = {
+- m->xx, m->xy, m->x0,
+- m->yx, m->yy, m->y0,
+- 0, 0, 1
++ m->xx, m->yx, 0,
++ m->xy, m->yy, 0,
++ m->x0, m->y0, 1
+ };
+ assert (location != -1);
+- dispatch->UniformMatrix3fv (location, 1, GL_TRUE, gl_m);
++ dispatch->UniformMatrix3fv (location, 1, GL_FALSE, gl_m);
+ }
+
+ void
+--
+1.9.1
+
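
A standalone check of the equivalence the patch relies on (no GL calls; the values are arbitrary): sending the row-major data with transpose=GL_TRUE and sending the manually transposed, column-major data with transpose=GL_FALSE hand the shader the same 3x3 matrix, and only the latter is valid in OpenGL ES 2.0.

#include <stdio.h>

int main(void)
{
    /* cairo-style affine: xx xy x0 / yx yy y0 / 0 0 1 */
    float xx = 2, xy = 0, x0 = 10, yx = 0, yy = 3, y0 = 20;

    float row_major[9]  = { xx, xy, x0,  yx, yy, y0,  0, 0, 1 };
    float transposed[9] = { xx, yx, 0,   xy, yy, 0,   x0, y0, 1 };

    /* element (r,c) of row_major equals element (c,r) of transposed */
    int ok = 1;
    for (int r = 0; r < 3; r++)
        for (int c = 0; c < 3; c++)
            if (row_major[3 * r + c] != transposed[3 * c + r])
                ok = 0;
    printf("manual transpose matches: %s\n", ok ? "yes" : "no");
    return 0;
}
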
diff --git a/yocto-poky/meta/recipes-graphics/cairo/cairo_1.14.2.bb b/yocto-poky/meta/recipes-graphics/cairo/cairo_1.14.2.bb
index 3817dbf28..75cde0ad7 100644
--- a/yocto-poky/meta/recipes-graphics/cairo/cairo_1.14.2.bb
+++ b/yocto-poky/meta/recipes-graphics/cairo/cairo_1.14.2.bb
@@ -3,6 +3,7 @@ require cairo.inc
LIC_FILES_CHKSUM = "file://COPYING;md5=e73e999e0c72b5ac9012424fa157ad77"
SRC_URI = "http://cairographics.org/releases/cairo-${PV}.tar.xz"
+SRC_URI += "file://Manually-transpose-the-matrix-in-_cairo_gl_shader_bi.patch"
SRC_URI[md5sum] = "e1cdfaf1c6c995c4d4c54e07215b0118"
SRC_URI[sha256sum] = "c919d999ddb1bbbecd4bbe65299ca2abd2079c7e13d224577895afa7005ecceb"
diff --git a/yocto-poky/meta/recipes-graphics/directfb/directfb.inc b/yocto-poky/meta/recipes-graphics/directfb/directfb.inc
index 446aaeadb..cbd401483 100644
--- a/yocto-poky/meta/recipes-graphics/directfb/directfb.inc
+++ b/yocto-poky/meta/recipes-graphics/directfb/directfb.inc
@@ -22,6 +22,9 @@ S = "${WORKDIR}/DirectFB-${PV}"
LDFLAGS_append =" -lts -lm"
+# Workaround for linking issues seen with armv7a + gold
+LDFLAGS_append_armv7a = "${@base_contains('DISTRO_FEATURES', 'ld-is-gold', ' -fuse-ld=bfd ', '', d)}"
+
BINCONFIG = "${bindir}/directfb-config"
inherit autotools binconfig-disabled pkgconfig
diff --git a/yocto-poky/meta/recipes-graphics/libsdl/libsdl_1.2.15.bb b/yocto-poky/meta/recipes-graphics/libsdl/libsdl_1.2.15.bb
index 266bd4246..c0d5c6a96 100644
--- a/yocto-poky/meta/recipes-graphics/libsdl/libsdl_1.2.15.bb
+++ b/yocto-poky/meta/recipes-graphics/libsdl/libsdl_1.2.15.bb
@@ -13,8 +13,9 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=27818cd7fd83877a8e3ef82b82798ef4"
PROVIDES = "virtual/libsdl"
DEPENDS = "${@bb.utils.contains('DISTRO_FEATURES', 'directfb', 'directfb', '', d)} \
- ${@bb.utils.contains('DISTRO_FEATURES', 'opengl', 'virtual/libgl libglu', '', d)} \
+ ${@bb.utils.contains('DISTRO_FEATURES', 'opengl', 'virtual/libgl', '', d)} \
${@bb.utils.contains('DISTRO_FEATURES', 'x11', 'virtual/libx11 libxext libxrandr libxrender', '', d)} \
+ ${@bb.utils.contains('DISTRO_FEATURES', 'x11 opengl', 'libglu', '', d)} \
tslib"
DEPENDS_class-nativesdk = "${@bb.utils.contains('DISTRO_FEATURES', 'x11', 'virtual/nativesdk-libx11 nativesdk-libxrandr nativesdk-libxrender nativesdk-libxext', '', d)}"
diff --git a/yocto-poky/meta/recipes-graphics/libsdl2/libsdl2_2.0.3.bb b/yocto-poky/meta/recipes-graphics/libsdl2/libsdl2_2.0.3.bb
index 97f64f384..f138f9761 100644
--- a/yocto-poky/meta/recipes-graphics/libsdl2/libsdl2_2.0.3.bb
+++ b/yocto-poky/meta/recipes-graphics/libsdl2/libsdl2_2.0.3.bb
@@ -39,7 +39,7 @@ PACKAGECONFIG ??= " \
${@bb.utils.contains('DISTRO_FEATURES', 'alsa', 'alsa', '', d)} \
${@bb.utils.contains('DISTRO_FEATURES', 'directfb', 'directfb', '', d)} \
${@bb.utils.contains('DISTRO_FEATURES', 'pulseaudio', 'pulseaudio', '', d)} \
- ${@bb.utils.contains('DISTRO_FEATURES', 'wayland', 'wayland', '', d)} \
+ ${@bb.utils.contains('DISTRO_FEATURES', 'wayland', 'wayland gles2', '', d)} \
${@bb.utils.contains('DISTRO_FEATURES', 'x11', 'x11', '', d)} \
"
PACKAGECONFIG[alsa] = "--enable-alsa --disable-alsatest,--disable-alsa,alsa-lib,"
diff --git a/yocto-poky/meta/recipes-graphics/mesa/mesa-demos/0010-sharedtex_mt-fix-rendering-thread-hang.patch b/yocto-poky/meta/recipes-graphics/mesa/mesa-demos/0010-sharedtex_mt-fix-rendering-thread-hang.patch
new file mode 100644
index 000000000..04e1b446f
--- /dev/null
+++ b/yocto-poky/meta/recipes-graphics/mesa/mesa-demos/0010-sharedtex_mt-fix-rendering-thread-hang.patch
@@ -0,0 +1,43 @@
+From 525fa9ded72d22b53c5eb366f61e2ac1d407a2db Mon Sep 17 00:00:00 2001
+From: Awais Belal <awais_belal@mentor.com>
+Date: Thu, 8 Oct 2015 13:49:31 +0500
+Subject: [PATCH] sharedtex_mt: fix rendering thread hang
+
+XNextEvent is a blocking call which locks up the display mutex;
+this causes the rendering threads to hang when they try to call
+glXSwapBuffers(), as that tries to take the same mutex in
+underlying calls through XCopyArea().
+So we only call XNextEvent when at least one event is pending
+and we will not block indefinitely.
+
+Signed-off-by: Awais Belal <awais_belal@mentor.com>
+Upstream-Status: Backport (2b304e765695d385fd3bf414e6e444020bedb0a8)
+
+---
+ src/xdemos/sharedtex_mt.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/src/xdemos/sharedtex_mt.c b/src/xdemos/sharedtex_mt.c
+index a90903a..1d503c4 100644
+--- a/src/xdemos/sharedtex_mt.c
++++ b/src/xdemos/sharedtex_mt.c
+@@ -420,9 +420,14 @@ Resize(struct window *h, unsigned int width, unsigned int height)
+ static void
+ EventLoop(void)
+ {
++ int i;
++ XEvent event;
+ while (1) {
+- int i;
+- XEvent event;
++ /* Do we have an event? */
++ if (XPending(gDpy) == 0) {
++ usleep(10000);
++ continue;
++ }
+ XNextEvent(gDpy, &event);
+ for (i = 0; i < NumWindows; i++) {
+ struct window *h = &Windows[i];
+--
+1.9.1
+
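
A minimal standalone loop with the same shape as the fix above (a sketch, not the demo's actual code; bounded so it terminates, build with -lX11): poll with XPending() and sleep while the queue is empty, so XNextEvent() is only called when it cannot block and the display mutex stays available to the rendering threads.

#include <X11/Xlib.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    Display *dpy = XOpenDisplay(NULL);
    if (!dpy) {
        fprintf(stderr, "cannot open display\n");
        return 1;
    }
    for (int i = 0; i < 100; i++) {
        if (XPending(dpy) == 0) {      /* nothing queued: do not block */
            usleep(10000);             /* let other threads use dpy */
            continue;
        }
        XEvent ev;
        XNextEvent(dpy, &ev);          /* guaranteed not to block now */
    }
    XCloseDisplay(dpy);
    return 0;
}
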
diff --git a/yocto-poky/meta/recipes-graphics/mesa/mesa-demos_8.2.0.bb b/yocto-poky/meta/recipes-graphics/mesa/mesa-demos_8.2.0.bb
index e4516425e..0094f556f 100644
--- a/yocto-poky/meta/recipes-graphics/mesa/mesa-demos_8.2.0.bb
+++ b/yocto-poky/meta/recipes-graphics/mesa/mesa-demos_8.2.0.bb
@@ -19,6 +19,7 @@ SRC_URI = "ftp://ftp.freedesktop.org/pub/mesa/demos/${PV}/${BPN}-${PV}.tar.bz2 \
file://0007-Install-few-more-test-programs.patch \
file://0008-glsl-perf-Add-few-missing-.glsl-.vert-.frag-files-to.patch \
file://0009-glsl-perf-Install-.glsl-.vert-.frag-files.patch \
+ file://0010-sharedtex_mt-fix-rendering-thread-hang.patch \
"
SRC_URI[md5sum] = "72613a2c8c013716db02e3ff59d29061"
SRC_URI[sha256sum] = "e4bfecb5816ddd4b7b37c1bc876b63f1f7f06fda5879221a9774d0952f90ba92"
diff --git a/yocto-poky/meta/recipes-graphics/piglit/piglit_git.bb b/yocto-poky/meta/recipes-graphics/piglit/piglit_git.bb
index 0d825c981..55ad78c09 100644
--- a/yocto-poky/meta/recipes-graphics/piglit/piglit_git.bb
+++ b/yocto-poky/meta/recipes-graphics/piglit/piglit_git.bb
@@ -18,6 +18,12 @@ inherit cmake pythonnative distro_features_check
# depends on virtual/libx11
REQUIRED_DISTRO_FEATURES = "x11"
+# The built scripts go into the temporary directory according to tempfile
+# (typically /tmp) which can race if multiple builds happen on the same machine,
+# so tell it to use a directory in ${B} to avoid overwriting.
+export TEMP = "${B}/temp/"
+do_compile[dirs] =+ "${B}/temp/"
+
PACKAGECONFIG ??= ""
PACKAGECONFIG[freeglut] = "-DPIGLIT_USE_GLUT=1,-DPIGLIT_USE_GLUT=0,freeglut,"
diff --git a/yocto-poky/meta/recipes-graphics/waffle/waffle/0001-third_party-threads-Use-PTHREAD_MUTEX_RECURSIVE-by-d.patch b/yocto-poky/meta/recipes-graphics/waffle/waffle/0001-third_party-threads-Use-PTHREAD_MUTEX_RECURSIVE-by-d.patch
new file mode 100644
index 000000000..a0c826ed9
--- /dev/null
+++ b/yocto-poky/meta/recipes-graphics/waffle/waffle/0001-third_party-threads-Use-PTHREAD_MUTEX_RECURSIVE-by-d.patch
@@ -0,0 +1,54 @@
+From 3b9b8f5f6d1b99af43e95ec0868404e552a85b73 Mon Sep 17 00:00:00 2001
+From: Emil Velikov <emil.l.velikov@gmail.com>
+Date: Thu, 19 Mar 2015 22:26:11 +0000
+Subject: [PATCH] third_party/threads: Use PTHREAD_MUTEX_RECURSIVE by default
+
+PTHREAD_MUTEX_RECURSIVE_NP was used for compatibility with old glibc.
+Although, thanks to the _GNU_SOURCE define, the portable
+PTHREAD_MUTEX_RECURSIVE has been available on Linux since at least
+1998. Simplify things, giving us compatibility with musl, which
+apparently does not provide the non-portable define.
+
+Inspired by an almost identical commit in mesa, aead7fe2e2b ("c11/threads:
+Use PTHREAD_MUTEX_RECURSIVE by default") by Felix Janda.
+
+Signed-off-by: Emil Velikov <emil.l.velikov@gmail.com>
+Reviewed-by: Chad Versace <chad.versace@intel.com>
+---
+Upstream-Status: Backport
+
+ third_party/threads/threads_posix.c | 10 ++++------
+ 1 file changed, 4 insertions(+), 6 deletions(-)
+
+diff --git a/third_party/threads/threads_posix.c b/third_party/threads/threads_posix.c
+index 5835e43..e122bf9 100644
+--- a/third_party/threads/threads_posix.c
++++ b/third_party/threads/threads_posix.c
+@@ -26,6 +26,9 @@
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
++
++#define _GNU_SOURCE
++
+ #include <stdlib.h>
+ #ifndef assert
+ #include <assert.h>
+@@ -150,13 +153,8 @@ int mtx_init(mtx_t *mtx, int type)
+ && type != (mtx_try|mtx_recursive))
+ return thrd_error;
+ pthread_mutexattr_init(&attr);
+- if ((type & mtx_recursive) != 0) {
+-#if defined(__linux__) || defined(__linux)
+- pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE_NP);
+-#else
++ if ((type & mtx_recursive) != 0)
+ pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
+-#endif
+- }
+ pthread_mutex_init(mtx, &attr);
+ pthread_mutexattr_destroy(&attr);
+ return thrd_success;
+--
+2.5.2
+
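
As a quick illustration of what the portable constant buys (a minimal sketch, not waffle code; build with -pthread): PTHREAD_MUTEX_RECURSIVE lets the same thread re-acquire a mutex it already holds, with no _NP suffix, which is exactly what musl lacks.

#define _GNU_SOURCE          /* matches the define the patch adds */
#include <pthread.h>
#include <stdio.h>

int main(void)
{
    pthread_mutexattr_t attr;
    pthread_mutex_t mtx;

    pthread_mutexattr_init(&attr);
    /* portable name, available on glibc and musl alike */
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
    pthread_mutex_init(&mtx, &attr);
    pthread_mutexattr_destroy(&attr);

    /* a recursive mutex may be re-locked by the thread that holds it */
    pthread_mutex_lock(&mtx);
    pthread_mutex_lock(&mtx);
    puts("locked twice from the same thread without deadlocking");
    pthread_mutex_unlock(&mtx);
    pthread_mutex_unlock(&mtx);

    pthread_mutex_destroy(&mtx);
    return 0;
}
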
diff --git a/yocto-poky/meta/recipes-graphics/waffle/waffle_1.5.1.bb b/yocto-poky/meta/recipes-graphics/waffle/waffle_1.5.1.bb
index b8aa05a2d..af8402053 100644
--- a/yocto-poky/meta/recipes-graphics/waffle/waffle_1.5.1.bb
+++ b/yocto-poky/meta/recipes-graphics/waffle/waffle_1.5.1.bb
@@ -3,7 +3,9 @@ LICENSE = "BSD-2-Clause"
LIC_FILES_CHKSUM = "file://LICENSE.txt;md5=4c5154407c2490750dd461c50ad94797 \
file://include/waffle/waffle.h;endline=24;md5=61dbf8697f61c78645e75a93c585b1bf"
-SRC_URI = "http://waffle-gl.org/files/release/${BPN}-${PV}/${BPN}-${PV}.tar.xz"
+SRC_URI = "http://waffle-gl.org/files/release/${BPN}-${PV}/${BPN}-${PV}.tar.xz \
+ file://0001-third_party-threads-Use-PTHREAD_MUTEX_RECURSIVE-by-d.patch \
+ "
SRC_URI[md5sum] = "c0d802bc3d0aba87c51e423a3a8bdd69"
SRC_URI[sha256sum] = "cbab0e926515064e818bf089a5af04be33307e5f40d07659fb40d59b2bfe20aa"
diff --git a/yocto-poky/meta/recipes-graphics/xorg-app/xwininfo_1.1.3.bb b/yocto-poky/meta/recipes-graphics/xorg-app/xwininfo_1.1.3.bb
index 7a452410b..0c23d1943 100644
--- a/yocto-poky/meta/recipes-graphics/xorg-app/xwininfo_1.1.3.bb
+++ b/yocto-poky/meta/recipes-graphics/xorg-app/xwininfo_1.1.3.bb
@@ -7,7 +7,7 @@ windows. Information may include window position, size, color depth, \
and a number of other items."
LIC_FILES_CHKSUM = "file://COPYING;md5=78976cd3115f6faf615accc4e094d90e"
-DEPENDS += "libxext libxmu"
+DEPENDS += "libxext libxmu gettext-native"
PE = "0"
diff --git a/yocto-poky/meta/recipes-graphics/xorg-lib/libxcb.inc b/yocto-poky/meta/recipes-graphics/xorg-lib/libxcb.inc
index fe31f20df..e40ae77ab 100644
--- a/yocto-poky/meta/recipes-graphics/xorg-lib/libxcb.inc
+++ b/yocto-poky/meta/recipes-graphics/xorg-lib/libxcb.inc
@@ -14,7 +14,9 @@ DEPENDS = "xcb-proto xproto libxau xcb-proto-native libpthread-stubs"
SRC_URI = "http://xcb.freedesktop.org/dist/libxcb-${PV}.tar.bz2 \
file://xcbincludedir.patch \
- file://disable-check.patch"
+ file://disable-check.patch \
+ file://gcc-mips-pr68302-mips-workaround.patch \
+ "
PACKAGES_DYNAMIC = "^libxcb-.*"
diff --git a/yocto-poky/meta/recipes-graphics/xorg-lib/libxcb/gcc-mips-pr68302-mips-workaround.patch b/yocto-poky/meta/recipes-graphics/xorg-lib/libxcb/gcc-mips-pr68302-mips-workaround.patch
new file mode 100644
index 000000000..698d038f9
--- /dev/null
+++ b/yocto-poky/meta/recipes-graphics/xorg-lib/libxcb/gcc-mips-pr68302-mips-workaround.patch
@@ -0,0 +1,22 @@
+Reduce debug info for xcb.c since on mips we run into a gcc5 bug
+
+https://gcc.gnu.org/bugzilla/show_bug.cgi?id=68302
+
+This patch is a workaround to get past the gcc bug until it is resolved.
+It should have minimal impact on libxcb while making it work.
+
+Upstream-Status: Inappropriate [OE-Specific]
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+
+Index: libxcb-1.11.1/src/Makefile.am
+===================================================================
+--- libxcb-1.11.1.orig/src/Makefile.am
++++ libxcb-1.11.1/src/Makefile.am
+@@ -188,6 +188,7 @@ EXTSOURCES += xkb.c
+ if BUILD_XKB
+ lib_LTLIBRARIES += libxcb-xkb.la
+ libxcb_xkb_la_LDFLAGS = -version-info 1:0:0 -no-undefined
++CFLAGS += -g1
+ libxcb_xkb_la_LIBADD = $(XCB_LIBS)
+ nodist_libxcb_xkb_la_SOURCES = xkb.c xkb.h
+ endif
diff --git a/yocto-poky/meta/recipes-graphics/xorg-lib/pixman/0001-v3-test-add-a-check-for-FE_DIVBYZERO.patch b/yocto-poky/meta/recipes-graphics/xorg-lib/pixman/0001-v3-test-add-a-check-for-FE_DIVBYZERO.patch
new file mode 100644
index 000000000..a60df5fc0
--- /dev/null
+++ b/yocto-poky/meta/recipes-graphics/xorg-lib/pixman/0001-v3-test-add-a-check-for-FE_DIVBYZERO.patch
@@ -0,0 +1,65 @@
+From fcd5eb9bd0e8674a6f4987a8fce7dc1ba8f9320c Mon Sep 17 00:00:00 2001
+From: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+Date: Thu, 17 Sep 2015 03:08:36 +0200
+Subject: [PATCH] [v3] test: add a check for FE_DIVBYZERO
+
+Some architectures, such as Microblaze and Nios2, currently do not
+implement FE_DIVBYZERO, even though they have <fenv.h> and
+feenableexcept(). This commit adds a configure.ac check to verify
+whether FE_DIVBYZERO is defined or not, and if not, disables the
+problematic code in test/utils.c.
+
+Signed-off-by: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+Signed-off-by: Marek Vasut <marex@denx.de>
+Upstream-Status: Submitted
+---
+Changes v1 -> v2:
+
+ * Use the ac_cv_have_decl_FE_DIVBYZERO variable, which is
+ automatically set by AC_CHECK_DECL, to decide whether or not
+ HAVE_FEDIVBYZERO should be defined.
+
+Changes v2 -> v3:
+
+ * Use action-if-yes of AC_CHECK_DECL as suggested in
+ http://lists.freedesktop.org/archives/pixman/2014-February/003176.html
+---
+ configure.ac | 5 +++++
+ test/utils.c | 2 ++
+ 2 files changed, 7 insertions(+)
+
+diff --git a/configure.ac b/configure.ac
+index f93cc30..424bfd3 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -891,6 +891,11 @@ if test x$have_feenableexcept = xyes; then
+ AC_DEFINE(HAVE_FEENABLEEXCEPT, 1, [Whether we have feenableexcept()])
+ fi
+
++AC_CHECK_DECL([FE_DIVBYZERO],
++ [AC_DEFINE(HAVE_FEDIVBYZERO, 1, [Whether we have FE_DIVBYZERO])],
++ [],
++ [[#include <fenv.h>]])
++
+ AC_CHECK_FUNC(gettimeofday, have_gettimeofday=yes, have_gettimeofday=no)
+ AC_CHECK_HEADER(sys/time.h, have_sys_time_h=yes, have_sys_time_h=no)
+ if test x$have_gettimeofday = xyes && test x$have_sys_time_h = xyes; then
+diff --git a/test/utils.c b/test/utils.c
+index 222d4d5..8657966 100644
+--- a/test/utils.c
++++ b/test/utils.c
+@@ -966,9 +966,11 @@ enable_divbyzero_exceptions (void)
+ {
+ #ifdef HAVE_FENV_H
+ #ifdef HAVE_FEENABLEEXCEPT
++#ifdef HAVE_FEDIVBYZERO
+ feenableexcept (FE_DIVBYZERO);
++#endif
+ #endif
+ #endif
+ }
+
+ void
+--
+2.1.4
+
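
A minimal, self-contained C sketch of the guard pattern this pixman patch introduces: the divide-by-zero trap is only enabled when the C library actually provides it. The plain #ifdef FE_DIVBYZERO below stands in for the autoconf-generated HAVE_FEDIVBYZERO macro, and feenableexcept() is assumed to be the glibc extension, so this is an illustration rather than the pixman test code itself.

    /* Enable the FE_DIVBYZERO trap only where it exists. */
    #define _GNU_SOURCE
    #include <fenv.h>
    #include <stdio.h>

    static void enable_divbyzero_exceptions(void)
    {
    #if defined(FE_DIVBYZERO) && defined(__GLIBC__)
        feenableexcept(FE_DIVBYZERO);   /* trap future divisions by zero */
        puts("FE_DIVBYZERO trap enabled");
    #else
        puts("FE_DIVBYZERO not available on this platform; skipping");
    #endif
    }

    int main(void)
    {
        enable_divbyzero_exceptions();
        return 0;
    }
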
diff --git a/yocto-poky/meta/recipes-graphics/xorg-lib/pixman_0.32.6.bb b/yocto-poky/meta/recipes-graphics/xorg-lib/pixman_0.32.6.bb
index eae59b62d..317a568bd 100644
--- a/yocto-poky/meta/recipes-graphics/xorg-lib/pixman_0.32.6.bb
+++ b/yocto-poky/meta/recipes-graphics/xorg-lib/pixman_0.32.6.bb
@@ -31,6 +31,7 @@ SRC_URI += "\
file://0001-ARM-qemu-related-workarounds-in-cpu-features-detecti.patch \
file://mips-export-revert.patch \
file://asm_include.patch \
+ file://0001-v3-test-add-a-check-for-FE_DIVBYZERO.patch \
"
SRC_URI[md5sum] = "8a9e8f14743a39cf303803f369c1f344"
diff --git a/yocto-poky/meta/recipes-graphics/xorg-xserver/xserver-xorg.inc b/yocto-poky/meta/recipes-graphics/xorg-xserver/xserver-xorg.inc
index cc1c02b55..9881c942b 100644
--- a/yocto-poky/meta/recipes-graphics/xorg-xserver/xserver-xorg.inc
+++ b/yocto-poky/meta/recipes-graphics/xorg-xserver/xserver-xorg.inc
@@ -23,6 +23,9 @@ S = "${WORKDIR}/${XORG_PN}-${PV}"
inherit autotools pkgconfig
+inherit distro_features_check
+REQUIRED_DISTRO_FEATURES = "x11"
+
PROTO_DEPS = "randrproto renderproto fixesproto damageproto xextproto xproto xf86dgaproto xf86miscproto xf86vidmodeproto compositeproto recordproto resourceproto videoproto scrnsaverproto xineramaproto fontsproto kbproto inputproto bigreqsproto xcmiscproto presentproto"
LIB_DEPS = "pixman libxfont xtrans libxau libxext libxdmcp libdrm libxkbfile libpciaccess openssl libgcrypt"
DEPENDS = "${PROTO_DEPS} ${LIB_DEPS} font-util"
diff --git a/yocto-poky/meta/recipes-kernel/kern-tools/kern-tools-native_git.bb b/yocto-poky/meta/recipes-kernel/kern-tools/kern-tools-native_git.bb
index 4753094aa..445d03a87 100644
--- a/yocto-poky/meta/recipes-kernel/kern-tools/kern-tools-native_git.bb
+++ b/yocto-poky/meta/recipes-kernel/kern-tools/kern-tools-native_git.bb
@@ -4,7 +4,7 @@ LIC_FILES_CHKSUM = "file://git/tools/kgit;beginline=5;endline=9;md5=d8d1d729a70c
DEPENDS = "git-native"
-SRCREV = "bd144d43ca5b1eaf9e727bced4ce3b61b642297c"
+SRCREV = "17d89d1861b532bbf1a81c1f024953e440db8de7"
PR = "r12"
PV = "0.2+git${SRCPV}"
diff --git a/yocto-poky/meta/recipes-kernel/kmod/kmod.inc b/yocto-poky/meta/recipes-kernel/kmod/kmod.inc
index e9aa67d56..71ffdf88d 100644
--- a/yocto-poky/meta/recipes-kernel/kmod/kmod.inc
+++ b/yocto-poky/meta/recipes-kernel/kmod/kmod.inc
@@ -16,7 +16,7 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=a6f89e2100d9b6cdffcea4f398e37343 \
"
inherit autotools gtk-doc
-SRCREV = "0d833715eaa65636dda2705b89359a1e0154dc58"
+SRCREV = "114ec87c85c35a2bd3682f9f891e494127be6fb5"
# Lookout for PV bump too when SRCREV is changed
PV = "21+git${SRCPV}"
diff --git a/yocto-poky/meta/recipes-kernel/linux-firmware/linux-firmware_git.bb b/yocto-poky/meta/recipes-kernel/linux-firmware/linux-firmware_git.bb
index 4939ca67c..0878ab1a8 100644
--- a/yocto-poky/meta/recipes-kernel/linux-firmware/linux-firmware_git.bb
+++ b/yocto-poky/meta/recipes-kernel/linux-firmware/linux-firmware_git.bb
@@ -260,10 +260,10 @@ FILES_${PN}-rtl-license = " \
/lib/firmware/LICENCE.rtlwifi_firmware.txt \
"
FILES_${PN}-rtl8192cu = " \
- /lib/firmware/rtlwifi/rtl8192cufw.bin \
+ /lib/firmware/rtlwifi/rtl8192cufw*.bin \
"
FILES_${PN}-rtl8192ce = " \
- /lib/firmware/rtlwifi/rtl8192cfw.bin \
+ /lib/firmware/rtlwifi/rtl8192cfw*.bin \
"
FILES_${PN}-rtl8192su = " \
/lib/firmware/rtlwifi/rtl8712u.bin \
diff --git a/yocto-poky/meta/recipes-kernel/linux/linux-yocto-rt_3.14.bb b/yocto-poky/meta/recipes-kernel/linux/linux-yocto-rt_3.14.bb
index 4d3d5c88d..bfeabbec0 100644
--- a/yocto-poky/meta/recipes-kernel/linux/linux-yocto-rt_3.14.bb
+++ b/yocto-poky/meta/recipes-kernel/linux/linux-yocto-rt_3.14.bb
@@ -5,7 +5,7 @@ require recipes-kernel/linux/linux-yocto.inc
SRCREV_machine ?= "8281915527ba8d79e59906c02f28e7aa11424723"
SRCREV_machine_qemuppc ?= "5e7d372ebc327f28656fc972fab55605eea8aec3"
-SRCREV_meta ?= "3a09b38a9f5015c56d99d17aa7c2f200c566249b"
+SRCREV_meta ?= "060fa80b7996250001ee90c50a4978c8fdb87fc4"
SRC_URI = "git://git.yoctoproject.org/linux-yocto-3.14.git;branch=${KBRANCH};name=machine \
git://git.yoctoproject.org/yocto-kernel-cache;type=kmeta;name=meta;branch=yocto-3.14;destsuffix=${KMETA}"
@@ -24,5 +24,6 @@ COMPATIBLE_MACHINE = "(qemux86|qemux86-64|qemuarm|qemuppc|qemumips)"
# Functionality flags
KERNEL_EXTRA_FEATURES ?= "features/netfilter/netfilter.scc features/taskstats/taskstats.scc"
KERNEL_FEATURES_append = " ${KERNEL_EXTRA_FEATURES}"
+KERNEL_FEATURES_append_qemuall=" cfg/virtio.scc"
KERNEL_FEATURES_append_qemux86=" cfg/sound.scc cfg/paravirt_kvm.scc"
KERNEL_FEATURES_append_qemux86-64=" cfg/sound.scc"
diff --git a/yocto-poky/meta/recipes-kernel/linux/linux-yocto-rt_4.1.bb b/yocto-poky/meta/recipes-kernel/linux/linux-yocto-rt_4.1.bb
index 06483c3ee..b441bf681 100644
--- a/yocto-poky/meta/recipes-kernel/linux/linux-yocto-rt_4.1.bb
+++ b/yocto-poky/meta/recipes-kernel/linux/linux-yocto-rt_4.1.bb
@@ -1,14 +1,14 @@
-KBRANCH ?= "standard/preempt-rt"
+KBRANCH ?= "standard/preempt-rt/base"
require recipes-kernel/linux/linux-yocto.inc
-SRCREV_machine ?= "59b8c4f5e8ddb9c33c62fff22204fe2b0d8c703e"
-SRCREV_meta ?= "429f9e2ff0649b8c9341345622545d874d5e303a"
+SRCREV_machine ?= "3188436876d5eaff8d48f82064367d4a65c3aa97"
+SRCREV_meta ?= "46bb64d605fd336d99fa05bab566b9553b40b4b4"
SRC_URI = "git://git.yoctoproject.org/linux-yocto-4.1.git;branch=${KBRANCH};name=machine \
git://git.yoctoproject.org/yocto-kernel-cache;type=kmeta;name=meta;branch=yocto-4.1;destsuffix=${KMETA}"
-LINUX_VERSION ?= "4.1.6"
+LINUX_VERSION ?= "4.1.15"
PV = "${LINUX_VERSION}+git${SRCPV}"
@@ -22,5 +22,6 @@ COMPATIBLE_MACHINE = "(qemux86|qemux86-64|qemuarm|qemuppc|qemumips)"
# Functionality flags
KERNEL_EXTRA_FEATURES ?= "features/netfilter/netfilter.scc features/taskstats/taskstats.scc"
KERNEL_FEATURES_append = " ${KERNEL_EXTRA_FEATURES}"
+KERNEL_FEATURES_append_qemuall=" cfg/virtio.scc"
KERNEL_FEATURES_append_qemux86=" cfg/sound.scc cfg/paravirt_kvm.scc"
KERNEL_FEATURES_append_qemux86-64=" cfg/sound.scc"
diff --git a/yocto-poky/meta/recipes-kernel/linux/linux-yocto-tiny_3.14.bb b/yocto-poky/meta/recipes-kernel/linux/linux-yocto-tiny_3.14.bb
index 412c817ff..e13cb80ce 100644
--- a/yocto-poky/meta/recipes-kernel/linux/linux-yocto-tiny_3.14.bb
+++ b/yocto-poky/meta/recipes-kernel/linux/linux-yocto-tiny_3.14.bb
@@ -10,7 +10,7 @@ KMETA = "kernel-meta"
KCONF_BSP_AUDIT_LEVEL = "2"
SRCREV_machine ?= "578602a722dbfb260801f3b37c6eafd2abb2340d"
-SRCREV_meta ?= "3a09b38a9f5015c56d99d17aa7c2f200c566249b"
+SRCREV_meta ?= "060fa80b7996250001ee90c50a4978c8fdb87fc4"
PV = "${LINUX_VERSION}+git${SRCPV}"
diff --git a/yocto-poky/meta/recipes-kernel/linux/linux-yocto-tiny_4.1.bb b/yocto-poky/meta/recipes-kernel/linux/linux-yocto-tiny_4.1.bb
index 061205e92..4caa2523b 100644
--- a/yocto-poky/meta/recipes-kernel/linux/linux-yocto-tiny_4.1.bb
+++ b/yocto-poky/meta/recipes-kernel/linux/linux-yocto-tiny_4.1.bb
@@ -4,13 +4,13 @@ KCONFIG_MODE = "--allnoconfig"
require recipes-kernel/linux/linux-yocto.inc
-LINUX_VERSION ?= "4.1.6"
+LINUX_VERSION ?= "4.1.15"
KMETA = "kernel-meta"
KCONF_BSP_AUDIT_LEVEL = "2"
-SRCREV_machine ?= "59b8c4f5e8ddb9c33c62fff22204fe2b0d8c703e"
-SRCREV_meta ?= "429f9e2ff0649b8c9341345622545d874d5e303a"
+SRCREV_machine ?= "788dfc9859321c09f1c58696bf8998f90ccb4f51"
+SRCREV_meta ?= "46bb64d605fd336d99fa05bab566b9553b40b4b4"
PV = "${LINUX_VERSION}+git${SRCPV}"
diff --git a/yocto-poky/meta/recipes-kernel/linux/linux-yocto.inc b/yocto-poky/meta/recipes-kernel/linux/linux-yocto.inc
index 3b41a61c3..81ffa24d0 100644
--- a/yocto-poky/meta/recipes-kernel/linux/linux-yocto.inc
+++ b/yocto-poky/meta/recipes-kernel/linux/linux-yocto.inc
@@ -11,6 +11,10 @@ DEPENDS_append_aarch64 = " libgcc"
KERNEL_CC_append_aarch64 = " ${TOOLCHAIN_OPTIONS}"
KERNEL_LD_append_aarch64 = " ${TOOLCHAIN_OPTIONS}"
+DEPENDS_append_nios2 = " libgcc"
+KERNEL_CC_append_nios2 = " ${TOOLCHAIN_OPTIONS}"
+KERNEL_LD_append_nios2 = " ${TOOLCHAIN_OPTIONS}"
+
# A KMACHINE is the mapping of a yocto $MACHINE to what is built
# by the kernel. This is typically the branch that should be built,
# and it can be specific to the machine or shared
diff --git a/yocto-poky/meta/recipes-kernel/linux/linux-yocto_3.14.bb b/yocto-poky/meta/recipes-kernel/linux/linux-yocto_3.14.bb
index b6b2e5a9b..db93d23db 100644
--- a/yocto-poky/meta/recipes-kernel/linux/linux-yocto_3.14.bb
+++ b/yocto-poky/meta/recipes-kernel/linux/linux-yocto_3.14.bb
@@ -15,16 +15,18 @@ SRCREV_machine_qemuarm ?= "4817747912b5c50ce5c31ef25658340ca615e1b4"
SRCREV_machine_qemuarm64 ?= "578602a722dbfb260801f3b37c6eafd2abb2340d"
SRCREV_machine_qemumips ?= "6ed76ec26b120f65f8547c8612b7334bd2745ec9"
SRCREV_machine_qemuppc ?= "a86ade84b2e142c0fd7536d96477107b6d07db5c"
-SRCREV_machine_qemux86 ?= "af1f7f586bd32d39c057f17606991b887eadb389"
-SRCREV_machine_qemux86-64 ?= "578602a722dbfb260801f3b37c6eafd2abb2340d"
+SRCREV_machine_qemux86 ?= "d9bf859dfae6f88b88b157119c20ae4d5e51420a"
+SRCREV_machine_qemux86-64 ?= "93b2b800d85c1565af7d96f3776dc38c85ae1902"
SRCREV_machine_qemumips64 ?= "a63d40b860a6d255005a541894d53729090b40ea"
SRCREV_machine ?= "578602a722dbfb260801f3b37c6eafd2abb2340d"
-SRCREV_meta ?= "3a09b38a9f5015c56d99d17aa7c2f200c566249b"
+SRCREV_meta ?= "060fa80b7996250001ee90c50a4978c8fdb87fc4"
SRC_URI = "git://git.yoctoproject.org/linux-yocto-3.14.git;branch=${KBRANCH};name=machine; \
git://git.yoctoproject.org/yocto-kernel-cache;type=kmeta;name=meta;branch=yocto-3.14;destsuffix=${KMETA}"
LINUX_VERSION ?= "3.14.36"
+LINUX_VERSION_qemux86 ?= "3.14.39"
+LINUX_VERSION_qemux86-64 ?= "3.14.39"
PV = "${LINUX_VERSION}+git${SRCPV}"
@@ -36,6 +38,7 @@ COMPATIBLE_MACHINE = "qemuarm|qemuarm64|qemux86|qemuppc|qemumips|qemumips64|qemu
# Functionality flags
KERNEL_EXTRA_FEATURES ?= "features/netfilter/netfilter.scc"
KERNEL_FEATURES_append = " ${KERNEL_EXTRA_FEATURES}"
+KERNEL_FEATURES_append_qemuall=" cfg/virtio.scc"
KERNEL_FEATURES_append_qemux86=" cfg/sound.scc cfg/paravirt_kvm.scc"
KERNEL_FEATURES_append_qemux86-64=" cfg/sound.scc cfg/paravirt_kvm.scc"
KERNEL_FEATURES_append = " ${@bb.utils.contains("TUNE_FEATURES", "mx32", " cfg/x32.scc", "" ,d)}"
diff --git a/yocto-poky/meta/recipes-kernel/linux/linux-yocto_3.19.bb b/yocto-poky/meta/recipes-kernel/linux/linux-yocto_3.19.bb
index e8c16407a..baa575b11 100644
--- a/yocto-poky/meta/recipes-kernel/linux/linux-yocto_3.19.bb
+++ b/yocto-poky/meta/recipes-kernel/linux/linux-yocto_3.19.bb
@@ -11,14 +11,14 @@ KBRANCH_qemux86 ?= "standard/common-pc"
KBRANCH_qemux86-64 ?= "standard/common-pc-64/base"
KBRANCH_qemumips64 ?= "standard/mti-malta64"
-SRCREV_machine_qemuarm ?= "963b4df663dba2584ac864e0c016825de0046558"
+SRCREV_machine_qemuarm ?= "857048f10bfe7089ca6007e72431f1c098b07115"
SRCREV_machine_qemuarm64 ?= "e152349de59b43b2a75f2c332b44171df461d5a0"
SRCREV_machine_qemumips ?= "cedbbc7b5e72df2e820bb9e7885f12132c5e2fff"
SRCREV_machine_qemuppc ?= "23a83386e10986a63e6cef712a045445499d002b"
-SRCREV_machine_qemux86 ?= "e152349de59b43b2a75f2c332b44171df461d5a0"
-SRCREV_machine_qemux86-64 ?= "e152349de59b43b2a75f2c332b44171df461d5a0"
+SRCREV_machine_qemux86 ?= "1583bf79b946cd5581d84d8c369b819a5ecb94b4"
+SRCREV_machine_qemux86-64 ?= "1583bf79b946cd5581d84d8c369b819a5ecb94b4"
SRCREV_machine_qemumips64 ?= "3eb70cea3532e22ab1b6da9864446621229e6616"
-SRCREV_machine ?= "e152349de59b43b2a75f2c332b44171df461d5a0"
+SRCREV_machine ?= "151571a39785218a57c3ae3355cd63694890cc8d"
SRCREV_meta ?= "1016714868249d64fc16692fd7679672b1efa17b"
SRC_URI = "git://git.yoctoproject.org/linux-yocto-3.19.git;name=machine;branch=${KBRANCH}; \
@@ -36,6 +36,7 @@ COMPATIBLE_MACHINE = "qemuarm|qemuarm64|qemux86|qemuppc|qemumips|qemumips64|qemu
# Functionality flags
KERNEL_EXTRA_FEATURES ?= "features/netfilter/netfilter.scc"
KERNEL_FEATURES_append = " ${KERNEL_EXTRA_FEATURES}"
+KERNEL_FEATURES_append_qemuall=" cfg/virtio.scc"
KERNEL_FEATURES_append_qemux86=" cfg/sound.scc cfg/paravirt_kvm.scc"
KERNEL_FEATURES_append_qemux86-64=" cfg/sound.scc cfg/paravirt_kvm.scc"
KERNEL_FEATURES_append = " ${@bb.utils.contains("TUNE_FEATURES", "mx32", " cfg/x32.scc", "" ,d)}"
diff --git a/yocto-poky/meta/recipes-kernel/linux/linux-yocto_4.1.bb b/yocto-poky/meta/recipes-kernel/linux/linux-yocto_4.1.bb
index 46d46165b..1bb7336b5 100644
--- a/yocto-poky/meta/recipes-kernel/linux/linux-yocto_4.1.bb
+++ b/yocto-poky/meta/recipes-kernel/linux/linux-yocto_4.1.bb
@@ -11,20 +11,22 @@ KBRANCH_qemux86 ?= "standard/base"
KBRANCH_qemux86-64 ?= "standard/base"
KBRANCH_qemumips64 ?= "standard/mti-malta64"
-SRCREV_machine_qemuarm ?= "3c1245d162ccb55de1af42bcf3dbf690457bf9e4"
-SRCREV_machine_qemuarm64 ?= "59b8c4f5e8ddb9c33c62fff22204fe2b0d8c703e"
-SRCREV_machine_qemumips ?= "4132a691d0908d10b8f07ce7ece02e6dc94e17bc"
-SRCREV_machine_qemuppc ?= "59b8c4f5e8ddb9c33c62fff22204fe2b0d8c703e"
-SRCREV_machine_qemux86 ?= "59b8c4f5e8ddb9c33c62fff22204fe2b0d8c703e"
-SRCREV_machine_qemux86-64 ?= "59b8c4f5e8ddb9c33c62fff22204fe2b0d8c703e"
-SRCREV_machine_qemumips64 ?= "033e1aa633465449edf544eb81adda0caf16ec60"
-SRCREV_machine ?= "59b8c4f5e8ddb9c33c62fff22204fe2b0d8c703e"
-SRCREV_meta ?= "429f9e2ff0649b8c9341345622545d874d5e303a"
+SRCREV_machine_qemuarm ?= "cf760f381c5e1e58d0c3372d66f4dfdc33f0984c"
+SRCREV_machine_qemuarm64 ?= "788dfc9859321c09f1c58696bf8998f90ccb4f51"
+SRCREV_machine_qemumips ?= "aa46295ab927bd5c960930c377855dbc4e57b195"
+SRCREV_machine_qemuppc ?= "788dfc9859321c09f1c58696bf8998f90ccb4f51"
+SRCREV_machine_qemux86 ?= "2e0ac7b6c4e3ada23a84756287e9b7051ace939a"
+SRCREV_machine_qemux86-64 ?= "2e0ac7b6c4e3ada23a84756287e9b7051ace939a"
+SRCREV_machine_qemumips64 ?= "949c0f2cbb4cf902478d009a7d38b6e4fb29e7c4"
+SRCREV_machine ?= "788dfc9859321c09f1c58696bf8998f90ccb4f51"
+SRCREV_meta ?= "46bb64d605fd336d99fa05bab566b9553b40b4b4"
SRC_URI = "git://git.yoctoproject.org/linux-yocto-4.1.git;name=machine;branch=${KBRANCH}; \
git://git.yoctoproject.org/yocto-kernel-cache;type=kmeta;name=meta;branch=yocto-4.1;destsuffix=${KMETA}"
-LINUX_VERSION ?= "4.1.6"
+LINUX_VERSION ?= "4.1.15"
+LINUX_VERSION_qemux86 ?= "4.1.17"
+LINUX_VERSION_qemux86-64 ?= "4.1.17"
PV = "${LINUX_VERSION}+git${SRCPV}"
@@ -36,6 +38,7 @@ COMPATIBLE_MACHINE = "qemuarm|qemuarm64|qemux86|qemuppc|qemumips|qemumips64|qemu
# Functionality flags
KERNEL_EXTRA_FEATURES ?= "features/netfilter/netfilter.scc"
KERNEL_FEATURES_append = " ${KERNEL_EXTRA_FEATURES}"
+KERNEL_FEATURES_append_qemuall=" cfg/virtio.scc"
KERNEL_FEATURES_append_qemux86=" cfg/sound.scc cfg/paravirt_kvm.scc"
KERNEL_FEATURES_append_qemux86-64=" cfg/sound.scc cfg/paravirt_kvm.scc"
KERNEL_FEATURES_append = " ${@bb.utils.contains("TUNE_FEATURES", "mx32", " cfg/x32.scc", "" ,d)}"
diff --git a/yocto-poky/meta/recipes-kernel/lttng/lttng-tools/0001-Fix-sessiond-disable-match-app-event-by-name.patch b/yocto-poky/meta/recipes-kernel/lttng/lttng-tools/0001-Fix-sessiond-disable-match-app-event-by-name.patch
new file mode 100644
index 000000000..ac1f34bfa
--- /dev/null
+++ b/yocto-poky/meta/recipes-kernel/lttng/lttng-tools/0001-Fix-sessiond-disable-match-app-event-by-name.patch
@@ -0,0 +1,58 @@
+From 700c5a9d4dc7b552926b8ddcbba91cc13312aba0 Mon Sep 17 00:00:00 2001
+From: Jonathan Rajotte <jonathan.rajotte-julien@efficios.com>
+Date: Wed, 9 Sep 2015 17:08:20 -0400
+Subject: [PATCH] Fix: sessiond: disable: match app event by name
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The use of a simple lookup and match on event name is insufficient
+to identify the corresponding ust app event.
+
+Fixes #914
+
+Signed-off-by: Jonathan Rajotte <jonathan.rajotte-julien@efficios.com>
+Signed-off-by: Jérémie Galarneau <jeremie.galarneau@efficios.com>
+
+Upstream-Status: Backport
+
+Signed-off-by: Li Zhou <li.zhou@windriver.com>
+---
+ src/bin/lttng-sessiond/ust-app.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/src/bin/lttng-sessiond/ust-app.c b/src/bin/lttng-sessiond/ust-app.c
+index 4066b06..53a6f93 100644
+--- a/src/bin/lttng-sessiond/ust-app.c
++++ b/src/bin/lttng-sessiond/ust-app.c
+@@ -3873,7 +3873,7 @@ int ust_app_disable_event_glb(struct ltt_ust_session *usess,
+ {
+ int ret = 0;
+ struct lttng_ht_iter iter, uiter;
+- struct lttng_ht_node_str *ua_chan_node, *ua_event_node;
++ struct lttng_ht_node_str *ua_chan_node;
+ struct ust_app *app;
+ struct ust_app_session *ua_sess;
+ struct ust_app_channel *ua_chan;
+@@ -3910,14 +3910,14 @@ int ust_app_disable_event_glb(struct ltt_ust_session *usess,
+ }
+ ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
+
+- lttng_ht_lookup(ua_chan->events, (void *)uevent->attr.name, &uiter);
+- ua_event_node = lttng_ht_iter_get_node_str(&uiter);
+- if (ua_event_node == NULL) {
++ ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
++ uevent->filter, uevent->attr.loglevel,
++ uevent->exclusion);
++ if (ua_event == NULL) {
+ DBG2("Event %s not found in channel %s for app pid %d."
+ "Skipping", uevent->attr.name, uchan->name, app->pid);
+ continue;
+ }
+- ua_event = caa_container_of(ua_event_node, struct ust_app_event, node);
+
+ ret = disable_ust_app_event(ua_sess, ua_event, app);
+ if (ret < 0) {
+--
+1.7.9.5
+
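
The description above boils down to: an event cannot be identified by name alone, so the lookup has to compare every distinguishing attribute. The following self-contained C sketch illustrates that idea with an invented demo_event structure and find function; it is not the lttng-tools API, only the shape of the fix.

    #include <stdio.h>
    #include <string.h>

    struct demo_event {
        const char *name;
        const char *filter;   /* NULL when no filter is attached */
        int loglevel;
    };

    /* Match on name, loglevel and filter together, not on name alone. */
    static const struct demo_event *
    find_event(const struct demo_event *tab, size_t n,
               const char *name, const char *filter, int loglevel)
    {
        for (size_t i = 0; i < n; i++) {
            if (strcmp(tab[i].name, name) != 0 || tab[i].loglevel != loglevel)
                continue;
            if ((tab[i].filter == NULL) != (filter == NULL))
                continue;                     /* one has a filter, the other not */
            if (filter && strcmp(tab[i].filter, filter) != 0)
                continue;                     /* both filtered, but differently  */
            return &tab[i];
        }
        return NULL;
    }

    int main(void)
    {
        const struct demo_event events[] = {
            { "app:event", NULL,    0 },
            { "app:event", "x > 3", 0 },      /* same name, different filter */
        };
        const struct demo_event *e = find_event(events, 2, "app:event", "x > 3", 0);
        printf("matched filter: %s\n", e ? e->filter : "(none)");
        return 0;
    }
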
diff --git a/yocto-poky/meta/recipes-kernel/lttng/lttng-tools_2.6.0.bb b/yocto-poky/meta/recipes-kernel/lttng/lttng-tools_2.6.0.bb
index 6397a987a..909acc37a 100644
--- a/yocto-poky/meta/recipes-kernel/lttng/lttng-tools_2.6.0.bb
+++ b/yocto-poky/meta/recipes-kernel/lttng/lttng-tools_2.6.0.bb
@@ -28,6 +28,7 @@ SRC_URI = "git://git.lttng.org/lttng-tools.git;branch=stable-2.6 \
file://extern-decls.patch \
file://run-ptest \
file://lttng-tools-Fix-live-timer-calculation-error.patch \
+ file://0001-Fix-sessiond-disable-match-app-event-by-name.patch \
"
S = "${WORKDIR}/git"
@@ -37,8 +38,6 @@ inherit autotools-brokensep ptest pkgconfig useradd
USERADD_PACKAGES = "${PN}"
GROUPADD_PARAM_${PN} = "tracing"
-export KERNELDIR="${STAGING_KERNEL_DIR}"
-
FILES_${PN} += "${libdir}/lttng/libexec/* ${datadir}/xml/lttng \
${libdir}/python${PYTHON_BASEVERSION}/site-packages/*"
FILES_${PN}-dbg += "${libdir}/lttng/libexec/.debug \
diff --git a/yocto-poky/meta/recipes-kernel/oprofile/oprofileui-server_git.bb b/yocto-poky/meta/recipes-kernel/oprofile/oprofileui-server_git.bb
index eb3b78b90..cc3477bc7 100644
--- a/yocto-poky/meta/recipes-kernel/oprofile/oprofileui-server_git.bb
+++ b/yocto-poky/meta/recipes-kernel/oprofile/oprofileui-server_git.bb
@@ -9,7 +9,7 @@ SRC_URI = "git://git.yoctoproject.org/oprofileui \
file://init \
file://oprofileui-server.service "
-DEPENDS += "intltool-native"
+DEPENDS += "intltool-native gettext-native"
EXTRA_OECONF += "--disable-client --enable-server"
diff --git a/yocto-poky/meta/recipes-kernel/oprofile/oprofileui_git.bb b/yocto-poky/meta/recipes-kernel/oprofile/oprofileui_git.bb
index bb69d5404..86f3d8e50 100644
--- a/yocto-poky/meta/recipes-kernel/oprofile/oprofileui_git.bb
+++ b/yocto-poky/meta/recipes-kernel/oprofile/oprofileui_git.bb
@@ -1,6 +1,9 @@
require oprofileui.inc
-DEPENDS += "gtk+ libglade libxml2 avahi-ui gconf"
+DEPENDS += "gtk+ libglade libxml2 avahi-ui gconf gettext-native"
+
+inherit distro_features_check
+ANY_OF_DISTRO_FEATURES = "${GTK2DISTROFEATURES}"
SRCREV = "389e1875af4721d52c7e65cf9cfffb69b0ed6a59"
PV = "0.0+git${SRCPV}"
diff --git a/yocto-poky/meta/recipes-kernel/sysprof/sysprof_git.bb b/yocto-poky/meta/recipes-kernel/sysprof/sysprof_git.bb
index 19c3e10f7..7d87efe7d 100644
--- a/yocto-poky/meta/recipes-kernel/sysprof/sysprof_git.bb
+++ b/yocto-poky/meta/recipes-kernel/sysprof/sysprof_git.bb
@@ -19,7 +19,8 @@ SRC_URI_append_mips64n32 = " file://rmb-mips.patch"
S = "${WORKDIR}/git"
-inherit autotools pkgconfig
+inherit autotools pkgconfig distro_features_check
+ANY_OF_DISTRO_FEATURES = "${GTK2DISTROFEATURES}"
# We do not yet work for aarch64.
#
diff --git a/yocto-poky/meta/recipes-kernel/trace-cmd/kernelshark_git.bb b/yocto-poky/meta/recipes-kernel/trace-cmd/kernelshark_git.bb
index 9deccaefd..563182c89 100644
--- a/yocto-poky/meta/recipes-kernel/trace-cmd/kernelshark_git.bb
+++ b/yocto-poky/meta/recipes-kernel/trace-cmd/kernelshark_git.bb
@@ -9,6 +9,9 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=751419260aa954499f7abaabaa882bbe \
DEPENDS = "gtk+ libxml2"
RDEPENDS_${PN} = "trace-cmd"
+inherit distro_features_check
+ANY_OF_DISTRO_FEATURES = "${GTK2DISTROFEATURES}"
+
EXTRA_OEMAKE = "\
'prefix=${prefix}' \
'bindir_relative=${@oe.path.relative(prefix, bindir)}' \
diff --git a/yocto-poky/meta/recipes-lsb4/libpng/libpng12_1.2.53.bb b/yocto-poky/meta/recipes-lsb4/libpng/libpng12_1.2.53.bb
index bf861a7ba..24f6affa2 100644
--- a/yocto-poky/meta/recipes-lsb4/libpng/libpng12_1.2.53.bb
+++ b/yocto-poky/meta/recipes-lsb4/libpng/libpng12_1.2.53.bb
@@ -9,7 +9,7 @@ DEPENDS = "zlib"
PN = "libpng12"
S = "${WORKDIR}/libpng-${PV}"
-SRC_URI = "${SOURCEFORGE_MIRROR}/project/libpng/libpng12/${PV}/libpng-${PV}.tar.xz"
+SRC_URI = "${SOURCEFORGE_MIRROR}/project/libpng/libpng12/older-releases/${PV}/libpng-${PV}.tar.xz"
SRC_URI[md5sum] = "7d18a74e6fd2029aee76ccd00e00a9e6"
SRC_URI[sha256sum] = "b45e49f689e7451bd576569e6a344f7e0d11c02ecbb797f4da0e431526765c0a"
diff --git a/yocto-poky/meta/recipes-multimedia/gstreamer/gst-player_git.bb b/yocto-poky/meta/recipes-multimedia/gstreamer/gst-player_git.bb
index 54cfbbc92..985024256 100644
--- a/yocto-poky/meta/recipes-multimedia/gstreamer/gst-player_git.bb
+++ b/yocto-poky/meta/recipes-multimedia/gstreamer/gst-player_git.bb
@@ -16,7 +16,9 @@ SRCREV = "5386c5b984d40ef5434673ed62204e69aaf52645"
S = "${WORKDIR}/git"
-inherit autotools gtk-doc lib_package pkgconfig
+inherit autotools gtk-doc lib_package pkgconfig distro_features_check
+
+ANY_OF_DISTRO_FEATURES = "${GTK2DISTROFEATURES}"
do_configure_prepend() {
touch ${S}/ChangeLog
diff --git a/yocto-poky/meta/recipes-multimedia/gstreamer/gstreamer1.0-libav_1.4.5.bb b/yocto-poky/meta/recipes-multimedia/gstreamer/gstreamer1.0-libav_1.4.5.bb
index 97fc7ec3f..5d74a2e57 100644
--- a/yocto-poky/meta/recipes-multimedia/gstreamer/gstreamer1.0-libav_1.4.5.bb
+++ b/yocto-poky/meta/recipes-multimedia/gstreamer/gstreamer1.0-libav_1.4.5.bb
@@ -25,3 +25,6 @@ LIBAV_EXTRA_CONFIGURE_COMMON_ARG = "--target-os=linux \
S = "${WORKDIR}/gst-libav-${PV}"
+# http://errors.yoctoproject.org/Errors/Details/20493/
+ARM_INSTRUCTION_SET_armv4 = "arm"
+ARM_INSTRUCTION_SET_armv5 = "arm"
diff --git a/yocto-poky/meta/recipes-multimedia/gstreamer/gstreamer1.0-omx.inc b/yocto-poky/meta/recipes-multimedia/gstreamer/gstreamer1.0-omx.inc
index d69890434..26c13361f 100644
--- a/yocto-poky/meta/recipes-multimedia/gstreamer/gstreamer1.0-omx.inc
+++ b/yocto-poky/meta/recipes-multimedia/gstreamer/gstreamer1.0-omx.inc
@@ -20,11 +20,10 @@ EXTRA_OECONF += "--disable-valgrind --with-omx-target=${GSTREAMER_1_0_OMX_TARGET
python __anonymous () {
omx_target = d.getVar("GSTREAMER_1_0_OMX_TARGET", True)
if omx_target in ['generic', 'bellagio']:
- srcdir = d.getVar("S", True)
# Bellagio headers are incomplete (they are missing the OMX_VERSION_MAJOR,#
# OMX_VERSION_MINOR, OMX_VERSION_REVISION, and OMX_VERSION_STEP macros);
# appending a directory path to gst-omx' internal OpenMAX IL headers fixes this
- d.appendVar("CFLAGS", " -I%s/omx/openmax" % srcdir)
+ d.appendVar("CFLAGS", " -I${S}/omx/openmax")
elif omx_target == "rpi":
# Dedicated Raspberry Pi OpenMAX IL support makes this package machine specific
d.setVar("PACKAGE_ARCH", d.getVar("MACHINE_ARCH", True))
diff --git a/yocto-poky/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-bad.inc b/yocto-poky/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-bad.inc
index cdedb60b2..b4f01afe5 100644
--- a/yocto-poky/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-bad.inc
+++ b/yocto-poky/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-bad.inc
@@ -20,7 +20,7 @@ PACKAGECONFIG ??= " \
${@bb.utils.contains('DISTRO_FEATURES', 'wayland', 'wayland', '', d)} \
${@bb.utils.contains('DISTRO_FEATURES', 'bluetooth', 'bluez', '', d)} \
${@bb.utils.contains('DISTRO_FEATURES', 'directfb', 'directfb', '', d)} \
- orc curl uvch264 neon sndfile \
+ orc curl neon sndfile \
hls sbc dash bz2 smoothstreaming \
"
@@ -38,7 +38,7 @@ PACKAGECONFIG[opus] = "--enable-opus,--disable-opus,libopus"
PACKAGECONFIG[flite] = "--enable-flite,--disable-flite,flite-alsa"
PACKAGECONFIG[opencv] = "--enable-opencv,--disable-opencv,opencv"
PACKAGECONFIG[wayland] = "--enable-wayland --enable-egl,--disable-wayland --disable-egl,wayland virtual/egl"
-PACKAGECONFIG[uvch264] = "--enable-uvch264,--disable-uvch264,libusb1 udev"
+PACKAGECONFIG[uvch264] = "--enable-uvch264,--disable-uvch264,libusb1 libgudev"
PACKAGECONFIG[directfb] = "--enable-directfb,--disable-directfb,directfb"
PACKAGECONFIG[neon] = "--enable-neon,--disable-neon,neon"
PACKAGECONFIG[openal] = "--enable-openal,--disable-openal,openal-soft"
@@ -55,6 +55,9 @@ PACKAGECONFIG[sndfile] = "--enable-sndfile,--disable-sndfile,libsndfile1
PACKAGECONFIG[webp] = "--enable-webp,--disable-webp,libwebp"
PACKAGECONFIG[rtmp] = "--enable-rtmp,--disable-rtmp,rtmpdump"
PACKAGECONFIG[libssh2] = "--enable-libssh2,--disable-libssh2,libssh2"
+PACKAGECONFIG[voamrwbenc] = "--enable-voamrwbenc,--disable-voamrwbenc,vo-amrwbenc"
+PACKAGECONFIG[voaacenc] = "--enable-voaacenc,--disable-voaacenc,vo-aacenc"
+PACKAGECONFIG[resindvd] = "--enable-resindvd,--disable-resindvd,libdvdnav libdvdread"
# these plugins have not been ported to 1.0 (yet):
# directdraw vcd apexsink dc1394 lv2 linsys musepack mythtv
@@ -66,8 +69,8 @@ PACKAGECONFIG[libssh2] = "--enable-libssh2,--disable-libssh2,libssh2"
# these plugins have no corresponding library in OE-core or meta-openembedded:
# openni2 winks direct3d directdraw directsound winscreencap osx_video
# apple_media android_media avc chromaprint daala dts gme gsm kate ladspa mimic
-# mpeg2enc mplex ofa openjpeg opensles pvr resindvd rtmp soundtouch spandsp spc
-# srtp vdpau voaacenc voamrwbenc wasapi zbar
+# mpeg2enc mplex ofa openjpeg opensles pvr rtmp soundtouch spandsp spc
+# srtp vdpau wasapi zbar
EXTRA_OECONF += " \
--enable-dvb \
@@ -106,7 +109,6 @@ EXTRA_OECONF += " \
--disable-osx_video \
--disable-pvr \
--disable-quicktime \
- --disable-resindvd \
--disable-sdl \
--disable-sdltest \
--disable-sndio \
@@ -118,8 +120,6 @@ EXTRA_OECONF += " \
--disable-timidity \
--disable-vcd \
--disable-vdpau \
- --disable-voaacenc \
- --disable-voamrwbenc \
--disable-wasapi \
--disable-wildmidi \
--disable-wininet \
@@ -133,3 +133,4 @@ ARM_INSTRUCTION_SET = "arm"
FILES_gstreamer1.0-plugins-bad-opencv += "${datadir}/gst-plugins-bad/1.0/opencv*"
+FILES_${PN}-voamrwbenc += "${datadir}/gstreamer-${LIBV}/presets/GstVoAmrwbEnc.prs"
diff --git a/yocto-poky/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-bad/0001-glimagesink-Downrank-to-marginal.patch b/yocto-poky/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-bad/0001-glimagesink-Downrank-to-marginal.patch
new file mode 100644
index 000000000..f677603eb
--- /dev/null
+++ b/yocto-poky/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-bad/0001-glimagesink-Downrank-to-marginal.patch
@@ -0,0 +1,32 @@
+From c6b37a80806f9128de47f1ccc3f2354f8d436bb6 Mon Sep 17 00:00:00 2001
+From: Alexander Kanavin <alex.kanavin@gmail.com>
+Date: Thu, 24 Sep 2015 19:47:32 +0300
+Subject: [PATCH] glimagesink: Downrank to marginal
+
+On desktop, where there is good OpenGL, xvimagesink will come up first.
+On other platforms, OpenGL can't be trusted because it's either software (like
+in a VM) or broken (like on embedded), so let ximagesink come above.
+
+Upstream-Status: Submitted [https://bugzilla.gnome.org/show_bug.cgi?id=751684]
+
+Signed-off-by: Alexander Kanavin <alex.kanavin@gmail.com>
+---
+ ext/gl/gstopengl.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/ext/gl/gstopengl.c b/ext/gl/gstopengl.c
+index a4b2540..0ccaacd 100644
+--- a/ext/gl/gstopengl.c
++++ b/ext/gl/gstopengl.c
+@@ -101,7 +101,7 @@ plugin_init (GstPlugin * plugin)
+ #endif
+
+ if (!gst_element_register (plugin, "glimagesink",
+- GST_RANK_SECONDARY, GST_TYPE_GLIMAGE_SINK)) {
++ GST_RANK_MARGINAL, GST_TYPE_GLIMAGE_SINK)) {
+ return FALSE;
+ }
+
+--
+2.1.4
+
diff --git a/yocto-poky/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-bad_1.4.5.bb b/yocto-poky/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-bad_1.4.5.bb
index 59065de81..687366924 100644
--- a/yocto-poky/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-bad_1.4.5.bb
+++ b/yocto-poky/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-bad_1.4.5.bb
@@ -5,7 +5,9 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=73a5855a8119deb017f5f13cf327095d \
file://COPYING.LIB;md5=21682e4e8fea52413fd26c60acb907e5 \
file://gst/tta/crc32.h;beginline=12;endline=29;md5=27db269c575d1e5317fffca2d33b3b50"
-SRC_URI += "file://0001-gl-do-not-check-for-GL-GLU-EGL-GLES2-libs-if-disable.patch"
+SRC_URI += "file://0001-gl-do-not-check-for-GL-GLU-EGL-GLES2-libs-if-disable.patch \
+ file://0001-glimagesink-Downrank-to-marginal.patch \
+ "
SRC_URI[md5sum] = "e0bb39412cf4a48fe0397bcf3a7cd451"
SRC_URI[sha256sum] = "152fad7250683d72f9deb36c5685428338365fe4a4c87ffe15e38783b14f983c"
diff --git a/yocto-poky/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-base.inc b/yocto-poky/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-base.inc
index 47f3f4011..4909b10d5 100644
--- a/yocto-poky/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-base.inc
+++ b/yocto-poky/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-base.inc
@@ -25,13 +25,12 @@ PACKAGECONFIG[ogg] = "--enable-ogg,--disable-ogg,libogg"
PACKAGECONFIG[theora] = "--enable-theora,--disable-theora,libtheora"
PACKAGECONFIG[vorbis] = "--enable-vorbis,--disable-vorbis,libvorbis"
PACKAGECONFIG[pango] = "--enable-pango,--disable-pango,pango"
+# libvisual does not seem to exist anywhere in OE
+PACKAGECONFIG[visual] = "--enable-libvisual,--disable-libvisual,libvisual"
+PACKAGECONFIG[cdparanoia] = "--enable-cdparanoia,--disable-cdparanoia,cdparanoia"
-
-# cdparanoia and libvisual do not seem to exist anywhere in OE
EXTRA_OECONF += " \
--disable-freetypetest \
- --disable-cdparanoia \
- --disable-libvisual \
"
FILES_${MLPREFIX}libgsttag-1.0 += "${datadir}/gst-plugins-base/1.0/license-translations.dict"
diff --git a/yocto-poky/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good.inc b/yocto-poky/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good.inc
index 6e316de9f..edaafe842 100644
--- a/yocto-poky/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good.inc
+++ b/yocto-poky/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good.inc
@@ -11,7 +11,7 @@ inherit gettext
PACKAGECONFIG ??= " \
${@bb.utils.contains('DISTRO_FEATURES', 'x11', 'x11', '', d)} \
${@bb.utils.contains('DISTRO_FEATURES', 'pulseaudio', 'pulseaudio', '', d)} \
- orc cairo flac gdk-pixbuf gudev jpeg libpng soup speex taglib \
+ orc cairo flac gdk-pixbuf jpeg libpng soup speex taglib v4l2\
"
X11DEPENDS = "virtual/libx11 libsm libxrender libxfixes libxdamage"
@@ -22,7 +22,8 @@ PACKAGECONFIG[pulseaudio] = "--enable-pulse,--disable-pulse,pulseaudio"
PACKAGECONFIG[cairo] = "--enable-cairo,--disable-cairo,cairo"
PACKAGECONFIG[flac] = "--enable-flac,--disable-flac,flac"
PACKAGECONFIG[gdk-pixbuf] = "--enable-gdk_pixbuf,--disable-gdk_pixbuf,gdk-pixbuf"
-PACKAGECONFIG[gudev] = "--with-gudev,--without-gudev,udev"
+PACKAGECONFIG[gudev] = "--with-gudev,--without-gudev,libgudev"
+PACKAGECONFIG[libv4l2] = "--with-libv4l2,--without-libv4l2,libv4l2"
PACKAGECONFIG[jack] = "--enable-jack,--disable-jack,jack"
PACKAGECONFIG[jpeg] = "--enable-jpeg,--disable-jpeg,jpeg"
PACKAGECONFIG[libpng] = "--enable-libpng,--disable-libpng,libpng"
@@ -31,15 +32,11 @@ PACKAGECONFIG[speex] = "--enable-speex,--disable-speex,speex"
PACKAGECONFIG[taglib] = "--enable-taglib,--disable-taglib,taglib"
PACKAGECONFIG[vpx] = "--enable-vpx,--disable-vpx,libvpx"
PACKAGECONFIG[wavpack] = "--enable-wavpack,--disable-wavpack,wavpack"
-
-# the 1394 plugins require both libraw1394 and libiec61883
-# the former is included in meta-oe, the latter isn't
-# -> disabled
+PACKAGECONFIG[dv1394] = "--enable-dv1394,--disable-dv1394,libraw1394 libiec61883 libavc1394"
+PACKAGECONFIG[v4l2] = "--enable-gst_v4l2,--disable-gst_v4l2"
EXTRA_OECONF += " \
--enable-oss \
- --enable-gst_v4l2 \
- --without-libv4l2 \
--disable-directsound \
--disable-waveform \
--disable-oss4 \
@@ -51,7 +48,6 @@ EXTRA_OECONF += " \
--disable-libdv \
--disable-shout2 \
--disable-examples \
- --disable-dv1394 \
"
FILES_${PN}-equalizer += "${datadir}/gstreamer-1.0/presets/*.prs"
diff --git a/yocto-poky/meta/recipes-multimedia/gstreamer/gstreamer1.0/0003-basesink-Shouldn-t-drop-buffer-when-sync-false.patch b/yocto-poky/meta/recipes-multimedia/gstreamer/gstreamer1.0/0003-basesink-Shouldn-t-drop-buffer-when-sync-false.patch
new file mode 100755
index 000000000..d682ee60c
--- /dev/null
+++ b/yocto-poky/meta/recipes-multimedia/gstreamer/gstreamer1.0/0003-basesink-Shouldn-t-drop-buffer-when-sync-false.patch
@@ -0,0 +1,30 @@
+From 73df2b5c0aea58015788f5a94a3ec65296a688d3 Mon Sep 17 00:00:00 2001
+From: Song Bing <b06498@freescale.com>
+Date: Thu, 2 Jul 2015 14:32:21 +0800
+Subject: [PATCH] basesink: Shouldn't drop buffer when sync=false
+
+Shouldn't drop buffer when sync=false
+
+Upstream-Status: Accepted
+
+https://bugzilla.gnome.org/show_bug.cgi?id=751819
+---
+ libs/gst/base/gstbasesink.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/libs/gst/base/gstbasesink.c b/libs/gst/base/gstbasesink.c
+index d44e8fc..cd759ac 100644
+--- a/libs/gst/base/gstbasesink.c
++++ b/libs/gst/base/gstbasesink.c
+@@ -3423,7 +3423,7 @@ gst_base_sink_chain_unlocked (GstBaseSink * basesink, GstPad * pad,
+ if (G_UNLIKELY (stepped))
+ goto dropped;
+
+- if (syncable && do_sync) {
++ if (syncable && do_sync && gst_base_sink_get_sync (basesink)) {
+ GstClock *clock;
+
+ GST_OBJECT_LOCK (basesink);
+--
+1.7.9.5
+
diff --git a/yocto-poky/meta/recipes-multimedia/gstreamer/gstreamer1.0_1.4.5.bb b/yocto-poky/meta/recipes-multimedia/gstreamer/gstreamer1.0_1.4.5.bb
index db58754a6..73a4a9927 100644
--- a/yocto-poky/meta/recipes-multimedia/gstreamer/gstreamer1.0_1.4.5.bb
+++ b/yocto-poky/meta/recipes-multimedia/gstreamer/gstreamer1.0_1.4.5.bb
@@ -9,6 +9,7 @@ SRC_URI = " \
file://0001-gstinfo-Shorten-__FILE__-on-all-platforms.patch \
file://inputselector-sticky-events-haven-t-send-out-when-ac-1-4-1.patch \
file://0002-basesink-Fix-QoS-lateness-checking-if-subclass-imple.patch \
+ file://0003-basesink-Shouldn-t-drop-buffer-when-sync-false.patch \
"
SRC_URI[md5sum] = "88a9289c64a4950ebb4f544980234289"
SRC_URI[sha256sum] = "40801aa7f979024526258a0e94707ba42b8ab6f7d2206e56adbc4433155cb0ae"
diff --git a/yocto-poky/meta/recipes-multimedia/libpng/libpng-1.6.17/CVE-2015-8126_1.patch b/yocto-poky/meta/recipes-multimedia/libpng/libpng-1.6.17/CVE-2015-8126_1.patch
new file mode 100644
index 000000000..25fe1364d
--- /dev/null
+++ b/yocto-poky/meta/recipes-multimedia/libpng/libpng-1.6.17/CVE-2015-8126_1.patch
@@ -0,0 +1,91 @@
+From 81f44665cce4cb1373f049a76f3904e981b7a766 Mon Sep 17 00:00:00 2001
+From: Glenn Randers-Pehrson <glennrp at users.sourceforge.net>
+Date: Thu, 29 Oct 2015 09:26:41 -0500
+Subject: [PATCH] [libpng16] Reject attempt to write over-length PLTE chunk
+
+Upstream-Status: Backport
+https://github.com/glennrp/libpng/commit/81f44665cce4cb1373f049a76f3904e981b7a766
+
+CVE: CVE-2015-8126 patch #1
+
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ libpng-manual.txt | 5 +++++
+ libpng.3 | 5 +++++
+ pngwrite.c | 4 ++--
+ pngwutil.c | 7 +++++--
+ 4 files changed, 17 insertions(+), 4 deletions(-)
+
+Index: libpng-1.6.17/libpng-manual.txt
+===================================================================
+--- libpng-1.6.17.orig/libpng-manual.txt
++++ libpng-1.6.17/libpng-manual.txt
+@@ -5109,6 +5109,11 @@ length, which resulted in PNG files that
+ chunk. This error was fixed in libpng-1.6.3, and a tool (called
+ contrib/tools/png-fix-itxt) has been added to the libpng distribution.
+
++Starting with libpng-1.6.19, attempting to write an over-length PLTE chunk
++is an error. Previously this requirement of the PNG specification was not
++enforced. Libpng continues to accept over-length PLTE chunks when reading,
++but does not make any use of the extra entries.
++
+ XIII. Detecting libpng
+
+ The png_get_io_ptr() function has been present since libpng-0.88, has never
+Index: libpng-1.6.17/libpng.3
+===================================================================
+--- libpng-1.6.17.orig/libpng.3
++++ libpng-1.6.17/libpng.3
+@@ -5613,6 +5613,11 @@ length, which resulted in PNG files that
+ chunk. This error was fixed in libpng-1.6.3, and a tool (called
+ contrib/tools/png-fix-itxt) has been added to the libpng distribution.
+
++Starting with libpng-1.6.19, attempting to write an over-length PLTE chunk
++is an error. Previously this requirement of the PNG specification was not
++enforced. Libpng continues to accept over-length PLTE chunks when reading,
++but does not make any use of the extra entries.
++
+ .SH XIII. Detecting libpng
+
+ The png_get_io_ptr() function has been present since libpng-0.88, has never
+Index: libpng-1.6.17/pngwrite.c
+===================================================================
+--- libpng-1.6.17.orig/pngwrite.c
++++ libpng-1.6.17/pngwrite.c
+@@ -205,7 +205,7 @@ png_write_info(png_structrp png_ptr, png
+ png_write_PLTE(png_ptr, info_ptr->palette,
+ (png_uint_32)info_ptr->num_palette);
+
+- else if ((info_ptr->color_type == PNG_COLOR_TYPE_PALETTE) !=0)
++ else if (info_ptr->color_type == PNG_COLOR_TYPE_PALETTE)
+ png_error(png_ptr, "Valid palette required for paletted images");
+
+ #ifdef PNG_WRITE_tRNS_SUPPORTED
+Index: libpng-1.6.17/pngwutil.c
+===================================================================
+--- libpng-1.6.17.orig/pngwutil.c
++++ libpng-1.6.17/pngwutil.c
+@@ -922,17 +922,20 @@ void /* PRIVATE */
+ png_write_PLTE(png_structrp png_ptr, png_const_colorp palette,
+ png_uint_32 num_pal)
+ {
+- png_uint_32 i;
++ png_uint_32 max_num_pal, i;
+ png_const_colorp pal_ptr;
+ png_byte buf[3];
+
+ png_debug(1, "in png_write_PLTE");
+
++ max_num_pal = (png_ptr->color_type == PNG_COLOR_TYPE_PALETTE) ?
++ (1 << png_ptr->bit_depth) : PNG_MAX_PALETTE_LENGTH;
++
+ if ((
+ #ifdef PNG_MNG_FEATURES_SUPPORTED
+ (png_ptr->mng_features_permitted & PNG_FLAG_MNG_EMPTY_PLTE) == 0 &&
+ #endif
+- num_pal == 0) || num_pal > 256)
++ num_pal == 0) || num_pal > max_num_pal)
+ {
+ if (png_ptr->color_type == PNG_COLOR_TYPE_PALETTE)
+ {
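
The rule the patch enforces is easy to check in isolation: a paletted PNG may carry at most 2^bit_depth palette entries, and never more than 256. This standalone C sketch mirrors the max_num_pal computation; the DEMO_* constants are redefined here so the example compiles on its own and are not taken from png.h.

    #include <stdio.h>

    #define DEMO_COLOR_TYPE_PALETTE 3     /* PNG colour type 3 = paletted image */
    #define DEMO_MAX_PALETTE_LENGTH 256

    /* Largest palette allowed for the given colour type and bit depth. */
    static unsigned max_palette_length(int color_type, unsigned bit_depth)
    {
        return (color_type == DEMO_COLOR_TYPE_PALETTE)
                   ? (1u << bit_depth)
                   : DEMO_MAX_PALETTE_LENGTH;
    }

    int main(void)
    {
        unsigned depths[] = { 1, 2, 4, 8 };
        for (int i = 0; i < 4; i++)
            printf("bit depth %u -> at most %u palette entries\n", depths[i],
                   max_palette_length(DEMO_COLOR_TYPE_PALETTE, depths[i]));
        return 0;
    }
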
diff --git a/yocto-poky/meta/recipes-multimedia/libpng/libpng-1.6.17/CVE-2015-8126_2.patch b/yocto-poky/meta/recipes-multimedia/libpng/libpng-1.6.17/CVE-2015-8126_2.patch
new file mode 100644
index 000000000..4aa917084
--- /dev/null
+++ b/yocto-poky/meta/recipes-multimedia/libpng/libpng-1.6.17/CVE-2015-8126_2.patch
@@ -0,0 +1,134 @@
+From a901eb3ce6087e0afeef988247f1a1aa208cb54d Mon Sep 17 00:00:00 2001
+From: Glenn Randers-Pehrson <glennrp at users.sourceforge.net>
+Date: Fri, 30 Oct 2015 07:57:49 -0500
+Subject: [PATCH] [libpng16] Prevent reading over-length PLTE chunk (Cosmin
+ Truta).
+
+Upstream-Status: Backport
+https://github.com/glennrp/libpng/commit/a901eb3ce6087e0afeef988247f1a1aa208cb54d
+
+Many changes involved date and version updates which don't apply in this case.
+
+CVE: CVE-2015-8126 patch #2
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ ANNOUNCE | 6 +++---
+ CHANGES | 4 ++--
+ libpng-manual.txt | 11 +++++------
+ libpng.3 | 19 +++++++++----------
+ pngrutil.c | 3 +++
+ pngset.c | 13 +++++++++----
+ pngwutil.c | 6 +++---
+ 7 files changed, 34 insertions(+), 28 deletions(-)
+
+Index: libpng-1.6.17/libpng-manual.txt
+===================================================================
+--- libpng-1.6.17.orig/libpng-manual.txt
++++ libpng-1.6.17/libpng-manual.txt
+@@ -5109,10 +5109,9 @@ length, which resulted in PNG files that
+ chunk. This error was fixed in libpng-1.6.3, and a tool (called
+ contrib/tools/png-fix-itxt) has been added to the libpng distribution.
+
+-Starting with libpng-1.6.19, attempting to write an over-length PLTE chunk
++Starting with libpng-1.6.19, attempting to set an over-length PLTE chunk
+ is an error. Previously this requirement of the PNG specification was not
+-enforced. Libpng continues to accept over-length PLTE chunks when reading,
+-but does not make any use of the extra entries.
++enforced, and the palette was always limited to 256 entries.
+
+ XIII. Detecting libpng
+
+Index: libpng-1.6.17/libpng.3
+===================================================================
+--- libpng-1.6.17.orig/libpng.3
++++ libpng-1.6.17/libpng.3
+@@ -5613,10 +5613,9 @@ length, which resulted in PNG files that
+ chunk. This error was fixed in libpng-1.6.3, and a tool (called
+ contrib/tools/png-fix-itxt) has been added to the libpng distribution.
+
+-Starting with libpng-1.6.19, attempting to write an over-length PLTE chunk
++Starting with libpng-1.6.19, attempting to set an over-length PLTE chunk
+ is an error. Previously this requirement of the PNG specification was not
+-enforced. Libpng continues to accept over-length PLTE chunks when reading,
+-but does not make any use of the extra entries.
++enforced, and the palette was always limited to 256 entries.
+
+ .SH XIII. Detecting libpng
+
+Index: libpng-1.6.17/pngrutil.c
+===================================================================
+--- libpng-1.6.17.orig/pngrutil.c
++++ libpng-1.6.17/pngrutil.c
+@@ -997,6 +997,9 @@ png_handle_PLTE(png_structrp png_ptr, pn
+ * confusing.
+ *
+ * Fix this by not sharing the palette in this way.
++ *
++ * Starting with libpng-1.6.19, png_set_PLTE() also issues a png_error() when
++ * it attempts to set a palette length that is too large for the bit depth.
+ */
+ png_set_PLTE(png_ptr, info_ptr, palette, num);
+
+Index: libpng-1.6.17/pngset.c
+===================================================================
+--- libpng-1.6.17.orig/pngset.c
++++ libpng-1.6.17/pngset.c
+@@ -513,12 +513,17 @@ png_set_PLTE(png_structrp png_ptr, png_i
+ png_const_colorp palette, int num_palette)
+ {
+
++ png_uint_32 max_palette_length;
++
+ png_debug1(1, "in %s storage function", "PLTE");
+
+ if (png_ptr == NULL || info_ptr == NULL)
+ return;
+
+- if (num_palette < 0 || num_palette > PNG_MAX_PALETTE_LENGTH)
++ max_palette_length = (png_ptr->color_type == PNG_COLOR_TYPE_PALETTE) ?
++ (1 << png_ptr->bit_depth) : PNG_MAX_PALETTE_LENGTH;
++
++ if (num_palette < 0 || num_palette > max_palette_length)
+ {
+ if (info_ptr->color_type == PNG_COLOR_TYPE_PALETTE)
+ png_error(png_ptr, "Invalid palette length");
+@@ -551,8 +556,8 @@ png_set_PLTE(png_structrp png_ptr, png_i
+ png_free_data(png_ptr, info_ptr, PNG_FREE_PLTE, 0);
+
+ /* Changed in libpng-1.2.1 to allocate PNG_MAX_PALETTE_LENGTH instead
+- * of num_palette entries, in case of an invalid PNG file that has
+- * too-large sample values.
++ * of num_palette entries, in case of an invalid PNG file or incorrect
++ * call to png_set_PLTE() with too-large sample values.
+ */
+ png_ptr->palette = png_voidcast(png_colorp, png_calloc(png_ptr,
+ PNG_MAX_PALETTE_LENGTH * (sizeof (png_color))));
+Index: libpng-1.6.17/pngwutil.c
+===================================================================
+--- libpng-1.6.17.orig/pngwutil.c
++++ libpng-1.6.17/pngwutil.c
+@@ -922,20 +922,20 @@ void /* PRIVATE */
+ png_write_PLTE(png_structrp png_ptr, png_const_colorp palette,
+ png_uint_32 num_pal)
+ {
+- png_uint_32 max_num_pal, i;
++ png_uint_32 max_palette_length, i;
+ png_const_colorp pal_ptr;
+ png_byte buf[3];
+
+ png_debug(1, "in png_write_PLTE");
+
+- max_num_pal = (png_ptr->color_type == PNG_COLOR_TYPE_PALETTE) ?
++ max_palette_length = (png_ptr->color_type == PNG_COLOR_TYPE_PALETTE) ?
+ (1 << png_ptr->bit_depth) : PNG_MAX_PALETTE_LENGTH;
+
+ if ((
+ #ifdef PNG_MNG_FEATURES_SUPPORTED
+ (png_ptr->mng_features_permitted & PNG_FLAG_MNG_EMPTY_PLTE) == 0 &&
+ #endif
+- num_pal == 0) || num_pal > max_num_pal)
++ num_pal == 0) || num_pal > max_palette_length)
+ {
+ if (png_ptr->color_type == PNG_COLOR_TYPE_PALETTE)
+ {
diff --git a/yocto-poky/meta/recipes-multimedia/libpng/libpng-1.6.17/CVE-2015-8126_3.patch b/yocto-poky/meta/recipes-multimedia/libpng/libpng-1.6.17/CVE-2015-8126_3.patch
new file mode 100644
index 000000000..0e0ad2320
--- /dev/null
+++ b/yocto-poky/meta/recipes-multimedia/libpng/libpng-1.6.17/CVE-2015-8126_3.patch
@@ -0,0 +1,79 @@
+From 1bef8e97995c33123665582e57d3ed40b57d5978 Mon Sep 17 00:00:00 2001
+From: Glenn Randers-Pehrson <glennrp at users.sourceforge.net>
+Date: Fri, 30 Oct 2015 11:34:37 -0500
+Subject: [PATCH] [libpng16] Silently truncate over-length PLTE chunk while
+ reading.
+
+Upstream-Status: Backport
+https://github.com/glennrp/libpng/commit/1bef8e97995c33123665582e57d3ed40b57d5978
+
+The usual issues are date and version conflicts; those hunks were not applied.
+
+CVE: CVE-2015-8126 patch #3
+
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+
+---
+ ANNOUNCE | 3 ++-
+ CHANGES | 3 ++-
+ pngrutil.c | 15 +++++++++++----
+ pngset.c | 2 +-
+ 4 files changed, 16 insertions(+), 7 deletions(-)
+
+Index: libpng-1.6.17/pngrutil.c
+===================================================================
+--- libpng-1.6.17.orig/pngrutil.c
++++ libpng-1.6.17/pngrutil.c
+@@ -867,7 +867,7 @@ void /* PRIVATE */
+ png_handle_PLTE(png_structrp png_ptr, png_inforp info_ptr, png_uint_32 length)
+ {
+ png_color palette[PNG_MAX_PALETTE_LENGTH];
+- int num, i;
++ int max_palette_length, num, i;
+ #ifdef PNG_POINTER_INDEXING_SUPPORTED
+ png_colorp pal_ptr;
+ #endif
+@@ -925,9 +925,19 @@ png_handle_PLTE(png_structrp png_ptr, pn
+ return;
+ }
+
++ max_palette_length = (png_ptr->color_type == PNG_COLOR_TYPE_PALETTE) ?
++ (1 << png_ptr->bit_depth) : PNG_MAX_PALETTE_LENGTH;
++
+ /* The cast is safe because 'length' is less than 3*PNG_MAX_PALETTE_LENGTH */
+ num = (int)length / 3;
+
++ /* If the palette has 256 or fewer entries but is too large for the bit depth,
++ * we don't issue an error, to preserve the behavior of previous libpng versions.
++ * We silently truncate the unused extra palette entries here.
++ */
++ if (num > max_palette_length)
++ num = max_palette_length;
++
+ #ifdef PNG_POINTER_INDEXING_SUPPORTED
+ for (i = 0, pal_ptr = palette; i < num; i++, pal_ptr++)
+ {
+@@ -997,9 +1007,6 @@ png_handle_PLTE(png_structrp png_ptr, pn
+ * confusing.
+ *
+ * Fix this by not sharing the palette in this way.
+- *
+- * Starting with libpng-1.6.19, png_set_PLTE() also issues a png_error() when
+- * it attempts to set a palette length that is too large for the bit depth.
+ */
+ png_set_PLTE(png_ptr, info_ptr, palette, num);
+
+Index: libpng-1.6.17/pngset.c
+===================================================================
+--- libpng-1.6.17.orig/pngset.c
++++ libpng-1.6.17/pngset.c
+@@ -523,7 +523,7 @@ png_set_PLTE(png_structrp png_ptr, png_i
+ max_palette_length = (png_ptr->color_type == PNG_COLOR_TYPE_PALETTE) ?
+ (1 << png_ptr->bit_depth) : PNG_MAX_PALETTE_LENGTH;
+
+- if (num_palette < 0 || num_palette > max_palette_length)
++ if (num_palette < 0 || num_palette > (int) max_palette_length)
+ {
+ if (info_ptr->color_type == PNG_COLOR_TYPE_PALETTE)
+ png_error(png_ptr, "Invalid palette length");
diff --git a/yocto-poky/meta/recipes-multimedia/libpng/libpng-1.6.17/CVE-2015-8126_4.patch b/yocto-poky/meta/recipes-multimedia/libpng/libpng-1.6.17/CVE-2015-8126_4.patch
new file mode 100644
index 000000000..2622630d1
--- /dev/null
+++ b/yocto-poky/meta/recipes-multimedia/libpng/libpng-1.6.17/CVE-2015-8126_4.patch
@@ -0,0 +1,48 @@
+From 83f4c735c88e7f451541c1528d8043c31ba3b466 Mon Sep 17 00:00:00 2001
+From: Glenn Randers-Pehrson <glennrp at users.sourceforge.net>
+Date: Thu, 5 Nov 2015 11:18:44 -0600
+Subject: [PATCH] [libpng16] Clean up coding style in png_handle_PLTE()
+
+Upstream-Status: Backport
+https://github.com/glennrp/libpng/commit/83f4c735c88e7f451541c1528d8043c31ba3b466
+
+CVE: CVE-2015-8126 patch #4
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ pngrutil.c | 17 ++++++++++-------
+ 1 file changed, 10 insertions(+), 7 deletions(-)
+
+Index: libpng-1.6.17/pngrutil.c
+===================================================================
+--- libpng-1.6.17.orig/pngrutil.c
++++ libpng-1.6.17/pngrutil.c
+@@ -925,18 +925,21 @@ png_handle_PLTE(png_structrp png_ptr, pn
+ return;
+ }
+
+- max_palette_length = (png_ptr->color_type == PNG_COLOR_TYPE_PALETTE) ?
+- (1 << png_ptr->bit_depth) : PNG_MAX_PALETTE_LENGTH;
+-
+ /* The cast is safe because 'length' is less than 3*PNG_MAX_PALETTE_LENGTH */
+ num = (int)length / 3;
+
+- /* If the palette has 256 or fewer entries but is too large for the bit depth,
+- * we don't issue an error, to preserve the behavior of previous libpng versions.
+- * We silently truncate the unused extra palette entries here.
++ /* If the palette has 256 or fewer entries but is too large for the bit
++ * depth, we don't issue an error, to preserve the behavior of previous
++ * libpng versions. We silently truncate the unused extra palette entries
++ * here.
+ */
++ if (png_ptr->color_type == PNG_COLOR_TYPE_PALETTE)
++ max_palette_length = (1 << png_ptr->bit_depth);
++ else
++ max_palette_length = PNG_MAX_PALETTE_LENGTH;
++
+ if (num > max_palette_length)
+- num = max_palette_length;
++ num = max_palette_length;
+
+ #ifdef PNG_POINTER_INDEXING_SUPPORTED
+ for (i = 0, pal_ptr = palette; i < num; i++, pal_ptr++)
diff --git a/yocto-poky/meta/recipes-multimedia/libpng/libpng-1.6.17/CVE-2015-8472.patch b/yocto-poky/meta/recipes-multimedia/libpng/libpng-1.6.17/CVE-2015-8472.patch
new file mode 100644
index 000000000..404f012b0
--- /dev/null
+++ b/yocto-poky/meta/recipes-multimedia/libpng/libpng-1.6.17/CVE-2015-8472.patch
@@ -0,0 +1,29 @@
+From 9f2ad4928e47036cf1ac9b8fe45a491f15be2324 Mon Sep 17 00:00:00 2001
+From: Glenn Randers-Pehrson <glennrp at users.sourceforge.net>
+Date: Wed, 4 Nov 2015 23:47:42 -0600
+Subject: [PATCH] [libpng16] Fixed new bug with CRC error after reading an
+ over-length palette.
+
+Upstream-Status: Backport
+CVE: CVE-2015-8472
+
+https://github.com/glennrp/libpng/commit/9f2ad4928e47036cf1ac9b8fe45a491f15be2324
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ pngrutil.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+Index: libpng-1.6.17/pngrutil.c
+===================================================================
+--- libpng-1.6.17.orig/pngrutil.c
++++ libpng-1.6.17/pngrutil.c
+@@ -973,7 +973,7 @@ png_handle_PLTE(png_structrp png_ptr, pn
+ if (png_ptr->color_type == PNG_COLOR_TYPE_PALETTE)
+ #endif
+ {
+- png_crc_finish(png_ptr, 0);
++ png_crc_finish(png_ptr, (int) length - num * 3);
+ }
+
+ #ifndef PNG_READ_OPT_PLTE_SUPPORTED
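
The changed png_crc_finish() argument is simple arithmetic: once an over-length PLTE chunk has been truncated to num entries, the remaining length - num * 3 bytes still have to be consumed so the CRC is read from the correct offset. A quick standalone C check of those numbers (the 1000-byte chunk length is a made-up example, not taken from libpng):

    #include <stdio.h>

    int main(void)
    {
        unsigned length = 1000;             /* hypothetical over-length PLTE chunk */
        unsigned num = length / 3;          /* 333 entries encoded in the chunk    */
        unsigned max_palette_length = 256;  /* limit for an 8-bit paletted image   */

        if (num > max_palette_length)
            num = max_palette_length;       /* silently truncate, as the reader does */

        printf("entries kept: %u, bytes to skip before CRC: %u\n",
               num, length - num * 3);      /* prints 256 and 232 */
        return 0;
    }
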
diff --git a/yocto-poky/meta/recipes-multimedia/libpng/libpng_1.6.17.bb b/yocto-poky/meta/recipes-multimedia/libpng/libpng_1.6.17.bb
index 00e5808b9..cc288c7f9 100644
--- a/yocto-poky/meta/recipes-multimedia/libpng/libpng_1.6.17.bb
+++ b/yocto-poky/meta/recipes-multimedia/libpng/libpng_1.6.17.bb
@@ -8,8 +8,16 @@ LIC_FILES_CHKSUM = "file://LICENSE;md5=b9b75399b72e4a8656cf3a6ddfc86d9a \
DEPENDS = "zlib"
LIBV = "16"
-SRC_URI = "${SOURCEFORGE_MIRROR}/project/libpng/libpng${LIBV}/${PV}/libpng-${PV}.tar.xz \
+SRC_URI = "${SOURCEFORGE_MIRROR}/project/libpng/libpng${LIBV}/older-releases/${PV}/libpng-${PV}.tar.xz \
"
+SRC_URI += "\
+ file://CVE-2015-8126_1.patch \
+ file://CVE-2015-8126_2.patch \
+ file://CVE-2015-8126_3.patch \
+ file://CVE-2015-8126_4.patch \
+ file://CVE-2015-8472.patch \
+ "
+
SRC_URI[md5sum] = "430a9b76b78533235cd4b9b26ce75c7e"
SRC_URI[sha256sum] = "98507b55fbe5cd43c51981f2924e4671fd81fe35d52dc53357e20f2c77fa5dfd"
diff --git a/yocto-poky/meta/recipes-multimedia/libsndfile/files/libsndfile-fix-CVE-2014-9756.patch b/yocto-poky/meta/recipes-multimedia/libsndfile/files/libsndfile-fix-CVE-2014-9756.patch
new file mode 100644
index 000000000..b54b3ba66
--- /dev/null
+++ b/yocto-poky/meta/recipes-multimedia/libsndfile/files/libsndfile-fix-CVE-2014-9756.patch
@@ -0,0 +1,24 @@
+src/file_io.c : Prevent potential divide-by-zero.
+
+Closes: https://github.com/erikd/libsndfile/issues/92
+
+Upstream-Status: Backport
+
+Fixes CVE-2014-9756
+
+Signed-off-by: Erik de Castro Lopo <erikd@mega-nerd.com>
+Signed-off-by: Maxin B. John <maxin.john@intel.com>
+---
+diff -Naur libsndfile-1.0.25-orig/src/file_io.c libsndfile-1.0.25/src/file_io.c
+--- libsndfile-1.0.25-orig/src/file_io.c 2011-01-19 12:12:28.000000000 +0200
++++ libsndfile-1.0.25/src/file_io.c 2015-11-04 15:02:04.337395618 +0200
+@@ -358,6 +358,9 @@
+ { sf_count_t total = 0 ;
+ ssize_t count ;
+
++ if (bytes == 0 || items == 0)
++ return 0 ;
++
+ if (psf->virtual_io)
+ return psf->vio.write (ptr, bytes*items, psf->vio_user_data) / bytes ;
+
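
The fix above is the classic guard-before-divide pattern: a write helper that divides by the element size must return early when the caller passes a zero size or count. Below is a self-contained C sketch of the same shape; demo_fwrite() is invented for illustration and only mimics psf_fwrite(), it is not the libsndfile API.

    #include <stdio.h>

    static size_t demo_fwrite(const void *ptr, size_t bytes, size_t items, FILE *f)
    {
        if (bytes == 0 || items == 0)       /* the guard the patch adds */
            return 0;

        size_t written = fwrite(ptr, 1, bytes * items, f);
        return written / bytes;             /* safe: bytes is known to be non-zero */
    }

    int main(void)
    {
        FILE *f = tmpfile();
        const char buf[] = "demo";

        if (f == NULL)
            return 1;
        printf("items written with bytes=0: %zu\n", demo_fwrite(buf, 0, 4, f));
        printf("items written with bytes=4: %zu\n", demo_fwrite(buf, 4, 1, f));
        fclose(f);
        return 0;
    }
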
diff --git a/yocto-poky/meta/recipes-multimedia/libsndfile/libsndfile1_1.0.25.bb b/yocto-poky/meta/recipes-multimedia/libsndfile/libsndfile1_1.0.25.bb
index 3e02f4ea7..be875c227 100644
--- a/yocto-poky/meta/recipes-multimedia/libsndfile/libsndfile1_1.0.25.bb
+++ b/yocto-poky/meta/recipes-multimedia/libsndfile/libsndfile1_1.0.25.bb
@@ -9,6 +9,7 @@ PR = "r2"
SRC_URI = "http://www.mega-nerd.com/libsndfile/files/libsndfile-${PV}.tar.gz \
file://0001-src-sd2.c-Fix-segfault-in-SD2-RSRC-parser.patch \
file://0001-src-sd2.c-Fix-two-potential-buffer-read-overflows.patch \
+ file://libsndfile-fix-CVE-2014-9756.patch \
"
SRC_URI[md5sum] = "e2b7bb637e01022c7d20f95f9c3990a2"
diff --git a/yocto-poky/meta/recipes-multimedia/libtiff/files/CVE-2015-8781.patch b/yocto-poky/meta/recipes-multimedia/libtiff/files/CVE-2015-8781.patch
new file mode 100644
index 000000000..bdbe69695
--- /dev/null
+++ b/yocto-poky/meta/recipes-multimedia/libtiff/files/CVE-2015-8781.patch
@@ -0,0 +1,196 @@
+From aaab5c3c9d2a2c6984f23ccbc79702610439bc65 Mon Sep 17 00:00:00 2001
+From: erouault <erouault>
+Date: Sun, 27 Dec 2015 16:25:11 +0000
+Subject: [PATCH] * libtiff/tif_luv.c: fix potential out-of-bound writes in
+ decode functions in non debug builds by replacing assert()s by regular if
+ checks (bugzilla #2522). Fix potential out-of-bound reads in case of short
+ input data.
+
+Upstream-Status: Backport
+
+https://github.com/vadz/libtiff/commit/aaab5c3c9d2a2c6984f23ccbc79702610439bc65
+Hand-applied the ChangeLog changes.
+
+CVE: CVE-2015-8781
+
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+---
+ ChangeLog | 7 +++++++
+ libtiff/tif_luv.c | 55 ++++++++++++++++++++++++++++++++++++++++++++-----------
+ 2 files changed, 51 insertions(+), 11 deletions(-)
+
+Index: tiff-4.0.4/ChangeLog
+===================================================================
+--- tiff-4.0.4.orig/ChangeLog
++++ tiff-4.0.4/ChangeLog
+@@ -1,3 +1,11 @@
++2015-12-27 Even Rouault <even.rouault at spatialys.com>
++
++ * libtiff/tif_luv.c: fix potential out-of-bound writes in decode
++ functions in non debug builds by replacing assert()s by regular if
++ checks (bugzilla #2522).
++ Fix potential out-of-bound reads in case of short input data.
++
++
+ 2015-06-21 Bob Friesenhahn <bfriesen@simple.dallas.tx.us>
+
+ * libtiff 4.0.4 released.
+Index: tiff-4.0.4/libtiff/tif_luv.c
+===================================================================
+--- tiff-4.0.4.orig/libtiff/tif_luv.c
++++ tiff-4.0.4/libtiff/tif_luv.c
+@@ -202,7 +202,11 @@ LogL16Decode(TIFF* tif, uint8* op, tmsiz
+ if (sp->user_datafmt == SGILOGDATAFMT_16BIT)
+ tp = (int16*) op;
+ else {
+- assert(sp->tbuflen >= npixels);
++ if(sp->tbuflen < npixels) {
++ TIFFErrorExt(tif->tif_clientdata, module,
++ "Translation buffer too short");
++ return (0);
++ }
+ tp = (int16*) sp->tbuf;
+ }
+ _TIFFmemset((void*) tp, 0, npixels*sizeof (tp[0]));
+@@ -211,9 +215,11 @@ LogL16Decode(TIFF* tif, uint8* op, tmsiz
+ cc = tif->tif_rawcc;
+ /* get each byte string */
+ for (shft = 2*8; (shft -= 8) >= 0; ) {
+- for (i = 0; i < npixels && cc > 0; )
++ for (i = 0; i < npixels && cc > 0; ) {
+ if (*bp >= 128) { /* run */
+- rc = *bp++ + (2-128); /* TODO: potential input buffer overrun when decoding corrupt or truncated data */
++ if( cc < 2 )
++ break;
++ rc = *bp++ + (2-128);
+ b = (int16)(*bp++ << shft);
+ cc -= 2;
+ while (rc-- && i < npixels)
+@@ -223,6 +229,7 @@ LogL16Decode(TIFF* tif, uint8* op, tmsiz
+ while (--cc && rc-- && i < npixels)
+ tp[i++] |= (int16)*bp++ << shft;
+ }
++ }
+ if (i != npixels) {
+ #if defined(__WIN32__) && (defined(_MSC_VER) || defined(__MINGW32__))
+ TIFFErrorExt(tif->tif_clientdata, module,
+@@ -268,13 +275,17 @@ LogLuvDecode24(TIFF* tif, uint8* op, tms
+ if (sp->user_datafmt == SGILOGDATAFMT_RAW)
+ tp = (uint32 *)op;
+ else {
+- assert(sp->tbuflen >= npixels);
++ if(sp->tbuflen < npixels) {
++ TIFFErrorExt(tif->tif_clientdata, module,
++ "Translation buffer too short");
++ return (0);
++ }
+ tp = (uint32 *) sp->tbuf;
+ }
+ /* copy to array of uint32 */
+ bp = (unsigned char*) tif->tif_rawcp;
+ cc = tif->tif_rawcc;
+- for (i = 0; i < npixels && cc > 0; i++) {
++ for (i = 0; i < npixels && cc >= 3; i++) {
+ tp[i] = bp[0] << 16 | bp[1] << 8 | bp[2];
+ bp += 3;
+ cc -= 3;
+@@ -325,7 +336,11 @@ LogLuvDecode32(TIFF* tif, uint8* op, tms
+ if (sp->user_datafmt == SGILOGDATAFMT_RAW)
+ tp = (uint32*) op;
+ else {
+- assert(sp->tbuflen >= npixels);
++ if(sp->tbuflen < npixels) {
++ TIFFErrorExt(tif->tif_clientdata, module,
++ "Translation buffer too short");
++ return (0);
++ }
+ tp = (uint32*) sp->tbuf;
+ }
+ _TIFFmemset((void*) tp, 0, npixels*sizeof (tp[0]));
+@@ -334,11 +349,13 @@ LogLuvDecode32(TIFF* tif, uint8* op, tms
+ cc = tif->tif_rawcc;
+ /* get each byte string */
+ for (shft = 4*8; (shft -= 8) >= 0; ) {
+- for (i = 0; i < npixels && cc > 0; )
++ for (i = 0; i < npixels && cc > 0; ) {
+ if (*bp >= 128) { /* run */
++ if( cc < 2 )
++ break;
+ rc = *bp++ + (2-128);
+ b = (uint32)*bp++ << shft;
+- cc -= 2; /* TODO: potential input buffer overrun when decoding corrupt or truncated data */
++ cc -= 2;
+ while (rc-- && i < npixels)
+ tp[i++] |= b;
+ } else { /* non-run */
+@@ -346,6 +363,7 @@ LogLuvDecode32(TIFF* tif, uint8* op, tms
+ while (--cc && rc-- && i < npixels)
+ tp[i++] |= (uint32)*bp++ << shft;
+ }
++ }
+ if (i != npixels) {
+ #if defined(__WIN32__) && (defined(_MSC_VER) || defined(__MINGW32__))
+ TIFFErrorExt(tif->tif_clientdata, module,
+@@ -413,6 +431,7 @@ LogLuvDecodeTile(TIFF* tif, uint8* bp, t
+ static int
+ LogL16Encode(TIFF* tif, uint8* bp, tmsize_t cc, uint16 s)
+ {
++ static const char module[] = "LogL16Encode";
+ LogLuvState* sp = EncoderState(tif);
+ int shft;
+ tmsize_t i;
+@@ -433,7 +452,11 @@ LogL16Encode(TIFF* tif, uint8* bp, tmsiz
+ tp = (int16*) bp;
+ else {
+ tp = (int16*) sp->tbuf;
+- assert(sp->tbuflen >= npixels);
++ if(sp->tbuflen < npixels) {
++ TIFFErrorExt(tif->tif_clientdata, module,
++ "Translation buffer too short");
++ return (0);
++ }
+ (*sp->tfunc)(sp, bp, npixels);
+ }
+ /* compress each byte string */
+@@ -506,6 +529,7 @@ LogL16Encode(TIFF* tif, uint8* bp, tmsiz
+ static int
+ LogLuvEncode24(TIFF* tif, uint8* bp, tmsize_t cc, uint16 s)
+ {
++ static const char module[] = "LogLuvEncode24";
+ LogLuvState* sp = EncoderState(tif);
+ tmsize_t i;
+ tmsize_t npixels;
+@@ -521,7 +545,11 @@ LogLuvEncode24(TIFF* tif, uint8* bp, tms
+ tp = (uint32*) bp;
+ else {
+ tp = (uint32*) sp->tbuf;
+- assert(sp->tbuflen >= npixels);
++ if(sp->tbuflen < npixels) {
++ TIFFErrorExt(tif->tif_clientdata, module,
++ "Translation buffer too short");
++ return (0);
++ }
+ (*sp->tfunc)(sp, bp, npixels);
+ }
+ /* write out encoded pixels */
+@@ -553,6 +581,7 @@ LogLuvEncode24(TIFF* tif, uint8* bp, tms
+ static int
+ LogLuvEncode32(TIFF* tif, uint8* bp, tmsize_t cc, uint16 s)
+ {
++ static const char module[] = "LogLuvEncode32";
+ LogLuvState* sp = EncoderState(tif);
+ int shft;
+ tmsize_t i;
+@@ -574,7 +603,11 @@ LogLuvEncode32(TIFF* tif, uint8* bp, tms
+ tp = (uint32*) bp;
+ else {
+ tp = (uint32*) sp->tbuf;
+- assert(sp->tbuflen >= npixels);
++ if(sp->tbuflen < npixels) {
++ TIFFErrorExt(tif->tif_clientdata, module,
++ "Translation buffer too short");
++ return (0);
++ }
+ (*sp->tfunc)(sp, bp, npixels);
+ }
+ /* compress each byte string */
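The tif_luv.c hunks above all apply one pattern: an assert(sp->tbuflen >= npixels), which disappears in non-debug (NDEBUG) builds, is replaced by a runtime check that reports the problem through TIFFErrorExt() and fails the decode. A minimal standalone sketch of that pattern, assuming hypothetical names rather than the real libtiff API:

    #include <stdio.h>
    #include <stddef.h>

    /* Runtime bounds check instead of assert(): it stays active in release
     * builds, reports the short buffer, and fails the decode cleanly. */
    static int decode_strip(const unsigned char *tbuf, size_t tbuflen,
                            size_t npixels)
    {
        if (tbuflen < npixels) {
            fprintf(stderr, "decode_strip: translation buffer too short\n");
            return 0;                  /* 0 signals failure, as in libtiff */
        }
        (void)tbuf;                    /* actual decoding elided */
        return 1;
    }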
diff --git a/yocto-poky/meta/recipes-multimedia/libtiff/files/CVE-2015-8784.patch b/yocto-poky/meta/recipes-multimedia/libtiff/files/CVE-2015-8784.patch
new file mode 100644
index 000000000..cf37fd388
--- /dev/null
+++ b/yocto-poky/meta/recipes-multimedia/libtiff/files/CVE-2015-8784.patch
@@ -0,0 +1,73 @@
+From b18012dae552f85dcc5c57d3bf4e997a15b1cc1c Mon Sep 17 00:00:00 2001
+From: erouault <erouault>
+Date: Sun, 27 Dec 2015 16:55:20 +0000
+Subject: [PATCH] * libtiff/tif_next.c: fix potential out-of-bound write in
+ NeXTDecode() triggered by http://lcamtuf.coredump.cx/afl/vulns/libtiff5.tif
+ (bugzilla #2508)
+
+Upstream-Status: Backport
+https://github.com/vadz/libtiff/commit/b18012dae552f85dcc5c57d3bf4e997a15b1cc1c
+Hand-applied the ChangeLog changes.
+
+CVE: CVE-2015-8784
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ ChangeLog | 6 ++++++
+ libtiff/tif_next.c | 10 ++++++++--
+ 2 files changed, 14 insertions(+), 2 deletions(-)
+
+Index: tiff-4.0.4/ChangeLog
+===================================================================
+--- tiff-4.0.4.orig/ChangeLog
++++ tiff-4.0.4/ChangeLog
+@@ -1,5 +1,11 @@
+ 2015-12-27 Even Rouault <even.rouault at spatialys.com>
+
++ * libtiff/tif_next.c: fix potential out-of-bound write in NeXTDecode()
++ triggered by http://lcamtuf.coredump.cx/afl/vulns/libtiff5.tif
++ (bugzilla #2508)
++
++2015-12-27 Even Rouault <even.rouault at spatialys.com>
++
+ * libtiff/tif_luv.c: fix potential out-of-bound writes in decode
+ functions in non debug builds by replacing assert()s by regular if
+ checks (bugzilla #2522).
+Index: tiff-4.0.4/libtiff/tif_next.c
+===================================================================
+--- tiff-4.0.4.orig/libtiff/tif_next.c
++++ tiff-4.0.4/libtiff/tif_next.c
+@@ -37,7 +37,7 @@
+ case 0: op[0] = (unsigned char) ((v) << 6); break; \
+ case 1: op[0] |= (v) << 4; break; \
+ case 2: op[0] |= (v) << 2; break; \
+- case 3: *op++ |= (v); break; \
++ case 3: *op++ |= (v); op_offset++; break; \
+ } \
+ }
+
+@@ -106,6 +106,7 @@ NeXTDecode(TIFF* tif, uint8* buf, tmsize
+ uint32 imagewidth = tif->tif_dir.td_imagewidth;
+ if( isTiled(tif) )
+ imagewidth = tif->tif_dir.td_tilewidth;
++ tmsize_t op_offset = 0;
+
+ /*
+ * The scanline is composed of a sequence of constant
+@@ -122,10 +123,15 @@ NeXTDecode(TIFF* tif, uint8* buf, tmsize
+ * bounds, potentially resulting in a security
+ * issue.
+ */
+- while (n-- > 0 && npixels < imagewidth)
++ while (n-- > 0 && npixels < imagewidth && op_offset < scanline)
+ SETPIXEL(op, grey);
+ if (npixels >= imagewidth)
+ break;
++ if (op_offset >= scanline ) {
++ TIFFErrorExt(tif->tif_clientdata, module, "Invalid data for scanline %ld",
++ (long) tif->tif_row);
++ return (0);
++ }
+ if (cc == 0)
+ goto bad;
+ n = *bp++, cc--;
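The NeXTDecode() change above bounds the output by counting the bytes SETPIXEL has emitted (op_offset) and stopping at the scanline length instead of trusting the run counts in the input. The same guard in isolation, with illustrative names only:

    #include <stddef.h>

    /* Emit a run of 'value' pixels, but never beyond 'scanline' output
     * bytes -- the op_offset limit added to NeXTDecode() above. */
    static size_t write_run(unsigned char *op, size_t op_offset,
                            size_t scanline, size_t run, unsigned char value)
    {
        while (run-- > 0 && op_offset < scanline)
            op[op_offset++] = value;
        return op_offset;              /* caller keeps tracking the offset */
    }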
diff --git a/yocto-poky/meta/recipes-multimedia/libtiff/tiff_4.0.4.bb b/yocto-poky/meta/recipes-multimedia/libtiff/tiff_4.0.4.bb
index cf3a5f04c..f1f5a7e8e 100644
--- a/yocto-poky/meta/recipes-multimedia/libtiff/tiff_4.0.4.bb
+++ b/yocto-poky/meta/recipes-multimedia/libtiff/tiff_4.0.4.bb
@@ -5,6 +5,8 @@ HOMEPAGE = "http://www.remotesensing.org/libtiff/"
SRC_URI = "ftp://ftp.remotesensing.org/pub/libtiff/tiff-${PV}.tar.gz \
file://libtool2.patch \
+ file://CVE-2015-8781.patch \
+ file://CVE-2015-8784.patch \
"
SRC_URI[md5sum] = "9aee7107408a128c0c7b24286c0db900"
diff --git a/yocto-poky/meta/recipes-multimedia/pulseaudio/pulseaudio/0001-card-add-pa_card_profile.ports.patch b/yocto-poky/meta/recipes-multimedia/pulseaudio/pulseaudio/0001-card-add-pa_card_profile.ports.patch
new file mode 100644
index 000000000..97b2e4064
--- /dev/null
+++ b/yocto-poky/meta/recipes-multimedia/pulseaudio/pulseaudio/0001-card-add-pa_card_profile.ports.patch
@@ -0,0 +1,245 @@
+From 6f814b40a01d03f93b36184c19339033949de472 Mon Sep 17 00:00:00 2001
+From: Tanu Kaskinen <tanuk@iki.fi>
+Date: Fri, 23 Oct 2015 12:23:13 +0300
+Subject: [PATCH 1/4] card: add pa_card_profile.ports
+
+Having ports accessible from pa_card_profile allows checking whether all ports
+of a profile are unavailable, and therefore helps with managing the profile
+availability (implemented in a later patch).
+
+http://bugzilla.yoctoproject.org/show_bug.cgi?id=8448
+
+Upstream-Status: Submitted [http://lists.freedesktop.org/archives/pulseaudio-discuss/2015-October/024614.html]
+Signed-off-by: Jussi Kukkonen <jussi.kukkonen@intel.com>
+---
+ src/modules/alsa/alsa-mixer.c | 4 +++-
+ src/modules/alsa/alsa-ucm.c | 1 +
+ src/modules/bluetooth/module-bluez4-device.c | 6 ++++++
+ src/modules/bluetooth/module-bluez5-device.c | 6 ++++++
+ src/pulsecore/card.c | 16 ++++++++++++++++
+ src/pulsecore/card.h | 18 ++++++++++++------
+ src/pulsecore/device-port.c | 7 ++++++-
+ 7 files changed, 50 insertions(+), 8 deletions(-)
+
+diff --git a/src/modules/alsa/alsa-mixer.c b/src/modules/alsa/alsa-mixer.c
+index 47cbd14..c5b82b0 100644
+--- a/src/modules/alsa/alsa-mixer.c
++++ b/src/modules/alsa/alsa-mixer.c
+@@ -4654,8 +4654,10 @@ static pa_device_port* device_port_alsa_init(pa_hashmap *ports, /* card ports */
+ path->port = p;
+ }
+
+- if (cp)
++ if (cp) {
+ pa_hashmap_put(p->profiles, cp->name, cp);
++ pa_card_profile_add_port(cp, p);
++ }
+
+ if (extra) {
+ pa_hashmap_put(extra, p->name, p);
+diff --git a/src/modules/alsa/alsa-ucm.c b/src/modules/alsa/alsa-ucm.c
+index aa2d601..c8199d6 100644
+--- a/src/modules/alsa/alsa-ucm.c
++++ b/src/modules/alsa/alsa-ucm.c
+@@ -761,6 +761,7 @@ static void ucm_add_port_combination(
+ if (cp) {
+ pa_log_debug("Adding profile %s to port %s.", cp->name, port->name);
+ pa_hashmap_put(port->profiles, cp->name, cp);
++ pa_card_profile_add_port(cp, port);
+ }
+
+ if (hash) {
+diff --git a/src/modules/bluetooth/module-bluez4-device.c b/src/modules/bluetooth/module-bluez4-device.c
+index db69d34..b40c6a0 100644
+--- a/src/modules/bluetooth/module-bluez4-device.c
++++ b/src/modules/bluetooth/module-bluez4-device.c
+@@ -2183,6 +2183,7 @@ static pa_card_profile *create_card_profile(struct userdata *u, const char *uuid
+ p->max_sink_channels = 2;
+ p->max_source_channels = 0;
+ pa_hashmap_put(output_port->profiles, p->name, p);
++ pa_card_profile_add_port(p, output_port);
+
+ d = PA_CARD_PROFILE_DATA(p);
+ *d = PA_BLUEZ4_PROFILE_A2DP;
+@@ -2194,6 +2195,7 @@ static pa_card_profile *create_card_profile(struct userdata *u, const char *uuid
+ p->max_sink_channels = 0;
+ p->max_source_channels = 2;
+ pa_hashmap_put(input_port->profiles, p->name, p);
++ pa_card_profile_add_port(p, input_port);
+
+ d = PA_CARD_PROFILE_DATA(p);
+ *d = PA_BLUEZ4_PROFILE_A2DP_SOURCE;
+@@ -2206,6 +2208,8 @@ static pa_card_profile *create_card_profile(struct userdata *u, const char *uuid
+ p->max_source_channels = 1;
+ pa_hashmap_put(input_port->profiles, p->name, p);
+ pa_hashmap_put(output_port->profiles, p->name, p);
++ pa_card_profile_add_port(p, input_port);
++ pa_card_profile_add_port(p, output_port);
+
+ d = PA_CARD_PROFILE_DATA(p);
+ *d = PA_BLUEZ4_PROFILE_HSP;
+@@ -2218,6 +2222,8 @@ static pa_card_profile *create_card_profile(struct userdata *u, const char *uuid
+ p->max_source_channels = 1;
+ pa_hashmap_put(input_port->profiles, p->name, p);
+ pa_hashmap_put(output_port->profiles, p->name, p);
++ pa_card_profile_add_port(p, input_port);
++ pa_card_profile_add_port(p, output_port);
+
+ d = PA_CARD_PROFILE_DATA(p);
+ *d = PA_BLUEZ4_PROFILE_HFGW;
+diff --git a/src/modules/bluetooth/module-bluez5-device.c b/src/modules/bluetooth/module-bluez5-device.c
+index 7238e6f..3321785 100644
+--- a/src/modules/bluetooth/module-bluez5-device.c
++++ b/src/modules/bluetooth/module-bluez5-device.c
+@@ -1790,6 +1790,7 @@ static pa_card_profile *create_card_profile(struct userdata *u, const char *uuid
+ cp->max_sink_channels = 2;
+ cp->max_source_channels = 0;
+ pa_hashmap_put(output_port->profiles, cp->name, cp);
++ pa_card_profile_add_port(cp, output_port);
+
+ p = PA_CARD_PROFILE_DATA(cp);
+ *p = PA_BLUETOOTH_PROFILE_A2DP_SINK;
+@@ -1801,6 +1802,7 @@ static pa_card_profile *create_card_profile(struct userdata *u, const char *uuid
+ cp->max_sink_channels = 0;
+ cp->max_source_channels = 2;
+ pa_hashmap_put(input_port->profiles, cp->name, cp);
++ pa_card_profile_add_port(cp, input_port);
+
+ p = PA_CARD_PROFILE_DATA(cp);
+ *p = PA_BLUETOOTH_PROFILE_A2DP_SOURCE;
+@@ -1813,6 +1815,8 @@ static pa_card_profile *create_card_profile(struct userdata *u, const char *uuid
+ cp->max_source_channels = 1;
+ pa_hashmap_put(input_port->profiles, cp->name, cp);
+ pa_hashmap_put(output_port->profiles, cp->name, cp);
++ pa_card_profile_add_port(cp, input_port);
++ pa_card_profile_add_port(cp, output_port);
+
+ p = PA_CARD_PROFILE_DATA(cp);
+ *p = PA_BLUETOOTH_PROFILE_HEADSET_HEAD_UNIT;
+@@ -1825,6 +1829,8 @@ static pa_card_profile *create_card_profile(struct userdata *u, const char *uuid
+ cp->max_source_channels = 1;
+ pa_hashmap_put(input_port->profiles, cp->name, cp);
+ pa_hashmap_put(output_port->profiles, cp->name, cp);
++ pa_card_profile_add_port(cp, input_port);
++ pa_card_profile_add_port(cp, output_port);
+
+ p = PA_CARD_PROFILE_DATA(cp);
+ *p = PA_BLUETOOTH_PROFILE_HEADSET_AUDIO_GATEWAY;
+diff --git a/src/pulsecore/card.c b/src/pulsecore/card.c
+index 6f9391e..cc4c784 100644
+--- a/src/pulsecore/card.c
++++ b/src/pulsecore/card.c
+@@ -50,6 +50,7 @@ pa_card_profile *pa_card_profile_new(const char *name, const char *description,
+ c->n_sinks = c->n_sources = 0;
+ c->max_sink_channels = c->max_source_channels = 0;
+ c->available = PA_AVAILABLE_UNKNOWN;
++ c->ports = pa_hashmap_new(pa_idxset_string_hash_func, pa_idxset_string_compare_func);
+
+ return c;
+ }
+@@ -57,11 +58,25 @@ pa_card_profile *pa_card_profile_new(const char *name, const char *description,
+ void pa_card_profile_free(pa_card_profile *c) {
+ pa_assert(c);
+
++ if (c->ports) {
++ pa_device_port *port;
++ void *state;
++ PA_HASHMAP_FOREACH(port, c->ports, state)
++ pa_hashmap_remove (port->profiles, c->name);
++ pa_hashmap_free(c->ports);
++ }
++
+ pa_xfree(c->name);
+ pa_xfree(c->description);
+ pa_xfree(c);
+ }
+
++void pa_card_profile_add_port(pa_card_profile *profile, pa_device_port *port) {
++ pa_assert(profile);
++
++ pa_hashmap_put(profile->ports, port->name, port);
++}
++
+ void pa_card_profile_set_available(pa_card_profile *c, pa_available_t available) {
+ pa_core *core;
+
+@@ -198,6 +213,7 @@ pa_card *pa_card_new(pa_core *core, pa_card_new_data *data) {
+
+ c->userdata = NULL;
+ c->set_profile = NULL;
++ c->active_profile = NULL;
+
+ pa_device_init_description(c->proplist, c);
+ pa_device_init_icon(c->proplist, true);
+diff --git a/src/pulsecore/card.h b/src/pulsecore/card.h
+index 3e2c004..1c33958 100644
+--- a/src/pulsecore/card.h
++++ b/src/pulsecore/card.h
+@@ -22,19 +22,21 @@
+
+ typedef struct pa_card pa_card;
+
+-#include <pulse/proplist.h>
+-#include <pulsecore/core.h>
+-#include <pulsecore/module.h>
+-#include <pulsecore/idxset.h>
+-
+ /* This enum replaces pa_port_available_t (defined in pulse/def.h) for
+- * internal use, so make sure both enum types stay in sync. */
++ * internal use, so make sure both enum types stay in sync. This is defined
++ * before the #includes, because device-port.h depends on this enum. */
+ typedef enum pa_available {
+ PA_AVAILABLE_UNKNOWN = 0,
+ PA_AVAILABLE_NO = 1,
+ PA_AVAILABLE_YES = 2,
+ } pa_available_t;
+
++#include <pulse/proplist.h>
++#include <pulsecore/core.h>
++#include <pulsecore/device-port.h>
++#include <pulsecore/module.h>
++#include <pulsecore/idxset.h>
++
+ typedef struct pa_card_profile {
+ pa_card *card;
+ char *name;
+@@ -43,6 +45,8 @@ typedef struct pa_card_profile {
+ unsigned priority;
+ pa_available_t available; /* PA_AVAILABLE_UNKNOWN, PA_AVAILABLE_NO or PA_AVAILABLE_YES */
+
++ pa_hashmap *ports; /* port name -> pa_device_port */
++
+ /* We probably want to have different properties later on here */
+ unsigned n_sinks;
+ unsigned n_sources;
+@@ -100,6 +104,8 @@ typedef struct pa_card_new_data {
+ pa_card_profile *pa_card_profile_new(const char *name, const char *description, size_t extra);
+ void pa_card_profile_free(pa_card_profile *c);
+
++void pa_card_profile_add_port(pa_card_profile *profile, pa_device_port *port);
++
+ /* The profile's available status has changed */
+ void pa_card_profile_set_available(pa_card_profile *c, pa_available_t available);
+
+diff --git a/src/pulsecore/device-port.c b/src/pulsecore/device-port.c
+index cfe2a80..f16ecef 100644
+--- a/src/pulsecore/device-port.c
++++ b/src/pulsecore/device-port.c
+@@ -95,8 +95,13 @@ static void device_port_free(pa_object *o) {
+ if (p->proplist)
+ pa_proplist_free(p->proplist);
+
+- if (p->profiles)
++ if (p->profiles) {
++ pa_card_profile *profile;
++ void *state;
++ PA_HASHMAP_FOREACH(profile, p->profiles, state)
++ pa_hashmap_remove (profile->ports, p->name);
+ pa_hashmap_free(p->profiles);
++ }
+
+ pa_xfree(p->name);
+ pa_xfree(p->description);
+--
+2.1.4
+
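The heart of this patch is a two-way link: each port already kept a hashmap of its profiles, and each profile now keeps a hashmap of its ports, so whichever object is freed first walks the other side and removes the dangling back-reference (see the new loops in pa_card_profile_free() and device_port_free()). A compact sketch of that symmetric unlink, using small pointer arrays in place of pa_hashmap and entirely hypothetical names:

    #include <stdlib.h>

    #define MAX_LINKS 4

    struct port;
    struct profile;

    struct port    { struct profile *profiles[MAX_LINKS]; };
    struct profile { struct port    *ports[MAX_LINKS];    };

    /* Drop every back-reference the profile's ports hold to it, then free
     * it -- the same cleanup the new pa_card_profile_free() loop performs. */
    static void profile_free(struct profile *pr)
    {
        for (int i = 0; i < MAX_LINKS; i++) {
            struct port *po = pr->ports[i];
            if (!po)
                continue;
            for (int j = 0; j < MAX_LINKS; j++)
                if (po->profiles[j] == pr)
                    po->profiles[j] = NULL;
        }
        free(pr);
    }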
diff --git a/yocto-poky/meta/recipes-multimedia/pulseaudio/pulseaudio/0002-alsa-bluetooth-fail-if-user-requested-profile-doesn-.patch b/yocto-poky/meta/recipes-multimedia/pulseaudio/pulseaudio/0002-alsa-bluetooth-fail-if-user-requested-profile-doesn-.patch
new file mode 100644
index 000000000..c3f217bc6
--- /dev/null
+++ b/yocto-poky/meta/recipes-multimedia/pulseaudio/pulseaudio/0002-alsa-bluetooth-fail-if-user-requested-profile-doesn-.patch
@@ -0,0 +1,60 @@
+From 339eb179baa7810113f6456accc05b3a32c1cdba Mon Sep 17 00:00:00 2001
+From: Tanu Kaskinen <tanuk@iki.fi>
+Date: Fri, 23 Oct 2015 12:36:34 +0300
+Subject: [PATCH 2/4] alsa, bluetooth: fail if user-requested profile doesn't
+ exist
+
+If we can't fulfill the user request fully, I think we shouldn't
+fulfill it at all, to make it clear that the requested operation
+didn't succeed.
+
+http://bugzilla.yoctoproject.org/show_bug.cgi?id=8448
+
+Upstream-Status: Submitted [http://lists.freedesktop.org/archives/pulseaudio-discuss/2015-October/024614.html]
+Signed-off-by: Jussi Kukkonen <jussi.kukkonen@intel.com>
+---
+ src/modules/alsa/module-alsa-card.c | 10 ++++++++--
+ src/modules/bluetooth/module-bluez4-device.c | 6 ++++--
+ 2 files changed, 12 insertions(+), 4 deletions(-)
+
+diff --git a/src/modules/alsa/module-alsa-card.c b/src/modules/alsa/module-alsa-card.c
+index a7fec04..32f517e 100644
+--- a/src/modules/alsa/module-alsa-card.c
++++ b/src/modules/alsa/module-alsa-card.c
+@@ -754,8 +754,14 @@ int pa__init(pa_module *m) {
+ goto fail;
+ }
+
+- if ((profile = pa_modargs_get_value(u->modargs, "profile", NULL)))
+- pa_card_new_data_set_profile(&data, profile);
++ if ((profile = pa_modargs_get_value(u->modargs, "profile", NULL))) {
++ if (pa_hashmap_get(data.profiles, profile))
++ pa_card_new_data_set_profile(&data, profile);
++ else {
++ pa_log("No such profile: %s", profile);
++ goto fail;
++ }
++ }
+
+ u->card = pa_card_new(m->core, &data);
+ pa_card_new_data_done(&data);
+diff --git a/src/modules/bluetooth/module-bluez4-device.c b/src/modules/bluetooth/module-bluez4-device.c
+index b40c6a0..94e6988 100644
+--- a/src/modules/bluetooth/module-bluez4-device.c
++++ b/src/modules/bluetooth/module-bluez4-device.c
+@@ -2310,8 +2310,10 @@ static int add_card(struct userdata *u) {
+ if ((default_profile = pa_modargs_get_value(u->modargs, "profile", NULL))) {
+ if (pa_hashmap_get(data.profiles, default_profile))
+ pa_card_new_data_set_profile(&data, default_profile);
+- else
+- pa_log_warn("Profile '%s' not valid or not supported by device.", default_profile);
++ else {
++ pa_log("Profile '%s' not valid or not supported by device.", default_profile);
++ return -1;
++ }
+ }
+
+ u->card = pa_card_new(u->core, &data);
+--
+2.1.4
+
diff --git a/yocto-poky/meta/recipes-multimedia/pulseaudio/pulseaudio/0003-card-move-profile-selection-after-pa_card_new.patch b/yocto-poky/meta/recipes-multimedia/pulseaudio/pulseaudio/0003-card-move-profile-selection-after-pa_card_new.patch
new file mode 100644
index 000000000..9585f3d52
--- /dev/null
+++ b/yocto-poky/meta/recipes-multimedia/pulseaudio/pulseaudio/0003-card-move-profile-selection-after-pa_card_new.patch
@@ -0,0 +1,363 @@
+From cc41c8a3149ef04d4aa2db3d15032605a5504658 Mon Sep 17 00:00:00 2001
+From: Tanu Kaskinen <tanuk@iki.fi>
+Date: Fri, 23 Oct 2015 12:59:53 +0300
+Subject: [PATCH 3/4] card: move profile selection after pa_card_new()
+
+I want module-alsa-card to set the availability of unavailable
+profiles before the initial card profile gets selected, so that the
+selection logic can use correct availability information.
+module-alsa-card initializes the jack state after calling
+pa_card_new(), however, and the profile selection happens in
+pa_card_new(). This patch solves that by introducing pa_card_put() and
+moving the profile selection code there.
+
+An alternative solution would have been to move the jack
+initialization to happen before pa_card_new() and use pa_card_new_data
+instead of pa_card in the jack initialization code, but I disliked
+that idea (I want to get rid of the "new data" pattern eventually).
+
+The CARD_NEW hook is used when applying the initial profile policy, so
+that was moved to pa_card_put(). That required changing the hook data
+from pa_card_new_data to pa_card. module-card-restore now uses
+pa_card_set_profile() instead of pa_card_new_data_set_profile(). That
+required adding a state variable to pa_card, because
+pa_card_set_profile() needs to distinguish between setting the initial
+profile and setting the profile in other situations.
+
+The order in which the initial profile policy is applied is reversed
+in this patch. Previously the first one to set it won, now the last
+one to set it wins. I think this is better, because if you have N
+parties that want to set the profile, we avoid checking N times
+whether someone else has already set the profile.
+
+http://bugzilla.yoctoproject.org/show_bug.cgi?id=8448
+
+Upstream-Status: Submitted [http://lists.freedesktop.org/archives/pulseaudio-discuss/2015-October/024614.html]
+Signed-off-by: Jussi Kukkonen <jussi.kukkonen@intel.com>
+---
+ src/modules/alsa/module-alsa-card.c | 19 +++---
+ src/modules/bluetooth/module-bluez4-device.c | 18 +++---
+ src/modules/bluetooth/module-bluez5-device.c | 1 +
+ src/modules/macosx/module-coreaudio-device.c | 1 +
+ src/modules/module-card-restore.c | 24 ++++----
+ src/pulsecore/card.c | 86 +++++++++++++++-------------
+ src/pulsecore/card.h | 7 +++
+ 7 files changed, 87 insertions(+), 69 deletions(-)
+
+diff --git a/src/modules/alsa/module-alsa-card.c b/src/modules/alsa/module-alsa-card.c
+index 32f517e..5b39654 100644
+--- a/src/modules/alsa/module-alsa-card.c
++++ b/src/modules/alsa/module-alsa-card.c
+@@ -754,15 +754,6 @@ int pa__init(pa_module *m) {
+ goto fail;
+ }
+
+- if ((profile = pa_modargs_get_value(u->modargs, "profile", NULL))) {
+- if (pa_hashmap_get(data.profiles, profile))
+- pa_card_new_data_set_profile(&data, profile);
+- else {
+- pa_log("No such profile: %s", profile);
+- goto fail;
+- }
+- }
+-
+ u->card = pa_card_new(m->core, &data);
+ pa_card_new_data_done(&data);
+
+@@ -773,6 +764,16 @@ int pa__init(pa_module *m) {
+ u->card->set_profile = card_set_profile;
+
+ init_jacks(u);
++ pa_card_put(u->card);
++
++ if ((profile = pa_modargs_get_value(u->modargs, "profile", NULL))) {
++ u->card->active_profile = pa_hashmap_get(u->card->profiles, profile);
++ if (!u->card->active_profile) {
++ pa_log("No such profile: %s", profile);
++ goto fail;
++ }
++ }
++
+ init_profile(u);
+ init_eld_ctls(u);
+
+diff --git a/src/modules/bluetooth/module-bluez4-device.c b/src/modules/bluetooth/module-bluez4-device.c
+index 94e6988..5efc5dc 100644
+--- a/src/modules/bluetooth/module-bluez4-device.c
++++ b/src/modules/bluetooth/module-bluez4-device.c
+@@ -2307,15 +2307,6 @@ static int add_card(struct userdata *u) {
+ *d = PA_BLUEZ4_PROFILE_OFF;
+ pa_hashmap_put(data.profiles, p->name, p);
+
+- if ((default_profile = pa_modargs_get_value(u->modargs, "profile", NULL))) {
+- if (pa_hashmap_get(data.profiles, default_profile))
+- pa_card_new_data_set_profile(&data, default_profile);
+- else {
+- pa_log("Profile '%s' not valid or not supported by device.", default_profile);
+- return -1;
+- }
+- }
+-
+ u->card = pa_card_new(u->core, &data);
+ pa_card_new_data_done(&data);
+
+@@ -2326,6 +2317,15 @@ static int add_card(struct userdata *u) {
+
+ u->card->userdata = u;
+ u->card->set_profile = card_set_profile;
++ pa_card_put(u->card);
++
++ if ((default_profile = pa_modargs_get_value(u->modargs, "profile", NULL))) {
++ u->card->active_profile = pa_hashmap_get(u->card->profiles, default_profile);
++ if (!u->card->active_profile) {
++ pa_log("Profile '%s' not valid or not supported by device.", default_profile);
++ return -1;
++ }
++ }
+
+ d = PA_CARD_PROFILE_DATA(u->card->active_profile);
+
+diff --git a/src/modules/bluetooth/module-bluez5-device.c b/src/modules/bluetooth/module-bluez5-device.c
+index 3321785..0081a21 100644
+--- a/src/modules/bluetooth/module-bluez5-device.c
++++ b/src/modules/bluetooth/module-bluez5-device.c
+@@ -1959,6 +1959,7 @@ static int add_card(struct userdata *u) {
+
+ u->card->userdata = u;
+ u->card->set_profile = set_profile_cb;
++ pa_card_put(u->card);
+
+ p = PA_CARD_PROFILE_DATA(u->card->active_profile);
+ u->profile = *p;
+diff --git a/src/modules/macosx/module-coreaudio-device.c b/src/modules/macosx/module-coreaudio-device.c
+index 4bbb5d5..41f151f 100644
+--- a/src/modules/macosx/module-coreaudio-device.c
++++ b/src/modules/macosx/module-coreaudio-device.c
+@@ -764,6 +764,7 @@ int pa__init(pa_module *m) {
+ pa_card_new_data_done(&card_new_data);
+ u->card->userdata = u;
+ u->card->set_profile = card_set_profile;
++ pa_card_put(u->card);
+
+ u->rtpoll = pa_rtpoll_new();
+ pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
+diff --git a/src/modules/module-card-restore.c b/src/modules/module-card-restore.c
+index baa2f4f..0501ac8 100644
+--- a/src/modules/module-card-restore.c
++++ b/src/modules/module-card-restore.c
+@@ -485,34 +485,38 @@ static pa_hook_result_t port_offset_change_callback(pa_core *c, pa_device_port *
+ return PA_HOOK_OK;
+ }
+
+-static pa_hook_result_t card_new_hook_callback(pa_core *c, pa_card_new_data *new_data, struct userdata *u) {
++static pa_hook_result_t card_new_hook_callback(pa_core *c, pa_card *card, struct userdata *u) {
+ struct entry *e;
+ void *state;
+ pa_device_port *p;
+ struct port_info *p_info;
+
+- pa_assert(new_data);
++ pa_assert(c);
++ pa_assert(card);
++ pa_assert(u);
+
+- if (!(e = entry_read(u, new_data->name)))
++ if (!(e = entry_read(u, card->name)))
+ return PA_HOOK_OK;
+
+ if (e->profile[0]) {
+- if (!new_data->active_profile) {
+- pa_card_new_data_set_profile(new_data, e->profile);
+- pa_log_info("Restored profile '%s' for card %s.", new_data->active_profile, new_data->name);
+- new_data->save_profile = true;
++ pa_card_profile *profile;
+
++ profile = pa_hashmap_get(card->profiles, e->profile);
++ if (profile) {
++ pa_card_set_profile(card, profile, true);
++ pa_log_info("Restored profile '%s' for card %s.", card->active_profile->name, card->name);
+ } else
+- pa_log_debug("Not restoring profile for card %s, because already set.", new_data->name);
++ pa_log_debug("Tried to restore profile %s for card %s, but the card doesn't have such profile.",
++ e->profile, card->name);
+ }
+
+ /* Always restore the latency offsets because their
+ * initial value is always 0 */
+
+- pa_log_info("Restoring port latency offsets for card %s.", new_data->name);
++ pa_log_info("Restoring port latency offsets for card %s.", card->name);
+
+ PA_HASHMAP_FOREACH(p_info, e->ports, state)
+- if ((p = pa_hashmap_get(new_data->ports, p_info->name)))
++ if ((p = pa_hashmap_get(card->ports, p_info->name)))
+ p->latency_offset = p_info->offset;
+
+ entry_free(e);
+diff --git a/src/pulsecore/card.c b/src/pulsecore/card.c
+index cc4c784..1b7f71b 100644
+--- a/src/pulsecore/card.c
++++ b/src/pulsecore/card.c
+@@ -151,6 +151,7 @@ pa_card *pa_card_new(pa_core *core, pa_card_new_data *data) {
+ pa_assert(!pa_hashmap_isempty(data->profiles));
+
+ c = pa_xnew(pa_card, 1);
++ c->state = PA_CARD_STATE_INIT;
+
+ if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_CARD, c, data->namereg_fail))) {
+ pa_xfree(c);
+@@ -159,12 +160,6 @@ pa_card *pa_card_new(pa_core *core, pa_card_new_data *data) {
+
+ pa_card_new_data_set_name(data, name);
+
+- if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_CARD_NEW], data) < 0) {
+- pa_xfree(c);
+- pa_namereg_unregister(core, name);
+- return NULL;
+- }
+-
+ c->core = core;
+ c->name = pa_xstrdup(data->name);
+ c->proplist = pa_proplist_copy(data->proplist);
+@@ -187,30 +182,6 @@ pa_card *pa_card_new(pa_core *core, pa_card_new_data *data) {
+ PA_HASHMAP_FOREACH(port, c->ports, state)
+ port->card = c;
+
+- c->active_profile = NULL;
+- c->save_profile = false;
+-
+- if (data->active_profile)
+- if ((c->active_profile = pa_hashmap_get(c->profiles, data->active_profile)))
+- c->save_profile = data->save_profile;
+-
+- if (!c->active_profile) {
+- PA_HASHMAP_FOREACH(profile, c->profiles, state) {
+- if (profile->available == PA_AVAILABLE_NO)
+- continue;
+-
+- if (!c->active_profile || profile->priority > c->active_profile->priority)
+- c->active_profile = profile;
+- }
+- /* If all profiles are not available, then we still need to pick one */
+- if (!c->active_profile) {
+- PA_HASHMAP_FOREACH(profile, c->profiles, state)
+- if (!c->active_profile || profile->priority > c->active_profile->priority)
+- c->active_profile = profile;
+- }
+- pa_assert(c->active_profile);
+- }
+-
+ c->userdata = NULL;
+ c->set_profile = NULL;
+ c->active_profile = NULL;
+@@ -219,13 +190,39 @@ pa_card *pa_card_new(pa_core *core, pa_card_new_data *data) {
+ pa_device_init_icon(c->proplist, true);
+ pa_device_init_intended_roles(c->proplist);
+
+- pa_assert_se(pa_idxset_put(core->cards, c, &c->index) >= 0);
++ return c;
++}
+
+- pa_log_info("Created %u \"%s\"", c->index, c->name);
+- pa_subscription_post(core, PA_SUBSCRIPTION_EVENT_CARD|PA_SUBSCRIPTION_EVENT_NEW, c->index);
++void pa_card_put(pa_card *card) {
++ pa_card_profile *profile;
++ void *state;
+
+- pa_hook_fire(&core->hooks[PA_CORE_HOOK_CARD_PUT], c);
+- return c;
++ pa_assert(card);
++
++ PA_HASHMAP_FOREACH(profile, card->profiles, state) {
++ if (profile->available == PA_AVAILABLE_NO)
++ continue;
++
++ if (!card->active_profile || profile->priority > card->active_profile->priority)
++ card->active_profile = profile;
++ }
++
++ /* If all profiles are unavailable, then we still need to pick one */
++ if (!card->active_profile) {
++ PA_HASHMAP_FOREACH(profile, card->profiles, state)
++ if (!card->active_profile || profile->priority > card->active_profile->priority)
++ card->active_profile = profile;
++ }
++ pa_assert(card->active_profile);
++
++ pa_hook_fire(&card->core->hooks[PA_CORE_HOOK_CARD_NEW], card);
++
++ pa_assert_se(pa_idxset_put(card->core->cards, card, &card->index) >= 0);
++ card->state = PA_CARD_STATE_LINKED;
++
++ pa_log_info("Created %u \"%s\"", card->index, card->name);
++ pa_hook_fire(&card->core->hooks[PA_CORE_HOOK_CARD_PUT], card);
++ pa_subscription_post(card->core, PA_SUBSCRIPTION_EVENT_CARD|PA_SUBSCRIPTION_EVENT_NEW, card->index);
+ }
+
+ void pa_card_free(pa_card *c) {
+@@ -292,17 +289,24 @@ int pa_card_set_profile(pa_card *c, pa_card_profile *profile, bool save) {
+ return 0;
+ }
+
+- if ((r = c->set_profile(c, profile)) < 0)
++ /* If we're setting the initial profile, we shouldn't call set_profile(),
++ * because the implementations don't expect that (for historical reasons).
++ * We should just set c->active_profile, and the implementations will
++ * properly set up that profile after pa_card_put() has returned. It would
++ * be probably good to change this so that also the initial profile can be
++ * set up in set_profile(), but if set_profile() fails, that would need
++ * some better handling than what we do here currently. */
++ if (c->state != PA_CARD_STATE_INIT && (r = c->set_profile(c, profile)) < 0)
+ return r;
+
+- pa_subscription_post(c->core, PA_SUBSCRIPTION_EVENT_CARD|PA_SUBSCRIPTION_EVENT_CHANGE, c->index);
+-
+- pa_log_info("Changed profile of card %u \"%s\" to %s", c->index, c->name, profile->name);
+-
+ c->active_profile = profile;
+ c->save_profile = save;
+
+- pa_hook_fire(&c->core->hooks[PA_CORE_HOOK_CARD_PROFILE_CHANGED], c);
++ if (c->state != PA_CARD_STATE_INIT) {
++ pa_log_info("Changed profile of card %u \"%s\" to %s", c->index, c->name, profile->name);
++ pa_hook_fire(&c->core->hooks[PA_CORE_HOOK_CARD_PROFILE_CHANGED], c);
++ pa_subscription_post(c->core, PA_SUBSCRIPTION_EVENT_CARD|PA_SUBSCRIPTION_EVENT_CHANGE, c->index);
++ }
+
+ return 0;
+ }
+diff --git a/src/pulsecore/card.h b/src/pulsecore/card.h
+index 1c33958..dbbc1c2 100644
+--- a/src/pulsecore/card.h
++++ b/src/pulsecore/card.h
+@@ -37,6 +37,11 @@ typedef enum pa_available {
+ #include <pulsecore/module.h>
+ #include <pulsecore/idxset.h>
+
++typedef enum pa_card_state {
++ PA_CARD_STATE_INIT,
++ PA_CARD_STATE_LINKED,
++} pa_card_state_t;
++
+ typedef struct pa_card_profile {
+ pa_card *card;
+ char *name;
+@@ -61,6 +66,7 @@ typedef struct pa_card_profile {
+
+ struct pa_card {
+ uint32_t index;
++ pa_card_state_t state;
+ pa_core *core;
+
+ char *name;
+@@ -115,6 +121,7 @@ void pa_card_new_data_set_profile(pa_card_new_data *data, const char *profile);
+ void pa_card_new_data_done(pa_card_new_data *data);
+
+ pa_card *pa_card_new(pa_core *c, pa_card_new_data *data);
++void pa_card_put(pa_card *c);
+ void pa_card_free(pa_card *c);
+
+ void pa_card_add_profile(pa_card *c, pa_card_profile *profile);
+--
+2.1.4
+
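The structural change above splits card creation in two: pa_card_new() only allocates and fills in the object, while the new pa_card_put() picks the initial profile, fires the CARD_NEW/CARD_PUT hooks and links the card into the core; a state field lets pa_card_set_profile() skip the implementation callback while the card is still being initialised. A stripped-down sketch of that shape, with made-up types rather than the PulseAudio API:

    #include <stdlib.h>

    enum card_state { CARD_STATE_INIT, CARD_STATE_LINKED };

    struct card {
        enum card_state state;
        int active_profile;                    /* -1 == none chosen yet */
        int (*set_profile)(struct card *c, int profile);
    };

    struct card *card_new(void)
    {
        struct card *c = calloc(1, sizeof(*c));
        if (!c)
            return NULL;
        c->state = CARD_STATE_INIT;            /* created but not linked */
        c->active_profile = -1;
        return c;
    }

    /* Second phase: choose the initial profile, then mark the card linked. */
    void card_put(struct card *c, int initial_profile)
    {
        c->active_profile = initial_profile;
        c->state = CARD_STATE_LINKED;
    }

    int card_set_profile(struct card *c, int profile)
    {
        /* While still in INIT the implementation callback is skipped, just
         * like the new c->state != PA_CARD_STATE_INIT guard above. */
        if (c->state != CARD_STATE_INIT && c->set_profile) {
            int r = c->set_profile(c, profile);
            if (r < 0)
                return r;
        }
        c->active_profile = profile;
        return 0;
    }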
diff --git a/yocto-poky/meta/recipes-multimedia/pulseaudio/pulseaudio/0004-alsa-set-availability-for-some-unavailable-profiles.patch b/yocto-poky/meta/recipes-multimedia/pulseaudio/pulseaudio/0004-alsa-set-availability-for-some-unavailable-profiles.patch
new file mode 100644
index 000000000..bb318aa06
--- /dev/null
+++ b/yocto-poky/meta/recipes-multimedia/pulseaudio/pulseaudio/0004-alsa-set-availability-for-some-unavailable-profiles.patch
@@ -0,0 +1,75 @@
+From 0136b73158f60d5dc630ae348b18df3b59a2a5c2 Mon Sep 17 00:00:00 2001
+From: Tanu Kaskinen <tanuk@iki.fi>
+Date: Fri, 23 Oct 2015 13:37:11 +0300
+Subject: [PATCH 4/4] alsa: set availability for (some) unavailable profiles
+
+The alsa card hasn't so far set any availability for profiles. That
+caused an issue with some HDMI hardware: the sound card has two HDMI
+outputs, but only the second of them is actually usable. The
+unavailable port is marked as unavailable and the available port is
+marked as available, but this information isn't propagated to the
+profile availability. Without profile availability information, the
+initial profile policy picks the unavailable one, since it has a
+higher priority value.
+
+This patch adds simple logic for marking some profiles unavailable:
+if the profile only contains unavailable ports, the profile is
+unavailable too. This can be improved in the future so that if a
+profile contains sinks or sources that only contain unavailable ports,
+the profile should be marked as unavailable. Implementing that
+requires adding more information about the sinks and sources to
+pa_card_profile, however.
+
+BugLink: https://bugzilla.yoctoproject.org/show_bug.cgi?id=8448
+
+Upstream-Status: Submitted [http://lists.freedesktop.org/archives/pulseaudio-discuss/2015-October/024614.html]
+Signed-off-by: Jussi Kukkonen <jussi.kukkonen@intel.com>
+---
+ src/modules/alsa/module-alsa-card.c | 24 ++++++++++++++++++++++++
+ 1 file changed, 24 insertions(+)
+
+diff --git a/src/modules/alsa/module-alsa-card.c b/src/modules/alsa/module-alsa-card.c
+index 5b39654..73a846c 100644
+--- a/src/modules/alsa/module-alsa-card.c
++++ b/src/modules/alsa/module-alsa-card.c
+@@ -366,6 +366,7 @@ static int report_jack_state(snd_mixer_elem_t *melem, unsigned int mask) {
+ void *state;
+ pa_alsa_jack *jack;
+ pa_device_port *port;
++ pa_card_profile *profile;
+
+ pa_assert(u);
+
+@@ -396,6 +397,29 @@ static int report_jack_state(snd_mixer_elem_t *melem, unsigned int mask) {
+ }
+ report_port_state(port, u);
+ }
++
++ /* Update profile availabilities. The logic could be improved; for now we
++ * only set obviously unavailable profiles (those that contain only
++ * unavailable ports) to PA_AVAILABLE_NO and all others to
++ * PA_AVAILABLE_UNKNOWN. */
++ PA_HASHMAP_FOREACH(profile, u->card->profiles, state) {
++ void *state2;
++ pa_available_t available = PA_AVAILABLE_NO;
++
++ /* Don't touch the "off" profile. */
++ if (pa_hashmap_size(profile->ports) == 0)
++ continue;
++
++ PA_HASHMAP_FOREACH(port, profile->ports, state2) {
++ if (port->available != PA_AVAILABLE_NO) {
++ available = PA_AVAILABLE_UNKNOWN;
++ break;
++ }
++ }
++
++ pa_card_profile_set_available(profile, available);
++ }
++
+ return 0;
+ }
+
+--
+2.1.4
+
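The rule this patch adds is deliberately conservative: a profile that has ports, all of which are known to be unavailable, is marked PA_AVAILABLE_NO; every other profile stays PA_AVAILABLE_UNKNOWN, and the port-less "off" profile is never touched. The same decision as a small pure function, with made-up types mirroring the pa_available_t values:

    enum available { AVAILABLE_UNKNOWN = 0, AVAILABLE_NO = 1, AVAILABLE_YES = 2 };

    /* Derive a profile's availability from its ports: only when every port
     * is known-unavailable is the whole profile reported unavailable. */
    static enum available profile_availability(const enum available *ports,
                                               unsigned n_ports)
    {
        if (n_ports == 0)              /* port-less "off" profile: no claim */
            return AVAILABLE_UNKNOWN;

        for (unsigned i = 0; i < n_ports; i++)
            if (ports[i] != AVAILABLE_NO)
                return AVAILABLE_UNKNOWN;

        return AVAILABLE_NO;
    }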
diff --git a/yocto-poky/meta/recipes-multimedia/pulseaudio/pulseaudio_6.0.bb b/yocto-poky/meta/recipes-multimedia/pulseaudio/pulseaudio_6.0.bb
index 31e909638..ec629aa02 100644
--- a/yocto-poky/meta/recipes-multimedia/pulseaudio/pulseaudio_6.0.bb
+++ b/yocto-poky/meta/recipes-multimedia/pulseaudio/pulseaudio_6.0.bb
@@ -6,6 +6,10 @@ SRC_URI = "http://freedesktop.org/software/pulseaudio/releases/${BP}.tar.xz \
file://0001-conf-parser-add-support-for-.d-directories.patch \
file://fix-git-version-gen.patch \
file://volatiles.04_pulse \
+ file://0001-card-add-pa_card_profile.ports.patch \
+ file://0002-alsa-bluetooth-fail-if-user-requested-profile-doesn-.patch \
+ file://0003-card-move-profile-selection-after-pa_card_new.patch \
+ file://0004-alsa-set-availability-for-some-unavailable-profiles.patch \
"
SRC_URI[md5sum] = "b691e83b7434c678dffacfa3a027750e"
SRC_URI[sha256sum] = "b50640e0b80b1607600accfad2e45aabb79d379bf6354c9671efa2065477f6f6"
diff --git a/yocto-poky/meta/recipes-qt/qt4/qt4-4.8.7.inc b/yocto-poky/meta/recipes-qt/qt4/qt4-4.8.7.inc
index 5257e760e..d165514bf 100644
--- a/yocto-poky/meta/recipes-qt/qt4/qt4-4.8.7.inc
+++ b/yocto-poky/meta/recipes-qt/qt4/qt4-4.8.7.inc
@@ -21,10 +21,10 @@ SRC_URI = "http://download.qt-project.org/official_releases/qt/4.8/${PV}/qt-ever
file://0018-configure-make-pulseaudio-a-configurable-option.patch \
file://0019-Fixes-for-gcc-4.7.0-particularly-on-qemux86.patch \
file://0027-tools.pro-disable-qmeegographicssystemhelper.patch \
- file://0028-Don-t-crash-on-broken-GIF-images.patch \
file://0030-aarch64_arm64_qatomic_support.patch \
file://0031-aarch64_arm64_mkspecs.patch \
file://0032-aarch64_add_header.patch \
+ file://0034-Fix-kmap2qmap-build-with-clang.patch \
file://Fix-QWSLock-invalid-argument-logs.patch \
file://add_check_for_aarch64_32.patch \
file://g++.conf \
diff --git a/yocto-poky/meta/recipes-qt/qt4/qt4-4.8.7/0028-Don-t-crash-on-broken-GIF-images.patch b/yocto-poky/meta/recipes-qt/qt4/qt4-4.8.7/0028-Don-t-crash-on-broken-GIF-images.patch
deleted file mode 100644
index 906e2fdfc..000000000
--- a/yocto-poky/meta/recipes-qt/qt4/qt4-4.8.7/0028-Don-t-crash-on-broken-GIF-images.patch
+++ /dev/null
@@ -1,47 +0,0 @@
-From f1b76c126c476c155af8c404b97c42cd1a709333 Mon Sep 17 00:00:00 2001
-From: Lars Knoll <lars.knoll@digia.com>
-Date: Thu, 24 Apr 2014 15:33:27 +0200
-Subject: [PATCH] Don't crash on broken GIF images
-
-Broken GIF images could set invalid width and height
-values inside the image, leading to Qt creating a null
-QImage for it. In that case we need to abort decoding
-the image and return an error.
-
-Initial patch by Rich Moore.
-
-Backport of Id82a4036f478bd6e49c402d6598f57e7e5bb5e1e from Qt 5
-
-Task-number: QTBUG-38367
-Change-Id: I0680740018aaa8356d267b7af3f01fac3697312a
-Security-advisory: CVE-2014-0190
-Reviewed-by: Richard J. Moore <rich@kde.org>
-
-Upstream-Status: Backport
-Signed-off-by: Paul Eggleton <paul.eggleton@linux.intel.com>
-
----
- src/gui/image/qgifhandler.cpp | 7 +++++++
- 1 file changed, 7 insertions(+)
-
-diff --git a/src/gui/image/qgifhandler.cpp b/src/gui/image/qgifhandler.cpp
-index 3324f04..5199dd3 100644
---- a/src/gui/image/qgifhandler.cpp
-+++ b/src/gui/image/qgifhandler.cpp
-@@ -359,6 +359,13 @@ int QGIFFormat::decode(QImage *image, const uchar *buffer, int length,
- memset(bits, 0, image->byteCount());
- }
-
-+ // Check if the previous attempt to create the image failed. If it
-+ // did then the image is broken and we should give up.
-+ if (image->isNull()) {
-+ state = Error;
-+ return -1;
-+ }
-+
- disposePrevious(image);
- disposed = false;
-
---
-1.9.3
-
diff --git a/yocto-poky/meta/recipes-qt/qt4/qt4-4.8.7/0034-Fix-kmap2qmap-build-with-clang.patch b/yocto-poky/meta/recipes-qt/qt4/qt4-4.8.7/0034-Fix-kmap2qmap-build-with-clang.patch
new file mode 100644
index 000000000..f47a1d9c3
--- /dev/null
+++ b/yocto-poky/meta/recipes-qt/qt4/qt4-4.8.7/0034-Fix-kmap2qmap-build-with-clang.patch
@@ -0,0 +1,34 @@
+From: Samuel Gaist <samuel.gaist@edeltech.ch>
+Date: Wed, 4 Mar 2015 20:16:50 +0000 (+0100)
+Subject: Fix kmap2qmap build on OS X
+X-Git-Tag: v5.4.2~6
+X-Git-Url: https://codereview.qt-project.org/gitweb?p=qt%2Fqttools.git;a=commitdiff_plain;h=cf196a2565235f649b88fac55b53270bea23458d;hp=3070815a24239bd0f469bfeb8d0a1f091974e28e
+
+Fix kmap2qmap build on OS X
+
+Currently kmap2qmap fails to build on OS X (clang). This patch aims to
+fix this.
+
+Change-Id: I61c985dc7ad1f2486368c39aa976599d274942ab
+Reviewed-by: Friedemann Kleint <Friedemann.Kleint@theqtcompany.com>
+---
+Upstream-Status: Backport
+Index: qt-everywhere-opensource-src-4.8.7/tools/kmap2qmap/main.cpp
+===================================================================
+--- qt-everywhere-opensource-src-4.8.7.orig/tools/kmap2qmap/main.cpp
++++ qt-everywhere-opensource-src-4.8.7/tools/kmap2qmap/main.cpp
+@@ -385,9 +385,11 @@ static const int symbol_synonyms_size =
+
+ // makes the generated array in --header mode a bit more human readable
+ QT_BEGIN_NAMESPACE
+-static bool operator<(const QWSKeyboard::Mapping &m1, const QWSKeyboard::Mapping &m2)
+-{
+- return m1.keycode != m2.keycode ? m1.keycode < m2.keycode : m1.modifiers < m2.modifiers;
++namespace QWSKeyboard {
++ static bool operator<(const Mapping &m1, const Mapping &m2)
++ {
++ return m1.keycode != m2.keycode ? m1.keycode < m2.keycode : m1.modifiers < m2.modifiers;
++ }
+ }
+ QT_END_NAMESPACE
+
diff --git a/yocto-poky/meta/recipes-sato/gtk-engines/gtk-sato-engine.inc b/yocto-poky/meta/recipes-sato/gtk-engines/gtk-sato-engine.inc
index 4e37ff204..fa6b2b2e7 100644
--- a/yocto-poky/meta/recipes-sato/gtk-engines/gtk-sato-engine.inc
+++ b/yocto-poky/meta/recipes-sato/gtk-engines/gtk-sato-engine.inc
@@ -8,6 +8,9 @@ SECTION = "x11/base"
DEPENDS = "gtk+"
RDEPENDS_gtk-theme-sato = "gtk-sato-engine"
+inherit distro_features_check
+ANY_OF_DISTRO_FEATURES = "${GTK2DISTROFEATURES}"
+
PACKAGES += "gtk-theme-sato"
FILES_${PN} = "${libdir}/gtk-2.0/*/engines/*.so "
FILES_${PN}-dev = "${libdir}/gtk-2.0/*/engines/*.la"
diff --git a/yocto-poky/meta/recipes-sato/leafpad/leafpad_0.8.18.1.bb b/yocto-poky/meta/recipes-sato/leafpad/leafpad_0.8.18.1.bb
index f71514916..093b89f77 100644
--- a/yocto-poky/meta/recipes-sato/leafpad/leafpad_0.8.18.1.bb
+++ b/yocto-poky/meta/recipes-sato/leafpad/leafpad_0.8.18.1.bb
@@ -6,7 +6,7 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=94d55d512a9ba36caa9b7df079bae19f \
file://src/leafpad.h;endline=20;md5=d3d6a89f5e61e8b13bdea537511ba1fa \
file://src/utils.c;endline=20;md5=0d2cc6584ba3202448bb274f62739571"
-DEPENDS = "gtk+ intltool-native libowl"
+DEPENDS = "gtk+ intltool-native libowl gettext-native"
# The libowl requires x11 in DISTRO_FEATURES
REQUIRED_DISTRO_FEATURES = "x11"
diff --git a/yocto-poky/meta/recipes-sato/matchbox-terminal/matchbox-terminal_git.bb b/yocto-poky/meta/recipes-sato/matchbox-terminal/matchbox-terminal_git.bb
index 91fd150e3..c8cbd57a6 100644
--- a/yocto-poky/meta/recipes-sato/matchbox-terminal/matchbox-terminal_git.bb
+++ b/yocto-poky/meta/recipes-sato/matchbox-terminal/matchbox-terminal_git.bb
@@ -15,4 +15,6 @@ SRC_URI = "git://git.yoctoproject.org/${BPN}"
S = "${WORKDIR}/git"
-inherit autotools pkgconfig
+inherit autotools pkgconfig distro_features_check
+
+ANY_OF_DISTRO_FEATURES = "${GTK2DISTROFEATURES}"
diff --git a/yocto-poky/meta/recipes-sato/pcmanfm/pcmanfm_1.2.3.bb b/yocto-poky/meta/recipes-sato/pcmanfm/pcmanfm_1.2.3.bb
index 4467303f6..b63db875d 100644
--- a/yocto-poky/meta/recipes-sato/pcmanfm/pcmanfm_1.2.3.bb
+++ b/yocto-poky/meta/recipes-sato/pcmanfm/pcmanfm_1.2.3.bb
@@ -7,7 +7,7 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=59530bdf33659b29e73d4adb9f9f6552 \
file://src/gseal-gtk-compat.h;endline=21;md5=46922c8691f58d124f9420fe16149ce2"
SECTION = "x11"
-DEPENDS = "gtk+ startup-notification libfm intltool-native"
+DEPENDS = "gtk+ startup-notification libfm intltool-native gettext-native"
DEPENDS_append_poky = " libowl"
diff --git a/yocto-poky/meta/recipes-sato/puzzles/oh-puzzles_git.bb b/yocto-poky/meta/recipes-sato/puzzles/oh-puzzles_git.bb
index 36cd2cc7a..9876fe0fd 100644
--- a/yocto-poky/meta/recipes-sato/puzzles/oh-puzzles_git.bb
+++ b/yocto-poky/meta/recipes-sato/puzzles/oh-puzzles_git.bb
@@ -8,7 +8,7 @@ LIC_FILES_CHKSUM = "file://LICENCE;md5=f56ec6772dd1c7c367067bbea8ea1675 \
file://src/tree234.c;endline=28;md5=b4feb1976feebf8f1379093ed52f2945"
SECTION = "x11"
-DEPENDS = "gtk+ gconf intltool-native librsvg"
+DEPENDS = "gtk+ gconf intltool-native librsvg gettext-native"
# libowl requires x11 in DISTRO_FEATURES
DEPENDS_append_poky = " ${@bb.utils.contains('DISTRO_FEATURES', 'x11', 'libowl', '', d)}"
diff --git a/yocto-poky/meta/recipes-sato/webkit/webkitgtk_2.8.5.bb b/yocto-poky/meta/recipes-sato/webkit/webkitgtk_2.8.5.bb
index 82d670d94..e29666ae7 100644
--- a/yocto-poky/meta/recipes-sato/webkit/webkitgtk_2.8.5.bb
+++ b/yocto-poky/meta/recipes-sato/webkit/webkitgtk_2.8.5.bb
@@ -18,7 +18,10 @@ SRC_URI = "\
SRC_URI[md5sum] = "df79991848a5096d3a75289ebce547ae"
SRC_URI[sha256sum] = "3d1f0c534935f43fd74df90f2648fcee672d60f1f57a30fa557a77891ae04d20"
-inherit cmake lib_package pkgconfig perlnative pythonnative
+inherit cmake lib_package pkgconfig perlnative pythonnative distro_features_check
+
+# depends on libxt
+REQUIRED_DISTRO_FEATURES = "x11"
DEPENDS = "zlib enchant libsoup-2.4 curl libxml2 cairo libxslt libxt libidn gnutls \
gtk+ gtk+3 gstreamer1.0 gstreamer1.0-plugins-base flex-native gperf-native sqlite3 \
@@ -26,33 +29,46 @@ DEPENDS = "zlib enchant libsoup-2.4 curl libxml2 cairo libxslt libxt libidn gnut
atk udev harfbuzz jpeg libpng pulseaudio librsvg libtheora libvorbis libxcomposite libxtst \
ruby-native libsecret libnotify gstreamer1.0-plugins-bad \
"
-DEPENDS += " ${@bb.utils.contains('DISTRO_FEATURES', 'opengl', 'virtual/libgl', '', d)}"
+
+PACKAGECONFIG ??= "${@base_contains('DISTRO_FEATURES', 'x11', 'x11', 'wayland' ,d)} \
+ ${@base_contains('DISTRO_FEATURES', 'opengl', 'webgl', '' ,d)}"
+
+PACKAGECONFIG[wayland] = "-DENABLE_WAYLAND_TARGET=ON,-DENABLE_WAYLAND_TARGET=OFF,wayland"
+PACKAGECONFIG[x11] = "-DENABLE_X11_TARGET=ON,-DENABLE_X11_TARGET=OFF,virtual/libx11"
+PACKAGECONFIG[geoclue] = "-DENABLE_GEOLOCATION=ON,-DENABLE_GEOLOCATION=OFF,geoclue"
+PACKAGECONFIG[enchant] = "-DENABLE_SPELLCHECK=ON,-DENABLE_SPELLCHECK=OFF,enchant"
+PACKAGECONFIG[gtk2] = "-DENABLE_PLUGIN_PROCESS_GTK2=ON,-DENABLE_PLUGIN_PROCESS_GTK2=OFF,gtk+"
+PACKAGECONFIG[gles2] = "-DENABLE_GLES2=ON,-DENABLE_GLES2=OFF,virtual/libgles2"
+PACKAGECONFIG[webgl] = "-DENABLE_WEBGL=ON,-DENABLE_WEBGL=OFF,virtual/libgl"
+PACKAGECONFIG[libsecret] = "-DENABLE_CREDENTIAL_STORAGE=ON,-DENABLE_CREDENTIAL_STORAGE=OFF,libsecret"
EXTRA_OECMAKE = " \
-DPORT=GTK \
-DCMAKE_BUILD_TYPE=Release \
- -DENABLE_INTROSPECTION=False \
- -DENABLE_MINIBROWSER=True \
- ${@bb.utils.contains('DISTRO_FEATURES', 'opengl', '-DENABLE_WEBGL=True', '-DENABLE_WEBGL=False', d)} \
+ -DENABLE_INTROSPECTION=OFF \
+ -DENABLE_GTKDOC=OFF \
+ -DENABLE_MINIBROWSER=ON \
"
# Javascript JIT is not supported on powerpc
-EXTRA_OECMAKE_append_powerpc = " -DENABLE_JIT=False "
-EXTRA_OECMAKE_append_powerpc64 = " -DENABLE_JIT=False "
+EXTRA_OECMAKE_append_powerpc = " -DENABLE_JIT=OFF "
+EXTRA_OECMAKE_append_powerpc64 = " -DENABLE_JIT=OFF "
# ARM JIT code does not build on ARMv5/6 anymore, apparently they test only on v7 onwards
-EXTRA_OECMAKE_append_armv5 = " -DENABLE_JIT=False "
-EXTRA_OECMAKE_append_armv6 = " -DENABLE_JIT=False "
+EXTRA_OECMAKE_append_armv5 = " -DENABLE_JIT=OFF "
+EXTRA_OECMAKE_append_armv6 = " -DENABLE_JIT=OFF "
# binutils 2.25.1 has a bug on aarch64:
# https://sourceware.org/bugzilla/show_bug.cgi?id=18430
-EXTRA_OECMAKE_append_aarch64 = " -DUSE_LD_GOLD=False "
+EXTRA_OECMAKE_append_aarch64 = " -DUSE_LD_GOLD=OFF "
# JIT not supported on MIPS either
-EXTRA_OECMAKE_append_mips = " -DENABLE_JIT=False "
-EXTRA_OECMAKE_append_mips64 = " -DENABLE_JIT=False "
+EXTRA_OECMAKE_append_mips = " -DENABLE_JIT=OFF "
+EXTRA_OECMAKE_append_mips64 = " -DENABLE_JIT=OFF "
FILES_${PN} += "${libdir}/webkit2gtk-4.0/injected-bundle/libwebkit2gtkinjectedbundle.so"
FILES_${PN}-dbg += "${libdir}/webkit2gtk-4.0/injected-bundle/.debug/libwebkit2gtkinjectedbundle.so"
FILES_${PN}-dbg += "${libdir}/webkitgtk/webkit2gtk-4.0/.debug/*"
+# http://errors.yoctoproject.org/Errors/Details/20370/
+ARM_INSTRUCTION_SET = "arm"
diff --git a/yocto-poky/meta/recipes-support/apr/apr-util_1.5.4.bb b/yocto-poky/meta/recipes-support/apr/apr-util_1.5.4.bb
index 6a14d14d4..a36f13db5 100644
--- a/yocto-poky/meta/recipes-support/apr/apr-util_1.5.4.bb
+++ b/yocto-poky/meta/recipes-support/apr/apr-util_1.5.4.bb
@@ -60,6 +60,12 @@ do_configure_append_class-nativesdk() {
sed -i "s#\(LIBTOOL=\$(apr_builddir)\).*#\1/libtool#" ${S}/build/rules.mk
}
+do_install_append_class-target() {
+ sed -i -e 's,${STAGING_DIR_HOST},,g' \
+ -e 's,APU_SOURCE_DIR=.*,APR_SOURCE_DIR=,g' \
+ -e 's,APU_BUILD_DIR=.*,APR_BUILD_DIR=,g' ${D}${bindir}/apu-1-config
+}
+
FILES_${PN} += "${libdir}/apr-util-1/apr_dbm_gdbm-1.so"
FILES_${PN}-dev += "${libdir}/aprutil.exp ${libdir}/apr-util-1/apr_dbm_gdbm.so* ${libdir}/apr-util-1/apr_dbm_gdbm.la"
FILES_${PN}-dbg += "${libdir}/apr-util-1/.debug/*"
diff --git a/yocto-poky/meta/recipes-support/apr/apr_1.5.2.bb b/yocto-poky/meta/recipes-support/apr/apr_1.5.2.bb
index c1f7f380e..1c61e84e5 100644
--- a/yocto-poky/meta/recipes-support/apr/apr_1.5.2.bb
+++ b/yocto-poky/meta/recipes-support/apr/apr_1.5.2.bb
@@ -32,6 +32,11 @@ CACHED_CONFIGUREVARS += "apr_cv_mutex_recursive=yes"
#
CACHED_CONFIGUREVARS += "ac_cv_header_netinet_sctp_h=no ac_cv_header_netinet_sctp_uio_h=no"
+# Otherwise libtool fails to compile apr-utils
+# x86_64-linux-libtool: compile: unable to infer tagged configuration
+# x86_64-linux-libtool: error: specify a tag with '--tag'
+CCACHE = ""
+
do_configure_prepend() {
# Avoid absolute paths for grep since it causes failures
# when using sstate between different hosts with different
@@ -55,7 +60,13 @@ do_configure_append() {
do_install_append() {
oe_multilib_header apr.h
install -d ${D}${datadir}/apr
- cp ${S}/${HOST_SYS}-libtool ${D}${datadir}/build-1/libtool
+}
+
+do_install_append_class-target() {
+ sed -i -e 's,${STAGING_DIR_HOST},,g' ${D}${datadir}/build-1/apr_rules.mk
+ sed -i -e 's,${STAGING_DIR_HOST},,g' \
+ -e 's,APR_SOURCE_DIR=.*,APR_SOURCE_DIR=,g' \
+ -e 's,APR_BUILD_DIR=.*,APR_BUILD_DIR=,g' ${D}${bindir}/apr-1-config
}
SSTATE_SCAN_FILES += "apr_rules.mk libtool"
@@ -73,6 +84,7 @@ apr_sysroot_preprocess () {
cp ${S}/build/mkdir.sh $d/
cp ${S}/build/make_exports.awk $d/
cp ${S}/build/make_var_export.awk $d/
+ cp ${S}/${HOST_SYS}-libtool ${SYSROOT_DESTDIR}${datadir}/build-1/libtool
}
do_compile_ptest() {
diff --git a/yocto-poky/meta/recipes-support/atk/at-spi2-core_2.16.0.bb b/yocto-poky/meta/recipes-support/atk/at-spi2-core_2.16.0.bb
index 933cbe70f..7c12b5428 100644
--- a/yocto-poky/meta/recipes-support/atk/at-spi2-core_2.16.0.bb
+++ b/yocto-poky/meta/recipes-support/atk/at-spi2-core_2.16.0.bb
@@ -11,7 +11,7 @@ SRC_URI = "${GNOME_MIRROR}/${BPN}/${MAJ_VER}/${BPN}-${PV}.tar.xz \
SRC_URI[md5sum] = "be6eeea370f913b7639b609913b2cf02"
SRC_URI[sha256sum] = "1c0b77fb8ce81abbf1d80c0afee9858b3f9229f673b7881995fe0fc16b1a74d0"
-DEPENDS = "dbus glib-2.0 virtual/libx11 libxi libxtst intltool-native"
+DEPENDS = "dbus glib-2.0 virtual/libx11 libxi libxtst intltool-native gettext-native"
inherit autotools gtk-doc pkgconfig distro_features_check
# depends on virtual/libx11
diff --git a/yocto-poky/meta/recipes-support/curl/curl/CVE-2016-0754.patch b/yocto-poky/meta/recipes-support/curl/curl/CVE-2016-0754.patch
new file mode 100644
index 000000000..f0402de08
--- /dev/null
+++ b/yocto-poky/meta/recipes-support/curl/curl/CVE-2016-0754.patch
@@ -0,0 +1,417 @@
+From b1bb4ca6d8777683b6a549fb61dba36759da26f4 Mon Sep 17 00:00:00 2001
+From: Ray Satiro <raysatiro@yahoo.com>
+Date: Tue, 26 Jan 2016 23:23:15 +0100
+Subject: [PATCH] curl: avoid local drive traversal when saving file (Windows)
+
+curl does not sanitize colons in a remote file name that is used as the
+local file name. This may lead to a vulnerability on systems where the
+colon is a special path character. Currently Windows/DOS is the only OS
+where this vulnerability applies.
+
+CVE-2016-0754
+
+Bug: http://curl.haxx.se/docs/adv_20160127B.html
+
+Upstream-Status: Backport
+http://curl.haxx.se/CVE-2016-0754.patch
+
+CVE: CVE-2016-0754
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ src/tool_cb_hdr.c | 40 ++++++------
+ src/tool_doswin.c | 174 ++++++++++++++++++++++++++++++++++++++++++++---------
+ src/tool_doswin.h | 2 +-
+ src/tool_operate.c | 29 ++++++---
+ 4 files changed, 187 insertions(+), 58 deletions(-)
+
+diff --git a/src/tool_cb_hdr.c b/src/tool_cb_hdr.c
+index fd208e8..0fca39f 100644
+--- a/src/tool_cb_hdr.c
++++ b/src/tool_cb_hdr.c
+@@ -26,10 +26,11 @@
+ #define ENABLE_CURLX_PRINTF
+ /* use our own printf() functions */
+ #include "curlx.h"
+
+ #include "tool_cfgable.h"
++#include "tool_doswin.h"
+ #include "tool_msgs.h"
+ #include "tool_cb_hdr.h"
+
+ #include "memdebug.h" /* keep this as LAST include */
+
+@@ -112,22 +113,28 @@ size_t tool_header_cb(void *ptr, size_t size, size_t nmemb, void *userdata)
+ /* this expression below typecasts 'cb' only to avoid
+ warning: signed and unsigned type in conditional expression
+ */
+ len = (ssize_t)cb - (p - str);
+ filename = parse_filename(p, len);
+- if(filename) {
+- outs->filename = filename;
+- outs->alloc_filename = TRUE;
+- outs->is_cd_filename = TRUE;
+- outs->s_isreg = TRUE;
+- outs->fopened = FALSE;
+- outs->stream = NULL;
+- hdrcbdata->honor_cd_filename = FALSE;
+- break;
+- }
+- else
++ if(!filename)
++ return failure;
++
++#if defined(MSDOS) || defined(WIN32)
++ if(sanitize_file_name(&filename)) {
++ free(filename);
+ return failure;
++ }
++#endif /* MSDOS || WIN32 */
++
++ outs->filename = filename;
++ outs->alloc_filename = TRUE;
++ outs->is_cd_filename = TRUE;
++ outs->s_isreg = TRUE;
++ outs->fopened = FALSE;
++ outs->stream = NULL;
++ hdrcbdata->honor_cd_filename = FALSE;
++ break;
+ }
+ }
+
+ return cb;
+ }
+@@ -179,19 +186,16 @@ static char *parse_filename(const char *ptr, size_t len)
+ return NULL;
+ }
+ }
+
+ /* scan for the end letter and stop there */
+- q = p;
+- while(*q) {
+- if(q[1] && (q[0] == '\\'))
+- q++;
+- else if(q[0] == stop)
++ for(q = p; *q; ++q) {
++ if(*q == stop) {
++ *q = '\0';
+ break;
+- q++;
++ }
+ }
+- *q = '\0';
+
+ /* make sure the file name doesn't end in \r or \n */
+ q = strchr(p, '\r');
+ if(q)
+ *q = '\0';
+diff --git a/src/tool_doswin.c b/src/tool_doswin.c
+index dd6e8bb..9c6a7a3 100644
+--- a/src/tool_doswin.c
++++ b/src/tool_doswin.c
+@@ -83,46 +83,110 @@ __pragma(warning(pop))
+ # define _use_lfn(f) ALWAYS_FALSE /* long file names never available */
+ #elif defined(__DJGPP__)
+ # include <fcntl.h> /* _use_lfn(f) prototype */
+ #endif
+
+-static const char *msdosify (const char *file_name);
+-static char *rename_if_dos_device_name (char *file_name);
++static char *msdosify(const char *file_name);
++static char *rename_if_dos_device_name(const char *file_name);
+
+-/*
+- * sanitize_dos_name: returns a newly allocated string holding a
+- * valid file name which will be a transformation of given argument
+- * in case this wasn't already a valid file name.
+- *
+- * This function takes ownership of given argument, free'ing it before
+- * returning. Caller is responsible of free'ing returned string. Upon
+- * out of memory condition function returns NULL.
+- */
+
+-char *sanitize_dos_name(char *file_name)
++/*
++Sanitize *file_name.
++Success: (CURLE_OK) *file_name points to a sanitized version of the original.
++ This function takes ownership of the original *file_name and frees it.
++Failure: (!= CURLE_OK) *file_name is unchanged.
++*/
++CURLcode sanitize_file_name(char **file_name)
+ {
+- char new_name[PATH_MAX];
++ size_t len;
++ char *p, *sanitized;
++
++ /* Calculate the maximum length of a filename.
++ FILENAME_MAX is often the same as PATH_MAX, in other words it does not
++ discount the path information. PATH_MAX size is calculated based on:
++ <drive-letter><colon><path-sep><max-filename-len><NULL> */
++ const size_t max_filename_len = PATH_MAX - 3 - 1;
++
++ if(!file_name || !*file_name)
++ return CURLE_BAD_FUNCTION_ARGUMENT;
++
++ len = strlen(*file_name);
++
++ if(len >= max_filename_len)
++ len = max_filename_len - 1;
+
+- if(!file_name)
+- return NULL;
++ sanitized = malloc(len + 1);
+
+- if(strlen(file_name) >= PATH_MAX)
+- file_name[PATH_MAX-1] = '\0'; /* truncate it */
++ if(!sanitized)
++ return CURLE_OUT_OF_MEMORY;
+
+- strcpy(new_name, msdosify(file_name));
++ strncpy(sanitized, *file_name, len);
++ sanitized[len] = '\0';
+
+- Curl_safefree(file_name);
++ for(p = sanitized; *p; ++p ) {
++ const char *banned;
++ if(1 <= *p && *p <= 31) {
++ *p = '_';
++ continue;
++ }
++ for(banned = "|<>/\\\":?*"; *banned; ++banned) {
++ if(*p == *banned) {
++ *p = '_';
++ break;
++ }
++ }
++ }
+
+- return strdup(rename_if_dos_device_name(new_name));
++#ifdef MSDOS
++ /* msdosify checks for more banned characters for MSDOS, however it allows
++ for some path information to pass through. since we are sanitizing only a
++ filename and cannot allow a path it's important this call be done in
++ addition to and not instead of the banned character check above. */
++ p = msdosify(sanitized);
++ if(!p) {
++ free(sanitized);
++ return CURLE_BAD_FUNCTION_ARGUMENT;
++ }
++ sanitized = p;
++ len = strlen(sanitized);
++#endif
++
++ p = rename_if_dos_device_name(sanitized);
++ if(!p) {
++ free(sanitized);
++ return CURLE_BAD_FUNCTION_ARGUMENT;
++ }
++ sanitized = p;
++ len = strlen(sanitized);
++
++ /* dos_device_name rename will rename a device name, possibly changing the
++ length. If the length is too long now we can't truncate it because we
++ could end up with a device name. In practice this shouldn't be a problem
++ because device names are short, but you never know. */
++ if(len >= max_filename_len) {
++ free(sanitized);
++ return CURLE_BAD_FUNCTION_ARGUMENT;
++ }
++
++ *file_name = sanitized;
++ return CURLE_OK;
+ }
+
+-/* The following functions are taken with modification from the DJGPP
+- * port of tar 1.12. They use algorithms originally from DJTAR. */
++/* The functions msdosify, rename_if_dos_device_name and __crt0_glob_function
++ * were taken with modification from the DJGPP port of tar 1.12. They use
++ * algorithms originally from DJTAR.
++ */
+
+-static const char *msdosify (const char *file_name)
++/*
++Extra MSDOS sanitization for file_name.
++Returns a copy of file_name that is sanitized by MSDOS standards.
++Warning: path information may pass through. For sanitizing a filename use
++sanitize_file_name which calls this function after sanitizing path info.
++*/
++static char *msdosify(const char *file_name)
+ {
+- static char dos_name[PATH_MAX];
++ char dos_name[PATH_MAX];
+ static const char illegal_chars_dos[] = ".+, ;=[]" /* illegal in DOS */
+ "|<>\\\":?*"; /* illegal in DOS & W95 */
+ static const char *illegal_chars_w95 = &illegal_chars_dos[8];
+ int idx, dot_idx;
+ const char *s = file_name;
+@@ -199,39 +263,89 @@ static const char *msdosify (const char *file_name)
+ else
+ idx++;
+ }
+
+ *d = '\0';
+- return dos_name;
++ return strdup(dos_name);
+ }
+
+-static char *rename_if_dos_device_name (char *file_name)
++/*
++Rename file_name if it's a representation of a device name.
++Returns a copy of file_name, and the copy will have contents different from the
++original if a device name was found.
++*/
++static char *rename_if_dos_device_name(const char *file_name)
+ {
+ /* We could have a file whose name is a device on MS-DOS. Trying to
+ * retrieve such a file would fail at best and wedge us at worst. We need
+ * to rename such files. */
+- char *base;
++ char *p, *base;
+ struct_stat st_buf;
+ char fname[PATH_MAX];
+
+ strncpy(fname, file_name, PATH_MAX-1);
+ fname[PATH_MAX-1] = '\0';
+ base = basename(fname);
+ if(((stat(base, &st_buf)) == 0) && (S_ISCHR(st_buf.st_mode))) {
+ size_t blen = strlen(base);
+
+- if(strlen(fname) >= PATH_MAX-1) {
++ if(strlen(fname) == PATH_MAX-1) {
+ /* Make room for the '_' */
+ blen--;
+ base[blen] = '\0';
+ }
+ /* Prepend a '_'. */
+ memmove(base + 1, base, blen + 1);
+ base[0] = '_';
+- strcpy(file_name, fname);
+ }
+- return file_name;
++
++ /* The above stat check does not identify devices for me in Windows 7. For
++ example a stat on COM1 returns a regular file S_IFREG. According to MSDN
++ stat doc that is the correct behavior, so I assume the above code is
++ legacy, maybe MSDOS or DJGPP specific? */
++
++ /* Rename devices.
++ Examples: CON => _CON, CON.EXT => CON_EXT, CON:ADS => CON_ADS */
++ for(p = fname; p; p = (p == fname && fname != base ? base : NULL)) {
++ size_t p_len;
++ int x = (curl_strnequal(p, "CON", 3) ||
++ curl_strnequal(p, "PRN", 3) ||
++ curl_strnequal(p, "AUX", 3) ||
++ curl_strnequal(p, "NUL", 3)) ? 3 :
++ (curl_strnequal(p, "CLOCK$", 6)) ? 6 :
++ (curl_strnequal(p, "COM", 3) || curl_strnequal(p, "LPT", 3)) ?
++ (('1' <= p[3] && p[3] <= '9') ? 4 : 3) : 0;
++
++ if(!x)
++ continue;
++
++ /* the devices may be accessible with an extension or ADS, for
++ example CON.AIR and CON:AIR both access console */
++ if(p[x] == '.' || p[x] == ':') {
++ p[x] = '_';
++ continue;
++ }
++ else if(p[x]) /* no match */
++ continue;
++
++ p_len = strlen(p);
++
++ if(strlen(fname) == PATH_MAX-1) {
++ /* Make room for the '_' */
++ p_len--;
++ p[p_len] = '\0';
++ }
++ /* Prepend a '_'. */
++ memmove(p + 1, p, p_len + 1);
++ p[0] = '_';
++
++ /* if fname was just modified then the basename pointer must be updated */
++ if(p == fname)
++ base = basename(fname);
++ }
++
++ return strdup(fname);
+ }
+
+ #if defined(MSDOS) && (defined(__DJGPP__) || defined(__GO32__))
+
+ /*
+diff --git a/src/tool_doswin.h b/src/tool_doswin.h
+index cd216db..fc83f16 100644
+--- a/src/tool_doswin.h
++++ b/src/tool_doswin.h
+@@ -23,11 +23,11 @@
+ ***************************************************************************/
+ #include "tool_setup.h"
+
+ #if defined(MSDOS) || defined(WIN32)
+
+-char *sanitize_dos_name(char *file_name);
++CURLcode sanitize_file_name(char **filename);
+
+ #if defined(MSDOS) && (defined(__DJGPP__) || defined(__GO32__))
+
+ char **__crt0_glob_function(char *arg);
+
+diff --git a/src/tool_operate.c b/src/tool_operate.c
+index 30d60cb..272ebd4 100644
+--- a/src/tool_operate.c
++++ b/src/tool_operate.c
+@@ -541,30 +541,41 @@ static CURLcode operate_do(struct GlobalConfig *global,
+ if(!outfile) {
+ /* extract the file name from the URL */
+ result = get_url_file_name(&outfile, this_url);
+ if(result)
+ goto show_error;
++
++#if defined(MSDOS) || defined(WIN32)
++ result = sanitize_file_name(&outfile);
++ if(result) {
++ Curl_safefree(outfile);
++ goto show_error;
++ }
++#endif /* MSDOS || WIN32 */
++
+ if(!*outfile && !config->content_disposition) {
+ helpf(global->errors, "Remote file name has no length!\n");
+ result = CURLE_WRITE_ERROR;
+ goto quit_urls;
+ }
+-#if defined(MSDOS) || defined(WIN32)
+- /* For DOS and WIN32, we do some major replacing of
+- bad characters in the file name before using it */
+- outfile = sanitize_dos_name(outfile);
+- if(!outfile) {
+- result = CURLE_OUT_OF_MEMORY;
+- goto show_error;
+- }
+-#endif /* MSDOS || WIN32 */
+ }
+ else if(urls) {
+ /* fill '#1' ... '#9' terms from URL pattern */
+ char *storefile = outfile;
+ result = glob_match_url(&outfile, storefile, urls);
+ Curl_safefree(storefile);
++
++#if defined(MSDOS) || defined(WIN32)
++ if(!result) {
++ result = sanitize_file_name(&outfile);
++ if(result) {
++ Curl_safefree(outfile);
++ goto show_error;
++ }
++ }
++#endif /* MSDOS || WIN32 */
++
+ if(result) {
+ /* bad globbing */
+ warnf(config->global, "bad output glob!\n");
+ goto quit_urls;
+ }
+--
+2.7.0
+
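
The CVE-2016-0754 backport above changes the sanitizer's contract: sanitize_file_name() now returns a CURLcode, takes ownership of the name only on success, and leaves the caller responsible for freeing the original on failure, as the tool_operate.c hunks further down do. A minimal stand-alone sketch of the same character-replacement rule, using a hypothetical helper rather than curl's actual function:

    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical stand-in for the replacement rule used by curl's
       sanitize_file_name(): control characters (1..31) and characters that
       are invalid in Windows file names become '_' in a fresh copy.
       Returns 0 and stores the copy in *out on success, -1 on failure. */
    static int sanitize_copy(const char *name, char **out)
    {
        static const char banned[] = "|<>/\\\":?*";
        char *copy, *p;

        if (!name || !out)
            return -1;
        copy = strdup(name);
        if (!copy)
            return -1;
        for (p = copy; *p; p++) {
            if ((*p > 0 && *p < 32) || strchr(banned, *p))
                *p = '_';
        }
        *out = copy;
        return 0;
    }

The real function additionally runs msdosify() and rename_if_dos_device_name() on MSDOS/WIN32 builds and enforces a PATH_MAX-derived length limit, which this sketch omits.
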
diff --git a/yocto-poky/meta/recipes-support/curl/curl/CVE-2016-0755.patch b/yocto-poky/meta/recipes-support/curl/curl/CVE-2016-0755.patch
new file mode 100644
index 000000000..44b9d9a3f
--- /dev/null
+++ b/yocto-poky/meta/recipes-support/curl/curl/CVE-2016-0755.patch
@@ -0,0 +1,138 @@
+From d41dcba4e9b69d6b761e3460cc6ae7e8fd8f621f Mon Sep 17 00:00:00 2001
+From: Isaac Boukris <iboukris@gmail.com>
+Date: Wed, 13 Jan 2016 11:05:51 +0200
+Subject: [PATCH] NTLM: Fix ConnectionExists to compare Proxy credentials
+
+Proxy NTLM authentication should compare credentials when
+re-using a connection, similarly to host authentication, as it
+authenticates the connection.
+
+Example:
+curl -v -x http://proxy:port http://host/ -U good_user:good_pwd
+ --proxy-ntlm --next -x http://proxy:port http://host/
+ [-U fake_user:fake_pwd --proxy-ntlm]
+
+CVE-2016-0755
+
+Bug: http://curl.haxx.se/docs/adv_20160127A.html
+
+Upstream-Status: Backport
+http://curl.haxx.se/CVE-2016-0755.patch
+
+CVE: CVE-2016-0755
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ lib/url.c | 62 ++++++++++++++++++++++++++++++++++++++++----------------------
+ 1 file changed, 40 insertions(+), 22 deletions(-)
+
+Index: curl-7.44.0/lib/url.c
+===================================================================
+--- curl-7.44.0.orig/lib/url.c
++++ curl-7.44.0/lib/url.c
+@@ -3107,12 +3107,17 @@ ConnectionExists(struct SessionHandle *d
+ struct connectdata *check;
+ struct connectdata *chosen = 0;
+ bool canPipeline = IsPipeliningPossible(data, needle);
++ struct connectbundle *bundle;
++
+ #ifdef USE_NTLM
+- bool wantNTLMhttp = ((data->state.authhost.want & CURLAUTH_NTLM) ||
+- (data->state.authhost.want & CURLAUTH_NTLM_WB)) &&
+- (needle->handler->protocol & PROTO_FAMILY_HTTP) ? TRUE : FALSE;
++ bool wantNTLMhttp = ((data->state.authhost.want &
++ (CURLAUTH_NTLM | CURLAUTH_NTLM_WB)) &&
++ (needle->handler->protocol & PROTO_FAMILY_HTTP));
++ bool wantProxyNTLMhttp = (needle->bits.proxy_user_passwd &&
++ ((data->state.authproxy.want &
++ (CURLAUTH_NTLM | CURLAUTH_NTLM_WB)) &&
++ (needle->handler->protocol & PROTO_FAMILY_HTTP)));
+ #endif
+- struct connectbundle *bundle;
+
+ *force_reuse = FALSE;
+ *waitpipe = FALSE;
+@@ -3152,9 +3157,6 @@ ConnectionExists(struct SessionHandle *d
+ curr = bundle->conn_list->head;
+ while(curr) {
+ bool match = FALSE;
+-#if defined(USE_NTLM)
+- bool credentialsMatch = FALSE;
+-#endif
+ size_t pipeLen;
+
+ /*
+@@ -3262,21 +3264,14 @@ ConnectionExists(struct SessionHandle *d
+ continue;
+ }
+
+- if((!(needle->handler->flags & PROTOPT_CREDSPERREQUEST))
+-#ifdef USE_NTLM
+- || (wantNTLMhttp || check->ntlm.state != NTLMSTATE_NONE)
+-#endif
+- ) {
+- /* This protocol requires credentials per connection or is HTTP+NTLM,
++ if(!(needle->handler->flags & PROTOPT_CREDSPERREQUEST)) {
++ /* This protocol requires credentials per connection,
+ so verify that we're using the same name and password as well */
+ if(!strequal(needle->user, check->user) ||
+ !strequal(needle->passwd, check->passwd)) {
+ /* one of them was different */
+ continue;
+ }
+-#if defined(USE_NTLM)
+- credentialsMatch = TRUE;
+-#endif
+ }
+
+ if(!needle->bits.httpproxy || needle->handler->flags&PROTOPT_SSL ||
+@@ -3335,20 +3330,43 @@ ConnectionExists(struct SessionHandle *d
+ possible. (Especially we must not reuse the same connection if
+ partway through a handshake!) */
+ if(wantNTLMhttp) {
+- if(credentialsMatch && check->ntlm.state != NTLMSTATE_NONE) {
+- chosen = check;
++ if(!strequal(needle->user, check->user) ||
++ !strequal(needle->passwd, check->passwd))
++ continue;
++ }
++ else if(check->ntlm.state != NTLMSTATE_NONE) {
++ /* Connection is using NTLM auth but we don't want NTLM */
++ continue;
++ }
++
++ /* Same for Proxy NTLM authentication */
++ if(wantProxyNTLMhttp) {
++ if(!strequal(needle->proxyuser, check->proxyuser) ||
++ !strequal(needle->proxypasswd, check->proxypasswd))
++ continue;
++ }
++ else if(check->proxyntlm.state != NTLMSTATE_NONE) {
++ /* Proxy connection is using NTLM auth but we don't want NTLM */
++ continue;
++ }
+
++ if(wantNTLMhttp || wantProxyNTLMhttp) {
++ /* Credentials are already checked, we can use this connection */
++ chosen = check;
++
++ if((wantNTLMhttp &&
++ (check->ntlm.state != NTLMSTATE_NONE)) ||
++ (wantProxyNTLMhttp &&
++ (check->proxyntlm.state != NTLMSTATE_NONE))) {
+ /* We must use this connection, no other */
+ *force_reuse = TRUE;
+ break;
+ }
+- else if(credentialsMatch)
+- /* this is a backup choice */
+- chosen = check;
++
++ /* Continue look up for a better connection */
+ continue;
+ }
+ #endif
+-
+ if(canPipeline) {
+ /* We can pipeline if we want to. Let's continue looking for
+ the optimal connection to use, i.e the shortest pipe that is not
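
The CVE-2016-0755 change above makes ConnectionExists() refuse to reuse a cached connection whenever host or proxy NTLM is wanted and the stored credentials differ, because NTLM authenticates the connection rather than each request. A reduced model of that matching rule, with hypothetical struct fields and a plain strcmp() standing in for curl's connectdata and strequal():

    #include <stdbool.h>
    #include <string.h>

    /* Hypothetical reduced model of the reuse check: an NTLM-authenticated
       connection may only be reused when the user name and password (and,
       for proxy NTLM, the proxy credentials) match the new request. */
    struct conn {
        const char *user, *passwd;
        const char *proxyuser, *proxypasswd;
    };

    static bool same(const char *a, const char *b)
    {
        return (!a && !b) || (a && b && strcmp(a, b) == 0);
    }

    static bool ntlm_reusable(const struct conn *needle, const struct conn *check,
                              bool want_ntlm, bool want_proxy_ntlm)
    {
        if (want_ntlm &&
            (!same(needle->user, check->user) ||
             !same(needle->passwd, check->passwd)))
            return false;
        if (want_proxy_ntlm &&
            (!same(needle->proxyuser, check->proxyuser) ||
             !same(needle->proxypasswd, check->proxypasswd)))
            return false;
        return true;
    }

The patch also sets *force_reuse once an NTLM handshake is already in progress on a matching connection, so the multi-step handshake is never split across connections.
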
diff --git a/yocto-poky/meta/recipes-support/curl/curl_7.44.0.bb b/yocto-poky/meta/recipes-support/curl/curl_7.44.0.bb
index b293303da..419ed8365 100644
--- a/yocto-poky/meta/recipes-support/curl/curl_7.44.0.bb
+++ b/yocto-poky/meta/recipes-support/curl/curl_7.44.0.bb
@@ -12,7 +12,9 @@ SRC_URI = "http://curl.haxx.se/download/curl-${PV}.tar.bz2 \
# curl likes to set -g0 in CFLAGS, so we stop it
# from mucking around with debug options
#
-SRC_URI += " file://configure_ac.patch"
+SRC_URI += " file://configure_ac.patch \
+ file://CVE-2016-0754.patch \
+ file://CVE-2016-0755.patch"
SRC_URI[md5sum] = "6b952ca00e5473b16a11f05f06aa8dae"
SRC_URI[sha256sum] = "1e2541bae6582bb697c0fbae49e1d3e6fad5d05d5aa80dbd6f072e0a44341814"
@@ -45,6 +47,11 @@ do_install_append() {
oe_multilib_header curl/curlbuild.h
}
+do_install_append_class-target() {
+ # cleanup buildpaths from curl-config
+ sed -i -e 's,${STAGING_DIR_HOST},,g' ${D}${bindir}/curl-config
+}
+
PACKAGES =+ "lib${BPN}"
FILES_lib${BPN} = "${libdir}/lib*.so.*"
diff --git a/yocto-poky/meta/recipes-support/gmp/gmp-4.2.1/Use-__gnu_inline__-attribute.patch b/yocto-poky/meta/recipes-support/gmp/gmp-4.2.1/Use-__gnu_inline__-attribute.patch
new file mode 100644
index 000000000..627d71aba
--- /dev/null
+++ b/yocto-poky/meta/recipes-support/gmp/gmp-4.2.1/Use-__gnu_inline__-attribute.patch
@@ -0,0 +1,36 @@
+From 3cb33502bafd04b8ad4ca3454fab16d5ff313297 Mon Sep 17 00:00:00 2001
+From: Jussi Kukkonen <jussi.kukkonen@intel.com>
+Date: Tue, 22 Sep 2015 13:16:23 +0300
+Subject: [PATCH] Use __gnu_inline__ attribute
+
+gcc5 uses C11 inline rules. This means the old "extern inline"
+semantics are not available without a special attribute.
+
+See: https://gcc.gnu.org/gcc-5/porting_to.html
+
+Upstream-Status: Inappropriate [Fixed in current versions]
+Signed-off-by: Jussi Kukkonen <jussi.kukkonen@intel.com>
+---
+ gmp-h.in | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/gmp-h.in b/gmp-h.in
+index eed6fe4..361dd1d 100644
+--- a/gmp-h.in
++++ b/gmp-h.in
+@@ -419,8 +419,11 @@ typedef __mpq_struct *mpq_ptr;
+ /* gcc has __inline__ in all modes, including strict ansi. Give a prototype
+ for an inline too, so as to correctly specify "dllimport" on windows, in
+ case the function is called rather than inlined. */
++
++/* Use __gnu_inline__ attribute: later gcc uses different "extern inline"
++ behaviour */
+ #ifdef __GNUC__
+-#define __GMP_EXTERN_INLINE extern __inline__
++#define __GMP_EXTERN_INLINE extern __inline__ __attribute__ ((__gnu_inline__))
+ #define __GMP_INLINE_PROTOTYPES 1
+ #endif
+
+--
+2.1.4
+
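
The gmp patch above exists because gcc 5 defaults to C11 inline semantics, under which a bare "extern inline" in a header becomes an ordinary external definition in every translation unit that includes it, producing the duplicate-definition link failures the patch works around. Adding __attribute__((__gnu_inline__)) restores the old GNU89 behaviour, where the header body is used only for inlining. A minimal header sketch of the same idiom, independent of gmp:

    /* example.h -- sketch of the "extern inline" idiom the patch restores.
       With gnu_inline semantics the body below is only used for inlining;
       exactly one .c file must still provide an out-of-line definition for
       calls the compiler chooses not to inline. */
    #if defined(__GNUC__)
    #  define MY_EXTERN_INLINE extern __inline__ __attribute__ ((__gnu_inline__))
    #else
    #  define MY_EXTERN_INLINE static inline
    #endif

    MY_EXTERN_INLINE int my_add(int a, int b)
    {
        return a + b;
    }
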
diff --git a/yocto-poky/meta/recipes-support/gmp/gmp-4.2.1/disable-stdc.patch b/yocto-poky/meta/recipes-support/gmp/gmp-4.2.1/disable-stdc.patch
deleted file mode 100644
index 5decb1cec..000000000
--- a/yocto-poky/meta/recipes-support/gmp/gmp-4.2.1/disable-stdc.patch
+++ /dev/null
@@ -1,39 +0,0 @@
-This patch was removed in f181c6ce8b3 when gmp 4.2.1 was mistakenly
-dropped.
-
-Upstream is not interested in patches for ancient versions.
-
-Upstream-Status: Inappropriate
-Signed-off-by: Jussi Kukkonen <jussi.kukkonen@intel.com>
-
-# "extern inline" in traditional gcc means that the function should be
-# inlined wherever it's seen, while in C99, "extern inline" means that i
-# the function should only be inlined where the inline definition is
-# seen while in other places it's not inlined:
-# http://gcc.gnu.org/ml/gcc/2006-11/msg00006.html
-#
-# gmp checks "--std=gnu99" to use C99 convention however it internally
-# defines some "extern inline" functions in gmp.h, which is included
-# by mainly .c files and finally lead a flood of redefinition function
-# errors when linking objects together.
-#
-# So disable C99/ANSI detection to stick to tranditional gcc behavior
-#
-# by Kevin Tian <kevin.tian@intel.com>, 2010-08-13
-#
-# (this patch is licensed under GPLv2+)
-
-diff --git a/configure.in b/configure.in
-index 450cc92..aab0b59 100644
---- a/configure.in
-+++ b/configure.in
-@@ -1869,9 +1869,7 @@ AC_SUBST(DEFN_LONG_LONG_LIMB)
-
- # The C compiler and preprocessor, put into ANSI mode if possible.
- AC_PROG_CC
--AC_PROG_CC_STDC
- AC_PROG_CPP
--GMP_H_ANSI
-
-
- # The C compiler on the build system, and associated tests.
diff --git a/yocto-poky/meta/recipes-support/gmp/gmp_4.2.1.bb b/yocto-poky/meta/recipes-support/gmp/gmp_4.2.1.bb
index 928c01a5b..bfc6a380e 100644
--- a/yocto-poky/meta/recipes-support/gmp/gmp_4.2.1.bb
+++ b/yocto-poky/meta/recipes-support/gmp/gmp_4.2.1.bb
@@ -7,7 +7,7 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=892f569a555ba9c07a568a7c0c4fa63a \
file://COPYING.LIB;md5=fbc093901857fcd118f065f900982c24 \
file://gmp-h.in;beginline=6;endline=21;md5=e056f74a12c3277d730dbcfb85d2ca34"
-SRC_URI += "file://disable-stdc.patch \
+SRC_URI += "file://Use-__gnu_inline__-attribute.patch \
file://gmp_fix_for_automake-1.12.patch \
"
diff --git a/yocto-poky/meta/recipes-support/icu/icu/fix-install-manx.patch b/yocto-poky/meta/recipes-support/icu/icu/fix-install-manx.patch
new file mode 100644
index 000000000..ec63f50c4
--- /dev/null
+++ b/yocto-poky/meta/recipes-support/icu/icu/fix-install-manx.patch
@@ -0,0 +1,48 @@
+The generic recursive target calls target-local so also adding it to the
+dependency list results in races due to install-local being executed twice in
+parallel. For example, install-manx can fail if the two install processes race
+and one process tries to chown a file that the other process has just deleted.
+
+Also install-manx should be a phony target, and for clarity use $^ instead of $?
+in the install command.
+
+Upstream-Status: Pending
+Signed-off-by: Ross Burton <ross.burton@intel.com>
+
+
+diff --git a/Makefile.in b/Makefile.in
+index 9db6c52..3441afa 100644
+--- a/Makefile.in
++++ b/Makefile.in
+@@ -71,7 +71,7 @@ EXTRA_DATA =
+
+ ## List of phony targets
+ .PHONY : all all-local all-recursive install install-local install-udata install-udata-files install-udata-dlls \
+-install-recursive clean clean-local clean-recursive distclean \
++install-recursive install-manx clean clean-local clean-recursive distclean \
+ distclean-local distclean-recursive doc dist dist-local dist-recursive \
+ check check-local check-recursive clean-recursive-with-twist install-icu \
+ doc install-doc tests icu4j-data icu4j-data-install update-windows-makefiles xcheck-local xcheck-recursive xperf xcheck xperf-recursive \
+@@ -82,10 +82,10 @@ check-exhaustive check-exhaustive-local check-exhaustive-recursive releaseDist
+
+ ## List of standard targets
+ all: all-local all-recursive
+-install: install-recursive install-local
++install: install-recursive
+ clean: clean-recursive-with-twist clean-local
+-distclean : distclean-recursive distclean-local
+-dist: dist-recursive dist-local
++distclean : distclean-recursive
++dist: dist-recursive
+ check: all check-recursive
+ check-recursive: all
+ xcheck: all xcheck-recursive
+@@ -352,7 +352,7 @@ config.status: $(srcdir)/configure $(srcdir)/common/unicode/uvernum.h
+
+ install-manx: $(MANX_FILES)
+ $(MKINSTALLDIRS) $(DESTDIR)$(mandir)/man$(SECTION)
+- $(INSTALL_DATA) $? $(DESTDIR)$(mandir)/man$(SECTION)
++ $(INSTALL_DATA) $^ $(DESTDIR)$(mandir)/man$(SECTION)
+
+ config/%.$(SECTION): $(srcdir)/config/%.$(SECTION).in
+ cd $(top_builddir) \
diff --git a/yocto-poky/meta/recipes-support/icu/icu_55.1.bb b/yocto-poky/meta/recipes-support/icu/icu_55.1.bb
index f63a9bd08..e91b6f3ab 100644
--- a/yocto-poky/meta/recipes-support/icu/icu_55.1.bb
+++ b/yocto-poky/meta/recipes-support/icu/icu_55.1.bb
@@ -8,9 +8,14 @@ def icu_download_version(d):
ICU_PV = "${@icu_download_version(d)}"
+# http://errors.yoctoproject.org/Errors/Details/20486/
+ARM_INSTRUCTION_SET_armv4 = "arm"
+ARM_INSTRUCTION_SET_armv5 = "arm"
+
BASE_SRC_URI = "http://download.icu-project.org/files/icu4c/${PV}/icu4c-${ICU_PV}-src.tgz"
SRC_URI = "${BASE_SRC_URI} \
file://icu-pkgdata-large-cmd.patch \
+ file://fix-install-manx.patch \
"
SRC_URI_append_class-target = "\
diff --git a/yocto-poky/meta/recipes-support/libbsd/files/CVE-2016-2090.patch b/yocto-poky/meta/recipes-support/libbsd/files/CVE-2016-2090.patch
new file mode 100644
index 000000000..2eaae1386
--- /dev/null
+++ b/yocto-poky/meta/recipes-support/libbsd/files/CVE-2016-2090.patch
@@ -0,0 +1,50 @@
+From c8f0723d2b4520bdd6b9eb7c3e7976de726d7ff7 Mon Sep 17 00:00:00 2001
+From: Hanno Boeck <hanno@hboeck.de>
+Date: Wed, 27 Jan 2016 15:10:11 +0100
+Subject: [PATCH] Fix heap buffer overflow in fgetwln()
+
+In the function fgetwln() there's a 4 byte heap overflow.
+
+There is a while loop that has this check to see whether there's still
+enough space in the buffer:
+
+ if (!fb->len || wused > fb->len) {
+
+If this is true more memory gets allocated. However this test won't be
+true if wused == fb->len, but at that point wused already points out
+of the buffer. Some lines later there's a write to the buffer:
+
+ fb->wbuf[wused++] = wc;
+
+This bug was found with the help of address sanitizer.
+
+Warned-by: ASAN
+Fixes: https://bugs.freedesktop.org/show_bug.cgi?id=93881
+Signed-off-by: Guillem Jover <guillem@hadrons.org>
+
+Upstream-Status: Backport
+http://cgit.freedesktop.org/libbsd/commit/?id=c8f0723d2b4520bdd6b9eb7c3e7976de726d7ff7
+
+CVE: CVE-2016-2090
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ src/fgetwln.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/src/fgetwln.c b/src/fgetwln.c
+index 9ee0776..aa3f927 100644
+--- a/src/fgetwln.c
++++ b/src/fgetwln.c
+@@ -60,7 +60,7 @@ fgetwln(FILE *stream, size_t *lenp)
+ fb->fp = stream;
+
+ while ((wc = fgetwc(stream)) != WEOF) {
+- if (!fb->len || wused > fb->len) {
++ if (!fb->len || wused >= fb->len) {
+ wchar_t *wp;
+
+ if (fb->len)
+--
+2.3.5
+
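
The libbsd fix above is a textbook off-by-one in a grow-before-write check: with wused == fb->len the old test skips the reallocation, yet the subsequent fb->wbuf[wused++] = wc writes one element (4 bytes for a wchar_t) past the buffer. The general rule is to grow whenever the next write index is greater than or equal to the current capacity; a small self-contained sketch of the corrected pattern:

    #include <stdlib.h>

    /* Grow-before-write, as fixed in fgetwln(): valid indices are
       0..len-1, so growth must happen when used >= len, not only when
       used > len. */
    struct buf {
        int *data;
        size_t used;
        size_t len;
    };

    static int buf_push(struct buf *b, int value)
    {
        if (b->used >= b->len) {        /* '>' here overflows by one element */
            size_t nlen = b->len ? b->len * 2 : 16;
            int *ndata = realloc(b->data, nlen * sizeof(*ndata));
            if (!ndata)
                return -1;
            b->data = ndata;
            b->len = nlen;
        }
        b->data[b->used++] = value;
        return 0;
    }
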
diff --git a/yocto-poky/meta/recipes-support/libbsd/libbsd_0.7.0.bb b/yocto-poky/meta/recipes-support/libbsd/libbsd_0.7.0.bb
index 902666da7..8d9a708a1 100644
--- a/yocto-poky/meta/recipes-support/libbsd/libbsd_0.7.0.bb
+++ b/yocto-poky/meta/recipes-support/libbsd/libbsd_0.7.0.bb
@@ -13,7 +13,9 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=f1530ea92aeaa1c5e2547cfd43905d8c"
SECTION = "libs"
DEPENDS = ""
-SRC_URI = "http://libbsd.freedesktop.org/releases/${BPN}-${PV}.tar.xz"
+SRC_URI = "http://libbsd.freedesktop.org/releases/${BPN}-${PV}.tar.xz \
+ file://CVE-2016-2090.patch \
+ "
SRC_URI[md5sum] = "fcceb4e66fd448ca4ed42ba22a8babb0"
SRC_URI[sha256sum] = "0f3b0e17e5c34c038126e0a04351b11e23c6101a7d0ce3beeab29bb6415c10bb"
diff --git a/yocto-poky/meta/recipes-support/libfm/libfm-extra_1.2.3.bb b/yocto-poky/meta/recipes-support/libfm/libfm-extra_1.2.3.bb
index 46d2d62b9..36752692b 100644
--- a/yocto-poky/meta/recipes-support/libfm/libfm-extra_1.2.3.bb
+++ b/yocto-poky/meta/recipes-support/libfm/libfm-extra_1.2.3.bb
@@ -5,7 +5,7 @@ LICENSE = "LGPLv2+"
LIC_FILES_CHKSUM = "file://src/fm-extra.h;beginline=8;endline=21;md5=ef1f84da64b3c01cca447212f7ef6007"
SECTION = "x11/libs"
-DEPENDS = "glib-2.0 intltool-native"
+DEPENDS = "glib-2.0 intltool-native gettext-native"
SRC_URI = "${SOURCEFORGE_MIRROR}/pcmanfm/libfm-${PV}.tar.xz \
file://0001-nls.m4-Take-it-from-gettext-0.15.patch \
diff --git a/yocto-poky/meta/recipes-support/libfm/libfm_1.2.3.bb b/yocto-poky/meta/recipes-support/libfm/libfm_1.2.3.bb
index 629502f68..e9ff6569c 100644
--- a/yocto-poky/meta/recipes-support/libfm/libfm_1.2.3.bb
+++ b/yocto-poky/meta/recipes-support/libfm/libfm_1.2.3.bb
@@ -9,7 +9,7 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=59530bdf33659b29e73d4adb9f9f6552 \
SECTION = "x11/libs"
-DEPENDS = "glib-2.0 pango gtk+ menu-cache intltool-native libexif libfm-extra"
+DEPENDS = "glib-2.0 pango gtk+ menu-cache intltool-native libexif libfm-extra gettext-native"
SRC_URI = "${SOURCEFORGE_MIRROR}/pcmanfm/libfm-${PV}.tar.xz"
@@ -18,7 +18,8 @@ SRC_URI[sha256sum] = "c692f1624a4cbc8d1dd55f3b3f3369fbf5d26f63a916e2c295230b2344
PR = "r1"
-inherit autotools pkgconfig gtk-doc
+inherit autotools pkgconfig gtk-doc distro_features_check
+ANY_OF_DISTRO_FEATURES = "${GTK2DISTROFEATURES}"
do_configure[dirs] =+ "${S}/m4"
diff --git a/yocto-poky/meta/recipes-support/libgcrypt/files/CVE-2015-7511_1.patch b/yocto-poky/meta/recipes-support/libgcrypt/files/CVE-2015-7511_1.patch
new file mode 100644
index 000000000..14c25b9ad
--- /dev/null
+++ b/yocto-poky/meta/recipes-support/libgcrypt/files/CVE-2015-7511_1.patch
@@ -0,0 +1,245 @@
+From 2ef48ba59c32bfa1a9265d5eea8ab225a658903a Mon Sep 17 00:00:00 2001
+From: Werner Koch <wk@gnupg.org>
+Date: Thu, 9 Jan 2014 19:14:09 +0100
+Subject: [PATCH] ecc: Make a macro shorter.
+
+* src/mpi.h (MPI_EC_TWISTEDEDWARDS): Rename to MPI_EC_EDWARDS. CHnage
+all users.
+* cipher/ecc-curves.c (domain_parms): Add parameters for Curve3617 as
+comment.
+* mpi/ec.c (dup_point_twistededwards): Rename to dup_point_edwards.
+(add_points_twistededwards): Rename to add_points_edwards.
+
+Signed-off-by: Werner Koch <wk@gnupg.org>
+
+Upstream-Status: Backport
+2ef48ba59c32bfa1a9265d5eea8ab225a658903a
+
+CVE: CVE-2015-7511 depend patch
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ cipher/ecc-curves.c | 22 +++++++++++++++++++---
+ cipher/ecc-misc.c | 4 ++--
+ cipher/ecc.c | 8 ++++----
+ mpi/ec.c | 22 +++++++++++-----------
+ src/mpi.h | 11 ++++++++---
+ 5 files changed, 44 insertions(+), 23 deletions(-)
+
+Index: libgcrypt-1.6.3/cipher/ecc-curves.c
+===================================================================
+--- libgcrypt-1.6.3.orig/cipher/ecc-curves.c
++++ libgcrypt-1.6.3/cipher/ecc-curves.c
+@@ -105,7 +105,7 @@ static const ecc_domain_parms_t domain_p
+ {
+ /* (-x^2 + y^2 = 1 + dx^2y^2) */
+ "Ed25519", 256, 0,
+- MPI_EC_TWISTEDEDWARDS, ECC_DIALECT_ED25519,
++ MPI_EC_EDWARDS, ECC_DIALECT_ED25519,
+ "0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFED",
+ "-0x01",
+ "-0x2DFC9311D490018C7338BF8688861767FF8FF5B2BEBE27548A14B235ECA6874A",
+@@ -113,6 +113,22 @@ static const ecc_domain_parms_t domain_p
+ "0x216936D3CD6E53FEC0A4E231FDD6DC5C692CC7609525A7B2C9562D608F25D51A",
+ "0x6666666666666666666666666666666666666666666666666666666666666658"
+ },
++#if 0 /* No real specs yet found. */
++ {
++ /* x^2 + y^2 = 1 + 3617x^2y^2 mod 2^414 - 17 */
++ "Curve3617",
++ "0x3FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"
++ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEF",
++ MPI_EC_EDWARDS, 0,
++ "0x01",
++ "0x0e21",
++ "0x07FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEB3CC92414CF"
++ "706022B36F1C0338AD63CF181B0E71A5E106AF79",
++ "0x1A334905141443300218C0631C326E5FCD46369F44C03EC7F57FF35498A4AB4D"
++ "6D6BA111301A73FAA8537C64C4FD3812F3CBC595",
++ "0x22"
++ },
++#endif /*0*/
+ {
+ "NIST P-192", 192, 1,
+ MPI_EC_WEIERSTRASS, ECC_DIALECT_STANDARD,
+@@ -404,7 +420,7 @@ _gcry_ecc_fill_in_curve (unsigned int nb
+ switch (domain_parms[idx].model)
+ {
+ case MPI_EC_WEIERSTRASS:
+- case MPI_EC_TWISTEDEDWARDS:
++ case MPI_EC_EDWARDS:
+ break;
+ case MPI_EC_MONTGOMERY:
+ return GPG_ERR_NOT_SUPPORTED;
+@@ -1039,7 +1055,7 @@ _gcry_ecc_get_mpi (const char *name, mpi
+ if (name[1] != '@')
+ return _gcry_mpi_ec_ec2os (ec->Q, ec);
+
+- if (!strcmp (name+2, "eddsa") && ec->model == MPI_EC_TWISTEDEDWARDS)
++ if (!strcmp (name+2, "eddsa") && ec->model == MPI_EC_EDWARDS)
+ {
+ unsigned char *encpk;
+ unsigned int encpklen;
+Index: libgcrypt-1.6.3/cipher/ecc-misc.c
+===================================================================
+--- libgcrypt-1.6.3.orig/cipher/ecc-misc.c
++++ libgcrypt-1.6.3/cipher/ecc-misc.c
+@@ -79,7 +79,7 @@ _gcry_ecc_model2str (enum gcry_mpi_ec_mo
+ {
+ case MPI_EC_WEIERSTRASS: str = "Weierstrass"; break;
+ case MPI_EC_MONTGOMERY: str = "Montgomery"; break;
+- case MPI_EC_TWISTEDEDWARDS: str = "Twisted Edwards"; break;
++ case MPI_EC_EDWARDS: str = "Edwards"; break;
+ }
+ return str;
+ }
+@@ -252,7 +252,7 @@ _gcry_ecc_compute_public (mpi_point_t Q,
+
+ if (!d || !G || !ec->p || !ec->a)
+ return NULL;
+- if (ec->model == MPI_EC_TWISTEDEDWARDS && !ec->b)
++ if (ec->model == MPI_EC_EDWARDS && !ec->b)
+ return NULL;
+
+ if (ec->dialect == ECC_DIALECT_ED25519
+Index: libgcrypt-1.6.3/cipher/ecc.c
+===================================================================
+--- libgcrypt-1.6.3.orig/cipher/ecc.c
++++ libgcrypt-1.6.3/cipher/ecc.c
+@@ -642,7 +642,7 @@ ecc_check_secret_key (gcry_sexp_t keypar
+ if (!curvename)
+ {
+ sk.E.model = ((flags & PUBKEY_FLAG_EDDSA)
+- ? MPI_EC_TWISTEDEDWARDS
++ ? MPI_EC_EDWARDS
+ : MPI_EC_WEIERSTRASS);
+ sk.E.dialect = ((flags & PUBKEY_FLAG_EDDSA)
+ ? ECC_DIALECT_ED25519
+@@ -774,7 +774,7 @@ ecc_sign (gcry_sexp_t *r_sig, gcry_sexp_
+ if (!curvename)
+ {
+ sk.E.model = ((ctx.flags & PUBKEY_FLAG_EDDSA)
+- ? MPI_EC_TWISTEDEDWARDS
++ ? MPI_EC_EDWARDS
+ : MPI_EC_WEIERSTRASS);
+ sk.E.dialect = ((ctx.flags & PUBKEY_FLAG_EDDSA)
+ ? ECC_DIALECT_ED25519
+@@ -938,7 +938,7 @@ ecc_verify (gcry_sexp_t s_sig, gcry_sexp
+ if (!curvename)
+ {
+ pk.E.model = ((sigflags & PUBKEY_FLAG_EDDSA)
+- ? MPI_EC_TWISTEDEDWARDS
++ ? MPI_EC_EDWARDS
+ : MPI_EC_WEIERSTRASS);
+ pk.E.dialect = ((sigflags & PUBKEY_FLAG_EDDSA)
+ ? ECC_DIALECT_ED25519
+@@ -1528,7 +1528,7 @@ compute_keygrip (gcry_md_hd_t md, gcry_s
+ if (!curvename)
+ {
+ model = ((flags & PUBKEY_FLAG_EDDSA)
+- ? MPI_EC_TWISTEDEDWARDS
++ ? MPI_EC_EDWARDS
+ : MPI_EC_WEIERSTRASS);
+ dialect = ((flags & PUBKEY_FLAG_EDDSA)
+ ? ECC_DIALECT_ED25519
+Index: libgcrypt-1.6.3/mpi/ec.c
+===================================================================
+--- libgcrypt-1.6.3.orig/mpi/ec.c
++++ libgcrypt-1.6.3/mpi/ec.c
+@@ -605,7 +605,7 @@ _gcry_mpi_ec_get_affine (gcry_mpi_t x, g
+ }
+ return -1;
+
+- case MPI_EC_TWISTEDEDWARDS:
++ case MPI_EC_EDWARDS:
+ {
+ gcry_mpi_t z;
+
+@@ -725,7 +725,7 @@ dup_point_montgomery (mpi_point_t result
+
+ /* RESULT = 2 * POINT (Twisted Edwards version). */
+ static void
+-dup_point_twistededwards (mpi_point_t result, mpi_point_t point, mpi_ec_t ctx)
++dup_point_edwards (mpi_point_t result, mpi_point_t point, mpi_ec_t ctx)
+ {
+ #define X1 (point->x)
+ #define Y1 (point->y)
+@@ -811,8 +811,8 @@ _gcry_mpi_ec_dup_point (mpi_point_t resu
+ case MPI_EC_MONTGOMERY:
+ dup_point_montgomery (result, point, ctx);
+ break;
+- case MPI_EC_TWISTEDEDWARDS:
+- dup_point_twistededwards (result, point, ctx);
++ case MPI_EC_EDWARDS:
++ dup_point_edwards (result, point, ctx);
+ break;
+ }
+ }
+@@ -977,9 +977,9 @@ add_points_montgomery (mpi_point_t resul
+
+ /* RESULT = P1 + P2 (Twisted Edwards version).*/
+ static void
+-add_points_twistededwards (mpi_point_t result,
+- mpi_point_t p1, mpi_point_t p2,
+- mpi_ec_t ctx)
++add_points_edwards (mpi_point_t result,
++ mpi_point_t p1, mpi_point_t p2,
++ mpi_ec_t ctx)
+ {
+ #define X1 (p1->x)
+ #define Y1 (p1->y)
+@@ -1087,8 +1087,8 @@ _gcry_mpi_ec_add_points (mpi_point_t res
+ case MPI_EC_MONTGOMERY:
+ add_points_montgomery (result, p1, p2, ctx);
+ break;
+- case MPI_EC_TWISTEDEDWARDS:
+- add_points_twistededwards (result, p1, p2, ctx);
++ case MPI_EC_EDWARDS:
++ add_points_edwards (result, p1, p2, ctx);
+ break;
+ }
+ }
+@@ -1106,7 +1106,7 @@ _gcry_mpi_ec_mul_point (mpi_point_t resu
+ unsigned int i, loops;
+ mpi_point_struct p1, p2, p1inv;
+
+- if (ctx->model == MPI_EC_TWISTEDEDWARDS)
++ if (ctx->model == MPI_EC_EDWARDS)
+ {
+ /* Simple left to right binary method. GECC Algorithm 3.27 */
+ unsigned int nbits;
+@@ -1269,7 +1269,7 @@ _gcry_mpi_ec_curve_point (gcry_mpi_point
+ log_fatal ("%s: %s not yet supported\n",
+ "_gcry_mpi_ec_curve_point", "Montgomery");
+ break;
+- case MPI_EC_TWISTEDEDWARDS:
++ case MPI_EC_EDWARDS:
+ {
+ /* a · x^2 + y^2 - 1 - b · x^2 · y^2 == 0 */
+ ec_pow2 (x, x, ctx);
+Index: libgcrypt-1.6.3/src/mpi.h
+===================================================================
+--- libgcrypt-1.6.3.orig/src/mpi.h
++++ libgcrypt-1.6.3/src/mpi.h
+@@ -245,13 +245,18 @@ void _gcry_mpi_snatch_point (gcry_mpi_t
+ /* Models describing an elliptic curve. */
+ enum gcry_mpi_ec_models
+ {
+-
++ /* The Short Weierstrass equation is
++ y^2 = x^3 + ax + b
++ */
+ MPI_EC_WEIERSTRASS = 0,
++ /* The Montgomery equation is
++ by^2 = x^3 + ax^2 + x
++ */
+ MPI_EC_MONTGOMERY,
+- MPI_EC_TWISTEDEDWARDS
+- /* The equation for Twisted Edwards curves is
++ /* The Twisted Edwards equation is
+ ax^2 + y^2 = 1 + bx^2y^2
+ Note that we use 'b' instead of the commonly used 'd'. */
++ MPI_EC_EDWARDS
+ };
+
+ /* Dialects used with elliptic curves. It is easier to keep the
diff --git a/yocto-poky/meta/recipes-support/libgcrypt/files/CVE-2015-7511_2.patch b/yocto-poky/meta/recipes-support/libgcrypt/files/CVE-2015-7511_2.patch
new file mode 100644
index 000000000..8093a18cf
--- /dev/null
+++ b/yocto-poky/meta/recipes-support/libgcrypt/files/CVE-2015-7511_2.patch
@@ -0,0 +1,55 @@
+From 88e1358962e902ff1cbec8d53ba3eee46407851a Mon Sep 17 00:00:00 2001
+From: NIIBE Yutaka <gniibe@fsij.org>
+Date: Wed, 25 Nov 2015 12:46:19 +0900
+Subject: [PATCH] ecc: Constant-time multiplication for Weierstrass curve.
+
+* mpi/ec.c (_gcry_mpi_ec_mul_point): Use simple left-to-right binary
+method for Weierstrass curve when SCALAR is secure.
+
+Upstream-Status: Backport
+
+http://git.gnupg.org/cgi-bin/gitweb.cgi?p=libgcrypt.git;a=commit;h=88e1358962e902ff1cbec8d53ba3eee46407851a
+
+CVE: CVE-2015-7511 fix
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ mpi/ec.c | 19 +++++++++++++++----
+ 1 file changed, 15 insertions(+), 4 deletions(-)
+
+Index: libgcrypt-1.6.3/mpi/ec.c
+===================================================================
+--- libgcrypt-1.6.3.orig/mpi/ec.c
++++ libgcrypt-1.6.3/mpi/ec.c
+@@ -1106,16 +1106,27 @@ _gcry_mpi_ec_mul_point (mpi_point_t resu
+ unsigned int i, loops;
+ mpi_point_struct p1, p2, p1inv;
+
+- if (ctx->model == MPI_EC_EDWARDS)
++ if (ctx->model == MPI_EC_EDWARDS
++ || (ctx->model == MPI_EC_WEIERSTRASS
++ && mpi_is_secure (scalar)))
+ {
+ /* Simple left to right binary method. GECC Algorithm 3.27 */
+ unsigned int nbits;
+ int j;
+
+ nbits = mpi_get_nbits (scalar);
+- mpi_set_ui (result->x, 0);
+- mpi_set_ui (result->y, 1);
+- mpi_set_ui (result->z, 1);
++ if (ctx->model == MPI_EC_WEIERSTRASS)
++ {
++ mpi_set_ui (result->x, 1);
++ mpi_set_ui (result->y, 1);
++ mpi_set_ui (result->z, 0);
++ }
++ else
++ {
++ mpi_set_ui (result->x, 0);
++ mpi_set_ui (result->y, 1);
++ mpi_set_ui (result->z, 1);
++ }
+
+ if (mpi_is_secure (scalar))
+ {
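
The second CVE-2015-7511 patch routes secret scalars on Weierstrass curves through the same simple left-to-right binary method already used for Edwards curves (GECC Algorithm 3.27), starting from the point at infinity expressed in projective coordinates as (1 : 1 : 0). The loop shape is easy to see on plain integers, where point doubling becomes multiplication by two and point addition becomes addition; this sketch illustrates only the bit-scanning order, not the side-channel precautions the real code applies to secure scalars:

    #include <stdint.h>

    /* Left-to-right binary method on integers: computes k * p by scanning
       the bits of k from most significant to least significant, doubling
       the accumulator each step and adding p when the bit is set.  On a
       curve, the doubling and addition are point operations instead. */
    static uint64_t mul_left_to_right(uint64_t k, uint64_t p)
    {
        uint64_t acc = 0;               /* analogue of the point at infinity */
        int i;

        for (i = 63; i >= 0; i--) {
            acc = acc * 2;              /* dup_point() */
            if ((k >> i) & 1)
                acc = acc + p;          /* add_points() */
        }
        return acc;                     /* arithmetic wraps mod 2^64 */
    }
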
diff --git a/yocto-poky/meta/recipes-support/libgcrypt/libgcrypt_1.6.3.bb b/yocto-poky/meta/recipes-support/libgcrypt/libgcrypt_1.6.3.bb
index cd06ce73a..db89faf80 100644
--- a/yocto-poky/meta/recipes-support/libgcrypt/libgcrypt_1.6.3.bb
+++ b/yocto-poky/meta/recipes-support/libgcrypt/libgcrypt_1.6.3.bb
@@ -1,4 +1,8 @@
require libgcrypt.inc
+SRC_URI += "\
+ file://CVE-2015-7511_1.patch \
+ file://CVE-2015-7511_2.patch \
+ "
SRC_URI[md5sum] = "de03b867d02fdf115a1bac8bb8b5c3a3"
SRC_URI[sha256sum] = "69e94e1a7084d94e1a6ca26d436068cb74862d10a7353cfae579a2d88674ff09"
diff --git a/yocto-poky/meta/recipes-support/libgpg-error/libgpg-error/0001-libgpg-error-Add-nios2-support.patch b/yocto-poky/meta/recipes-support/libgpg-error/libgpg-error/0001-libgpg-error-Add-nios2-support.patch
new file mode 100644
index 000000000..dab1c13b6
--- /dev/null
+++ b/yocto-poky/meta/recipes-support/libgpg-error/libgpg-error/0001-libgpg-error-Add-nios2-support.patch
@@ -0,0 +1,46 @@
+From b2af652f43991e4ce6297917da542a9eb5135939 Mon Sep 17 00:00:00 2001
+From: Marek Vasut <marex@denx.de>
+Date: Thu, 17 Sep 2015 03:28:06 +0200
+Subject: [PATCH] libgpg-error: Add nios2 support
+
+Add configuration for the NIOS2 processor.
+
+Signed-off-by: Marek Vasut <marex@denx.de>
+Upstream-Status: Submitted
+---
+ src/syscfg/lock-obj-pub.nios2-unknown-linux-gnu.h | 23 +++++++++++++++++++++++
+ 1 file changed, 23 insertions(+)
+ create mode 100644 src/syscfg/lock-obj-pub.nios2-unknown-linux-gnu.h
+
+diff --git a/src/syscfg/lock-obj-pub.nios2-unknown-linux-gnu.h b/src/syscfg/lock-obj-pub.nios2-unknown-linux-gnu.h
+new file mode 100644
+index 0000000..3a24571
+--- /dev/null
++++ b/src/syscfg/lock-obj-pub.nios2-unknown-linux-gnu.h
+@@ -0,0 +1,23 @@
++## lock-obj-pub.nios2-unknown-linux-gnu.h
++## File created by gen-posix-lock-obj - DO NOT EDIT
++## To be included by mkheader into gpg-error.h
++
++typedef struct
++{
++ long _vers;
++ union {
++ volatile char _priv[24];
++ long _x_align;
++ long *_xp_align;
++ } u;
++} gpgrt_lock_t;
++
++#define GPGRT_LOCK_INITIALIZER {1,{{0,0,0,0,0,0,0,0, \
++ 0,0,0,0,0,0,0,0, \
++ 0,0,0,0,0,0,0,0}}}
++##
++## Local Variables:
++## mode: c
++## buffer-read-only: t
++## End:
++##
+--
+2.5.1
+
diff --git a/yocto-poky/meta/recipes-support/libgpg-error/libgpg-error_1.19.bb b/yocto-poky/meta/recipes-support/libgpg-error/libgpg-error_1.19.bb
index 39dbbcf33..c69930a92 100644
--- a/yocto-poky/meta/recipes-support/libgpg-error/libgpg-error_1.19.bb
+++ b/yocto-poky/meta/recipes-support/libgpg-error/libgpg-error_1.19.bb
@@ -12,7 +12,9 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=59530bdf33659b29e73d4adb9f9f6552 \
SECTION = "libs"
SRC_URI = "ftp://ftp.gnupg.org/gcrypt/libgpg-error/libgpg-error-${PV}.tar.bz2 \
- file://pkgconfig.patch"
+ file://pkgconfig.patch \
+ file://0001-libgpg-error-Add-nios2-support.patch \
+ "
SRC_URI[md5sum] = "c04c16245b92829281f43b5bef7d16da"
SRC_URI[sha256sum] = "53120e1333d5c5d28d87ff2854e9e98719c8e214152f17ad5291704d25c4978b"
diff --git a/yocto-poky/meta/recipes-support/libksba/libksba/ksba-add-pkgconfig-support.patch b/yocto-poky/meta/recipes-support/libksba/libksba/ksba-add-pkgconfig-support.patch
index b5886c5b9..d28dfd9eb 100644
--- a/yocto-poky/meta/recipes-support/libksba/libksba/ksba-add-pkgconfig-support.patch
+++ b/yocto-poky/meta/recipes-support/libksba/libksba/ksba-add-pkgconfig-support.patch
@@ -51,7 +51,7 @@ Index: libksba-1.3.0/ksba.pc.in
+Requires:
+Version: @VERSION@
+Libs: -L${libdir} -lksba
-+Libs.private: -L{libdir} -lgpg-error
++Libs.private: -L${libdir} -lgpg-error
+Cflags: -I${includedir}
+
Index: libksba-1.3.0/src/ksba.m4
diff --git a/yocto-poky/meta/recipes-support/libpcre/libpcre_8.37.bb b/yocto-poky/meta/recipes-support/libpcre/libpcre_8.38.bb
index bcfc9e9af..c5676073e 100644
--- a/yocto-poky/meta/recipes-support/libpcre/libpcre_8.37.bb
+++ b/yocto-poky/meta/recipes-support/libpcre/libpcre_8.38.bb
@@ -14,14 +14,19 @@ SRC_URI = "${SOURCEFORGE_MIRROR}/project/pcre/pcre/${PV}/pcre-${PV}.tar.bz2 \
file://Makefile \
"
-SRC_URI[md5sum] = "ed91be292cb01d21bc7e526816c26981"
-SRC_URI[sha256sum] = "51679ea8006ce31379fb0860e46dd86665d864b5020fc9cd19e71260eef4789d"
+SRC_URI[md5sum] = "00aabbfe56d5a48b270f999b508c5ad2"
+SRC_URI[sha256sum] = "b9e02d36e23024d6c02a2e5b25204b3a4fa6ade43e0a5f869f254f49535079df"
S = "${WORKDIR}/pcre-${PV}"
PROVIDES += "pcre"
DEPENDS += "bzip2 zlib"
+PACKAGECONFIG ??= "pcre8"
+
+PACKAGECONFIG[pcre8] = "--enable-pcre8,--disable-pcre8"
+PACKAGECONFIG[pcre16] = "--enable-pcre16,--disable-pcre16"
+PACKAGECONFIG[pcre32] = "--enable-pcre32,--disable-pcre32"
PACKAGECONFIG[pcretest-readline] = "--enable-pcretest-libreadline,--disable-pcretest-libreadline,readline,"
BINCONFIG = "${bindir}/pcre-config"
diff --git a/yocto-poky/meta/recipes-support/libunwind/libunwind-1.1/Add-AO_REQUIRE_CAS-to-fix-build-on-ARM-v6.patch b/yocto-poky/meta/recipes-support/libunwind/libunwind-1.1/Add-AO_REQUIRE_CAS-to-fix-build-on-ARM-v6.patch
new file mode 100644
index 000000000..d55250281
--- /dev/null
+++ b/yocto-poky/meta/recipes-support/libunwind/libunwind-1.1/Add-AO_REQUIRE_CAS-to-fix-build-on-ARM-v6.patch
@@ -0,0 +1,61 @@
+From 24484e80b3e329c9edee1995e102f8612eedb79c Mon Sep 17 00:00:00 2001
+From: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+Date: Tue, 13 May 2014 23:32:27 +0200
+Subject: [PATCH] Add AO_REQUIRE_CAS to fix build on ARM < v6
+
+ARM earlier than ARMv6, such as ARMv4 and ARMv5 do not provide
+optimize atomic operations in libatomic_ops. Since libunwind is using
+such operations, it should define AO_REQUIRE_CAS before including
+<atomic_ops.h> so that libatomic_ops knows it should use emulated
+atomic operations instead (even though they are obviously a lot more
+expensive).
+
+Also, while real atomic operations are all inline functions and
+therefore linking against libatomic_ops was not required, the emulated
+atomic operations actually require linking against libatomic_ops, so
+the commented AC_CHECK_LIB test in acinclude.m4 is uncommented to make
+sure we link against libatomic_ops.
+
+Signed-off-by: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+
+Upstream-Status: Pending
+Taken from:
+https://raw.githubusercontent.com/rdnetto/teapot-buildroot/master/package/libunwind/libunwind-02-Add-AO_REQUIRE_CAS-to-fix-build-on-ARM-v6.patch
+
+---
+ acinclude.m4 | 8 +-------
+ include/libunwind_i.h | 1 +
+ 2 files changed, 2 insertions(+), 7 deletions(-)
+
+diff --git a/acinclude.m4 b/acinclude.m4
+index 497f7c2..9c15af1 100644
+--- a/acinclude.m4
++++ b/acinclude.m4
+@@ -22,11 +22,5 @@ fi])
+ AC_DEFUN([CHECK_ATOMIC_OPS],
+ [dnl Check whether the system has the atomic_ops package installed.
+ AC_CHECK_HEADERS(atomic_ops.h)
+-#
+-# Don't link against libatomic_ops for now. We don't want libunwind
+-# to depend on libatomic_ops.so. Fortunately, none of the platforms
+-# we care about so far need libatomic_ops.a (everything is done via
+-# inline macros).
+-#
+-# AC_CHECK_LIB(atomic_ops, main)
++ AC_CHECK_LIB(atomic_ops, main)
+ ])
+diff --git a/include/libunwind_i.h b/include/libunwind_i.h
+index 23f615e..deabdfd 100644
+--- a/include/libunwind_i.h
++++ b/include/libunwind_i.h
+@@ -95,6 +95,7 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+ (pthread_mutex_unlock != NULL ? pthread_mutex_unlock (l) : 0)
+
+ #ifdef HAVE_ATOMIC_OPS_H
++# define AO_REQUIRE_CAS
+ # include <atomic_ops.h>
+ static inline int
+ cmpxchg_ptr (void *addr, void *old, void *new)
+--
+1.9.2
+
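
The only functional change the libunwind patch makes to the header is ordering: AO_REQUIRE_CAS must be defined before atomic_ops.h is included, so that on targets without native atomics (ARM older than v6) libatomic_ops falls back to its emulated compare-and-swap, which is an out-of-line function and therefore needs -latomic_ops at link time (hence the acinclude.m4 change). A sketch of the ordering, assuming the usual AO_compare_and_swap primitive from libatomic_ops:

    /* The define must come before the include; once atomic_ops.h has been
       read it has already committed to a (possibly unavailable) native
       implementation. */
    #define AO_REQUIRE_CAS
    #include <atomic_ops.h>

    static int set_flag_once(volatile AO_t *flag)
    {
        /* non-zero only for the first caller that flips 0 -> 1 */
        return AO_compare_and_swap(flag, 0, 1);
    }

On pre-v6 ARM this compiles against the emulated implementation, so the final link needs something like: cc -o demo demo.c -latomic_ops
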
diff --git a/yocto-poky/meta/recipes-support/libunwind/libunwind_1.1.bb b/yocto-poky/meta/recipes-support/libunwind/libunwind_1.1.bb
index 8282c1b71..aa62bcc66 100644
--- a/yocto-poky/meta/recipes-support/libunwind/libunwind_1.1.bb
+++ b/yocto-poky/meta/recipes-support/libunwind/libunwind_1.1.bb
@@ -6,7 +6,12 @@ SRC_URI += "\
file://Fix-test-case-link-failure-on-PowerPC-systems-with-Altivec.patch \
file://Link-libunwind-to-libgcc_s-rather-than-libgcc.patch \
file://0001-Invalid-dwarf-opcodes-can-cause-references-beyond-th.patch \
+ file://Add-AO_REQUIRE_CAS-to-fix-build-on-ARM-v6.patch \
"
SRC_URI[md5sum] = "fb4ea2f6fbbe45bf032cd36e586883ce"
SRC_URI[sha256sum] = "9dfe0fcae2a866de9d3942c66995e4b460230446887dbdab302d41a8aee8d09a"
+
+# http://errors.yoctoproject.org/Errors/Details/20487/
+ARM_INSTRUCTION_SET_armv4 = "arm"
+ARM_INSTRUCTION_SET_armv5 = "arm"
diff --git a/yocto-poky/meta/recipes-support/libxslt/libxslt/CVE-2015-7995.patch b/yocto-poky/meta/recipes-support/libxslt/libxslt/CVE-2015-7995.patch
new file mode 100644
index 000000000..e4d09c2ac
--- /dev/null
+++ b/yocto-poky/meta/recipes-support/libxslt/libxslt/CVE-2015-7995.patch
@@ -0,0 +1,33 @@
+From 7ca19df892ca22d9314e95d59ce2abdeff46b617 Mon Sep 17 00:00:00 2001
+From: Daniel Veillard <veillard@redhat.com>
+Date: Thu, 29 Oct 2015 19:33:23 +0800
+Subject: Fix for type confusion in preprocessing attributes
+
+CVE-2015-7995 http://www.openwall.com/lists/oss-security/2015/10/27/10
+We need to check that the parent node is an element before dereferencing
+its namespace
+
+Upstream-Status: Backport
+
+https://git.gnome.org/browse/libxslt/commit/?id=7ca19df892ca22d9314e95d59ce2abdeff46b617
+
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+---
+ libxslt/preproc.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+Index: libxslt-1.1.28/libxslt/preproc.c
+===================================================================
+--- libxslt-1.1.28.orig/libxslt/preproc.c
++++ libxslt-1.1.28/libxslt/preproc.c
+@@ -2245,7 +2245,8 @@ xsltStylePreCompute(xsltStylesheetPtr st
+ } else if (IS_XSLT_NAME(inst, "attribute")) {
+ xmlNodePtr parent = inst->parent;
+
+- if ((parent == NULL) || (parent->ns == NULL) ||
++ if ((parent == NULL) ||
++ (parent->type != XML_ELEMENT_NODE) || (parent->ns == NULL) ||
+ ((parent->ns != inst->ns) &&
+ (!xmlStrEqual(parent->ns->href, inst->ns->href))) ||
+ (!xmlStrEqual(parent->name, BAD_CAST "attribute-set"))) {
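
The libxslt fix above guards a namespace dereference with a node-type check: only element nodes are guaranteed to carry a meaningful ns pointer, and the type confusion in CVE-2015-7995 reached this code with a non-element parent. The same defensive pattern expressed with the public libxml2 API, as a stand-alone sketch:

    #include <libxml/tree.h>

    /* Return the namespace URI of node's parent, or NULL when the parent
       is missing, is not an element, or has no namespace -- the check
       order the CVE-2015-7995 fix enforces before touching parent->ns. */
    static const xmlChar *parent_ns_href(xmlNodePtr node)
    {
        xmlNodePtr parent = node ? node->parent : NULL;

        if (parent == NULL || parent->type != XML_ELEMENT_NODE ||
            parent->ns == NULL)
            return NULL;
        return parent->ns->href;
    }
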
diff --git a/yocto-poky/meta/recipes-support/libxslt/libxslt_1.1.28.bb b/yocto-poky/meta/recipes-support/libxslt/libxslt_1.1.28.bb
index 166bcd86e..87fabecda 100644
--- a/yocto-poky/meta/recipes-support/libxslt/libxslt_1.1.28.bb
+++ b/yocto-poky/meta/recipes-support/libxslt/libxslt_1.1.28.bb
@@ -10,7 +10,8 @@ DEPENDS = "libxml2"
SRC_URI = "ftp://xmlsoft.org/libxslt//libxslt-${PV}.tar.gz \
file://pkgconfig_fix.patch \
- file://pkgconfig.patch"
+ file://pkgconfig.patch \
+ file://CVE-2015-7995.patch"
SRC_URI[md5sum] = "9667bf6f9310b957254fdcf6596600b7"
SRC_URI[sha256sum] = "5fc7151a57b89c03d7b825df5a0fae0a8d5f05674c0e7cf2937ecec4d54a028c"
diff --git a/yocto-poky/meta/recipes-support/nettle/nettle-3.1.1/CVE-2015-8803_8805.patch b/yocto-poky/meta/recipes-support/nettle/nettle-3.1.1/CVE-2015-8803_8805.patch
new file mode 100644
index 000000000..b4ff228f6
--- /dev/null
+++ b/yocto-poky/meta/recipes-support/nettle/nettle-3.1.1/CVE-2015-8803_8805.patch
@@ -0,0 +1,71 @@
+Upstream-Status: Backport
+https://git.lysator.liu.se/nettle/nettle/commit/c71d2c9d20eeebb985e3872e4550137209e3ce4d
+
+CVE: CVE-2015-8803
+CVE: CVE-2015-8805
+
+Same fix for both.
+
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+Index: nettle-3.1.1/ecc-256.c
+===================================================================
+--- nettle-3.1.1.orig/ecc-256.c
++++ nettle-3.1.1/ecc-256.c
+@@ -113,8 +113,19 @@ ecc_256_modp (const struct ecc_modulo *p
+
+ assert (q2 < 2);
+
+- /* We multiply by two low limbs of p, 2^96 - 1, so we could use
+- shifts rather than mul. */
++ /*
++ n-1 n-2 n-3 n-4
++ +---+---+---+---+
++ | u1| u0| u low |
++ +---+---+---+---+
++ - | q1(2^96-1)|
++ +-------+---+
++ |q2(2^.)|
++ +-------+
++
++ We multiply by two low limbs of p, 2^96 - 1, so we could use
++ shifts rather than mul.
++ */
+ t = mpn_submul_1 (rp + n - 4, p->m, 2, q1);
+ t += cnd_sub_n (q2, rp + n - 3, p->m, 1);
+ t += (-q2) & 0xffffffff;
+@@ -124,7 +135,10 @@ ecc_256_modp (const struct ecc_modulo *p
+ u0 -= t;
+ t = (u1 < cy);
+ u1 -= cy;
+- u1 += cnd_add_n (t, rp + n - 4, p->m, 3);
++
++ cy = cnd_add_n (t, rp + n - 4, p->m, 2);
++ u0 += cy;
++ u1 += (u0 < cy);
+ u1 -= (-t) & 0xffffffff;
+ }
+ rp[2] = u0;
+@@ -211,7 +225,7 @@ ecc_256_modq (const struct ecc_modulo *q
+
+ /* Conditional add of p */
+ u1 += t;
+- u2 += (t<<32) + (u0 < t);
++ u2 += (t<<32) + (u1 < t);
+
+ t = cnd_add_n (t, rp + n - 4, q->m, 2);
+ u1 += t;
+Index: nettle-3.1.1/ChangeLog
+===================================================================
+--- nettle-3.1.1.orig/ChangeLog
++++ nettle-3.1.1/ChangeLog
+@@ -1,3 +1,9 @@
++2015-12-10 Niels Möller <nisse@lysator.liu.se>
++
++ * ecc-256.c (ecc_256_modp): Fixed carry propagation bug. Problem
++ reported by Hanno Böck.
++ (ecc_256_modq): Fixed another carry propagation bug.
++
+ 2015-04-24 Niels Möller <nisse@lysator.liu.se>
+
+ * Released nettle-3.1.1.
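
Both hunks above are carry-propagation corrections: the carry out of a limb addition was folded into, or tested against, the wrong limb (the following CVE-2015-8804 patch fixes the same class of bug in the x86_64 ecc-384 assembly). The portable-C idiom the code relies on is that after an unsigned limb addition sum = a + b, the carry is (sum < a), and it must be added into the next more significant limb of the number it belongs to. A two-limb sketch:

    #include <stdint.h>

    /* Add two 128-bit values held as (hi, lo) pairs of 64-bit limbs.  The
       carry out of the low limbs is detected with the standard
       "sum < addend" test and propagated into the high limbs -- the step
       that the ecc-256.c fix applies to the correct limb. */
    static void add128(uint64_t a_hi, uint64_t a_lo,
                       uint64_t b_hi, uint64_t b_lo,
                       uint64_t *r_hi, uint64_t *r_lo)
    {
        uint64_t lo = a_lo + b_lo;
        uint64_t carry = (lo < a_lo) ? 1 : 0;   /* low addition wrapped */

        *r_lo = lo;
        *r_hi = a_hi + b_hi + carry;
    }
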
diff --git a/yocto-poky/meta/recipes-support/nettle/nettle-3.1.1/CVE-2015-8804.patch b/yocto-poky/meta/recipes-support/nettle/nettle-3.1.1/CVE-2015-8804.patch
new file mode 100644
index 000000000..1d34db735
--- /dev/null
+++ b/yocto-poky/meta/recipes-support/nettle/nettle-3.1.1/CVE-2015-8804.patch
@@ -0,0 +1,281 @@
+Upstream-Status: Backport
+ https://git.lysator.liu.se/nettle/nettle/commit/fa269b6ad06dd13c901dbd84a12e52b918a09cd7
+
+CVE: CVE-2015-8804
+Signed-off-by: Armin Kuster <akuster@mvista.com>
+
+Index: nettle-3.1.1/ChangeLog
+===================================================================
+--- nettle-3.1.1.orig/ChangeLog
++++ nettle-3.1.1/ChangeLog
+@@ -1,3 +1,11 @@
++2015-12-15 Niels Möller <nisse@lysator.liu.se>
++
++ * x86_64/ecc-384-modp.asm: Fixed carry propagation bug. Problem
++ reported by Hanno Böck. Simplified the folding to always use
++ non-negative carry, the old code attempted to add in a carry which
++ could be either positive or negative, but didn't get that case
++ right.
++
+ 2015-12-10 Niels Möller <nisse@lysator.liu.se>
+
+ * ecc-256.c (ecc_256_modp): Fixed carry propagation bug. Problem
+Index: nettle-3.1.1/x86_64/ecc-384-modp.asm
+===================================================================
+--- nettle-3.1.1.orig/x86_64/ecc-384-modp.asm
++++ nettle-3.1.1/x86_64/ecc-384-modp.asm
+@@ -1,7 +1,7 @@
+ C x86_64/ecc-384-modp.asm
+
+ ifelse(<
+- Copyright (C) 2013 Niels Möller
++ Copyright (C) 2013, 2015 Niels Möller
+
+ This file is part of GNU Nettle.
+
+@@ -33,7 +33,7 @@ ifelse(<
+ .file "ecc-384-modp.asm"
+
+ define(<RP>, <%rsi>)
+-define(<D4>, <%rax>)
++define(<D5>, <%rax>)
+ define(<T0>, <%rbx>)
+ define(<T1>, <%rcx>)
+ define(<T2>, <%rdx>)
+@@ -48,8 +48,8 @@ define(<H4>, <%r13>)
+ define(<H5>, <%r14>)
+ define(<C2>, <%r15>)
+ define(<C0>, H5) C Overlap
+-define(<D0>, RP) C Overlap
+-define(<TMP>, H4) C Overlap
++define(<TMP>, RP) C Overlap
++
+
+ PROLOGUE(nettle_ecc_384_modp)
+ W64_ENTRY(2, 0)
+@@ -61,34 +61,38 @@ PROLOGUE(nettle_ecc_384_modp)
+ push %r14
+ push %r15
+
+- C First get top 2 limbs, which need folding twice
++ C First get top 2 limbs, which need folding twice.
++ C B^10 = B^6 + B^4 + 2^32 (B-1)B^4.
++	C We handle the terms as follows:
+ C
+- C H5 H4
+- C -H5
+- C ------
+- C H0 D4
++	C B^6: Folded immediately.
+ C
+- C Then shift right, (H1,H0,D4) <-- (H0,D4) << 32
+- C and add
++	C B^4: Delayed, added in during the next folding.
+ C
+- C H5 H4
+- C H1 H0
+- C ----------
+- C C2 H1 H0
+-
+- mov 80(RP), D4
+- mov 88(RP), H0
+- mov D4, H4
+- mov H0, H5
+- sub H0, D4
+- sbb $0, H0
+-
+- mov D4, T2
+- mov H0, H1
+- shl $32, H0
+- shr $32, T2
++ C 2^32(B-1) B^4: Low half limb delayed until the next
++	C folding. Top 1.5 limbs subtracted and shifted now, resulting
++ C in 2.5 limbs. The low limb saved in D5, high 1.5 limbs added
++ C in.
++
++ mov 80(RP), H4
++ mov 88(RP), H5
++ C Shift right 32 bits, into H1, H0
++ mov H4, H0
++ mov H5, H1
++ mov H5, D5
+ shr $32, H1
+- or T2, H0
++ shl $32, D5
++ shr $32, H0
++ or D5, H0
++
++ C H1 H0
++ C - H1 H0
++ C --------
++ C H1 H0 D5
++ mov H0, D5
++ neg D5
++ sbb H1, H0
++ sbb $0, H1
+
+ xor C2, C2
+ add H4, H0
+@@ -127,118 +131,95 @@ PROLOGUE(nettle_ecc_384_modp)
+ adc H3, T5
+ adc $0, C0
+
+- C H3 H2 H1 H0 0
+- C - H4 H3 H2 H1 H0
+- C ---------------
+- C H3 H2 H1 H0 D0
+-
+- mov XREG(D4), XREG(D4)
+- mov H0, D0
+- neg D0
+- sbb H1, H0
+- sbb H2, H1
+- sbb H3, H2
+- sbb H4, H3
+- sbb $0, D4
+-
+- C Shift right. High bits are sign, to be added to C0.
+- mov D4, TMP
+- sar $32, TMP
+- shl $32, D4
+- add TMP, C0
+-
++ C Shift left, including low half of H4
+ mov H3, TMP
++ shl $32, H4
+ shr $32, TMP
+- shl $32, H3
+- or TMP, D4
++ or TMP, H4
+
+ mov H2, TMP
++ shl $32, H3
+ shr $32, TMP
+- shl $32, H2
+ or TMP, H3
+
+ mov H1, TMP
++ shl $32, H2
+ shr $32, TMP
+- shl $32, H1
+ or TMP, H2
+
+ mov H0, TMP
++ shl $32, H1
+ shr $32, TMP
+- shl $32, H0
+ or TMP, H1
+
+- mov D0, TMP
+- shr $32, TMP
+- shl $32, D0
+- or TMP, H0
++ shl $32, H0
++
++ C H4 H3 H2 H1 H0 0
++ C - H4 H3 H2 H1 H0
++ C ---------------
++ C H4 H3 H2 H1 H0 TMP
+
+- add D0, T0
++ mov H0, TMP
++ neg TMP
++ sbb H1, H0
++ sbb H2, H1
++ sbb H3, H2
++ sbb H4, H3
++ sbb $0, H4
++
++ add TMP, T0
+ adc H0, T1
+ adc H1, T2
+ adc H2, T3
+ adc H3, T4
+- adc D4, T5
++ adc H4, T5
+ adc $0, C0
+
+ C Remains to add in C2 and C0
+- C C0 C0<<32 (-2^32+1)C0
+- C C2 C2<<32 (-2^32+1)C2
+- C where C2 is always positive, while C0 may be -1.
++ C Set H1, H0 = (2^96 - 2^32 + 1) C0
+ mov C0, H0
+ mov C0, H1
+- mov C0, H2
+- sar $63, C0 C Get sign
+ shl $32, H1
+- sub H1, H0 C Gives borrow iff C0 > 0
++ sub H1, H0
+ sbb $0, H1
+- add C0, H2
+
++ C Set H3, H2 = (2^96 - 2^32 + 1) C2
++ mov C2, H2
++ mov C2, H3
++ shl $32, H3
++ sub H3, H2
++ sbb $0, H3
++ add C0, H2 C No carry. Could use lea trick
++
++ xor C0, C0
+ add H0, T0
+ adc H1, T1
+- adc $0, H2
+- adc $0, C0
+-
+- C Set (H1 H0) <-- C2 << 96 - C2 << 32 + 1
+- mov C2, H0
+- mov C2, H1
+- shl $32, H1
+- sub H1, H0
+- sbb $0, H1
+-
+- add H2, H0
+- adc C0, H1
+- adc C2, C0
+- mov C0, H2
+- sar $63, C0
+- add H0, T2
+- adc H1, T3
+- adc H2, T4
+- adc C0, T5
+- sbb C0, C0
++ adc H2, T2
++ adc H3, T3
++ adc C2, T4
++ adc D5, T5 C Value delayed from initial folding
++ adc $0, C0 C Use sbb and switch sign?
+
+ C Final unlikely carry
+ mov C0, H0
+ mov C0, H1
+- mov C0, H2
+- sar $63, C0
+ shl $32, H1
+ sub H1, H0
+ sbb $0, H1
+- add C0, H2
+
+ pop RP
+
+- sub H0, T0
++ add H0, T0
+ mov T0, (RP)
+- sbb H1, T1
++ adc H1, T1
+ mov T1, 8(RP)
+- sbb H2, T2
++ adc C0, T2
+ mov T2, 16(RP)
+- sbb C0, T3
++ adc $0, T3
+ mov T3, 24(RP)
+- sbb C0, T4
++ adc $0, T4
+ mov T4, 32(RP)
+- sbb C0, T5
++ adc $0, T5
+ mov T5, 40(RP)
+
+ pop %r15
diff --git a/yocto-poky/meta/recipes-support/nettle/nettle_3.1.1.bb b/yocto-poky/meta/recipes-support/nettle/nettle_3.1.1.bb
index 7d7134f19..4a40e9ae5 100644
--- a/yocto-poky/meta/recipes-support/nettle/nettle_3.1.1.bb
+++ b/yocto-poky/meta/recipes-support/nettle/nettle_3.1.1.bb
@@ -7,5 +7,10 @@ LIC_FILES_CHKSUM = "file://COPYING.LESSERv3;md5=6a6a8e020838b23406c81b19c1d46df6
file://serpent-decrypt.c;beginline=14;endline=36;md5=ca0d220bc413e1842ecc507690ce416e \
file://serpent-set-key.c;beginline=14;endline=36;md5=ca0d220bc413e1842ecc507690ce416e"
+SRC_URI += "\
+ file://CVE-2015-8803_8805.patch \
+ file://CVE-2015-8804.patch \
+ "
+
SRC_URI[md5sum] = "b40fa88dc32f37a182b6b42092ebb144"
SRC_URI[sha256sum] = "5fd4d25d64d8ddcb85d0d897572af73b05b4d163c6cc49438a5bfbb8ff293d4c"
diff --git a/yocto-poky/meta/recipes-support/p11-kit/p11-kit_0.22.1.bb b/yocto-poky/meta/recipes-support/p11-kit/p11-kit_0.22.1.bb
index 7ad9626e1..ee7795117 100644
--- a/yocto-poky/meta/recipes-support/p11-kit/p11-kit_0.22.1.bb
+++ b/yocto-poky/meta/recipes-support/p11-kit/p11-kit_0.22.1.bb
@@ -10,6 +10,8 @@ SRC_URI = "http://p11-glue.freedesktop.org/releases/${BP}.tar.gz"
SRC_URI[md5sum] = "4e9bea1106628ffb820bdad24a819fac"
SRC_URI[sha256sum] = "ef3a339fcf6aa0e32c8c23f79ba7191e57312be2bda8b24e6d121c2670539a5c"
+EXTRA_OECONF = "--without-trust-paths"
+
FILES_${PN}-dev += " \
${libdir}/p11-kit-proxy.so \
${libdir}/pkcs11/p11-kit-trust.so \
diff --git a/yocto-poky/meta/recipes-support/pinentry/pinentry_0.9.2.bb b/yocto-poky/meta/recipes-support/pinentry/pinentry_0.9.2.bb
index c836ca4b8..d6b713020 100644
--- a/yocto-poky/meta/recipes-support/pinentry/pinentry_0.9.2.bb
+++ b/yocto-poky/meta/recipes-support/pinentry/pinentry_0.9.2.bb
@@ -10,6 +10,8 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=cbbd794e2a0a289b9dfcc9f513d1996e"
inherit autotools
+DEPENDS = "gettext-native"
+
SRC_URI = "ftp://ftp.gnupg.org/gcrypt/${BPN}/${BPN}-${PV}.tar.bz2"
SRC_URI[md5sum] = "f51d454f921111b5156a2291cbf70278"
diff --git a/yocto-poky/meta/recipes-support/user-creation/files/system-xuser.conf b/yocto-poky/meta/recipes-support/user-creation/files/system-xuser.conf
new file mode 100644
index 000000000..d42e3d1f5
--- /dev/null
+++ b/yocto-poky/meta/recipes-support/user-creation/files/system-xuser.conf
@@ -0,0 +1,11 @@
+<!DOCTYPE busconfig PUBLIC "-//freedesktop//DTD D-BUS Bus Configuration 1.0//EN"
+ "http://www.freedesktop.org/standards/dbus/1.0/busconfig.dtd">
+<busconfig>
+ <policy user="xuser">
+ <allow send_destination="net.connman"/>
+ <allow send_destination="net.connman.vpn"/>
+ <allow send_destination="org.ofono"/>
+ <allow send_destination="org.bluez"/>
+ </policy>
+</busconfig>
+
diff --git a/yocto-poky/meta/recipes-support/user-creation/xuser-account_0.1.bb b/yocto-poky/meta/recipes-support/user-creation/xuser-account_0.1.bb
index 77ba97dbf..13ba67733 100644
--- a/yocto-poky/meta/recipes-support/user-creation/xuser-account_0.1.bb
+++ b/yocto-poky/meta/recipes-support/user-creation/xuser-account_0.1.bb
@@ -2,7 +2,7 @@ SUMMARY = "Creates an 'xuser' account used for running X11"
LICENSE = "MIT"
LIC_FILES_CHKSUM = "file://${COREBASE}/meta/COPYING.MIT;md5=3da9cfbcb788c80a0384361b4de20420"
-SRC_URI = ""
+SRC_URI = "file://system-xuser.conf"
inherit allarch useradd
@@ -15,9 +15,11 @@ do_compile() {
}
do_install() {
- :
+ install -D -m 0644 ${WORKDIR}/system-xuser.conf ${D}${sysconfdir}/dbus-1/system.d/system-xuser.conf
}
+FILES_${PN} = "${sysconfdir}/dbus-1/system.d/system-xuser.conf"
+
USERADD_PACKAGES = "${PN}"
GROUPADD_PARAM_${PN} = "--system shutdown"
USERADD_PARAM_${PN} = "--create-home \
diff --git a/yocto-poky/meta/recipes-support/vte/vte-0.28.2/cve-2012-2738.patch b/yocto-poky/meta/recipes-support/vte/vte-0.28.2/cve-2012-2738.patch
new file mode 100644
index 000000000..240777180
--- /dev/null
+++ b/yocto-poky/meta/recipes-support/vte/vte-0.28.2/cve-2012-2738.patch
@@ -0,0 +1,135 @@
+Upstream-Status: Backport
+Signed-off-by: Ross Burton <ross.burton@intel.com>
+
+From e524b0b3bd8fad844ffa73927c199545b892cdbd Mon Sep 17 00:00:00 2001
+From: Christian Persch <chpe@gnome.org>
+Date: Sat, 19 May 2012 19:36:09 +0200
+Subject: [PATCH 1/2] emulation: Limit integer arguments to 65535
+
+To guard against malicious sequences containing excessively big numbers,
+limit all parsed numbers to 16 bit range. Doing this here in the parsing
+routine is a catch-all guard; this doesn't preclude enforcing
+more stringent limits in the handlers themselves.
+
+https://bugzilla.gnome.org/show_bug.cgi?id=676090
+---
+ src/table.c | 2 +-
+ src/vteseq.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/src/table.c b/src/table.c
+index 140e8c8..85cf631 100644
+--- a/src/table.c
++++ b/src/table.c
+@@ -550,7 +550,7 @@ _vte_table_extract_numbers(GValueArray **array,
+ if (G_UNLIKELY (*array == NULL)) {
+ *array = g_value_array_new(1);
+ }
+- g_value_set_long(&value, total);
++ g_value_set_long(&value, CLAMP (total, 0, G_MAXUSHORT));
+ g_value_array_append(*array, &value);
+ } while (i++ < arginfo->length);
+ g_value_unset(&value);
+diff --git a/src/vteseq.c b/src/vteseq.c
+index 7ef4c8c..10991db 100644
+--- a/src/vteseq.c
++++ b/src/vteseq.c
+@@ -557,7 +557,7 @@ vte_sequence_handler_multiple(VteTerminal *terminal,
+ GValueArray *params,
+ VteTerminalSequenceHandler handler)
+ {
+- vte_sequence_handler_multiple_limited(terminal, params, handler, G_MAXLONG);
++ vte_sequence_handler_multiple_limited(terminal, params, handler, G_MAXUSHORT);
+ }
+
+ static void
+--
+2.4.9 (Apple Git-60)
+
+
+From cf1ad453a8def873c49cf6d88162593402f32bb2 Mon Sep 17 00:00:00 2001
+From: Christian Persch <chpe@gnome.org>
+Date: Sat, 19 May 2012 20:04:12 +0200
+Subject: [PATCH 2/2] emulation: Limit repetitions
+
+Don't allow malicious sequences to cause excessive repetitions.
+
+https://bugzilla.gnome.org/show_bug.cgi?id=676090
+---
+ src/vteseq.c | 25 ++++++++++++++++++-------
+ 1 file changed, 18 insertions(+), 7 deletions(-)
+
+diff --git a/src/vteseq.c b/src/vteseq.c
+index 10991db..209522f 100644
+--- a/src/vteseq.c
++++ b/src/vteseq.c
+@@ -1392,7 +1392,7 @@ vte_sequence_handler_dc (VteTerminal *terminal, GValueArray *params)
+ static void
+ vte_sequence_handler_DC (VteTerminal *terminal, GValueArray *params)
+ {
+- vte_sequence_handler_multiple(terminal, params, vte_sequence_handler_dc);
++ vte_sequence_handler_multiple_r(terminal, params, vte_sequence_handler_dc);
+ }
+
+ /* Delete a line at the current cursor position. */
+@@ -1785,7 +1785,7 @@ vte_sequence_handler_reverse_index (VteTerminal *terminal, GValueArray *params)
+ static void
+ vte_sequence_handler_RI (VteTerminal *terminal, GValueArray *params)
+ {
+- vte_sequence_handler_multiple(terminal, params, vte_sequence_handler_nd);
++ vte_sequence_handler_multiple_r(terminal, params, vte_sequence_handler_nd);
+ }
+
+ /* Save cursor (position). */
+@@ -2777,8 +2777,7 @@ vte_sequence_handler_insert_lines (VteTerminal *terminal, GValueArray *params)
+ {
+ GValue *value;
+ VteScreen *screen;
+- long param, end, row;
+- int i;
++ long param, end, row, i, limit;
+ screen = terminal->pvt->screen;
+ /* The default is one. */
+ param = 1;
+@@ -2796,7 +2795,13 @@ vte_sequence_handler_insert_lines (VteTerminal *terminal, GValueArray *params)
+ } else {
+ end = screen->insert_delta + terminal->row_count - 1;
+ }
+- /* Insert the new lines at the cursor. */
++
++ /* Only allow to insert as many lines as there are between this row
++ * and the end of the scrolling region. See bug #676090.
++ */
++ limit = end - row + 1;
++ param = MIN (param, limit);
++
+ for (i = 0; i < param; i++) {
+ /* Clear a line off the end of the region and add one to the
+ * top of the region. */
+@@ -2817,8 +2822,7 @@ vte_sequence_handler_delete_lines (VteTerminal *terminal, GValueArray *params)
+ {
+ GValue *value;
+ VteScreen *screen;
+- long param, end, row;
+- int i;
++ long param, end, row, i, limit;
+
+ screen = terminal->pvt->screen;
+ /* The default is one. */
+@@ -2837,6 +2841,13 @@ vte_sequence_handler_delete_lines (VteTerminal *terminal, GValueArray *params)
+ } else {
+ end = screen->insert_delta + terminal->row_count - 1;
+ }
++
++ /* Only allow to delete as many lines as there are between this row
++ * and the end of the scrolling region. See bug #676090.
++ */
++ limit = end - row + 1;
++ param = MIN (param, limit);
++
+ /* Clear them from below the current cursor. */
+ for (i = 0; i < param; i++) {
+ /* Insert a line at the end of the region and remove one from
+--
+2.4.9 (Apple Git-60)
+
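
For context on what the two backported fixes above guard against: CSI sequences such as insert-line (IL) and delete-line (DL) carry a repeat count parsed straight out of the escape sequence, so before this patch a hostile stream could request an effectively unbounded number of repetitions. A minimal shell sketch of the input class involved (these are standard ECMA-48 sequences, nothing specific to this recipe; only try it against a scratch terminal):

    # CSI Pn L asks the terminal to insert Pn blank lines at the cursor.
    # With the backport applied, vte clamps Pn to 65535 and to the height of
    # the scrolling region instead of looping up to G_MAXLONG times.
    printf '\033[4294967295L'
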
diff --git a/yocto-poky/meta/recipes-support/vte/vte.inc b/yocto-poky/meta/recipes-support/vte/vte.inc
index 874062adb..07b9e10dc 100644
--- a/yocto-poky/meta/recipes-support/vte/vte.inc
+++ b/yocto-poky/meta/recipes-support/vte/vte.inc
@@ -4,7 +4,8 @@ LICENSE = "LGPLv2.0"
DEPENDS = " glib-2.0 gtk+ intltool-native ncurses gobject-introspection-stub"
RDEPENDS_libvte = "vte-termcap"
-inherit gnome gtk-doc
+inherit gnome gtk-doc distro_features_check
+ANY_OF_DISTRO_FEATURES = "${GTK2DISTROFEATURES}"
EXTRA_OECONF = "--disable-python --disable-introspection"
diff --git a/yocto-poky/meta/recipes-support/vte/vte_0.28.2.bb b/yocto-poky/meta/recipes-support/vte/vte_0.28.2.bb
index b1025cb0e..8b4e7f71d 100644
--- a/yocto-poky/meta/recipes-support/vte/vte_0.28.2.bb
+++ b/yocto-poky/meta/recipes-support/vte/vte_0.28.2.bb
@@ -4,7 +4,8 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=3bf50002aefd002f49e7bb854063f7e7"
PR = "r6"
-SRC_URI += "file://obsolete_automake_macros.patch"
+SRC_URI += "file://obsolete_automake_macros.patch \
+ file://cve-2012-2738.patch"
CFLAGS += "-D_GNU_SOURCE"
diff --git a/yocto-poky/meta/site/nios2-linux b/yocto-poky/meta/site/nios2-linux
new file mode 100644
index 000000000..9e53e5db2
--- /dev/null
+++ b/yocto-poky/meta/site/nios2-linux
@@ -0,0 +1,395 @@
+ac_cv_func_lstat_dereferences_slashed_symlink=${ac_cv_func_lstat_dereferences_slashed_symlink=yes}
+ac_cv_func_lstat_empty_string_bug=${ac_cv_func_lstat_empty_string_bug=no}
+ac_cv_func_stat_empty_string_bug=${ac_cv_func_stat_empty_string_bug=no}
+ac_cv_func_stat_ignores_trailing_slash=${ac_cv_func_stat_ignores_trailing_slash=no}
+ac_cv_header_netinet_sctp_h=${ac_cv_header_netinet_sctp_h=no}
+ac_cv_header_netinet_sctp_uio_h=${ac_cv_header_netinet_sctp_uio_h=no}
+ac_cv_sctp=${ac_cv_sctp=no}
+
+# apache
+ac_cv_func_pthread_key_delete=${ac_cv_func_pthread_key_delete=yes}
+apr_cv_process_shared_works=${apr_cv_process_shared_works=no}
+ac_cv_sizeof_ssize_t=${ac_cv_sizeof_ssize_t=4}
+apr_cv_tcp_nodelay_with_cork=${apr_cv_tcp_nodelay_with_cork=yes}
+
+# bash
+ac_cv_c_long_double=${ac_cv_c_long_double=yes}
+bash_cv_func_sigsetjmp=${bash_cv_func_sigsetjmp=missing}
+
+# coreutils
+utils_cv_sys_open_max=${utils_cv_sys_open_max=1019}
+
+# cvs
+cvs_cv_func_printf_ptr=${cvs_cv_func_printf_ptr=yes}
+
+# db (sleepycat)
+db_cv_fcntl_f_setfd=${db_cv_fcntl_f_setfd=yes}
+db_cv_sprintf_count=${db_cv_sprintf_count=yes}
+db_cv_path_ar=${db_cv_path_ar=/usr/bin/ar}
+db_cv_path_chmod=${db_cv_path_chmod=/bin/chmod}
+db_cv_path_cp=${db_cv_path_cp=/bin/cp}
+db_cv_path_ln=${db_cv_path_ln=/bin/ln}
+db_cv_path_mkdir=${db_cv_path_mkdir=/bin/mkdir}
+db_cv_path_ranlib=${db_cv_path_ranlib=/usr/bin/ranlib}
+db_cv_path_rm=${db_cv_path_rm=/bin/rm}
+db_cv_path_sh=${db_cv_path_sh=/bin/sh}
+db_cv_path_strip=${db_cv_path_strip=/usr/bin/strip}
+db_cv_align_t=${db_cv_align_t='unsigned long long'}
+db_cv_alignp_t=${db_cv_alignp_t='unsigned long'}
+db_cv_mutex=${db_cv_mutex=ARM/gcc-assembly}
+db_cv_posixmutexes=${db_cv_posixmutexes=no}
+db_cv_uimutexes=${db_cv_uimutexes=no}
+
+# D-BUS
+ac_cv_func_posix_getpwnam_r=${ac_cv_func_posix_getpwnam_r=yes}
+
+# edb
+db_cv_spinlocks=${db_cv_spinlocks=no}
+
+# ettercap
+ettercap_cv_type_socklen_t=${ettercap_cv_type_socklen_t=yes}
+
+# fget
+compat_cv_func_snprintf_works=${compat_cv_func_snprintf_works=yes}
+compat_cv_func_basename_works=${compat_cv_func_basename_works=no}
+compat_cv_func_dirname_works=${compat_cv_func_dirname_works=no}
+
+# fnmatch
+ac_cv_func_fnmatch_works=${ac_cv_func_fnmatch_works=yes}
+
+# gettext
+am_cv_func_working_getline=${am_cv_func_working_getline=yes}
+
+# gio
+ac_cv_func_getgrgid_r=${ac_cv_func_getgrgid_r=yes}
+
+# glib
+glib_cv_sizeof_gmutex=${glib_cv_sizeof_gmutex=24}
+glib_cv_sizeof_system_thread=${glib_cv_sizeof_system_thread=4}
+glib_cv_stack_grows=${glib_cv_stack_grows=no}
+glib_cv_uscore=${glib_cv_uscore=no}
+glib_cv_use_pid_surrogate=${glib_cv_use_pid_surrogate=yes}
+glib_cv_has__inline=${glib_cv_has__inline=yes}
+glib_cv_has__inline__=${glib_cv_has__inline__=yes}
+glib_cv_hasinline=${glib_cv_hasinline=yes}
+glib_cv_sane_realloc=${glib_cv_sane_realloc=yes}
+glib_cv_sizeof_gmutex=${glib_cv_sizeof_gmutex=24}
+glib_cv_uscore=${glib_cv_uscore=no}
+glib_cv_va_copy=${glib_cv_va_copy=yes}
+glib_cv_va_val_copy=${glib_cv_va_val_copy=yes}
+glib_cv___va_copy=${glib_cv___va_copy=yes}
+glib_cv_rtldglobal_broken=${glib_cv_rtldglobal_broken=no}
+ac_cv_func_getpwuid_r=${ac_cv_func_getpwuid_r=yes}
+glib_cv_sys_pthread_mutex_trylock_posix=${glib_cv_sys_pthread_mutex_trylock_posix=yes}
+glib_cv_sys_pthread_getspecific_posix=${glib_cv_sys_pthread_getspecific_posix=yes}
+glib_cv_sys_pthread_cond_timedwait_posix=${glib_cv_sys_pthread_cond_timedwait_posix=yes}
+
+# glib-2.0
+glib_cv_long_long_format=${glib_cv_long_long_format=ll}
+glib_cv_sizeof_gmutex=${glib_cv_sizeof_gmutex=24}
+glib_cv_sizeof_intmax_t=${glib_cv_sizeof_intmax_t=8}
+glib_cv_sizeof_ptrdiff_t=${glib_cv_sizeof_ptrdiff_t=4}
+glib_cv_sizeof_size_t=${glib_cv_sizeof_size_t=4}
+glib_cv_sizeof_system_thread=${glib_cv_sizeof_system_thread=4}
+glib_cv_sys_use_pid_niceness_surrogate=${glib_cv_sys_use_pid_niceness_surrogate=yes}
+
+#gstreamer
+as_cv_unaligned_access=${as_cv_unaligned_access=no}
+
+# httppc
+ac_cv_strerror_r_SUSv3=${ac_cv_strerror_r_SUSv3=no}
+
+# ipsec-tools
+ac_cv_va_copy=${ac_cv_va_copy=yes}
+ac_cv___va_copy=${ac_cv___va_copy=yes}
+ac_cv_va_val_copy=${ac_cv_va_val_copy=yes}
+racoon_cv_bug_getaddrinfo=${racoon_cv_bug_getaddrinfo=no}
+
+# jikes
+ac_cv_sizeof_wchar_t=4
+
+# lftp
+ac_cv_need_trio=${ac_cv_need_trio=no}
+lftp_cv_va_copy=${lftp_cv_va_copy=yes}
+lftp_cv_va_val_copy=${lftp_cv_va_val_copy=yes}
+lftp_cv___va_copy=${lftp_cv___va_copy=yes}
+
+# libesmtp
+acx_working_snprintf=${acx_working_snprintf=yes}
+
+# libidl
+libIDL_cv_long_long_format=${libIDL_cv_long_long_format=ll}
+
+# libnet
+ac_libnet_have_packet_socket=${ac_libnet_have_packet_socket=yes}
+
+# libpcap
+ac_cv_linux_vers=${ac_cv_linux_vers=2}
+
+# libxfce4util
+with_broken_putenv=${with_broken_putenv=no}
+
+# links
+ac_cv_lib_png_png_create_info_struct=${ac_cv_lib_png_png_create_info_struct=yes}
+
+# mono
+cv_mono_sizeof_sunpath=108
+mono_cv_sizeof_sunpath=108
+
+# mysql
+mysql_cv_func_atomic_sub=${mysql_cv_func_atomic_sub=no}
+mysql_cv_func_atomic_add=${mysql_cv_func_atomic_add=no}
+
+# nano
+ac_cv_regexec_segfault_emptystr=${ac_cv_regexec_segfault_emptystr=no}
+nano_cv_func_regexec_segv_emptystr=${nano_cv_func_regexec_segv_emptystr=no}
+
+# ORBit2
+ac_cv_alignof_CORBA_boolean=1
+ac_cv_alignof_CORBA_char=1
+ac_cv_alignof_CORBA_double=8
+ac_cv_alignof_CORBA_float=4
+ac_cv_alignof_CORBA_long=4
+ac_cv_alignof_CORBA_long_double=8
+ac_cv_alignof_CORBA_long_long=8
+ac_cv_alignof_CORBA_octet=1
+ac_cv_alignof_CORBA_pointer=4
+ac_cv_alignof_CORBA_short=2
+ac_cv_alignof_CORBA_struct=1
+ac_cv_alignof_CORBA_wchar=2
+ac_cv_func_getaddrinfo=${ac_cv_func_getaddrinfo=yes}
+
+# php
+ac_cv_pread=${ac_cv_pread=no}
+ac_cv_pwrite=${ac_cv_pwrite=no}
+php_cv_lib_cookie_io_functions_use_off64_t=${php_cv_lib_cookie_io_functions_use_off64_t=yes}
+cv_php_mbstring_stdarg=${cv_php_mbstring_stdarg=yes}
+
+# rsync
+rsync_cv_HAVE_BROKEN_LARGEFILE=${rsync_cv_HAVE_BROKEN_LARGEFILE=no}
+rsync_cv_HAVE_SOCKETPAIR=${rsync_cv_HAVE_SOCKETPAIR=yes}
+rsync_cv_HAVE_LONGLONG=${rsync_cv_HAVE_LONGLONG=yes}
+rsync_cv_HAVE_OFF64_T=${rsync_cv_HAVE_OFF64_T=no}
+rsync_cv_HAVE_SHORT_INO_T=${rsync_cv_HAVE_SHORT_INO_T=no}
+rsync_cv_HAVE_UNSIGNED_CHAR=${rsync_cv_HAVE_UNSIGNED_CHAR=no}
+rsync_cv_HAVE_BROKEN_READDIR=${rsync_cv_HAVE_BROKEN_READDIR=no}
+rsync_cv_HAVE_GETTIMEOFDAY_TZ=${rsync_cv_HAVE_GETTIMEOFDAY_TZ=yes}
+rsync_cv_HAVE_C99_VSNPRINTF=${rsync_cv_HAVE_C99_VSNPRINTF=yes}
+rsync_cv_HAVE_SECURE_MKSTEMP=${rsync_cv_HAVE_SECURE_MKSTEMP=yes}
+rsync_cv_REPLACE_INET_NTOA=${rsync_cv_REPLACE_INET_NTOA=no}
+rsync_cv_REPLACE_INET_ATON=${rsync_cv_REPLACE_INET_ATON=no}
+
+# screen
+screen_cv_sys_bcopy_overlap=${screen_cv_sys_bcopy_overlap=no}
+screen_cv_sys_memcpy_overlap=${screen_cv_sys_memcpy_overlap=no}
+screen_cv_sys_memmove_overlap=${screen_cv_sys_memmove_overlap=no}
+screen_cv_sys_fifo_broken_impl=${screen_cv_sys_fifo_broken_impl=yes}
+screen_cv_sys_fifo_usable=${screen_cv_sys_fifo_usable=yes}
+screen_cv_sys_select_broken_retval=${screen_cv_sys_select_broken_retval=no}
+screen_cv_sys_sockets_nofs=${screen_cv_sys_sockets_nofs=no}
+screen_cv_sys_sockets_usable=${screen_cv_sys_sockets_usable=yes}
+screen_cv_sys_terminfo_used=${screen_cv_sys_terminfo_used=yes}
+
+# slrn
+slrn_cv___va_copy=${slrn_cv___va_copy=yes}
+slrn_cv_va_copy=${slrn_cv_va_copy=yes}
+slrn_cv_va_val_copy=${slrn_cv_va_val_copy=yes}
+ac_cv_func_realloc_works=${ac_cv_func_realloc_works=yes}
+ac_cv_func_realloc_0_nonnull=${ac_cv_func_realloc_0_nonnull=yes}
+ac_cv_func_malloc_works=${ac_cv_func_malloc_works=yes}
+ac_cv_func_malloc_0_nonnull=${ac_cv_func_malloc_0_nonnull=yes}
+
+# socat
+ac_cv_ispeed_offset=${ac_cv_ispeed_offset=13}
+sc_cv_termios_ispeed=${sc_cv_termios_ispeed=yes}
+
+# ssh
+ac_cv_have_space_d_name_in_struct_dirent=${ac_cv_dirent_have_space_d_name=yes}
+ac_cv_have_broken_snprintf=${ac_cv_have_broken_snprintf=no}
+ac_cv_have_accrights_in_msghdr=${ac_cv_have_accrights_in_msghdr=no}
+ac_cv_have_control_in_msghdr=${ac_cv_have_control_in_msghdr=yes}
+ac_cv_have_openpty_ctty_bug=${ac_cv_have_openpty_ctty_bug=yes}
+
+# startup-notification
+lf_cv_sane_realloc=yes
+
+# sudo
+sudo_cv_uid_t_len=${sudo_cv_uid_t_len=10}
+
+# xffm
+jm_cv_func_working_readdir=yes
+
+# dpkg
+dpkg_cv_va_copy=${ac_cv_va_copy=yes}
+dpkg_cv___va_copy=${ac_cv___va_copy=yes}
+
+# eds-dbus
+ac_cv_libiconv_utf8=${ac_cv_libiconv_utf8=yes}
+ac_cv_func_getpgrp_void=yes
+ac_cv_func_setpgrp_void=yes
+ac_cv_func_setgrent_void=yes
+ac_cv_func_malloc_0_nonnull=yes
+ac_cv_func_malloc_works=yes
+ac_cv_func_posix_getpwuid_r=${ac_cv_func_posix_getpwuid_r=yes}
+ac_cv_func_posix_getgrgid_r=${ac_cv_func_posix_getgrgid_r=yes}
+ac_cv_func_setvbuf_reversed=no
+ac_cv_sizeof___int64=${ac_cv_sizeof___int64=0}
+ac_cv_sizeof_char=${ac_cv_sizeof_char=1}
+ac_cv_sizeof_wchar_t=${ac_cv_sizeof_wchar_t=1}
+ac_cv_sizeof_unsigned_char=${ac_cv_sizeof_unsigned_char=1}
+ac_cv_sizeof_bool=${ac_cv_sizeof_bool=1}
+ac_cv_sizeof_char_p=${ac_cv_sizeof_int_p=4}
+ac_cv_sizeof_int=${ac_cv_sizeof_int=4}
+ac_cv_sizeof_int_p=${ac_cv_sizeof_int_p=4}
+ac_cv_sizeof_long=${ac_cv_sizeof_long=4}
+ac_cv_sizeof_long_int=${ac_cv_sizeof_long_int=4}
+ac_cv_sizeof_long_long=${ac_cv_sizeof_long_long=8}
+ac_cv_sizeof_off_t=${ac_cv_sizeof_off_t=4}
+ac_cv_sizeof_short=${ac_cv_sizeof_short=2}
+ac_cv_sizeof_short_int=${ac_cv_sizeof_short_int=2}
+ac_cv_sizeof_size_t=${ac_cv_sizeof_size_t=4}
+ac_cv_sizeof_void_p=${ac_cv_sizeof_void_p=4}
+ac_cv_sizeof_long_p=${ac_cv_sizeof_long_p=4}
+ac_cv_sizeof_float=${ac_cv_sizeof_float=4}
+ac_cv_sizeof_double=${ac_cv_sizeof_double=8}
+ac_cv_sizeof_long_double=${ac_cv_sizeof_long_double=8}
+ac_cv_sizeof_ptrdiff_t=${glib_cv_sizeof_ptrdiff_t=4}
+ac_cv_sizeof_unsigned_short=${ac_cv_sizeof_unsigned_short=2}
+ac_cv_sizeof_unsigned=${ac_cv_sizeof_unsigned=4}
+ac_cv_sizeof_unsigned_int=${ac_cv_sizeof_unsigned_int=4}
+ac_cv_sizeof_unsigned_long=${ac_cv_sizeof_unsigned_long=4}
+ac_cv_sizeof_unsigned_long_long=${ac_cv_sizeof_unsigned_long_long=8}
+ac_cv_sizeof_signed_char=${ac_cv_sizeof_signed_char=1}
+
+ac_cv_uchar=${ac_cv_uchar=no}
+ac_cv_uint=${ac_cv_uint=yes}
+ac_cv_ulong=${ac_cv_ulong=yes}
+ac_cv_ushort=${ac_cv_ushort=yes}
+ac_cv_time_r_type=${ac_cv_time_r_type=POSIX}
+
+# samba
+samba_cv_BROKEN_NISPLUS_INCLUDE_FILES=${samba_cv_BROKEN_NISPLUS_INCLUDE_FILES=yes}
+samba_cv_BROKEN_REDHAT_7_SYSTEM_HEADERS=${samba_cv_BROKEN_REDHAT_7_SYSTEM_HEADERS=no}
+samba_cv_HAVE_BROKEN_FCNTL64_LOCKS=${samba_cv_HAVE_BROKEN_FCNTL64_LOCKS=no}
+samba_cv_HAVE_BROKEN_GETGROUPS=${samba_cv_HAVE_BROKEN_GETGROUPS=no}
+samba_cv_HAVE_BROKEN_LINUX_SENDFILE=${samba_cv_HAVE_BROKEN_LINUX_SENDFILE=yes}
+samba_cv_HAVE_BROKEN_READDIR=${samba_cv_HAVE_BROKEN_READDIR=yes}
+samba_cv_HAVE_BROKEN_READDIR_NAME=${samba_cv_HAVE_BROKEN_READDIR_NAME=no}
+samba_cv_HAVE_C99_VSNPRINTF=${samba_cv_HAVE_C99_VSNPRINTF=yes}
+samba_cv_HAVE_DEV64_T=${samba_cv_HAVE_DEV64_T=no}
+samba_cv_HAVE_DEVICE_MAJOR_FN=${samba_cv_HAVE_DEVICE_MAJOR_FN=yes}
+samba_cv_HAVE_DEVICE_MINOR_FN=${samba_cv_HAVE_DEVICE_MINOR_FN=yes}
+samba_cv_HAVE_DQB_FSOFTLIMIT=${samba_cv_HAVE_DQB_FSOFTLIMIT=no}
+samba_cv_HAVE_EXPLICIT_LARGEFILE_SUPPORT=${samba_cv_HAVE_EXPLICIT_LARGEFILE_SUPPORT=yes}
+samba_cv_HAVE_FAM_H=${samba_cv_HAVE_FAM_H=no}
+samba_cv_HAVE_FCNTL_LOCK=${samba_cv_HAVE_FCNTL_LOCK=yes}
+samba_cv_HAVE_FTRUNCATE_EXTEND=${samba_cv_HAVE_FTRUNCATE_EXTEND=yes}
+samba_cv_HAVE_FUNCTION_MACRO=${samba_cv_HAVE_FUNCTION_MACRO=yes}
+samba_cv_HAVE_GETTIMEOFDAY_TZ=${samba_cv_HAVE_GETTIMEOFDAY_TZ=yes}
+samba_cv_HAVE_INO64_T=${samba_cv_HAVE_INO64_T=no}
+samba_cv_HAVE_INT16_FROM_RPC_RPC_H=${samba_cv_HAVE_INT16_FROM_RPC_RPC_H=no}
+samba_cv_HAVE_INT32_FROM_RPC_RPC_H=${samba_cv_HAVE_INT32_FROM_RPC_RPC_H=no}
+samba_cv_HAVE_KERNEL_CHANGE_NOTIFY=${samba_cv_HAVE_KERNEL_CHANGE_NOTIFY=yes}
+samba_cv_HAVE_KERNEL_OPLOCKS_IRIX=${samba_cv_HAVE_KERNEL_OPLOCKS_IRIX=no}
+samba_cv_HAVE_KERNEL_OPLOCKS_LINUX=${samba_cv_HAVE_KERNEL_OPLOCKS_LINUX=yes}
+samba_cv_HAVE_KERNEL_SHARE_MODES=${samba_cv_HAVE_KERNEL_SHARE_MODES=yes}
+samba_cv_HAVE_MAKEDEV=${samba_cv_HAVE_MAKEDEV=yes}
+samba_cv_HAVE_MMAP=${samba_cv_HAVE_MMAP=yes}
+samba_cv_HAVE_NATIVE_ICONV=${samba_cv_HAVE_NATIVE_ICONV=yes}
+samba_cv_HAVE_OFF64_T=${samba_cv_HAVE_OFF64_T=no}
+samba_cv_HAVE_ROOT=${samba_cv_HAVE_ROOT=yes}
+samba_cv_HAVE_RPC_AUTH_ERROR_CONFLICT=${samba_cv_HAVE_RPC_AUTH_ERROR_CONFLICT=no}
+samba_cv_HAVE_SECURE_MKSTEMP=${samba_cv_HAVE_SECURE_MKSTEMP=yes}
+samba_cv_HAVE_SENDFILE=${samba_cv_HAVE_SENDFILE=yes}
+samba_cv_HAVE_SENDFILE64=${samba_cv_HAVE_SENDFILE64=yes}
+samba_cv_HAVE_SOCK_SIN_LEN=${samba_cv_HAVE_SOCK_SIN_LEN=no}
+samba_cv_HAVE_STAT_ST_BLKSIZE=${samba_cv_HAVE_STAT_ST_BLKSIZE=yes}
+samba_cv_HAVE_STAT_ST_BLOCKS=${samba_cv_HAVE_STAT_ST_BLOCKS=yes}
+samba_cv_HAVE_STRUCT_DIR64=${samba_cv_HAVE_STRUCT_DIR64=no}
+samba_cv_HAVE_STRUCT_DIRENT64=${samba_cv_HAVE_STRUCT_DIRENT64=yes}
+samba_cv_HAVE_STRUCT_FLOCK64=${samba_cv_HAVE_STRUCT_FLOCK64=yes}
+samba_cv_HAVE_TRUNCATED_SALT=${samba_cv_HAVE_TRUNCATED_SALT=no}
+samba_cv_HAVE_UINT16_FROM_RPC_RPC_H=${samba_cv_HAVE_UINT16_FROM_RPC_RPC_H=no}
+samba_cv_HAVE_UINT32_FROM_RPC_RPC_H=${samba_cv_HAVE_UINT32_FROM_RPC_RPC_H=no}
+samba_cv_HAVE_UNSIGNED_CHAR=${samba_cv_HAVE_UNSIGNED_CHAR=yes}
+samba_cv_HAVE_UTIMBUF=${samba_cv_HAVE_UTIMBUF=yes}
+samba_cv_HAVE_UT_UT_ADDR=${samba_cv_HAVE_UT_UT_ADDR=yes}
+samba_cv_HAVE_UT_UT_EXIT=${samba_cv_HAVE_UT_UT_EXIT=yes}
+samba_cv_HAVE_UT_UT_HOST=${samba_cv_HAVE_UT_UT_HOST=yes}
+samba_cv_HAVE_UT_UT_ID=${samba_cv_HAVE_UT_UT_ID=yes}
+samba_cv_HAVE_UT_UT_NAME=${samba_cv_HAVE_UT_UT_NAME=yes}
+samba_cv_HAVE_UT_UT_PID=${samba_cv_HAVE_UT_UT_PID=yes}
+samba_cv_HAVE_UT_UT_TIME=${samba_cv_HAVE_UT_UT_TIME=yes}
+samba_cv_HAVE_UT_UT_TV=${samba_cv_HAVE_UT_UT_TV=yes}
+samba_cv_HAVE_UT_UT_TYPE=${samba_cv_HAVE_UT_UT_TYPE=yes}
+samba_cv_HAVE_UT_UT_USER=${samba_cv_HAVE_UT_UT_USER=yes}
+samba_cv_HAVE_UX_UT_SYSLEN=${samba_cv_HAVE_UX_UT_SYSLEN=no}
+samba_cv_HAVE_VA_COPY=${samba_cv_HAVE_VA_COPY=yes}
+samba_cv_HAVE_WORKING_AF_LOCAL=${samba_cv_HAVE_WORKING_AF_LOCAL=yes}
+samba_cv_HAVE_Werror=${samba_cv_HAVE_Werror=yes}
+samba_cv_PUTUTLINE_RETURNS_UTMP=${samba_cv_PUTUTLINE_RETURNS_UTMP=yes}
+samba_cv_QUOTA_WORKS=${samba_cv_QUOTA_WORKS=yes}
+samba_cv_REALPATH_TAKES_NULL=${samba_cv_REALPATH_TAKES_NULL=yes}
+samba_cv_REPLACE_GETPASS=${samba_cv_REPLACE_GETPASS=yes}
+samba_cv_REPLACE_INET_NTOA=${samba_cv_REPLACE_INET_NTOA=no}
+samba_cv_REPLACE_READDIR=${samba_cv_REPLACE_READDIR=no}
+samba_cv_RUN_QUOTA_TESTS=${samba_cv_RUN_QUOTA_TESTS=yes}
+samba_cv_SEEKDIR_RETURNS_VOID=${samba_cv_SEEKDIR_RETURNS_VOID=yes}
+samba_cv_SIZEOF_DEV_T=${samba_cv_SIZEOF_DEV_T=yes}
+samba_cv_SIZEOF_INO_T=${samba_cv_SIZEOF_INO_T=yes}
+samba_cv_SIZEOF_OFF_T=${samba_cv_SIZEOF_OFF_T=yes}
+samba_cv_SYSCONF_SC_NGROUPS_MAX=${samba_cv_SYSCONF_SC_NGROUPS_MAX=yes}
+samba_cv_SYSCONF_SC_NPROCESSORS_ONLN=${samba_cv_SYSCONF_SC_NPROCESSORS_ONLN=yes}
+samba_cv_SYSCONF_SC_NPROC_ONLN=${samba_cv_SYSCONF_SC_NPROC_ONLN=no}
+samba_cv_SYSCONF_SC_PAGESIZE=${samba_cv_SYSCONF_SC_PAGESIZE=yes}
+samba_cv_SYSQUOTA_FOUND=${samba_cv_SYSQUOTA_FOUND=yes}
+samba_cv_SYSQUOTA_WORKS=${samba_cv_SYSQUOTA_WORKS=yes}
+samba_cv_SYSQUOTA_WORKS_XFS=${samba_cv_SYSQUOTA_WORKS_XFS=yes}
+samba_cv_TRY_QUOTAS=${samba_cv_TRY_QUOTAS=no}
+samba_cv_TRY_SYS_QUOTAS=${samba_cv_TRY_SYS_QUOTAS=yes}
+samba_cv_USE_SETRESUID=${samba_cv_USE_SETRESUID=yes}
+samba_cv_WE_USE_SYS_QUOTAS=${samba_cv_WE_USE_SYS_QUOTAS=yes}
+samba_cv_WITH_AFS=${samba_cv_WITH_AFS=no}
+samba_cv_WITH_FAKE_KASERVER=${samba_cv_WITH_FAKE_KASERVER=no}
+samba_cv_WITH_QUOTAS=${samba_cv_WITH_QUOTAS=auto}
+samba_cv_WITH_SYS_QUOTAS=${samba_cv_WITH_SYS_QUOTAS=auto}
+samba_cv_WITH_VFS_AFSACL=${samba_cv_WITH_VFS_AFSACL=no}
+samba_cv_compiler_supports_ll=${samba_cv_compiler_supports_ll=yes}
+samba_cv_found_xfs_header=${samba_cv_found_xfs_header=yes}
+samba_cv_have_longlong=${samba_cv_have_longlong=yes}
+samba_cv_have_setresgid=${samba_cv_have_setresgid=yes}
+samba_cv_have_setresuid=${samba_cv_have_setresuid=yes}
+samba_cv_immediate_structures=${samba_cv_immediate_structures=yes}
+samba_cv_optimize_out_funcation_calls=${samba_cv_optimize_out_funcation_calls=yes}
+samba_cv_sig_atomic_t=${samba_cv_sig_atomic_t=yes}
+samba_cv_socklen_t=${samba_cv_socklen_t=yes}
+samba_cv_struct_timespec=${samba_cv_struct_timespec=yes}
+samba_cv_sysquotas_file=${samba_cv_sysquotas_file=lib/sysquotas_linux.c}
+samba_cv_unixsocket=${samba_cv_unixsocket=yes}
+samba_cv_volatile=${samba_cv_volatile=yes}
+
+# older samba defines
+samba_cv_USE_SETEUID=${samba_cv_USE_SETEUID=yes}
+samba_cv_USE_SETREUID=${samba_cv_USE_SETREUID=yes}
+samba_cv_USE_SETUIDX=${samba_cv_USE_SETUIDX=yes}
+samba_cv_LINUX_LFS_SUPPORT=${samba_cv_LINUX_LFS_SUPPORT=yes}
+
+# clamav
+clamav_av_func_working_snprintf_long=${clamav_av_func_working_snprintf_long=yes}
+clamav_av_have_in_port_t=${clamav_av_have_in_port_t=yes}
+clamav_av_have_in_addr_t=${clamav_av_have_in_addr_t=yes}
+ac_cv_func_mmap_fixed_mapped=${ac_cv_func_mmap_fixed_mapped=yes}
+
+#dbus
+ac_cv_have_abstract_sockets=${ac_cv_have_abstract_sockets=yes}
+
+# lftp
+ac_cv_file___dev_ptc_=yes
+
+# guile
+ac_cv_func_pthread_attr_getstack=${ac_cv_func_pthread_attr_getstack=yes}
+
+# gnet
+ac_cv_member_struct_sockaddr_sa_len=${ac_cv_member_struct_sockaddr_sa_len=no}
+ac_cv_gnet_have_abstract_sockets=${ac_cv_gnet_have_abstract_sockets=no}
+gnet_sockaddr_family_field_name=${gnet_sockaddr_family_field_name=ss_family}
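
Every line in this new site file uses the shell set-if-unset expansion ${name=value}: the cached autoconf answer is seeded only when nothing has set the variable already, so values coming from the environment or an earlier site file take precedence. The siteinfo/autotools machinery points configure at files like this through the standard CONFIG_SITE mechanism; a minimal sketch of the idiom outside of bitbake, with an illustrative path:

    # set-if-unset: an existing answer is kept, otherwise the default is assigned
    ac_cv_func_malloc_0_nonnull=${ac_cv_func_malloc_0_nonnull=yes}
    echo "$ac_cv_func_malloc_0_nonnull"   # -> yes

    ac_cv_func_malloc_0_nonnull=no
    ac_cv_func_malloc_0_nonnull=${ac_cv_func_malloc_0_nonnull=yes}
    echo "$ac_cv_func_malloc_0_nonnull"   # -> still no, the earlier answer wins

    # how a configure script would consume the file directly:
    CONFIG_SITE=/path/to/meta/site/nios2-linux ./configure --host=nios2-linux
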