Diffstat (limited to 'meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/0003-Fix-libmctp-build-error.patch')
-rw-r--r-- meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/0003-Fix-libmctp-build-error.patch | 116
1 file changed, 116 insertions(+), 0 deletions(-)
diff --git a/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/0003-Fix-libmctp-build-error.patch b/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/0003-Fix-libmctp-build-error.patch
new file mode 100644
index 000000000..b9a6ca527
--- /dev/null
+++ b/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/0003-Fix-libmctp-build-error.patch
@@ -0,0 +1,116 @@
+From af414e45bade3cf7277215d82b59a31c9b459cea Mon Sep 17 00:00:00 2001
+From: Jae Hyun Yoo <jae.hyun.yoo@intel.com>
+Date: Mon, 22 Feb 2021 15:27:22 -0800
+Subject: [PATCH] Fix libmctp build error
+
+This is a quick fix for the libmctp build, which includes staging
+kernel headers. It is a temporary workaround until the kernel tree
+fixes the rwonce.h include issue.
+
+Note: Do not upstream it.
+
+Signed-off-by: Jae Hyun Yoo <jae.hyun.yoo@intel.com>
+---
+ include/asm/rwonce.h | 90 ++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 90 insertions(+)
+ create mode 100644 include/asm/rwonce.h
+
+diff --git a/include/asm/rwonce.h b/include/asm/rwonce.h
+new file mode 100644
+index 000000000000..11619bdbebae
+--- /dev/null
++++ b/include/asm/rwonce.h
+@@ -0,0 +1,90 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ * Prevent the compiler from merging or refetching reads or writes. The
++ * compiler is also forbidden from reordering successive instances of
++ * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
++ * particular ordering. One way to make the compiler aware of ordering is to
++ * put the two invocations of READ_ONCE or WRITE_ONCE in different C
++ * statements.
++ *
++ * These two macros will also work on aggregate data types like structs or
++ * unions.
++ *
++ * Their two major use cases are: (1) Mediating communication between
++ * process-level code and irq/NMI handlers, all running on the same CPU,
++ * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
++ * mutilate accesses that either do not require ordering or that interact
++ * with an explicit memory barrier or atomic instruction that provides the
++ * required ordering.
++ */
++#ifndef __ASM_GENERIC_RWONCE_H
++#define __ASM_GENERIC_RWONCE_H
++
++#ifndef __ASSEMBLY__
++
++#include <linux/compiler_types.h>
++#include <linux/kasan-checks.h>
++#include <linux/kcsan-checks.h>
++
++/*
++ * Yes, this permits 64-bit accesses on 32-bit architectures. These will
++ * actually be atomic in some cases (namely Armv7 + LPAE), but for others we
++ * rely on the access being split into 2x32-bit accesses for a 32-bit quantity
++ * (e.g. a virtual address) and a strong prevailing wind.
++ */
++#define compiletime_assert_rwonce_type(t) \
++ compiletime_assert(__native_word(t) || sizeof(t) == sizeof(long long), \
++ "Unsupported access size for {READ,WRITE}_ONCE().")
++
++/*
++ * Use __READ_ONCE() instead of READ_ONCE() if you do not require any
++ * atomicity. Note that this may result in tears!
++ */
++#ifndef __READ_ONCE
++#define __READ_ONCE(x) (*(const volatile __unqual_scalar_typeof(x) *)&(x))
++#endif
++
++#define READ_ONCE(x) \
++({ \
++ compiletime_assert_rwonce_type(x); \
++ __READ_ONCE(x); \
++})
++
++#define __WRITE_ONCE(x, val) \
++do { \
++ *(volatile typeof(x) *)&(x) = (val); \
++} while (0)
++
++#define WRITE_ONCE(x, val) \
++do { \
++ compiletime_assert_rwonce_type(x); \
++ __WRITE_ONCE(x, val); \
++} while (0)
++
++static __always_inline
++unsigned long __read_once_word_nocheck(const void *addr)
++{
++ return __READ_ONCE(*(unsigned long *)addr);
++}
++
++/*
++ * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need to load a
++ * word from memory atomically but without telling KASAN/KCSAN. This is
++ * usually used by unwinding code when walking the stack of a running process.
++ */
++#define READ_ONCE_NOCHECK(x) \
++({ \
++ compiletime_assert(sizeof(x) == sizeof(unsigned long), \
++ "Unsupported access size for READ_ONCE_NOCHECK()."); \
++ (typeof(x))__read_once_word_nocheck(&(x)); \
++})
++
++static __always_inline
++unsigned long read_word_at_a_time(const void *addr)
++{
++ kasan_check_read(addr, 1);
++ return *(unsigned long *)addr;
++}
++
++#endif /* __ASSEMBLY__ */
++#endif /* __ASM_GENERIC_RWONCE_H */
+--
+2.17.1
+
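
For reference, below is a minimal userspace sketch of what the READ_ONCE()/
WRITE_ONCE() macros in the header above guard against. The macro stand-ins
mirror the volatile casts of __READ_ONCE()/__WRITE_ONCE(); this is an
illustrative approximation (GCC typeof extension and POSIX threads assumed),
not kernel code, and it covers only the compiler-level guarantees the header
documents, not CPU memory ordering.

#include <pthread.h>
#include <stdio.h>

/* Stand-ins for the volatile-cast cores of __READ_ONCE()/__WRITE_ONCE()
 * above; the casts stop the compiler from caching, refetching, or fusing
 * the access. */
#define READ_ONCE(x)       (*(const volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, val) do { *(volatile typeof(x) *)&(x) = (val); } while (0)

static int done;

static void *worker(void *arg)
{
	(void)arg;
	/* Publish completion as a single store that cannot be optimized away. */
	WRITE_ONCE(done, 1);
	return NULL;
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, worker, NULL);

	/* Without READ_ONCE() the compiler may hoist this load out of the
	 * loop and spin forever on a stale register copy of 'done'. */
	while (!READ_ONCE(done))
		;

	pthread_join(tid, NULL);
	puts("worker finished");
	return 0;
}

Build with gcc -pthread. The loop is the classic failure case: with a plain
read of 'done', the compiler is entitled to load it once and never again.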
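The size gate enforced by compiletime_assert_rwonce_type() can be sketched in
plain C11 with _Static_assert. The names below are illustrative stand-ins (the
kernel uses its own compiletime_assert(), and __native_word() in
compiler_types.h performs the same char/short/int/long size comparison).

#include <stdio.h>

/* Illustrative stand-in for compiletime_assert_rwonce_type(): reject any
 * access that is neither a native word nor a 64-bit quantity, since those
 * cannot be performed as (at most) one or two machine accesses. */
#define ASSERT_RWONCE_TYPE(x)						\
	_Static_assert(sizeof(x) == sizeof(char)  ||			\
		       sizeof(x) == sizeof(short) ||			\
		       sizeof(x) == sizeof(int)   ||			\
		       sizeof(x) == sizeof(long)  ||			\
		       sizeof(x) == sizeof(long long),			\
		       "Unsupported access size for {READ,WRITE}_ONCE().")

struct big { long a, b, c; };

static int flag;
static struct big blob;

ASSERT_RWONCE_TYPE(flag);	/* compiles: int is a native word */
/* ASSERT_RWONCE_TYPE(blob); */	/* would fail: 3 longs cannot be one access */

int main(void)
{
	flag = 1;
	blob.a = flag;
	printf("flag=%d blob.a=%ld\n", flag, blob.a);
	return 0;
}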