Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig                              |    4
-rw-r--r--  lib/Kconfig.debug                        |   67
-rw-r--r--  lib/Kconfig.kasan                        |   39
-rw-r--r--  lib/Makefile                             |   14
-rw-r--r--  lib/bitmap.c                             |    4
-rw-r--r--  lib/bootconfig.c                         |   33
-rw-r--r--  lib/checksum.c                           |    4
-rw-r--r--  lib/crc64.c                              |    2
-rw-r--r--  lib/decompress_bunzip2.c                 |    2
-rw-r--r--  lib/decompress_unlzma.c                  |    6
-rw-r--r--  lib/devres.c                             |   17
-rw-r--r--  lib/dynamic_debug.c                      |  269
-rw-r--r--  lib/ioremap.c                            |  287
-rw-r--r--  lib/iov_iter.c                           |    3
-rw-r--r--  lib/kobject.c                            |   33
-rw-r--r--  lib/kstrtox.c                            |   12
-rw-r--r--  lib/kunit/kunit-test.c                   |  111
-rw-r--r--  lib/kunit/string-stream.c                |   14
-rw-r--r--  lib/kunit/test.c                         |  171
-rw-r--r--  lib/livepatch/Makefile                   |    4
-rw-r--r--  lib/livepatch/test_klp_callbacks_busy.c  |   37
-rw-r--r--  lib/livepatch/test_klp_shadow_vars.c     |  240
-rw-r--r--  lib/math/rational.c                      |    2
-rw-r--r--  lib/mpi/mpiutil.c                        |    6
-rw-r--r--  lib/percpu_counter.c                     |   19
-rw-r--r--  lib/pldmfw/Makefile                      |    2
-rw-r--r--  lib/pldmfw/pldmfw.c                      |  879
-rw-r--r--  lib/pldmfw/pldmfw_private.h              |  238
-rw-r--r--  lib/random32.c                           |    2
-rw-r--r--  lib/rbtree.c                             |    2
-rw-r--r--  lib/test_bitmap.c                        |   58
-rw-r--r--  lib/test_bitops.c                        |   18
-rw-r--r--  lib/test_bits.c                          |   75
-rw-r--r--  lib/test_bpf.c                           |   20
-rw-r--r--  lib/test_hmm.c                           |   47
-rw-r--r--  lib/test_hmm_uapi.h                      |    4
-rw-r--r--  lib/test_kasan.c                         |   87
-rw-r--r--  lib/test_kmod.c                          |    2
-rw-r--r--  lib/test_lockup.c                        |    6
-rw-r--r--  lib/ts_bm.c                              |    2
-rw-r--r--  lib/xxhash.c                             |    2
-rw-r--r--  lib/xz/xz_crc32.c                        |    2
-rw-r--r--  lib/xz/xz_dec_bcj.c                      |    2
-rw-r--r--  lib/xz/xz_dec_lzma2.c                    |    2
-rw-r--r--  lib/xz/xz_lzma2.h                        |    2
-rw-r--r--  lib/xz/xz_stream.h                       |    2
46 files changed, 2075 insertions(+), 779 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index a5d6f23c4cab..b4b98a03ff98 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -680,3 +680,7 @@ config GENERIC_LIB_CMPDI2
config GENERIC_LIB_UCMPDI2
bool
+
+config PLDMFW
+ bool
+ default n
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 3e64a8a809f9..e068c3c7189a 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -365,6 +365,17 @@ config SECTION_MISMATCH_WARN_ONLY
If unsure, say Y.
+config DEBUG_FORCE_FUNCTION_ALIGN_32B
+ bool "Force all function address 32B aligned" if EXPERT
+ help
+ There are cases that a commit from one domain changes the function
+ address alignment of other domains, and cause magic performance
+ bump (regression or improvement). Enable this option will help to
+ verify if the bump is caused by function alignment changes, while
+ it will slightly increase the kernel size and affect icache usage.
+
+ It is mainly for debug and performance tuning use.
+
#
# Select this config option from the architecture Kconfig, if it
# is preferred to always offer frame pointers as a config
@@ -476,6 +487,38 @@ config DEBUG_FS
If unsure, say N.
+choice
+ prompt "Debugfs default access"
+ depends on DEBUG_FS
+ default DEBUG_FS_ALLOW_ALL
+ help
+ This selects the default access restrictions for debugfs.
+ It can be overridden with kernel command line option
+ debugfs=[on,no-mount,off]. The restrictions apply for API access
+ and filesystem registration.
+
+config DEBUG_FS_ALLOW_ALL
+ bool "Access normal"
+ help
+ No restrictions apply. Both the API and filesystem registration
+ are enabled. This is the normal default operation.
+
+config DEBUG_FS_DISALLOW_MOUNT
+ bool "Do not register debugfs as filesystem"
+ help
+ The API is open but the filesystem is not registered. Clients can
+ still do their work and their data can be read with debug tools
+ that do not need the debugfs filesystem.
+
+config DEBUG_FS_ALLOW_NONE
+ bool "No access"
+ help
+ Access is off. Clients get -EPERM when trying to create nodes in
+ the debugfs tree and debugfs is not registered as a filesystem.
+ Clients can then back off or continue without debugfs access.
+
+endchoice
+
source "lib/Kconfig.kgdb"
source "lib/Kconfig.ubsan"
@@ -844,10 +887,10 @@ config DEBUG_SHIRQ
bool "Debug shared IRQ handlers"
depends on DEBUG_KERNEL
help
- Enable this to generate a spurious interrupt as soon as a shared
- interrupt handler is registered, and just before one is deregistered.
- Drivers ought to be able to handle interrupts coming in at those
- points; some don't and need to be caught.
+ Enable this to generate a spurious interrupt just before a shared
+ interrupt handler is deregistered (generating one when registering
+ is currently disabled). Drivers need to handle this correctly. Some
+ don't and need to be caught.
menu "Debug Oops, Lockups and Hangs"
@@ -874,7 +917,7 @@ config PANIC_TIMEOUT
int "panic timeout"
default 0
help
- Set the timeout value (in seconds) until a reboot occurs when the
+ Set the timeout value (in seconds) until a reboot occurs when
the kernel panics. If n = 0, then we wait forever. A timeout
value n > 0 will wait n seconds before rebooting, while a timeout
value n < 0 will reboot immediately.
@@ -1035,6 +1078,7 @@ config WQ_WATCHDOG
config TEST_LOCKUP
tristate "Test module to generate lockups"
+ depends on m
help
This builds the "test_lockup" module that helps to make sure
that watchdogs and lockup detectors are working properly.
@@ -2171,7 +2215,7 @@ config LIST_KUNIT_TEST
and associated macros.
KUnit tests run during boot and output the results to the debug log
- in TAP format (http://testanything.org/). Only useful for kernel devs
+ in TAP format (https://testanything.org/). Only useful for kernel devs
running the KUnit test harness, and not intended for inclusion into a
production build.
@@ -2192,6 +2236,17 @@ config LINEAR_RANGES_TEST
If unsure, say N.
+config BITS_TEST
+ tristate "KUnit test for bits.h"
+ depends on KUNIT
+ help
+ This builds the bits unit test.
+ Tests the logic of macros defined in bits.h.
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
config TEST_UDELAY
tristate "udelay test driver"
help
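
The new BITS_TEST entry builds lib/test_bits.c (visible in the diffstat above) as a KUnit suite. That file is not part of this hunk; the following is only an illustrative sketch of what a bits.h check looks like as a KUnit test:

#include <kunit/test.h>
#include <linux/bits.h>

static void genmask_example(struct kunit *test)
{
	KUNIT_EXPECT_EQ(test, 1ul, GENMASK(0, 0));
	KUNIT_EXPECT_EQ(test, 0xfful, GENMASK(7, 0));
	KUNIT_EXPECT_EQ(test, 0xf0ul, GENMASK(7, 4));
}

static struct kunit_case bits_example_cases[] = {
	KUNIT_CASE(genmask_example),
	{}
};

static struct kunit_suite bits_example_suite = {
	.name = "bits-example",
	.test_cases = bits_example_cases,
};
kunit_test_suite(bits_example_suite);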
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index 34b84bcbd3d9..047b53dbfd58 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -18,7 +18,7 @@ config CC_HAS_KASAN_SW_TAGS
config CC_HAS_WORKING_NOSANITIZE_ADDRESS
def_bool !CC_IS_GCC || GCC_VERSION >= 80300
-config KASAN
+menuconfig KASAN
bool "KASAN: runtime memory debugger"
depends on (HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC) || \
(HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS)
@@ -29,9 +29,10 @@ config KASAN
designed to find out-of-bounds accesses and use-after-free bugs.
See Documentation/dev-tools/kasan.rst for details.
+if KASAN
+
choice
prompt "KASAN mode"
- depends on KASAN
default KASAN_GENERIC
help
KASAN has two modes: generic KASAN (similar to userspace ASan,
@@ -39,6 +40,7 @@ choice
software tag-based KASAN (a version based on software memory
tagging, arm64 only, similar to userspace HWASan, enabled with
CONFIG_KASAN_SW_TAGS).
+
Both generic and tag-based KASAN are strictly debugging features.
config KASAN_GENERIC
@@ -50,16 +52,18 @@ config KASAN_GENERIC
select STACKDEPOT
help
Enables generic KASAN mode.
- Supported in both GCC and Clang. With GCC it requires version 4.9.2
- or later for basic support and version 5.0 or later for detection of
- out-of-bounds accesses for stack and global variables and for inline
- instrumentation mode (CONFIG_KASAN_INLINE). With Clang it requires
- version 3.7.0 or later and it doesn't support detection of
- out-of-bounds accesses for global variables yet.
+
+ This mode is supported in both GCC and Clang. With GCC it requires
+ version 8.3.0 or later. With Clang it requires version 7.0.0 or
+ later, but detection of out-of-bounds accesses for global variables
+ is supported only since Clang 11.
+
This mode consumes about 1/8th of available memory at kernel start
and introduces an overhead of ~x1.5 for the rest of the allocations.
The performance slowdown is ~x3.
+
For better error detection enable CONFIG_STACKTRACE.
+
Currently CONFIG_KASAN_GENERIC doesn't work with CONFIG_DEBUG_SLAB
(the resulting kernel does not boot).
@@ -72,15 +76,19 @@ config KASAN_SW_TAGS
select STACKDEPOT
help
Enables software tag-based KASAN mode.
+
This mode requires Top Byte Ignore support by the CPU and therefore
- is only supported for arm64.
- This mode requires Clang version 7.0.0 or later.
+ is only supported for arm64. This mode requires Clang version 7.0.0
+ or later.
+
This mode consumes about 1/16th of available memory at kernel start
and introduces an overhead of ~20% for the rest of the allocations.
This mode may potentially introduce problems relating to pointer
casting and comparison, as it embeds tags into the top byte of each
pointer.
+
For better error detection enable CONFIG_STACKTRACE.
+
Currently CONFIG_KASAN_SW_TAGS doesn't work with CONFIG_DEBUG_SLAB
(the resulting kernel does not boot).
@@ -88,7 +96,6 @@ endchoice
choice
prompt "Instrumentation type"
- depends on KASAN
default KASAN_OUTLINE
config KASAN_OUTLINE
@@ -107,13 +114,11 @@ config KASAN_INLINE
memory accesses. This is faster than outline (in some workloads
it gives about x2 boost over outline instrumentation), but
make kernel's .text size much bigger.
- For CONFIG_KASAN_GENERIC this requires GCC 5.0 or later.
endchoice
config KASAN_STACK_ENABLE
bool "Enable stack instrumentation (unsafe)" if CC_IS_CLANG && !COMPILE_TEST
- depends on KASAN
help
	  The LLVM stack address sanitizer has a known problem that
causes excessive stack usage in a lot of functions, see
@@ -134,7 +139,7 @@ config KASAN_STACK
config KASAN_S390_4_LEVEL_PAGING
bool "KASan: use 4-level paging"
- depends on KASAN && S390
+ depends on S390
help
Compiling the kernel with KASan disables automatic 3-level vs
4-level paging selection. 3-level paging is used by default (up
@@ -151,7 +156,7 @@ config KASAN_SW_TAGS_IDENTIFY
config KASAN_VMALLOC
bool "Back mappings in vmalloc space with real shadow memory"
- depends on KASAN && HAVE_ARCH_KASAN_VMALLOC
+ depends on HAVE_ARCH_KASAN_VMALLOC
help
By default, the shadow region for vmalloc space is the read-only
zero page. This means that KASAN cannot detect errors involving
@@ -164,8 +169,10 @@ config KASAN_VMALLOC
config TEST_KASAN
tristate "Module for testing KASAN for bug detection"
- depends on m && KASAN
+ depends on m
help
This is a test module doing various nasty things like
out of bounds accesses, use after free. It is useful for testing
kernel debugging features like KASAN.
+
+endif # KASAN
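
The TEST_KASAN help retained above describes a module that deliberately performs invalid accesses so KASAN's reporting can be exercised. As an illustration only (not code taken from test_kasan.c), an out-of-bounds write of the kind it provokes looks like this:

#include <linux/slab.h>

static noinline void example_oob_write(void)
{
	char *ptr = kmalloc(8, GFP_KERNEL);

	if (!ptr)
		return;

	ptr[8] = 'x';	/* one byte past the end of the allocation */
	kfree(ptr);
}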
diff --git a/lib/Makefile b/lib/Makefile
index 7d24dd73e34c..e290fc5707ea 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -3,10 +3,7 @@
# Makefile for some libs needed in the kernel.
#
-ifdef CONFIG_FUNCTION_TRACER
-ORIG_CFLAGS := $(KBUILD_CFLAGS)
-KBUILD_CFLAGS = $(subst $(CC_FLAGS_FTRACE),,$(ORIG_CFLAGS))
-endif
+ccflags-remove-$(CONFIG_FUNCTION_TRACER) += $(CC_FLAGS_FTRACE)
# These files are disabled because they produce lots of non-interesting and/or
# flaky coverage that is not a function of syscall inputs. For example,
@@ -22,7 +19,7 @@ KCOV_INSTRUMENT_fault-inject.o := n
ifdef CONFIG_AMD_MEM_ENCRYPT
KASAN_SANITIZE_string.o := n
-CFLAGS_string.o := $(call cc-option, -fno-stack-protector)
+CFLAGS_string.o := -fno-stack-protector
endif
# Used by KCSAN while enabled, avoid recursion.
@@ -37,7 +34,6 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
nmi_backtrace.o nodemask.o win_minmax.o memcat_p.o
lib-$(CONFIG_PRINTK) += dump_stack.o
-lib-$(CONFIG_MMU) += ioremap.o
lib-$(CONFIG_SMP) += cpumask.o
lib-y += kobject.o klist.o
@@ -325,7 +321,7 @@ endif
UBSAN_SANITIZE_ubsan.o := n
KASAN_SANITIZE_ubsan.o := n
KCSAN_SANITIZE_ubsan.o := n
-CFLAGS_ubsan.o := $(call cc-option, -fno-stack-protector) $(DISABLE_STACKLEAK_PLUGIN)
+CFLAGS_ubsan.o := -fno-stack-protector $(DISABLE_STACKLEAK_PLUGIN)
obj-$(CONFIG_SBITMAP) += sbitmap.o
@@ -340,6 +336,10 @@ obj-$(CONFIG_GENERIC_LIB_CMPDI2) += cmpdi2.o
obj-$(CONFIG_GENERIC_LIB_UCMPDI2) += ucmpdi2.o
obj-$(CONFIG_OBJAGG) += objagg.o
+# pldmfw library
+obj-$(CONFIG_PLDMFW) += pldmfw/
+
# KUnit tests
obj-$(CONFIG_LIST_KUNIT_TEST) += list-test.o
obj-$(CONFIG_LINEAR_RANGES_TEST) += test_linear_ranges.o
+obj-$(CONFIG_BITS_TEST) += test_bits.o
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 0364452b1617..c13d859bc7ab 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -212,13 +212,13 @@ void bitmap_cut(unsigned long *dst, const unsigned long *src,
unsigned long keep = 0, carry;
int i;
- memmove(dst, src, len * sizeof(*dst));
-
if (first % BITS_PER_LONG) {
keep = src[first / BITS_PER_LONG] &
(~0UL >> (BITS_PER_LONG - first % BITS_PER_LONG));
}
+ memmove(dst, src, len * sizeof(*dst));
+
while (cut--) {
for (i = first / BITS_PER_LONG; i < len; i++) {
if (i < len - 1)
diff --git a/lib/bootconfig.c b/lib/bootconfig.c
index 912ef4921398..a5f701161f6b 100644
--- a/lib/bootconfig.c
+++ b/lib/bootconfig.c
@@ -329,22 +329,30 @@ const char * __init xbc_node_find_next_key_value(struct xbc_node *root,
/* XBC parse and tree build */
+static int __init xbc_init_node(struct xbc_node *node, char *data, u32 flag)
+{
+ unsigned long offset = data - xbc_data;
+
+ if (WARN_ON(offset >= XBC_DATA_MAX))
+ return -EINVAL;
+
+ node->data = (u16)offset | flag;
+ node->child = 0;
+ node->next = 0;
+
+ return 0;
+}
+
static struct xbc_node * __init xbc_add_node(char *data, u32 flag)
{
struct xbc_node *node;
- unsigned long offset;
if (xbc_node_num == XBC_NODE_MAX)
return NULL;
node = &xbc_nodes[xbc_node_num++];
- offset = data - xbc_data;
- node->data = (u16)offset;
- if (WARN_ON(offset >= XBC_DATA_MAX))
+ if (xbc_init_node(node, data, flag) < 0)
return NULL;
- node->data |= flag;
- node->child = 0;
- node->next = 0;
return node;
}
@@ -603,7 +611,9 @@ static int __init xbc_parse_kv(char **k, char *v, int op)
if (c < 0)
return c;
- if (!xbc_add_sibling(v, XBC_VALUE))
+ if (op == ':' && child) {
+ xbc_init_node(child, v, XBC_VALUE);
+ } else if (!xbc_add_sibling(v, XBC_VALUE))
return -ENOMEM;
if (c == ',') { /* Array */
@@ -787,7 +797,7 @@ int __init xbc_init(char *buf, const char **emsg, int *epos)
p = buf;
do {
- q = strpbrk(p, "{}=+;\n#");
+ q = strpbrk(p, "{}=+;:\n#");
if (!q) {
p = skip_spaces(p);
if (*p != '\0')
@@ -798,9 +808,12 @@ int __init xbc_init(char *buf, const char **emsg, int *epos)
c = *q;
*q++ = '\0';
switch (c) {
+ case ':':
case '+':
if (*q++ != '=') {
- ret = xbc_parse_error("Wrong '+' operator",
+ ret = xbc_parse_error(c == '+' ?
+ "Wrong '+' operator" :
+ "Wrong ':' operator",
q - 2);
break;
}
diff --git a/lib/checksum.c b/lib/checksum.c
index 7ac65a0000ff..c7861e84c526 100644
--- a/lib/checksum.c
+++ b/lib/checksum.c
@@ -149,12 +149,12 @@ EXPORT_SYMBOL(ip_compute_csum);
* copy from ds while checksumming, otherwise like csum_partial
*/
__wsum
-csum_partial_copy(const void *src, void *dst, int len, __wsum sum)
+csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
{
memcpy(dst, src, len);
return csum_partial(dst, len, sum);
}
-EXPORT_SYMBOL(csum_partial_copy);
+EXPORT_SYMBOL(csum_partial_copy_nocheck);
#ifndef csum_tcpudp_nofold
static inline u32 from64to32(u64 x)
diff --git a/lib/crc64.c b/lib/crc64.c
index f8928ce28280..47cfa054827f 100644
--- a/lib/crc64.c
+++ b/lib/crc64.c
@@ -4,7 +4,7 @@
*
* This is a basic crc64 implementation following ECMA-182 specification,
* which can be found from,
- * http://www.ecma-international.org/publications/standards/Ecma-182.htm
+ * https://www.ecma-international.org/publications/standards/Ecma-182.htm
*
* Dr. Ross N. Williams has a great document to introduce the idea of CRC
* algorithm, here the CRC64 code is also inspired by the table-driven
diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
index 7c4932eed748..f9628f3924ce 100644
--- a/lib/decompress_bunzip2.c
+++ b/lib/decompress_bunzip2.c
@@ -34,7 +34,7 @@
Phone (337) 232-1234 or 1-800-738-2226
Fax (337) 232-1297
- http://www.hospiceacadiana.com/
+ https://www.hospiceacadiana.com/
Manuel
*/
diff --git a/lib/decompress_unlzma.c b/lib/decompress_unlzma.c
index ed7a1fd819f2..1cf409ef8d04 100644
--- a/lib/decompress_unlzma.c
+++ b/lib/decompress_unlzma.c
@@ -8,7 +8,7 @@
*implementation for lzma.
*Copyright (C) 2006 Aurelien Jacobs < aurel@gnuage.org >
*
- *Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/)
+ *Based on LzmaDecode.c from the LZMA SDK 4.22 (https://www.7-zip.org/)
*Copyright (C) 1999-2005 Igor Pavlov
*
*Copyrights of the parts, see headers below.
@@ -56,7 +56,7 @@ static long long INIT read_int(unsigned char *ptr, int size)
/* Small range coder implementation for lzma.
*Copyright (C) 2006 Aurelien Jacobs < aurel@gnuage.org >
*
- *Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/)
+ *Based on LzmaDecode.c from the LZMA SDK 4.22 (https://www.7-zip.org/)
*Copyright (c) 1999-2005 Igor Pavlov
*/
@@ -213,7 +213,7 @@ rc_bit_tree_decode(struct rc *rc, uint16_t *p, int num_levels, int *symbol)
* Small lzma deflate implementation.
* Copyright (C) 2006 Aurelien Jacobs < aurel@gnuage.org >
*
- * Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/)
+ * Based on LzmaDecode.c from the LZMA SDK 4.22 (https://www.7-zip.org/)
* Copyright (C) 1999-2005 Igor Pavlov
*/
diff --git a/lib/devres.c b/lib/devres.c
index 6ef51f159c54..ebb1573d9ae3 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -119,6 +119,7 @@ __devm_ioremap_resource(struct device *dev, const struct resource *res,
{
resource_size_t size;
void __iomem *dest_ptr;
+ char *pretty_name;
BUG_ON(!dev);
@@ -129,7 +130,15 @@ __devm_ioremap_resource(struct device *dev, const struct resource *res,
size = resource_size(res);
- if (!devm_request_mem_region(dev, res->start, size, dev_name(dev))) {
+ if (res->name)
+ pretty_name = devm_kasprintf(dev, GFP_KERNEL, "%s %s",
+ dev_name(dev), res->name);
+ else
+ pretty_name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
+ if (!pretty_name)
+ return IOMEM_ERR_PTR(-ENOMEM);
+
+ if (!devm_request_mem_region(dev, res->start, size, pretty_name)) {
dev_err(dev, "can't request region for resource %pR\n", res);
return IOMEM_ERR_PTR(-EBUSY);
}
@@ -204,6 +213,12 @@ void __iomem *devm_ioremap_resource_wc(struct device *dev,
* base = devm_of_iomap(&pdev->dev, node, 0, NULL);
* if (IS_ERR(base))
* return PTR_ERR(base);
+ *
+ * Please Note: This is not a one-to-one replacement for of_iomap() because the
+ * of_iomap() function does not track whether the region is already mapped. If
+ * two drivers try to map the same memory, the of_iomap() function will succeed
+ * but the devm_of_iomap() function will return -EBUSY.
+ *
*/
void __iomem *devm_of_iomap(struct device *dev, struct device_node *node, int index,
resource_size_t *size)
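
Building on the note added to the devm_of_iomap() kerneldoc, a caller that wants to report the double-mapping case explicitly might look like the following sketch; foo_probe_iomap() is an invented helper, not part of the patch:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int foo_probe_iomap(struct platform_device *pdev,
			   struct device_node *node)
{
	void __iomem *base;

	base = devm_of_iomap(&pdev->dev, node, 0, NULL);
	if (IS_ERR(base)) {
		/* unlike of_iomap(), a second mapping of the region fails */
		if (PTR_ERR(base) == -EBUSY)
			dev_warn(&pdev->dev, "resource already claimed\n");
		return PTR_ERR(base);
	}

	return 0;
}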
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 321437bbf87d..1d012e597cc3 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -11,7 +11,7 @@
* Copyright (C) 2013 Du, Changbin <changbin.du@gmail.com>
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
+#define pr_fmt(fmt) "dyndbg: " fmt
#include <linux/kernel.h>
#include <linux/module.h>
@@ -39,8 +39,8 @@
#include <rdma/ib_verbs.h>
-extern struct _ddebug __start___verbose[];
-extern struct _ddebug __stop___verbose[];
+extern struct _ddebug __start___dyndbg[];
+extern struct _ddebug __stop___dyndbg[];
struct ddebug_table {
struct list_head link;
@@ -62,6 +62,11 @@ struct ddebug_iter {
unsigned int idx;
};
+struct flag_settings {
+ unsigned int flags;
+ unsigned int mask;
+};
+
static DEFINE_MUTEX(ddebug_lock);
static LIST_HEAD(ddebug_tables);
static int verbose;
@@ -87,30 +92,33 @@ static struct { unsigned flag:8; char opt_char; } opt_array[] = {
{ _DPRINTK_FLAGS_NONE, '_' },
};
+struct flagsbuf { char buf[ARRAY_SIZE(opt_array)+1]; };
+
/* format a string into buf[] which describes the _ddebug's flags */
-static char *ddebug_describe_flags(struct _ddebug *dp, char *buf,
- size_t maxlen)
+static char *ddebug_describe_flags(unsigned int flags, struct flagsbuf *fb)
{
- char *p = buf;
+ char *p = fb->buf;
int i;
- BUG_ON(maxlen < 6);
for (i = 0; i < ARRAY_SIZE(opt_array); ++i)
- if (dp->flags & opt_array[i].flag)
+ if (flags & opt_array[i].flag)
*p++ = opt_array[i].opt_char;
- if (p == buf)
+ if (p == fb->buf)
*p++ = '_';
*p = '\0';
- return buf;
+ return fb->buf;
}
-#define vpr_info(fmt, ...) \
+#define vnpr_info(lvl, fmt, ...) \
do { \
- if (verbose) \
+ if (verbose >= lvl) \
pr_info(fmt, ##__VA_ARGS__); \
} while (0)
+#define vpr_info(fmt, ...) vnpr_info(1, fmt, ##__VA_ARGS__)
+#define v2pr_info(fmt, ...) vnpr_info(2, fmt, ##__VA_ARGS__)
+
static void vpr_info_dq(const struct ddebug_query *query, const char *msg)
{
/* trim any trailing newlines */
@@ -124,10 +132,10 @@ static void vpr_info_dq(const struct ddebug_query *query, const char *msg)
vpr_info("%s: func=\"%s\" file=\"%s\" module=\"%s\" format=\"%.*s\" lineno=%u-%u\n",
msg,
- query->function ? query->function : "",
- query->filename ? query->filename : "",
- query->module ? query->module : "",
- fmtlen, query->format ? query->format : "",
+ query->function ?: "",
+ query->filename ?: "",
+ query->module ?: "",
+ fmtlen, query->format ?: "",
query->first_lineno, query->last_lineno);
}
@@ -138,13 +146,13 @@ static void vpr_info_dq(const struct ddebug_query *query, const char *msg)
* logs the changes. Takes ddebug_lock.
*/
static int ddebug_change(const struct ddebug_query *query,
- unsigned int flags, unsigned int mask)
+ struct flag_settings *modifiers)
{
int i;
struct ddebug_table *dt;
unsigned int newflags;
unsigned int nfound = 0;
- char flagbuf[10];
+ struct flagsbuf fbuf;
/* search for matching ddebugs */
mutex_lock(&ddebug_lock);
@@ -173,9 +181,16 @@ static int ddebug_change(const struct ddebug_query *query,
continue;
/* match against the format */
- if (query->format &&
- !strstr(dp->format, query->format))
- continue;
+ if (query->format) {
+ if (*query->format == '^') {
+ char *p;
+ /* anchored search. match must be at beginning */
+ p = strstr(dp->format, query->format+1);
+ if (p != dp->format)
+ continue;
+ } else if (!strstr(dp->format, query->format))
+ continue;
+ }
/* match against the line number range */
if (query->first_lineno &&
@@ -187,22 +202,21 @@ static int ddebug_change(const struct ddebug_query *query,
nfound++;
- newflags = (dp->flags & mask) | flags;
+ newflags = (dp->flags & modifiers->mask) | modifiers->flags;
if (newflags == dp->flags)
continue;
#ifdef CONFIG_JUMP_LABEL
if (dp->flags & _DPRINTK_FLAGS_PRINT) {
- if (!(flags & _DPRINTK_FLAGS_PRINT))
+ if (!(modifiers->flags & _DPRINTK_FLAGS_PRINT))
static_branch_disable(&dp->key.dd_key_true);
- } else if (flags & _DPRINTK_FLAGS_PRINT)
+ } else if (modifiers->flags & _DPRINTK_FLAGS_PRINT)
static_branch_enable(&dp->key.dd_key_true);
#endif
dp->flags = newflags;
- vpr_info("changed %s:%d [%s]%s =%s\n",
+ v2pr_info("changed %s:%d [%s]%s =%s\n",
trim_prefix(dp->filename), dp->lineno,
dt->mod_name, dp->function,
- ddebug_describe_flags(dp, flagbuf,
- sizeof(flagbuf)));
+ ddebug_describe_flags(dp->flags, &fbuf));
}
}
mutex_unlock(&ddebug_lock);
@@ -289,6 +303,41 @@ static inline int parse_lineno(const char *str, unsigned int *val)
return 0;
}
+static int parse_linerange(struct ddebug_query *query, const char *first)
+{
+ char *last = strchr(first, '-');
+
+ if (query->first_lineno || query->last_lineno) {
+ pr_err("match-spec: line used 2x\n");
+ return -EINVAL;
+ }
+ if (last)
+ *last++ = '\0';
+ if (parse_lineno(first, &query->first_lineno) < 0)
+ return -EINVAL;
+ if (last) {
+ /* range <first>-<last> */
+ if (parse_lineno(last, &query->last_lineno) < 0)
+ return -EINVAL;
+
+ /* special case for last lineno not specified */
+ if (query->last_lineno == 0)
+ query->last_lineno = UINT_MAX;
+
+ if (query->last_lineno < query->first_lineno) {
+ pr_err("last-line:%d < 1st-line:%d\n",
+ query->last_lineno,
+ query->first_lineno);
+ return -EINVAL;
+ }
+ } else {
+ query->last_lineno = query->first_lineno;
+ }
+ vpr_info("parsed line %d-%d\n", query->first_lineno,
+ query->last_lineno);
+ return 0;
+}
+
static int check_set(const char **dest, char *src, char *name)
{
int rc = 0;
@@ -304,7 +353,8 @@ static int check_set(const char **dest, char *src, char *name)
/*
* Parse words[] as a ddebug query specification, which is a series
- * of (keyword, value) pairs chosen from these possibilities:
+ * of (keyword, value) pairs or combined keyword=value terms,
+ * chosen from these possibilities:
*
* func <function-name>
* file <full-pathname>
@@ -322,61 +372,62 @@ static int ddebug_parse_query(char *words[], int nwords,
{
unsigned int i;
int rc = 0;
-
- /* check we have an even number of words */
- if (nwords % 2 != 0) {
- pr_err("expecting pairs of match-spec <value>\n");
- return -EINVAL;
- }
- memset(query, 0, sizeof(*query));
+ char *fline;
+ char *keyword, *arg;
if (modname)
/* support $modname.dyndbg=<multiple queries> */
query->module = modname;
- for (i = 0; i < nwords; i += 2) {
- if (!strcmp(words[i], "func")) {
- rc = check_set(&query->function, words[i+1], "func");
- } else if (!strcmp(words[i], "file")) {
- rc = check_set(&query->filename, words[i+1], "file");
- } else if (!strcmp(words[i], "module")) {
- rc = check_set(&query->module, words[i+1], "module");
- } else if (!strcmp(words[i], "format")) {
- string_unescape_inplace(words[i+1], UNESCAPE_SPACE |
- UNESCAPE_OCTAL |
- UNESCAPE_SPECIAL);
- rc = check_set(&query->format, words[i+1], "format");
- } else if (!strcmp(words[i], "line")) {
- char *first = words[i+1];
- char *last = strchr(first, '-');
- if (query->first_lineno || query->last_lineno) {
- pr_err("match-spec: line used 2x\n");
+ for (i = 0; i < nwords; i++) {
+ /* accept keyword=arg */
+ vpr_info("%d w:%s\n", i, words[i]);
+
+ keyword = words[i];
+ arg = strchr(keyword, '=');
+ if (arg) {
+ *arg++ = '\0';
+ } else {
+ i++; /* next word is arg */
+ if (!(i < nwords)) {
+ pr_err("missing arg to keyword: %s\n", keyword);
return -EINVAL;
}
- if (last)
- *last++ = '\0';
- if (parse_lineno(first, &query->first_lineno) < 0)
- return -EINVAL;
- if (last) {
- /* range <first>-<last> */
- if (parse_lineno(last, &query->last_lineno) < 0)
- return -EINVAL;
+ arg = words[i];
+ }
+ vpr_info("%d key:%s arg:%s\n", i, keyword, arg);
- /* special case for last lineno not specified */
- if (query->last_lineno == 0)
- query->last_lineno = UINT_MAX;
+ if (!strcmp(keyword, "func")) {
+ rc = check_set(&query->function, arg, "func");
+ } else if (!strcmp(keyword, "file")) {
+ if (check_set(&query->filename, arg, "file"))
+ return -EINVAL;
- if (query->last_lineno < query->first_lineno) {
- pr_err("last-line:%d < 1st-line:%d\n",
- query->last_lineno,
- query->first_lineno);
+ /* tail :$info is function or line-range */
+ fline = strchr(query->filename, ':');
+ if (!fline)
+ break;
+ *fline++ = '\0';
+ if (isalpha(*fline) || *fline == '*' || *fline == '?') {
+ /* take as function name */
+ if (check_set(&query->function, fline, "func"))
return -EINVAL;
- }
} else {
- query->last_lineno = query->first_lineno;
+ if (parse_linerange(query, fline))
+ return -EINVAL;
}
+ } else if (!strcmp(keyword, "module")) {
+ rc = check_set(&query->module, arg, "module");
+ } else if (!strcmp(keyword, "format")) {
+ string_unescape_inplace(arg, UNESCAPE_SPACE |
+ UNESCAPE_OCTAL |
+ UNESCAPE_SPECIAL);
+ rc = check_set(&query->format, arg, "format");
+ } else if (!strcmp(keyword, "line")) {
+ if (parse_linerange(query, arg))
+ return -EINVAL;
} else {
- pr_err("unknown keyword \"%s\"\n", words[i]);
+ pr_err("unknown keyword \"%s\"\n", keyword);
return -EINVAL;
}
if (rc)
@@ -392,11 +443,9 @@ static int ddebug_parse_query(char *words[], int nwords,
* flags fields of matched _ddebug's. Returns 0 on success
* or <0 on error.
*/
-static int ddebug_parse_flags(const char *str, unsigned int *flagsp,
- unsigned int *maskp)
+static int ddebug_parse_flags(const char *str, struct flag_settings *modifiers)
{
- unsigned flags = 0;
- int op = '=', i;
+ int op, i;
switch (*str) {
case '+':
@@ -413,40 +462,40 @@ static int ddebug_parse_flags(const char *str, unsigned int *flagsp,
for (; *str ; ++str) {
for (i = ARRAY_SIZE(opt_array) - 1; i >= 0; i--) {
if (*str == opt_array[i].opt_char) {
- flags |= opt_array[i].flag;
+ modifiers->flags |= opt_array[i].flag;
break;
}
}
if (i < 0) {
- pr_err("unknown flag '%c' in \"%s\"\n", *str, str);
+ pr_err("unknown flag '%c'\n", *str);
return -EINVAL;
}
}
- vpr_info("flags=0x%x\n", flags);
+ vpr_info("flags=0x%x\n", modifiers->flags);
- /* calculate final *flagsp, *maskp according to mask and op */
+ /* calculate final flags, mask based upon op */
switch (op) {
case '=':
- *maskp = 0;
- *flagsp = flags;
+ /* modifiers->flags already set */
+ modifiers->mask = 0;
break;
case '+':
- *maskp = ~0U;
- *flagsp = flags;
+ modifiers->mask = ~0U;
break;
case '-':
- *maskp = ~flags;
- *flagsp = 0;
+ modifiers->mask = ~modifiers->flags;
+ modifiers->flags = 0;
break;
}
- vpr_info("*flagsp=0x%x *maskp=0x%x\n", *flagsp, *maskp);
+ vpr_info("*flagsp=0x%x *maskp=0x%x\n", modifiers->flags, modifiers->mask);
+
return 0;
}
static int ddebug_exec_query(char *query_string, const char *modname)
{
- unsigned int flags = 0, mask = 0;
- struct ddebug_query query;
+ struct flag_settings modifiers = {};
+ struct ddebug_query query = {};
#define MAXWORDS 9
int nwords, nfound;
char *words[MAXWORDS];
@@ -457,7 +506,7 @@ static int ddebug_exec_query(char *query_string, const char *modname)
return -EINVAL;
}
/* check flags 1st (last arg) so query is pairs of spec,val */
- if (ddebug_parse_flags(words[nwords-1], &flags, &mask)) {
+ if (ddebug_parse_flags(words[nwords-1], &modifiers)) {
pr_err("flags parse failed\n");
return -EINVAL;
}
@@ -466,7 +515,7 @@ static int ddebug_exec_query(char *query_string, const char *modname)
return -EINVAL;
}
/* actually go and implement the change */
- nfound = ddebug_change(&query, flags, mask);
+ nfound = ddebug_change(&query, &modifiers);
vpr_info_dq(&query, nfound ? "applied" : "no-match");
return nfound;
@@ -476,7 +525,7 @@ static int ddebug_exec_query(char *query_string, const char *modname)
last error or number of matching callsites. Module name is either
in param (for boot arg) or perhaps in query string.
*/
-static int ddebug_exec_queries(char *query, const char *modname)
+int ddebug_exec_queries(char *query, const char *modname)
{
char *split;
int i, errs = 0, exitcode = 0, rc, nfound = 0;
@@ -508,6 +557,7 @@ static int ddebug_exec_queries(char *query, const char *modname)
return exitcode;
return nfound;
}
+EXPORT_SYMBOL_GPL(ddebug_exec_queries);
#define PREFIX_SIZE 64
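
With ddebug_exec_queries() exported above, and the parser rework accepting combined keyword=value terms, file:line-range and file:function suffixes, and '^'-anchored format matches, a module can apply queries programmatically. This is only a usage sketch: the query strings are illustrative, and it assumes the prototype added alongside this change is visible via <linux/dynamic_debug.h>:

#include <linux/dynamic_debug.h>
#include <linux/module.h>

static int example_enable_debug(void)
{
	/* queries are tokenized in place, so the buffers must be writable */
	char q1[] = "file drivers/foo/foo.c:100-200 +p";
	char q2[] = "func=foo_probe +pmf";
	char q3[] = "format ^foo: +p";
	int matched;

	matched = ddebug_exec_queries(q1, NULL);
	if (matched < 0)
		return matched;

	ddebug_exec_queries(q2, KBUILD_MODNAME);
	ddebug_exec_queries(q3, NULL);

	return 0;
}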
@@ -771,8 +821,6 @@ static void *ddebug_proc_start(struct seq_file *m, loff_t *pos)
struct _ddebug *dp;
int n = *pos;
- vpr_info("called m=%p *pos=%lld\n", m, (unsigned long long)*pos);
-
mutex_lock(&ddebug_lock);
if (!n)
@@ -795,9 +843,6 @@ static void *ddebug_proc_next(struct seq_file *m, void *p, loff_t *pos)
struct ddebug_iter *iter = m->private;
struct _ddebug *dp;
- vpr_info("called m=%p p=%p *pos=%lld\n",
- m, p, (unsigned long long)*pos);
-
if (p == SEQ_START_TOKEN)
dp = ddebug_iter_first(iter);
else
@@ -816,9 +861,7 @@ static int ddebug_proc_show(struct seq_file *m, void *p)
{
struct ddebug_iter *iter = m->private;
struct _ddebug *dp = p;
- char flagsbuf[10];
-
- vpr_info("called m=%p p=%p\n", m, p);
+ struct flagsbuf flags;
if (p == SEQ_START_TOKEN) {
seq_puts(m,
@@ -829,7 +872,7 @@ static int ddebug_proc_show(struct seq_file *m, void *p)
seq_printf(m, "%s:%u [%s]%s =%s \"",
trim_prefix(dp->filename), dp->lineno,
iter->table->mod_name, dp->function,
- ddebug_describe_flags(dp, flagsbuf, sizeof(flagsbuf)));
+ ddebug_describe_flags(dp->flags, &flags));
seq_escape(m, dp->format, "\t\r\n\"");
seq_puts(m, "\"\n");
@@ -842,7 +885,6 @@ static int ddebug_proc_show(struct seq_file *m, void *p)
*/
static void ddebug_proc_stop(struct seq_file *m, void *p)
{
- vpr_info("called m=%p p=%p\n", m, p);
mutex_unlock(&ddebug_lock);
}
@@ -853,13 +895,6 @@ static const struct seq_operations ddebug_proc_seqops = {
.stop = ddebug_proc_stop
};
-/*
- * File_ops->open method for <debugfs>/dynamic_debug/control. Does
- * the seq_file setup dance, and also creates an iterator to walk the
- * _ddebugs. Note that we create a seq_file always, even for O_WRONLY
- * files where it's not needed, as doing so simplifies the ->release
- * method.
- */
static int ddebug_proc_open(struct inode *inode, struct file *file)
{
vpr_info("called\n");
@@ -909,10 +944,10 @@ int ddebug_add_module(struct _ddebug *tab, unsigned int n,
dt->ddebugs = tab;
mutex_lock(&ddebug_lock);
- list_add_tail(&dt->link, &ddebug_tables);
+ list_add(&dt->link, &ddebug_tables);
mutex_unlock(&ddebug_lock);
- vpr_info("%u debug prints in module %s\n", n, dt->mod_name);
+ v2pr_info("%u debug prints in module %s\n", n, dt->mod_name);
return 0;
}
@@ -971,7 +1006,7 @@ int ddebug_remove_module(const char *mod_name)
struct ddebug_table *dt, *nextdt;
int ret = -ENOENT;
- vpr_info("removing module \"%s\"\n", mod_name);
+ v2pr_info("removing module \"%s\"\n", mod_name);
mutex_lock(&ddebug_lock);
list_for_each_entry_safe(dt, nextdt, &ddebug_tables, link) {
@@ -1029,9 +1064,8 @@ static int __init dynamic_debug_init(void)
char *cmdline;
int ret = 0;
int n = 0, entries = 0, modct = 0;
- int verbose_bytes = 0;
- if (&__start___verbose == &__stop___verbose) {
+ if (&__start___dyndbg == &__stop___dyndbg) {
if (IS_ENABLED(CONFIG_DYNAMIC_DEBUG)) {
pr_warn("_ddebug table is empty in a CONFIG_DYNAMIC_DEBUG build\n");
return 1;
@@ -1040,14 +1074,11 @@ static int __init dynamic_debug_init(void)
ddebug_init_success = 1;
return 0;
}
- iter = __start___verbose;
+ iter = __start___dyndbg;
modname = iter->modname;
iter_start = iter;
- for (; iter < __stop___verbose; iter++) {
+ for (; iter < __stop___dyndbg; iter++) {
entries++;
- verbose_bytes += strlen(iter->modname) + strlen(iter->function)
- + strlen(iter->filename) + strlen(iter->format);
-
if (strcmp(modname, iter->modname)) {
modct++;
ret = ddebug_add_module(iter_start, n, modname);
@@ -1064,9 +1095,9 @@ static int __init dynamic_debug_init(void)
goto out_err;
ddebug_init_success = 1;
- vpr_info("%d modules, %d entries and %d bytes in ddebug tables, %d bytes in (readonly) verbose section\n",
+ vpr_info("%d modules, %d entries and %d bytes in ddebug tables, %d bytes in __dyndbg section\n",
modct, entries, (int)(modct * sizeof(struct ddebug_table)),
- verbose_bytes + (int)(__stop___verbose - __start___verbose));
+ (int)(entries * sizeof(struct _ddebug)));
/* apply ddebug_query boot param, dont unload tables on err */
if (ddebug_setup_string[0] != '\0') {
diff --git a/lib/ioremap.c b/lib/ioremap.c
deleted file mode 100644
index 5ee3526f71b8..000000000000
--- a/lib/ioremap.c
+++ /dev/null
@@ -1,287 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Re-map IO memory to kernel address space so that we can access it.
- * This is needed for high PCI addresses that aren't mapped in the
- * 640k-1MB IO memory area on PC's
- *
- * (C) Copyright 1995 1996 Linus Torvalds
- */
-#include <linux/vmalloc.h>
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <linux/io.h>
-#include <linux/export.h>
-#include <asm/cacheflush.h>
-
-#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
-static int __read_mostly ioremap_p4d_capable;
-static int __read_mostly ioremap_pud_capable;
-static int __read_mostly ioremap_pmd_capable;
-static int __read_mostly ioremap_huge_disabled;
-
-static int __init set_nohugeiomap(char *str)
-{
- ioremap_huge_disabled = 1;
- return 0;
-}
-early_param("nohugeiomap", set_nohugeiomap);
-
-void __init ioremap_huge_init(void)
-{
- if (!ioremap_huge_disabled) {
- if (arch_ioremap_p4d_supported())
- ioremap_p4d_capable = 1;
- if (arch_ioremap_pud_supported())
- ioremap_pud_capable = 1;
- if (arch_ioremap_pmd_supported())
- ioremap_pmd_capable = 1;
- }
-}
-
-static inline int ioremap_p4d_enabled(void)
-{
- return ioremap_p4d_capable;
-}
-
-static inline int ioremap_pud_enabled(void)
-{
- return ioremap_pud_capable;
-}
-
-static inline int ioremap_pmd_enabled(void)
-{
- return ioremap_pmd_capable;
-}
-
-#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
-static inline int ioremap_p4d_enabled(void) { return 0; }
-static inline int ioremap_pud_enabled(void) { return 0; }
-static inline int ioremap_pmd_enabled(void) { return 0; }
-#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
-
-static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
- unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
- pgtbl_mod_mask *mask)
-{
- pte_t *pte;
- u64 pfn;
-
- pfn = phys_addr >> PAGE_SHIFT;
- pte = pte_alloc_kernel_track(pmd, addr, mask);
- if (!pte)
- return -ENOMEM;
- do {
- BUG_ON(!pte_none(*pte));
- set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
- pfn++;
- } while (pte++, addr += PAGE_SIZE, addr != end);
- *mask |= PGTBL_PTE_MODIFIED;
- return 0;
-}
-
-static int ioremap_try_huge_pmd(pmd_t *pmd, unsigned long addr,
- unsigned long end, phys_addr_t phys_addr,
- pgprot_t prot)
-{
- if (!ioremap_pmd_enabled())
- return 0;
-
- if ((end - addr) != PMD_SIZE)
- return 0;
-
- if (!IS_ALIGNED(addr, PMD_SIZE))
- return 0;
-
- if (!IS_ALIGNED(phys_addr, PMD_SIZE))
- return 0;
-
- if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
- return 0;
-
- return pmd_set_huge(pmd, phys_addr, prot);
-}
-
-static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
- unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
- pgtbl_mod_mask *mask)
-{
- pmd_t *pmd;
- unsigned long next;
-
- pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
- if (!pmd)
- return -ENOMEM;
- do {
- next = pmd_addr_end(addr, end);
-
- if (ioremap_try_huge_pmd(pmd, addr, next, phys_addr, prot)) {
- *mask |= PGTBL_PMD_MODIFIED;
- continue;
- }
-
- if (ioremap_pte_range(pmd, addr, next, phys_addr, prot, mask))
- return -ENOMEM;
- } while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
- return 0;
-}
-
-static int ioremap_try_huge_pud(pud_t *pud, unsigned long addr,
- unsigned long end, phys_addr_t phys_addr,
- pgprot_t prot)
-{
- if (!ioremap_pud_enabled())
- return 0;
-
- if ((end - addr) != PUD_SIZE)
- return 0;
-
- if (!IS_ALIGNED(addr, PUD_SIZE))
- return 0;
-
- if (!IS_ALIGNED(phys_addr, PUD_SIZE))
- return 0;
-
- if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
- return 0;
-
- return pud_set_huge(pud, phys_addr, prot);
-}
-
-static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
- unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
- pgtbl_mod_mask *mask)
-{
- pud_t *pud;
- unsigned long next;
-
- pud = pud_alloc_track(&init_mm, p4d, addr, mask);
- if (!pud)
- return -ENOMEM;
- do {
- next = pud_addr_end(addr, end);
-
- if (ioremap_try_huge_pud(pud, addr, next, phys_addr, prot)) {
- *mask |= PGTBL_PUD_MODIFIED;
- continue;
- }
-
- if (ioremap_pmd_range(pud, addr, next, phys_addr, prot, mask))
- return -ENOMEM;
- } while (pud++, phys_addr += (next - addr), addr = next, addr != end);
- return 0;
-}
-
-static int ioremap_try_huge_p4d(p4d_t *p4d, unsigned long addr,
- unsigned long end, phys_addr_t phys_addr,
- pgprot_t prot)
-{
- if (!ioremap_p4d_enabled())
- return 0;
-
- if ((end - addr) != P4D_SIZE)
- return 0;
-
- if (!IS_ALIGNED(addr, P4D_SIZE))
- return 0;
-
- if (!IS_ALIGNED(phys_addr, P4D_SIZE))
- return 0;
-
- if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
- return 0;
-
- return p4d_set_huge(p4d, phys_addr, prot);
-}
-
-static inline int ioremap_p4d_range(pgd_t *pgd, unsigned long addr,
- unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
- pgtbl_mod_mask *mask)
-{
- p4d_t *p4d;
- unsigned long next;
-
- p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
- if (!p4d)
- return -ENOMEM;
- do {
- next = p4d_addr_end(addr, end);
-
- if (ioremap_try_huge_p4d(p4d, addr, next, phys_addr, prot)) {
- *mask |= PGTBL_P4D_MODIFIED;
- continue;
- }
-
- if (ioremap_pud_range(p4d, addr, next, phys_addr, prot, mask))
- return -ENOMEM;
- } while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
- return 0;
-}
-
-int ioremap_page_range(unsigned long addr,
- unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
-{
- pgd_t *pgd;
- unsigned long start;
- unsigned long next;
- int err;
- pgtbl_mod_mask mask = 0;
-
- might_sleep();
- BUG_ON(addr >= end);
-
- start = addr;
- pgd = pgd_offset_k(addr);
- do {
- next = pgd_addr_end(addr, end);
- err = ioremap_p4d_range(pgd, addr, next, phys_addr, prot,
- &mask);
- if (err)
- break;
- } while (pgd++, phys_addr += (next - addr), addr = next, addr != end);
-
- flush_cache_vmap(start, end);
-
- if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
- arch_sync_kernel_mappings(start, end);
-
- return err;
-}
-
-#ifdef CONFIG_GENERIC_IOREMAP
-void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot)
-{
- unsigned long offset, vaddr;
- phys_addr_t last_addr;
- struct vm_struct *area;
-
- /* Disallow wrap-around or zero size */
- last_addr = addr + size - 1;
- if (!size || last_addr < addr)
- return NULL;
-
- /* Page-align mappings */
- offset = addr & (~PAGE_MASK);
- addr -= offset;
- size = PAGE_ALIGN(size + offset);
-
- area = get_vm_area_caller(size, VM_IOREMAP,
- __builtin_return_address(0));
- if (!area)
- return NULL;
- vaddr = (unsigned long)area->addr;
-
- if (ioremap_page_range(vaddr, vaddr + size, addr, __pgprot(prot))) {
- free_vm_area(area);
- return NULL;
- }
-
- return (void __iomem *)(vaddr + offset);
-}
-EXPORT_SYMBOL(ioremap_prot);
-
-void iounmap(volatile void __iomem *addr)
-{
- vunmap((void *)((unsigned long)addr & PAGE_MASK));
-}
-EXPORT_SYMBOL(iounmap);
-#endif /* CONFIG_GENERIC_IOREMAP */
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index bf538c2bec77..5e40786c8f12 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
+#include <crypto/hash.h>
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/uio.h>
@@ -1567,7 +1568,7 @@ EXPORT_SYMBOL(csum_and_copy_to_iter);
size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
struct iov_iter *i)
{
-#ifdef CONFIG_CRYPTO
+#ifdef CONFIG_CRYPTO_HASH
struct ahash_request *hash = hashp;
struct scatterlist sg;
size_t copied;
diff --git a/lib/kobject.c b/lib/kobject.c
index 1e4b7382a88e..3afb939f2a1c 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -599,14 +599,7 @@ out:
}
EXPORT_SYMBOL_GPL(kobject_move);
-/**
- * kobject_del() - Unlink kobject from hierarchy.
- * @kobj: object.
- *
- * This is the function that should be called to delete an object
- * successfully added via kobject_add().
- */
-void kobject_del(struct kobject *kobj)
+static void __kobject_del(struct kobject *kobj)
{
struct kernfs_node *sd;
const struct kobj_type *ktype;
@@ -632,9 +625,23 @@ void kobject_del(struct kobject *kobj)
kobj->state_in_sysfs = 0;
kobj_kset_leave(kobj);
- kobject_put(kobj->parent);
kobj->parent = NULL;
}
+
+/**
+ * kobject_del() - Unlink kobject from hierarchy.
+ * @kobj: object.
+ *
+ * This is the function that should be called to delete an object
+ * successfully added via kobject_add().
+ */
+void kobject_del(struct kobject *kobj)
+{
+ struct kobject *parent = kobj->parent;
+
+ __kobject_del(kobj);
+ kobject_put(parent);
+}
EXPORT_SYMBOL(kobject_del);
/**
@@ -670,6 +677,7 @@ EXPORT_SYMBOL(kobject_get_unless_zero);
*/
static void kobject_cleanup(struct kobject *kobj)
{
+ struct kobject *parent = kobj->parent;
struct kobj_type *t = get_ktype(kobj);
const char *name = kobj->name;
@@ -684,7 +692,10 @@ static void kobject_cleanup(struct kobject *kobj)
if (kobj->state_in_sysfs) {
pr_debug("kobject: '%s' (%p): auto cleanup kobject_del\n",
kobject_name(kobj), kobj);
- kobject_del(kobj);
+ __kobject_del(kobj);
+ } else {
+ /* avoid dropping the parent reference unnecessarily */
+ parent = NULL;
}
if (t && t->release) {
@@ -698,6 +709,8 @@ static void kobject_cleanup(struct kobject *kobj)
pr_debug("kobject: '%s': free name\n", name);
kfree_const(name);
}
+
+ kobject_put(parent);
}
#ifdef CONFIG_DEBUG_KOBJECT_RELEASE
diff --git a/lib/kstrtox.c b/lib/kstrtox.c
index 1006bf70bf74..a14ccf905055 100644
--- a/lib/kstrtox.c
+++ b/lib/kstrtox.c
@@ -115,8 +115,7 @@ static int _kstrtoull(const char *s, unsigned int base, unsigned long long *res)
* @res: Where to write the result of the conversion on success.
*
* Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
- * Used as a replacement for the obsolete simple_strtoull. Return code must
- * be checked.
+ * Preferred over simple_strtoull(). Return code must be checked.
*/
int kstrtoull(const char *s, unsigned int base, unsigned long long *res)
{
@@ -139,8 +138,7 @@ EXPORT_SYMBOL(kstrtoull);
* @res: Where to write the result of the conversion on success.
*
* Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
- * Used as a replacement for the obsolete simple_strtoull. Return code must
- * be checked.
+ * Preferred over simple_strtoll(). Return code must be checked.
*/
int kstrtoll(const char *s, unsigned int base, long long *res)
{
@@ -211,8 +209,7 @@ EXPORT_SYMBOL(_kstrtol);
* @res: Where to write the result of the conversion on success.
*
* Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
- * Used as a replacement for the obsolete simple_strtoull. Return code must
- * be checked.
+ * Preferred over simple_strtoul(). Return code must be checked.
*/
int kstrtouint(const char *s, unsigned int base, unsigned int *res)
{
@@ -242,8 +239,7 @@ EXPORT_SYMBOL(kstrtouint);
* @res: Where to write the result of the conversion on success.
*
* Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
- * Used as a replacement for the obsolete simple_strtoull. Return code must
- * be checked.
+ * Preferred over simple_strtol(). Return code must be checked.
*/
int kstrtoint(const char *s, unsigned int base, int *res)
{
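
As the reworded kerneldoc above stresses, the kstrto*() return value must be checked; -EINVAL signals a parse error and -ERANGE an overflow. A minimal sketch with an invented wrapper name:

#include <linux/errno.h>
#include <linux/kernel.h>

static int example_parse_count(const char *s, unsigned long long *out)
{
	int ret;

	ret = kstrtoull(s, 10, out);
	if (ret)	/* -EINVAL on a parse error, -ERANGE on overflow */
		return ret;

	return 0;
}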
diff --git a/lib/kunit/kunit-test.c b/lib/kunit/kunit-test.c
index 4f3d36a72f8f..69f902440a0e 100644
--- a/lib/kunit/kunit-test.c
+++ b/lib/kunit/kunit-test.c
@@ -118,14 +118,14 @@ static int fake_resource_init(struct kunit_resource *res, void *context)
{
struct kunit_test_resource_context *ctx = context;
- res->allocation = &ctx->is_resource_initialized;
+ res->data = &ctx->is_resource_initialized;
ctx->is_resource_initialized = true;
return 0;
}
static void fake_resource_free(struct kunit_resource *res)
{
- bool *is_resource_initialized = res->allocation;
+ bool *is_resource_initialized = res->data;
*is_resource_initialized = false;
}
@@ -154,11 +154,21 @@ static void kunit_resource_test_alloc_resource(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, res);
KUNIT_EXPECT_PTR_EQ(test,
&ctx->is_resource_initialized,
- (bool *) res->allocation);
+ (bool *)res->data);
KUNIT_EXPECT_TRUE(test, list_is_last(&res->node, &ctx->test.resources));
KUNIT_EXPECT_PTR_EQ(test, free, res->free);
+
+ kunit_put_resource(res);
}
+/*
+ * Note: tests below use kunit_alloc_and_get_resource(), so as a consequence
+ * they have a reference to the associated resource that they must release
+ * via kunit_put_resource(). In normal operation, users will only
+ * have to do this for cases where they use kunit_find_resource(), and the
+ * kunit_alloc_resource() function will be used (which does not take a
+ * resource reference).
+ */
static void kunit_resource_test_destroy_resource(struct kunit *test)
{
struct kunit_test_resource_context *ctx = test->priv;
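
The comment block above states the new rule: kunit_find_resource() and kunit_alloc_and_get_resource() return a counted reference that the caller must drop with kunit_put_resource(). A sketch of that pattern outside the test file, with an illustrative function name:

#include <kunit/test.h>

static void example_peek_resource(struct kunit *test, void *ptr)
{
	struct kunit_resource *res;

	res = kunit_find_resource(test, kunit_resource_instance_match, ptr);
	if (!res)
		return;

	/* ... inspect res->data while the reference is held ... */

	kunit_put_resource(res);	/* drop the reference taken by _find() */
}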
@@ -169,11 +179,12 @@ static void kunit_resource_test_destroy_resource(struct kunit *test)
GFP_KERNEL,
ctx);
+ kunit_put_resource(res);
+
KUNIT_ASSERT_FALSE(test,
- kunit_resource_destroy(&ctx->test,
+ kunit_destroy_resource(&ctx->test,
kunit_resource_instance_match,
- res->free,
- res->allocation));
+ res->data));
KUNIT_EXPECT_FALSE(test, ctx->is_resource_initialized);
KUNIT_EXPECT_TRUE(test, list_empty(&ctx->test.resources));
@@ -191,6 +202,7 @@ static void kunit_resource_test_cleanup_resources(struct kunit *test)
fake_resource_free,
GFP_KERNEL,
ctx);
+ kunit_put_resource(resources[i]);
}
kunit_cleanup(&ctx->test);
@@ -221,14 +233,14 @@ static int fake_resource_2_init(struct kunit_resource *res, void *context)
KUNIT_RESOURCE_TEST_MARK_ORDER(ctx, allocate_order, 2);
- res->allocation = ctx;
+ res->data = ctx;
return 0;
}
static void fake_resource_2_free(struct kunit_resource *res)
{
- struct kunit_test_resource_context *ctx = res->allocation;
+ struct kunit_test_resource_context *ctx = res->data;
KUNIT_RESOURCE_TEST_MARK_ORDER(ctx, free_order, 2);
}
@@ -236,23 +248,26 @@ static void fake_resource_2_free(struct kunit_resource *res)
static int fake_resource_1_init(struct kunit_resource *res, void *context)
{
struct kunit_test_resource_context *ctx = context;
+ struct kunit_resource *res2;
- kunit_alloc_and_get_resource(&ctx->test,
- fake_resource_2_init,
- fake_resource_2_free,
- GFP_KERNEL,
- ctx);
+ res2 = kunit_alloc_and_get_resource(&ctx->test,
+ fake_resource_2_init,
+ fake_resource_2_free,
+ GFP_KERNEL,
+ ctx);
KUNIT_RESOURCE_TEST_MARK_ORDER(ctx, allocate_order, 1);
- res->allocation = ctx;
+ res->data = ctx;
+
+ kunit_put_resource(res2);
return 0;
}
static void fake_resource_1_free(struct kunit_resource *res)
{
- struct kunit_test_resource_context *ctx = res->allocation;
+ struct kunit_test_resource_context *ctx = res->data;
KUNIT_RESOURCE_TEST_MARK_ORDER(ctx, free_order, 1);
}
@@ -265,13 +280,14 @@ static void fake_resource_1_free(struct kunit_resource *res)
static void kunit_resource_test_proper_free_ordering(struct kunit *test)
{
struct kunit_test_resource_context *ctx = test->priv;
+ struct kunit_resource *res;
/* fake_resource_1 allocates a fake_resource_2 in its init. */
- kunit_alloc_and_get_resource(&ctx->test,
- fake_resource_1_init,
- fake_resource_1_free,
- GFP_KERNEL,
- ctx);
+ res = kunit_alloc_and_get_resource(&ctx->test,
+ fake_resource_1_init,
+ fake_resource_1_free,
+ GFP_KERNEL,
+ ctx);
/*
* Since fake_resource_2_init calls KUNIT_RESOURCE_TEST_MARK_ORDER
@@ -281,6 +297,8 @@ static void kunit_resource_test_proper_free_ordering(struct kunit *test)
KUNIT_EXPECT_EQ(test, ctx->allocate_order[0], 2);
KUNIT_EXPECT_EQ(test, ctx->allocate_order[1], 1);
+ kunit_put_resource(res);
+
kunit_cleanup(&ctx->test);
/*
@@ -292,6 +310,57 @@ static void kunit_resource_test_proper_free_ordering(struct kunit *test)
KUNIT_EXPECT_EQ(test, ctx->free_order[1], 2);
}
+static void kunit_resource_test_static(struct kunit *test)
+{
+ struct kunit_test_resource_context ctx;
+ struct kunit_resource res;
+
+ KUNIT_EXPECT_EQ(test, kunit_add_resource(test, NULL, NULL, &res, &ctx),
+ 0);
+
+ KUNIT_EXPECT_PTR_EQ(test, res.data, (void *)&ctx);
+
+ kunit_cleanup(test);
+
+ KUNIT_EXPECT_TRUE(test, list_empty(&test->resources));
+}
+
+static void kunit_resource_test_named(struct kunit *test)
+{
+ struct kunit_resource res1, res2, *found = NULL;
+ struct kunit_test_resource_context ctx;
+
+ KUNIT_EXPECT_EQ(test,
+ kunit_add_named_resource(test, NULL, NULL, &res1,
+ "resource_1", &ctx),
+ 0);
+ KUNIT_EXPECT_PTR_EQ(test, res1.data, (void *)&ctx);
+
+ KUNIT_EXPECT_EQ(test,
+ kunit_add_named_resource(test, NULL, NULL, &res1,
+ "resource_1", &ctx),
+ -EEXIST);
+
+ KUNIT_EXPECT_EQ(test,
+ kunit_add_named_resource(test, NULL, NULL, &res2,
+ "resource_2", &ctx),
+ 0);
+
+ found = kunit_find_named_resource(test, "resource_1");
+
+ KUNIT_EXPECT_PTR_EQ(test, found, &res1);
+
+ if (found)
+ kunit_put_resource(&res1);
+
+ KUNIT_EXPECT_EQ(test, kunit_destroy_named_resource(test, "resource_2"),
+ 0);
+
+ kunit_cleanup(test);
+
+ KUNIT_EXPECT_TRUE(test, list_empty(&test->resources));
+}
+
static int kunit_resource_test_init(struct kunit *test)
{
struct kunit_test_resource_context *ctx =
@@ -320,6 +389,8 @@ static struct kunit_case kunit_resource_test_cases[] = {
KUNIT_CASE(kunit_resource_test_destroy_resource),
KUNIT_CASE(kunit_resource_test_cleanup_resources),
KUNIT_CASE(kunit_resource_test_proper_free_ordering),
+ KUNIT_CASE(kunit_resource_test_static),
+ KUNIT_CASE(kunit_resource_test_named),
{}
};
diff --git a/lib/kunit/string-stream.c b/lib/kunit/string-stream.c
index 350392013c14..141789ca8949 100644
--- a/lib/kunit/string-stream.c
+++ b/lib/kunit/string-stream.c
@@ -33,14 +33,14 @@ static int string_stream_fragment_init(struct kunit_resource *res,
if (!frag->fragment)
return -ENOMEM;
- res->allocation = frag;
+ res->data = frag;
return 0;
}
static void string_stream_fragment_free(struct kunit_resource *res)
{
- struct string_stream_fragment *frag = res->allocation;
+ struct string_stream_fragment *frag = res->data;
list_del(&frag->node);
kunit_kfree(frag->test, frag->fragment);
@@ -65,9 +65,8 @@ static struct string_stream_fragment *alloc_string_stream_fragment(
static int string_stream_fragment_destroy(struct string_stream_fragment *frag)
{
- return kunit_resource_destroy(frag->test,
+ return kunit_destroy_resource(frag->test,
kunit_resource_instance_match,
- string_stream_fragment_free,
frag);
}
@@ -179,7 +178,7 @@ static int string_stream_init(struct kunit_resource *res, void *context)
if (!stream)
return -ENOMEM;
- res->allocation = stream;
+ res->data = stream;
stream->gfp = ctx->gfp;
stream->test = ctx->test;
INIT_LIST_HEAD(&stream->fragments);
@@ -190,7 +189,7 @@ static int string_stream_init(struct kunit_resource *res, void *context)
static void string_stream_free(struct kunit_resource *res)
{
- struct string_stream *stream = res->allocation;
+ struct string_stream *stream = res->data;
string_stream_clear(stream);
}
@@ -211,8 +210,7 @@ struct string_stream *alloc_string_stream(struct kunit *test, gfp_t gfp)
int string_stream_destroy(struct string_stream *stream)
{
- return kunit_resource_destroy(stream->test,
+ return kunit_destroy_resource(stream->test,
kunit_resource_instance_match,
- string_stream_free,
stream);
}
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
index ccb2ffad8dcf..c36037200310 100644
--- a/lib/kunit/test.c
+++ b/lib/kunit/test.c
@@ -8,6 +8,7 @@
#include <kunit/test.h>
#include <linux/kernel.h>
+#include <linux/kref.h>
#include <linux/sched/debug.h>
#include "debugfs.h"
@@ -406,90 +407,116 @@ void __kunit_test_suites_exit(struct kunit_suite **suites)
}
EXPORT_SYMBOL_GPL(__kunit_test_suites_exit);
-struct kunit_resource *kunit_alloc_and_get_resource(struct kunit *test,
- kunit_resource_init_t init,
- kunit_resource_free_t free,
- gfp_t internal_gfp,
- void *context)
+/*
+ * Used for static resources and when a kunit_resource * has been created by
+ * kunit_alloc_resource(). When an init function is supplied, @data is passed
+ * into the init function; otherwise, we simply set the resource data field to
+ * the data value passed in.
+ */
+int kunit_add_resource(struct kunit *test,
+ kunit_resource_init_t init,
+ kunit_resource_free_t free,
+ struct kunit_resource *res,
+ void *data)
{
- struct kunit_resource *res;
- int ret;
+ int ret = 0;
- res = kzalloc(sizeof(*res), internal_gfp);
- if (!res)
- return NULL;
+ res->free = free;
+ kref_init(&res->refcount);
- ret = init(res, context);
- if (ret)
- return NULL;
+ if (init) {
+ ret = init(res, data);
+ if (ret)
+ return ret;
+ } else {
+ res->data = data;
+ }
- res->free = free;
spin_lock(&test->lock);
list_add_tail(&res->node, &test->resources);
+ /* refcount for list is established by kref_init() */
spin_unlock(&test->lock);
- return res;
+ return ret;
}
-EXPORT_SYMBOL_GPL(kunit_alloc_and_get_resource);
+EXPORT_SYMBOL_GPL(kunit_add_resource);
-static void kunit_resource_free(struct kunit *test, struct kunit_resource *res)
+int kunit_add_named_resource(struct kunit *test,
+ kunit_resource_init_t init,
+ kunit_resource_free_t free,
+ struct kunit_resource *res,
+ const char *name,
+ void *data)
{
- res->free(res);
- kfree(res);
+ struct kunit_resource *existing;
+
+ if (!name)
+ return -EINVAL;
+
+ existing = kunit_find_named_resource(test, name);
+ if (existing) {
+ kunit_put_resource(existing);
+ return -EEXIST;
+ }
+
+ res->name = name;
+
+ return kunit_add_resource(test, init, free, res, data);
}
+EXPORT_SYMBOL_GPL(kunit_add_named_resource);
-static struct kunit_resource *kunit_resource_find(struct kunit *test,
- kunit_resource_match_t match,
- kunit_resource_free_t free,
- void *match_data)
+struct kunit_resource *kunit_alloc_and_get_resource(struct kunit *test,
+ kunit_resource_init_t init,
+ kunit_resource_free_t free,
+ gfp_t internal_gfp,
+ void *data)
{
- struct kunit_resource *resource;
+ struct kunit_resource *res;
+ int ret;
- lockdep_assert_held(&test->lock);
+ res = kzalloc(sizeof(*res), internal_gfp);
+ if (!res)
+ return NULL;
- list_for_each_entry_reverse(resource, &test->resources, node) {
- if (resource->free != free)
- continue;
- if (match(test, resource->allocation, match_data))
- return resource;
+ ret = kunit_add_resource(test, init, free, res, data);
+ if (!ret) {
+ /*
+ * bump refcount for get; kunit_put_resource() should be called
+ * when done.
+ */
+ kunit_get_resource(res);
+ return res;
}
-
return NULL;
}
+EXPORT_SYMBOL_GPL(kunit_alloc_and_get_resource);
-static struct kunit_resource *kunit_resource_remove(
- struct kunit *test,
- kunit_resource_match_t match,
- kunit_resource_free_t free,
- void *match_data)
+void kunit_remove_resource(struct kunit *test, struct kunit_resource *res)
{
- struct kunit_resource *resource;
-
spin_lock(&test->lock);
- resource = kunit_resource_find(test, match, free, match_data);
- if (resource)
- list_del(&resource->node);
+ list_del(&res->node);
spin_unlock(&test->lock);
-
- return resource;
+ kunit_put_resource(res);
}
+EXPORT_SYMBOL_GPL(kunit_remove_resource);
-int kunit_resource_destroy(struct kunit *test,
- kunit_resource_match_t match,
- kunit_resource_free_t free,
+int kunit_destroy_resource(struct kunit *test, kunit_resource_match_t match,
void *match_data)
{
- struct kunit_resource *resource;
-
- resource = kunit_resource_remove(test, match, free, match_data);
+ struct kunit_resource *res = kunit_find_resource(test, match,
+ match_data);
- if (!resource)
+ if (!res)
return -ENOENT;
- kunit_resource_free(test, resource);
+ kunit_remove_resource(test, res);
+
+ /* We have a reference also via _find(); drop it. */
+ kunit_put_resource(res);
+
return 0;
}
-EXPORT_SYMBOL_GPL(kunit_resource_destroy);
+EXPORT_SYMBOL_GPL(kunit_destroy_resource);
struct kunit_kmalloc_params {
size_t size;
@@ -500,8 +527,8 @@ static int kunit_kmalloc_init(struct kunit_resource *res, void *context)
{
struct kunit_kmalloc_params *params = context;
- res->allocation = kmalloc(params->size, params->gfp);
- if (!res->allocation)
+ res->data = kmalloc(params->size, params->gfp);
+ if (!res->data)
return -ENOMEM;
return 0;
@@ -509,7 +536,7 @@ static int kunit_kmalloc_init(struct kunit_resource *res, void *context)
static void kunit_kmalloc_free(struct kunit_resource *res)
{
- kfree(res->allocation);
+ kfree(res->data);
}
void *kunit_kmalloc(struct kunit *test, size_t size, gfp_t gfp)
@@ -529,20 +556,25 @@ EXPORT_SYMBOL_GPL(kunit_kmalloc);
void kunit_kfree(struct kunit *test, const void *ptr)
{
- int rc;
+ struct kunit_resource *res;
- rc = kunit_resource_destroy(test,
- kunit_resource_instance_match,
- kunit_kmalloc_free,
- (void *)ptr);
+ res = kunit_find_resource(test, kunit_resource_instance_match,
+ (void *)ptr);
+
+ /*
+ * Removing the resource from the list of resources drops the
+ * reference count to 1; the final put will trigger the free.
+ */
+ kunit_remove_resource(test, res);
+
+ kunit_put_resource(res);
- WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(kunit_kfree);
void kunit_cleanup(struct kunit *test)
{
- struct kunit_resource *resource;
+ struct kunit_resource *res;
/*
* test->resources is a stack - each allocation must be freed in the
@@ -559,13 +591,16 @@ void kunit_cleanup(struct kunit *test)
spin_unlock(&test->lock);
break;
}
- resource = list_last_entry(&test->resources,
- struct kunit_resource,
- node);
- list_del(&resource->node);
+ res = list_last_entry(&test->resources,
+ struct kunit_resource,
+ node);
+ /*
+ * Need to unlock here as a resource may remove another
+ * resource, and this can't happen if the test->lock
+ * is held.
+ */
spin_unlock(&test->lock);
-
- kunit_resource_free(test, resource);
+ kunit_remove_resource(test, res);
}
}
EXPORT_SYMBOL_GPL(kunit_cleanup);
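
The hunks above replace KUnit's internally allocated, match-by-allocation resources with caller-visible struct kunit_resource objects whose lifetime is managed by a kref. A minimal sketch of the intended caller-managed flow, assuming the kunit_* prototypes exported from include/kunit/test.h (not shown in this diff); the test, context struct, and callback names are invented for illustration:

/*
 * Hedged sketch only, not part of the patch: register a statically owned
 * resource, look it up again, and drop the lookup reference.
 */
struct example_ctx {
	struct kunit_resource res;	/* embedded, caller-owned */
	int value;
};

static void example_free(struct kunit_resource *res)
{
	/* nothing to release: the context outlives the test on its own */
}

static void example_resource_test(struct kunit *test)
{
	static struct example_ctx ctx = { .value = 42 };
	struct kunit_resource *found;

	/* no init callback, so res->data is simply set to &ctx */
	KUNIT_ASSERT_EQ(test, 0,
			kunit_add_resource(test, NULL, example_free,
					   &ctx.res, &ctx));

	/* kunit_find_resource() takes a reference on the match ... */
	found = kunit_find_resource(test, kunit_resource_instance_match, &ctx);
	KUNIT_EXPECT_PTR_EQ(test, found, &ctx.res);

	/* ... which must be dropped once the caller is done with it */
	kunit_put_resource(found);
}

When the test finishes, kunit_cleanup() above removes the resource from the list and drops the final reference, at which point example_free() runs.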
diff --git a/lib/livepatch/Makefile b/lib/livepatch/Makefile
index 295b94bff370..dcc912b3478f 100644
--- a/lib/livepatch/Makefile
+++ b/lib/livepatch/Makefile
@@ -12,7 +12,3 @@ obj-$(CONFIG_TEST_LIVEPATCH) += test_klp_atomic_replace.o \
test_klp_state.o \
test_klp_state2.o \
test_klp_state3.o
-
-# Target modules to be livepatched require CC_FLAGS_FTRACE
-CFLAGS_test_klp_callbacks_busy.o += $(CC_FLAGS_FTRACE)
-CFLAGS_test_klp_callbacks_mod.o += $(CC_FLAGS_FTRACE)
diff --git a/lib/livepatch/test_klp_callbacks_busy.c b/lib/livepatch/test_klp_callbacks_busy.c
index 40beddf8a0e2..7ac845f65be5 100644
--- a/lib/livepatch/test_klp_callbacks_busy.c
+++ b/lib/livepatch/test_klp_callbacks_busy.c
@@ -5,34 +5,53 @@
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
-static int sleep_secs;
-module_param(sleep_secs, int, 0644);
-MODULE_PARM_DESC(sleep_secs, "sleep_secs (default=0)");
+/* load/run-time control from sysfs writer */
+static bool block_transition;
+module_param(block_transition, bool, 0644);
+MODULE_PARM_DESC(block_transition, "block_transition (default=false)");
static void busymod_work_func(struct work_struct *work);
-static DECLARE_DELAYED_WORK(work, busymod_work_func);
+static DECLARE_WORK(work, busymod_work_func);
static void busymod_work_func(struct work_struct *work)
{
- pr_info("%s, sleeping %d seconds ...\n", __func__, sleep_secs);
- msleep(sleep_secs * 1000);
+ pr_info("%s enter\n", __func__);
+
+ while (READ_ONCE(block_transition)) {
+ /*
+ * Busy-wait until the sysfs writer has acknowledged a
+ * blocked transition and cleared the flag.
+ */
+ msleep(20);
+ }
+
pr_info("%s exit\n", __func__);
}
static int test_klp_callbacks_busy_init(void)
{
pr_info("%s\n", __func__);
- schedule_delayed_work(&work,
- msecs_to_jiffies(1000 * 0));
+ schedule_work(&work);
+
+ if (!block_transition) {
+ /*
+ * Serialize output: print all messages from the work
+ * function before returning from init().
+ */
+ flush_work(&work);
+ }
+
return 0;
}
static void test_klp_callbacks_busy_exit(void)
{
- cancel_delayed_work_sync(&work);
+ WRITE_ONCE(block_transition, false);
+ flush_work(&work);
pr_info("%s\n", __func__);
}
diff --git a/lib/livepatch/test_klp_shadow_vars.c b/lib/livepatch/test_klp_shadow_vars.c
index f0b5a1d24e55..b99116490858 100644
--- a/lib/livepatch/test_klp_shadow_vars.c
+++ b/lib/livepatch/test_klp_shadow_vars.c
@@ -109,8 +109,7 @@ static void shadow_free(void *obj, unsigned long id, klp_shadow_dtor_t dtor)
static void shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor)
{
klp_shadow_free_all(id, dtor);
- pr_info("klp_%s(id=0x%lx, dtor=PTR%d)\n",
- __func__, id, ptr_id(dtor));
+ pr_info("klp_%s(id=0x%lx, dtor=PTR%d)\n", __func__, id, ptr_id(dtor));
}
@@ -124,12 +123,16 @@ static int shadow_ctor(void *obj, void *shadow_data, void *ctor_data)
return -EINVAL;
*sv = *var;
- pr_info("%s: PTR%d -> PTR%d\n",
- __func__, ptr_id(sv), ptr_id(*var));
+ pr_info("%s: PTR%d -> PTR%d\n", __func__, ptr_id(sv), ptr_id(*var));
return 0;
}
+/*
+ * With more than one item to free in the list, the freeing order is not
+ * deterministic, so shadow_dtor is not passed to shadow_free_all(); its
+ * out-of-order log messages would make the test fail. (see pass 6)
+ */
static void shadow_dtor(void *obj, void *shadow_data)
{
int **sv = shadow_data;
@@ -138,132 +141,153 @@ static void shadow_dtor(void *obj, void *shadow_data)
__func__, ptr_id(obj), ptr_id(sv));
}
-static int test_klp_shadow_vars_init(void)
-{
- void *obj = THIS_MODULE;
- int id = 0x1234;
- gfp_t gfp_flags = GFP_KERNEL;
+/* number of objects we simulate that need shadow vars */
+#define NUM_OBJS 3
- int var1, var2, var3, var4;
- int *pv1, *pv2, *pv3, *pv4;
- int **sv1, **sv2, **sv3, **sv4;
+/* dynamically created obj fields have the following shadow var id values */
+#define SV_ID1 0x1234
+#define SV_ID2 0x1235
- int **sv;
+/*
+ * The main test case adds/removes new fields (shadow var) to each of these
+ * test structure instances. The last group of fields in the struct represent
+ * the idea that shadow variables may be added and removed to and from the
+ * struct during execution.
+ */
+struct test_object {
+ /* add anything below here to avoid defining an empty struct */
+ struct shadow_ptr sp;
- pv1 = &var1;
- pv2 = &var2;
- pv3 = &var3;
- pv4 = &var4;
+ /* these represent shadow vars added and removed with SV_ID{1,2} */
+ /* char nfield1; */
+ /* int nfield2; */
+};
+
+static int test_klp_shadow_vars_init(void)
+{
+ struct test_object objs[NUM_OBJS];
+ char nfields1[NUM_OBJS], *pnfields1[NUM_OBJS], **sv1[NUM_OBJS];
+ char *pndup[NUM_OBJS];
+ int nfields2[NUM_OBJS], *pnfields2[NUM_OBJS], **sv2[NUM_OBJS];
+ void **sv;
+ int ret;
+ int i;
ptr_id(NULL);
- ptr_id(pv1);
- ptr_id(pv2);
- ptr_id(pv3);
- ptr_id(pv4);
/*
* With an empty shadow variable hash table, expect not to find
* any matches.
*/
- sv = shadow_get(obj, id);
+ sv = shadow_get(&objs[0], SV_ID1);
if (!sv)
pr_info(" got expected NULL result\n");
- /*
- * Allocate a few shadow variables with different <obj> and <id>.
- */
- sv1 = shadow_alloc(obj, id, sizeof(pv1), gfp_flags, shadow_ctor, &pv1);
- if (!sv1)
- return -ENOMEM;
-
- sv2 = shadow_alloc(obj + 1, id, sizeof(pv2), gfp_flags, shadow_ctor, &pv2);
- if (!sv2)
- return -ENOMEM;
-
- sv3 = shadow_alloc(obj, id + 1, sizeof(pv3), gfp_flags, shadow_ctor, &pv3);
- if (!sv3)
- return -ENOMEM;
-
- /*
- * Verify we can find our new shadow variables and that they point
- * to expected data.
- */
- sv = shadow_get(obj, id);
- if (!sv)
- return -EINVAL;
- if (sv == sv1 && *sv1 == pv1)
- pr_info(" got expected PTR%d -> PTR%d result\n",
- ptr_id(sv1), ptr_id(*sv1));
-
- sv = shadow_get(obj + 1, id);
- if (!sv)
- return -EINVAL;
- if (sv == sv2 && *sv2 == pv2)
- pr_info(" got expected PTR%d -> PTR%d result\n",
- ptr_id(sv2), ptr_id(*sv2));
- sv = shadow_get(obj, id + 1);
- if (!sv)
- return -EINVAL;
- if (sv == sv3 && *sv3 == pv3)
- pr_info(" got expected PTR%d -> PTR%d result\n",
- ptr_id(sv3), ptr_id(*sv3));
-
- /*
- * Allocate or get a few more, this time with the same <obj>, <id>.
- * The second invocation should return the same shadow var.
- */
- sv4 = shadow_get_or_alloc(obj + 2, id, sizeof(pv4), gfp_flags, shadow_ctor, &pv4);
- if (!sv4)
- return -ENOMEM;
-
- sv = shadow_get_or_alloc(obj + 2, id, sizeof(pv4), gfp_flags, shadow_ctor, &pv4);
- if (!sv)
- return -EINVAL;
- if (sv == sv4 && *sv4 == pv4)
- pr_info(" got expected PTR%d -> PTR%d result\n",
- ptr_id(sv4), ptr_id(*sv4));
-
- /*
- * Free the <obj=*, id> shadow variables and check that we can no
- * longer find them.
- */
- shadow_free(obj, id, shadow_dtor); /* sv1 */
- sv = shadow_get(obj, id);
- if (!sv)
- pr_info(" got expected NULL result\n");
+ /* pass 1: init & alloc a char+int pair of svars for each objs */
+ for (i = 0; i < NUM_OBJS; i++) {
+ pnfields1[i] = &nfields1[i];
+ ptr_id(pnfields1[i]);
+
+ if (i % 2) {
+ sv1[i] = shadow_alloc(&objs[i], SV_ID1,
+ sizeof(pnfields1[i]), GFP_KERNEL,
+ shadow_ctor, &pnfields1[i]);
+ } else {
+ sv1[i] = shadow_get_or_alloc(&objs[i], SV_ID1,
+ sizeof(pnfields1[i]), GFP_KERNEL,
+ shadow_ctor, &pnfields1[i]);
+ }
+ if (!sv1[i]) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ pnfields2[i] = &nfields2[i];
+ ptr_id(pnfields2[i]);
+ sv2[i] = shadow_alloc(&objs[i], SV_ID2, sizeof(pnfields2[i]),
+ GFP_KERNEL, shadow_ctor, &pnfields2[i]);
+ if (!sv2[i]) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
- shadow_free(obj + 1, id, shadow_dtor); /* sv2 */
- sv = shadow_get(obj + 1, id);
- if (!sv)
- pr_info(" got expected NULL result\n");
+ /* pass 2: verify we find allocated svars and where they point to */
+ for (i = 0; i < NUM_OBJS; i++) {
+ /* check the "char" svar for all objects */
+ sv = shadow_get(&objs[i], SV_ID1);
+ if (!sv) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if ((char **)sv == sv1[i] && *sv1[i] == pnfields1[i])
+ pr_info(" got expected PTR%d -> PTR%d result\n",
+ ptr_id(sv1[i]), ptr_id(*sv1[i]));
+
+ /* check the "int" svar for all objects */
+ sv = shadow_get(&objs[i], SV_ID2);
+ if (!sv) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if ((int **)sv == sv2[i] && *sv2[i] == pnfields2[i])
+ pr_info(" got expected PTR%d -> PTR%d result\n",
+ ptr_id(sv2[i]), ptr_id(*sv2[i]));
+ }
- shadow_free(obj + 2, id, shadow_dtor); /* sv4 */
- sv = shadow_get(obj + 2, id);
- if (!sv)
- pr_info(" got expected NULL result\n");
+ /* pass 3: verify that 'get_or_alloc' returns already allocated svars */
+ for (i = 0; i < NUM_OBJS; i++) {
+ pndup[i] = &nfields1[i];
+ ptr_id(pndup[i]);
+
+ sv = shadow_get_or_alloc(&objs[i], SV_ID1, sizeof(pndup[i]),
+ GFP_KERNEL, shadow_ctor, &pndup[i]);
+ if (!sv) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if ((char **)sv == sv1[i] && *sv1[i] == pnfields1[i])
+ pr_info(" got expected PTR%d -> PTR%d result\n",
+ ptr_id(sv1[i]), ptr_id(*sv1[i]));
+ }
- /*
- * We should still find an <id+1> variable.
- */
- sv = shadow_get(obj, id + 1);
- if (!sv)
- return -EINVAL;
- if (sv == sv3 && *sv3 == pv3)
- pr_info(" got expected PTR%d -> PTR%d result\n",
- ptr_id(sv3), ptr_id(*sv3));
+ /* pass 4: free <objs[*], SV_ID1> pairs of svars, verify removal */
+ for (i = 0; i < NUM_OBJS; i++) {
+ shadow_free(&objs[i], SV_ID1, shadow_dtor); /* 'char' pairs */
+ sv = shadow_get(&objs[i], SV_ID1);
+ if (!sv)
+ pr_info(" got expected NULL result\n");
+ }
- /*
- * Free all the <id+1> variables, too.
- */
- shadow_free_all(id + 1, shadow_dtor); /* sv3 */
- sv = shadow_get(obj, id);
- if (!sv)
- pr_info(" shadow_get() got expected NULL result\n");
+ /* pass 5: check we still find <objs[*], SV_ID2> svar pairs */
+ for (i = 0; i < NUM_OBJS; i++) {
+ sv = shadow_get(&objs[i], SV_ID2); /* 'int' pairs */
+ if (!sv) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if ((int **)sv == sv2[i] && *sv2[i] == pnfields2[i])
+ pr_info(" got expected PTR%d -> PTR%d result\n",
+ ptr_id(sv2[i]), ptr_id(*sv2[i]));
+ }
+ /* pass 6: free all the <objs[*], SV_ID2> svar pairs too. */
+ shadow_free_all(SV_ID2, NULL); /* 'int' pairs */
+ for (i = 0; i < NUM_OBJS; i++) {
+ sv = shadow_get(&objs[i], SV_ID2);
+ if (!sv)
+ pr_info(" got expected NULL result\n");
+ }
free_ptr_list();
return 0;
+out:
+ shadow_free_all(SV_ID1, NULL); /* 'char' pairs */
+ shadow_free_all(SV_ID2, NULL); /* 'int' pairs */
+ free_ptr_list();
+
+ return ret;
}
static void test_klp_shadow_vars_exit(void)
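
The rewritten test exercises the same klp_shadow_* API as before, only across several objects and two shadow-variable IDs. For reference, a hedged sketch of the underlying pattern that the shadow_alloc()/shadow_get_or_alloc() wrappers above stand in for, assuming the klp_shadow_* prototypes from <linux/livepatch.h>; the ID, object, and helper names are invented:

#include <linux/livepatch.h>

#define EXAMPLE_SV_ID	0x2020	/* hypothetical shadow variable id */

/* same shape as shadow_ctor() above: seed the newly attached shadow data */
static int example_ctor(void *obj, void *shadow_data, void *ctor_data)
{
	int *count = shadow_data;

	*count = *(int *)ctor_data;
	return 0;
}

static int example_attach_counter(void *obj)
{
	int seed = 0;
	int *count;

	/* attach (or find) a per-object int without touching obj's struct */
	count = klp_shadow_get_or_alloc(obj, EXAMPLE_SV_ID, sizeof(*count),
					GFP_KERNEL, example_ctor, &seed);
	if (!count)
		return -ENOMEM;

	(*count)++;
	return 0;
}

static void example_detach_counter(void *obj)
{
	/* nothing beyond the shadow variable itself needs releasing */
	klp_shadow_free(obj, EXAMPLE_SV_ID, NULL);
}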
diff --git a/lib/math/rational.c b/lib/math/rational.c
index 31fb27db2deb..df75c8809693 100644
--- a/lib/math/rational.c
+++ b/lib/math/rational.c
@@ -27,7 +27,7 @@
* with the fractional part size described in given_denominator.
*
* for theoretical background, see:
- * http://en.wikipedia.org/wiki/Continued_fraction
+ * https://en.wikipedia.org/wiki/Continued_fraction
*/
void rational_best_approximation(
diff --git a/lib/mpi/mpiutil.c b/lib/mpi/mpiutil.c
index 20ed0f766787..4cd2b335cb7f 100644
--- a/lib/mpi/mpiutil.c
+++ b/lib/mpi/mpiutil.c
@@ -69,7 +69,7 @@ void mpi_free_limb_space(mpi_ptr_t a)
if (!a)
return;
- kzfree(a);
+ kfree_sensitive(a);
}
void mpi_assign_limb_space(MPI a, mpi_ptr_t ap, unsigned nlimbs)
@@ -95,7 +95,7 @@ int mpi_resize(MPI a, unsigned nlimbs)
if (!p)
return -ENOMEM;
memcpy(p, a->d, a->alloced * sizeof(mpi_limb_t));
- kzfree(a->d);
+ kfree_sensitive(a->d);
a->d = p;
} else {
a->d = kcalloc(nlimbs, sizeof(mpi_limb_t), GFP_KERNEL);
@@ -112,7 +112,7 @@ void mpi_free(MPI a)
return;
if (a->flags & 4)
- kzfree(a->d);
+ kfree_sensitive(a->d);
else
mpi_free_limb_space(a->d);
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index a66595ba5543..a2345de90e93 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -99,6 +99,25 @@ void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
EXPORT_SYMBOL(percpu_counter_add_batch);
/*
+ * For a percpu_counter with a big batch, the deviation of its count can
+ * be large, and there may be a need to reduce that deviation, e.g. when
+ * the counter's batch is decreased at runtime to get better accuracy.
+ * This can be achieved by running this sync function on each CPU.
+ */
+void percpu_counter_sync(struct percpu_counter *fbc)
+{
+ unsigned long flags;
+ s64 count;
+
+ raw_spin_lock_irqsave(&fbc->lock, flags);
+ count = __this_cpu_read(*fbc->counters);
+ fbc->count += count;
+ __this_cpu_sub(*fbc->counters, count);
+ raw_spin_unlock_irqrestore(&fbc->lock, flags);
+}
+EXPORT_SYMBOL(percpu_counter_sync);
+
+/*
* Add up all the per-cpu counts, return the result. This is a more accurate
* but much slower version of percpu_counter_read_positive()
*/
diff --git a/lib/pldmfw/Makefile b/lib/pldmfw/Makefile
new file mode 100644
index 000000000000..99ad10711abe
--- /dev/null
+++ b/lib/pldmfw/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_PLDMFW) += pldmfw.o
diff --git a/lib/pldmfw/pldmfw.c b/lib/pldmfw/pldmfw.c
new file mode 100644
index 000000000000..e5d4b3b2af81
--- /dev/null
+++ b/lib/pldmfw/pldmfw.c
@@ -0,0 +1,879 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2019, Intel Corporation. */
+
+#include <asm/unaligned.h>
+#include <linux/crc32.h>
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pldmfw.h>
+#include <linux/slab.h>
+#include <linux/uuid.h>
+
+#include "pldmfw_private.h"
+
+/* Internal structure used to store details about the PLDM image file as it is
+ * being validated and processed.
+ */
+struct pldmfw_priv {
+ struct pldmfw *context;
+ const struct firmware *fw;
+
+ /* current offset of firmware image */
+ size_t offset;
+
+ struct list_head records;
+ struct list_head components;
+
+ /* PLDM Firmware Package Header */
+ const struct __pldm_header *header;
+ u16 total_header_size;
+
+ /* length of the component bitmap */
+ u16 component_bitmap_len;
+ u16 bitmap_size;
+
+ /* Start of the component image information */
+ u16 component_count;
+ const u8 *component_start;
+
+ /* Start of the firmware device id records */
+ const u8 *record_start;
+ u8 record_count;
+
+ /* The CRC at the end of the package header */
+ u32 header_crc;
+
+ struct pldmfw_record *matching_record;
+};
+
+/**
+ * pldm_check_fw_space - Verify that the firmware image has space left
+ * @data: pointer to private data
+ * @offset: offset to start from
+ * @length: length to check for
+ *
+ * Verify that the firmware data can hold a chunk of bytes with the specified
+ * offset and length.
+ *
+ * Returns: zero on success, or -EFAULT if the image does not have enough
+ * space left to fit the expected length.
+ */
+static int
+pldm_check_fw_space(struct pldmfw_priv *data, size_t offset, size_t length)
+{
+ size_t expected_size = offset + length;
+ struct device *dev = data->context->dev;
+
+ if (data->fw->size < expected_size) {
+ dev_dbg(dev, "Firmware file size smaller than expected. Got %zu bytes, needed %zu bytes\n",
+ data->fw->size, expected_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * pldm_move_fw_offset - Move the current firmware offset forward
+ * @data: pointer to private data
+ * @bytes_to_move: number of bytes to move the offset forward by
+ *
+ * Check that there is enough space past the current offset, and then move the
+ * offset forward by this amount.
+ *
+ * Returns: zero on success, or -EFAULT if the image is too small to fit the
+ * expected length.
+ */
+static int
+pldm_move_fw_offset(struct pldmfw_priv *data, size_t bytes_to_move)
+{
+ int err;
+
+ err = pldm_check_fw_space(data, data->offset, bytes_to_move);
+ if (err)
+ return err;
+
+ data->offset += bytes_to_move;
+
+ return 0;
+}
+
+/**
+ * pldm_parse_header - Validate and extract details about the PLDM header
+ * @data: pointer to private data
+ *
+ * Performs initial basic verification of the PLDM image, up to the first
+ * firmware record.
+ *
+ * This includes the following checks and extractions
+ *
+ * * Verify that the UUID at the start of the header matches the expected
+ * value as defined in the DSP0267 PLDM specification
+ * * Check that the revision is 0x01
+ * * Extract the total header_size and verify that the image is large enough
+ * to contain at least the length of this header
+ * * Extract the size of the component bitmap length
+ * * Extract a pointer to the start of the record area
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+static int pldm_parse_header(struct pldmfw_priv *data)
+{
+ const struct __pldmfw_record_area *record_area;
+ struct device *dev = data->context->dev;
+ const struct __pldm_header *header;
+ size_t header_size;
+ int err;
+
+ err = pldm_move_fw_offset(data, sizeof(*header));
+ if (err)
+ return err;
+
+ header = (const struct __pldm_header *)data->fw->data;
+ data->header = header;
+
+ if (!uuid_equal(&header->id, &pldm_firmware_header_id)) {
+ dev_dbg(dev, "Invalid package header identifier. Expected UUID %pUB, but got %pUB\n",
+ &pldm_firmware_header_id, &header->id);
+ return -EINVAL;
+ }
+
+ if (header->revision != PACKAGE_HEADER_FORMAT_REVISION) {
+ dev_dbg(dev, "Invalid package header revision. Expected revision %u but got %u\n",
+ PACKAGE_HEADER_FORMAT_REVISION, header->revision);
+ return -EOPNOTSUPP;
+ }
+
+ data->total_header_size = get_unaligned_le16(&header->size);
+ header_size = data->total_header_size - sizeof(*header);
+
+ err = pldm_check_fw_space(data, data->offset, header_size);
+ if (err)
+ return err;
+
+ data->component_bitmap_len =
+ get_unaligned_le16(&header->component_bitmap_len);
+
+ if (data->component_bitmap_len % 8 != 0) {
+ dev_dbg(dev, "Invalid component bitmap length. The length is %u, which is not a multiple of 8\n",
+ data->component_bitmap_len);
+ return -EINVAL;
+ }
+
+ data->bitmap_size = data->component_bitmap_len / 8;
+
+ err = pldm_move_fw_offset(data, header->version_len);
+ if (err)
+ return err;
+
+ /* extract a pointer to the record area, which just follows the main
+ * PLDM header data.
+ */
+ record_area = (const struct __pldmfw_record_area *)(data->fw->data +
+ data->offset);
+
+ err = pldm_move_fw_offset(data, sizeof(*record_area));
+ if (err)
+ return err;
+
+ data->record_count = record_area->record_count;
+ data->record_start = record_area->records;
+
+ return 0;
+}
+
+/**
+ * pldm_check_desc_tlv_len - Check that the length matches expectation
+ * @data: pointer to image details
+ * @type: the descriptor type
+ * @size: the length from the descriptor header
+ *
+ * If the descriptor type is one of the documented descriptor types according
+ * to the standard, verify that the provided length matches.
+ *
+ * If the type is not recognized or is VENDOR_DEFINED, return zero.
+ *
+ * Returns: zero on success, or -EINVAL if the specified size of a standard
+ * TLV does not match the expected value defined for that TLV.
+ */
+static int
+pldm_check_desc_tlv_len(struct pldmfw_priv *data, u16 type, u16 size)
+{
+ struct device *dev = data->context->dev;
+ u16 expected_size;
+
+ switch (type) {
+ case PLDM_DESC_ID_PCI_VENDOR_ID:
+ case PLDM_DESC_ID_PCI_DEVICE_ID:
+ case PLDM_DESC_ID_PCI_SUBVENDOR_ID:
+ case PLDM_DESC_ID_PCI_SUBDEV_ID:
+ expected_size = 2;
+ break;
+ case PLDM_DESC_ID_PCI_REVISION_ID:
+ expected_size = 1;
+ break;
+ case PLDM_DESC_ID_PNP_VENDOR_ID:
+ expected_size = 3;
+ break;
+ case PLDM_DESC_ID_IANA_ENTERPRISE_ID:
+ case PLDM_DESC_ID_ACPI_VENDOR_ID:
+ case PLDM_DESC_ID_PNP_PRODUCT_ID:
+ case PLDM_DESC_ID_ACPI_PRODUCT_ID:
+ expected_size = 4;
+ break;
+ case PLDM_DESC_ID_UUID:
+ expected_size = 16;
+ break;
+ case PLDM_DESC_ID_VENDOR_DEFINED:
+ return 0;
+ default:
+ /* Do not report an error on an unexpected TLV */
+ dev_dbg(dev, "Found unrecognized TLV type 0x%04x\n", type);
+ return 0;
+ }
+
+ if (size != expected_size) {
+ dev_dbg(dev, "Found TLV type 0x%04x with unexpected length. Got %u bytes, but expected %u bytes\n",
+ type, size, expected_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * pldm_parse_desc_tlvs - Check and skip past a number of TLVs
+ * @data: pointer to private data
+ * @record: pointer to the record this TLV belongs to
+ * @desc_count: descriptor count
+ *
+ * From the current offset, read and extract the descriptor TLVs, updating the
+ * current offset each time.
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+static int
+pldm_parse_desc_tlvs(struct pldmfw_priv *data, struct pldmfw_record *record, u8 desc_count)
+{
+ const struct __pldmfw_desc_tlv *__desc;
+ const u8 *desc_start;
+ u8 i;
+
+ desc_start = data->fw->data + data->offset;
+
+ pldm_for_each_desc_tlv(i, __desc, desc_start, desc_count) {
+ struct pldmfw_desc_tlv *desc;
+ int err;
+ u16 type, size;
+
+ err = pldm_move_fw_offset(data, sizeof(*__desc));
+ if (err)
+ return err;
+
+ type = get_unaligned_le16(&__desc->type);
+
+ /* According to DSP0267, this only includes the data field */
+ size = get_unaligned_le16(&__desc->size);
+
+ err = pldm_check_desc_tlv_len(data, type, size);
+ if (err)
+ return err;
+
+ /* check that we have space and move the offset forward */
+ err = pldm_move_fw_offset(data, size);
+ if (err)
+ return err;
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ desc->type = type;
+ desc->size = size;
+ desc->data = __desc->data;
+
+ list_add_tail(&desc->entry, &record->descs);
+ }
+
+ return 0;
+}
+
+/**
+ * pldm_parse_one_record - Verify size of one PLDM record
+ * @data: pointer to image details
+ * @__record: pointer to the record to check
+ *
+ * This function checks that the record size does not exceed either the size
+ * of the firmware file or the total length specified in the header section.
+ *
+ * It also verifies that the record length stored at the start of the record
+ * matches the size calculated by adding the static structure length, the
+ * component bitmap length, the version string length, the length of all
+ * descriptor TLVs, and the length of the package data.
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+static int
+pldm_parse_one_record(struct pldmfw_priv *data,
+ const struct __pldmfw_record_info *__record)
+{
+ struct pldmfw_record *record;
+ size_t measured_length;
+ int err;
+ const u8 *bitmap_ptr;
+ u16 record_len;
+ int i;
+
+ /* Make a copy and insert it into the record list */
+ record = kzalloc(sizeof(*record), GFP_KERNEL);
+ if (!record)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&record->descs);
+ list_add_tail(&record->entry, &data->records);
+
+ /* Then check that we have space and move the offset */
+ err = pldm_move_fw_offset(data, sizeof(*__record));
+ if (err)
+ return err;
+
+ record_len = get_unaligned_le16(&__record->record_len);
+ record->package_data_len = get_unaligned_le16(&__record->package_data_len);
+ record->version_len = __record->version_len;
+ record->version_type = __record->version_type;
+
+ bitmap_ptr = data->fw->data + data->offset;
+
+ /* check that we have space for the component bitmap itself */
+ err = pldm_move_fw_offset(data, data->bitmap_size);
+ if (err)
+ return err;
+
+ record->component_bitmap_len = data->component_bitmap_len;
+ record->component_bitmap = bitmap_zalloc(record->component_bitmap_len,
+ GFP_KERNEL);
+ if (!record->component_bitmap)
+ return -ENOMEM;
+
+ for (i = 0; i < data->bitmap_size; i++)
+ bitmap_set_value8(record->component_bitmap, bitmap_ptr[i], i * 8);
+
+ record->version_string = data->fw->data + data->offset;
+
+ err = pldm_move_fw_offset(data, record->version_len);
+ if (err)
+ return err;
+
+ /* Scan through the descriptor TLVs and find the end */
+ err = pldm_parse_desc_tlvs(data, record, __record->descriptor_count);
+ if (err)
+ return err;
+
+ record->package_data = data->fw->data + data->offset;
+
+ err = pldm_move_fw_offset(data, record->package_data_len);
+ if (err)
+ return err;
+
+ measured_length = data->offset - ((const u8 *)__record - data->fw->data);
+ if (measured_length != record_len) {
+ dev_dbg(data->context->dev, "Unexpected record length. Measured record length is %zu bytes, expected length is %u bytes\n",
+ measured_length, record_len);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * pldm_parse_records - Locate the start of the component area
+ * @data: pointer to private data
+ *
+ * Loop through and parse each firmware device record, then locate the
+ * component area which follows the records.
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+static int pldm_parse_records(struct pldmfw_priv *data)
+{
+ const struct __pldmfw_component_area *component_area;
+ const struct __pldmfw_record_info *record;
+ int err;
+ u8 i;
+
+ pldm_for_each_record(i, record, data->record_start, data->record_count) {
+ err = pldm_parse_one_record(data, record);
+ if (err)
+ return err;
+ }
+
+ /* Extract a pointer to the component area, which just follows the
+ * PLDM device record data.
+ */
+ component_area = (const struct __pldmfw_component_area *)(data->fw->data + data->offset);
+
+ err = pldm_move_fw_offset(data, sizeof(*component_area));
+ if (err)
+ return err;
+
+ data->component_count =
+ get_unaligned_le16(&component_area->component_image_count);
+ data->component_start = component_area->components;
+
+ return 0;
+}
+
+/**
+ * pldm_parse_components - Locate the CRC header checksum
+ * @data: pointer to private data
+ *
+ * Scan through each component image information entry, located by
+ * pldm_parse_records(), until reaching the end, which should point to the
+ * package header checksum.
+ *
+ * Extract the package header CRC and save it for verification.
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+static int pldm_parse_components(struct pldmfw_priv *data)
+{
+ const struct __pldmfw_component_info *__component;
+ struct device *dev = data->context->dev;
+ const u8 *header_crc_ptr;
+ int err;
+ u8 i;
+
+ pldm_for_each_component(i, __component, data->component_start, data->component_count) {
+ struct pldmfw_component *component;
+ u32 offset, size;
+
+ err = pldm_move_fw_offset(data, sizeof(*__component));
+ if (err)
+ return err;
+
+ err = pldm_move_fw_offset(data, __component->version_len);
+ if (err)
+ return err;
+
+ offset = get_unaligned_le32(&__component->location_offset);
+ size = get_unaligned_le32(&__component->size);
+
+ err = pldm_check_fw_space(data, offset, size);
+ if (err)
+ return err;
+
+ component = kzalloc(sizeof(*component), GFP_KERNEL);
+ if (!component)
+ return -ENOMEM;
+
+ component->index = i;
+ component->classification = get_unaligned_le16(&__component->classification);
+ component->identifier = get_unaligned_le16(&__component->identifier);
+ component->comparison_stamp = get_unaligned_le32(&__component->comparison_stamp);
+ component->options = get_unaligned_le16(&__component->options);
+ component->activation_method = get_unaligned_le16(&__component->activation_method);
+ component->version_type = __component->version_type;
+ component->version_len = __component->version_len;
+ component->version_string = __component->version_string;
+ component->component_data = data->fw->data + offset;
+ component->component_size = size;
+
+ list_add_tail(&component->entry, &data->components);
+ }
+
+ header_crc_ptr = data->fw->data + data->offset;
+
+ err = pldm_move_fw_offset(data, sizeof(data->header_crc));
+ if (err)
+ return err;
+
+ /* Make sure that we reached the expected offset */
+ if (data->offset != data->total_header_size) {
+ dev_dbg(dev, "Invalid firmware header size. Expected %u but got %zu\n",
+ data->total_header_size, data->offset);
+ return -EFAULT;
+ }
+
+ data->header_crc = get_unaligned_le32(header_crc_ptr);
+
+ return 0;
+}
+
+/**
+ * pldm_verify_header_crc - Verify that the CRC in the header matches
+ * @data: pointer to private data
+ *
+ * Calculates the 32-bit CRC using the standard IEEE 802.3 CRC polynomial and
+ * compares it to the value stored in the header.
+ *
+ * Returns: zero on success if the CRC matches, or -EBADMSG on an invalid CRC.
+ */
+static int pldm_verify_header_crc(struct pldmfw_priv *data)
+{
+ struct device *dev = data->context->dev;
+ u32 calculated_crc;
+ size_t length;
+
+ /* Calculate the 32-bit CRC of the header contents up to but
+ * not including the checksum. Note that the Linux crc32_le function
+ * does not perform an expected final XOR.
+ */
+ length = data->offset - sizeof(data->header_crc);
+ calculated_crc = crc32_le(~0, data->fw->data, length) ^ ~0;
+
+ if (calculated_crc != data->header_crc) {
+ dev_dbg(dev, "Invalid CRC in firmware header. Got 0x%08x but expected 0x%08x\n",
+ calculated_crc, data->header_crc);
+ return -EBADMSG;
+ }
+
+ return 0;
+}
+
+/**
+ * pldmfw_free_priv - Free memory allocated while parsing the PLDM image
+ * @data: pointer to the PLDM data structure
+ *
+ * Loops through and frees all of the memory allocated for each descriptor,
+ * record, and component.
+ */
+static void pldmfw_free_priv(struct pldmfw_priv *data)
+{
+ struct pldmfw_component *component, *c_safe;
+ struct pldmfw_record *record, *r_safe;
+ struct pldmfw_desc_tlv *desc, *d_safe;
+
+ list_for_each_entry_safe(component, c_safe, &data->components, entry) {
+ list_del(&component->entry);
+ kfree(component);
+ }
+
+ list_for_each_entry_safe(record, r_safe, &data->records, entry) {
+ list_for_each_entry_safe(desc, d_safe, &record->descs, entry) {
+ list_del(&desc->entry);
+ kfree(desc);
+ }
+
+ if (record->component_bitmap) {
+ bitmap_free(record->component_bitmap);
+ record->component_bitmap = NULL;
+ }
+
+ list_del(&record->entry);
+ kfree(record);
+ }
+}
+
+/**
+ * pldm_parse_image - parse and extract details from PLDM image
+ * @data: pointer to private data
+ *
+ * Verify that the firmware file contains valid data for a PLDM firmware
+ * file. Extract useful pointers and data from the firmware file and store
+ * them in the data structure.
+ *
+ * The PLDM firmware file format is defined in DMTF DSP0267 1.0.0. Care
+ * should be taken to use get_unaligned_le* when accessing data from the
+ * pointers in data.
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+static int pldm_parse_image(struct pldmfw_priv *data)
+{
+ int err;
+
+ if (WARN_ON(!(data->context->dev && data->fw->data && data->fw->size)))
+ return -EINVAL;
+
+ err = pldm_parse_header(data);
+ if (err)
+ return err;
+
+ err = pldm_parse_records(data);
+ if (err)
+ return err;
+
+ err = pldm_parse_components(data);
+ if (err)
+ return err;
+
+ return pldm_verify_header_crc(data);
+}
+
+/* these are u32 so that we can store PCI_ANY_ID */
+struct pldm_pci_record_id {
+ int vendor;
+ int device;
+ int subsystem_vendor;
+ int subsystem_device;
+};
+
+/**
+ * pldmfw_op_pci_match_record - Check if a PCI device matches the record
+ * @context: PLDM fw update structure
+ * @record: device record extracted from the PLDM image
+ *
+ * Determine whether the PCI device associated with this context matches the
+ * record data provided.
+ *
+ * Searches the descriptor TLVs and extracts the relevant descriptor data into
+ * a pldm_pci_record_id. This is then compared against the PCI device ID
+ * information.
+ *
+ * Returns: true if the device matches the record, false otherwise.
+ */
+bool pldmfw_op_pci_match_record(struct pldmfw *context, struct pldmfw_record *record)
+{
+ struct pci_dev *pdev = to_pci_dev(context->dev);
+ struct pldm_pci_record_id id = {
+ .vendor = PCI_ANY_ID,
+ .device = PCI_ANY_ID,
+ .subsystem_vendor = PCI_ANY_ID,
+ .subsystem_device = PCI_ANY_ID,
+ };
+ struct pldmfw_desc_tlv *desc;
+
+ list_for_each_entry(desc, &record->descs, entry) {
+ u16 value;
+ int *ptr;
+
+ switch (desc->type) {
+ case PLDM_DESC_ID_PCI_VENDOR_ID:
+ ptr = &id.vendor;
+ break;
+ case PLDM_DESC_ID_PCI_DEVICE_ID:
+ ptr = &id.device;
+ break;
+ case PLDM_DESC_ID_PCI_SUBVENDOR_ID:
+ ptr = &id.subsystem_vendor;
+ break;
+ case PLDM_DESC_ID_PCI_SUBDEV_ID:
+ ptr = &id.subsystem_device;
+ break;
+ default:
+ /* Skip unrelated TLVs */
+ continue;
+ }
+
+ value = get_unaligned_le16(desc->data);
+ /* A value of zero for one of the descriptors is sometimes
+ * used when the record should ignore this field when matching the
+ * device, for example if the record applies to any subsystem
+ * device or vendor.
+ */
+ if (value)
+ *ptr = (int)value;
+ else
+ *ptr = PCI_ANY_ID;
+ }
+
+ if ((id.vendor == PCI_ANY_ID || id.vendor == pdev->vendor) &&
+ (id.device == PCI_ANY_ID || id.device == pdev->device) &&
+ (id.subsystem_vendor == PCI_ANY_ID || id.subsystem_vendor == pdev->subsystem_vendor) &&
+ (id.subsystem_device == PCI_ANY_ID || id.subsystem_device == pdev->subsystem_device))
+ return true;
+ else
+ return false;
+}
+EXPORT_SYMBOL(pldmfw_op_pci_match_record);
+
+/**
+ * pldm_find_matching_record - Find the first matching PLDM record
+ * @data: pointer to private data
+ *
+ * Search through PLDM records and find the first matching entry. It is
+ * expected that only one entry matches.
+ *
+ * Store a pointer to the matching record, if found.
+ *
+ * Returns: zero on success, or -ENOENT if no matching record is found.
+ */
+static int pldm_find_matching_record(struct pldmfw_priv *data)
+{
+ struct pldmfw_record *record;
+
+ list_for_each_entry(record, &data->records, entry) {
+ if (data->context->ops->match_record(data->context, record)) {
+ data->matching_record = record;
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+
+/**
+ * pldm_send_package_data - Send firmware the package data for the record
+ * @data: pointer to private data
+ *
+ * Send the package data associated with the matching record to the firmware,
+ * using the send_pkg_data operation.
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+static int
+pldm_send_package_data(struct pldmfw_priv *data)
+{
+ struct pldmfw_record *record = data->matching_record;
+ const struct pldmfw_ops *ops = data->context->ops;
+
+ return ops->send_package_data(data->context, record->package_data,
+ record->package_data_len);
+}
+
+/**
+ * pldm_send_component_tables - Send component table information to firmware
+ * @data: pointer to private data
+ *
+ * Loop over each component, sending the applicable components to the firmware
+ * via the send_component_table operation.
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+static int
+pldm_send_component_tables(struct pldmfw_priv *data)
+{
+ unsigned long *bitmap = data->matching_record->component_bitmap;
+ struct pldmfw_component *component;
+ int err;
+
+ list_for_each_entry(component, &data->components, entry) {
+ u8 index = component->index, transfer_flag = 0;
+
+ /* Skip components which are not intended for this device */
+ if (!test_bit(index, bitmap))
+ continue;
+
+ /* determine whether this is the start, middle, end, or both
+ * the start and end of the component tables
+ */
+ if (index == find_first_bit(bitmap, data->component_bitmap_len))
+ transfer_flag |= PLDM_TRANSFER_FLAG_START;
+ if (index == find_last_bit(bitmap, data->component_bitmap_len))
+ transfer_flag |= PLDM_TRANSFER_FLAG_END;
+ if (!transfer_flag)
+ transfer_flag = PLDM_TRANSFER_FLAG_MIDDLE;
+
+ err = data->context->ops->send_component_table(data->context,
+ component,
+ transfer_flag);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * pldm_flash_components - Program each component to device flash
+ * @data: pointer to private data
+ *
+ * Loop through each component that is active for the matching device record,
+ * and send it to the device driver for flashing.
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+static int pldm_flash_components(struct pldmfw_priv *data)
+{
+ unsigned long *bitmap = data->matching_record->component_bitmap;
+ struct pldmfw_component *component;
+ int err;
+
+ list_for_each_entry(component, &data->components, entry) {
+ u8 index = component->index;
+
+ /* Skip components which are not intended for this device */
+ if (!test_bit(index, bitmap))
+ continue;
+
+ err = data->context->ops->flash_component(data->context, component);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * pldm_finalize_update - Finalize the device flash update
+ * @data: pointer to private data
+ *
+ * Tell the device driver to perform any remaining logic to complete the
+ * device update.
+ *
+ * Returns: zero on success, or a negative error code indicating the reason
+ * for the failure.
+ */
+static int pldm_finalize_update(struct pldmfw_priv *data)
+{
+ if (data->context->ops->finalize_update)
+ return data->context->ops->finalize_update(data->context);
+
+ return 0;
+}
+
+/**
+ * pldmfw_flash_image - Write a PLDM-formatted firmware image to the device
+ * @context: ops and data for firmware update
+ * @fw: firmware object pointing to the relevant firmware file to program
+ *
+ * Parse the data for a given firmware file, verifying that it is a valid PLDM
+ * formatted image that matches this device.
+ *
+ * Extract the device record Package Data and Component Tables and send them
+ * to the device firmware. Extract and write the flash data for each of the
+ * components indicated in the firmware file.
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+int pldmfw_flash_image(struct pldmfw *context, const struct firmware *fw)
+{
+ struct pldmfw_priv *data;
+ int err;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&data->records);
+ INIT_LIST_HEAD(&data->components);
+
+ data->fw = fw;
+ data->context = context;
+
+ err = pldm_parse_image(data);
+ if (err)
+ goto out_release_data;
+
+ err = pldm_find_matching_record(data);
+ if (err)
+ goto out_release_data;
+
+ err = pldm_send_package_data(data);
+ if (err)
+ goto out_release_data;
+
+ err = pldm_send_component_tables(data);
+ if (err)
+ goto out_release_data;
+
+ err = pldm_flash_components(data);
+ if (err)
+ goto out_release_data;
+
+ err = pldm_finalize_update(data);
+
+out_release_data:
+ pldmfw_free_priv(data);
+ kfree(data);
+
+ return err;
+}
+EXPORT_SYMBOL(pldmfw_flash_image);
+
+MODULE_AUTHOR("Jacob Keller <jacob.e.keller@intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("PLDM firmware flash update library");
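
For context on how a consumer drives this library: a hedged, driver-side sketch built around pldmfw_flash_image() and pldmfw_op_pci_match_record(). The my_* names are invented; the pldmfw_ops member names follow the calls made in this file, and the exact prototypes live in include/linux/pldmfw.h, which is not part of this diff:

static int my_send_package_data(struct pldmfw *ctx, const u8 *data, u16 length)
{
	/* a real driver would hand the record's package data to its device */
	return 0;
}

static int my_send_component_table(struct pldmfw *ctx,
				   struct pldmfw_component *component,
				   u8 transfer_flag)
{
	/* tell the device which component follows, honoring transfer_flag */
	return 0;
}

static int my_flash_component(struct pldmfw *ctx,
			      struct pldmfw_component *component)
{
	/* write component->component_size bytes from component->component_data */
	return 0;
}

static const struct pldmfw_ops my_pldmfw_ops = {
	.match_record		= pldmfw_op_pci_match_record,
	.send_package_data	= my_send_package_data,
	.send_component_table	= my_send_component_table,
	.flash_component	= my_flash_component,
	/* .finalize_update is optional; see pldm_finalize_update() */
};

static int my_flash_pldm_image(struct device *dev, const struct firmware *fw)
{
	struct pldmfw context = {
		.ops = &my_pldmfw_ops,
		.dev = dev,
	};

	return pldmfw_flash_image(&context, fw);
}

The const struct firmware * would normally come from request_firmware() in the driver's flash-update path.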
diff --git a/lib/pldmfw/pldmfw_private.h b/lib/pldmfw/pldmfw_private.h
new file mode 100644
index 000000000000..687ef2200692
--- /dev/null
+++ b/lib/pldmfw/pldmfw_private.h
@@ -0,0 +1,238 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2019, Intel Corporation. */
+
+#ifndef _PLDMFW_PRIVATE_H_
+#define _PLDMFW_PRIVATE_H_
+
+/* The following data structures define the layout of a firmware binary
+ * following the "PLDM For Firmware Update Specification", DMTF standard
+ * #DSP0267.
+ *
+ * pldmfw.c uses these structures to implement a simple engine that will parse
+ * a fw binary file in this format and perform a firmware update for a given
+ * device.
+ *
+ * Due to the variable sized data layout, alignment of fields within these
+ * structures is not guaranteed when reading. For this reason, all multi-byte
+ * field accesses should be done using the unaligned access macros.
+ * Additionally, the standard specifies that multi-byte fields are in
+ * LittleEndian format.
+ *
+ * The structure definitions are not made public, in order to keep direct
+ * accesses within code that is prepared to deal with the limitation of
+ * unaligned access.
+ */
+
+/* UUID for PLDM firmware packages: f018878c-cb7d-4943-9800-a02f059aca02 */
+static const uuid_t pldm_firmware_header_id =
+ UUID_INIT(0xf018878c, 0xcb7d, 0x4943,
+ 0x98, 0x00, 0xa0, 0x2f, 0x05, 0x9a, 0xca, 0x02);
+
+/* Revision number of the PLDM header format this code supports */
+#define PACKAGE_HEADER_FORMAT_REVISION 0x01
+
+/* timestamp104 structure defined in PLDM Base specification */
+#define PLDM_TIMESTAMP_SIZE 13
+struct __pldm_timestamp {
+ u8 b[PLDM_TIMESTAMP_SIZE];
+} __packed __aligned(1);
+
+/* Package Header Information */
+struct __pldm_header {
+ uuid_t id; /* PackageHeaderIdentifier */
+ u8 revision; /* PackageHeaderFormatRevision */
+ __le16 size; /* PackageHeaderSize */
+ struct __pldm_timestamp release_date; /* PackageReleaseDateTime */
+ __le16 component_bitmap_len; /* ComponentBitmapBitLength */
+ u8 version_type; /* PackageVersionStringType */
+ u8 version_len; /* PackageVersionStringLength */
+
+ /*
+ * DSP0267 also includes the following variable length fields at the
+ * end of this structure:
+ *
+ * PackageVersionString, length is version_len.
+ *
+ * The total size of this section is
+ * sizeof(pldm_header) + version_len;
+ */
+ u8 version_string[]; /* PackageVersionString */
+} __packed __aligned(1);
+
+/* Firmware Device ID Record */
+struct __pldmfw_record_info {
+ __le16 record_len; /* RecordLength */
+ u8 descriptor_count; /* DescriptorCount */
+ __le32 device_update_flags; /* DeviceUpdateOptionFlags */
+ u8 version_type; /* ComponentImageSetVersionType */
+ u8 version_len; /* ComponentImageSetVersionLength */
+ __le16 package_data_len; /* FirmwareDevicePackageDataLength */
+
+ /*
+ * DSP0267 also includes the following variable length fields at the
+ * end of this structure:
+ *
+ * ApplicableComponents, length is component_bitmap_len from header
+ * ComponentImageSetVersionString, length is version_len
+ * RecordDescriptors, a series of TLVs with 16bit type and length
+ * FirmwareDevicePackageData, length is package_data_len
+ *
+ * The total size of each record is
+ * sizeof(pldmfw_record_info) +
+ * component_bitmap_len (converted to bytes!) +
+ * version_len +
+ * <length of RecordDescriptors> +
+ * package_data_len
+ */
+ u8 variable_record_data[];
+} __packed __aligned(1);
+
+/* Firmware Descriptor Definition */
+struct __pldmfw_desc_tlv {
+ __le16 type; /* DescriptorType */
+ __le16 size; /* DescriptorSize */
+ u8 data[]; /* DescriptorData */
+} __aligned(1);
+
+/* Firmware Device Identification Area */
+struct __pldmfw_record_area {
+ u8 record_count; /* DeviceIDRecordCount */
+ /* This is not a struct type because the size of each record varies */
+ u8 records[];
+} __aligned(1);
+
+/* Individual Component Image Information */
+struct __pldmfw_component_info {
+ __le16 classification; /* ComponentClassification */
+ __le16 identifier; /* ComponentIdentifier */
+ __le32 comparison_stamp; /* ComponentComparisonStamp */
+ __le16 options; /* componentOptions */
+ __le16 activation_method; /* RequestedComponentActivationMethod */
+ __le32 location_offset; /* ComponentLocationOffset */
+ __le32 size; /* ComponentSize */
+ u8 version_type; /* ComponentVersionStringType */
+ u8 version_len; /* ComponentVersionStringLength */
+
+ /*
+ * DSP0267 also includes the following variable length fields at the
+ * end of this structure:
+ *
+ * ComponentVersionString, length is version_len
+ *
+ * The total size of this section is
+ * sizeof(pldmfw_component_info) + version_len;
+ */
+ u8 version_string[]; /* ComponentVersionString */
+} __packed __aligned(1);
+
+/* Component Image Information Area */
+struct __pldmfw_component_area {
+ __le16 component_image_count;
+ /* This is not a struct type because the component size varies */
+ u8 components[];
+} __aligned(1);
+
+/**
+ * pldm_first_desc_tlv
+ * @start: byte offset of the start of the descriptor TLVs
+ *
+ * Converts the starting offset of the descriptor TLVs into a pointer to the
+ * first descriptor.
+ */
+#define pldm_first_desc_tlv(start) \
+ ((const struct __pldmfw_desc_tlv *)(start))
+
+/**
+ * pldm_next_desc_tlv
+ * @desc: pointer to a descriptor TLV
+ *
+ * Finds the pointer to the next descriptor following a given descriptor
+ */
+#define pldm_next_desc_tlv(desc) \
+ ((const struct __pldmfw_desc_tlv *)((desc)->data + \
+ get_unaligned_le16(&(desc)->size)))
+
+/**
+ * pldm_for_each_desc_tlv
+ * @i: variable to store descriptor index
+ * @desc: variable to store descriptor pointer
+ * @start: byte offset of the start of the descriptors
+ * @count: the number of descriptors
+ *
+ * for loop macro to iterate over all of the descriptors of a given PLDM
+ * record.
+ */
+#define pldm_for_each_desc_tlv(i, desc, start, count) \
+ for ((i) = 0, (desc) = pldm_first_desc_tlv(start); \
+ (i) < (count); \
+ (i)++, (desc) = pldm_next_desc_tlv(desc))
+
+/**
+ * pldm_first_record
+ * @start: byte offset of the start of the PLDM records
+ *
+ * Converts a starting offset of the PLDM records into a pointer to the first
+ * record.
+ */
+#define pldm_first_record(start) \
+ ((const struct __pldmfw_record_info *)(start))
+
+/**
+ * pldm_next_record
+ * @record: pointer to a PLDM record
+ *
+ * Finds a pointer to the next record following a given record
+ */
+#define pldm_next_record(record) \
+ ((const struct __pldmfw_record_info *) \
+ ((const u8 *)(record) + get_unaligned_le16(&(record)->record_len)))
+
+/**
+ * pldm_for_each_record
+ * @i: variable to store record index
+ * @record: variable to store record pointer
+ * @start: byte offset of the start of the records
+ * @count: the number of records
+ *
+ * for loop macro to iterate over all of the records of a PLDM file.
+ */
+#define pldm_for_each_record(i, record, start, count) \
+ for ((i) = 0, (record) = pldm_first_record(start); \
+ (i) < (count); \
+ (i)++, (record) = pldm_next_record(record))
+
+/**
+ * pldm_first_component
+ * @start: byte offset of the start of the PLDM components
+ *
+ * Convert a starting offset of the PLDM components into a pointer to the
+ * first component
+ */
+#define pldm_first_component(start) \
+ ((const struct __pldmfw_component_info *)(start))
+
+/**
+ * pldm_next_component
+ * @component: pointer to a PLDM component
+ *
+ * Finds a pointer to the next component following a given component
+ */
+#define pldm_next_component(component) \
+ ((const struct __pldmfw_component_info *)((component)->version_string + \
+ (component)->version_len))
+
+/**
+ * pldm_for_each_component
+ * @i: variable to store component index
+ * @component: variable to store component pointer
+ * @start: byte offset to the start of the first component
+ * @count: the number of components
+ *
+ * for loop macro to iterate over all of the components of a PLDM file.
+ */
+#define pldm_for_each_component(i, component, start, count) \
+ for ((i) = 0, (component) = pldm_first_component(start); \
+ (i) < (count); \
+ (i)++, (component) = pldm_next_component(component))
+
+#endif
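
As a usage note for the iteration macros and the unaligned-access rule described above, a small hedged sketch (not part of the header) that walks an already-validated record area; pldmfw.c performs the same walk with full bounds checking in pldm_parse_one_record():

/* assumes <asm/unaligned.h>, which pldmfw.c already includes */
static void example_dump_record_lengths(const u8 *record_start, u8 record_count)
{
	const struct __pldmfw_record_info *record;
	u8 i;

	pldm_for_each_record(i, record, record_start, record_count)
		pr_debug("record %u is %u bytes\n", i,
			 get_unaligned_le16(&record->record_len));
}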
diff --git a/lib/random32.c b/lib/random32.c
index 3d749abb9e80..932345323af0 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -39,6 +39,7 @@
#include <linux/random.h>
#include <linux/sched.h>
#include <asm/unaligned.h>
+#include <trace/events/random.h>
#ifdef CONFIG_RANDOM32_SELFTEST
static void __init prandom_state_selftest(void);
@@ -82,6 +83,7 @@ u32 prandom_u32(void)
u32 res;
res = prandom_u32_state(state);
+ trace_prandom_u32(res);
put_cpu_var(net_rand_state);
return res;
diff --git a/lib/rbtree.c b/lib/rbtree.c
index 8545872e61db..c4ac5c2421f2 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -13,7 +13,7 @@
#include <linux/export.h>
/*
- * red-black trees properties: http://en.wikipedia.org/wiki/Rbtree
+ * red-black trees properties: https://en.wikipedia.org/wiki/Rbtree
*
* 1) A node is either red or black
* 2) The root is black
diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
index 6b13150667f5..df903c53952b 100644
--- a/lib/test_bitmap.c
+++ b/lib/test_bitmap.c
@@ -610,6 +610,63 @@ static void __init test_for_each_set_clump8(void)
expect_eq_clump8(start, CLUMP_EXP_NUMBITS, clump_exp, &clump);
}
+struct test_bitmap_cut {
+ unsigned int first;
+ unsigned int cut;
+ unsigned int nbits;
+ unsigned long in[4];
+ unsigned long expected[4];
+};
+
+static struct test_bitmap_cut test_cut[] = {
+ { 0, 0, 8, { 0x0000000aUL, }, { 0x0000000aUL, }, },
+ { 0, 0, 32, { 0xdadadeadUL, }, { 0xdadadeadUL, }, },
+ { 0, 3, 8, { 0x000000aaUL, }, { 0x00000015UL, }, },
+ { 3, 3, 8, { 0x000000aaUL, }, { 0x00000012UL, }, },
+ { 0, 1, 32, { 0xa5a5a5a5UL, }, { 0x52d2d2d2UL, }, },
+ { 0, 8, 32, { 0xdeadc0deUL, }, { 0x00deadc0UL, }, },
+ { 1, 1, 32, { 0x5a5a5a5aUL, }, { 0x2d2d2d2cUL, }, },
+ { 0, 15, 32, { 0xa5a5a5a5UL, }, { 0x00014b4bUL, }, },
+ { 0, 16, 32, { 0xa5a5a5a5UL, }, { 0x0000a5a5UL, }, },
+ { 15, 15, 32, { 0xa5a5a5a5UL, }, { 0x000125a5UL, }, },
+ { 15, 16, 32, { 0xa5a5a5a5UL, }, { 0x0000a5a5UL, }, },
+ { 16, 15, 32, { 0xa5a5a5a5UL, }, { 0x0001a5a5UL, }, },
+
+ { BITS_PER_LONG, BITS_PER_LONG, BITS_PER_LONG,
+ { 0xa5a5a5a5UL, 0xa5a5a5a5UL, },
+ { 0xa5a5a5a5UL, 0xa5a5a5a5UL, },
+ },
+ { 1, BITS_PER_LONG - 1, BITS_PER_LONG,
+ { 0xa5a5a5a5UL, 0xa5a5a5a5UL, },
+ { 0x00000001UL, 0x00000001UL, },
+ },
+
+ { 0, BITS_PER_LONG * 2, BITS_PER_LONG * 2 + 1,
+ { 0xa5a5a5a5UL, 0x00000001UL, 0x00000001UL, 0x00000001UL },
+ { 0x00000001UL, },
+ },
+ { 16, BITS_PER_LONG * 2 + 1, BITS_PER_LONG * 2 + 1 + 16,
+ { 0x0000ffffUL, 0x5a5a5a5aUL, 0x5a5a5a5aUL, 0x5a5a5a5aUL },
+ { 0x2d2dffffUL, },
+ },
+};
+
+static void __init test_bitmap_cut(void)
+{
+ unsigned long b[5], *in = &b[1], *out = &b[0]; /* Partial overlap */
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(test_cut); i++) {
+ struct test_bitmap_cut *t = &test_cut[i];
+
+ memcpy(in, t->in, sizeof(t->in));
+
+ bitmap_cut(out, in, t->first, t->cut, t->nbits);
+
+ expect_eq_bitmap(t->expected, out, t->nbits);
+ }
+}
+
static void __init selftest(void)
{
test_zero_clear();
@@ -623,6 +680,7 @@ static void __init selftest(void)
test_bitmap_parselist_user();
test_mem_optimisations();
test_for_each_set_clump8();
+ test_bitmap_cut();
}
KSTM_MODULE_LOADERS(test_bitmap);
diff --git a/lib/test_bitops.c b/lib/test_bitops.c
index ced25e3a779b..471141ddd691 100644
--- a/lib/test_bitops.c
+++ b/lib/test_bitops.c
@@ -52,9 +52,9 @@ static unsigned long order_comb_long[][2] = {
static int __init test_bitops_startup(void)
{
- int i;
+ int i, bit_set;
- pr_warn("Loaded test module\n");
+ pr_info("Starting bitops test\n");
set_bit(BITOPS_4, g_bitmap);
set_bit(BITOPS_7, g_bitmap);
set_bit(BITOPS_11, g_bitmap);
@@ -81,12 +81,8 @@ static int __init test_bitops_startup(void)
order_comb_long[i][0]);
}
#endif
- return 0;
-}
-static void __exit test_bitops_unstartup(void)
-{
- int bit_set;
+ barrier();
clear_bit(BITOPS_4, g_bitmap);
clear_bit(BITOPS_7, g_bitmap);
@@ -98,7 +94,13 @@ static void __exit test_bitops_unstartup(void)
if (bit_set != BITOPS_LAST)
pr_err("ERROR: FOUND SET BIT %d\n", bit_set);
- pr_warn("Unloaded test module\n");
+ pr_info("Completed bitops test\n");
+
+ return 0;
+}
+
+static void __exit test_bitops_unstartup(void)
+{
}
module_init(test_bitops_startup);
diff --git a/lib/test_bits.c b/lib/test_bits.c
new file mode 100644
index 000000000000..c9368a2314e7
--- /dev/null
+++ b/lib/test_bits.c
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Test cases for functions and macros in bits.h
+ */
+
+#include <kunit/test.h>
+#include <linux/bits.h>
+
+
+static void genmask_test(struct kunit *test)
+{
+ KUNIT_EXPECT_EQ(test, 1ul, GENMASK(0, 0));
+ KUNIT_EXPECT_EQ(test, 3ul, GENMASK(1, 0));
+ KUNIT_EXPECT_EQ(test, 6ul, GENMASK(2, 1));
+ KUNIT_EXPECT_EQ(test, 0xFFFFFFFFul, GENMASK(31, 0));
+
+#ifdef TEST_GENMASK_FAILURES
+ /* these should fail compilation */
+ GENMASK(0, 1);
+ GENMASK(0, 10);
+ GENMASK(9, 10);
+#endif
+
+
+}
+
+static void genmask_ull_test(struct kunit *test)
+{
+ KUNIT_EXPECT_EQ(test, 1ull, GENMASK_ULL(0, 0));
+ KUNIT_EXPECT_EQ(test, 3ull, GENMASK_ULL(1, 0));
+ KUNIT_EXPECT_EQ(test, 0x000000ffffe00000ull, GENMASK_ULL(39, 21));
+ KUNIT_EXPECT_EQ(test, 0xffffffffffffffffull, GENMASK_ULL(63, 0));
+
+#ifdef TEST_GENMASK_FAILURES
+ /* these should fail compilation */
+ GENMASK_ULL(0, 1);
+ GENMASK_ULL(0, 10);
+ GENMASK_ULL(9, 10);
+#endif
+}
+
+static void genmask_input_check_test(struct kunit *test)
+{
+ unsigned int x, y;
+ int z, w;
+
+ /* Unknown input */
+ KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(x, 0));
+ KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(0, x));
+ KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(x, y));
+
+ KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(z, 0));
+ KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(0, z));
+ KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(z, w));
+
+ /* Valid input */
+ KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(1, 1));
+ KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(39, 21));
+}
+
+
+static struct kunit_case bits_test_cases[] = {
+ KUNIT_CASE(genmask_test),
+ KUNIT_CASE(genmask_ull_test),
+ KUNIT_CASE(genmask_input_check_test),
+ {}
+};
+
+static struct kunit_suite bits_test_suite = {
+ .name = "bits-test",
+ .test_cases = bits_test_cases,
+};
+kunit_test_suite(bits_test_suite);
+
+MODULE_LICENSE("GPL");
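
For context, a hedged sketch of the kind of field-mask construction these GENMASK() checks are guarding; the register layout below is invented for illustration:

#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_STATUS_CODE	GENMASK(15, 8)	/* hypothetical bits 15..8 */

static inline unsigned int example_status_code(u32 reg)
{
	/* mask out the field, then shift it down to bit 0 */
	return (reg & EXAMPLE_STATUS_CODE) >> 8;
}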
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index a5fddf9ebcb7..ca7d635bccd9 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -5275,31 +5275,21 @@ static struct bpf_test tests[] = {
{ /* Mainly checking JIT here. */
"BPF_MAXINSNS: Ctx heavy transformations",
{ },
-#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390)
- CLASSIC | FLAG_EXPECTED_FAIL,
-#else
CLASSIC,
-#endif
{ },
{
{ 1, SKB_VLAN_PRESENT },
{ 10, SKB_VLAN_PRESENT }
},
.fill_helper = bpf_fill_maxinsns6,
- .expected_errcode = -ENOTSUPP,
},
{ /* Mainly checking JIT here. */
"BPF_MAXINSNS: Call heavy transformations",
{ },
-#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390)
- CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
-#else
CLASSIC | FLAG_NO_DATA,
-#endif
{ },
{ { 1, 0 }, { 10, 0 } },
.fill_helper = bpf_fill_maxinsns7,
- .expected_errcode = -ENOTSUPP,
},
{ /* Mainly checking JIT here. */
"BPF_MAXINSNS: Jump heavy test",
@@ -5350,28 +5340,18 @@ static struct bpf_test tests[] = {
{
"BPF_MAXINSNS: exec all MSH",
{ },
-#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390)
- CLASSIC | FLAG_EXPECTED_FAIL,
-#else
CLASSIC,
-#endif
{ 0xfa, 0xfb, 0xfc, 0xfd, },
{ { 4, 0xababab83 } },
.fill_helper = bpf_fill_maxinsns13,
- .expected_errcode = -ENOTSUPP,
},
{
"BPF_MAXINSNS: ld_abs+get_processor_id",
{ },
-#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390)
- CLASSIC | FLAG_EXPECTED_FAIL,
-#else
CLASSIC,
-#endif
{ },
{ { 1, 0xbee } },
.fill_helper = bpf_fill_ld_abs_get_processor_id,
- .expected_errcode = -ENOTSUPP,
},
/*
* LD_IND / LD_ABS on fragmented SKBs
diff --git a/lib/test_hmm.c b/lib/test_hmm.c
index a2a82262b97b..e7dc3de355b7 100644
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c
@@ -214,6 +214,14 @@ static bool dmirror_interval_invalidate(struct mmu_interval_notifier *mni,
{
struct dmirror *dmirror = container_of(mni, struct dmirror, notifier);
+ /*
+ * Ignore invalidation callbacks for device private pages since
+ * the invalidation is handled as part of the migration process.
+ */
+ if (range->event == MMU_NOTIFY_MIGRATE &&
+ range->migrate_pgmap_owner == dmirror->mdevice)
+ return true;
+
if (mmu_notifier_range_blockable(range))
mutex_lock(&dmirror->mutex);
else if (!mutex_trylock(&dmirror->mutex))
@@ -585,15 +593,6 @@ static void dmirror_migrate_alloc_and_copy(struct migrate_vma *args,
*/
spage = migrate_pfn_to_page(*src);
- /*
- * Don't migrate device private pages from our own driver or
- * others. For our own we would do a device private memory copy
- * not a migration and for others, we would need to fault the
- * other device's page into system memory first.
- */
- if (spage && is_zone_device_page(spage))
- continue;
-
dpage = dmirror_devmem_alloc_page(mdevice);
if (!dpage)
continue;
@@ -702,7 +701,8 @@ static int dmirror_migrate(struct dmirror *dmirror,
args.dst = dst_pfns;
args.start = addr;
args.end = next;
- args.src_owner = NULL;
+ args.pgmap_owner = dmirror->mdevice;
+ args.flags = MIGRATE_VMA_SELECT_SYSTEM;
ret = migrate_vma_setup(&args);
if (ret)
goto out;
@@ -766,6 +766,10 @@ static void dmirror_mkentry(struct dmirror *dmirror, struct hmm_range *range,
*perm |= HMM_DMIRROR_PROT_WRITE;
else
*perm |= HMM_DMIRROR_PROT_READ;
+ if (hmm_pfn_to_map_order(entry) + PAGE_SHIFT == PMD_SHIFT)
+ *perm |= HMM_DMIRROR_PROT_PMD;
+ else if (hmm_pfn_to_map_order(entry) + PAGE_SHIFT == PUD_SHIFT)
+ *perm |= HMM_DMIRROR_PROT_PUD;
}
static bool dmirror_snapshot_invalidate(struct mmu_interval_notifier *mni,
@@ -987,7 +991,7 @@ static void dmirror_devmem_free(struct page *page)
}
static vm_fault_t dmirror_devmem_fault_alloc_and_copy(struct migrate_vma *args,
- struct dmirror_device *mdevice)
+ struct dmirror *dmirror)
{
const unsigned long *src = args->src;
unsigned long *dst = args->dst;
@@ -1009,6 +1013,7 @@ static vm_fault_t dmirror_devmem_fault_alloc_and_copy(struct migrate_vma *args,
continue;
lock_page(dpage);
+ xa_erase(&dmirror->pt, addr >> PAGE_SHIFT);
copy_highpage(dpage, spage);
*dst = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
if (*src & MIGRATE_PFN_WRITE)
@@ -1017,15 +1022,6 @@ static vm_fault_t dmirror_devmem_fault_alloc_and_copy(struct migrate_vma *args,
return 0;
}
-static void dmirror_devmem_fault_finalize_and_map(struct migrate_vma *args,
- struct dmirror *dmirror)
-{
- /* Invalidate the device's page table mapping. */
- mutex_lock(&dmirror->mutex);
- dmirror_do_update(dmirror, args->start, args->end);
- mutex_unlock(&dmirror->mutex);
-}
-
static vm_fault_t dmirror_devmem_fault(struct vm_fault *vmf)
{
struct migrate_vma args;
@@ -1049,16 +1045,21 @@ static vm_fault_t dmirror_devmem_fault(struct vm_fault *vmf)
args.end = args.start + PAGE_SIZE;
args.src = &src_pfns;
args.dst = &dst_pfns;
- args.src_owner = dmirror->mdevice;
+ args.pgmap_owner = dmirror->mdevice;
+ args.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
if (migrate_vma_setup(&args))
return VM_FAULT_SIGBUS;
- ret = dmirror_devmem_fault_alloc_and_copy(&args, dmirror->mdevice);
+ ret = dmirror_devmem_fault_alloc_and_copy(&args, dmirror);
if (ret)
return ret;
migrate_vma_pages(&args);
- dmirror_devmem_fault_finalize_and_map(&args, dmirror);
+ /*
+ * No device finalize step is needed since
+ * dmirror_devmem_fault_alloc_and_copy() will have already
+ * invalidated the device page table.
+ */
migrate_vma_finalize(&args);
return 0;
}
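
Taken together, the hunks above reshape the device-private fault path roughly as sketched below. This is a condensed illustration only (locking, PFN bookkeeping and error paths trimmed), not a drop-in replacement for the real dmirror_devmem_fault():

/* Condensed sketch of the reworked fault path; see the hunks above for
 * the actual code. */
static vm_fault_t devmem_fault_sketch(struct vm_fault *vmf)
{
	struct dmirror *dmirror = vmf->page->zone_device_data;
	unsigned long src_pfn = 0, dst_pfn = 0;
	vm_fault_t ret;
	struct migrate_vma args = {
		.vma		= vmf->vma,
		.start		= vmf->address,
		.end		= vmf->address + PAGE_SIZE,
		.src		= &src_pfn,
		.dst		= &dst_pfn,
		.pgmap_owner	= dmirror->mdevice,	/* matched by the notifier filter */
		.flags		= MIGRATE_VMA_SELECT_DEVICE_PRIVATE,
	};

	if (migrate_vma_setup(&args))
		return VM_FAULT_SIGBUS;

	/* Copies back to system memory and xa_erase()s the mirror's own
	 * page-table entry, so no separate finalize/invalidate step remains. */
	ret = dmirror_devmem_fault_alloc_and_copy(&args, dmirror);
	if (ret)
		return ret;

	migrate_vma_pages(&args);
	migrate_vma_finalize(&args);
	return 0;
}
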
diff --git a/lib/test_hmm_uapi.h b/lib/test_hmm_uapi.h
index 67b3b2e6ff5d..670b4ef2a5b6 100644
--- a/lib/test_hmm_uapi.h
+++ b/lib/test_hmm_uapi.h
@@ -40,6 +40,8 @@ struct hmm_dmirror_cmd {
* HMM_DMIRROR_PROT_NONE: unpopulated PTE or PTE with no access
* HMM_DMIRROR_PROT_READ: read-only PTE
* HMM_DMIRROR_PROT_WRITE: read/write PTE
+ * HMM_DMIRROR_PROT_PMD: PMD-sized page is fully mapped with the same permissions
+ * HMM_DMIRROR_PROT_PUD: PUD-sized page is fully mapped with the same permissions
* HMM_DMIRROR_PROT_ZERO: special read-only zero page
* HMM_DMIRROR_PROT_DEV_PRIVATE_LOCAL: Migrated device private page on the
* device the ioctl() is made
@@ -51,6 +53,8 @@ enum {
HMM_DMIRROR_PROT_NONE = 0x00,
HMM_DMIRROR_PROT_READ = 0x01,
HMM_DMIRROR_PROT_WRITE = 0x02,
+ HMM_DMIRROR_PROT_PMD = 0x04,
+ HMM_DMIRROR_PROT_PUD = 0x08,
HMM_DMIRROR_PROT_ZERO = 0x10,
HMM_DMIRROR_PROT_DEV_PRIVATE_LOCAL = 0x20,
HMM_DMIRROR_PROT_DEV_PRIVATE_REMOTE = 0x30,
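
A user-space consumer of the snapshot ioctl could decode the per-page permission bytes along these lines; decode_perm() is a hypothetical helper written only for illustration, with the flag values copied from the enum above:

#include <stdio.h>

/* Flag values mirror the uapi enum above (only the bits used here). */
#define HMM_DMIRROR_PROT_READ	0x01
#define HMM_DMIRROR_PROT_WRITE	0x02
#define HMM_DMIRROR_PROT_PMD	0x04
#define HMM_DMIRROR_PROT_PUD	0x08

static void decode_perm(unsigned char perm)
{
	const char *access = (perm & HMM_DMIRROR_PROT_WRITE) ? "read/write" :
			     (perm & HMM_DMIRROR_PROT_READ)  ? "read-only"  :
							       "no access";
	const char *order = (perm & HMM_DMIRROR_PROT_PUD) ? "PUD" :
			    (perm & HMM_DMIRROR_PROT_PMD) ? "PMD" : "PTE";

	printf("%s, %s-mapped\n", access, order);
}

int main(void)
{
	decode_perm(HMM_DMIRROR_PROT_WRITE | HMM_DMIRROR_PROT_PMD);
	return 0;
}
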
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index dc2c6a51d11a..53e953bb1d1d 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -23,6 +23,10 @@
#include <asm/page.h>
+#include "../mm/kasan/kasan.h"
+
+#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_SHADOW_SCALE_SIZE)
+
/*
* We assign some test results to these globals to make sure the tests
* are not eliminated as dead code.
@@ -48,7 +52,8 @@ static noinline void __init kmalloc_oob_right(void)
return;
}
- ptr[size] = 'x';
+ ptr[size + OOB_TAG_OFF] = 'x';
+
kfree(ptr);
}
@@ -100,7 +105,8 @@ static noinline void __init kmalloc_pagealloc_oob_right(void)
return;
}
- ptr[size] = 0;
+ ptr[size + OOB_TAG_OFF] = 0;
+
kfree(ptr);
}
@@ -170,7 +176,8 @@ static noinline void __init kmalloc_oob_krealloc_more(void)
return;
}
- ptr2[size2] = 'x';
+ ptr2[size2 + OOB_TAG_OFF] = 'x';
+
kfree(ptr2);
}
@@ -188,7 +195,9 @@ static noinline void __init kmalloc_oob_krealloc_less(void)
kfree(ptr1);
return;
}
- ptr2[size2] = 'x';
+
+ ptr2[size2 + OOB_TAG_OFF] = 'x';
+
kfree(ptr2);
}
@@ -224,7 +233,8 @@ static noinline void __init kmalloc_oob_memset_2(void)
return;
}
- memset(ptr+7, 0, 2);
+ memset(ptr + 7 + OOB_TAG_OFF, 0, 2);
+
kfree(ptr);
}
@@ -240,7 +250,8 @@ static noinline void __init kmalloc_oob_memset_4(void)
return;
}
- memset(ptr+5, 0, 4);
+ memset(ptr + 5 + OOB_TAG_OFF, 0, 4);
+
kfree(ptr);
}
@@ -257,7 +268,8 @@ static noinline void __init kmalloc_oob_memset_8(void)
return;
}
- memset(ptr+1, 0, 8);
+ memset(ptr + 1 + OOB_TAG_OFF, 0, 8);
+
kfree(ptr);
}
@@ -273,7 +285,8 @@ static noinline void __init kmalloc_oob_memset_16(void)
return;
}
- memset(ptr+1, 0, 16);
+ memset(ptr + 1 + OOB_TAG_OFF, 0, 16);
+
kfree(ptr);
}
@@ -289,7 +302,8 @@ static noinline void __init kmalloc_oob_in_memset(void)
return;
}
- memset(ptr, 0, size+5);
+ memset(ptr, 0, size + 5 + OOB_TAG_OFF);
+
kfree(ptr);
}
@@ -423,7 +437,8 @@ static noinline void __init kmem_cache_oob(void)
return;
}
- *p = p[size];
+ *p = p[size + OOB_TAG_OFF];
+
kmem_cache_free(cache, p);
kmem_cache_destroy(cache);
}
@@ -473,7 +488,7 @@ static noinline void __init kasan_global_oob(void)
static noinline void __init kasan_stack_oob(void)
{
char stack_array[10];
- volatile int i = 0;
+ volatile int i = OOB_TAG_OFF;
char *p = &stack_array[ARRAY_SIZE(stack_array) + i];
pr_info("out-of-bounds on stack\n");
@@ -520,25 +535,25 @@ static noinline void __init copy_user_test(void)
}
pr_info("out-of-bounds in copy_from_user()\n");
- unused = copy_from_user(kmem, usermem, size + 1);
+ unused = copy_from_user(kmem, usermem, size + 1 + OOB_TAG_OFF);
pr_info("out-of-bounds in copy_to_user()\n");
- unused = copy_to_user(usermem, kmem, size + 1);
+ unused = copy_to_user(usermem, kmem, size + 1 + OOB_TAG_OFF);
pr_info("out-of-bounds in __copy_from_user()\n");
- unused = __copy_from_user(kmem, usermem, size + 1);
+ unused = __copy_from_user(kmem, usermem, size + 1 + OOB_TAG_OFF);
pr_info("out-of-bounds in __copy_to_user()\n");
- unused = __copy_to_user(usermem, kmem, size + 1);
+ unused = __copy_to_user(usermem, kmem, size + 1 + OOB_TAG_OFF);
pr_info("out-of-bounds in __copy_from_user_inatomic()\n");
- unused = __copy_from_user_inatomic(kmem, usermem, size + 1);
+ unused = __copy_from_user_inatomic(kmem, usermem, size + 1 + OOB_TAG_OFF);
pr_info("out-of-bounds in __copy_to_user_inatomic()\n");
- unused = __copy_to_user_inatomic(usermem, kmem, size + 1);
+ unused = __copy_to_user_inatomic(usermem, kmem, size + 1 + OOB_TAG_OFF);
pr_info("out-of-bounds in strncpy_from_user()\n");
- unused = strncpy_from_user(kmem, usermem, size + 1);
+ unused = strncpy_from_user(kmem, usermem, size + 1 + OOB_TAG_OFF);
vm_munmap((unsigned long)usermem, PAGE_SIZE);
kfree(kmem);
@@ -766,15 +781,15 @@ static noinline void __init kmalloc_double_kzfree(void)
char *ptr;
size_t size = 16;
- pr_info("double-free (kzfree)\n");
+ pr_info("double-free (kfree_sensitive)\n");
ptr = kmalloc(size, GFP_KERNEL);
if (!ptr) {
pr_err("Allocation failed\n");
return;
}
- kzfree(ptr);
- kzfree(ptr);
+ kfree_sensitive(ptr);
+ kfree_sensitive(ptr);
}
#ifdef CONFIG_KASAN_VMALLOC
@@ -801,6 +816,35 @@ static noinline void __init vmalloc_oob(void)
static void __init vmalloc_oob(void) {}
#endif
+static struct kasan_rcu_info {
+ int i;
+ struct rcu_head rcu;
+} *global_rcu_ptr;
+
+static noinline void __init kasan_rcu_reclaim(struct rcu_head *rp)
+{
+ struct kasan_rcu_info *fp = container_of(rp,
+ struct kasan_rcu_info, rcu);
+
+ kfree(fp);
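+	/* Intentional: the write below is a use-after-free for KASAN to catch. */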
+ fp->i = 1;
+}
+
+static noinline void __init kasan_rcu_uaf(void)
+{
+ struct kasan_rcu_info *ptr;
+
+ pr_info("use-after-free in kasan_rcu_reclaim\n");
+ ptr = kmalloc(sizeof(struct kasan_rcu_info), GFP_KERNEL);
+ if (!ptr) {
+ pr_err("Allocation failed\n");
+ return;
+ }
+
+ global_rcu_ptr = rcu_dereference_protected(ptr, NULL);
+ call_rcu(&global_rcu_ptr->rcu, kasan_rcu_reclaim);
+}
+
static int __init kmalloc_tests_init(void)
{
/*
@@ -848,6 +892,7 @@ static int __init kmalloc_tests_init(void)
kasan_bitops();
kmalloc_double_kzfree();
vmalloc_oob();
+ kasan_rcu_uaf();
kasan_restore_multi_shot(multishot);
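
The OOB_TAG_OFF offset used throughout this file exists because tag-based (SW_TAGS) KASAN assigns memory tags per 16-byte granule, whereas generic KASAN tracks exactly how many bytes of an object's last granule are valid. An access that is out of bounds but still within the object's final granule would therefore go unnoticed in tag-based mode; pushing the access one granule further guarantees a tag mismatch. A minimal annotated fragment of the kmalloc_oob_right() pattern above, for illustration only:

char *ptr = kmalloc(size, GFP_KERNEL);

/* Generic KASAN: OOB_TAG_OFF is 0, so ptr[size] (one byte past the object)
 * already trips the byte-granular redzone check.
 * Tag-based KASAN: OOB_TAG_OFF is KASAN_SHADOW_SCALE_SIZE (16), so the
 * access lands in the next granule, which carries a different tag. */
ptr[size + OOB_TAG_OFF] = 'x';

kfree(ptr);
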
diff --git a/lib/test_kmod.c b/lib/test_kmod.c
index e651c37d56db..eab52770070d 100644
--- a/lib/test_kmod.c
+++ b/lib/test_kmod.c
@@ -745,7 +745,7 @@ static int trigger_config_run_type(struct kmod_test_device *test_dev,
break;
case TEST_KMOD_FS_TYPE:
kfree_const(config->test_fs);
- config->test_driver = NULL;
+ config->test_fs = NULL;
copied = config_copy_test_fs(config, test_str,
strlen(test_str));
break;
diff --git a/lib/test_lockup.c b/lib/test_lockup.c
index ff26f36d729f..f1a020bcc763 100644
--- a/lib/test_lockup.c
+++ b/lib/test_lockup.c
@@ -400,7 +400,7 @@ static void test_lockup(bool master)
test_unlock(master, true);
}
-DEFINE_PER_CPU(struct work_struct, test_works);
+static DEFINE_PER_CPU(struct work_struct, test_works);
static void test_work_fn(struct work_struct *work)
{
@@ -512,8 +512,8 @@ static int __init test_lockup_init(void)
if (test_file_path[0]) {
test_file = filp_open(test_file_path, O_RDONLY, 0);
if (IS_ERR(test_file)) {
- pr_err("cannot find file_path\n");
- return -EINVAL;
+ pr_err("failed to open %s: %ld\n", test_file_path, PTR_ERR(test_file));
+ return PTR_ERR(test_file);
}
test_inode = file_inode(test_file);
} else if (test_lock_inode ||
diff --git a/lib/ts_bm.c b/lib/ts_bm.c
index 277cb4417ac2..4cf250031f0f 100644
--- a/lib/ts_bm.c
+++ b/lib/ts_bm.c
@@ -11,7 +11,7 @@
* [1] A Fast String Searching Algorithm, R.S. Boyer and Moore.
* Communications of the Association for Computing Machinery,
* 20(10), 1977, pp. 762-772.
- * http://www.cs.utexas.edu/users/moore/publications/fstrpos.pdf
+ * https://www.cs.utexas.edu/users/moore/publications/fstrpos.pdf
*
* [2] Handbook of Exact String Matching Algorithms, Thierry Lecroq, 2004
* http://www-igm.univ-mlv.fr/~lecroq/string/string.pdf
diff --git a/lib/xxhash.c b/lib/xxhash.c
index aa61e2a3802f..d5bb9ff10607 100644
--- a/lib/xxhash.c
+++ b/lib/xxhash.c
@@ -34,7 +34,7 @@
* ("BSD").
*
* You can contact the author at:
- * - xxHash homepage: http://cyan4973.github.io/xxHash/
+ * - xxHash homepage: https://cyan4973.github.io/xxHash/
* - xxHash source repository: https://github.com/Cyan4973/xxHash
*/
diff --git a/lib/xz/xz_crc32.c b/lib/xz/xz_crc32.c
index 912aae5fa09e..88a2c35e1b59 100644
--- a/lib/xz/xz_crc32.c
+++ b/lib/xz/xz_crc32.c
@@ -2,7 +2,7 @@
* CRC32 using the polynomial from IEEE-802.3
*
* Authors: Lasse Collin <lasse.collin@tukaani.org>
- * Igor Pavlov <http://7-zip.org/>
+ * Igor Pavlov <https://7-zip.org/>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
diff --git a/lib/xz/xz_dec_bcj.c b/lib/xz/xz_dec_bcj.c
index a768e6d28bbb..72ddac6ef2ec 100644
--- a/lib/xz/xz_dec_bcj.c
+++ b/lib/xz/xz_dec_bcj.c
@@ -2,7 +2,7 @@
* Branch/Call/Jump (BCJ) filter decoders
*
* Authors: Lasse Collin <lasse.collin@tukaani.org>
- * Igor Pavlov <http://7-zip.org/>
+ * Igor Pavlov <https://7-zip.org/>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
diff --git a/lib/xz/xz_dec_lzma2.c b/lib/xz/xz_dec_lzma2.c
index 156f26fdc4c9..9f336bc07ed6 100644
--- a/lib/xz/xz_dec_lzma2.c
+++ b/lib/xz/xz_dec_lzma2.c
@@ -2,7 +2,7 @@
* LZMA2 decoder
*
* Authors: Lasse Collin <lasse.collin@tukaani.org>
- * Igor Pavlov <http://7-zip.org/>
+ * Igor Pavlov <https://7-zip.org/>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
diff --git a/lib/xz/xz_lzma2.h b/lib/xz/xz_lzma2.h
index 071d67bee9f5..92d852d4f87a 100644
--- a/lib/xz/xz_lzma2.h
+++ b/lib/xz/xz_lzma2.h
@@ -2,7 +2,7 @@
* LZMA2 definitions
*
* Authors: Lasse Collin <lasse.collin@tukaani.org>
- * Igor Pavlov <http://7-zip.org/>
+ * Igor Pavlov <https://7-zip.org/>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
diff --git a/lib/xz/xz_stream.h b/lib/xz/xz_stream.h
index 66cb5a7055ec..430bb3a0d195 100644
--- a/lib/xz/xz_stream.h
+++ b/lib/xz/xz_stream.h
@@ -19,7 +19,7 @@
/*
* See the .xz file format specification at
- * http://tukaani.org/xz/xz-file-format.txt
+ * https://tukaani.org/xz/xz-file-format.txt
* to understand the container format.
*/