Diffstat (limited to 'lib')
-rw-r--r--lib/Kconfig11
-rw-r--r--lib/Kconfig.debug85
-rw-r--r--lib/Kconfig.kgdb1
-rw-r--r--lib/Kconfig.ubsan1
-rw-r--r--lib/Makefile44
-rw-r--r--lib/alloc_tag.c272
-rw-r--r--lib/asn1_encoder.c1
-rw-r--r--lib/atomic64_test.c1
-rw-r--r--lib/bch.c20
-rw-r--r--lib/bitfield_kunit.c1
-rw-r--r--lib/bootconfig.c20
-rw-r--r--lib/btree.c1
-rwxr-xr-xlib/build_OID_registry7
-rw-r--r--lib/buildid.c18
-rw-r--r--lib/checksum_kunit.c1
-rw-r--r--lib/closure.c94
-rw-r--r--lib/cmdline_kunit.c1
-rw-r--r--lib/cmpxchg-emu.c45
-rw-r--r--lib/codetag.c283
-rw-r--r--lib/cpumask_kunit.c1
-rw-r--r--lib/crypto/Kconfig5
-rw-r--r--lib/crypto/Makefile3
-rw-r--r--lib/crypto/aescfb.c257
-rw-r--r--lib/crypto/arc4.c1
-rw-r--r--lib/crypto/des.c1
-rw-r--r--lib/crypto/libchacha.c1
-rw-r--r--lib/crypto/mpi/ec.c6
-rw-r--r--lib/crypto/mpi/mpi-bit.c10
-rw-r--r--lib/crypto/mpi/mpi-pow.c9
-rw-r--r--lib/crypto/poly1305.c1
-rw-r--r--lib/crypto/sha1.c1
-rw-r--r--lib/crypto/sha256.c1
-rw-r--r--lib/crypto/utils.c1
-rw-r--r--lib/debugobjects.c21
-rw-r--r--lib/decompress_bunzip2.c3
-rw-r--r--lib/decompress_unlzma.c2
-rw-r--r--lib/devres.c26
-rw-r--r--lib/dhry_run.c1
-rw-r--r--lib/dim/Makefile4
-rw-r--r--lib/dim/dim.c3
-rw-r--r--lib/dim/net_dim.c144
-rw-r--r--lib/dump_stack.c9
-rw-r--r--lib/dynamic_queue_limits.c13
-rw-r--r--lib/find_bit.c14
-rw-r--r--lib/find_bit_benchmark.c1
-rw-r--r--lib/fonts/Kconfig22
-rw-r--r--lib/fortify_kunit.c229
-rw-r--r--lib/hashtable_test.c1
-rw-r--r--lib/iomap_copy.c13
-rw-r--r--lib/is_signed_type_kunit.c1
-rw-r--r--lib/kfifo.c115
-rw-r--r--lib/kobject_uevent.c17
-rw-r--r--lib/kunit/Kconfig11
-rw-r--r--lib/kunit/Makefile2
-rw-r--r--lib/kunit/assert.c19
-rw-r--r--lib/kunit/assert_test.c388
-rw-r--r--lib/kunit/device.c2
-rw-r--r--lib/kunit/executor.c12
-rw-r--r--lib/kunit/executor_test.c2
-rw-r--r--lib/kunit/kunit-example-test.c1
-rw-r--r--lib/kunit/kunit-test.c46
-rw-r--r--lib/kunit/string-stream-test.c12
-rw-r--r--lib/kunit/test.c4
-rw-r--r--lib/kunit/try-catch.c40
-rw-r--r--lib/kunit/user_alloc.c117
-rw-r--r--lib/kunit_iov_iter.c18
-rw-r--r--lib/list-test.c7
-rw-r--r--lib/maple_tree.c31
-rw-r--r--lib/math/prime_numbers.c3
-rw-r--r--lib/math/rational-test.c1
-rw-r--r--lib/math/rational.c1
-rw-r--r--lib/memcpy_kunit.c54
-rw-r--r--lib/objagg.c20
-rw-r--r--lib/objpool.c112
-rw-r--r--lib/overflow_kunit.c21
-rw-r--r--lib/percpu_counter.c44
-rw-r--r--lib/plist.c42
-rw-r--r--lib/raid6/Makefile35
-rw-r--r--lib/rbtree.c8
-rw-r--r--lib/rhashtable.c22
-rw-r--r--lib/sbitmap.c44
-rw-r--r--lib/siphash_kunit.c1
-rw-r--r--lib/slub_kunit.c4
-rw-r--r--lib/sort.c14
-rw-r--r--lib/stackdepot.c11
-rw-r--r--lib/stackinit_kunit.c1
-rw-r--r--lib/strcat_kunit.c104
-rw-r--r--lib/string_helpers_kunit.c1
-rw-r--r--lib/string_kunit.c462
-rw-r--r--lib/strscpy_kunit.c142
-rw-r--r--lib/test-kstrtox.c1
-rw-r--r--lib/test_bitmap.c208
-rw-r--r--lib/test_bitops.c28
-rw-r--r--lib/test_bits.c1
-rw-r--r--lib/test_blackhole_dev.c1
-rw-r--r--lib/test_bpf.c17
-rw-r--r--lib/test_dynamic_debug.c1
-rw-r--r--lib/test_firmware.c1
-rw-r--r--lib/test_fpu.h8
-rw-r--r--lib/test_fpu_glue.c (renamed from lib/test_fpu.c)38
-rw-r--r--lib/test_fpu_impl.c37
-rw-r--r--lib/test_free_pages.c1
-rw-r--r--lib/test_hash.c1
-rw-r--r--lib/test_hexdump.c3
-rw-r--r--lib/test_hmm.c9
-rw-r--r--lib/test_ida.c1
-rw-r--r--lib/test_kmod.c1
-rw-r--r--lib/test_kprobes.c3
-rw-r--r--lib/test_linear_ranges.c1
-rw-r--r--lib/test_list_sort.c1
-rw-r--r--lib/test_maple_tree.c1
-rw-r--r--lib/test_memcat_p.c1
-rw-r--r--lib/test_meminit.c1
-rw-r--r--lib/test_min_heap.c76
-rw-r--r--lib/test_module.c1
-rw-r--r--lib/test_objagg.c2
-rw-r--r--lib/test_printf.c1
-rw-r--r--lib/test_ref_tracker.c3
-rw-r--r--lib/test_rhashtable.c1
-rw-r--r--lib/test_scanf.c1
-rw-r--r--lib/test_sort.c15
-rw-r--r--lib/test_static_key_base.c1
-rw-r--r--lib/test_static_keys.c1
-rw-r--r--lib/test_sysctl.c1
-rw-r--r--lib/test_ubsan.c1
-rw-r--r--lib/test_user_copy.c331
-rw-r--r--lib/test_uuid.c1
-rw-r--r--lib/test_xarray.c121
-rw-r--r--lib/ts_bm.c1
-rw-r--r--lib/ts_fsm.c1
-rw-r--r--lib/ts_kmp.c1
-rw-r--r--lib/ubsan.h43
-rw-r--r--lib/usercopy.c39
-rw-r--r--lib/usercopy_kunit.c335
-rw-r--r--lib/vdso/Kconfig12
-rw-r--r--lib/vdso/getrandom.c251
-rw-r--r--lib/vdso/gettimeofday.c75
-rw-r--r--lib/vsprintf.c6
-rw-r--r--lib/xarray.c75
-rw-r--r--lib/zlib_deflate/deflate_syms.c1
-rw-r--r--lib/zlib_dfltcc/dfltcc.h1
-rw-r--r--lib/zlib_dfltcc/dfltcc_util.h28
142 files changed, 4106 insertions, 1388 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 4557bb8a5256..b38849af6f13 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -539,13 +539,7 @@ config CPUMASK_OFFSTACK
stack overflow.
config FORCE_NR_CPUS
- bool "Set number of CPUs at compile time"
- depends on SMP && EXPERT && !COMPILE_TEST
- help
- Say Yes if you have NR_CPUS set to an actual number of possible
- CPUs in your system, not to a default value. This forces the core
- code to rely on compile-time value and optimize kernel routines
- better.
+ def_bool !SMP
config CPU_RMAP
bool
@@ -628,7 +622,8 @@ config SIGNATURE
Implementation is done using GnuPG MPI library
config DIMLIB
- bool
+ tristate
+ depends on NET
help
Dynamic Interrupt Moderation library.
Implements an algorithm for dynamically changing CQ moderation values
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 291185f54ee4..a30c03a66172 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -969,6 +969,37 @@ config DEBUG_STACKOVERFLOW
If in doubt, say "N".
+config CODE_TAGGING
+ bool
+ select KALLSYMS
+
+config MEM_ALLOC_PROFILING
+ bool "Enable memory allocation profiling"
+ default n
+ depends on PROC_FS
+ depends on !DEBUG_FORCE_WEAK_PER_CPU
+ select CODE_TAGGING
+ select PAGE_EXTENSION
+ select SLAB_OBJ_EXT
+ help
+	  Track the source location of each memory allocation and record the
+	  total size allocated from it. The mechanism can be used to track
+	  memory leaks with low performance and memory overhead.
+
+config MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT
+ bool "Enable memory allocation profiling by default"
+ default y
+ depends on MEM_ALLOC_PROFILING
+
+config MEM_ALLOC_PROFILING_DEBUG
+ bool "Memory allocation profiler debugging"
+ default n
+ depends on MEM_ALLOC_PROFILING
+ select MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT
+ help
+ Adds warnings with helpful error messages for memory allocation
+ profiling.
+
source "lib/Kconfig.kasan"
source "lib/Kconfig.kfence"
source "lib/Kconfig.kmsan"
@@ -1012,7 +1043,9 @@ config PANIC_TIMEOUT
Set the timeout value (in seconds) until a reboot occurs when
the kernel panics. If n = 0, then we wait forever. A timeout
value n > 0 will wait n seconds before rebooting, while a timeout
- value n < 0 will reboot immediately.
+ value n < 0 will reboot immediately. This setting can be overridden
+ with the kernel command line option panic=, and from userspace via
+ /proc/sys/kernel/panic.
config LOCKUP_DETECTOR
bool
@@ -1030,6 +1063,20 @@ config SOFTLOCKUP_DETECTOR
chance to run. The current stack trace is displayed upon
detection and the system will stay locked up.
+config SOFTLOCKUP_DETECTOR_INTR_STORM
+ bool "Detect Interrupt Storm in Soft Lockups"
+ depends on SOFTLOCKUP_DETECTOR && IRQ_TIME_ACCOUNTING
+ select GENERIC_IRQ_STAT_SNAPSHOT
+ default y if NR_CPUS <= 128
+ help
+	  Say Y here to enable the kernel to detect an interrupt storm
+	  during a "soft lockup".
+
+	  A "soft lockup" can have a variety of causes. If one is caused by
+	  an interrupt storm, the storming interrupts will not appear on the
+	  callstack. To detect this case, it is necessary to report the CPU
+	  stats and interrupt counts during the "soft lockup".
+
config BOOTPARAM_SOFTLOCKUP_PANIC
bool "Panic (Reboot) On Soft Lockups"
depends on SOFTLOCKUP_DETECTOR
@@ -1251,7 +1298,7 @@ config SCHED_INFO
config SCHEDSTATS
bool "Collect scheduler statistics"
- depends on DEBUG_KERNEL && PROC_FS
+ depends on PROC_FS
select SCHED_INFO
help
If you say Y here, additional code will be inserted into the
@@ -2437,7 +2484,6 @@ config TEST_LKM
config TEST_BITOPS
tristate "Test module for compilation of bitops operations"
- depends on m
help
This builds the "test_bitops" module that is much like the
TEST_LKM module except that it does a basic exercise of the
@@ -2461,18 +2507,6 @@ config TEST_VMALLOC
If unsure, say N.
-config TEST_USER_COPY
- tristate "Test user/kernel boundary protections"
- depends on m
- help
- This builds the "test_user_copy" module that runs sanity checks
- on the copy_to/from_user infrastructure, making sure basic
- user/kernel boundary testing is working. If it fails to load,
- a regression has been detected in the user/kernel memory boundary
- protections.
-
- If unsure, say N.
-
config TEST_BPF
tristate "Test BPF filter functionality"
depends on m && NET
@@ -2759,16 +2793,6 @@ config HW_BREAKPOINT_KUNIT_TEST
If unsure, say N.
-config STRCAT_KUNIT_TEST
- tristate "Test strcat() family of functions at runtime" if !KUNIT_ALL_TESTS
- depends on KUNIT
- default KUNIT_ALL_TESTS
-
-config STRSCPY_KUNIT_TEST
- tristate "Test strscpy*() family of functions at runtime" if !KUNIT_ALL_TESTS
- depends on KUNIT
- default KUNIT_ALL_TESTS
-
config SIPHASH_KUNIT_TEST
tristate "Perform selftest on siphash functions" if !KUNIT_ALL_TESTS
depends on KUNIT
@@ -2780,6 +2804,15 @@ config SIPHASH_KUNIT_TEST
This is intended to help people writing architecture-specific
optimized versions. If unsure, say N.
+config USERCOPY_KUNIT_TEST
+ tristate "KUnit Test for user/kernel boundary protections"
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This builds the "usercopy_kunit" module that runs sanity checks
+ on the copy_to/from_user infrastructure, making sure basic
+ user/kernel boundary testing is working.
+
config TEST_UDELAY
tristate "udelay test driver"
help
@@ -2891,7 +2924,7 @@ config TEST_FREE_PAGES
config TEST_FPU
tristate "Test floating point operations in kernel space"
- depends on X86 && !KCOV_INSTRUMENT_ALL
+ depends on ARCH_HAS_KERNEL_FPU_SUPPORT && !KCOV_INSTRUMENT_ALL
help
Enable this option to add /sys/kernel/debug/selftest_helpers/test_fpu
which will trigger a sequence of floating point operations. This is used
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
index b5c0e6576749..537e1b3f5734 100644
--- a/lib/Kconfig.kgdb
+++ b/lib/Kconfig.kgdb
@@ -122,6 +122,7 @@ config KDB_DEFAULT_ENABLE
config KDB_KEYBOARD
bool "KGDB_KDB: keyboard as input device"
depends on VT && KGDB_KDB && !PARISC
+ depends on HAS_IOPORT
default n
help
KDB can use a PS/2 type keyboard for an input device
diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
index e81e1ac4a919..bdda600f8dfb 100644
--- a/lib/Kconfig.ubsan
+++ b/lib/Kconfig.ubsan
@@ -4,6 +4,7 @@ config ARCH_HAS_UBSAN
menuconfig UBSAN
bool "Undefined behaviour sanity checker"
+ depends on ARCH_HAS_UBSAN
help
This option enables the Undefined Behaviour sanity checker.
Compile-time instrumentation is used to detect various undefined
diff --git a/lib/Makefile b/lib/Makefile
index ffc6b2341b45..322bb127b4dc 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -78,7 +78,6 @@ obj-$(CONFIG_TEST_LKM) += test_module.o
obj-$(CONFIG_TEST_VMALLOC) += test_vmalloc.o
obj-$(CONFIG_TEST_RHASHTABLE) += test_rhashtable.o
obj-$(CONFIG_TEST_SORT) += test_sort.o
-obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o
obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_keys.o
obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o
obj-$(CONFIG_TEST_DYNAMIC_DEBUG) += test_dynamic_debug.o
@@ -110,30 +109,10 @@ CFLAGS_test_fprobe.o += $(CC_FLAGS_FTRACE)
obj-$(CONFIG_FPROBE_SANITY_TEST) += test_fprobe.o
obj-$(CONFIG_TEST_OBJPOOL) += test_objpool.o
-#
-# CFLAGS for compiling floating point code inside the kernel. x86/Makefile turns
-# off the generation of FPU/SSE* instructions for kernel proper but FPU_FLAGS
-# get appended last to CFLAGS and thus override those previous compiler options.
-#
-FPU_CFLAGS := -msse -msse2
-ifdef CONFIG_CC_IS_GCC
-# Stack alignment mismatch, proceed with caution.
-# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
-# (8B stack alignment).
-# See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=53383
-#
-# The "-msse" in the first argument is there so that the
-# -mpreferred-stack-boundary=3 build error:
-#
-# -mpreferred-stack-boundary=3 is not between 4 and 12
-#
-# can be triggered. Otherwise gcc doesn't complain.
-FPU_CFLAGS += -mhard-float
-FPU_CFLAGS += $(call cc-option,-msse -mpreferred-stack-boundary=3,-mpreferred-stack-boundary=4)
-endif
-
obj-$(CONFIG_TEST_FPU) += test_fpu.o
-CFLAGS_test_fpu.o += $(FPU_CFLAGS)
+test_fpu-y := test_fpu_glue.o test_fpu_impl.o
+CFLAGS_test_fpu_impl.o += $(CC_FLAGS_FPU)
+CFLAGS_REMOVE_test_fpu_impl.o += $(CC_FLAGS_NO_FPU)
# Some KUnit files (hooks.o) need to be built-in even when KUnit is a module,
# so we can't just use obj-$(CONFIG_KUNIT).
@@ -233,9 +212,13 @@ obj-$(CONFIG_OF_RECONFIG_NOTIFIER_ERROR_INJECT) += \
of-reconfig-notifier-error-inject.o
obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
+obj-$(CONFIG_CODE_TAGGING) += codetag.o
+obj-$(CONFIG_MEM_ALLOC_PROFILING) += alloc_tag.o
+
lib-$(CONFIG_GENERIC_BUG) += bug.o
obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o
+obj-$(CONFIG_ARCH_NEED_CMPXCHG_1_EMU) += cmpxchg-emu.o
obj-$(CONFIG_DYNAMIC_DEBUG_CORE) += dynamic_debug.o
#ensure exported functions have prototypes
@@ -352,7 +335,7 @@ $(obj)/oid_registry_data.c: $(srctree)/include/linux/oid_registry.h \
$(call cmd,build_OID_registry)
quiet_cmd_build_OID_registry = GEN $@
- cmd_build_OID_registry = perl $(srctree)/$(src)/build_OID_registry $< $@
+ cmd_build_OID_registry = perl $(src)/build_OID_registry $< $@
clean-files += oid_registry_data.c
@@ -403,17 +386,16 @@ CFLAGS_fortify_kunit.o += $(call cc-disable-warning, stringop-overread)
CFLAGS_fortify_kunit.o += $(call cc-disable-warning, stringop-truncation)
CFLAGS_fortify_kunit.o += $(DISABLE_STRUCTLEAK_PLUGIN)
obj-$(CONFIG_FORTIFY_KUNIT_TEST) += fortify_kunit.o
-obj-$(CONFIG_STRCAT_KUNIT_TEST) += strcat_kunit.o
-obj-$(CONFIG_STRSCPY_KUNIT_TEST) += strscpy_kunit.o
obj-$(CONFIG_SIPHASH_KUNIT_TEST) += siphash_kunit.o
+obj-$(CONFIG_USERCOPY_KUNIT_TEST) += usercopy_kunit.o
obj-$(CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED) += devmem_is_allowed.o
obj-$(CONFIG_FIRMWARE_TABLE) += fw_table.o
# FORTIFY_SOURCE compile-time behavior tests
-TEST_FORTIFY_SRCS = $(wildcard $(srctree)/$(src)/test_fortify/*-*.c)
-TEST_FORTIFY_LOGS = $(patsubst $(srctree)/$(src)/%.c, %.log, $(TEST_FORTIFY_SRCS))
+TEST_FORTIFY_SRCS = $(wildcard $(src)/test_fortify/*-*.c)
+TEST_FORTIFY_LOGS = $(patsubst $(src)/%.c, %.log, $(TEST_FORTIFY_SRCS))
TEST_FORTIFY_LOG = test_fortify.log
quiet_cmd_test_fortify = TEST $@
@@ -444,3 +426,7 @@ $(obj)/$(TEST_FORTIFY_LOG): $(addprefix $(obj)/, $(TEST_FORTIFY_LOGS)) FORCE
ifeq ($(CONFIG_FORTIFY_SOURCE),y)
$(obj)/string.o: $(obj)/$(TEST_FORTIFY_LOG)
endif
+
+# Some architectures define __NO_FORTIFY if __SANITIZE_ADDRESS__ is undefined.
+# Pass CFLAGS_KASAN to avoid warnings.
+$(foreach x, $(patsubst %.log,%.o,$(TEST_FORTIFY_LOGS)), $(eval KASAN_SANITIZE_$(x) := y))
diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c
new file mode 100644
index 000000000000..81e5f9a70f22
--- /dev/null
+++ b/lib/alloc_tag.c
@@ -0,0 +1,272 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/alloc_tag.h>
+#include <linux/fs.h>
+#include <linux/gfp.h>
+#include <linux/module.h>
+#include <linux/page_ext.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_buf.h>
+#include <linux/seq_file.h>
+
+static struct codetag_type *alloc_tag_cttype;
+
+DEFINE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag);
+EXPORT_SYMBOL(_shared_alloc_tag);
+
+DEFINE_STATIC_KEY_MAYBE(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
+ mem_alloc_profiling_key);
+
+struct allocinfo_private {
+ struct codetag_iterator iter;
+ bool print_header;
+};
+
+static void *allocinfo_start(struct seq_file *m, loff_t *pos)
+{
+ struct allocinfo_private *priv;
+ struct codetag *ct;
+ loff_t node = *pos;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ m->private = priv;
+ if (!priv)
+ return NULL;
+
+ priv->print_header = (node == 0);
+ codetag_lock_module_list(alloc_tag_cttype, true);
+ priv->iter = codetag_get_ct_iter(alloc_tag_cttype);
+ while ((ct = codetag_next_ct(&priv->iter)) != NULL && node)
+ node--;
+
+ return ct ? priv : NULL;
+}
+
+static void *allocinfo_next(struct seq_file *m, void *arg, loff_t *pos)
+{
+ struct allocinfo_private *priv = (struct allocinfo_private *)arg;
+ struct codetag *ct = codetag_next_ct(&priv->iter);
+
+ (*pos)++;
+ if (!ct)
+ return NULL;
+
+ return priv;
+}
+
+static void allocinfo_stop(struct seq_file *m, void *arg)
+{
+ struct allocinfo_private *priv = (struct allocinfo_private *)m->private;
+
+ if (priv) {
+ codetag_lock_module_list(alloc_tag_cttype, false);
+ kfree(priv);
+ }
+}
+
+static void print_allocinfo_header(struct seq_buf *buf)
+{
+ /* Output format version, so we can change it. */
+ seq_buf_printf(buf, "allocinfo - version: 1.0\n");
+ seq_buf_printf(buf, "# <size> <calls> <tag info>\n");
+}
+
+static void alloc_tag_to_text(struct seq_buf *out, struct codetag *ct)
+{
+ struct alloc_tag *tag = ct_to_alloc_tag(ct);
+ struct alloc_tag_counters counter = alloc_tag_read(tag);
+ s64 bytes = counter.bytes;
+
+ seq_buf_printf(out, "%12lli %8llu ", bytes, counter.calls);
+ codetag_to_text(out, ct);
+ seq_buf_putc(out, ' ');
+ seq_buf_putc(out, '\n');
+}
+
+static int allocinfo_show(struct seq_file *m, void *arg)
+{
+ struct allocinfo_private *priv = (struct allocinfo_private *)arg;
+ char *bufp;
+ size_t n = seq_get_buf(m, &bufp);
+ struct seq_buf buf;
+
+ seq_buf_init(&buf, bufp, n);
+ if (priv->print_header) {
+ print_allocinfo_header(&buf);
+ priv->print_header = false;
+ }
+ alloc_tag_to_text(&buf, priv->iter.ct);
+ seq_commit(m, seq_buf_used(&buf));
+ return 0;
+}
+
+static const struct seq_operations allocinfo_seq_op = {
+ .start = allocinfo_start,
+ .next = allocinfo_next,
+ .stop = allocinfo_stop,
+ .show = allocinfo_show,
+};
+
+size_t alloc_tag_top_users(struct codetag_bytes *tags, size_t count, bool can_sleep)
+{
+ struct codetag_iterator iter;
+ struct codetag *ct;
+ struct codetag_bytes n;
+ unsigned int i, nr = 0;
+
+ if (can_sleep)
+ codetag_lock_module_list(alloc_tag_cttype, true);
+ else if (!codetag_trylock_module_list(alloc_tag_cttype))
+ return 0;
+
+ iter = codetag_get_ct_iter(alloc_tag_cttype);
+ while ((ct = codetag_next_ct(&iter))) {
+ struct alloc_tag_counters counter = alloc_tag_read(ct_to_alloc_tag(ct));
+
+ n.ct = ct;
+ n.bytes = counter.bytes;
+
+ for (i = 0; i < nr; i++)
+ if (n.bytes > tags[i].bytes)
+ break;
+
+ if (i < count) {
+ nr -= nr == count;
+ memmove(&tags[i + 1],
+ &tags[i],
+ sizeof(tags[0]) * (nr - i));
+ nr++;
+ tags[i] = n;
+ }
+ }
+
+ codetag_lock_module_list(alloc_tag_cttype, false);
+
+ return nr;
+}
+
+static void __init procfs_init(void)
+{
+ proc_create_seq("allocinfo", 0400, NULL, &allocinfo_seq_op);
+}
+
+static bool alloc_tag_module_unload(struct codetag_type *cttype,
+ struct codetag_module *cmod)
+{
+ struct codetag_iterator iter = codetag_get_ct_iter(cttype);
+ struct alloc_tag_counters counter;
+ bool module_unused = true;
+ struct alloc_tag *tag;
+ struct codetag *ct;
+
+ for (ct = codetag_next_ct(&iter); ct; ct = codetag_next_ct(&iter)) {
+ if (iter.cmod != cmod)
+ continue;
+
+ tag = ct_to_alloc_tag(ct);
+ counter = alloc_tag_read(tag);
+
+ if (WARN(counter.bytes,
+ "%s:%u module %s func:%s has %llu allocated at module unload",
+ ct->filename, ct->lineno, ct->modname, ct->function, counter.bytes))
+ module_unused = false;
+ }
+
+ return module_unused;
+}
+
+#ifdef CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT
+static bool mem_profiling_support __meminitdata = true;
+#else
+static bool mem_profiling_support __meminitdata;
+#endif
+
+static int __init setup_early_mem_profiling(char *str)
+{
+ bool enable;
+
+ if (!str || !str[0])
+ return -EINVAL;
+
+ if (!strncmp(str, "never", 5)) {
+ enable = false;
+ mem_profiling_support = false;
+ } else {
+ int res;
+
+ res = kstrtobool(str, &enable);
+ if (res)
+ return res;
+
+ mem_profiling_support = true;
+ }
+
+ if (enable != static_key_enabled(&mem_alloc_profiling_key)) {
+ if (enable)
+ static_branch_enable(&mem_alloc_profiling_key);
+ else
+ static_branch_disable(&mem_alloc_profiling_key);
+ }
+
+ return 0;
+}
+early_param("sysctl.vm.mem_profiling", setup_early_mem_profiling);
+
+static __init bool need_page_alloc_tagging(void)
+{
+ return mem_profiling_support;
+}
+
+static __init void init_page_alloc_tagging(void)
+{
+}
+
+struct page_ext_operations page_alloc_tagging_ops = {
+ .size = sizeof(union codetag_ref),
+ .need = need_page_alloc_tagging,
+ .init = init_page_alloc_tagging,
+};
+EXPORT_SYMBOL(page_alloc_tagging_ops);
+
+#ifdef CONFIG_SYSCTL
+static struct ctl_table memory_allocation_profiling_sysctls[] = {
+ {
+ .procname = "mem_profiling",
+ .data = &mem_alloc_profiling_key,
+#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
+ .mode = 0444,
+#else
+ .mode = 0644,
+#endif
+ .proc_handler = proc_do_static_key,
+ },
+};
+
+static void __init sysctl_init(void)
+{
+ if (!mem_profiling_support)
+ memory_allocation_profiling_sysctls[0].mode = 0444;
+
+ register_sysctl_init("vm", memory_allocation_profiling_sysctls);
+}
+#else /* CONFIG_SYSCTL */
+static inline void sysctl_init(void) {}
+#endif /* CONFIG_SYSCTL */
+
+static int __init alloc_tag_init(void)
+{
+ const struct codetag_type_desc desc = {
+ .section = "alloc_tags",
+ .tag_size = sizeof(struct alloc_tag),
+ .module_unload = alloc_tag_module_unload,
+ };
+
+ alloc_tag_cttype = codetag_register_type(&desc);
+ if (IS_ERR(alloc_tag_cttype))
+ return PTR_ERR(alloc_tag_cttype);
+
+ sysctl_init();
+ procfs_init();
+
+ return 0;
+}
+module_init(alloc_tag_init);
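A hedged sketch of how a consumer might use the interface above (not part of this patch; report_top_alloc_sites() is an illustrative name): pull the largest allocation sites with alloc_tag_top_users() and render them with codetag_to_text(). The same data is exposed to user space through /proc/allocinfo, and profiling can be toggled at boot via the sysctl.vm.mem_profiling parameter handled above.

	/* Hedged sketch, not in this patch: dump the largest allocation sites. */
	static void report_top_alloc_sites(void)
	{
		struct codetag_bytes tags[8];
		size_t i, nr;

		/* can_sleep=false: trylock, so this is usable from atomic context */
		nr = alloc_tag_top_users(tags, ARRAY_SIZE(tags), false);
		for (i = 0; i < nr; i++) {
			DECLARE_SEQ_BUF(sb, 256);

			codetag_to_text(&sb, tags[i].ct);
			pr_info("%lld bytes: %s\n",
				(long long)tags[i].bytes, seq_buf_str(&sb));
		}
	}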
diff --git a/lib/asn1_encoder.c b/lib/asn1_encoder.c
index 0fd3c454a468..92f35aae13b1 100644
--- a/lib/asn1_encoder.c
+++ b/lib/asn1_encoder.c
@@ -449,4 +449,5 @@ asn1_encode_boolean(unsigned char *data, const unsigned char *end_data,
}
EXPORT_SYMBOL_GPL(asn1_encode_boolean);
+MODULE_DESCRIPTION("Simple encoder primitives for ASN.1 BER/DER/CER");
MODULE_LICENSE("GPL");
diff --git a/lib/atomic64_test.c b/lib/atomic64_test.c
index d9d170238165..759ea1783cc5 100644
--- a/lib/atomic64_test.c
+++ b/lib/atomic64_test.c
@@ -273,4 +273,5 @@ static __exit void test_atomics_exit(void) {}
module_init(test_atomics_init);
module_exit(test_atomics_exit);
+MODULE_DESCRIPTION("Testsuite for atomic64_t functions");
MODULE_LICENSE("GPL");
diff --git a/lib/bch.c b/lib/bch.c
index 5f71fd76eca8..1c0cb07cdfeb 100644
--- a/lib/bch.c
+++ b/lib/bch.c
@@ -479,11 +479,8 @@ static int solve_linear_system(struct bch_control *bch, unsigned int *rows,
/* find suitable row for elimination */
for (r = p; r < m; r++) {
if (rows[r] & mask) {
- if (r != p) {
- tmp = rows[r];
- rows[r] = rows[p];
- rows[p] = tmp;
- }
+ if (r != p)
+ swap(rows[r], rows[p]);
rem = r+1;
break;
}
@@ -799,21 +796,14 @@ static void gf_poly_div(struct bch_control *bch, struct gf_poly *a,
static struct gf_poly *gf_poly_gcd(struct bch_control *bch, struct gf_poly *a,
struct gf_poly *b)
{
- struct gf_poly *tmp;
-
dbg("gcd(%s,%s)=", gf_poly_str(a), gf_poly_str(b));
- if (a->deg < b->deg) {
- tmp = b;
- b = a;
- a = tmp;
- }
+ if (a->deg < b->deg)
+ swap(a, b);
while (b->deg > 0) {
gf_poly_mod(bch, a, b, NULL);
- tmp = b;
- b = a;
- a = tmp;
+ swap(a, b);
}
dbg("%s\n", gf_poly_str(a));
diff --git a/lib/bitfield_kunit.c b/lib/bitfield_kunit.c
index 1473d8b4bf0f..5ccd86f61896 100644
--- a/lib/bitfield_kunit.c
+++ b/lib/bitfield_kunit.c
@@ -151,4 +151,5 @@ static struct kunit_suite bitfields_test_suite = {
kunit_test_suites(&bitfields_test_suite);
MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
+MODULE_DESCRIPTION("Test cases for bitfield helpers");
MODULE_LICENSE("GPL");
diff --git a/lib/bootconfig.c b/lib/bootconfig.c
index 97f8911ea339..81f29c29f47b 100644
--- a/lib/bootconfig.c
+++ b/lib/bootconfig.c
@@ -4,8 +4,16 @@
* Masami Hiramatsu <mhiramat@kernel.org>
*/
-#ifdef __KERNEL__
+/*
+ * NOTE: This is only for tools/bootconfig, because tools/bootconfig will
+ * run the parser sanity test.
+ * This does NOT mean lib/bootconfig.c is available in user space.
+ * However, if you change this file, please make sure tools/bootconfig
+ * still builds and runs.
+ */
#include <linux/bootconfig.h>
+
+#ifdef __KERNEL__
#include <linux/bug.h>
#include <linux/ctype.h>
#include <linux/errno.h>
@@ -24,16 +32,6 @@ const char * __init xbc_get_embedded_bootconfig(size_t *size)
return (*size) ? embedded_bootconfig_data : NULL;
}
#endif
-
-#else /* !__KERNEL__ */
-/*
- * NOTE: This is only for tools/bootconfig, because tools/bootconfig will
- * run the parser sanity test.
- * This does NOT mean lib/bootconfig.c is available in the user space.
- * However, if you change this file, please make sure the tools/bootconfig
- * has no issue on building and running.
- */
-#include <linux/bootconfig.h>
#endif
/*
diff --git a/lib/btree.c b/lib/btree.c
index 49420cae3a83..bb81d3393ac5 100644
--- a/lib/btree.c
+++ b/lib/btree.c
@@ -43,7 +43,6 @@
#include <linux/slab.h>
#include <linux/module.h>
-#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define NODESIZE MAX(L1_CACHE_BYTES, 128)
struct btree_geo {
diff --git a/lib/build_OID_registry b/lib/build_OID_registry
index d7fc32ea8ac2..8267e8d71338 100755
--- a/lib/build_OID_registry
+++ b/lib/build_OID_registry
@@ -8,6 +8,7 @@
#
use strict;
+use Cwd qw(abs_path);
my @names = ();
my @oids = ();
@@ -17,6 +18,8 @@ if ($#ARGV != 1) {
exit(2);
}
+my $abs_srctree = abs_path($ENV{'srctree'});
+
#
# Open the file to read from
#
@@ -35,7 +38,9 @@ close IN_FILE || die;
#
open C_FILE, ">$ARGV[1]" or die;
print C_FILE "/*\n";
-print C_FILE " * Automatically generated by ", $0, ". Do not edit\n";
+my $scriptname = $0;
+$scriptname =~ s#^\Q$abs_srctree/\E##;
+print C_FILE " * Automatically generated by ", $scriptname, ". Do not edit\n";
print C_FILE " */\n";
#
diff --git a/lib/buildid.c b/lib/buildid.c
index 898301b49eb6..e02b5507418b 100644
--- a/lib/buildid.c
+++ b/lib/buildid.c
@@ -73,6 +73,13 @@ static int get_build_id_32(const void *page_addr, unsigned char *build_id,
Elf32_Phdr *phdr;
int i;
+ /*
+ * FIXME
+ * Neither ELF spec nor ELF loader require that program headers
+ * start immediately after ELF header.
+ */
+ if (ehdr->e_phoff != sizeof(Elf32_Ehdr))
+ return -EINVAL;
/* only supports phdr that fits in one page */
if (ehdr->e_phnum >
(PAGE_SIZE - sizeof(Elf32_Ehdr)) / sizeof(Elf32_Phdr))
@@ -98,6 +105,13 @@ static int get_build_id_64(const void *page_addr, unsigned char *build_id,
Elf64_Phdr *phdr;
int i;
+ /*
+ * FIXME
+ * Neither ELF spec nor ELF loader require that program headers
+ * start immediately after ELF header.
+ */
+ if (ehdr->e_phoff != sizeof(Elf64_Ehdr))
+ return -EINVAL;
/* only supports phdr that fits in one page */
if (ehdr->e_phnum >
(PAGE_SIZE - sizeof(Elf64_Ehdr)) / sizeof(Elf64_Phdr))
@@ -182,8 +196,8 @@ unsigned char vmlinux_build_id[BUILD_ID_SIZE_MAX] __ro_after_init;
*/
void __init init_vmlinux_build_id(void)
{
- extern const void __start_notes __weak;
- extern const void __stop_notes __weak;
+ extern const void __start_notes;
+ extern const void __stop_notes;
unsigned int size = &__stop_notes - &__start_notes;
build_id_parse_buf(&__start_notes, vmlinux_build_id, size);
diff --git a/lib/checksum_kunit.c b/lib/checksum_kunit.c
index 404dba36bae3..4e4d081a1d3b 100644
--- a/lib/checksum_kunit.c
+++ b/lib/checksum_kunit.c
@@ -639,4 +639,5 @@ static struct kunit_suite checksum_test_suite = {
kunit_test_suites(&checksum_test_suite);
MODULE_AUTHOR("Noah Goldstein <goldstein.w.n@gmail.com>");
+MODULE_DESCRIPTION("Test cases csum_* APIs");
MODULE_LICENSE("GPL");
diff --git a/lib/closure.c b/lib/closure.c
index c16540552d61..116afae2eed9 100644
--- a/lib/closure.c
+++ b/lib/closure.c
@@ -13,14 +13,25 @@
#include <linux/seq_file.h>
#include <linux/sched/debug.h>
-static inline void closure_put_after_sub(struct closure *cl, int flags)
+static inline void closure_put_after_sub_checks(int flags)
{
int r = flags & CLOSURE_REMAINING_MASK;
- BUG_ON(flags & CLOSURE_GUARD_MASK);
- BUG_ON(!r && (flags & ~CLOSURE_DESTRUCTOR));
+ if (WARN(flags & CLOSURE_GUARD_MASK,
+ "closure has guard bits set: %x (%u)",
+ flags & CLOSURE_GUARD_MASK, (unsigned) __fls(r)))
+ r &= ~CLOSURE_GUARD_MASK;
+
+ WARN(!r && (flags & ~CLOSURE_DESTRUCTOR),
+ "closure ref hit 0 with incorrect flags set: %x (%u)",
+ flags & ~CLOSURE_DESTRUCTOR, (unsigned) __fls(flags));
+}
+
+static inline void closure_put_after_sub(struct closure *cl, int flags)
+{
+ closure_put_after_sub_checks(flags);
- if (!r) {
+ if (!(flags & CLOSURE_REMAINING_MASK)) {
smp_acquire__after_ctrl_dep();
cl->closure_get_happened = false;
@@ -139,6 +150,78 @@ void __sched __closure_sync(struct closure *cl)
}
EXPORT_SYMBOL(__closure_sync);
+/*
+ * closure_return_sync - finish running a closure, synchronously (i.e. waiting
+ * for outstanding get()s to finish) and returning once closure refcount is 0.
+ *
+ * Unlike closure_sync() this doesn't reinit the ref to 1; subsequent
+ * closure_get_not_zero() calls will fail.
+ */
+void __sched closure_return_sync(struct closure *cl)
+{
+ struct closure_syncer s = { .task = current };
+
+ cl->s = &s;
+ set_closure_fn(cl, closure_sync_fn, NULL);
+
+ unsigned flags = atomic_sub_return_release(1 + CLOSURE_RUNNING - CLOSURE_DESTRUCTOR,
+ &cl->remaining);
+
+ closure_put_after_sub_checks(flags);
+
+ if (unlikely(flags & CLOSURE_REMAINING_MASK)) {
+ while (1) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ if (s.done)
+ break;
+ schedule();
+ }
+
+ __set_current_state(TASK_RUNNING);
+ }
+
+ if (cl->parent)
+ closure_put(cl->parent);
+}
+EXPORT_SYMBOL(closure_return_sync);
+
+int __sched __closure_sync_timeout(struct closure *cl, unsigned long timeout)
+{
+ struct closure_syncer s = { .task = current };
+ int ret = 0;
+
+ cl->s = &s;
+ continue_at(cl, closure_sync_fn, NULL);
+
+ while (1) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ if (s.done)
+ break;
+ if (!timeout) {
+ /*
+ * Carefully undo the continue_at() - but only if it
+ * hasn't completed, i.e. the final closure_put() hasn't
+ * happened yet:
+ */
+ unsigned old, new, v = atomic_read(&cl->remaining);
+ do {
+ old = v;
+ if (!old || (old & CLOSURE_RUNNING))
+ goto success;
+
+ new = old + CLOSURE_REMAINING_INITIALIZER;
+ } while ((v = atomic_cmpxchg(&cl->remaining, old, new)) != old);
+ ret = -ETIME;
+ }
+
+ timeout = schedule_timeout(timeout);
+ }
+success:
+ __set_current_state(TASK_RUNNING);
+ return ret;
+}
+EXPORT_SYMBOL(__closure_sync_timeout);
+
#ifdef CONFIG_DEBUG_CLOSURES
static LIST_HEAD(closure_list);
@@ -161,6 +244,9 @@ void closure_debug_destroy(struct closure *cl)
{
unsigned long flags;
+ if (cl->magic == CLOSURE_MAGIC_STACK)
+ return;
+
BUG_ON(cl->magic != CLOSURE_MAGIC_ALIVE);
cl->magic = CLOSURE_MAGIC_DEAD;
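For context, a hedged sketch of how the new timeout primitive might be used (queue_my_io() is a hypothetical helper that owns the extra ref and calls closure_put() on completion; only the closure functions themselves are defined by this code):

	/* Hedged usage sketch, not in this patch. */
	struct closure cl;

	closure_init_stack(&cl);
	closure_get(&cl);		/* ref owned by the async op */
	queue_my_io(&cl);		/* hypothetical: puts the ref when done */
	if (__closure_sync_timeout(&cl, HZ) == -ETIME)
		pr_warn("I/O still outstanding after 1 second\n");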
diff --git a/lib/cmdline_kunit.c b/lib/cmdline_kunit.c
index 705b82736be0..c1602f797637 100644
--- a/lib/cmdline_kunit.c
+++ b/lib/cmdline_kunit.c
@@ -153,4 +153,5 @@ static struct kunit_suite cmdline_test_suite = {
};
kunit_test_suite(cmdline_test_suite);
+MODULE_DESCRIPTION("Test cases for API provided by cmdline.c");
MODULE_LICENSE("GPL");
diff --git a/lib/cmpxchg-emu.c b/lib/cmpxchg-emu.c
new file mode 100644
index 000000000000..27f6f97cb60d
--- /dev/null
+++ b/lib/cmpxchg-emu.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Emulated 1-byte cmpxchg operation for architectures lacking direct
+ * support for this size. This is implemented in terms of 4-byte cmpxchg
+ * operations.
+ *
+ * Copyright (C) 2024 Paul E. McKenney.
+ */
+
+#include <linux/types.h>
+#include <linux/export.h>
+#include <linux/instrumented.h>
+#include <linux/atomic.h>
+#include <linux/panic.h>
+#include <linux/bug.h>
+#include <asm-generic/rwonce.h>
+#include <linux/cmpxchg-emu.h>
+
+union u8_32 {
+ u8 b[4];
+ u32 w;
+};
+
+/* Emulate one-byte cmpxchg() in terms of 4-byte cmpxchg. */
+uintptr_t cmpxchg_emu_u8(volatile u8 *p, uintptr_t old, uintptr_t new)
+{
+ u32 *p32 = (u32 *)(((uintptr_t)p) & ~0x3);
+ int i = ((uintptr_t)p) & 0x3;
+ union u8_32 old32;
+ union u8_32 new32;
+ u32 ret;
+
+ ret = READ_ONCE(*p32);
+ do {
+ old32.w = ret;
+ if (old32.b[i] != old)
+ return old32.b[i];
+ new32.w = old32.w;
+ new32.b[i] = new;
+ instrument_atomic_read_write(p, 1);
+ ret = data_race(cmpxchg(p32, old32.w, new32.w)); // Overridden above.
+ } while (ret != old32.w);
+ return old;
+}
+EXPORT_SYMBOL_GPL(cmpxchg_emu_u8);
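A hedged smoke test of the emulation's semantics (not part of this patch): on a match the function stores the new byte and returns the old value; on a mismatch it returns the byte currently in memory and stores nothing.

	/* Hedged sketch, not in this patch. */
	static void cmpxchg_emu_smoke_test(void)
	{
		u8 b = 0x12;

		/* matches: stores 0x34 and returns the prior value 0x12 */
		WARN_ON(cmpxchg_emu_u8(&b, 0x12, 0x34) != 0x12);
		/* no match: b is now 0x34, so nothing is stored */
		WARN_ON(cmpxchg_emu_u8(&b, 0x12, 0x56) != 0x34);
	}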
diff --git a/lib/codetag.c b/lib/codetag.c
new file mode 100644
index 000000000000..5ace625f2328
--- /dev/null
+++ b/lib/codetag.c
@@ -0,0 +1,283 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/codetag.h>
+#include <linux/idr.h>
+#include <linux/kallsyms.h>
+#include <linux/module.h>
+#include <linux/seq_buf.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+struct codetag_type {
+ struct list_head link;
+ unsigned int count;
+ struct idr mod_idr;
+ struct rw_semaphore mod_lock; /* protects mod_idr */
+ struct codetag_type_desc desc;
+};
+
+struct codetag_range {
+ struct codetag *start;
+ struct codetag *stop;
+};
+
+struct codetag_module {
+ struct module *mod;
+ struct codetag_range range;
+};
+
+static DEFINE_MUTEX(codetag_lock);
+static LIST_HEAD(codetag_types);
+
+void codetag_lock_module_list(struct codetag_type *cttype, bool lock)
+{
+ if (lock)
+ down_read(&cttype->mod_lock);
+ else
+ up_read(&cttype->mod_lock);
+}
+
+bool codetag_trylock_module_list(struct codetag_type *cttype)
+{
+ return down_read_trylock(&cttype->mod_lock) != 0;
+}
+
+struct codetag_iterator codetag_get_ct_iter(struct codetag_type *cttype)
+{
+ struct codetag_iterator iter = {
+ .cttype = cttype,
+ .cmod = NULL,
+ .mod_id = 0,
+ .ct = NULL,
+ };
+
+ return iter;
+}
+
+static inline struct codetag *get_first_module_ct(struct codetag_module *cmod)
+{
+ return cmod->range.start < cmod->range.stop ? cmod->range.start : NULL;
+}
+
+static inline
+struct codetag *get_next_module_ct(struct codetag_iterator *iter)
+{
+ struct codetag *res = (struct codetag *)
+ ((char *)iter->ct + iter->cttype->desc.tag_size);
+
+ return res < iter->cmod->range.stop ? res : NULL;
+}
+
+struct codetag *codetag_next_ct(struct codetag_iterator *iter)
+{
+ struct codetag_type *cttype = iter->cttype;
+ struct codetag_module *cmod;
+ struct codetag *ct;
+
+ lockdep_assert_held(&cttype->mod_lock);
+
+ if (unlikely(idr_is_empty(&cttype->mod_idr)))
+ return NULL;
+
+ ct = NULL;
+ while (true) {
+ cmod = idr_find(&cttype->mod_idr, iter->mod_id);
+
+ /* If module was removed move to the next one */
+ if (!cmod)
+ cmod = idr_get_next_ul(&cttype->mod_idr,
+ &iter->mod_id);
+
+ /* Exit if no more modules */
+ if (!cmod)
+ break;
+
+ if (cmod != iter->cmod) {
+ iter->cmod = cmod;
+ ct = get_first_module_ct(cmod);
+ } else
+ ct = get_next_module_ct(iter);
+
+ if (ct)
+ break;
+
+ iter->mod_id++;
+ }
+
+ iter->ct = ct;
+ return ct;
+}
+
+void codetag_to_text(struct seq_buf *out, struct codetag *ct)
+{
+ if (ct->modname)
+ seq_buf_printf(out, "%s:%u [%s] func:%s",
+ ct->filename, ct->lineno,
+ ct->modname, ct->function);
+ else
+ seq_buf_printf(out, "%s:%u func:%s",
+ ct->filename, ct->lineno, ct->function);
+}
+
+static inline size_t range_size(const struct codetag_type *cttype,
+ const struct codetag_range *range)
+{
+ return ((char *)range->stop - (char *)range->start) /
+ cttype->desc.tag_size;
+}
+
+#ifdef CONFIG_MODULES
+static void *get_symbol(struct module *mod, const char *prefix, const char *name)
+{
+ DECLARE_SEQ_BUF(sb, KSYM_NAME_LEN);
+ const char *buf;
+ void *ret;
+
+ seq_buf_printf(&sb, "%s%s", prefix, name);
+ if (seq_buf_has_overflowed(&sb))
+ return NULL;
+
+ buf = seq_buf_str(&sb);
+ preempt_disable();
+ ret = mod ?
+ (void *)find_kallsyms_symbol_value(mod, buf) :
+ (void *)kallsyms_lookup_name(buf);
+ preempt_enable();
+
+ return ret;
+}
+
+static struct codetag_range get_section_range(struct module *mod,
+ const char *section)
+{
+ return (struct codetag_range) {
+ get_symbol(mod, "__start_", section),
+ get_symbol(mod, "__stop_", section),
+ };
+}
+
+static int codetag_module_init(struct codetag_type *cttype, struct module *mod)
+{
+ struct codetag_range range;
+ struct codetag_module *cmod;
+ int err;
+
+ range = get_section_range(mod, cttype->desc.section);
+ if (!range.start || !range.stop) {
+ pr_warn("Failed to load code tags of type %s from the module %s\n",
+ cttype->desc.section,
+ mod ? mod->name : "(built-in)");
+ return -EINVAL;
+ }
+
+ /* Ignore empty ranges */
+ if (range.start == range.stop)
+ return 0;
+
+ BUG_ON(range.start > range.stop);
+
+ cmod = kmalloc(sizeof(*cmod), GFP_KERNEL);
+ if (unlikely(!cmod))
+ return -ENOMEM;
+
+ cmod->mod = mod;
+ cmod->range = range;
+
+ down_write(&cttype->mod_lock);
+ err = idr_alloc(&cttype->mod_idr, cmod, 0, 0, GFP_KERNEL);
+ if (err >= 0) {
+ cttype->count += range_size(cttype, &range);
+ if (cttype->desc.module_load)
+ cttype->desc.module_load(cttype, cmod);
+ }
+ up_write(&cttype->mod_lock);
+
+ if (err < 0) {
+ kfree(cmod);
+ return err;
+ }
+
+ return 0;
+}
+
+void codetag_load_module(struct module *mod)
+{
+ struct codetag_type *cttype;
+
+ if (!mod)
+ return;
+
+ mutex_lock(&codetag_lock);
+ list_for_each_entry(cttype, &codetag_types, link)
+ codetag_module_init(cttype, mod);
+ mutex_unlock(&codetag_lock);
+}
+
+bool codetag_unload_module(struct module *mod)
+{
+ struct codetag_type *cttype;
+ bool unload_ok = true;
+
+ if (!mod)
+ return true;
+
+ mutex_lock(&codetag_lock);
+ list_for_each_entry(cttype, &codetag_types, link) {
+ struct codetag_module *found = NULL;
+ struct codetag_module *cmod;
+ unsigned long mod_id, tmp;
+
+ down_write(&cttype->mod_lock);
+ idr_for_each_entry_ul(&cttype->mod_idr, cmod, tmp, mod_id) {
+ if (cmod->mod && cmod->mod == mod) {
+ found = cmod;
+ break;
+ }
+ }
+ if (found) {
+ if (cttype->desc.module_unload)
+ if (!cttype->desc.module_unload(cttype, cmod))
+ unload_ok = false;
+
+ cttype->count -= range_size(cttype, &cmod->range);
+ idr_remove(&cttype->mod_idr, mod_id);
+ kfree(cmod);
+ }
+ up_write(&cttype->mod_lock);
+ }
+ mutex_unlock(&codetag_lock);
+
+ return unload_ok;
+}
+
+#else /* CONFIG_MODULES */
+static int codetag_module_init(struct codetag_type *cttype, struct module *mod) { return 0; }
+#endif /* CONFIG_MODULES */
+
+struct codetag_type *
+codetag_register_type(const struct codetag_type_desc *desc)
+{
+ struct codetag_type *cttype;
+ int err;
+
+ BUG_ON(desc->tag_size <= 0);
+
+ cttype = kzalloc(sizeof(*cttype), GFP_KERNEL);
+ if (unlikely(!cttype))
+ return ERR_PTR(-ENOMEM);
+
+ cttype->desc = *desc;
+ idr_init(&cttype->mod_idr);
+ init_rwsem(&cttype->mod_lock);
+
+ err = codetag_module_init(cttype, NULL);
+ if (unlikely(err)) {
+ kfree(cttype);
+ return ERR_PTR(err);
+ }
+
+ mutex_lock(&codetag_lock);
+ list_add_tail(&cttype->link, &codetag_types);
+ mutex_unlock(&codetag_lock);
+
+ return cttype;
+}
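A hedged sketch of the iteration pattern the API above supports (not part of this patch): take the module-list lock, walk the tags with the iterator, and format each one. lib/alloc_tag.c in this series does essentially this for /proc/allocinfo.

	/* Hedged sketch, not in this patch: print every tag of one type. */
	static void dump_all_tags(struct codetag_type *cttype)
	{
		struct codetag_iterator iter;
		struct codetag *ct;

		codetag_lock_module_list(cttype, true);
		iter = codetag_get_ct_iter(cttype);
		while ((ct = codetag_next_ct(&iter))) {
			DECLARE_SEQ_BUF(sb, 256);

			codetag_to_text(&sb, ct);
			pr_info("%s\n", seq_buf_str(&sb));
		}
		codetag_lock_module_list(cttype, false);
	}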
diff --git a/lib/cpumask_kunit.c b/lib/cpumask_kunit.c
index a105e6369efc..6b62a6bdd50e 100644
--- a/lib/cpumask_kunit.c
+++ b/lib/cpumask_kunit.c
@@ -152,4 +152,5 @@ static struct kunit_suite test_cpumask_suite = {
};
kunit_test_suite(test_cpumask_suite);
+MODULE_DESCRIPTION("KUnit tests for cpumask");
MODULE_LICENSE("GPL");
diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig
index 45436bfc6dff..b01253cac70a 100644
--- a/lib/crypto/Kconfig
+++ b/lib/crypto/Kconfig
@@ -8,6 +8,11 @@ config CRYPTO_LIB_UTILS
config CRYPTO_LIB_AES
tristate
+config CRYPTO_LIB_AESCFB
+ tristate
+ select CRYPTO_LIB_AES
+ select CRYPTO_LIB_UTILS
+
config CRYPTO_LIB_AESGCM
tristate
select CRYPTO_LIB_AES
diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile
index 8d1446c2be71..969baab8c805 100644
--- a/lib/crypto/Makefile
+++ b/lib/crypto/Makefile
@@ -10,6 +10,9 @@ obj-$(CONFIG_CRYPTO_LIB_CHACHA_GENERIC) += libchacha.o
obj-$(CONFIG_CRYPTO_LIB_AES) += libaes.o
libaes-y := aes.o
+obj-$(CONFIG_CRYPTO_LIB_AESCFB) += libaescfb.o
+libaescfb-y := aescfb.o
+
obj-$(CONFIG_CRYPTO_LIB_AESGCM) += libaesgcm.o
libaesgcm-y := aesgcm.o
diff --git a/lib/crypto/aescfb.c b/lib/crypto/aescfb.c
new file mode 100644
index 000000000000..749dc1258a44
--- /dev/null
+++ b/lib/crypto/aescfb.c
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Minimal library implementation of AES in CFB mode
+ *
+ * Copyright 2023 Google LLC
+ */
+
+#include <linux/module.h>
+
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+
+#include <asm/irqflags.h>
+
+static void aescfb_encrypt_block(const struct crypto_aes_ctx *ctx, void *dst,
+ const void *src)
+{
+ unsigned long flags;
+
+ /*
+ * In AES-CFB, the AES encryption operates on known 'plaintext' (the IV
+ * and ciphertext), making it susceptible to timing attacks on the
+ * encryption key. The AES library already mitigates this risk to some
+ * extent by pulling the entire S-box into the caches before doing any
+ * substitutions, but this strategy is more effective when running with
+ * interrupts disabled.
+ */
+ local_irq_save(flags);
+ aes_encrypt(ctx, dst, src);
+ local_irq_restore(flags);
+}
+
+/**
+ * aescfb_encrypt - Perform AES-CFB encryption on a block of data
+ *
+ * @ctx: The AES-CFB key schedule
+ * @dst: Pointer to the ciphertext output buffer
+ * @src: Pointer to the plaintext (may equal @dst for encryption in place)
+ * @len: The size in bytes of the plaintext and ciphertext.
+ * @iv: The initialization vector (IV) to use for this block of data
+ */
+void aescfb_encrypt(const struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src,
+ int len, const u8 iv[AES_BLOCK_SIZE])
+{
+ u8 ks[AES_BLOCK_SIZE];
+ const u8 *v = iv;
+
+ while (len > 0) {
+ aescfb_encrypt_block(ctx, ks, v);
+ crypto_xor_cpy(dst, src, ks, min(len, AES_BLOCK_SIZE));
+ v = dst;
+
+ dst += AES_BLOCK_SIZE;
+ src += AES_BLOCK_SIZE;
+ len -= AES_BLOCK_SIZE;
+ }
+
+ memzero_explicit(ks, sizeof(ks));
+}
+EXPORT_SYMBOL(aescfb_encrypt);
+
+/**
+ * aescfb_decrypt - Perform AES-CFB decryption on a block of data
+ *
+ * @ctx: The AES-CFB key schedule
+ * @dst: Pointer to the plaintext output buffer
+ * @src: Pointer to the ciphertext (may equal @dst for decryption in place)
+ * @len: The size in bytes of the plaintext and ciphertext.
+ * @iv: The initialization vector (IV) to use for this block of data
+ */
+void aescfb_decrypt(const struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src,
+ int len, const u8 iv[AES_BLOCK_SIZE])
+{
+ u8 ks[2][AES_BLOCK_SIZE];
+
+ aescfb_encrypt_block(ctx, ks[0], iv);
+
+ for (int i = 0; len > 0; i ^= 1) {
+ if (len > AES_BLOCK_SIZE)
+ /*
+ * Generate the keystream for the next block before
+ * performing the XOR, as that may update in place and
+ * overwrite the ciphertext.
+ */
+ aescfb_encrypt_block(ctx, ks[!i], src);
+
+ crypto_xor_cpy(dst, src, ks[i], min(len, AES_BLOCK_SIZE));
+
+ dst += AES_BLOCK_SIZE;
+ src += AES_BLOCK_SIZE;
+ len -= AES_BLOCK_SIZE;
+ }
+
+ memzero_explicit(ks, sizeof(ks));
+}
+EXPORT_SYMBOL(aescfb_decrypt);
+
+MODULE_DESCRIPTION("Generic AES-CFB library");
+MODULE_AUTHOR("Ard Biesheuvel <ardb@kernel.org>");
+MODULE_LICENSE("GPL");
+
+#ifndef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
+
+/*
+ * Test code below. Vectors taken from crypto/testmgr.h
+ */
+
+static struct {
+ u8 ptext[64];
+ u8 ctext[64];
+
+ u8 key[AES_MAX_KEY_SIZE];
+ u8 iv[AES_BLOCK_SIZE];
+
+ int klen;
+ int len;
+} const aescfb_tv[] __initconst = {
+ { /* From NIST SP800-38A */
+ .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
+ "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
+ .klen = 16,
+ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+ "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+ "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+ "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
+ "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+ "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
+ "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+ .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad\x20"
+ "\x33\x34\x49\xf8\xe8\x3c\xfb\x4a"
+ "\xc8\xa6\x45\x37\xa0\xb3\xa9\x3f"
+ "\xcd\xe3\xcd\xad\x9f\x1c\xe5\x8b"
+ "\x26\x75\x1f\x67\xa3\xcb\xb1\x40"
+ "\xb1\x80\x8c\xf1\x87\xa4\xf4\xdf"
+ "\xc0\x4b\x05\x35\x7c\x5d\x1c\x0e"
+ "\xea\xc4\xc6\x6f\x9f\xf7\xf2\xe6",
+ .len = 64,
+ }, {
+ .key = "\x8e\x73\xb0\xf7\xda\x0e\x64\x52"
+ "\xc8\x10\xf3\x2b\x80\x90\x79\xe5"
+ "\x62\xf8\xea\xd2\x52\x2c\x6b\x7b",
+ .klen = 24,
+ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+ "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+ "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+ "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
+ "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+ "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
+ "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+ .ctext = "\xcd\xc8\x0d\x6f\xdd\xf1\x8c\xab"
+ "\x34\xc2\x59\x09\xc9\x9a\x41\x74"
+ "\x67\xce\x7f\x7f\x81\x17\x36\x21"
+ "\x96\x1a\x2b\x70\x17\x1d\x3d\x7a"
+ "\x2e\x1e\x8a\x1d\xd5\x9b\x88\xb1"
+ "\xc8\xe6\x0f\xed\x1e\xfa\xc4\xc9"
+ "\xc0\x5f\x9f\x9c\xa9\x83\x4f\xa0"
+ "\x42\xae\x8f\xba\x58\x4b\x09\xff",
+ .len = 64,
+ }, {
+ .key = "\x60\x3d\xeb\x10\x15\xca\x71\xbe"
+ "\x2b\x73\xae\xf0\x85\x7d\x77\x81"
+ "\x1f\x35\x2c\x07\x3b\x61\x08\xd7"
+ "\x2d\x98\x10\xa3\x09\x14\xdf\xf4",
+ .klen = 32,
+ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+ "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+ "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+ "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
+ "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+ "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
+ "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+ .ctext = "\xdc\x7e\x84\xbf\xda\x79\x16\x4b"
+ "\x7e\xcd\x84\x86\x98\x5d\x38\x60"
+ "\x39\xff\xed\x14\x3b\x28\xb1\xc8"
+ "\x32\x11\x3c\x63\x31\xe5\x40\x7b"
+ "\xdf\x10\x13\x24\x15\xe5\x4b\x92"
+ "\xa1\x3e\xd0\xa8\x26\x7a\xe2\xf9"
+ "\x75\xa3\x85\x74\x1a\xb9\xce\xf8"
+ "\x20\x31\x62\x3d\x55\xb1\xe4\x71",
+ .len = 64,
+ }, { /* > 16 bytes, not a multiple of 16 bytes */
+ .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
+ "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
+ .klen = 16,
+ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+ "\xae",
+ .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad\x20"
+ "\x33\x34\x49\xf8\xe8\x3c\xfb\x4a"
+ "\xc8",
+ .len = 17,
+ }, { /* < 16 bytes */
+ .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
+ "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
+ .klen = 16,
+ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f",
+ .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad",
+ .len = 7,
+ },
+};
+
+static int __init libaescfb_init(void)
+{
+ for (int i = 0; i < ARRAY_SIZE(aescfb_tv); i++) {
+ struct crypto_aes_ctx ctx;
+ u8 buf[64];
+
+ if (aes_expandkey(&ctx, aescfb_tv[i].key, aescfb_tv[i].klen)) {
+ pr_err("aes_expandkey() failed on vector %d\n", i);
+ return -ENODEV;
+ }
+
+ aescfb_encrypt(&ctx, buf, aescfb_tv[i].ptext, aescfb_tv[i].len,
+ aescfb_tv[i].iv);
+ if (memcmp(buf, aescfb_tv[i].ctext, aescfb_tv[i].len)) {
+ pr_err("aescfb_encrypt() #1 failed on vector %d\n", i);
+ return -ENODEV;
+ }
+
+ /* decrypt in place */
+ aescfb_decrypt(&ctx, buf, buf, aescfb_tv[i].len, aescfb_tv[i].iv);
+ if (memcmp(buf, aescfb_tv[i].ptext, aescfb_tv[i].len)) {
+ pr_err("aescfb_decrypt() failed on vector %d\n", i);
+ return -ENODEV;
+ }
+
+ /* encrypt in place */
+ aescfb_encrypt(&ctx, buf, buf, aescfb_tv[i].len, aescfb_tv[i].iv);
+ if (memcmp(buf, aescfb_tv[i].ctext, aescfb_tv[i].len)) {
+ pr_err("aescfb_encrypt() #2 failed on vector %d\n", i);
+
+ return -ENODEV;
+ }
+
+ }
+ return 0;
+}
+module_init(libaescfb_init);
+
+static void __exit libaescfb_exit(void)
+{
+}
+module_exit(libaescfb_exit);
+#endif
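A hedged one-shot usage sketch for the new library (not part of this patch; encrypt_example() is an illustrative name). It mirrors what the built-in self-test above does with the NIST vectors:

	/* Hedged sketch, not in this patch: AES-128-CFB encryption in one call. */
	static int encrypt_example(const u8 key[16], const u8 iv[AES_BLOCK_SIZE],
				   const u8 *pt, u8 *ct, int len)
	{
		struct crypto_aes_ctx aes;
		int err;

		err = aes_expandkey(&aes, key, 16);
		if (err)
			return err;

		aescfb_encrypt(&aes, ct, pt, len, iv);
		memzero_explicit(&aes, sizeof(aes));	/* don't leak the key schedule */
		return 0;
	}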
diff --git a/lib/crypto/arc4.c b/lib/crypto/arc4.c
index c2020f19c652..838812d18216 100644
--- a/lib/crypto/arc4.c
+++ b/lib/crypto/arc4.c
@@ -71,4 +71,5 @@ void arc4_crypt(struct arc4_ctx *ctx, u8 *out, const u8 *in, unsigned int len)
}
EXPORT_SYMBOL(arc4_crypt);
+MODULE_DESCRIPTION("ARC4 Cipher Algorithm");
MODULE_LICENSE("GPL");
diff --git a/lib/crypto/des.c b/lib/crypto/des.c
index ef5bb8822aba..9518658b97cf 100644
--- a/lib/crypto/des.c
+++ b/lib/crypto/des.c
@@ -899,4 +899,5 @@ void des3_ede_decrypt(const struct des3_ede_ctx *dctx, u8 *dst, const u8 *src)
}
EXPORT_SYMBOL_GPL(des3_ede_decrypt);
+MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms");
MODULE_LICENSE("GPL");
diff --git a/lib/crypto/libchacha.c b/lib/crypto/libchacha.c
index dabc3accae05..cc1be0496eb9 100644
--- a/lib/crypto/libchacha.c
+++ b/lib/crypto/libchacha.c
@@ -32,4 +32,5 @@ void chacha_crypt_generic(u32 *state, u8 *dst, const u8 *src,
}
EXPORT_SYMBOL(chacha_crypt_generic);
+MODULE_DESCRIPTION("ChaCha stream cipher (RFC7539)");
MODULE_LICENSE("GPL");
diff --git a/lib/crypto/mpi/ec.c b/lib/crypto/mpi/ec.c
index e16dca1e23d5..4781f00982ef 100644
--- a/lib/crypto/mpi/ec.c
+++ b/lib/crypto/mpi/ec.c
@@ -1285,14 +1285,12 @@ void mpi_ec_mul_point(MPI_POINT result,
sum = &p2_;
for (j = nbits-1; j >= 0; j--) {
- MPI_POINT t;
-
sw = mpi_test_bit(scalar, j);
point_swap_cond(q1, q2, sw, ctx);
montgomery_ladder(prd, sum, q1, q2, point->x, ctx);
point_swap_cond(prd, sum, sw, ctx);
- t = q1; q1 = prd; prd = t;
- t = q2; q2 = sum; sum = t;
+ swap(q1, prd);
+ swap(q2, sum);
}
mpi_clear(result->y);
diff --git a/lib/crypto/mpi/mpi-bit.c b/lib/crypto/mpi/mpi-bit.c
index 070ba784c9f1..e08fc202ea5c 100644
--- a/lib/crypto/mpi/mpi-bit.c
+++ b/lib/crypto/mpi/mpi-bit.c
@@ -212,12 +212,10 @@ void mpi_rshift(MPI x, MPI a, unsigned int n)
return;
}
- if (nlimbs) {
- for (i = 0; i < x->nlimbs - nlimbs; i++)
- x->d[i] = x->d[i+nlimbs];
- x->d[i] = 0;
- x->nlimbs -= nlimbs;
- }
+ for (i = 0; i < x->nlimbs - nlimbs; i++)
+ x->d[i] = x->d[i+nlimbs];
+ x->d[i] = 0;
+ x->nlimbs -= nlimbs;
if (x->nlimbs && nbits)
mpihelp_rshift(x->d, x->d, x->nlimbs, nbits);
diff --git a/lib/crypto/mpi/mpi-pow.c b/lib/crypto/mpi/mpi-pow.c
index 2fd7a46d55ec..67fbd4c2503d 100644
--- a/lib/crypto/mpi/mpi-pow.c
+++ b/lib/crypto/mpi/mpi-pow.c
@@ -176,7 +176,6 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
for (;;) {
while (c) {
- mpi_ptr_t tp;
mpi_size_t xsize;
/*if (mpihelp_mul_n(xp, rp, rp, rsize) < 0) goto enomem */
@@ -207,9 +206,7 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
xsize = msize;
}
- tp = rp;
- rp = xp;
- xp = tp;
+ swap(rp, xp);
rsize = xsize;
if ((mpi_limb_signed_t) e < 0) {
@@ -235,9 +232,7 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
xsize = msize;
}
- tp = rp;
- rp = xp;
- xp = tp;
+ swap(rp, xp);
rsize = xsize;
}
e <<= 1;
diff --git a/lib/crypto/poly1305.c b/lib/crypto/poly1305.c
index 26d87fc3823e..5d8378d23e95 100644
--- a/lib/crypto/poly1305.c
+++ b/lib/crypto/poly1305.c
@@ -76,3 +76,4 @@ EXPORT_SYMBOL_GPL(poly1305_final_generic);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Martin Willi <martin@strongswan.org>");
+MODULE_DESCRIPTION("Poly1305 authenticator algorithm, RFC7539");
diff --git a/lib/crypto/sha1.c b/lib/crypto/sha1.c
index 1aebe7be9401..6d2922747cab 100644
--- a/lib/crypto/sha1.c
+++ b/lib/crypto/sha1.c
@@ -137,4 +137,5 @@ void sha1_init(__u32 *buf)
}
EXPORT_SYMBOL(sha1_init);
+MODULE_DESCRIPTION("SHA-1 Algorithm");
MODULE_LICENSE("GPL");
diff --git a/lib/crypto/sha256.c b/lib/crypto/sha256.c
index 3ac1ef8677db..3f42d203c7bc 100644
--- a/lib/crypto/sha256.c
+++ b/lib/crypto/sha256.c
@@ -165,4 +165,5 @@ void sha256(const u8 *data, unsigned int len, u8 *out)
}
EXPORT_SYMBOL(sha256);
+MODULE_DESCRIPTION("SHA-256 Algorithm");
MODULE_LICENSE("GPL");
diff --git a/lib/crypto/utils.c b/lib/crypto/utils.c
index c852c7151b0a..373364141408 100644
--- a/lib/crypto/utils.c
+++ b/lib/crypto/utils.c
@@ -85,4 +85,5 @@ void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int len)
}
EXPORT_SYMBOL_GPL(__crypto_xor);
+MODULE_DESCRIPTION("Crypto library utility functions");
MODULE_LICENSE("GPL");
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index fb12a9bacd2f..7cea91e193a8 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -78,16 +78,17 @@ static bool obj_freeing;
/* The number of objs on the global free list */
static int obj_nr_tofree;
-static int debug_objects_maxchain __read_mostly;
-static int __maybe_unused debug_objects_maxchecked __read_mostly;
-static int debug_objects_fixups __read_mostly;
-static int debug_objects_warnings __read_mostly;
-static int debug_objects_enabled __read_mostly
- = CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
-static int debug_objects_pool_size __read_mostly
- = ODEBUG_POOL_SIZE;
-static int debug_objects_pool_min_level __read_mostly
- = ODEBUG_POOL_MIN_LEVEL;
+static int __data_racy debug_objects_maxchain __read_mostly;
+static int __data_racy __maybe_unused debug_objects_maxchecked __read_mostly;
+static int __data_racy debug_objects_fixups __read_mostly;
+static int __data_racy debug_objects_warnings __read_mostly;
+static int __data_racy debug_objects_enabled __read_mostly
+ = CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
+static int __data_racy debug_objects_pool_size __read_mostly
+ = ODEBUG_POOL_SIZE;
+static int __data_racy debug_objects_pool_min_level __read_mostly
+ = ODEBUG_POOL_MIN_LEVEL;
+
static const struct debug_obj_descr *descr_test __read_mostly;
static struct kmem_cache *obj_cache __ro_after_init;
diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
index 3518e7394eca..ca736166f100 100644
--- a/lib/decompress_bunzip2.c
+++ b/lib/decompress_bunzip2.c
@@ -232,7 +232,8 @@ static int INIT get_next_block(struct bunzip_data *bd)
RUNB) */
symCount = symTotal+2;
for (j = 0; j < groupCount; j++) {
- unsigned char length[MAX_SYMBOLS], temp[MAX_HUFCODE_BITS+1];
+ unsigned char length[MAX_SYMBOLS];
+ unsigned short temp[MAX_HUFCODE_BITS+1];
int minLen, maxLen, pp;
/* Read Huffman code lengths for each symbol. They're
stored in a way similar to mtf; record a starting
diff --git a/lib/decompress_unlzma.c b/lib/decompress_unlzma.c
index 20a858031f12..9d34d35908da 100644
--- a/lib/decompress_unlzma.c
+++ b/lib/decompress_unlzma.c
@@ -37,7 +37,9 @@
#include <linux/decompress/mm.h>
+#ifndef MIN
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
+#endif
static long long INIT read_int(unsigned char *ptr, int size)
{
diff --git a/lib/devres.c b/lib/devres.c
index fe0c63caeb68..4fc152de6d8b 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -1,10 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/bug.h>
#include <linux/device.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/gfp.h>
+#include <linux/errno.h>
#include <linux/export.h>
+#include <linux/gfp_types.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
#include <linux/of_address.h>
+#include <linux/types.h>
enum devm_ioremap_type {
DEVM_IOREMAP = 0,
@@ -125,12 +128,13 @@ __devm_ioremap_resource(struct device *dev, const struct resource *res,
resource_size_t size;
void __iomem *dest_ptr;
char *pretty_name;
+ int ret;
BUG_ON(!dev);
if (!res || resource_type(res) != IORESOURCE_MEM) {
- dev_err(dev, "invalid resource %pR\n", res);
- return IOMEM_ERR_PTR(-EINVAL);
+ ret = dev_err_probe(dev, -EINVAL, "invalid resource %pR\n", res);
+ return IOMEM_ERR_PTR(ret);
}
if (type == DEVM_IOREMAP && res->flags & IORESOURCE_MEM_NONPOSTED)
@@ -144,20 +148,20 @@ __devm_ioremap_resource(struct device *dev, const struct resource *res,
else
pretty_name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
if (!pretty_name) {
- dev_err(dev, "can't generate pretty name for resource %pR\n", res);
- return IOMEM_ERR_PTR(-ENOMEM);
+ ret = dev_err_probe(dev, -ENOMEM, "can't generate pretty name for resource %pR\n", res);
+ return IOMEM_ERR_PTR(ret);
}
if (!devm_request_mem_region(dev, res->start, size, pretty_name)) {
- dev_err(dev, "can't request region for resource %pR\n", res);
- return IOMEM_ERR_PTR(-EBUSY);
+ ret = dev_err_probe(dev, -EBUSY, "can't request region for resource %pR\n", res);
+ return IOMEM_ERR_PTR(ret);
}
dest_ptr = __devm_ioremap(dev, res->start, size, type);
if (!dest_ptr) {
- dev_err(dev, "ioremap failed for resource %pR\n", res);
devm_release_mem_region(dev, res->start, size);
- dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
+ ret = dev_err_probe(dev, -ENOMEM, "ioremap failed for resource %pR\n", res);
+ return IOMEM_ERR_PTR(ret);
}
return dest_ptr;
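The conversion above follows a common probe-path pattern; a hedged sketch of its shape (not specific to this patch):

	/* Before: log and return in two steps. */
	dev_err(dev, "invalid resource %pR\n", res);
	return IOMEM_ERR_PTR(-EINVAL);

	/* After: dev_err_probe() returns the error it is given, and also
	 * defers (rather than prints) the message for -EPROBE_DEFER. */
	ret = dev_err_probe(dev, -EINVAL, "invalid resource %pR\n", res);
	return IOMEM_ERR_PTR(ret);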
diff --git a/lib/dhry_run.c b/lib/dhry_run.c
index e6a279dabf84..4a6d05ce4361 100644
--- a/lib/dhry_run.c
+++ b/lib/dhry_run.c
@@ -83,4 +83,5 @@ static int __init dhry_init(void)
module_init(dhry_init);
MODULE_AUTHOR("Geert Uytterhoeven <geert+renesas@glider.be>");
+MODULE_DESCRIPTION("Dhrystone benchmark test module");
MODULE_LICENSE("GPL");
diff --git a/lib/dim/Makefile b/lib/dim/Makefile
index 1d6858a108cb..c4cc4026c451 100644
--- a/lib/dim/Makefile
+++ b/lib/dim/Makefile
@@ -2,6 +2,6 @@
# DIM Dynamic Interrupt Moderation library
#
-obj-$(CONFIG_DIMLIB) += dim.o
+obj-$(CONFIG_DIMLIB) += dimlib.o
-dim-y := dim.o net_dim.o rdma_dim.o
+dimlib-objs := dim.o net_dim.o rdma_dim.o
diff --git a/lib/dim/dim.c b/lib/dim/dim.c
index e89aaf07bde5..83b65ac74d73 100644
--- a/lib/dim/dim.c
+++ b/lib/dim/dim.c
@@ -82,3 +82,6 @@ bool dim_calc_stats(struct dim_sample *start, struct dim_sample *end,
return true;
}
EXPORT_SYMBOL(dim_calc_stats);
+
+MODULE_DESCRIPTION("Dynamic Interrupt Moderation (DIM) library");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/lib/dim/net_dim.c b/lib/dim/net_dim.c
index 4e32f7aaac86..d7e7028e9b19 100644
--- a/lib/dim/net_dim.c
+++ b/lib/dim/net_dim.c
@@ -4,6 +4,7 @@
*/
#include <linux/dim.h>
+#include <linux/rtnetlink.h>
/*
* Net DIM profiles:
@@ -11,12 +12,6 @@
 * There are different sets of profiles for RX/TX CQs.
 * Each profile must be NET_DIM_PARAMS_NUM_PROFILES entries long.
*/
-#define NET_DIM_PARAMS_NUM_PROFILES 5
-#define NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE 256
-#define NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE 128
-#define NET_DIM_DEF_PROFILE_CQE 1
-#define NET_DIM_DEF_PROFILE_EQE 1
-
#define NET_DIM_RX_EQE_PROFILES { \
{.usec = 1, .pkts = NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE,}, \
{.usec = 8, .pkts = NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE,}, \
@@ -101,6 +96,143 @@ net_dim_get_def_tx_moderation(u8 cq_period_mode)
}
EXPORT_SYMBOL(net_dim_get_def_tx_moderation);
+int net_dim_init_irq_moder(struct net_device *dev, u8 profile_flags,
+ u8 coal_flags, u8 rx_mode, u8 tx_mode,
+ void (*rx_dim_work)(struct work_struct *work),
+ void (*tx_dim_work)(struct work_struct *work))
+{
+ struct dim_cq_moder *rxp = NULL, *txp;
+ struct dim_irq_moder *moder;
+ int len;
+
+ dev->irq_moder = kzalloc(sizeof(*dev->irq_moder), GFP_KERNEL);
+ if (!dev->irq_moder)
+ return -ENOMEM;
+
+ moder = dev->irq_moder;
+ len = NET_DIM_PARAMS_NUM_PROFILES * sizeof(*moder->rx_profile);
+
+ moder->coal_flags = coal_flags;
+ moder->profile_flags = profile_flags;
+
+ if (profile_flags & DIM_PROFILE_RX) {
+ moder->rx_dim_work = rx_dim_work;
+ moder->dim_rx_mode = rx_mode;
+ rxp = kmemdup(rx_profile[rx_mode], len, GFP_KERNEL);
+ if (!rxp)
+ goto free_moder;
+
+ rcu_assign_pointer(moder->rx_profile, rxp);
+ }
+
+ if (profile_flags & DIM_PROFILE_TX) {
+ moder->tx_dim_work = tx_dim_work;
+ moder->dim_tx_mode = tx_mode;
+ txp = kmemdup(tx_profile[tx_mode], len, GFP_KERNEL);
+ if (!txp)
+ goto free_rxp;
+
+ rcu_assign_pointer(moder->tx_profile, txp);
+ }
+
+ return 0;
+
+free_rxp:
+ kfree(rxp);
+free_moder:
+ kfree(moder);
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(net_dim_init_irq_moder);
+
+/* RTNL lock is held. */
+void net_dim_free_irq_moder(struct net_device *dev)
+{
+ struct dim_cq_moder *rxp, *txp;
+
+ if (!dev->irq_moder)
+ return;
+
+ rxp = rtnl_dereference(dev->irq_moder->rx_profile);
+ txp = rtnl_dereference(dev->irq_moder->tx_profile);
+
+ rcu_assign_pointer(dev->irq_moder->rx_profile, NULL);
+ rcu_assign_pointer(dev->irq_moder->tx_profile, NULL);
+
+ kfree_rcu(rxp, rcu);
+ kfree_rcu(txp, rcu);
+ kfree(dev->irq_moder);
+}
+EXPORT_SYMBOL(net_dim_free_irq_moder);
+
+void net_dim_setting(struct net_device *dev, struct dim *dim, bool is_tx)
+{
+ struct dim_irq_moder *irq_moder = dev->irq_moder;
+
+ if (!irq_moder)
+ return;
+
+ if (is_tx) {
+ INIT_WORK(&dim->work, irq_moder->tx_dim_work);
+ dim->mode = READ_ONCE(irq_moder->dim_tx_mode);
+ return;
+ }
+
+ INIT_WORK(&dim->work, irq_moder->rx_dim_work);
+ dim->mode = READ_ONCE(irq_moder->dim_rx_mode);
+}
+EXPORT_SYMBOL(net_dim_setting);
+
+void net_dim_work_cancel(struct dim *dim)
+{
+ cancel_work_sync(&dim->work);
+}
+EXPORT_SYMBOL(net_dim_work_cancel);
+
+struct dim_cq_moder net_dim_get_rx_irq_moder(struct net_device *dev,
+ struct dim *dim)
+{
+ struct dim_cq_moder res, *profile;
+
+ rcu_read_lock();
+ profile = rcu_dereference(dev->irq_moder->rx_profile);
+ res = profile[dim->profile_ix];
+ rcu_read_unlock();
+
+ res.cq_period_mode = dim->mode;
+
+ return res;
+}
+EXPORT_SYMBOL(net_dim_get_rx_irq_moder);
+
+struct dim_cq_moder net_dim_get_tx_irq_moder(struct net_device *dev,
+ struct dim *dim)
+{
+ struct dim_cq_moder res, *profile;
+
+ rcu_read_lock();
+ profile = rcu_dereference(dev->irq_moder->tx_profile);
+ res = profile[dim->profile_ix];
+ rcu_read_unlock();
+
+ res.cq_period_mode = dim->mode;
+
+ return res;
+}
+EXPORT_SYMBOL(net_dim_get_tx_irq_moder);
+
+void net_dim_set_rx_mode(struct net_device *dev, u8 rx_mode)
+{
+ WRITE_ONCE(dev->irq_moder->dim_rx_mode, rx_mode);
+}
+EXPORT_SYMBOL(net_dim_set_rx_mode);
+
+void net_dim_set_tx_mode(struct net_device *dev, u8 tx_mode)
+{
+ WRITE_ONCE(dev->irq_moder->dim_tx_mode, tx_mode);
+}
+EXPORT_SYMBOL(net_dim_set_tx_mode);
+
static int net_dim_step(struct dim *dim)
{
if (dim->tired == (NET_DIM_PARAMS_NUM_PROFILES * 2))
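
Editor's note: a hedged sketch of how a driver might adopt the per-netdev profile API added above. The work handlers, function name, and flag choices below are illustrative, not taken from this patch; DIM_PROFILE_RX/TX and the EQE period mode are the flags this series expects:

#include <linux/dim.h>
#include <linux/netdevice.h>

/* Hypothetical handlers that would push new moderation values to HW. */
static void my_rx_dim_work(struct work_struct *work) { }
static void my_tx_dim_work(struct work_struct *work) { }

static int my_driver_dim_setup(struct net_device *dev)
{
	/* Allocates dev->irq_moder and duplicates the default RX/TX
	 * profile tables so they can later be edited per device. */
	return net_dim_init_irq_moder(dev,
				      DIM_PROFILE_RX | DIM_PROFILE_TX,
				      0 /* coal_flags: none editable */,
				      DIM_CQ_PERIOD_MODE_START_FROM_EQE,
				      DIM_CQ_PERIOD_MODE_START_FROM_EQE,
				      my_rx_dim_work, my_tx_dim_work);
}

/* On teardown, under RTNL: net_dim_free_irq_moder(dev); */
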
diff --git a/lib/dump_stack.c b/lib/dump_stack.c
index 222c6d6c8281..1a996fbbf50a 100644
--- a/lib/dump_stack.c
+++ b/lib/dump_stack.c
@@ -54,14 +54,19 @@ void __init dump_stack_set_arch_desc(const char *fmt, ...)
*/
void dump_stack_print_info(const char *log_lvl)
{
- printk("%sCPU: %d PID: %d Comm: %.20s %s%s %s %.*s" BUILD_ID_FMT "\n",
- log_lvl, raw_smp_processor_id(), current->pid, current->comm,
+ printk("%sCPU: %d UID: %u PID: %d Comm: %.20s %s%s %s %.*s" BUILD_ID_FMT "\n",
+ log_lvl, raw_smp_processor_id(),
+ __kuid_val(current_real_cred()->euid),
+ current->pid, current->comm,
kexec_crash_loaded() ? "Kdump: loaded " : "",
print_tainted(),
init_utsname()->release,
(int)strcspn(init_utsname()->version, " "),
init_utsname()->version, BUILD_ID_VAL);
+ if (get_taint())
+ printk("%s%s\n", log_lvl, print_tainted_verbose());
+
if (dump_stack_arch_desc_str[0] != '\0')
printk("%sHardware name: %s\n",
log_lvl, dump_stack_arch_desc_str);
diff --git a/lib/dynamic_queue_limits.c b/lib/dynamic_queue_limits.c
index a1389db1c30a..e49deddd3de9 100644
--- a/lib/dynamic_queue_limits.c
+++ b/lib/dynamic_queue_limits.c
@@ -15,12 +15,10 @@
#define POSDIFF(A, B) ((int)((A) - (B)) > 0 ? (A) - (B) : 0)
#define AFTER_EQ(A, B) ((int)((A) - (B)) >= 0)
-static void dql_check_stall(struct dql *dql)
+static void dql_check_stall(struct dql *dql, unsigned short stall_thrs)
{
- unsigned short stall_thrs;
unsigned long now;
- stall_thrs = READ_ONCE(dql->stall_thrs);
if (!stall_thrs)
return;
@@ -86,9 +84,16 @@ void dql_completed(struct dql *dql, unsigned int count)
{
unsigned int inprogress, prev_inprogress, limit;
unsigned int ovlimit, completed, num_queued;
+ unsigned short stall_thrs;
bool all_prev_completed;
num_queued = READ_ONCE(dql->num_queued);
+ /* Read stall_thrs in advance since it belongs to the same (first)
+ * cache line as ->num_queued. This way, dql_check_stall() does not
+ * need to touch the first cache line again later, reducing the window
+ * of possible false sharing.
+ */
+ stall_thrs = READ_ONCE(dql->stall_thrs);
/* Can't complete more than what's in queue */
BUG_ON(count > num_queued - dql->num_completed);
@@ -178,7 +183,7 @@ void dql_completed(struct dql *dql, unsigned int count)
dql->num_completed = completed;
dql->prev_num_queued = num_queued;
- dql_check_stall(dql);
+ dql_check_stall(dql, stall_thrs);
}
EXPORT_SYMBOL(dql_completed);
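
Editor's note: the change above is a generic false-sharing mitigation: batch the READ_ONCE() loads of every field you will need from the producer-written cache line in one place, then pass the values down by argument. A standalone sketch of the pattern, with illustrative types and stub consumers:

#include <linux/compiler.h>

struct hot_line {
	unsigned int num_queued;    /* written by the producer CPU */
	unsigned short stall_thrs;  /* lives on the same cache line */
};

static void do_completion_work(unsigned int queued) { }
static void check_stall(unsigned short thrs) { }

static void consumer(struct hot_line *h)
{
	/* Batch both loads while the line is (likely) already here... */
	unsigned int queued = READ_ONCE(h->num_queued);
	unsigned short thrs = READ_ONCE(h->stall_thrs);

	do_completion_work(queued);
	/* ...so this late check never touches *h again and cannot
	 * bounce the line if the producer wrote to it meanwhile. */
	check_stall(thrs);
}
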
diff --git a/lib/find_bit.c b/lib/find_bit.c
index 32f99e9a670e..0836bb3d76c5 100644
--- a/lib/find_bit.c
+++ b/lib/find_bit.c
@@ -87,7 +87,7 @@ out: \
if (sz % BITS_PER_LONG) \
tmp = (FETCH) & BITMAP_LAST_WORD_MASK(sz); \
found: \
- sz = min(idx * BITS_PER_LONG + fns(tmp, nr), sz); \
+ sz = idx * BITS_PER_LONG + fns(tmp, nr); \
out: \
sz; \
})
@@ -116,6 +116,18 @@ unsigned long _find_first_and_bit(const unsigned long *addr1,
EXPORT_SYMBOL(_find_first_and_bit);
#endif
+/*
+ * Find the first bit set in all three memory regions.
+ */
+unsigned long _find_first_and_and_bit(const unsigned long *addr1,
+ const unsigned long *addr2,
+ const unsigned long *addr3,
+ unsigned long size)
+{
+ return FIND_FIRST_BIT(addr1[idx] & addr2[idx] & addr3[idx], /* nop */, size);
+}
+EXPORT_SYMBOL(_find_first_and_and_bit);
+
#ifndef find_first_zero_bit
/*
* Find the first cleared bit in a memory region.
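
Editor's note: a hedged usage sketch of the new three-way helper, assuming the find_first_and_and_bit() wrapper that accompanies _find_first_and_and_bit() in this series; the bitmap contents are illustrative:

#include <linux/bitmap.h>
#include <linux/find.h>

static unsigned long first_common_bit(void)
{
	DECLARE_BITMAP(a, 64);
	DECLARE_BITMAP(b, 64);
	DECLARE_BITMAP(c, 64);

	bitmap_fill(a, 64);
	bitmap_zero(b, 64);
	bitmap_zero(c, 64);
	__set_bit(5, b);
	__set_bit(9, b);
	__set_bit(5, c);

	/* Bit 5 is the only bit set in a, b and c, so this returns 5;
	 * with no common bit it would return >= 64 (the size). */
	return find_first_and_and_bit(a, b, c, 64);
}
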
diff --git a/lib/find_bit_benchmark.c b/lib/find_bit_benchmark.c
index d3fb09e6eff1..402e160e7186 100644
--- a/lib/find_bit_benchmark.c
+++ b/lib/find_bit_benchmark.c
@@ -194,4 +194,5 @@ static int __init find_bit_test(void)
}
module_init(find_bit_test);
+MODULE_DESCRIPTION("Test for find_*_bit functions");
MODULE_LICENSE("GPL");
diff --git a/lib/fonts/Kconfig b/lib/fonts/Kconfig
index 7e945fdcbf11..3ac26bdbc3ff 100644
--- a/lib/fonts/Kconfig
+++ b/lib/fonts/Kconfig
@@ -10,7 +10,7 @@ if FONT_SUPPORT
config FONTS
bool "Select compiled-in fonts"
- depends on FRAMEBUFFER_CONSOLE || STI_CONSOLE
+ depends on FRAMEBUFFER_CONSOLE || STI_CONSOLE || DRM_PANIC
help
Say Y here if you would like to use fonts other than the default
	  your frame buffer console usually uses.
@@ -23,7 +23,7 @@ config FONTS
config FONT_8x8
bool "VGA 8x8 font" if FONTS
- depends on FRAMEBUFFER_CONSOLE || STI_CONSOLE
+ depends on FRAMEBUFFER_CONSOLE || STI_CONSOLE || DRM_PANIC
default y if !SPARC && !FONTS
help
This is the "high resolution" font for the VGA frame buffer (the one
@@ -46,7 +46,7 @@ config FONT_8x16
config FONT_6x11
bool "Mac console 6x11 font (not supported by all drivers)" if FONTS
- depends on FRAMEBUFFER_CONSOLE || STI_CONSOLE
+ depends on FRAMEBUFFER_CONSOLE || STI_CONSOLE || DRM_PANIC
default y if !SPARC && !FONTS && MAC
help
Small console font with Macintosh-style high-half glyphs. Some Mac
@@ -54,7 +54,7 @@ config FONT_6x11
config FONT_7x14
bool "console 7x14 font (not supported by all drivers)" if FONTS
- depends on FRAMEBUFFER_CONSOLE
+ depends on FRAMEBUFFER_CONSOLE || DRM_PANIC
help
Console font with characters just a bit smaller than the default.
If the standard 8x16 font is a little too big for you, say Y.
@@ -62,7 +62,7 @@ config FONT_7x14
config FONT_PEARL_8x8
bool "Pearl (old m68k) console 8x8 font" if FONTS
- depends on FRAMEBUFFER_CONSOLE
+ depends on FRAMEBUFFER_CONSOLE || DRM_PANIC
default y if !SPARC && !FONTS && AMIGA
help
Small console font with PC-style control-character and high-half
@@ -70,7 +70,7 @@ config FONT_PEARL_8x8
config FONT_ACORN_8x8
bool "Acorn console 8x8 font" if FONTS
- depends on FRAMEBUFFER_CONSOLE
+ depends on FRAMEBUFFER_CONSOLE || DRM_PANIC
default y if !SPARC && !FONTS && ARM && ARCH_ACORN
help
Small console font with PC-style control characters and high-half
@@ -90,7 +90,7 @@ config FONT_6x10
config FONT_10x18
bool "console 10x18 font (not supported by all drivers)" if FONTS
- depends on FRAMEBUFFER_CONSOLE
+ depends on FRAMEBUFFER_CONSOLE || DRM_PANIC
help
This is a high resolution console font for machines with very
big letters. It fits between the sun 12x22 and the normal 8x16 font.
@@ -105,7 +105,8 @@ config FONT_SUN8x16
config FONT_SUN12x22
bool "Sparc console 12x22 font (not supported by all drivers)"
- depends on FRAMEBUFFER_CONSOLE && (!SPARC && FONTS || SPARC)
+ depends on FRAMEBUFFER_CONSOLE || DRM_PANIC
+ depends on !SPARC && FONTS
help
This is the high resolution console font for Sun machines with very
big letters (like the letters used in the SPARC PROM). If the
@@ -113,7 +114,8 @@ config FONT_SUN12x22
config FONT_TER16x32
bool "Terminus 16x32 font (not supported by all drivers)"
- depends on FRAMEBUFFER_CONSOLE && (!SPARC && FONTS || SPARC)
+ depends on FRAMEBUFFER_CONSOLE || DRM_PANIC
+ depends on !SPARC && FONTS || SPARC
help
Terminus Font is a clean, fixed width bitmap font, designed
for long (8 and more hours per day) work with computers.
@@ -122,7 +124,7 @@ config FONT_TER16x32
config FONT_6x8
bool "OLED 6x8 font" if FONTS
- depends on FRAMEBUFFER_CONSOLE
+ depends on FRAMEBUFFER_CONSOLE || DRM_PANIC
help
This font is useful for small displays (OLED).
diff --git a/lib/fortify_kunit.c b/lib/fortify_kunit.c
index 493ec02dd5b3..f9ad60a9c7bd 100644
--- a/lib/fortify_kunit.c
+++ b/lib/fortify_kunit.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Runtime test cases for CONFIG_FORTIFY_SOURCE. For testing memcpy(),
- * see FORTIFY_MEM_* tests in LKDTM (drivers/misc/lkdtm/fortify.c).
+ * Runtime test cases for CONFIG_FORTIFY_SOURCE. For additional memcpy()
+ * testing see FORTIFY_MEM_* tests in LKDTM (drivers/misc/lkdtm/fortify.c).
*
* For corner cases with UBSAN, try testing with:
*
@@ -15,14 +15,31 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+/* We don't need to fill dmesg with the fortify WARNs during testing. */
+#ifdef DEBUG
+# define FORTIFY_REPORT_KUNIT(x...) __fortify_report(x)
+# define FORTIFY_WARN_KUNIT(x...) WARN_ONCE(x)
+#else
+# define FORTIFY_REPORT_KUNIT(x...) do { } while (0)
+# define FORTIFY_WARN_KUNIT(x...) do { } while (0)
+#endif
+
/* Redefine fortify_panic() to track failures. */
void fortify_add_kunit_error(int write);
#define fortify_panic(func, write, avail, size, retfail) do { \
- __fortify_report(FORTIFY_REASON(func, write), avail, size); \
+ FORTIFY_REPORT_KUNIT(FORTIFY_REASON(func, write), avail, size); \
fortify_add_kunit_error(write); \
return (retfail); \
} while (0)
+/* Redefine fortify_warn_once() to track memcpy() failures. */
+#define fortify_warn_once(chk_func, x...) do { \
+ bool __result = chk_func; \
+ FORTIFY_WARN_KUNIT(__result, x); \
+ if (__result) \
+ fortify_add_kunit_error(1); \
+} while (0)
+
#include <kunit/device.h>
#include <kunit/test.h>
#include <kunit/test-bug.h>
@@ -64,7 +81,7 @@ void fortify_add_kunit_error(int write)
kunit_put_resource(resource);
}
-static void known_sizes_test(struct kunit *test)
+static void fortify_test_known_sizes(struct kunit *test)
{
KUNIT_EXPECT_EQ(test, __compiletime_strlen("88888888"), 8);
KUNIT_EXPECT_EQ(test, __compiletime_strlen(array_of_10), 10);
@@ -97,7 +114,7 @@ static noinline size_t want_minus_one(int pick)
return __compiletime_strlen(str);
}
-static void control_flow_split_test(struct kunit *test)
+static void fortify_test_control_flow_split(struct kunit *test)
{
KUNIT_EXPECT_EQ(test, want_minus_one(pick), SIZE_MAX);
}
@@ -173,11 +190,11 @@ static volatile size_t unknown_size = 50;
#endif
#define DEFINE_ALLOC_SIZE_TEST_PAIR(allocator) \
-static void alloc_size_##allocator##_const_test(struct kunit *test) \
+static void fortify_test_alloc_size_##allocator##_const(struct kunit *test) \
{ \
CONST_TEST_BODY(TEST_##allocator); \
} \
-static void alloc_size_##allocator##_dynamic_test(struct kunit *test) \
+static void fortify_test_alloc_size_##allocator##_dynamic(struct kunit *test) \
{ \
DYNAMIC_TEST_BODY(TEST_##allocator); \
}
@@ -217,11 +234,6 @@ static void alloc_size_##allocator##_dynamic_test(struct kunit *test) \
checker(expected_size, \
kmalloc_array_node(alloc_size, 1, gfp, NUMA_NO_NODE), \
kfree(p)); \
- checker(expected_size, __kmalloc(alloc_size, gfp), \
- kfree(p)); \
- checker(expected_size, \
- __kmalloc_node(alloc_size, gfp, NUMA_NO_NODE), \
- kfree(p)); \
\
orig = kmalloc(alloc_size, gfp); \
KUNIT_EXPECT_TRUE(test, orig != NULL); \
@@ -267,28 +279,28 @@ DEFINE_ALLOC_SIZE_TEST_PAIR(vmalloc)
\
checker((expected_pages) * PAGE_SIZE, \
kvmalloc((alloc_pages) * PAGE_SIZE, gfp), \
- vfree(p)); \
+ kvfree(p)); \
checker((expected_pages) * PAGE_SIZE, \
kvmalloc_node((alloc_pages) * PAGE_SIZE, gfp, NUMA_NO_NODE), \
- vfree(p)); \
+ kvfree(p)); \
checker((expected_pages) * PAGE_SIZE, \
kvzalloc((alloc_pages) * PAGE_SIZE, gfp), \
- vfree(p)); \
+ kvfree(p)); \
checker((expected_pages) * PAGE_SIZE, \
kvzalloc_node((alloc_pages) * PAGE_SIZE, gfp, NUMA_NO_NODE), \
- vfree(p)); \
+ kvfree(p)); \
checker((expected_pages) * PAGE_SIZE, \
kvcalloc(1, (alloc_pages) * PAGE_SIZE, gfp), \
- vfree(p)); \
+ kvfree(p)); \
checker((expected_pages) * PAGE_SIZE, \
kvcalloc((alloc_pages) * PAGE_SIZE, 1, gfp), \
- vfree(p)); \
+ kvfree(p)); \
checker((expected_pages) * PAGE_SIZE, \
kvmalloc_array(1, (alloc_pages) * PAGE_SIZE, gfp), \
- vfree(p)); \
+ kvfree(p)); \
checker((expected_pages) * PAGE_SIZE, \
kvmalloc_array((alloc_pages) * PAGE_SIZE, 1, gfp), \
- vfree(p)); \
+ kvfree(p)); \
\
prev_size = (expected_pages) * PAGE_SIZE; \
orig = kvmalloc(prev_size, gfp); \
@@ -346,6 +358,31 @@ DEFINE_ALLOC_SIZE_TEST_PAIR(kvmalloc)
} while (0)
DEFINE_ALLOC_SIZE_TEST_PAIR(devm_kmalloc)
+static const char * const test_strs[] = {
+ "",
+ "Hello there",
+ "A longer string, just for variety",
+};
+
+#define TEST_realloc(checker) do { \
+ gfp_t gfp = GFP_KERNEL; \
+ size_t len; \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(test_strs); i++) { \
+ len = strlen(test_strs[i]); \
+ KUNIT_EXPECT_EQ(test, __builtin_constant_p(len), 0); \
+ checker(len, kmemdup_array(test_strs[i], 1, len, gfp), \
+ kfree(p)); \
+ checker(len, kmemdup(test_strs[i], len, gfp), \
+ kfree(p)); \
+ } \
+} while (0)
+static void fortify_test_realloc_size(struct kunit *test)
+{
+ TEST_realloc(check_dynamic);
+}
+
/*
* We can't have an array at the end of a structure or else
* builds without -fstrict-flex-arrays=3 will report them as
@@ -361,7 +398,7 @@ struct fortify_padding {
/* Force compiler into not being able to resolve size at compile-time. */
static volatile int unconst;
-static void strlen_test(struct kunit *test)
+static void fortify_test_strlen(struct kunit *test)
{
struct fortify_padding pad = { };
int i, end = sizeof(pad.buf) - 1;
@@ -384,7 +421,7 @@ static void strlen_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
}
-static void strnlen_test(struct kunit *test)
+static void fortify_test_strnlen(struct kunit *test)
{
struct fortify_padding pad = { };
int i, end = sizeof(pad.buf) - 1;
@@ -422,7 +459,7 @@ static void strnlen_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
}
-static void strcpy_test(struct kunit *test)
+static void fortify_test_strcpy(struct kunit *test)
{
struct fortify_padding pad = { };
char src[sizeof(pad.buf) + 1] = { };
@@ -480,7 +517,7 @@ static void strcpy_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
}
-static void strncpy_test(struct kunit *test)
+static void fortify_test_strncpy(struct kunit *test)
{
struct fortify_padding pad = { };
char src[] = "Copy me fully into a small buffer and I will overflow!";
@@ -539,7 +576,7 @@ static void strncpy_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
}
-static void strscpy_test(struct kunit *test)
+static void fortify_test_strscpy(struct kunit *test)
{
struct fortify_padding pad = { };
char src[] = "Copy me fully into a small buffer and I will overflow!";
@@ -596,7 +633,7 @@ static void strscpy_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
}
-static void strcat_test(struct kunit *test)
+static void fortify_test_strcat(struct kunit *test)
{
struct fortify_padding pad = { };
char src[sizeof(pad.buf) / 2] = { };
@@ -653,7 +690,7 @@ static void strcat_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
}
-static void strncat_test(struct kunit *test)
+static void fortify_test_strncat(struct kunit *test)
{
struct fortify_padding pad = { };
char src[sizeof(pad.buf)] = { };
@@ -726,7 +763,7 @@ static void strncat_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
}
-static void strlcat_test(struct kunit *test)
+static void fortify_test_strlcat(struct kunit *test)
{
struct fortify_padding pad = { };
char src[sizeof(pad.buf)] = { };
@@ -811,7 +848,74 @@ static void strlcat_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
}
-static void memscan_test(struct kunit *test)
+/* Check for 0-sized arrays... */
+struct fortify_zero_sized {
+ unsigned long bytes_before;
+ char buf[0];
+ unsigned long bytes_after;
+};
+
+#define __fortify_test(memfunc) \
+static void fortify_test_##memfunc(struct kunit *test) \
+{ \
+ struct fortify_zero_sized zero = { }; \
+ struct fortify_padding pad = { }; \
+ char srcA[sizeof(pad.buf) + 2]; \
+ char srcB[sizeof(pad.buf) + 2]; \
+ size_t len = sizeof(pad.buf) + unconst; \
+ \
+ memset(srcA, 'A', sizeof(srcA)); \
+ KUNIT_ASSERT_EQ(test, srcA[0], 'A'); \
+ memset(srcB, 'B', sizeof(srcB)); \
+ KUNIT_ASSERT_EQ(test, srcB[0], 'B'); \
+ \
+ memfunc(pad.buf, srcA, 0 + unconst); \
+ KUNIT_EXPECT_EQ(test, pad.buf[0], '\0'); \
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
+ memfunc(pad.buf + 1, srcB, 1 + unconst); \
+ KUNIT_EXPECT_EQ(test, pad.buf[0], '\0'); \
+ KUNIT_EXPECT_EQ(test, pad.buf[1], 'B'); \
+ KUNIT_EXPECT_EQ(test, pad.buf[2], '\0'); \
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
+ memfunc(pad.buf, srcA, 1 + unconst); \
+ KUNIT_EXPECT_EQ(test, pad.buf[0], 'A'); \
+ KUNIT_EXPECT_EQ(test, pad.buf[1], 'B'); \
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
+ memfunc(pad.buf, srcA, len - 1); \
+ KUNIT_EXPECT_EQ(test, pad.buf[1], 'A'); \
+ KUNIT_EXPECT_EQ(test, pad.buf[len - 1], '\0'); \
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
+ memfunc(pad.buf, srcA, len); \
+ KUNIT_EXPECT_EQ(test, pad.buf[1], 'A'); \
+ KUNIT_EXPECT_EQ(test, pad.buf[len - 1], 'A'); \
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0); \
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
+ memfunc(pad.buf, srcA, len + 1); \
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1); \
+ memfunc(pad.buf + 1, srcB, len); \
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2); \
+ \
+ /* Reset error counter. */ \
+ fortify_write_overflows = 0; \
+ /* Copy nothing into nothing: no errors. */ \
+ memfunc(zero.buf, srcB, 0 + unconst); \
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
+ memfunc(zero.buf, srcB, 1 + unconst); \
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1); \
+}
+__fortify_test(memcpy)
+__fortify_test(memmove)
+
+static void fortify_test_memscan(struct kunit *test)
{
char haystack[] = "Where oh where is my memory range?";
char *mem = haystack + strlen("Where oh where is ");
@@ -830,7 +934,7 @@ static void memscan_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
}
-static void memchr_test(struct kunit *test)
+static void fortify_test_memchr(struct kunit *test)
{
char haystack[] = "Where oh where is my memory range?";
char *mem = haystack + strlen("Where oh where is ");
@@ -849,7 +953,7 @@ static void memchr_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
}
-static void memchr_inv_test(struct kunit *test)
+static void fortify_test_memchr_inv(struct kunit *test)
{
char haystack[] = "Where oh where is my memory range?";
char *mem = haystack + 1;
@@ -869,7 +973,7 @@ static void memchr_inv_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
}
-static void memcmp_test(struct kunit *test)
+static void fortify_test_memcmp(struct kunit *test)
{
char one[] = "My mind is going ...";
char two[] = "My mind is going ... I can feel it.";
@@ -880,7 +984,7 @@ static void memcmp_test(struct kunit *test)
KUNIT_ASSERT_EQ(test, memcmp(one, two, one_len), 0);
KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
/* Still in bounds, but no longer matching. */
- KUNIT_ASSERT_EQ(test, memcmp(one, two, one_len + 1), -32);
+ KUNIT_ASSERT_LT(test, memcmp(one, two, one_len + 1), 0);
KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
/* Catch too-large ranges. */
@@ -891,7 +995,7 @@ static void memcmp_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
}
-static void kmemdup_test(struct kunit *test)
+static void fortify_test_kmemdup(struct kunit *test)
{
char src[] = "I got Doom running on it!";
char *copy;
@@ -917,19 +1021,19 @@ static void kmemdup_test(struct kunit *test)
/* Out of bounds by 1 byte. */
copy = kmemdup(src, len + 1, GFP_KERNEL);
- KUNIT_EXPECT_NULL(test, copy);
+ KUNIT_EXPECT_PTR_EQ(test, copy, ZERO_SIZE_PTR);
KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
kfree(copy);
/* Way out of bounds. */
copy = kmemdup(src, len * 2, GFP_KERNEL);
- KUNIT_EXPECT_NULL(test, copy);
+ KUNIT_EXPECT_PTR_EQ(test, copy, ZERO_SIZE_PTR);
KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
kfree(copy);
/* Starting offset causing out of bounds. */
copy = kmemdup(src + 1, len, GFP_KERNEL);
- KUNIT_EXPECT_NULL(test, copy);
+ KUNIT_EXPECT_PTR_EQ(test, copy, ZERO_SIZE_PTR);
KUNIT_EXPECT_EQ(test, fortify_read_overflows, 3);
kfree(copy);
}
@@ -951,31 +1055,33 @@ static int fortify_test_init(struct kunit *test)
}
static struct kunit_case fortify_test_cases[] = {
- KUNIT_CASE(known_sizes_test),
- KUNIT_CASE(control_flow_split_test),
- KUNIT_CASE(alloc_size_kmalloc_const_test),
- KUNIT_CASE(alloc_size_kmalloc_dynamic_test),
- KUNIT_CASE(alloc_size_vmalloc_const_test),
- KUNIT_CASE(alloc_size_vmalloc_dynamic_test),
- KUNIT_CASE(alloc_size_kvmalloc_const_test),
- KUNIT_CASE(alloc_size_kvmalloc_dynamic_test),
- KUNIT_CASE(alloc_size_devm_kmalloc_const_test),
- KUNIT_CASE(alloc_size_devm_kmalloc_dynamic_test),
- KUNIT_CASE(strlen_test),
- KUNIT_CASE(strnlen_test),
- KUNIT_CASE(strcpy_test),
- KUNIT_CASE(strncpy_test),
- KUNIT_CASE(strscpy_test),
- KUNIT_CASE(strcat_test),
- KUNIT_CASE(strncat_test),
- KUNIT_CASE(strlcat_test),
+ KUNIT_CASE(fortify_test_known_sizes),
+ KUNIT_CASE(fortify_test_control_flow_split),
+ KUNIT_CASE(fortify_test_alloc_size_kmalloc_const),
+ KUNIT_CASE(fortify_test_alloc_size_kmalloc_dynamic),
+ KUNIT_CASE(fortify_test_alloc_size_vmalloc_const),
+ KUNIT_CASE(fortify_test_alloc_size_vmalloc_dynamic),
+ KUNIT_CASE(fortify_test_alloc_size_kvmalloc_const),
+ KUNIT_CASE(fortify_test_alloc_size_kvmalloc_dynamic),
+ KUNIT_CASE(fortify_test_alloc_size_devm_kmalloc_const),
+ KUNIT_CASE(fortify_test_alloc_size_devm_kmalloc_dynamic),
+ KUNIT_CASE(fortify_test_realloc_size),
+ KUNIT_CASE(fortify_test_strlen),
+ KUNIT_CASE(fortify_test_strnlen),
+ KUNIT_CASE(fortify_test_strcpy),
+ KUNIT_CASE(fortify_test_strncpy),
+ KUNIT_CASE(fortify_test_strscpy),
+ KUNIT_CASE(fortify_test_strcat),
+ KUNIT_CASE(fortify_test_strncat),
+ KUNIT_CASE(fortify_test_strlcat),
/* skip memset: performs bounds checking on whole structs */
- /* skip memcpy: still using warn-and-overwrite instead of hard-fail */
- KUNIT_CASE(memscan_test),
- KUNIT_CASE(memchr_test),
- KUNIT_CASE(memchr_inv_test),
- KUNIT_CASE(memcmp_test),
- KUNIT_CASE(kmemdup_test),
+ KUNIT_CASE(fortify_test_memcpy),
+ KUNIT_CASE(fortify_test_memmove),
+ KUNIT_CASE(fortify_test_memscan),
+ KUNIT_CASE(fortify_test_memchr),
+ KUNIT_CASE(fortify_test_memchr_inv),
+ KUNIT_CASE(fortify_test_memcmp),
+ KUNIT_CASE(fortify_test_kmemdup),
{}
};
@@ -987,4 +1093,5 @@ static struct kunit_suite fortify_test_suite = {
kunit_test_suite(fortify_test_suite);
+MODULE_DESCRIPTION("Runtime test cases for CONFIG_FORTIFY_SOURCE");
MODULE_LICENSE("GPL");
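
Editor's note: the kmemdup() expectations above changed from NULL to ZERO_SIZE_PTR because a fortified out-of-bounds kmemdup() now degrades to a zero-length allocation instead of failing outright. A minimal sketch of the ZERO_SIZE_PTR contract, which is a long-standing kernel-wide convention rather than anything this patch introduces:

#include <linux/bug.h>
#include <linux/slab.h>

static void zero_size_alloc_demo(void)
{
	/* Zero-length allocations return the non-NULL ZERO_SIZE_PTR
	 * cookie: it must never be dereferenced, but kfree() accepts
	 * it and treats it as a no-op. */
	void *p = kmalloc(0, GFP_KERNEL);

	WARN_ON(p != ZERO_SIZE_PTR);
	WARN_ON(!ZERO_OR_NULL_PTR(p));
	kfree(p); /* no-op for ZERO_SIZE_PTR */
}
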
diff --git a/lib/hashtable_test.c b/lib/hashtable_test.c
index 1d1b3288dee2..3521de6bad15 100644
--- a/lib/hashtable_test.c
+++ b/lib/hashtable_test.c
@@ -314,4 +314,5 @@ static struct kunit_suite hashtable_test_module = {
kunit_test_suites(&hashtable_test_module);
+MODULE_DESCRIPTION("KUnit test for the Kernel Hashtable structures");
MODULE_LICENSE("GPL");
diff --git a/lib/iomap_copy.c b/lib/iomap_copy.c
index 5de7c04e05ef..2fd5712fb7c0 100644
--- a/lib/iomap_copy.c
+++ b/lib/iomap_copy.c
@@ -16,9 +16,8 @@
* time. Order of access is not guaranteed, nor is a memory barrier
* performed afterwards.
*/
-void __attribute__((weak)) __iowrite32_copy(void __iomem *to,
- const void *from,
- size_t count)
+#ifndef __iowrite32_copy
+void __iowrite32_copy(void __iomem *to, const void *from, size_t count)
{
u32 __iomem *dst = to;
const u32 *src = from;
@@ -28,6 +27,7 @@ void __attribute__((weak)) __iowrite32_copy(void __iomem *to,
__raw_writel(*src++, dst++);
}
EXPORT_SYMBOL_GPL(__iowrite32_copy);
+#endif
/**
* __ioread32_copy - copy data from MMIO space, in 32-bit units
@@ -60,9 +60,8 @@ EXPORT_SYMBOL_GPL(__ioread32_copy);
* time. Order of access is not guaranteed, nor is a memory barrier
* performed afterwards.
*/
-void __attribute__((weak)) __iowrite64_copy(void __iomem *to,
- const void *from,
- size_t count)
+#ifndef __iowrite64_copy
+void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
{
#ifdef CONFIG_64BIT
u64 __iomem *dst = to;
@@ -75,5 +74,5 @@ void __attribute__((weak)) __iowrite64_copy(void __iomem *to,
__iowrite32_copy(to, from, count * 2);
#endif
}
-
EXPORT_SYMBOL_GPL(__iowrite64_copy);
+#endif
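
Editor's note: replacing the weak symbols with #ifndef guards moves the override from link time to compile time: an architecture with a faster implementation defines the name in its own headers and the generic C fallback is never built at all, instead of being emitted and then discarded by the linker. A sketch of the arch side using the usual self-#define idiom (the body is hypothetical):

/* In a hypothetical <asm/io.h>: */
#define __iowrite64_copy __iowrite64_copy
static inline void __iowrite64_copy(void __iomem *to, const void *from,
				    size_t count)
{
	/* arch-specific wide-store loop would go here */
}
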
diff --git a/lib/is_signed_type_kunit.c b/lib/is_signed_type_kunit.c
index 0a7f6ae62839..88adbe813f3a 100644
--- a/lib/is_signed_type_kunit.c
+++ b/lib/is_signed_type_kunit.c
@@ -46,4 +46,5 @@ static struct kunit_suite is_signed_type_test_suite = {
kunit_test_suite(is_signed_type_test_suite);
+MODULE_DESCRIPTION("is_signed_type() KUnit test suite");
MODULE_LICENSE("Dual MIT/GPL");
diff --git a/lib/kfifo.c b/lib/kfifo.c
index 12f5a347aa13..a8b2eed90599 100644
--- a/lib/kfifo.c
+++ b/lib/kfifo.c
@@ -5,13 +5,14 @@
* Copyright (C) 2009/2010 Stefani Seibold <stefani@seibold.net>
*/
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/slab.h>
+#include <linux/dma-mapping.h>
#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/kfifo.h>
#include <linux/log2.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
#include <linux/uaccess.h>
-#include <linux/kfifo.h>
/*
* internal helper to calculate the unused elements in a fifo
@@ -163,6 +164,19 @@ unsigned int __kfifo_out_peek(struct __kfifo *fifo,
}
EXPORT_SYMBOL(__kfifo_out_peek);
+unsigned int __kfifo_out_linear(struct __kfifo *fifo,
+ unsigned int *tail, unsigned int n)
+{
+ unsigned int size = fifo->mask + 1;
+ unsigned int off = fifo->out & fifo->mask;
+
+ if (tail)
+ *tail = off;
+
+ return min3(n, fifo->in - fifo->out, size - off);
+}
+EXPORT_SYMBOL(__kfifo_out_linear);
+
unsigned int __kfifo_out(struct __kfifo *fifo,
void *buf, unsigned int len)
{
@@ -292,51 +306,31 @@ int __kfifo_to_user(struct __kfifo *fifo, void __user *to,
}
EXPORT_SYMBOL(__kfifo_to_user);
-static int setup_sgl_buf(struct scatterlist *sgl, void *buf,
- int nents, unsigned int len)
+static unsigned int setup_sgl_buf(struct __kfifo *fifo, struct scatterlist *sgl,
+ unsigned int data_offset, int nents,
+ unsigned int len, dma_addr_t dma)
{
- int n;
- unsigned int l;
- unsigned int off;
- struct page *page;
+ const void *buf = fifo->data + data_offset;
- if (!nents)
+ if (!nents || !len)
return 0;
- if (!len)
- return 0;
+ sg_set_buf(sgl, buf, len);
- n = 0;
- page = virt_to_page(buf);
- off = offset_in_page(buf);
- l = 0;
-
- while (len >= l + PAGE_SIZE - off) {
- struct page *npage;
-
- l += PAGE_SIZE;
- buf += PAGE_SIZE;
- npage = virt_to_page(buf);
- if (page_to_phys(page) != page_to_phys(npage) - l) {
- sg_set_page(sgl, page, l - off, off);
- sgl = sg_next(sgl);
- if (++n == nents || sgl == NULL)
- return n;
- page = npage;
- len -= l - off;
- l = off = 0;
- }
+ if (dma != DMA_MAPPING_ERROR) {
+ sg_dma_address(sgl) = dma + data_offset;
+ sg_dma_len(sgl) = len;
}
- sg_set_page(sgl, page, len, off);
- return n + 1;
+
+ return 1;
}
static unsigned int setup_sgl(struct __kfifo *fifo, struct scatterlist *sgl,
- int nents, unsigned int len, unsigned int off)
+ int nents, unsigned int len, unsigned int off, dma_addr_t dma)
{
unsigned int size = fifo->mask + 1;
unsigned int esize = fifo->esize;
- unsigned int l;
+ unsigned int len_to_end;
unsigned int n;
off &= fifo->mask;
@@ -345,16 +339,17 @@ static unsigned int setup_sgl(struct __kfifo *fifo, struct scatterlist *sgl,
size *= esize;
len *= esize;
}
- l = min(len, size - off);
+ len_to_end = min(len, size - off);
- n = setup_sgl_buf(sgl, fifo->data + off, nents, l);
- n += setup_sgl_buf(sgl + n, fifo->data, nents - n, len - l);
+ n = setup_sgl_buf(fifo, sgl, off, nents, len_to_end, dma);
+ n += setup_sgl_buf(fifo, sgl + n, 0, nents - n, len - len_to_end, dma);
return n;
}
unsigned int __kfifo_dma_in_prepare(struct __kfifo *fifo,
- struct scatterlist *sgl, int nents, unsigned int len)
+ struct scatterlist *sgl, int nents, unsigned int len,
+ dma_addr_t dma)
{
unsigned int l;
@@ -362,12 +357,13 @@ unsigned int __kfifo_dma_in_prepare(struct __kfifo *fifo,
if (len > l)
len = l;
- return setup_sgl(fifo, sgl, nents, len, fifo->in);
+ return setup_sgl(fifo, sgl, nents, len, fifo->in, dma);
}
EXPORT_SYMBOL(__kfifo_dma_in_prepare);
unsigned int __kfifo_dma_out_prepare(struct __kfifo *fifo,
- struct scatterlist *sgl, int nents, unsigned int len)
+ struct scatterlist *sgl, int nents, unsigned int len,
+ dma_addr_t dma)
{
unsigned int l;
@@ -375,7 +371,7 @@ unsigned int __kfifo_dma_out_prepare(struct __kfifo *fifo,
if (len > l)
len = l;
- return setup_sgl(fifo, sgl, nents, len, fifo->out);
+ return setup_sgl(fifo, sgl, nents, len, fifo->out, dma);
}
EXPORT_SYMBOL(__kfifo_dma_out_prepare);
@@ -473,6 +469,19 @@ unsigned int __kfifo_out_peek_r(struct __kfifo *fifo, void *buf,
}
EXPORT_SYMBOL(__kfifo_out_peek_r);
+unsigned int __kfifo_out_linear_r(struct __kfifo *fifo,
+ unsigned int *tail, unsigned int n, size_t recsize)
+{
+ if (fifo->in == fifo->out)
+ return 0;
+
+ if (tail)
+ *tail = fifo->out + recsize;
+
+ return min(n, __kfifo_peek_n(fifo, recsize));
+}
+EXPORT_SYMBOL(__kfifo_out_linear_r);
+
unsigned int __kfifo_out_r(struct __kfifo *fifo, void *buf,
unsigned int len, size_t recsize)
{
@@ -546,7 +555,8 @@ int __kfifo_to_user_r(struct __kfifo *fifo, void __user *to,
EXPORT_SYMBOL(__kfifo_to_user_r);
unsigned int __kfifo_dma_in_prepare_r(struct __kfifo *fifo,
- struct scatterlist *sgl, int nents, unsigned int len, size_t recsize)
+ struct scatterlist *sgl, int nents, unsigned int len, size_t recsize,
+ dma_addr_t dma)
{
BUG_ON(!nents);
@@ -555,7 +565,7 @@ unsigned int __kfifo_dma_in_prepare_r(struct __kfifo *fifo,
if (len + recsize > kfifo_unused(fifo))
return 0;
- return setup_sgl(fifo, sgl, nents, len, fifo->in + recsize);
+ return setup_sgl(fifo, sgl, nents, len, fifo->in + recsize, dma);
}
EXPORT_SYMBOL(__kfifo_dma_in_prepare_r);
@@ -569,7 +579,8 @@ void __kfifo_dma_in_finish_r(struct __kfifo *fifo,
EXPORT_SYMBOL(__kfifo_dma_in_finish_r);
unsigned int __kfifo_dma_out_prepare_r(struct __kfifo *fifo,
- struct scatterlist *sgl, int nents, unsigned int len, size_t recsize)
+ struct scatterlist *sgl, int nents, unsigned int len, size_t recsize,
+ dma_addr_t dma)
{
BUG_ON(!nents);
@@ -578,15 +589,7 @@ unsigned int __kfifo_dma_out_prepare_r(struct __kfifo *fifo,
if (len + recsize > fifo->in - fifo->out)
return 0;
- return setup_sgl(fifo, sgl, nents, len, fifo->out + recsize);
+ return setup_sgl(fifo, sgl, nents, len, fifo->out + recsize, dma);
}
EXPORT_SYMBOL(__kfifo_dma_out_prepare_r);
-void __kfifo_dma_out_finish_r(struct __kfifo *fifo, size_t recsize)
-{
- unsigned int len;
-
- len = __kfifo_peek_n(fifo, recsize);
- fifo->out += len + recsize;
-}
-EXPORT_SYMBOL(__kfifo_dma_out_finish_r);
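
Editor's note: the new __kfifo_out_linear() reports the length of the longest contiguous run of readable data and, optionally, its tail offset, so a consumer such as a UART driver can hand the ring storage to hardware directly instead of copying out first. A hedged sketch against the low-level struct __kfifo, assuming a byte fifo (esize == 1); hw_write_hypothetical() stands in for the real consumer:

#include <linux/kfifo.h>

static void hw_write_hypothetical(const void *buf, unsigned int len) { }

static void drain_in_place(struct __kfifo *fifo)
{
	unsigned int tail, len;

	/* Longest contiguous readable run, starting at offset 'tail';
	 * -1U means "as much as is available". */
	len = __kfifo_out_linear(fifo, &tail, -1U);
	if (!len)
		return;

	hw_write_hypothetical((const char *)fifo->data + tail, len);

	/* Only advance the read index once the data has been consumed. */
	fifo->out += len;
}
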
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 03b427e2707e..b7f2fa08d9c8 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -433,8 +433,23 @@ static void zap_modalias_env(struct kobj_uevent_env *env)
len = strlen(env->envp[i]) + 1;
if (i != env->envp_idx - 1) {
+ /* @env->envp[] contains pointers to @env->buf[]
+ * with @env->buflen chars, and we are removing
+		 * the MODALIAS variable pointed to by @env->envp[i]
+ * with length @len as shown below:
+ *
+ * 0 @env->buf[] @env->buflen
+ * ---------------------------------------------
+ * ^ ^ ^ ^
+ * | |-> @len <-| target block |
+ * @env->envp[0] @env->envp[i] @env->envp[i + 1]
+ *
+ * so the "target block" indicated above is moved
+		 * backward by @len, and its correct size is
+ * @env->buflen - (@env->envp[i + 1] - @env->envp[0]).
+ */
memmove(env->envp[i], env->envp[i + 1],
- env->buflen - len);
+ env->buflen - (env->envp[i + 1] - env->envp[0]));
for (j = i; j < env->envp_idx - 1; j++)
env->envp[j] = env->envp[j + 1] - len;
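
Editor's note, with concrete numbers: suppose @env->buflen is 100, @env->envp[i] points at offset 40, @len is 20, so @env->envp[i + 1] points at offset 60. The old call moved @env->buflen - @len = 80 bytes starting from offset 60, reading 40 bytes past the end of @env->buf[]. The corrected size, @env->buflen - (@env->envp[i + 1] - @env->envp[0]) = 100 - 60 = 40 bytes, moves exactly the variables that remain.
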
diff --git a/lib/kunit/Kconfig b/lib/kunit/Kconfig
index 68a6daec0aef..34d7242d526d 100644
--- a/lib/kunit/Kconfig
+++ b/lib/kunit/Kconfig
@@ -24,6 +24,17 @@ config KUNIT_DEBUGFS
test suite, which allow users to see results of the last test suite
run that occurred.
+config KUNIT_FAULT_TEST
+ bool "Enable KUnit tests which print BUG stacktraces"
+ depends on KUNIT_TEST
+ depends on !UML
+ default y
+ help
+	  Enables fault handling tests for the KUnit framework. These tests may
+	  trigger a kernel BUG() and print the associated stack trace even when
+	  they pass. If this conflicts with your test infrastructure (or is
+	  confusing or annoying), they can be disabled by setting this to N.
+
config KUNIT_TEST
tristate "KUnit test for KUnit" if !KUNIT_ALL_TESTS
default KUNIT_ALL_TESTS
diff --git a/lib/kunit/Makefile b/lib/kunit/Makefile
index 309659a32a78..30f6bbf04a4a 100644
--- a/lib/kunit/Makefile
+++ b/lib/kunit/Makefile
@@ -2,6 +2,7 @@ obj-$(CONFIG_KUNIT) += kunit.o
kunit-objs += test.o \
resource.o \
+ user_alloc.o \
static_stub.o \
string-stream.o \
assert.o \
@@ -22,6 +23,7 @@ obj-$(CONFIG_KUNIT_TEST) += kunit-test.o
# string-stream-test compiles built-in only.
ifeq ($(CONFIG_KUNIT_TEST),y)
obj-$(CONFIG_KUNIT_TEST) += string-stream-test.o
+obj-$(CONFIG_KUNIT_TEST) += assert_test.o
endif
obj-$(CONFIG_KUNIT_EXAMPLE_TEST) += kunit-example-test.o
diff --git a/lib/kunit/assert.c b/lib/kunit/assert.c
index dd1d633d0fe2..867aa5c4bccf 100644
--- a/lib/kunit/assert.c
+++ b/lib/kunit/assert.c
@@ -7,6 +7,7 @@
*/
#include <kunit/assert.h>
#include <kunit/test.h>
+#include <kunit/visibility.h>
#include "string-stream.h"
@@ -30,8 +31,9 @@ void kunit_assert_prologue(const struct kunit_loc *loc,
}
EXPORT_SYMBOL_GPL(kunit_assert_prologue);
-static void kunit_assert_print_msg(const struct va_format *message,
- struct string_stream *stream)
+VISIBLE_IF_KUNIT
+void kunit_assert_print_msg(const struct va_format *message,
+ struct string_stream *stream)
{
if (message->fmt)
string_stream_add(stream, "\n%pV", message);
@@ -89,7 +91,7 @@ void kunit_ptr_not_err_assert_format(const struct kunit_assert *assert,
EXPORT_SYMBOL_GPL(kunit_ptr_not_err_assert_format);
/* Checks if `text` is a literal representing `value`, e.g. "5" and 5 */
-static bool is_literal(const char *text, long long value)
+VISIBLE_IF_KUNIT bool is_literal(const char *text, long long value)
{
char *buffer;
int len;
@@ -166,7 +168,7 @@ EXPORT_SYMBOL_GPL(kunit_binary_ptr_assert_format);
/* Checks if KUNIT_EXPECT_STREQ() args were string literals.
* Note: `text` will have ""s where as `value` will not.
*/
-static bool is_str_literal(const char *text, const char *value)
+VISIBLE_IF_KUNIT bool is_str_literal(const char *text, const char *value)
{
int len;
@@ -208,10 +210,11 @@ EXPORT_SYMBOL_GPL(kunit_binary_str_assert_format);
/* Adds a hexdump of a buffer to a string_stream comparing it with
* a second buffer. The different bytes are marked with <>.
*/
-static void kunit_assert_hexdump(struct string_stream *stream,
- const void *buf,
- const void *compared_buf,
- const size_t len)
+VISIBLE_IF_KUNIT
+void kunit_assert_hexdump(struct string_stream *stream,
+ const void *buf,
+ const void *compared_buf,
+ const size_t len)
{
size_t i;
const u8 *buf1 = buf;
diff --git a/lib/kunit/assert_test.c b/lib/kunit/assert_test.c
new file mode 100644
index 000000000000..4a5967712186
--- /dev/null
+++ b/lib/kunit/assert_test.c
@@ -0,0 +1,388 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * KUnit test for the assertion formatting functions.
+ * Author: Ivan Orlov <ivan.orlov0322@gmail.com>
+ */
+#include <kunit/test.h>
+#include "string-stream.h"
+
+#define TEST_PTR_EXPECTED_BUF_SIZE 32
+#define HEXDUMP_TEST_BUF_LEN 5
+#define ASSERT_TEST_EXPECT_CONTAIN(test, str, substr) KUNIT_EXPECT_TRUE(test, strstr(str, substr))
+#define ASSERT_TEST_EXPECT_NCONTAIN(test, str, substr) KUNIT_EXPECT_FALSE(test, strstr(str, substr))
+
+static void kunit_test_is_literal(struct kunit *test)
+{
+ KUNIT_EXPECT_TRUE(test, is_literal("5", 5));
+ KUNIT_EXPECT_TRUE(test, is_literal("0", 0));
+ KUNIT_EXPECT_TRUE(test, is_literal("1234567890", 1234567890));
+ KUNIT_EXPECT_TRUE(test, is_literal("-1234567890", -1234567890));
+ KUNIT_EXPECT_FALSE(test, is_literal("05", 5));
+ KUNIT_EXPECT_FALSE(test, is_literal("", 0));
+ KUNIT_EXPECT_FALSE(test, is_literal("-0", 0));
+ KUNIT_EXPECT_FALSE(test, is_literal("12#45", 1245));
+}
+
+static void kunit_test_is_str_literal(struct kunit *test)
+{
+ KUNIT_EXPECT_TRUE(test, is_str_literal("\"Hello, World!\"", "Hello, World!"));
+ KUNIT_EXPECT_TRUE(test, is_str_literal("\"\"", ""));
+ KUNIT_EXPECT_TRUE(test, is_str_literal("\"\"\"", "\""));
+ KUNIT_EXPECT_FALSE(test, is_str_literal("", ""));
+ KUNIT_EXPECT_FALSE(test, is_str_literal("\"", "\""));
+ KUNIT_EXPECT_FALSE(test, is_str_literal("\"Abacaba", "Abacaba"));
+ KUNIT_EXPECT_FALSE(test, is_str_literal("Abacaba\"", "Abacaba"));
+ KUNIT_EXPECT_FALSE(test, is_str_literal("\"Abacaba\"", "\"Abacaba\""));
+}
+
+KUNIT_DEFINE_ACTION_WRAPPER(kfree_wrapper, kfree, const void *);
+
+/* Get a "char *" string from the string stream and defer its cleanup. */
+static char *get_str_from_stream(struct kunit *test, struct string_stream *stream)
+{
+ char *str = string_stream_get_string(stream);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, str);
+ kunit_add_action(test, kfree_wrapper, (void *)str);
+
+ return str;
+}
+
+static void kunit_test_assert_prologue(struct kunit *test)
+{
+ struct string_stream *stream;
+ char *str;
+ const struct kunit_loc location = {
+ .file = "testfile.c",
+ .line = 1337,
+ };
+
+ stream = kunit_alloc_string_stream(test, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, stream);
+
+ /* Test an expectation fail prologue */
+ kunit_assert_prologue(&location, KUNIT_EXPECTATION, stream);
+ str = get_str_from_stream(test, stream);
+ ASSERT_TEST_EXPECT_CONTAIN(test, str, "EXPECTATION");
+ ASSERT_TEST_EXPECT_CONTAIN(test, str, "testfile.c");
+ ASSERT_TEST_EXPECT_CONTAIN(test, str, "1337");
+
+ /* Test an assertion fail prologue */
+ string_stream_clear(stream);
+ kunit_assert_prologue(&location, KUNIT_ASSERTION, stream);
+ str = get_str_from_stream(test, stream);
+ ASSERT_TEST_EXPECT_CONTAIN(test, str, "ASSERTION");
+ ASSERT_TEST_EXPECT_CONTAIN(test, str, "testfile.c");
+ ASSERT_TEST_EXPECT_CONTAIN(test, str, "1337");
+}
+
+/*
+ * This function accepts an arbitrary number of parameters and builds a
+ * va_format struct, which is then used to validate the
+ * kunit_assert_print_msg() function.
+ */
+static void verify_assert_print_msg(struct kunit *test,
+ struct string_stream *stream,
+ char *expected, const char *format, ...)
+{
+ va_list list;
+ const struct va_format vformat = {
+ .fmt = format,
+ .va = &list,
+ };
+
+ va_start(list, format);
+ string_stream_clear(stream);
+ kunit_assert_print_msg(&vformat, stream);
+ KUNIT_EXPECT_STREQ(test, get_str_from_stream(test, stream), expected);
+}
+
+static void kunit_test_assert_print_msg(struct kunit *test)
+{
+ struct string_stream *stream;
+
+ stream = kunit_alloc_string_stream(test, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, stream);
+
+ verify_assert_print_msg(test, stream, "\nTest", "Test");
+ verify_assert_print_msg(test, stream, "\nAbacaba -123 234", "%s %d %u",
+ "Abacaba", -123, 234U);
+ verify_assert_print_msg(test, stream, "", NULL);
+}
+
+/*
+ * The code below tests the different assert format functions.
+ * This helper accepts an assert format function, executes it, and
+ * validates the resulting string from the stream by checking that all
+ * of the expected substrings are present in the output.
+ */
+static void validate_assert(assert_format_t format_func, struct kunit *test,
+ const struct kunit_assert *assert,
+ struct string_stream *stream, int num_checks, ...)
+{
+ size_t i;
+ va_list checks;
+ char *cur_substr_exp;
+ struct va_format message = { NULL, NULL };
+
+ va_start(checks, num_checks);
+ string_stream_clear(stream);
+ format_func(assert, &message, stream);
+
+ for (i = 0; i < num_checks; i++) {
+ cur_substr_exp = va_arg(checks, char *);
+ ASSERT_TEST_EXPECT_CONTAIN(test, get_str_from_stream(test, stream), cur_substr_exp);
+ }
+}
+
+static void kunit_test_unary_assert_format(struct kunit *test)
+{
+ struct string_stream *stream;
+ struct kunit_assert assert = {};
+ struct kunit_unary_assert un_assert = {
+ .assert = assert,
+ .condition = "expr",
+ .expected_true = true,
+ };
+
+ stream = kunit_alloc_string_stream(test, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, stream);
+
+ validate_assert(kunit_unary_assert_format, test, &un_assert.assert,
+ stream, 2, "true", "is false");
+
+ un_assert.expected_true = false;
+ validate_assert(kunit_unary_assert_format, test, &un_assert.assert,
+ stream, 2, "false", "is true");
+}
+
+static void kunit_test_ptr_not_err_assert_format(struct kunit *test)
+{
+ struct string_stream *stream;
+ struct kunit_assert assert = {};
+ struct kunit_ptr_not_err_assert not_err_assert = {
+ .assert = assert,
+ .text = "expr",
+ .value = NULL,
+ };
+
+ stream = kunit_alloc_string_stream(test, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, stream);
+
+ /* Value is NULL. The corresponding message should be printed out */
+ validate_assert(kunit_ptr_not_err_assert_format, test,
+ &not_err_assert.assert,
+ stream, 1, "null");
+
+ /* Value is not NULL, but looks like an error pointer. Error should be printed out */
+ not_err_assert.value = (void *)-12;
+ validate_assert(kunit_ptr_not_err_assert_format, test,
+ &not_err_assert.assert, stream, 2,
+ "error", "-12");
+}
+
+static void kunit_test_binary_assert_format(struct kunit *test)
+{
+ struct string_stream *stream;
+ struct kunit_assert assert = {};
+ struct kunit_binary_assert_text text = {
+ .left_text = "1 + 2",
+ .operation = "==",
+ .right_text = "2",
+ };
+ const struct kunit_binary_assert binary_assert = {
+ .assert = assert,
+ .text = &text,
+ .left_value = 3,
+ .right_value = 2,
+ };
+
+ stream = kunit_alloc_string_stream(test, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, stream);
+
+ /*
+ * Printed values should depend on the input we provide: the left text, right text, left
+ * value and the right value.
+ */
+ validate_assert(kunit_binary_assert_format, test, &binary_assert.assert,
+ stream, 4, "1 + 2", "2", "3", "==");
+
+ text.right_text = "4 - 2";
+ validate_assert(kunit_binary_assert_format, test, &binary_assert.assert,
+ stream, 3, "==", "1 + 2", "4 - 2");
+
+ text.left_text = "3";
+ validate_assert(kunit_binary_assert_format, test, &binary_assert.assert,
+ stream, 4, "3", "4 - 2", "2", "==");
+
+ text.right_text = "2";
+ validate_assert(kunit_binary_assert_format, test, &binary_assert.assert,
+ stream, 3, "3", "2", "==");
+}
+
+static void kunit_test_binary_ptr_assert_format(struct kunit *test)
+{
+ struct string_stream *stream;
+ struct kunit_assert assert = {};
+ char *addr_var_a, *addr_var_b;
+ static const void *var_a = (void *)0xDEADBEEF;
+ static const void *var_b = (void *)0xBADDCAFE;
+ struct kunit_binary_assert_text text = {
+ .left_text = "var_a",
+ .operation = "==",
+ .right_text = "var_b",
+ };
+ struct kunit_binary_ptr_assert binary_ptr_assert = {
+ .assert = assert,
+ .text = &text,
+ .left_value = var_a,
+ .right_value = var_b,
+ };
+
+ addr_var_a = kunit_kzalloc(test, TEST_PTR_EXPECTED_BUF_SIZE, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, addr_var_a);
+ addr_var_b = kunit_kzalloc(test, TEST_PTR_EXPECTED_BUF_SIZE, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, addr_var_b);
+ /*
+ * Print the addresses to the buffers first.
+	 * This is necessary as the pointer may be printed with a different
+	 * number of leading zeros on different architectures.
+ */
+ snprintf(addr_var_a, TEST_PTR_EXPECTED_BUF_SIZE, "%px", var_a);
+ snprintf(addr_var_b, TEST_PTR_EXPECTED_BUF_SIZE, "%px", var_b);
+
+ stream = kunit_alloc_string_stream(test, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, stream);
+ validate_assert(kunit_binary_ptr_assert_format, test, &binary_ptr_assert.assert,
+ stream, 3, addr_var_a, addr_var_b, "==");
+}
+
+static void kunit_test_binary_str_assert_format(struct kunit *test)
+{
+ struct string_stream *stream;
+ struct kunit_assert assert = {};
+ static const char *var_a = "abacaba";
+ static const char *var_b = "kernel";
+ struct kunit_binary_assert_text text = {
+ .left_text = "var_a",
+ .operation = "==",
+ .right_text = "var_b",
+ };
+ struct kunit_binary_str_assert binary_str_assert = {
+ .assert = assert,
+ .text = &text,
+ .left_value = var_a,
+ .right_value = var_b,
+ };
+
+ stream = kunit_alloc_string_stream(test, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, stream);
+
+ validate_assert(kunit_binary_str_assert_format, test,
+ &binary_str_assert.assert,
+ stream, 5, "var_a", "var_b", "\"abacaba\"",
+ "\"kernel\"", "==");
+
+ text.left_text = "\"abacaba\"";
+ validate_assert(kunit_binary_str_assert_format, test, &binary_str_assert.assert,
+ stream, 4, "\"abacaba\"", "var_b", "\"kernel\"", "==");
+
+ text.right_text = "\"kernel\"";
+ validate_assert(kunit_binary_str_assert_format, test, &binary_str_assert.assert,
+ stream, 3, "\"abacaba\"", "\"kernel\"", "==");
+}
+
+static const u8 hex_testbuf1[] = { 0x26, 0x74, 0x6b, 0x9c, 0x55,
+ 0x45, 0x9d, 0x47, 0xd6, 0x47,
+ 0x2, 0x89, 0x8c, 0x81, 0x94,
+ 0x12, 0xfe, 0x01 };
+static const u8 hex_testbuf2[] = { 0x26, 0x74, 0x6b, 0x9c, 0x55,
+ 0x45, 0x9d, 0x47, 0x21, 0x47,
+ 0xcd, 0x89, 0x24, 0x50, 0x94,
+ 0x12, 0xba, 0x01 };
+static void kunit_test_assert_hexdump(struct kunit *test)
+{
+ struct string_stream *stream;
+ char *str;
+ size_t i;
+ char buf[HEXDUMP_TEST_BUF_LEN];
+
+ stream = kunit_alloc_string_stream(test, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, stream);
+	/* Check that we get <xx> markers for the non-matching bytes. */
+ kunit_assert_hexdump(stream, hex_testbuf1, hex_testbuf2, sizeof(hex_testbuf1));
+ str = get_str_from_stream(test, stream);
+ for (i = 0; i < sizeof(hex_testbuf1); i++) {
+ snprintf(buf, HEXDUMP_TEST_BUF_LEN, "<%02x>", hex_testbuf1[i]);
+ if (hex_testbuf1[i] != hex_testbuf2[i])
+ ASSERT_TEST_EXPECT_CONTAIN(test, str, buf);
+ }
+	/* No <xx> markers should appear when comparing the buffer with itself. */
+ string_stream_clear(stream);
+ kunit_assert_hexdump(stream, hex_testbuf1, hex_testbuf1, sizeof(hex_testbuf1));
+ str = get_str_from_stream(test, stream);
+ ASSERT_TEST_EXPECT_NCONTAIN(test, str, "<");
+ ASSERT_TEST_EXPECT_NCONTAIN(test, str, ">");
+}
+
+static void kunit_test_mem_assert_format(struct kunit *test)
+{
+ struct string_stream *stream;
+ struct string_stream *expected_stream;
+ struct kunit_assert assert = {};
+ static const struct kunit_binary_assert_text text = {
+ .left_text = "hex_testbuf1",
+ .operation = "==",
+ .right_text = "hex_testbuf2",
+ };
+ struct kunit_mem_assert mem_assert = {
+ .assert = assert,
+ .text = &text,
+ .left_value = NULL,
+ .right_value = hex_testbuf2,
+ .size = sizeof(hex_testbuf1),
+ };
+
+ expected_stream = kunit_alloc_string_stream(test, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, expected_stream);
+ stream = kunit_alloc_string_stream(test, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, stream);
+
+ /* The left value is NULL */
+ validate_assert(kunit_mem_assert_format, test, &mem_assert.assert,
+ stream, 2, "hex_testbuf1", "is not null");
+
+ /* The right value is NULL, the left value is not NULL */
+ mem_assert.left_value = hex_testbuf1;
+ mem_assert.right_value = NULL;
+ validate_assert(kunit_mem_assert_format, test, &mem_assert.assert,
+ stream, 2, "hex_testbuf2", "is not null");
+
+ /* Both arguments are not null */
+ mem_assert.left_value = hex_testbuf1;
+ mem_assert.right_value = hex_testbuf2;
+
+ validate_assert(kunit_mem_assert_format, test, &mem_assert.assert,
+ stream, 3, "hex_testbuf1", "hex_testbuf2", "==");
+}
+
+static struct kunit_case assert_test_cases[] = {
+ KUNIT_CASE(kunit_test_is_literal),
+ KUNIT_CASE(kunit_test_is_str_literal),
+ KUNIT_CASE(kunit_test_assert_prologue),
+ KUNIT_CASE(kunit_test_assert_print_msg),
+ KUNIT_CASE(kunit_test_unary_assert_format),
+ KUNIT_CASE(kunit_test_ptr_not_err_assert_format),
+ KUNIT_CASE(kunit_test_binary_assert_format),
+ KUNIT_CASE(kunit_test_binary_ptr_assert_format),
+ KUNIT_CASE(kunit_test_binary_str_assert_format),
+ KUNIT_CASE(kunit_test_assert_hexdump),
+ KUNIT_CASE(kunit_test_mem_assert_format),
+ {}
+};
+
+static struct kunit_suite assert_test_suite = {
+ .name = "kunit-assert",
+ .test_cases = assert_test_cases,
+};
+
+kunit_test_suites(&assert_test_suite);
diff --git a/lib/kunit/device.c b/lib/kunit/device.c
index abc603730b8e..25c81ed465fb 100644
--- a/lib/kunit/device.c
+++ b/lib/kunit/device.c
@@ -51,7 +51,7 @@ int kunit_bus_init(void)
error = bus_register(&kunit_bus_type);
if (error)
- bus_unregister(&kunit_bus_type);
+ root_device_unregister(kunit_bus_device);
return error;
}
diff --git a/lib/kunit/executor.c b/lib/kunit/executor.c
index 70b9a43cd257..34b7b6833df3 100644
--- a/lib/kunit/executor.c
+++ b/lib/kunit/executor.c
@@ -70,32 +70,26 @@ struct kunit_glob_filter {
static int kunit_parse_glob_filter(struct kunit_glob_filter *parsed,
const char *filter_glob)
{
- const int len = strlen(filter_glob);
const char *period = strchr(filter_glob, '.');
if (!period) {
- parsed->suite_glob = kzalloc(len + 1, GFP_KERNEL);
+ parsed->suite_glob = kstrdup(filter_glob, GFP_KERNEL);
if (!parsed->suite_glob)
return -ENOMEM;
-
parsed->test_glob = NULL;
- strcpy(parsed->suite_glob, filter_glob);
return 0;
}
- parsed->suite_glob = kzalloc(period - filter_glob + 1, GFP_KERNEL);
+ parsed->suite_glob = kstrndup(filter_glob, period - filter_glob, GFP_KERNEL);
if (!parsed->suite_glob)
return -ENOMEM;
- parsed->test_glob = kzalloc(len - (period - filter_glob) + 1, GFP_KERNEL);
+ parsed->test_glob = kstrdup(period + 1, GFP_KERNEL);
if (!parsed->test_glob) {
kfree(parsed->suite_glob);
return -ENOMEM;
}
- strncpy(parsed->suite_glob, filter_glob, period - filter_glob);
- strncpy(parsed->test_glob, period + 1, len - (period - filter_glob));
-
return 0;
}
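
Editor's note: kstrdup()/kstrndup() fold the allocate-then-copy pairs above into single calls that always NUL-terminate. A small sketch of the bounded variant on the same suite.test split; the input string is illustrative:

#include <linux/slab.h>
#include <linux/string.h>

static char *suite_part(void)
{
	const char *filter_glob = "suite.test"; /* illustrative input */
	const char *period = strchr(filter_glob, '.');

	/* Copies at most period - filter_glob bytes ("suite") and
	 * appends the NUL; returns NULL on allocation failure. */
	return kstrndup(filter_glob, period - filter_glob, GFP_KERNEL);
}
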
diff --git a/lib/kunit/executor_test.c b/lib/kunit/executor_test.c
index 3f7f967e3688..f0090c2729cd 100644
--- a/lib/kunit/executor_test.c
+++ b/lib/kunit/executor_test.c
@@ -286,7 +286,7 @@ static struct kunit_suite *alloc_fake_suite(struct kunit *test,
/* We normally never expect to allocate suites, hence the non-const cast. */
suite = kunit_kzalloc(test, sizeof(*suite), GFP_KERNEL);
- strncpy((char *)suite->name, suite_name, sizeof(suite->name) - 1);
+ strscpy((char *)suite->name, suite_name, sizeof(suite->name));
suite->test_cases = test_cases;
return suite;
diff --git a/lib/kunit/kunit-example-test.c b/lib/kunit/kunit-example-test.c
index 798924f7cc86..3056d6bc705d 100644
--- a/lib/kunit/kunit-example-test.c
+++ b/lib/kunit/kunit-example-test.c
@@ -374,4 +374,5 @@ static struct kunit_suite example_init_test_suite = {
*/
kunit_test_init_section_suites(&example_init_test_suite);
+MODULE_DESCRIPTION("Example KUnit test suite");
MODULE_LICENSE("GPL v2");
diff --git a/lib/kunit/kunit-test.c b/lib/kunit/kunit-test.c
index f7980ef236a3..37e02be1e710 100644
--- a/lib/kunit/kunit-test.c
+++ b/lib/kunit/kunit-test.c
@@ -109,6 +109,48 @@ static struct kunit_suite kunit_try_catch_test_suite = {
.test_cases = kunit_try_catch_test_cases,
};
+#if IS_ENABLED(CONFIG_KUNIT_FAULT_TEST)
+
+static void kunit_test_null_dereference(void *data)
+{
+ struct kunit *test = data;
+ int *null = NULL;
+
+ *null = 0;
+
+ KUNIT_FAIL(test, "This line should never be reached\n");
+}
+
+static void kunit_test_fault_null_dereference(struct kunit *test)
+{
+ struct kunit_try_catch_test_context *ctx = test->priv;
+ struct kunit_try_catch *try_catch = ctx->try_catch;
+
+ kunit_try_catch_init(try_catch,
+ test,
+ kunit_test_null_dereference,
+ kunit_test_catch);
+ kunit_try_catch_run(try_catch, test);
+
+ KUNIT_EXPECT_EQ(test, try_catch->try_result, -EINTR);
+ KUNIT_EXPECT_TRUE(test, ctx->function_called);
+}
+
+#endif /* CONFIG_KUNIT_FAULT_TEST */
+
+static struct kunit_case kunit_fault_test_cases[] = {
+#if IS_ENABLED(CONFIG_KUNIT_FAULT_TEST)
+ KUNIT_CASE(kunit_test_fault_null_dereference),
+#endif /* CONFIG_KUNIT_FAULT_TEST */
+ {}
+};
+
+static struct kunit_suite kunit_fault_test_suite = {
+ .name = "kunit_fault",
+ .init = kunit_try_catch_test_init,
+ .test_cases = kunit_fault_test_cases,
+};
+
/*
* Context for testing test managed resources
* is_resource_initialized is used to test arbitrary resources
@@ -826,6 +868,8 @@ static struct kunit_suite kunit_current_test_suite = {
kunit_test_suites(&kunit_try_catch_test_suite, &kunit_resource_test_suite,
&kunit_log_test_suite, &kunit_status_test_suite,
- &kunit_current_test_suite, &kunit_device_test_suite);
+ &kunit_current_test_suite, &kunit_device_test_suite,
+ &kunit_fault_test_suite);
+MODULE_DESCRIPTION("KUnit test for core test infrastructure");
MODULE_LICENSE("GPL v2");
diff --git a/lib/kunit/string-stream-test.c b/lib/kunit/string-stream-test.c
index 03fb511826f7..7511442ea98f 100644
--- a/lib/kunit/string-stream-test.c
+++ b/lib/kunit/string-stream-test.c
@@ -22,18 +22,10 @@ struct string_stream_test_priv {
};
/* Avoids a cast warning if kfree() is passed direct to kunit_add_action(). */
-static void kfree_wrapper(void *p)
-{
- kfree(p);
-}
+KUNIT_DEFINE_ACTION_WRAPPER(kfree_wrapper, kfree, const void *);
/* Avoids a cast warning if string_stream_destroy() is passed directly to kunit_add_action(). */
-static void cleanup_raw_stream(void *p)
-{
- struct string_stream *stream = p;
-
- string_stream_destroy(stream);
-}
+KUNIT_DEFINE_ACTION_WRAPPER(cleanup_raw_stream, string_stream_destroy, struct string_stream *);
static char *get_concatenated_string(struct kunit *test, struct string_stream *stream)
{
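Note: KUNIT_DEFINE_ACTION_WRAPPER() generates the same kind of trampoline the removed functions spelled out by hand. A sketch of roughly what the first invocation above expands to (the exact macro lives in <kunit/resource.h>):

	/*
	 * Typed trampoline: the cast happens inside, so no function-pointer
	 * cast is needed at the kunit_add_action() call site.
	 */
	static void kfree_wrapper(void *in)
	{
		const void *arg = (const void *)in;

		kfree(arg);
	}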
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
index 1d1475578515..e8b1b52a19ab 100644
--- a/lib/kunit/test.c
+++ b/lib/kunit/test.c
@@ -712,6 +712,9 @@ int __kunit_test_suites_init(struct kunit_suite * const * const suites, int num_
{
unsigned int i;
+ if (num_suites == 0)
+ return 0;
+
if (!kunit_enabled() && num_suites > 0) {
pr_info("kunit: disabled\n");
return 0;
@@ -935,4 +938,5 @@ static void __exit kunit_exit(void)
}
module_exit(kunit_exit);
+MODULE_DESCRIPTION("Base unit test (KUnit) API");
MODULE_LICENSE("GPL v2");
diff --git a/lib/kunit/try-catch.c b/lib/kunit/try-catch.c
index f7825991d576..6bbe0025b079 100644
--- a/lib/kunit/try-catch.c
+++ b/lib/kunit/try-catch.c
@@ -11,13 +11,14 @@
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
+#include <linux/sched/task.h>
#include "try-catch-impl.h"
void __noreturn kunit_try_catch_throw(struct kunit_try_catch *try_catch)
{
try_catch->try_result = -EFAULT;
- kthread_complete_and_exit(try_catch->try_completion, -EFAULT);
+ kthread_exit(0);
}
EXPORT_SYMBOL_GPL(kunit_try_catch_throw);
@@ -25,9 +26,12 @@ static int kunit_generic_run_threadfn_adapter(void *data)
{
struct kunit_try_catch *try_catch = data;
+ try_catch->try_result = -EINTR;
try_catch->try(try_catch->context);
+ if (try_catch->try_result == -EINTR)
+ try_catch->try_result = 0;
- kthread_complete_and_exit(try_catch->try_completion, 0);
+ return 0;
}
static unsigned long kunit_test_timeout(void)
@@ -57,30 +61,38 @@ static unsigned long kunit_test_timeout(void)
void kunit_try_catch_run(struct kunit_try_catch *try_catch, void *context)
{
- DECLARE_COMPLETION_ONSTACK(try_completion);
struct kunit *test = try_catch->test;
struct task_struct *task_struct;
+ struct completion *task_done;
int exit_code, time_remaining;
try_catch->context = context;
- try_catch->try_completion = &try_completion;
try_catch->try_result = 0;
- task_struct = kthread_run(kunit_generic_run_threadfn_adapter,
- try_catch,
- "kunit_try_catch_thread");
+ task_struct = kthread_create(kunit_generic_run_threadfn_adapter,
+ try_catch, "kunit_try_catch_thread");
if (IS_ERR(task_struct)) {
+ try_catch->try_result = PTR_ERR(task_struct);
try_catch->catch(try_catch->context);
return;
}
+ get_task_struct(task_struct);
+ /*
+ * As for a vfork(2), task_struct->vfork_done (pointing to the
+ * underlying kthread->exited) can be used to wait for the end of a
+ * kernel thread. It is set to NULL when the thread exits, so we
+ * keep a copy here.
+ */
+ task_done = task_struct->vfork_done;
+ wake_up_process(task_struct);
- time_remaining = wait_for_completion_timeout(&try_completion,
+ time_remaining = wait_for_completion_timeout(task_done,
kunit_test_timeout());
if (time_remaining == 0) {
- kunit_err(test, "try timed out\n");
try_catch->try_result = -ETIMEDOUT;
kthread_stop(task_struct);
}
+ put_task_struct(task_struct);
exit_code = try_catch->try_result;
if (!exit_code)
@@ -88,8 +100,14 @@ void kunit_try_catch_run(struct kunit_try_catch *try_catch, void *context)
if (exit_code == -EFAULT)
try_catch->try_result = 0;
- else if (exit_code == -EINTR)
- kunit_err(test, "wake_up_process() was never called\n");
+ else if (exit_code == -EINTR) {
+ if (test->last_seen.file)
+ kunit_err(test, "try faulted: last line seen %s:%d\n",
+ test->last_seen.file, test->last_seen.line);
+ else
+ kunit_err(test, "try faulted\n");
+ } else if (exit_code == -ETIMEDOUT)
+ kunit_err(test, "try timed out\n");
else if (exit_code)
kunit_err(test, "Unknown error: %d\n", exit_code);
diff --git a/lib/kunit/user_alloc.c b/lib/kunit/user_alloc.c
new file mode 100644
index 000000000000..ae935df09a5e
--- /dev/null
+++ b/lib/kunit/user_alloc.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit userspace memory allocation resource management.
+ */
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/kthread.h>
+#include <linux/mm.h>
+
+struct kunit_vm_mmap_resource {
+ unsigned long addr;
+ size_t size;
+};
+
+/* vm_mmap() arguments */
+struct kunit_vm_mmap_params {
+ struct file *file;
+ unsigned long addr;
+ unsigned long len;
+ unsigned long prot;
+ unsigned long flag;
+ unsigned long offset;
+};
+
+/* Create and attach a new mm if it doesn't already exist. */
+static int kunit_attach_mm(void)
+{
+ struct mm_struct *mm;
+
+ if (current->mm)
+ return 0;
+
+ /* arch_pick_mmap_layout() is only sane with MMU systems. */
+ if (!IS_ENABLED(CONFIG_MMU))
+ return -EINVAL;
+
+ mm = mm_alloc();
+ if (!mm)
+ return -ENOMEM;
+
+ /* Define the task size. */
+ mm->task_size = TASK_SIZE;
+
+ /* Make sure we can allocate new VMAs. */
+ arch_pick_mmap_layout(mm, &current->signal->rlim[RLIMIT_STACK]);
+
+ /* Attach the mm. It will be cleaned up when the process dies. */
+ kthread_use_mm(mm);
+
+ return 0;
+}
+
+static int kunit_vm_mmap_init(struct kunit_resource *res, void *context)
+{
+ struct kunit_vm_mmap_params *p = context;
+ struct kunit_vm_mmap_resource vres;
+ int ret;
+
+ ret = kunit_attach_mm();
+ if (ret)
+ return ret;
+
+ vres.size = p->len;
+ vres.addr = vm_mmap(p->file, p->addr, p->len, p->prot, p->flag, p->offset);
+ if (!vres.addr)
+ return -ENOMEM;
+ res->data = kmemdup(&vres, sizeof(vres), GFP_KERNEL);
+ if (!res->data) {
+ vm_munmap(vres.addr, vres.size);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void kunit_vm_mmap_free(struct kunit_resource *res)
+{
+ struct kunit_vm_mmap_resource *vres = res->data;
+
+ /*
+ * Since this is executed from the test monitoring process,
+ * the test's mm has already been torn down. We don't need
+ * to run vm_munmap(vres->addr, vres->size), only clean up
+ * the vres.
+ */
+
+ kfree(vres);
+ res->data = NULL;
+}
+
+unsigned long kunit_vm_mmap(struct kunit *test, struct file *file,
+ unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flag,
+ unsigned long offset)
+{
+ struct kunit_vm_mmap_params params = {
+ .file = file,
+ .addr = addr,
+ .len = len,
+ .prot = prot,
+ .flag = flag,
+ .offset = offset,
+ };
+ struct kunit_vm_mmap_resource *vres;
+
+ vres = kunit_alloc_resource(test,
+ kunit_vm_mmap_init,
+ kunit_vm_mmap_free,
+ GFP_KERNEL,
+ &params);
+ if (vres)
+ return vres->addr;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kunit_vm_mmap);
+
+MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
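Note: a hypothetical caller-side sketch of the new helper (the test name and mmap flags here are illustrative, not taken from this series). The mapping is released along with the rest of the test's resources:

	static void example_mmap_test(struct kunit *test)
	{
		unsigned long addr;

		addr = kunit_vm_mmap(test, NULL, 0, PAGE_SIZE,
				     PROT_READ | PROT_WRITE,
				     MAP_ANONYMOUS | MAP_PRIVATE, 0);
		KUNIT_ASSERT_NE(test, addr, 0);
		/* addr now behaves like a userspace mapping for current. */
	}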
diff --git a/lib/kunit_iov_iter.c b/lib/kunit_iov_iter.c
index 859b67c4d697..27e0c8ee71d8 100644
--- a/lib/kunit_iov_iter.c
+++ b/lib/kunit_iov_iter.c
@@ -139,7 +139,7 @@ static void __init iov_kunit_copy_to_kvec(struct kunit *test)
return;
}
- KUNIT_SUCCEED();
+ KUNIT_SUCCEED(test);
}
/*
@@ -194,7 +194,7 @@ stop:
return;
}
- KUNIT_SUCCEED();
+ KUNIT_SUCCEED(test);
}
struct bvec_test_range {
@@ -302,7 +302,7 @@ static void __init iov_kunit_copy_to_bvec(struct kunit *test)
return;
}
- KUNIT_SUCCEED();
+ KUNIT_SUCCEED(test);
}
/*
@@ -359,7 +359,7 @@ stop:
return;
}
- KUNIT_SUCCEED();
+ KUNIT_SUCCEED(test);
}
static void iov_kunit_destroy_xarray(void *data)
@@ -453,7 +453,7 @@ static void __init iov_kunit_copy_to_xarray(struct kunit *test)
return;
}
- KUNIT_SUCCEED();
+ KUNIT_SUCCEED(test);
}
/*
@@ -516,7 +516,7 @@ stop:
return;
}
- KUNIT_SUCCEED();
+ KUNIT_SUCCEED(test);
}
/*
@@ -596,7 +596,7 @@ static void __init iov_kunit_extract_pages_kvec(struct kunit *test)
stop:
KUNIT_EXPECT_EQ(test, size, 0);
KUNIT_EXPECT_EQ(test, iter.count, 0);
- KUNIT_SUCCEED();
+ KUNIT_SUCCEED(test);
}
/*
@@ -674,7 +674,7 @@ static void __init iov_kunit_extract_pages_bvec(struct kunit *test)
stop:
KUNIT_EXPECT_EQ(test, size, 0);
KUNIT_EXPECT_EQ(test, iter.count, 0);
- KUNIT_SUCCEED();
+ KUNIT_SUCCEED(test);
}
/*
@@ -753,7 +753,7 @@ static void __init iov_kunit_extract_pages_xarray(struct kunit *test)
}
stop:
- KUNIT_SUCCEED();
+ KUNIT_SUCCEED(test);
}
static struct kunit_case __refdata iov_kunit_cases[] = {
diff --git a/lib/list-test.c b/lib/list-test.c
index 0cc27de9cec8..37cbc33e9fdb 100644
--- a/lib/list-test.c
+++ b/lib/list-test.c
@@ -1201,12 +1201,6 @@ static struct kunit_suite hlist_test_module = {
};
-struct klist_test_struct {
- int data;
- struct klist klist;
- struct klist_node klist_node;
-};
-
static int node_count;
static struct klist_node *last_node;
@@ -1499,4 +1493,5 @@ static struct kunit_suite klist_test_module = {
kunit_test_suites(&list_test_module, &hlist_test_module, &klist_test_module);
+MODULE_DESCRIPTION("KUnit test for the Kernel Linked-list structures");
MODULE_LICENSE("GPL v2");
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 55e1b35bf877..aa3a5df15b8e 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -4203,31 +4203,28 @@ slow_path:
*
 * Return: The contents that were stored at the index.
*/
-static inline void *mas_wr_store_entry(struct ma_wr_state *wr_mas)
+static inline void mas_wr_store_entry(struct ma_wr_state *wr_mas)
{
struct ma_state *mas = wr_mas->mas;
wr_mas->content = mas_start(mas);
if (mas_is_none(mas) || mas_is_ptr(mas)) {
mas_store_root(mas, wr_mas->entry);
- return wr_mas->content;
+ return;
}
if (unlikely(!mas_wr_walk(wr_mas))) {
mas_wr_spanning_store(wr_mas);
- return wr_mas->content;
+ return;
}
/* At this point, we are at the leaf node that needs to be altered. */
mas_wr_end_piv(wr_mas);
/* New root for a single pointer */
- if (unlikely(!mas->index && mas->last == ULONG_MAX)) {
+ if (unlikely(!mas->index && mas->last == ULONG_MAX))
mas_new_root(mas, wr_mas->entry);
- return wr_mas->content;
- }
-
- mas_wr_modify(wr_mas);
- return wr_mas->content;
+ else
+ mas_wr_modify(wr_mas);
}
/**
@@ -5109,18 +5106,18 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
if (size == 0 || max - min < size - 1)
return -EINVAL;
- if (mas_is_start(mas)) {
+ if (mas_is_start(mas))
mas_start(mas);
- mas->offset = mas_data_end(mas);
- } else if (mas->offset >= 2) {
- mas->offset -= 2;
- } else if (!mas_rewind_node(mas)) {
+ else if ((mas->offset < 2) && (!mas_rewind_node(mas)))
return -EBUSY;
- }
- /* Empty set. */
- if (mas_is_none(mas) || mas_is_ptr(mas))
+ if (unlikely(mas_is_none(mas) || mas_is_ptr(mas)))
return mas_sparse_area(mas, min, max, size, false);
+ else if (mas->offset >= 2)
+ mas->offset -= 2;
+ else
+ mas->offset = mas_data_end(mas);
+
/* The start of the window can only be within these values. */
mas->index = min;
diff --git a/lib/math/prime_numbers.c b/lib/math/prime_numbers.c
index d42cebf7407f..9a17ee9af93a 100644
--- a/lib/math/prime_numbers.c
+++ b/lib/math/prime_numbers.c
@@ -6,8 +6,6 @@
#include <linux/prime_numbers.h>
#include <linux/slab.h>
-#define bitmap_size(nbits) (BITS_TO_LONGS(nbits) * sizeof(unsigned long))
-
struct primes {
struct rcu_head rcu;
unsigned long last, sz;
@@ -313,4 +311,5 @@ module_exit(primes_exit);
module_param_named(selftest, selftest_max, ulong, 0400);
MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("Prime number library");
MODULE_LICENSE("GPL");
diff --git a/lib/math/rational-test.c b/lib/math/rational-test.c
index 01611ddff420..47486a95f088 100644
--- a/lib/math/rational-test.c
+++ b/lib/math/rational-test.c
@@ -53,4 +53,5 @@ static struct kunit_suite rational_test_suite = {
kunit_test_suites(&rational_test_suite);
+MODULE_DESCRIPTION("Rational fractions unit test");
MODULE_LICENSE("GPL v2");
diff --git a/lib/math/rational.c b/lib/math/rational.c
index ec59d426ea63..d2c34e629ee1 100644
--- a/lib/math/rational.c
+++ b/lib/math/rational.c
@@ -108,4 +108,5 @@ void rational_best_approximation(
EXPORT_SYMBOL(rational_best_approximation);
+MODULE_DESCRIPTION("Rational fraction support library");
MODULE_LICENSE("GPL v2");
diff --git a/lib/memcpy_kunit.c b/lib/memcpy_kunit.c
index fd16e6ce53d1..d36933554e46 100644
--- a/lib/memcpy_kunit.c
+++ b/lib/memcpy_kunit.c
@@ -493,58 +493,6 @@ static void memmove_overlap_test(struct kunit *test)
}
}
-static void strtomem_test(struct kunit *test)
-{
- static const char input[sizeof(unsigned long)] = "hi";
- static const char truncate[] = "this is too long";
- struct {
- unsigned long canary1;
- unsigned char output[sizeof(unsigned long)] __nonstring;
- unsigned long canary2;
- } wrap;
-
- memset(&wrap, 0xFF, sizeof(wrap));
- KUNIT_EXPECT_EQ_MSG(test, wrap.canary1, ULONG_MAX,
- "bad initial canary value");
- KUNIT_EXPECT_EQ_MSG(test, wrap.canary2, ULONG_MAX,
- "bad initial canary value");
-
- /* Check unpadded copy leaves surroundings untouched. */
- strtomem(wrap.output, input);
- KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX);
- KUNIT_EXPECT_EQ(test, wrap.output[0], input[0]);
- KUNIT_EXPECT_EQ(test, wrap.output[1], input[1]);
- for (size_t i = 2; i < sizeof(wrap.output); i++)
- KUNIT_EXPECT_EQ(test, wrap.output[i], 0xFF);
- KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX);
-
- /* Check truncated copy leaves surroundings untouched. */
- memset(&wrap, 0xFF, sizeof(wrap));
- strtomem(wrap.output, truncate);
- KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX);
- for (size_t i = 0; i < sizeof(wrap.output); i++)
- KUNIT_EXPECT_EQ(test, wrap.output[i], truncate[i]);
- KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX);
-
- /* Check padded copy leaves only string padded. */
- memset(&wrap, 0xFF, sizeof(wrap));
- strtomem_pad(wrap.output, input, 0xAA);
- KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX);
- KUNIT_EXPECT_EQ(test, wrap.output[0], input[0]);
- KUNIT_EXPECT_EQ(test, wrap.output[1], input[1]);
- for (size_t i = 2; i < sizeof(wrap.output); i++)
- KUNIT_EXPECT_EQ(test, wrap.output[i], 0xAA);
- KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX);
-
- /* Check truncated padded copy has no padding. */
- memset(&wrap, 0xFF, sizeof(wrap));
- strtomem(wrap.output, truncate);
- KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX);
- for (size_t i = 0; i < sizeof(wrap.output); i++)
- KUNIT_EXPECT_EQ(test, wrap.output[i], truncate[i]);
- KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX);
-}
-
static struct kunit_case memcpy_test_cases[] = {
KUNIT_CASE(memset_test),
KUNIT_CASE(memcpy_test),
@@ -552,7 +500,6 @@ static struct kunit_case memcpy_test_cases[] = {
KUNIT_CASE_SLOW(memmove_test),
KUNIT_CASE_SLOW(memmove_large_test),
KUNIT_CASE_SLOW(memmove_overlap_test),
- KUNIT_CASE(strtomem_test),
{}
};
@@ -563,4 +510,5 @@ static struct kunit_suite memcpy_test_suite = {
kunit_test_suite(memcpy_test_suite);
+MODULE_DESCRIPTION("test cases for memcpy(), memmove(), and memset()");
MODULE_LICENSE("GPL");
diff --git a/lib/objagg.c b/lib/objagg.c
index 1e248629ed64..363e43e849ac 100644
--- a/lib/objagg.c
+++ b/lib/objagg.c
@@ -167,6 +167,9 @@ static int objagg_obj_parent_assign(struct objagg *objagg,
{
void *delta_priv;
+ if (WARN_ON(!objagg_obj_is_root(parent)))
+ return -EINVAL;
+
delta_priv = objagg->ops->delta_create(objagg->priv, parent->obj,
objagg_obj->obj);
if (IS_ERR(delta_priv))
@@ -421,7 +424,7 @@ static struct objagg_obj *__objagg_obj_get(struct objagg *objagg, void *obj)
*
* There are 3 main options this function wraps:
 * 1) The object according to "obj" already exists. In that case
- * the reference counter is incrementes and the object is returned.
+ * the reference counter is incremented and the object is returned.
* 2) The object does not exist, but it can be aggregated within
* another object. In that case, user ops->delta_create() is called
* to obtain delta data and a new object is created with returned
@@ -903,20 +906,6 @@ static const struct objagg_opt_algo *objagg_opt_algos[] = {
[OBJAGG_OPT_ALGO_SIMPLE_GREEDY] = &objagg_opt_simple_greedy,
};
-static int objagg_hints_obj_cmp(struct rhashtable_compare_arg *arg,
- const void *obj)
-{
- struct rhashtable *ht = arg->ht;
- struct objagg_hints *objagg_hints =
- container_of(ht, struct objagg_hints, node_ht);
- const struct objagg_ops *ops = objagg_hints->ops;
- const char *ptr = obj;
-
- ptr += ht->p.key_offset;
- return ops->hints_obj_cmp ? ops->hints_obj_cmp(ptr, arg->key) :
- memcmp(ptr, arg->key, ht->p.key_len);
-}
-
/**
* objagg_hints_get - obtains hints instance
* @objagg: objagg instance
@@ -955,7 +944,6 @@ struct objagg_hints *objagg_hints_get(struct objagg *objagg,
offsetof(struct objagg_hints_node, obj);
objagg_hints->ht_params.head_offset =
offsetof(struct objagg_hints_node, ht_node);
- objagg_hints->ht_params.obj_cmpfn = objagg_hints_obj_cmp;
err = rhashtable_init(&objagg_hints->node_ht, &objagg_hints->ht_params);
if (err)
diff --git a/lib/objpool.c b/lib/objpool.c
index cfdc02420884..234f9d0bd081 100644
--- a/lib/objpool.c
+++ b/lib/objpool.c
@@ -50,7 +50,7 @@ objpool_init_percpu_slots(struct objpool_head *pool, int nr_objs,
{
int i, cpu_count = 0;
- for (i = 0; i < pool->nr_cpus; i++) {
+ for (i = 0; i < nr_cpu_ids; i++) {
struct objpool_slot *slot;
int nodes, size, rc;
@@ -60,8 +60,8 @@ objpool_init_percpu_slots(struct objpool_head *pool, int nr_objs,
continue;
/* compute how many objects to be allocated with this slot */
- nodes = nr_objs / num_possible_cpus();
- if (cpu_count < (nr_objs % num_possible_cpus()))
+ nodes = nr_objs / pool->nr_possible_cpus;
+ if (cpu_count < (nr_objs % pool->nr_possible_cpus))
nodes++;
cpu_count++;
@@ -103,7 +103,7 @@ static void objpool_fini_percpu_slots(struct objpool_head *pool)
if (!pool->cpu_slots)
return;
- for (i = 0; i < pool->nr_cpus; i++)
+ for (i = 0; i < nr_cpu_ids; i++)
kvfree(pool->cpu_slots[i]);
kfree(pool->cpu_slots);
}
@@ -130,13 +130,13 @@ int objpool_init(struct objpool_head *pool, int nr_objs, int object_size,
/* initialize objpool pool */
memset(pool, 0, sizeof(struct objpool_head));
- pool->nr_cpus = nr_cpu_ids;
+ pool->nr_possible_cpus = num_possible_cpus();
pool->obj_size = object_size;
pool->capacity = capacity;
pool->gfp = gfp & ~__GFP_ZERO;
pool->context = context;
pool->release = release;
- slot_size = pool->nr_cpus * sizeof(struct objpool_slot);
+ slot_size = nr_cpu_ids * sizeof(struct objpool_slot);
pool->cpu_slots = kzalloc(slot_size, pool->gfp);
if (!pool->cpu_slots)
return -ENOMEM;
@@ -152,106 +152,6 @@ int objpool_init(struct objpool_head *pool, int nr_objs, int object_size,
}
EXPORT_SYMBOL_GPL(objpool_init);
-/* adding object to slot, abort if the slot was already full */
-static inline int
-objpool_try_add_slot(void *obj, struct objpool_head *pool, int cpu)
-{
- struct objpool_slot *slot = pool->cpu_slots[cpu];
- uint32_t head, tail;
-
- /* loading tail and head as a local snapshot, tail first */
- tail = READ_ONCE(slot->tail);
-
- do {
- head = READ_ONCE(slot->head);
- /* fault caught: something must be wrong */
- WARN_ON_ONCE(tail - head > pool->nr_objs);
- } while (!try_cmpxchg_acquire(&slot->tail, &tail, tail + 1));
-
- /* now the tail position is reserved for the given obj */
- WRITE_ONCE(slot->entries[tail & slot->mask], obj);
- /* update sequence to make this obj available for pop() */
- smp_store_release(&slot->last, tail + 1);
-
- return 0;
-}
-
-/* reclaim an object to object pool */
-int objpool_push(void *obj, struct objpool_head *pool)
-{
- unsigned long flags;
- int rc;
-
- /* disable local irq to avoid preemption & interruption */
- raw_local_irq_save(flags);
- rc = objpool_try_add_slot(obj, pool, raw_smp_processor_id());
- raw_local_irq_restore(flags);
-
- return rc;
-}
-EXPORT_SYMBOL_GPL(objpool_push);
-
-/* try to retrieve object from slot */
-static inline void *objpool_try_get_slot(struct objpool_head *pool, int cpu)
-{
- struct objpool_slot *slot = pool->cpu_slots[cpu];
- /* load head snapshot, other cpus may change it */
- uint32_t head = smp_load_acquire(&slot->head);
-
- while (head != READ_ONCE(slot->last)) {
- void *obj;
-
- /*
- * data visibility of 'last' and 'head' could be out of
- * order since memory updating of 'last' and 'head' are
- * performed in push() and pop() independently
- *
- * before any retrieving attempts, pop() must guarantee
- * 'last' is behind 'head', that is to say, there must
- * be available objects in slot, which could be ensured
- * by condition 'last != head && last - head <= nr_objs'
- * that is equivalent to 'last - head - 1 < nr_objs' as
- * 'last' and 'head' are both unsigned int32
- */
- if (READ_ONCE(slot->last) - head - 1 >= pool->nr_objs) {
- head = READ_ONCE(slot->head);
- continue;
- }
-
- /* obj must be retrieved before moving forward head */
- obj = READ_ONCE(slot->entries[head & slot->mask]);
-
- /* move head forward to mark it's consumption */
- if (try_cmpxchg_release(&slot->head, &head, head + 1))
- return obj;
- }
-
- return NULL;
-}
-
-/* allocate an object from object pool */
-void *objpool_pop(struct objpool_head *pool)
-{
- void *obj = NULL;
- unsigned long flags;
- int i, cpu;
-
- /* disable local irq to avoid preemption & interruption */
- raw_local_irq_save(flags);
-
- cpu = raw_smp_processor_id();
- for (i = 0; i < num_possible_cpus(); i++) {
- obj = objpool_try_get_slot(pool, cpu);
- if (obj)
- break;
- cpu = cpumask_next_wrap(cpu, cpu_possible_mask, -1, 1);
- }
- raw_local_irq_restore(flags);
-
- return obj;
-}
-EXPORT_SYMBOL_GPL(objpool_pop);
-
/* forcibly release the whole objpool */
void objpool_free(struct objpool_head *pool)
{
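Note on the sizing change at the top of this file's diff: nr_cpu_ids bounds the cpu_slots[] array (highest possible CPU number plus one), while num_possible_cpus() counts how many CPUs can ever come online; on a sparse cpu_possible_mask the two differ. A toy illustration (plain C, not kernel code):

	#include <stdio.h>

	int main(void)
	{
		unsigned long possible_mask = 0xa; /* CPUs 1 and 3 possible */
		int nr_cpu_ids = 4;                /* highest bit + 1: array bound */
		int num_possible = __builtin_popcountl(possible_mask); /* == 2 */
		int nr_objs = 8;

		/* Objects are spread over 2 possible CPUs, across 4 slots. */
		printf("%d objs per possible CPU (array of %d slots)\n",
		       nr_objs / num_possible, nr_cpu_ids);
		return 0;
	}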
diff --git a/lib/overflow_kunit.c b/lib/overflow_kunit.c
index 4ef31b0bb74d..f314a0c15a6d 100644
--- a/lib/overflow_kunit.c
+++ b/lib/overflow_kunit.c
@@ -1178,14 +1178,28 @@ struct foo {
s16 array[] __counted_by(counter);
};
+struct bar {
+ int a;
+ u32 counter;
+ s16 array[];
+};
+
static void DEFINE_FLEX_test(struct kunit *test)
{
- DEFINE_RAW_FLEX(struct foo, two, array, 2);
+ /* Using _RAW_ on a __counted_by struct will initialize "counter" to zero */
+ DEFINE_RAW_FLEX(struct foo, two_but_zero, array, 2);
+#if __has_attribute(__counted_by__)
+ int expected_raw_size = sizeof(struct foo);
+#else
+ int expected_raw_size = sizeof(struct foo) + 2 * sizeof(s16);
+#endif
+ /* Without annotation, it will always be on-stack size. */
+ DEFINE_RAW_FLEX(struct bar, two, array, 2);
DEFINE_FLEX(struct foo, eight, array, counter, 8);
DEFINE_FLEX(struct foo, empty, array, counter, 0);
- KUNIT_EXPECT_EQ(test, __struct_size(two),
- sizeof(struct foo) + sizeof(s16) + sizeof(s16));
+ KUNIT_EXPECT_EQ(test, __struct_size(two_but_zero), expected_raw_size);
+ KUNIT_EXPECT_EQ(test, __struct_size(two), sizeof(struct bar) + 2 * sizeof(s16));
KUNIT_EXPECT_EQ(test, __struct_size(eight), 24);
KUNIT_EXPECT_EQ(test, __struct_size(empty), sizeof(struct foo));
}
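Note: the size arithmetic behind the expectations above, as a sketch (the first value depends on compiler __counted_by support, which is what the #if distinguishes; struct layouts assume int a + u32 counter as shown for struct bar):

	/*
	 * sizeof(struct foo) == 8: int a + u32 counter, flexible array adds 0.
	 * "two_but_zero": 8 with __counted_by (counter == 0), else 8 + 2 * 2.
	 * "eight":        8 + 8 * sizeof(s16) == 24.
	 * "two":          sizeof(struct bar) + 2 * sizeof(s16), always.
	 */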
@@ -1223,4 +1237,5 @@ static struct kunit_suite overflow_test_suite = {
kunit_test_suite(overflow_test_suite);
+MODULE_DESCRIPTION("Test cases for arithmetic overflow checks");
MODULE_LICENSE("Dual MIT/GPL");
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 44dd133594d4..51bc5246986d 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -73,17 +73,50 @@ void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
EXPORT_SYMBOL(percpu_counter_set);
/*
- * local_irq_save() is needed to make the function irq safe:
- * - The slow path would be ok as protected by an irq-safe spinlock.
- * - this_cpu_add would be ok as it is irq-safe by definition.
- * But:
- * The decision slow path/fast path and the actual update must be atomic, too.
+ * Add to a counter while respecting batch size.
+ *
+ * There are 2 implementations, both dealing with the following problem:
+ *
+ * The decision slow path/fast path and the actual update must be atomic.
* Otherwise a call in process context could check the current values and
* decide that the fast path can be used. If now an interrupt occurs before
* the this_cpu_add(), and the interrupt updates this_cpu(*fbc->counters),
* then the this_cpu_add() that is executed after the interrupt has completed
* can produce values larger than "batch" or even overflows.
*/
+#ifdef CONFIG_HAVE_CMPXCHG_LOCAL
+/*
+ * Safety against interrupts is achieved in 2 ways:
+ * 1. the fast path uses local cmpxchg (note: no lock prefix)
+ * 2. the slow path operates with interrupts disabled
+ */
+void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
+{
+ s64 count;
+ unsigned long flags;
+
+ count = this_cpu_read(*fbc->counters);
+ do {
+ if (unlikely(abs(count + amount) >= batch)) {
+ raw_spin_lock_irqsave(&fbc->lock, flags);
+ /*
+ * Note: by now we might have migrated to another CPU
+ * or the value might have changed.
+ */
+ count = __this_cpu_read(*fbc->counters);
+ fbc->count += count + amount;
+ __this_cpu_sub(*fbc->counters, count);
+ raw_spin_unlock_irqrestore(&fbc->lock, flags);
+ return;
+ }
+ } while (!this_cpu_try_cmpxchg(*fbc->counters, &count, count + amount));
+}
+#else
+/*
+ * local_irq_save() is used to make the function irq safe:
+ * - The slow path would be ok as protected by an irq-safe spinlock.
+ * - this_cpu_add would be ok as it is irq-safe by definition.
+ */
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
s64 count;
@@ -101,6 +134,7 @@ void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
}
local_irq_restore(flags);
}
+#endif
EXPORT_SYMBOL(percpu_counter_add_batch);
/*
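Note: a hypothetical usage sketch of the API exercised above; the batch keeps hot updates on the per-CPU fast path and folds them into fbc->count only when a CPU's local delta reaches the batch size:

	static struct percpu_counter nr_things;

	static int __init things_init(void)
	{
		int err = percpu_counter_init(&nr_things, 0, GFP_KERNEL);

		if (err)
			return err;
		percpu_counter_add_batch(&nr_things, 1, 32);	/* fast path */
		pr_info("approx: %lld\n", percpu_counter_read(&nr_things));
		percpu_counter_destroy(&nr_things);
		return 0;
	}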
diff --git a/lib/plist.c b/lib/plist.c
index 0d86ed7a76ac..c6bce1226874 100644
--- a/lib/plist.c
+++ b/lib/plist.c
@@ -47,8 +47,8 @@ static void plist_check_list(struct list_head *top)
plist_check_prev_next(top, prev, next);
while (next != top) {
- prev = next;
- next = prev->next;
+ WRITE_ONCE(prev, next);
+ WRITE_ONCE(next, prev->next);
plist_check_prev_next(top, prev, next);
}
}
@@ -72,7 +72,7 @@ static void plist_check_head(struct plist_head *head)
*/
void plist_add(struct plist_node *node, struct plist_head *head)
{
- struct plist_node *first, *iter, *prev = NULL;
+ struct plist_node *first, *iter, *prev = NULL, *last, *reverse_iter;
struct list_head *node_next = &head->node_list;
plist_check_head(head);
@@ -83,16 +83,26 @@ void plist_add(struct plist_node *node, struct plist_head *head)
goto ins_node;
first = iter = plist_first(head);
+ last = reverse_iter = list_entry(first->prio_list.prev, struct plist_node, prio_list);
do {
if (node->prio < iter->prio) {
node_next = &iter->node_list;
break;
+ } else if (node->prio >= reverse_iter->prio) {
+ prev = reverse_iter;
+ iter = list_entry(reverse_iter->prio_list.next,
+ struct plist_node, prio_list);
+ if (likely(reverse_iter != last))
+ node_next = &iter->node_list;
+ break;
}
prev = iter;
iter = list_entry(iter->prio_list.next,
struct plist_node, prio_list);
+ reverse_iter = list_entry(reverse_iter->prio_list.prev,
+ struct plist_node, prio_list);
} while (iter != first);
if (!prev || prev->prio != node->prio)
@@ -255,6 +265,32 @@ static int __init plist_test(void)
}
printk(KERN_DEBUG "end plist test\n");
+
+ /* Worst case test for plist_add() */
+ unsigned int test_data[241];
+
+ for (i = 0; i < ARRAY_SIZE(test_data); i++)
+ test_data[i] = i;
+
+ ktime_t start, end, time_elapsed = 0;
+
+ plist_head_init(&test_head);
+
+ for (i = 0; i < ARRAY_SIZE(test_node); i++) {
+ plist_node_init(test_node + i, 0);
+ test_node[i].prio = test_data[i];
+ }
+
+ for (i = 0; i < ARRAY_SIZE(test_node); i++) {
+ if (plist_node_empty(test_node + i)) {
+ start = ktime_get();
+ plist_add(test_node + i, &test_head);
+ end = ktime_get();
+ time_elapsed += (end - start);
+ }
+ }
+
+ pr_debug("plist_add worst case test time elapsed %lld\n", time_elapsed);
return 0;
}
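Note: the plist_add() change above walks the distinct-priority list from both ends at once, so an insertion point is found in at most half the comparisons. A toy illustration of the same two-pointer idea over a sorted array (plain C, not kernel code):

	#include <stdio.h>

	static int find_slot(const int *prio, int n, int key)
	{
		int fwd = 0, rev = n - 1;

		while (fwd <= rev) {
			if (key < prio[fwd])
				return fwd;	/* insert before fwd */
			if (key >= prio[rev])
				return rev + 1;	/* insert after rev */
			fwd++;
			rev--;
		}
		return fwd;
	}

	int main(void)
	{
		int prio[] = { 1, 3, 5, 7, 9 };

		printf("%d\n", find_slot(prio, 5, 6));	/* prints 3 */
		return 0;
	}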
diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile
index 385a94aa0b99..29127dd05d63 100644
--- a/lib/raid6/Makefile
+++ b/lib/raid6/Makefile
@@ -33,27 +33,8 @@ CFLAGS_REMOVE_vpermxor8.o += -msoft-float
endif
endif
-# The GCC option -ffreestanding is required in order to compile code containing
-# ARM/NEON intrinsics in a non C99-compliant environment (such as the kernel)
-ifeq ($(CONFIG_KERNEL_MODE_NEON),y)
-NEON_FLAGS := -ffreestanding
-# Enable <arm_neon.h>
-NEON_FLAGS += -isystem $(shell $(CC) -print-file-name=include)
-ifeq ($(ARCH),arm)
-NEON_FLAGS += -march=armv7-a -mfloat-abi=softfp -mfpu=neon
-endif
-CFLAGS_recov_neon_inner.o += $(NEON_FLAGS)
-ifeq ($(ARCH),arm64)
-CFLAGS_REMOVE_recov_neon_inner.o += -mgeneral-regs-only
-CFLAGS_REMOVE_neon1.o += -mgeneral-regs-only
-CFLAGS_REMOVE_neon2.o += -mgeneral-regs-only
-CFLAGS_REMOVE_neon4.o += -mgeneral-regs-only
-CFLAGS_REMOVE_neon8.o += -mgeneral-regs-only
-endif
-endif
-
quiet_cmd_unroll = UNROLL $@
- cmd_unroll = $(AWK) -v N=$* -f $(srctree)/$(src)/unroll.awk < $< > $@
+ cmd_unroll = $(AWK) -v N=$* -f $(src)/unroll.awk < $< > $@
targets += int1.c int2.c int4.c int8.c
$(obj)/int%.c: $(src)/int.uc $(src)/unroll.awk FORCE
@@ -75,10 +56,16 @@ targets += vpermxor1.c vpermxor2.c vpermxor4.c vpermxor8.c
$(obj)/vpermxor%.c: $(src)/vpermxor.uc $(src)/unroll.awk FORCE
$(call if_changed,unroll)
-CFLAGS_neon1.o += $(NEON_FLAGS)
-CFLAGS_neon2.o += $(NEON_FLAGS)
-CFLAGS_neon4.o += $(NEON_FLAGS)
-CFLAGS_neon8.o += $(NEON_FLAGS)
+CFLAGS_neon1.o += $(CC_FLAGS_FPU)
+CFLAGS_neon2.o += $(CC_FLAGS_FPU)
+CFLAGS_neon4.o += $(CC_FLAGS_FPU)
+CFLAGS_neon8.o += $(CC_FLAGS_FPU)
+CFLAGS_recov_neon_inner.o += $(CC_FLAGS_FPU)
+CFLAGS_REMOVE_neon1.o += $(CC_FLAGS_NO_FPU)
+CFLAGS_REMOVE_neon2.o += $(CC_FLAGS_NO_FPU)
+CFLAGS_REMOVE_neon4.o += $(CC_FLAGS_NO_FPU)
+CFLAGS_REMOVE_neon8.o += $(CC_FLAGS_NO_FPU)
+CFLAGS_REMOVE_recov_neon_inner.o += $(CC_FLAGS_NO_FPU)
targets += neon1.c neon2.c neon4.c neon8.c
$(obj)/neon%.c: $(src)/neon.uc $(src)/unroll.awk FORCE
$(call if_changed,unroll)
diff --git a/lib/rbtree.c b/lib/rbtree.c
index 5114eda6309c..989c2d615f92 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -297,9 +297,9 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root,
* / \ / \
* N S --> N sl
* / \ \
- * sl Sr S
+ * sl sr S
* \
- * Sr
+ * sr
*
* Note: p might be red, and then both
* p and sl are red after rotation(which
@@ -312,9 +312,9 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root,
* / \ / \
* N sl --> P S
* \ / \
- * S N Sr
+ * S N sr
* \
- * Sr
+ * sr
*/
tmp1 = tmp2->rb_right;
WRITE_ONCE(sibling->rb_left, tmp1);
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 6ae2ba8e06a2..dbbed19f8fff 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -130,7 +130,8 @@ static union nested_table *nested_table_alloc(struct rhashtable *ht,
if (ntbl)
return ntbl;
- ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC);
+ ntbl = alloc_hooks_tag(ht->alloc_tag,
+ kmalloc_noprof(PAGE_SIZE, GFP_ATOMIC|__GFP_ZERO));
if (ntbl && leaf) {
for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0]); i++)
@@ -157,7 +158,8 @@ static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
size = sizeof(*tbl) + sizeof(tbl->buckets[0]);
- tbl = kzalloc(size, gfp);
+ tbl = alloc_hooks_tag(ht->alloc_tag,
+ kmalloc_noprof(size, gfp|__GFP_ZERO));
if (!tbl)
return NULL;
@@ -181,7 +183,9 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
int i;
static struct lock_class_key __key;
- tbl = kvzalloc(struct_size(tbl, buckets, nbuckets), gfp);
+ tbl = alloc_hooks_tag(ht->alloc_tag,
+ kvmalloc_node_noprof(struct_size(tbl, buckets, nbuckets),
+ gfp|__GFP_ZERO, NUMA_NO_NODE));
size = nbuckets;
@@ -1016,7 +1020,7 @@ static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
* .obj_hashfn = my_hash_fn,
* };
*/
-int rhashtable_init(struct rhashtable *ht,
+int rhashtable_init_noprof(struct rhashtable *ht,
const struct rhashtable_params *params)
{
struct bucket_table *tbl;
@@ -1031,6 +1035,8 @@ int rhashtable_init(struct rhashtable *ht,
spin_lock_init(&ht->lock);
memcpy(&ht->p, params, sizeof(*params));
+ alloc_tag_record(ht->alloc_tag);
+
if (params->min_size)
ht->p.min_size = roundup_pow_of_two(params->min_size);
@@ -1076,7 +1082,7 @@ int rhashtable_init(struct rhashtable *ht,
return 0;
}
-EXPORT_SYMBOL_GPL(rhashtable_init);
+EXPORT_SYMBOL_GPL(rhashtable_init_noprof);
/**
* rhltable_init - initialize a new hash list table
@@ -1087,15 +1093,15 @@ EXPORT_SYMBOL_GPL(rhashtable_init);
*
* See documentation for rhashtable_init.
*/
-int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params)
+int rhltable_init_noprof(struct rhltable *hlt, const struct rhashtable_params *params)
{
int err;
- err = rhashtable_init(&hlt->ht, params);
+ err = rhashtable_init_noprof(&hlt->ht, params);
hlt->ht.rhlist = true;
return err;
}
-EXPORT_SYMBOL_GPL(rhltable_init);
+EXPORT_SYMBOL_GPL(rhltable_init_noprof);
static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj,
void (*free_fn)(void *ptr, void *arg),
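Note: callers presumably keep the old names via the alloc_hooks() pattern used throughout the memory allocation profiling series, so these allocations get charged to the call site's tag. A sketch of the assumed header side (not shown in this diff; see <linux/alloc_tag.h>):

	#define rhashtable_init(...) \
		alloc_hooks(rhashtable_init_noprof(__VA_ARGS__))
	#define rhltable_init(...) \
		alloc_hooks(rhltable_init_noprof(__VA_ARGS__))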
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 92c6b1fd8989..5e2e93307f0d 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -60,12 +60,30 @@ static inline void update_alloc_hint_after_get(struct sbitmap *sb,
/*
* See if we have deferred clears that we can batch move
*/
-static inline bool sbitmap_deferred_clear(struct sbitmap_word *map)
+static inline bool sbitmap_deferred_clear(struct sbitmap_word *map,
+ unsigned int depth, unsigned int alloc_hint, bool wrap)
{
- unsigned long mask;
+ unsigned long mask, word_mask;
- if (!READ_ONCE(map->cleared))
- return false;
+ guard(spinlock_irqsave)(&map->swap_lock);
+
+ if (!map->cleared) {
+ if (depth == 0)
+ return false;
+
+ word_mask = (~0UL) >> (BITS_PER_LONG - depth);
+ /*
+		 * Historically this always retried after moving ->cleared
+		 * into the word; it now retries only when the word still
+		 * has free bits. To avoid an infinite loop, wrap and
+		 * alloc_hint must be taken into account here, otherwise a
+		 * soft lockup may occur.
+ */
+ if (!wrap && alloc_hint)
+ word_mask &= ~((1UL << alloc_hint) - 1);
+
+ return (READ_ONCE(map->word) & word_mask) != word_mask;
+ }
/*
* First get a stable cleared mask, setting the old mask to 0.
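Note: a worked example of the retry mask built in the hunk above, assuming BITS_PER_LONG == 64, depth == 8, alloc_hint == 3 and wrap == false:

	/*
	 * word_mask = ~0UL >> (64 - 8);	-> 0xff (low 8 bits)
	 * word_mask &= ~((1UL << 3) - 1);	-> 0xf8 (ignore bits 0-2)
	 *
	 * The function then retries only if some bit in 3..7 is still
	 * clear in map->word.
	 */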
@@ -85,6 +103,7 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
bool alloc_hint)
{
unsigned int bits_per_word;
+ int i;
if (shift < 0)
shift = sbitmap_calculate_shift(depth);
@@ -116,6 +135,9 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
return -ENOMEM;
}
+ for (i = 0; i < sb->map_nr; i++)
+ spin_lock_init(&sb->map[i].swap_lock);
+
return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_init_node);
@@ -126,7 +148,7 @@ void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
unsigned int i;
for (i = 0; i < sb->map_nr; i++)
- sbitmap_deferred_clear(&sb->map[i]);
+ sbitmap_deferred_clear(&sb->map[i], 0, 0, 0);
sb->depth = depth;
sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
@@ -179,7 +201,7 @@ static int sbitmap_find_bit_in_word(struct sbitmap_word *map,
alloc_hint, wrap);
if (nr != -1)
break;
- if (!sbitmap_deferred_clear(map))
+ if (!sbitmap_deferred_clear(map, depth, alloc_hint, wrap))
break;
} while (1);
@@ -494,18 +516,18 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
struct sbitmap_word *map = &sb->map[index];
unsigned long get_mask;
unsigned int map_depth = __map_depth(sb, index);
+ unsigned long val;
- sbitmap_deferred_clear(map);
- if (map->word == (1UL << (map_depth - 1)) - 1)
+ sbitmap_deferred_clear(map, 0, 0, 0);
+ val = READ_ONCE(map->word);
+ if (val == (1UL << (map_depth - 1)) - 1)
goto next;
- nr = find_first_zero_bit(&map->word, map_depth);
+ nr = find_first_zero_bit(&val, map_depth);
if (nr + nr_tags <= map_depth) {
atomic_long_t *ptr = (atomic_long_t *) &map->word;
- unsigned long val;
get_mask = ((1UL << nr_tags) - 1) << nr;
- val = READ_ONCE(map->word);
while (!atomic_long_try_cmpxchg(ptr, &val,
get_mask | val))
;
diff --git a/lib/siphash_kunit.c b/lib/siphash_kunit.c
index a3c697e8be35..26bd4e8dc03e 100644
--- a/lib/siphash_kunit.c
+++ b/lib/siphash_kunit.c
@@ -194,4 +194,5 @@ static struct kunit_suite siphash_test_suite = {
kunit_test_suite(siphash_test_suite);
MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
+MODULE_DESCRIPTION("Test cases for siphash.c");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/lib/slub_kunit.c b/lib/slub_kunit.c
index d4a3730b08fa..e6667a28c014 100644
--- a/lib/slub_kunit.c
+++ b/lib/slub_kunit.c
@@ -55,7 +55,7 @@ static void test_next_pointer(struct kunit *test)
ptr_addr = (unsigned long *)(p + s->offset);
tmp = *ptr_addr;
- p[s->offset] = 0x12;
+ p[s->offset] = ~p[s->offset];
/*
* Expecting three errors.
@@ -140,7 +140,7 @@ static void test_kmalloc_redzone_access(struct kunit *test)
{
struct kmem_cache *s = test_kmem_cache_create("TestSlub_RZ_kmalloc", 32,
SLAB_KMALLOC|SLAB_STORE_USER|SLAB_RED_ZONE);
- u8 *p = kmalloc_trace(s, GFP_KERNEL, 18);
+ u8 *p = __kmalloc_cache_noprof(s, GFP_KERNEL, 18);
kasan_disable_current();
diff --git a/lib/sort.c b/lib/sort.c
index a0509088f82a..048b7a6ef967 100644
--- a/lib/sort.c
+++ b/lib/sort.c
@@ -5,13 +5,11 @@
* This performs n*log2(n) + 0.37*n + o(n) comparisons on average,
* and 1.5*n*log2(n) + O(n) in the (very contrived) worst case.
*
- * Glibc qsort() manages n*log2(n) - 1.26*n for random inputs (1.63*n
+ * Quicksort manages n*log2(n) - 1.26*n for random inputs (1.63*n
* better) at the expense of stack usage and much larger code to avoid
* quicksort's O(n^2) worst case.
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/types.h>
#include <linux/export.h>
#include <linux/sort.h>
@@ -252,10 +250,7 @@ void sort_r(void *base, size_t num, size_t size,
a = size << shift;
n -= size;
do_swap(base + a, base + n, size, swap_func, priv);
- } else if (n > size) { /* Sorting: Extract root */
- n -= size;
- do_swap(base, base + n, size, swap_func, priv);
- } else { /* Sort complete */
+ } else { /* Sort complete */
break;
}
@@ -285,6 +280,11 @@ void sort_r(void *base, size_t num, size_t size,
do_swap(base + b, base + c, size, swap_func, priv);
}
}
+
+ n -= size;
+ do_swap(base, base + n, size, swap_func, priv);
+ if (n == size * 2 && do_cmp(base, base + size, cmp_func, priv) > 0)
+ do_swap(base, base + size, size, swap_func, priv);
}
EXPORT_SYMBOL(sort_r);
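Note: as far as these two hunks show, the "Sorting: Extract root" branch moved from the top of the loop to after the sift-down code, and a new special case finishes the last two elements with one comparison instead of another sift:

	/*
	 * Before: a separate (n > size) branch extracted the root before
	 *         the sift-down code ran.
	 * After:  the root is swapped into base[n] after sifting, and when
	 *         n == size * 2 a single compare-and-swap of the remaining
	 *         pair completes the sort.
	 */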
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index cd8f23455285..5ed34cc963fc 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -624,15 +624,8 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
* we won't be able to do that under the lock.
*/
if (unlikely(can_alloc && !READ_ONCE(new_pool))) {
- /*
- * Zero out zone modifiers, as we don't have specific zone
- * requirements. Keep the flags related to allocation in atomic
- * contexts, I/O, nolockdep.
- */
- alloc_flags &= ~GFP_ZONEMASK;
- alloc_flags &= (GFP_ATOMIC | GFP_KERNEL | __GFP_NOLOCKDEP);
- alloc_flags |= __GFP_NOWARN;
- page = alloc_pages(alloc_flags, DEPOT_POOL_ORDER);
+ page = alloc_pages(gfp_nested_mask(alloc_flags),
+ DEPOT_POOL_ORDER);
if (page)
prealloc = page_address(page);
}
diff --git a/lib/stackinit_kunit.c b/lib/stackinit_kunit.c
index 3bc14d1ee816..c14c6f8e6308 100644
--- a/lib/stackinit_kunit.c
+++ b/lib/stackinit_kunit.c
@@ -471,4 +471,5 @@ static struct kunit_suite stackinit_test_suite = {
kunit_test_suites(&stackinit_test_suite);
+MODULE_DESCRIPTION("Test cases for compiler-based stack variable zeroing");
MODULE_LICENSE("GPL");
diff --git a/lib/strcat_kunit.c b/lib/strcat_kunit.c
deleted file mode 100644
index e21be95514af..000000000000
--- a/lib/strcat_kunit.c
+++ /dev/null
@@ -1,104 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Kernel module for testing 'strcat' family of functions.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <kunit/test.h>
-#include <linux/string.h>
-
-static volatile int unconst;
-
-static void strcat_test(struct kunit *test)
-{
- char dest[8];
-
- /* Destination is terminated. */
- memset(dest, 0, sizeof(dest));
- KUNIT_EXPECT_EQ(test, strlen(dest), 0);
- /* Empty copy does nothing. */
- KUNIT_EXPECT_TRUE(test, strcat(dest, "") == dest);
- KUNIT_EXPECT_STREQ(test, dest, "");
- /* 4 characters copied in, stops at %NUL. */
- KUNIT_EXPECT_TRUE(test, strcat(dest, "four\000123") == dest);
- KUNIT_EXPECT_STREQ(test, dest, "four");
- KUNIT_EXPECT_EQ(test, dest[5], '\0');
- /* 2 more characters copied in okay. */
- KUNIT_EXPECT_TRUE(test, strcat(dest, "AB") == dest);
- KUNIT_EXPECT_STREQ(test, dest, "fourAB");
-}
-
-static void strncat_test(struct kunit *test)
-{
- char dest[8];
-
- /* Destination is terminated. */
- memset(dest, 0, sizeof(dest));
- KUNIT_EXPECT_EQ(test, strlen(dest), 0);
- /* Empty copy of size 0 does nothing. */
- KUNIT_EXPECT_TRUE(test, strncat(dest, "", 0 + unconst) == dest);
- KUNIT_EXPECT_STREQ(test, dest, "");
- /* Empty copy of size 1 does nothing too. */
- KUNIT_EXPECT_TRUE(test, strncat(dest, "", 1 + unconst) == dest);
- KUNIT_EXPECT_STREQ(test, dest, "");
- /* Copy of max 0 characters should do nothing. */
- KUNIT_EXPECT_TRUE(test, strncat(dest, "asdf", 0 + unconst) == dest);
- KUNIT_EXPECT_STREQ(test, dest, "");
-
- /* 4 characters copied in, even if max is 8. */
- KUNIT_EXPECT_TRUE(test, strncat(dest, "four\000123", 8 + unconst) == dest);
- KUNIT_EXPECT_STREQ(test, dest, "four");
- KUNIT_EXPECT_EQ(test, dest[5], '\0');
- KUNIT_EXPECT_EQ(test, dest[6], '\0');
- /* 2 characters copied in okay, 2 ignored. */
- KUNIT_EXPECT_TRUE(test, strncat(dest, "ABCD", 2 + unconst) == dest);
- KUNIT_EXPECT_STREQ(test, dest, "fourAB");
-}
-
-static void strlcat_test(struct kunit *test)
-{
- char dest[8] = "";
- int len = sizeof(dest) + unconst;
-
- /* Destination is terminated. */
- KUNIT_EXPECT_EQ(test, strlen(dest), 0);
- /* Empty copy is size 0. */
- KUNIT_EXPECT_EQ(test, strlcat(dest, "", len), 0);
- KUNIT_EXPECT_STREQ(test, dest, "");
- /* Size 1 should keep buffer terminated, report size of source only. */
- KUNIT_EXPECT_EQ(test, strlcat(dest, "four", 1 + unconst), 4);
- KUNIT_EXPECT_STREQ(test, dest, "");
-
- /* 4 characters copied in. */
- KUNIT_EXPECT_EQ(test, strlcat(dest, "four", len), 4);
- KUNIT_EXPECT_STREQ(test, dest, "four");
- /* 2 characters copied in okay, gets to 6 total. */
- KUNIT_EXPECT_EQ(test, strlcat(dest, "AB", len), 6);
- KUNIT_EXPECT_STREQ(test, dest, "fourAB");
- /* 2 characters ignored if max size (7) reached. */
- KUNIT_EXPECT_EQ(test, strlcat(dest, "CD", 7 + unconst), 8);
- KUNIT_EXPECT_STREQ(test, dest, "fourAB");
- /* 1 of 2 characters skipped, now at true max size. */
- KUNIT_EXPECT_EQ(test, strlcat(dest, "EFG", len), 9);
- KUNIT_EXPECT_STREQ(test, dest, "fourABE");
- /* Everything else ignored, now at full size. */
- KUNIT_EXPECT_EQ(test, strlcat(dest, "1234", len), 11);
- KUNIT_EXPECT_STREQ(test, dest, "fourABE");
-}
-
-static struct kunit_case strcat_test_cases[] = {
- KUNIT_CASE(strcat_test),
- KUNIT_CASE(strncat_test),
- KUNIT_CASE(strlcat_test),
- {}
-};
-
-static struct kunit_suite strcat_test_suite = {
- .name = "strcat",
- .test_cases = strcat_test_cases,
-};
-
-kunit_test_suite(strcat_test_suite);
-
-MODULE_LICENSE("GPL");
diff --git a/lib/string_helpers_kunit.c b/lib/string_helpers_kunit.c
index f88e39fd68d6..c853046183d2 100644
--- a/lib/string_helpers_kunit.c
+++ b/lib/string_helpers_kunit.c
@@ -625,4 +625,5 @@ static struct kunit_suite string_helpers_test_suite = {
kunit_test_suites(&string_helpers_test_suite);
+MODULE_DESCRIPTION("Test cases for string helpers module");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/lib/string_kunit.c b/lib/string_kunit.c
index eabf025cf77c..c919e3293da6 100644
--- a/lib/string_kunit.c
+++ b/lib/string_kunit.c
@@ -11,7 +11,13 @@
#include <linux/slab.h>
#include <linux/string.h>
-static void test_memset16(struct kunit *test)
+#define STRCMP_LARGE_BUF_LEN 2048
+#define STRCMP_CHANGE_POINT 1337
+#define STRCMP_TEST_EXPECT_EQUAL(test, fn, ...) KUNIT_EXPECT_EQ(test, fn(__VA_ARGS__), 0)
+#define STRCMP_TEST_EXPECT_LOWER(test, fn, ...) KUNIT_EXPECT_LT(test, fn(__VA_ARGS__), 0)
+#define STRCMP_TEST_EXPECT_GREATER(test, fn, ...) KUNIT_EXPECT_GT(test, fn(__VA_ARGS__), 0)
+
+static void string_test_memset16(struct kunit *test)
{
unsigned i, j, k;
u16 v, *p;
@@ -40,7 +46,7 @@ static void test_memset16(struct kunit *test)
}
}
-static void test_memset32(struct kunit *test)
+static void string_test_memset32(struct kunit *test)
{
unsigned i, j, k;
u32 v, *p;
@@ -69,7 +75,7 @@ static void test_memset32(struct kunit *test)
}
}
-static void test_memset64(struct kunit *test)
+static void string_test_memset64(struct kunit *test)
{
unsigned i, j, k;
u64 v, *p;
@@ -98,7 +104,7 @@ static void test_memset64(struct kunit *test)
}
}
-static void test_strchr(struct kunit *test)
+static void string_test_strchr(struct kunit *test)
{
const char *test_string = "abcdefghijkl";
const char *empty_string = "";
@@ -121,7 +127,7 @@ static void test_strchr(struct kunit *test)
KUNIT_ASSERT_NULL(test, result);
}
-static void test_strnchr(struct kunit *test)
+static void string_test_strnchr(struct kunit *test)
{
const char *test_string = "abcdefghijkl";
const char *empty_string = "";
@@ -154,7 +160,7 @@ static void test_strnchr(struct kunit *test)
KUNIT_ASSERT_NULL(test, result);
}
-static void test_strspn(struct kunit *test)
+static void string_test_strspn(struct kunit *test)
{
static const struct strspn_test {
const char str[16];
@@ -179,13 +185,444 @@ static void test_strspn(struct kunit *test)
}
}
+static char strcmp_buffer1[STRCMP_LARGE_BUF_LEN];
+static char strcmp_buffer2[STRCMP_LARGE_BUF_LEN];
+
+static void strcmp_fill_buffers(char fill1, char fill2)
+{
+ memset(strcmp_buffer1, fill1, STRCMP_LARGE_BUF_LEN);
+ memset(strcmp_buffer2, fill2, STRCMP_LARGE_BUF_LEN);
+ strcmp_buffer1[STRCMP_LARGE_BUF_LEN - 1] = 0;
+ strcmp_buffer2[STRCMP_LARGE_BUF_LEN - 1] = 0;
+}
+
+static void string_test_strcmp(struct kunit *test)
+{
+ /* Equal strings */
+ STRCMP_TEST_EXPECT_EQUAL(test, strcmp, "Hello, Kernel!", "Hello, Kernel!");
+ /* First string is lexicographically less than the second */
+ STRCMP_TEST_EXPECT_LOWER(test, strcmp, "Hello, KUnit!", "Hello, Kernel!");
+ /* First string is lexicographically larger than the second */
+ STRCMP_TEST_EXPECT_GREATER(test, strcmp, "Hello, Kernel!", "Hello, KUnit!");
+ /* Empty string is always lexicographically less than any non-empty string */
+ STRCMP_TEST_EXPECT_LOWER(test, strcmp, "", "Non-empty string");
+ /* Two empty strings should be equal */
+ STRCMP_TEST_EXPECT_EQUAL(test, strcmp, "", "");
+ /* Compare two strings which have only one char difference */
+ STRCMP_TEST_EXPECT_LOWER(test, strcmp, "Abacaba", "Abadaba");
+	/* Compare two strings which have the same prefix */
+ STRCMP_TEST_EXPECT_LOWER(test, strcmp, "Just a string", "Just a string and something else");
+}
+
+static void string_test_strcmp_long_strings(struct kunit *test)
+{
+ strcmp_fill_buffers('B', 'B');
+ STRCMP_TEST_EXPECT_EQUAL(test, strcmp, strcmp_buffer1, strcmp_buffer2);
+
+ strcmp_buffer1[STRCMP_CHANGE_POINT] = 'A';
+ STRCMP_TEST_EXPECT_LOWER(test, strcmp, strcmp_buffer1, strcmp_buffer2);
+
+ strcmp_buffer1[STRCMP_CHANGE_POINT] = 'C';
+ STRCMP_TEST_EXPECT_GREATER(test, strcmp, strcmp_buffer1, strcmp_buffer2);
+}
+
+static void string_test_strncmp(struct kunit *test)
+{
+ /* Equal strings */
+ STRCMP_TEST_EXPECT_EQUAL(test, strncmp, "Hello, KUnit!", "Hello, KUnit!", 13);
+ /* First string is lexicographically less than the second */
+ STRCMP_TEST_EXPECT_LOWER(test, strncmp, "Hello, KUnit!", "Hello, Kernel!", 13);
+ /* Result is always 'equal' when count = 0 */
+ STRCMP_TEST_EXPECT_EQUAL(test, strncmp, "Hello, Kernel!", "Hello, KUnit!", 0);
+ /* Strings with common prefix are equal if count = length of prefix */
+ STRCMP_TEST_EXPECT_EQUAL(test, strncmp, "Abacaba", "Abadaba", 3);
+ /* Strings with common prefix are not equal when count = length of prefix + 1 */
+ STRCMP_TEST_EXPECT_LOWER(test, strncmp, "Abacaba", "Abadaba", 4);
+ /* If one string is a prefix of another, the shorter string is lexicographically smaller */
+ STRCMP_TEST_EXPECT_LOWER(test, strncmp, "Just a string", "Just a string and something else",
+ strlen("Just a string and something else"));
+ /*
+	 * If one string is a prefix of the other and we compare only the
+	 * first prefix-length characters, the result is 'equal'
+ */
+ STRCMP_TEST_EXPECT_EQUAL(test, strncmp, "Just a string", "Just a string and something else",
+ strlen("Just a string"));
+}
+
+static void string_test_strncmp_long_strings(struct kunit *test)
+{
+ strcmp_fill_buffers('B', 'B');
+ STRCMP_TEST_EXPECT_EQUAL(test, strncmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_LARGE_BUF_LEN);
+
+ strcmp_buffer1[STRCMP_CHANGE_POINT] = 'A';
+ STRCMP_TEST_EXPECT_LOWER(test, strncmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_LARGE_BUF_LEN);
+
+ strcmp_buffer1[STRCMP_CHANGE_POINT] = 'C';
+ STRCMP_TEST_EXPECT_GREATER(test, strncmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_LARGE_BUF_LEN);
+ /* the strings are equal up to STRCMP_CHANGE_POINT */
+ STRCMP_TEST_EXPECT_EQUAL(test, strncmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_CHANGE_POINT);
+ STRCMP_TEST_EXPECT_GREATER(test, strncmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_CHANGE_POINT + 1);
+}
+
+static void string_test_strcasecmp(struct kunit *test)
+{
+ /* Same strings in different case should be equal */
+ STRCMP_TEST_EXPECT_EQUAL(test, strcasecmp, "Hello, Kernel!", "HeLLO, KErNeL!");
+ /* Empty strings should be equal */
+ STRCMP_TEST_EXPECT_EQUAL(test, strcasecmp, "", "");
+	/* Although the ASCII code for 'a' is larger than that for 'B', 'a' < 'B' */
+ STRCMP_TEST_EXPECT_LOWER(test, strcasecmp, "a", "B");
+ STRCMP_TEST_EXPECT_GREATER(test, strcasecmp, "B", "a");
+ /* Special symbols and numbers should be processed correctly */
+ STRCMP_TEST_EXPECT_EQUAL(test, strcasecmp, "-+**.1230ghTTT~^", "-+**.1230Ghttt~^");
+}
+
+static void string_test_strcasecmp_long_strings(struct kunit *test)
+{
+ strcmp_fill_buffers('b', 'B');
+ STRCMP_TEST_EXPECT_EQUAL(test, strcasecmp, strcmp_buffer1, strcmp_buffer2);
+
+ strcmp_buffer1[STRCMP_CHANGE_POINT] = 'a';
+ STRCMP_TEST_EXPECT_LOWER(test, strcasecmp, strcmp_buffer1, strcmp_buffer2);
+
+ strcmp_buffer1[STRCMP_CHANGE_POINT] = 'C';
+ STRCMP_TEST_EXPECT_GREATER(test, strcasecmp, strcmp_buffer1, strcmp_buffer2);
+}
+
+static void string_test_strncasecmp(struct kunit *test)
+{
+ /* Same strings in different case should be equal */
+ STRCMP_TEST_EXPECT_EQUAL(test, strncasecmp, "AbAcAbA", "Abacaba", strlen("Abacaba"));
+ /* strncasecmp should check 'count' chars only */
+ STRCMP_TEST_EXPECT_EQUAL(test, strncasecmp, "AbaCaBa", "abaCaDa", 5);
+ STRCMP_TEST_EXPECT_LOWER(test, strncasecmp, "a", "B", 1);
+ STRCMP_TEST_EXPECT_GREATER(test, strncasecmp, "B", "a", 1);
+ /* Result is always 'equal' when count = 0 */
+ STRCMP_TEST_EXPECT_EQUAL(test, strncasecmp, "Abacaba", "Not abacaba", 0);
+}
+
+static void string_test_strncasecmp_long_strings(struct kunit *test)
+{
+ strcmp_fill_buffers('b', 'B');
+ STRCMP_TEST_EXPECT_EQUAL(test, strncasecmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_LARGE_BUF_LEN);
+
+ strcmp_buffer1[STRCMP_CHANGE_POINT] = 'a';
+ STRCMP_TEST_EXPECT_LOWER(test, strncasecmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_LARGE_BUF_LEN);
+
+ strcmp_buffer1[STRCMP_CHANGE_POINT] = 'C';
+ STRCMP_TEST_EXPECT_GREATER(test, strncasecmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_LARGE_BUF_LEN);
+
+ STRCMP_TEST_EXPECT_EQUAL(test, strncasecmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_CHANGE_POINT);
+ STRCMP_TEST_EXPECT_GREATER(test, strncasecmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_CHANGE_POINT + 1);
+}
+
+/**
+ * strscpy_check() - Run a specific test case.
+ * @test: KUnit test context pointer
+ * @src: Source string, argument to strscpy_pad()
+ * @count: Size of destination buffer, argument to strscpy_pad()
+ * @expected: Expected return value from call to strscpy_pad()
+ * @chars: Number of characters from the src string expected to be
+ * written to the dst buffer.
+ * @terminator: 1 if there should be a terminating null byte, 0 otherwise.
+ * @pad: Number of pad characters expected (in the tail of dst buffer).
+ * (@pad does not include the null terminator byte.)
+ *
+ * Calls strscpy_pad() and verifies the return value and state of the
+ * destination buffer after the call returns.
+ */
+static void strscpy_check(struct kunit *test, char *src, int count,
+ int expected, int chars, int terminator, int pad)
+{
+ int nr_bytes_poison;
+ int max_expected;
+ int max_count;
+ int written;
+ char buf[6];
+ int index, i;
+ const char POISON = 'z';
+
+ KUNIT_ASSERT_TRUE_MSG(test, src != NULL,
+ "null source string not supported");
+
+ memset(buf, POISON, sizeof(buf));
+ /* Future proofing test suite, validate args */
+ max_count = sizeof(buf) - 2; /* Space for null and to verify overflow */
+ max_expected = count - 1; /* Space for the null */
+
+ KUNIT_ASSERT_LE_MSG(test, count, max_count,
+ "count (%d) is too big (%d) ... aborting", count, max_count);
+ KUNIT_EXPECT_LE_MSG(test, expected, max_expected,
+ "expected (%d) is bigger than can possibly be returned (%d)",
+ expected, max_expected);
+
+ written = strscpy_pad(buf, src, count);
+ KUNIT_ASSERT_EQ(test, written, expected);
+
+ if (count && written == -E2BIG) {
+ KUNIT_ASSERT_EQ_MSG(test, 0, strncmp(buf, src, count - 1),
+ "buffer state invalid for -E2BIG");
+ KUNIT_ASSERT_EQ_MSG(test, buf[count - 1], '\0',
+ "too big string is not null terminated correctly");
+ }
+
+ for (i = 0; i < chars; i++)
+ KUNIT_ASSERT_EQ_MSG(test, buf[i], src[i],
+ "buf[i]==%c != src[i]==%c", buf[i], src[i]);
+
+ if (terminator)
+ KUNIT_ASSERT_EQ_MSG(test, buf[count - 1], '\0',
+ "string is not null terminated correctly");
+
+ for (i = 0; i < pad; i++) {
+ index = chars + terminator + i;
+ KUNIT_ASSERT_EQ_MSG(test, buf[index], '\0',
+ "padding missing at index: %d", i);
+ }
+
+ nr_bytes_poison = sizeof(buf) - chars - terminator - pad;
+ for (i = 0; i < nr_bytes_poison; i++) {
+ index = sizeof(buf) - 1 - i; /* Check from the end back */
+ KUNIT_ASSERT_EQ_MSG(test, buf[index], POISON,
+ "poison value missing at index: %d", i);
+ }
+}
+
+static void string_test_strscpy(struct kunit *test)
+{
+ char dest[8];
+
+ /*
+ * strscpy_check() uses a destination buffer of size 6 and needs at
+ * least 2 characters spare (one for null and one to check for
+	 * overflow). This means we should only call strscpy_check() with
+	 * strings up to a maximum of 4 characters long and 'count'
+	 * should not exceed 4. To test with longer strings increase
+	 * the buffer size in strscpy_check().
+ */
+
+ /* strscpy_check(test, src, count, expected, chars, terminator, pad) */
+ strscpy_check(test, "a", 0, -E2BIG, 0, 0, 0);
+ strscpy_check(test, "", 0, -E2BIG, 0, 0, 0);
+
+ strscpy_check(test, "a", 1, -E2BIG, 0, 1, 0);
+ strscpy_check(test, "", 1, 0, 0, 1, 0);
+
+ strscpy_check(test, "ab", 2, -E2BIG, 1, 1, 0);
+ strscpy_check(test, "a", 2, 1, 1, 1, 0);
+ strscpy_check(test, "", 2, 0, 0, 1, 1);
+
+ strscpy_check(test, "abc", 3, -E2BIG, 2, 1, 0);
+ strscpy_check(test, "ab", 3, 2, 2, 1, 0);
+ strscpy_check(test, "a", 3, 1, 1, 1, 1);
+ strscpy_check(test, "", 3, 0, 0, 1, 2);
+
+ strscpy_check(test, "abcd", 4, -E2BIG, 3, 1, 0);
+ strscpy_check(test, "abc", 4, 3, 3, 1, 0);
+ strscpy_check(test, "ab", 4, 2, 2, 1, 1);
+ strscpy_check(test, "a", 4, 1, 1, 1, 2);
+ strscpy_check(test, "", 4, 0, 0, 1, 3);
+
+ /* Compile-time-known source strings. */
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "", ARRAY_SIZE(dest)), 0);
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "", 3), 0);
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "", 1), 0);
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "", 0), -E2BIG);
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", ARRAY_SIZE(dest)), 5);
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", 3), -E2BIG);
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", 1), -E2BIG);
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", 0), -E2BIG);
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "This is too long", ARRAY_SIZE(dest)), -E2BIG);
+}
+
+static volatile int unconst;
+
+static void string_test_strcat(struct kunit *test)
+{
+ char dest[8];
+
+ /* Destination is terminated. */
+ memset(dest, 0, sizeof(dest));
+ KUNIT_EXPECT_EQ(test, strlen(dest), 0);
+ /* Empty copy does nothing. */
+ KUNIT_EXPECT_TRUE(test, strcat(dest, "") == dest);
+ KUNIT_EXPECT_STREQ(test, dest, "");
+ /* 4 characters copied in, stops at %NUL. */
+ KUNIT_EXPECT_TRUE(test, strcat(dest, "four\000123") == dest);
+ KUNIT_EXPECT_STREQ(test, dest, "four");
+ KUNIT_EXPECT_EQ(test, dest[5], '\0');
+ /* 2 more characters copied in okay. */
+ KUNIT_EXPECT_TRUE(test, strcat(dest, "AB") == dest);
+ KUNIT_EXPECT_STREQ(test, dest, "fourAB");
+}
+
+static void string_test_strncat(struct kunit *test)
+{
+ char dest[8];
+
+ /* Destination is terminated. */
+ memset(dest, 0, sizeof(dest));
+ KUNIT_EXPECT_EQ(test, strlen(dest), 0);
+ /* Empty copy of size 0 does nothing. */
+ KUNIT_EXPECT_TRUE(test, strncat(dest, "", 0 + unconst) == dest);
+ KUNIT_EXPECT_STREQ(test, dest, "");
+ /* Empty copy of size 1 does nothing too. */
+ KUNIT_EXPECT_TRUE(test, strncat(dest, "", 1 + unconst) == dest);
+ KUNIT_EXPECT_STREQ(test, dest, "");
+ /* Copy of max 0 characters should do nothing. */
+ KUNIT_EXPECT_TRUE(test, strncat(dest, "asdf", 0 + unconst) == dest);
+ KUNIT_EXPECT_STREQ(test, dest, "");
+
+ /* 4 characters copied in, even if max is 8. */
+ KUNIT_EXPECT_TRUE(test, strncat(dest, "four\000123", 8 + unconst) == dest);
+ KUNIT_EXPECT_STREQ(test, dest, "four");
+ KUNIT_EXPECT_EQ(test, dest[5], '\0');
+ KUNIT_EXPECT_EQ(test, dest[6], '\0');
+ /* 2 characters copied in okay, 2 ignored. */
+ KUNIT_EXPECT_TRUE(test, strncat(dest, "ABCD", 2 + unconst) == dest);
+ KUNIT_EXPECT_STREQ(test, dest, "fourAB");
+}
+
+static void string_test_strlcat(struct kunit *test)
+{
+ char dest[8] = "";
+ int len = sizeof(dest) + unconst;
+
+ /* Destination is terminated. */
+ KUNIT_EXPECT_EQ(test, strlen(dest), 0);
+ /* Empty copy is size 0. */
+ KUNIT_EXPECT_EQ(test, strlcat(dest, "", len), 0);
+ KUNIT_EXPECT_STREQ(test, dest, "");
+ /* Size 1 should keep buffer terminated, report size of source only. */
+ KUNIT_EXPECT_EQ(test, strlcat(dest, "four", 1 + unconst), 4);
+ KUNIT_EXPECT_STREQ(test, dest, "");
+
+ /* 4 characters copied in. */
+ KUNIT_EXPECT_EQ(test, strlcat(dest, "four", len), 4);
+ KUNIT_EXPECT_STREQ(test, dest, "four");
+ /* 2 characters copied in okay, gets to 6 total. */
+ KUNIT_EXPECT_EQ(test, strlcat(dest, "AB", len), 6);
+ KUNIT_EXPECT_STREQ(test, dest, "fourAB");
+ /* 2 characters ignored if max size (7) reached. */
+ KUNIT_EXPECT_EQ(test, strlcat(dest, "CD", 7 + unconst), 8);
+ KUNIT_EXPECT_STREQ(test, dest, "fourAB");
+ /* Only 1 of the 3 characters fits, now at true max size. */
+ KUNIT_EXPECT_EQ(test, strlcat(dest, "EFG", len), 9);
+ KUNIT_EXPECT_STREQ(test, dest, "fourABE");
+ /* Everything else ignored, now at full size. */
+ KUNIT_EXPECT_EQ(test, strlcat(dest, "1234", len), 11);
+ KUNIT_EXPECT_STREQ(test, dest, "fourABE");
+}
+
+static void string_test_strtomem(struct kunit *test)
+{
+ static const char input[sizeof(unsigned long)] = "hi";
+ static const char truncate[] = "this is too long";
+ struct {
+ unsigned long canary1;
+ unsigned char output[sizeof(unsigned long)] __nonstring;
+ unsigned long canary2;
+ } wrap;
+
+ memset(&wrap, 0xFF, sizeof(wrap));
+ KUNIT_EXPECT_EQ_MSG(test, wrap.canary1, ULONG_MAX,
+ "bad initial canary value");
+ KUNIT_EXPECT_EQ_MSG(test, wrap.canary2, ULONG_MAX,
+ "bad initial canary value");
+
+ /* Check unpadded copy leaves surroundings untouched. */
+ strtomem(wrap.output, input);
+ KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX);
+ KUNIT_EXPECT_EQ(test, wrap.output[0], input[0]);
+ KUNIT_EXPECT_EQ(test, wrap.output[1], input[1]);
+ for (size_t i = 2; i < sizeof(wrap.output); i++)
+ KUNIT_EXPECT_EQ(test, wrap.output[i], 0xFF);
+ KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX);
+
+ /* Check truncated copy leaves surroundings untouched. */
+ memset(&wrap, 0xFF, sizeof(wrap));
+ strtomem(wrap.output, truncate);
+ KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX);
+ for (size_t i = 0; i < sizeof(wrap.output); i++)
+ KUNIT_EXPECT_EQ(test, wrap.output[i], truncate[i]);
+ KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX);
+
+ /* Check padded copy leaves only string padded. */
+ memset(&wrap, 0xFF, sizeof(wrap));
+ strtomem_pad(wrap.output, input, 0xAA);
+ KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX);
+ KUNIT_EXPECT_EQ(test, wrap.output[0], input[0]);
+ KUNIT_EXPECT_EQ(test, wrap.output[1], input[1]);
+ for (size_t i = 2; i < sizeof(wrap.output); i++)
+ KUNIT_EXPECT_EQ(test, wrap.output[i], 0xAA);
+ KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX);
+
+ /* Check truncated padded copy has no padding. */
+ memset(&wrap, 0xFF, sizeof(wrap));
+ strtomem_pad(wrap.output, truncate, 0xAA);
+ KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX);
+ for (size_t i = 0; i < sizeof(wrap.output); i++)
+ KUNIT_EXPECT_EQ(test, wrap.output[i], truncate[i]);
+ KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX);
+}
+
+static void string_test_memtostr(struct kunit *test)
+{
+ char nonstring[7] = { 'a', 'b', 'c', 'd', 'e', 'f', 'g' };
+ char nonstring_small[3] = { 'a', 'b', 'c' };
+ char dest[sizeof(nonstring) + 1];
+
+ /* Copy in a non-NUL-terminated string into exactly right-sized dest. */
+ KUNIT_EXPECT_EQ(test, sizeof(dest), sizeof(nonstring) + 1);
+ memset(dest, 'X', sizeof(dest));
+ memtostr(dest, nonstring);
+ KUNIT_EXPECT_STREQ(test, dest, "abcdefg");
+ memset(dest, 'X', sizeof(dest));
+ memtostr(dest, nonstring_small);
+ KUNIT_EXPECT_STREQ(test, dest, "abc");
+ KUNIT_EXPECT_EQ(test, dest[7], 'X');
+
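+ /* Same checks for the padding variant: any tail beyond the source is zero-filled. */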
+ memset(dest, 'X', sizeof(dest));
+ memtostr_pad(dest, nonstring);
+ KUNIT_EXPECT_STREQ(test, dest, "abcdefg");
+ memset(dest, 'X', sizeof(dest));
+ memtostr_pad(dest, nonstring_small);
+ KUNIT_EXPECT_STREQ(test, dest, "abc");
+ KUNIT_EXPECT_EQ(test, dest[7], '\0');
+}
+
static struct kunit_case string_test_cases[] = {
- KUNIT_CASE(test_memset16),
- KUNIT_CASE(test_memset32),
- KUNIT_CASE(test_memset64),
- KUNIT_CASE(test_strchr),
- KUNIT_CASE(test_strnchr),
- KUNIT_CASE(test_strspn),
+ KUNIT_CASE(string_test_memset16),
+ KUNIT_CASE(string_test_memset32),
+ KUNIT_CASE(string_test_memset64),
+ KUNIT_CASE(string_test_strchr),
+ KUNIT_CASE(string_test_strnchr),
+ KUNIT_CASE(string_test_strspn),
+ KUNIT_CASE(string_test_strcmp),
+ KUNIT_CASE(string_test_strcmp_long_strings),
+ KUNIT_CASE(string_test_strncmp),
+ KUNIT_CASE(string_test_strncmp_long_strings),
+ KUNIT_CASE(string_test_strcasecmp),
+ KUNIT_CASE(string_test_strcasecmp_long_strings),
+ KUNIT_CASE(string_test_strncasecmp),
+ KUNIT_CASE(string_test_strncasecmp_long_strings),
+ KUNIT_CASE(string_test_strscpy),
+ KUNIT_CASE(string_test_strcat),
+ KUNIT_CASE(string_test_strncat),
+ KUNIT_CASE(string_test_strlcat),
+ KUNIT_CASE(string_test_strtomem),
+ KUNIT_CASE(string_test_memtostr),
{}
};
@@ -196,4 +633,5 @@ static struct kunit_suite string_test_suite = {
kunit_test_suites(&string_test_suite);
+MODULE_DESCRIPTION("Test cases for string functions");
MODULE_LICENSE("GPL v2");
diff --git a/lib/strscpy_kunit.c b/lib/strscpy_kunit.c
deleted file mode 100644
index a6b6344354ed..000000000000
--- a/lib/strscpy_kunit.c
+++ /dev/null
@@ -1,142 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Kernel module for testing 'strscpy' family of functions.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <kunit/test.h>
-#include <linux/string.h>
-
-/*
- * tc() - Run a specific test case.
- * @src: Source string, argument to strscpy_pad()
- * @count: Size of destination buffer, argument to strscpy_pad()
- * @expected: Expected return value from call to strscpy_pad()
- * @terminator: 1 if there should be a terminating null byte 0 otherwise.
- * @chars: Number of characters from the src string expected to be
- * written to the dst buffer.
- * @pad: Number of pad characters expected (in the tail of dst buffer).
- * (@pad does not include the null terminator byte.)
- *
- * Calls strscpy_pad() and verifies the return value and state of the
- * destination buffer after the call returns.
- */
-static void tc(struct kunit *test, char *src, int count, int expected,
- int chars, int terminator, int pad)
-{
- int nr_bytes_poison;
- int max_expected;
- int max_count;
- int written;
- char buf[6];
- int index, i;
- const char POISON = 'z';
-
- KUNIT_ASSERT_TRUE_MSG(test, src != NULL,
- "null source string not supported");
-
- memset(buf, POISON, sizeof(buf));
- /* Future proofing test suite, validate args */
- max_count = sizeof(buf) - 2; /* Space for null and to verify overflow */
- max_expected = count - 1; /* Space for the null */
-
- KUNIT_ASSERT_LE_MSG(test, count, max_count,
- "count (%d) is too big (%d) ... aborting", count, max_count);
- KUNIT_EXPECT_LE_MSG(test, expected, max_expected,
- "expected (%d) is bigger than can possibly be returned (%d)",
- expected, max_expected);
-
- written = strscpy_pad(buf, src, count);
- KUNIT_ASSERT_EQ(test, written, expected);
-
- if (count && written == -E2BIG) {
- KUNIT_ASSERT_EQ_MSG(test, 0, strncmp(buf, src, count - 1),
- "buffer state invalid for -E2BIG");
- KUNIT_ASSERT_EQ_MSG(test, buf[count - 1], '\0',
- "too big string is not null terminated correctly");
- }
-
- for (i = 0; i < chars; i++)
- KUNIT_ASSERT_EQ_MSG(test, buf[i], src[i],
- "buf[i]==%c != src[i]==%c", buf[i], src[i]);
-
- if (terminator)
- KUNIT_ASSERT_EQ_MSG(test, buf[count - 1], '\0',
- "string is not null terminated correctly");
-
- for (i = 0; i < pad; i++) {
- index = chars + terminator + i;
- KUNIT_ASSERT_EQ_MSG(test, buf[index], '\0',
- "padding missing at index: %d", i);
- }
-
- nr_bytes_poison = sizeof(buf) - chars - terminator - pad;
- for (i = 0; i < nr_bytes_poison; i++) {
- index = sizeof(buf) - 1 - i; /* Check from the end back */
- KUNIT_ASSERT_EQ_MSG(test, buf[index], POISON,
- "poison value missing at index: %d", i);
- }
-}
-
-static void strscpy_test(struct kunit *test)
-{
- char dest[8];
-
- /*
- * tc() uses a destination buffer of size 6 and needs at
- * least 2 characters spare (one for null and one to check for
- * overflow). This means we should only call tc() with
- * strings up to a maximum of 4 characters long and 'count'
- * should not exceed 4. To test with longer strings increase
- * the buffer size in tc().
- */
-
- /* tc(test, src, count, expected, chars, terminator, pad) */
- tc(test, "a", 0, -E2BIG, 0, 0, 0);
- tc(test, "", 0, -E2BIG, 0, 0, 0);
-
- tc(test, "a", 1, -E2BIG, 0, 1, 0);
- tc(test, "", 1, 0, 0, 1, 0);
-
- tc(test, "ab", 2, -E2BIG, 1, 1, 0);
- tc(test, "a", 2, 1, 1, 1, 0);
- tc(test, "", 2, 0, 0, 1, 1);
-
- tc(test, "abc", 3, -E2BIG, 2, 1, 0);
- tc(test, "ab", 3, 2, 2, 1, 0);
- tc(test, "a", 3, 1, 1, 1, 1);
- tc(test, "", 3, 0, 0, 1, 2);
-
- tc(test, "abcd", 4, -E2BIG, 3, 1, 0);
- tc(test, "abc", 4, 3, 3, 1, 0);
- tc(test, "ab", 4, 2, 2, 1, 1);
- tc(test, "a", 4, 1, 1, 1, 2);
- tc(test, "", 4, 0, 0, 1, 3);
-
- /* Compile-time-known source strings. */
- KUNIT_EXPECT_EQ(test, strscpy(dest, "", ARRAY_SIZE(dest)), 0);
- KUNIT_EXPECT_EQ(test, strscpy(dest, "", 3), 0);
- KUNIT_EXPECT_EQ(test, strscpy(dest, "", 1), 0);
- KUNIT_EXPECT_EQ(test, strscpy(dest, "", 0), -E2BIG);
- KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", ARRAY_SIZE(dest)), 5);
- KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", 3), -E2BIG);
- KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", 1), -E2BIG);
- KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", 0), -E2BIG);
- KUNIT_EXPECT_EQ(test, strscpy(dest, "This is too long", ARRAY_SIZE(dest)), -E2BIG);
-}
-
-static struct kunit_case strscpy_test_cases[] = {
- KUNIT_CASE(strscpy_test),
- {}
-};
-
-static struct kunit_suite strscpy_test_suite = {
- .name = "strscpy",
- .test_cases = strscpy_test_cases,
-};
-
-kunit_test_suite(strscpy_test_suite);
-
-MODULE_AUTHOR("Tobin C. Harding <tobin@kernel.org>");
-MODULE_LICENSE("GPL");
diff --git a/lib/test-kstrtox.c b/lib/test-kstrtox.c
index f355f67169b6..ee87fef66cb5 100644
--- a/lib/test-kstrtox.c
+++ b/lib/test-kstrtox.c
@@ -732,4 +732,5 @@ static int __init test_kstrtox_init(void)
return -EINVAL;
}
module_init(test_kstrtox_init);
+MODULE_DESCRIPTION("Module test for kstrto*() APIs");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
index 6b2b33579f56..65a75d58ed9e 100644
--- a/lib/test_bitmap.c
+++ b/lib/test_bitmap.c
@@ -60,18 +60,17 @@ static const unsigned long exp3_1_0[] __initconst = {
};
static bool __init
-__check_eq_uint(const char *srcfile, unsigned int line,
- const unsigned int exp_uint, unsigned int x)
+__check_eq_ulong(const char *srcfile, unsigned int line,
+ const unsigned long exp_ulong, unsigned long x)
{
- if (exp_uint != x) {
- pr_err("[%s:%u] expected %u, got %u\n",
- srcfile, line, exp_uint, x);
+ if (exp_ulong != x) {
+ pr_err("[%s:%u] expected %lu, got %lu\n",
+ srcfile, line, exp_ulong, x);
return false;
}
return true;
}
-
static bool __init
__check_eq_bitmap(const char *srcfile, unsigned int line,
const unsigned long *exp_bmap, const unsigned long *bmap,
@@ -185,7 +184,8 @@ __check_eq_str(const char *srcfile, unsigned int line,
result; \
})
-#define expect_eq_uint(...) __expect_eq(uint, ##__VA_ARGS__)
+#define expect_eq_ulong(...) __expect_eq(ulong, ##__VA_ARGS__)
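+/* Truncate both sides to unsigned int, then compare via the ulong checker. */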
+#define expect_eq_uint(x, y) expect_eq_ulong((unsigned int)(x), (unsigned int)(y))
#define expect_eq_bitmap(...) __expect_eq(bitmap, ##__VA_ARGS__)
#define expect_eq_pbl(...) __expect_eq(pbl, ##__VA_ARGS__)
#define expect_eq_u32_array(...) __expect_eq(u32_array, ##__VA_ARGS__)
@@ -244,7 +244,7 @@ static void __init test_find_nth_bit(void)
expect_eq_uint(60, find_nth_bit(bmap, 64 * 3, 5));
expect_eq_uint(80, find_nth_bit(bmap, 64 * 3, 6));
expect_eq_uint(123, find_nth_bit(bmap, 64 * 3, 7));
- expect_eq_uint(64 * 3, find_nth_bit(bmap, 64 * 3, 8));
+ expect_eq_uint(0, !!(find_nth_bit(bmap, 64 * 3, 8) < 64 * 3));
expect_eq_uint(10, find_nth_bit(bmap, 64 * 3 - 1, 0));
expect_eq_uint(20, find_nth_bit(bmap, 64 * 3 - 1, 1));
@@ -254,7 +254,7 @@ static void __init test_find_nth_bit(void)
expect_eq_uint(60, find_nth_bit(bmap, 64 * 3 - 1, 5));
expect_eq_uint(80, find_nth_bit(bmap, 64 * 3 - 1, 6));
expect_eq_uint(123, find_nth_bit(bmap, 64 * 3 - 1, 7));
- expect_eq_uint(64 * 3 - 1, find_nth_bit(bmap, 64 * 3 - 1, 8));
+ expect_eq_uint(0, !!(find_nth_bit(bmap, 64 * 3 - 1, 8) < 64 * 3 - 1));
for_each_set_bit(bit, exp1, EXP1_IN_BITS) {
b = find_nth_bit(exp1, EXP1_IN_BITS, cnt++);
@@ -548,7 +548,7 @@ static void __init test_bitmap_parselist(void)
}
if (ptest.flags & PARSE_TIME)
- pr_err("parselist: %d: input is '%s' OK, Time: %llu\n",
+ pr_info("parselist: %d: input is '%s' OK, Time: %llu\n",
i, ptest.in, time);
#undef ptest
@@ -587,7 +587,7 @@ static void __init test_bitmap_printlist(void)
goto out;
}
- pr_err("bitmap_print_to_pagebuf: input is '%s', Time: %llu\n", buf, time);
+ pr_info("bitmap_print_to_pagebuf: input is '%s', Time: %llu\n", buf, time);
out:
kfree(buf);
kfree(bmap);
@@ -665,7 +665,7 @@ static void __init test_bitmap_parse(void)
}
if (test.flags & PARSE_TIME)
- pr_err("parse: %d: input is '%s' OK, Time: %llu\n",
+ pr_info("parse: %d: input is '%s' OK, Time: %llu\n",
i, test.in, time);
}
}
@@ -1245,14 +1245,7 @@ static void __init test_bitmap_const_eval(void)
* in runtime.
*/
- /*
- * Equals to `unsigned long bitmap[1] = { GENMASK(6, 5), }`.
- * Clang on s390 optimizes bitops at compile-time as intended, but at
- * the same time stops treating @bitmap and @bitopvar as compile-time
- * constants after regular test_bit() is executed, thus triggering the
- * build bugs below. So, call const_test_bit() there directly until
- * the compiler is fixed.
- */
+ /* Equals to `unsigned long bitmap[1] = { GENMASK(6, 5), }` */
bitmap_clear(bitmap, 0, BITS_PER_LONG);
if (!test_bit(7, bitmap))
bitmap_set(bitmap, 5, 2);
@@ -1284,8 +1277,179 @@ static void __init test_bitmap_const_eval(void)
/* ~BIT(25) */
BUILD_BUG_ON(!__builtin_constant_p(~var));
BUILD_BUG_ON(~var != ~BIT(25));
+
+ /* ~BIT(25) | BIT(25) == ~0UL */
+ bitmap_complement(&var, &var, BITS_PER_LONG);
+ __assign_bit(25, &var, true);
+
+ /* !(~(~0UL)) == 1 */
+ res = bitmap_full(&var, BITS_PER_LONG);
+ BUILD_BUG_ON(!__builtin_constant_p(res));
+ BUILD_BUG_ON(!res);
+}
+
+/*
+ * Test bitmap should be big enough to include the cases when start is not in
+ * the first word, and start+nbits lands in the following word.
+ */
+#define TEST_BIT_LEN (1000)
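+/* On 64-bit, 1000 bits span 15 full words plus 40 bits of a 16th. */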
+
+/*
+ * Helper function to test bitmap_write() overwriting the chosen byte pattern.
+ */
+static void __init test_bitmap_write_helper(const char *pattern)
+{
+ DECLARE_BITMAP(bitmap, TEST_BIT_LEN);
+ DECLARE_BITMAP(exp_bitmap, TEST_BIT_LEN);
+ DECLARE_BITMAP(pat_bitmap, TEST_BIT_LEN);
+ unsigned long w, r, bit;
+ int i, n, nbits;
+
+ /*
+ * Only parse the pattern once and store the result in the intermediate
+ * bitmap.
+ */
+ bitmap_parselist(pattern, pat_bitmap, TEST_BIT_LEN);
+
+ /*
+ * Check that writing a single bit does not accidentally touch the
+ * adjacent bits.
+ */
+ for (i = 0; i < TEST_BIT_LEN; i++) {
+ bitmap_copy(bitmap, pat_bitmap, TEST_BIT_LEN);
+ bitmap_copy(exp_bitmap, pat_bitmap, TEST_BIT_LEN);
+ for (bit = 0; bit <= 1; bit++) {
+ bitmap_write(bitmap, bit, i, 1);
+ __assign_bit(i, exp_bitmap, bit);
+ expect_eq_bitmap(exp_bitmap, bitmap,
+ TEST_BIT_LEN);
+ }
+ }
+
+ /* Ensure writing 0 bits does not change anything. */
+ bitmap_copy(bitmap, pat_bitmap, TEST_BIT_LEN);
+ bitmap_copy(exp_bitmap, pat_bitmap, TEST_BIT_LEN);
+ for (i = 0; i < TEST_BIT_LEN; i++) {
+ bitmap_write(bitmap, ~0UL, i, 0);
+ expect_eq_bitmap(exp_bitmap, bitmap, TEST_BIT_LEN);
+ }
+
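+ /*
+ * Sweep every write width and position: keep the low @nbits bits of
+ * the 0xdeadbeef pattern, write them at each offset, then check both
+ * the resulting bitmap and the value read back.
+ */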
+ for (nbits = BITS_PER_LONG; nbits >= 1; nbits--) {
+ w = IS_ENABLED(CONFIG_64BIT) ? 0xdeadbeefdeadbeefUL
+ : 0xdeadbeefUL;
+ w >>= (BITS_PER_LONG - nbits);
+ for (i = 0; i <= TEST_BIT_LEN - nbits; i++) {
+ bitmap_copy(bitmap, pat_bitmap, TEST_BIT_LEN);
+ bitmap_copy(exp_bitmap, pat_bitmap, TEST_BIT_LEN);
+ for (n = 0; n < nbits; n++)
+ __assign_bit(i + n, exp_bitmap, w & BIT(n));
+ bitmap_write(bitmap, w, i, nbits);
+ expect_eq_bitmap(exp_bitmap, bitmap, TEST_BIT_LEN);
+ r = bitmap_read(bitmap, i, nbits);
+ expect_eq_ulong(r, w);
+ }
+ }
+}
+
+static void __init test_bitmap_read_write(void)
+{
+ unsigned char *pattern[3] = {"", "all:1/2", "all"};
+ DECLARE_BITMAP(bitmap, TEST_BIT_LEN);
+ unsigned long zero_bits = 0, bits_per_long = BITS_PER_LONG;
+ unsigned long val;
+ int i, pi;
+
+ /*
+ * Reading/writing zero bits should not crash the kernel.
+ * READ_ONCE() prevents constant folding.
+ */
+ bitmap_write(NULL, 0, 0, READ_ONCE(zero_bits));
+ /* Return value of bitmap_read() is undefined here. */
+ bitmap_read(NULL, 0, READ_ONCE(zero_bits));
+
+ /*
+ * Reading/writing more than BITS_PER_LONG bits should not crash the
+ * kernel. READ_ONCE() prevents constant folding.
+ */
+ bitmap_write(NULL, 0, 0, READ_ONCE(bits_per_long) + 1);
+ /* Return value of bitmap_read() is undefined here. */
+ bitmap_read(NULL, 0, READ_ONCE(bits_per_long) + 1);
+
+ /*
+ * Ensure that bitmap_read() reads the same value that was previously
+ * written, and that two consecutive values are correctly merged:
+ * writing 0b10101 at bit i and 0b101 at bit i + 5 must read back as
+ * 0b10110101 from bit i. The resulting bit pattern is asymmetric to
+ * rule out possible issues with bit numbering order.
+ */
+ for (i = 0; i < TEST_BIT_LEN - 7; i++) {
+ bitmap_zero(bitmap, TEST_BIT_LEN);
+
+ bitmap_write(bitmap, 0b10101UL, i, 5);
+ val = bitmap_read(bitmap, i, 5);
+ expect_eq_ulong(0b10101UL, val);
+
+ bitmap_write(bitmap, 0b101UL, i + 5, 3);
+ val = bitmap_read(bitmap, i + 5, 3);
+ expect_eq_ulong(0b101UL, val);
+
+ val = bitmap_read(bitmap, i, 8);
+ expect_eq_ulong(0b10110101UL, val);
+ }
+
+ for (pi = 0; pi < ARRAY_SIZE(pattern); pi++)
+ test_bitmap_write_helper(pattern[pi]);
}
+static void __init test_bitmap_read_perf(void)
+{
+ DECLARE_BITMAP(bitmap, TEST_BIT_LEN);
+ unsigned int cnt, nbits, i;
+ unsigned long val;
+ ktime_t time;
+
+ bitmap_fill(bitmap, TEST_BIT_LEN);
+ time = ktime_get();
+ for (cnt = 0; cnt < 5; cnt++) {
+ for (nbits = 1; nbits <= BITS_PER_LONG; nbits++) {
+ for (i = 0; i < TEST_BIT_LEN; i++) {
+ if (i + nbits > TEST_BIT_LEN)
+ break;
+ /*
+ * Prevent the compiler from optimizing away the
+ * bitmap_read() by using its value.
+ */
+ WRITE_ONCE(val, bitmap_read(bitmap, i, nbits));
+ }
+ }
+ }
+ time = ktime_get() - time;
+ pr_info("Time spent in %s:\t%llu\n", __func__, time);
+}
+
+static void __init test_bitmap_write_perf(void)
+{
+ DECLARE_BITMAP(bitmap, TEST_BIT_LEN);
+ unsigned int cnt, nbits, i;
+ unsigned long val = 0xfeedface;
+ ktime_t time;
+
+ bitmap_zero(bitmap, TEST_BIT_LEN);
+ time = ktime_get();
+ for (cnt = 0; cnt < 5; cnt++) {
+ for (nbits = 1; nbits <= BITS_PER_LONG; nbits++) {
+ for (i = 0; i < TEST_BIT_LEN; i++) {
+ if (i + nbits > TEST_BIT_LEN)
+ break;
+ bitmap_write(bitmap, val, i, nbits);
+ }
+ }
+ }
+ time = ktime_get() - time;
+ pr_info("Time spent in %s:\t%llu\n", __func__, time);
+}
+
+#undef TEST_BIT_LEN
+
static void __init selftest(void)
{
test_zero_clear();
@@ -1303,6 +1467,9 @@ static void __init selftest(void)
test_bitmap_cut();
test_bitmap_print_buf();
test_bitmap_const_eval();
+ test_bitmap_read_write();
+ test_bitmap_read_perf();
+ test_bitmap_write_perf();
test_find_nth_bit();
test_for_each_set_bit();
@@ -1319,4 +1486,5 @@ static void __init selftest(void)
KSTM_MODULE_LOADERS(test_bitmap);
MODULE_AUTHOR("david decotigny <david.decotigny@googlers.com>");
+MODULE_DESCRIPTION("Test cases for bitmap API");
MODULE_LICENSE("GPL");
diff --git a/lib/test_bitops.c b/lib/test_bitops.c
index 3b7bcbee84db..55669624bb28 100644
--- a/lib/test_bitops.c
+++ b/lib/test_bitops.c
@@ -5,9 +5,11 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/cleanup.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/printk.h>
+#include <linux/slab.h>
/* a tiny module only meant to test
*
@@ -50,6 +52,30 @@ static unsigned long order_comb_long[][2] = {
};
#endif
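+
+/*
+ * Time fns() (find the N'th set bit in a word) over random data; the
+ * volatile tmp keeps the calls from being optimized away.
+ */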
+static int __init test_fns(void)
+{
+ static volatile __always_used unsigned long tmp __initdata;
+ unsigned long *buf __free(kfree) = NULL;
+ unsigned int i, n;
+ ktime_t time;
+
+ buf = kmalloc_array(10000, sizeof(unsigned long), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ get_random_bytes(buf, 10000 * sizeof(unsigned long));
+ time = ktime_get();
+
+ for (n = 0; n < BITS_PER_LONG; n++)
+ for (i = 0; i < 10000; i++)
+ tmp = fns(buf[i], n);
+
+ time = ktime_get() - time;
+ pr_err("fns: %18llu ns\n", time);
+
+ return 0;
+}
+
static int __init test_bitops_startup(void)
{
int i, bit_set;
@@ -94,6 +120,8 @@ static int __init test_bitops_startup(void)
if (bit_set != BITOPS_LAST)
pr_err("ERROR: FOUND SET BIT %d\n", bit_set);
+ test_fns();
+
pr_info("Completed bitops test\n");
return 0;
diff --git a/lib/test_bits.c b/lib/test_bits.c
index c9368a2314e7..01313980f175 100644
--- a/lib/test_bits.c
+++ b/lib/test_bits.c
@@ -72,4 +72,5 @@ static struct kunit_suite bits_test_suite = {
};
kunit_test_suite(bits_test_suite);
+MODULE_DESCRIPTION("Test cases for functions and macros in bits.h");
MODULE_LICENSE("GPL");
diff --git a/lib/test_blackhole_dev.c b/lib/test_blackhole_dev.c
index f247089d63c0..ec290ac2a0d9 100644
--- a/lib/test_blackhole_dev.c
+++ b/lib/test_blackhole_dev.c
@@ -96,4 +96,5 @@ module_init(test_blackholedev_init);
module_exit(test_blackholedev_exit);
MODULE_AUTHOR("Mahesh Bandewar <maheshb@google.com>");
+MODULE_DESCRIPTION("module test of the blackhole_dev");
MODULE_LICENSE("GPL");
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 569e6d2dc55c..ca4b0eea81a2 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -1740,7 +1740,7 @@ static int __bpf_emit_cmpxchg32(struct bpf_test *self, void *arg,
/* Result unsuccessful */
insns[i++] = BPF_STX_MEM(BPF_W, R10, R1, -4);
insns[i++] = BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R2, -4);
- insns[i++] = BPF_ZEXT_REG(R0), /* Zext always inserted by verifier */
+ insns[i++] = BPF_ZEXT_REG(R0); /* Zext always inserted by verifier */
insns[i++] = BPF_LDX_MEM(BPF_W, R3, R10, -4);
insns[i++] = BPF_JMP32_REG(BPF_JEQ, R1, R3, 2);
@@ -1754,7 +1754,7 @@ static int __bpf_emit_cmpxchg32(struct bpf_test *self, void *arg,
/* Result successful */
i += __bpf_ld_imm64(&insns[i], R0, dst);
insns[i++] = BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R2, -4);
- insns[i++] = BPF_ZEXT_REG(R0), /* Zext always inserted by verifier */
+ insns[i++] = BPF_ZEXT_REG(R0); /* Zext always inserted by verifier */
insns[i++] = BPF_LDX_MEM(BPF_W, R3, R10, -4);
insns[i++] = BPF_JMP32_REG(BPF_JEQ, R2, R3, 2);
@@ -13431,7 +13431,7 @@ static struct bpf_test tests[] = {
.stack_depth = 8,
.nr_testruns = NR_PATTERN_RUNS,
},
- /* 64-bit atomic magnitudes */
+ /* 32-bit atomic magnitudes */
{
"ATOMIC_W_ADD: all operand magnitudes",
{ },
@@ -15198,6 +15198,7 @@ struct tail_call_test {
int flags;
int result;
int stack_depth;
+ bool has_tail_call;
};
/* Flags that can be passed to tail call test cases */
@@ -15273,6 +15274,7 @@ static struct tail_call_test tail_call_tests[] = {
BPF_EXIT_INSN(),
},
.result = 3,
+ .has_tail_call = true,
},
{
"Tail call 3",
@@ -15283,6 +15285,7 @@ static struct tail_call_test tail_call_tests[] = {
BPF_EXIT_INSN(),
},
.result = 6,
+ .has_tail_call = true,
},
{
"Tail call 4",
@@ -15293,6 +15296,7 @@ static struct tail_call_test tail_call_tests[] = {
BPF_EXIT_INSN(),
},
.result = 10,
+ .has_tail_call = true,
},
{
"Tail call load/store leaf",
@@ -15323,6 +15327,7 @@ static struct tail_call_test tail_call_tests[] = {
},
.result = 0,
.stack_depth = 16,
+ .has_tail_call = true,
},
{
"Tail call error path, max count reached",
@@ -15335,6 +15340,7 @@ static struct tail_call_test tail_call_tests[] = {
},
.flags = FLAG_NEED_STATE | FLAG_RESULT_IN_STATE,
.result = (MAX_TAIL_CALL_CNT + 1) * MAX_TESTRUNS,
+ .has_tail_call = true,
},
{
"Tail call count preserved across function calls",
@@ -15357,6 +15363,7 @@ static struct tail_call_test tail_call_tests[] = {
.stack_depth = 8,
.flags = FLAG_NEED_STATE | FLAG_RESULT_IN_STATE,
.result = (MAX_TAIL_CALL_CNT + 1) * MAX_TESTRUNS,
+ .has_tail_call = true,
},
{
"Tail call error path, NULL target",
@@ -15369,6 +15376,7 @@ static struct tail_call_test tail_call_tests[] = {
},
.flags = FLAG_NEED_STATE | FLAG_RESULT_IN_STATE,
.result = MAX_TESTRUNS,
+ .has_tail_call = true,
},
{
"Tail call error path, index out of range",
@@ -15381,6 +15389,7 @@ static struct tail_call_test tail_call_tests[] = {
},
.flags = FLAG_NEED_STATE | FLAG_RESULT_IN_STATE,
.result = MAX_TESTRUNS,
+ .has_tail_call = true,
},
};
@@ -15430,6 +15439,7 @@ static __init int prepare_tail_call_tests(struct bpf_array **pprogs)
fp->len = len;
fp->type = BPF_PROG_TYPE_SOCKET_FILTER;
fp->aux->stack_depth = test->stack_depth;
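+ /* Tell the JIT the program has tail calls so it emits the frame setup for them. */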
+ fp->aux->tail_call_reachable = test->has_tail_call;
memcpy(fp->insnsi, test->insns, len * sizeof(struct bpf_insn));
/* Relocate runtime tail call offsets and addresses */
@@ -15706,4 +15716,5 @@ static void __exit test_bpf_exit(void)
module_init(test_bpf_init);
module_exit(test_bpf_exit);
+MODULE_DESCRIPTION("Testsuite for BPF interpreter and BPF JIT compiler");
MODULE_LICENSE("GPL");
diff --git a/lib/test_dynamic_debug.c b/lib/test_dynamic_debug.c
index 8dd250ad022b..77c2a669b6af 100644
--- a/lib/test_dynamic_debug.c
+++ b/lib/test_dynamic_debug.c
@@ -162,4 +162,5 @@ module_init(test_dynamic_debug_init);
module_exit(test_dynamic_debug_exit);
MODULE_AUTHOR("Jim Cromie <jim.cromie@gmail.com>");
+MODULE_DESCRIPTION("Kernel module for testing dynamic_debug");
MODULE_LICENSE("GPL");
diff --git a/lib/test_firmware.c b/lib/test_firmware.c
index 9cfdcd6d21db..bcb32cbff188 100644
--- a/lib/test_firmware.c
+++ b/lib/test_firmware.c
@@ -1567,4 +1567,5 @@ static void __exit test_firmware_exit(void)
module_exit(test_firmware_exit);
MODULE_AUTHOR("Kees Cook <keescook@chromium.org>");
+MODULE_DESCRIPTION("interface to trigger and test firmware loading");
MODULE_LICENSE("GPL");
diff --git a/lib/test_fpu.h b/lib/test_fpu.h
new file mode 100644
index 000000000000..4459807084bc
--- /dev/null
+++ b/lib/test_fpu.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _LIB_TEST_FPU_H
+#define _LIB_TEST_FPU_H
+
+int test_fpu(void);
+
+#endif
diff --git a/lib/test_fpu.c b/lib/test_fpu_glue.c
index e82db19fed84..074f30301f29 100644
--- a/lib/test_fpu.c
+++ b/lib/test_fpu_glue.c
@@ -17,39 +17,9 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/debugfs.h>
-#include <asm/fpu/api.h>
+#include <linux/fpu.h>
-static int test_fpu(void)
-{
- /*
- * This sequence of operations tests that rounding mode is
- * to nearest and that denormal numbers are supported.
- * Volatile variables are used to avoid compiler optimizing
- * the calculations away.
- */
- volatile double a, b, c, d, e, f, g;
-
- a = 4.0;
- b = 1e-15;
- c = 1e-310;
-
- /* Sets precision flag */
- d = a + b;
-
- /* Result depends on rounding mode */
- e = a + b / 2;
-
- /* Denormal and very large values */
- f = b / c;
-
- /* Depends on denormal support */
- g = a + c * f;
-
- if (d > a && e > a && g > a)
- return 0;
- else
- return -EINVAL;
-}
+#include "test_fpu.h"
static int test_fpu_get(void *data, u64 *val)
{
@@ -68,6 +38,9 @@ static struct dentry *selftest_dir;
static int __init test_fpu_init(void)
{
+ if (!kernel_fpu_available())
+ return -EINVAL;
+
selftest_dir = debugfs_create_dir("selftest_helpers", NULL);
if (!selftest_dir)
return -ENOMEM;
@@ -86,4 +59,5 @@ static void __exit test_fpu_exit(void)
module_init(test_fpu_init);
module_exit(test_fpu_exit);
+MODULE_DESCRIPTION("Test cases for floating point operations");
MODULE_LICENSE("GPL");
diff --git a/lib/test_fpu_impl.c b/lib/test_fpu_impl.c
new file mode 100644
index 000000000000..777894dbbe86
--- /dev/null
+++ b/lib/test_fpu_impl.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/errno.h>
+
+#include "test_fpu.h"
+
+int test_fpu(void)
+{
+ /*
+ * This sequence of operations tests that rounding mode is
+ * to nearest and that denormal numbers are supported.
+ * Volatile variables are used to avoid compiler optimizing
+ * the calculations away.
+ */
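+
+ /*
+ * Illustratively (assuming IEEE-754 binary64): d = 4.0 + 1e-15
+ * exceeds 4.0 only if the tiny increment is preserved; e = 4.0 +
+ * 5e-16 exceeds 4.0 only under round-to-nearest (half a ULP of 4.0
+ * is about 4.4e-16); f = 1e-15 / 1e-310 is roughly 1e295, so g =
+ * 4.0 + 1e-310 * 1e295 exceeds 4.0 only if the subnormal 1e-310
+ * was not flushed to zero.
+ */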
+ volatile double a, b, c, d, e, f, g;
+
+ a = 4.0;
+ b = 1e-15;
+ c = 1e-310;
+
+ /* Sets precision flag */
+ d = a + b;
+
+ /* Result depends on rounding mode */
+ e = a + b / 2;
+
+ /* Denormal and very large values */
+ f = b / c;
+
+ /* Depends on denormal support */
+ g = a + c * f;
+
+ if (d > a && e > a && g > a)
+ return 0;
+ else
+ return -EINVAL;
+}
diff --git a/lib/test_free_pages.c b/lib/test_free_pages.c
index 9ebf6f5549f3..48952364c540 100644
--- a/lib/test_free_pages.c
+++ b/lib/test_free_pages.c
@@ -44,4 +44,5 @@ static void m_ex(void)
module_init(m_in);
module_exit(m_ex);
MODULE_AUTHOR("Matthew Wilcox <willy@infradead.org>");
+MODULE_DESCRIPTION("Check that free_pages() doesn't leak memory");
MODULE_LICENSE("GPL");
diff --git a/lib/test_hash.c b/lib/test_hash.c
index bb25fda34794..a7af39662a0a 100644
--- a/lib/test_hash.c
+++ b/lib/test_hash.c
@@ -235,4 +235,5 @@ static struct kunit_suite hash_test_suite = {
kunit_test_suite(hash_test_suite);
+MODULE_DESCRIPTION("Test cases for <linux/hash.h> and <linux/stringhash.h>");
MODULE_LICENSE("GPL");
diff --git a/lib/test_hexdump.c b/lib/test_hexdump.c
index b916801f23a8..751645645988 100644
--- a/lib/test_hexdump.c
+++ b/lib/test_hexdump.c
@@ -113,7 +113,7 @@ static void __init test_hexdump_prepare_test(size_t len, int rowsize,
*p++ = ' ';
} while (p < test + rs * 2 + rs / gs + 1);
- strncpy(p, data_a, l);
+ memcpy(p, data_a, l);
p += l;
}
@@ -253,4 +253,5 @@ static void __exit test_hexdump_exit(void)
module_exit(test_hexdump_exit);
MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_DESCRIPTION("Test cases for lib/hexdump.c module");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/lib/test_hmm.c b/lib/test_hmm.c
index 717dcb830127..ee20e1f9bae9 100644
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c
@@ -1226,8 +1226,8 @@ static void dmirror_device_evict_chunk(struct dmirror_chunk *chunk)
unsigned long *src_pfns;
unsigned long *dst_pfns;
- src_pfns = kcalloc(npages, sizeof(*src_pfns), GFP_KERNEL);
- dst_pfns = kcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL);
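+ /* npages can be huge and eviction must not fail, hence kvcalloc() + __GFP_NOFAIL. */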
+ src_pfns = kvcalloc(npages, sizeof(*src_pfns), GFP_KERNEL | __GFP_NOFAIL);
+ dst_pfns = kvcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL | __GFP_NOFAIL);
migrate_device_range(src_pfns, start_pfn, npages);
for (i = 0; i < npages; i++) {
@@ -1250,8 +1250,8 @@ static void dmirror_device_evict_chunk(struct dmirror_chunk *chunk)
}
migrate_device_pages(src_pfns, dst_pfns, npages);
migrate_device_finalize(src_pfns, dst_pfns, npages);
- kfree(src_pfns);
- kfree(dst_pfns);
+ kvfree(src_pfns);
+ kvfree(dst_pfns);
}
/* Removes free pages from the free list so they can't be re-allocated */
@@ -1550,4 +1550,5 @@ static void __exit hmm_dmirror_exit(void)
module_init(hmm_dmirror_init);
module_exit(hmm_dmirror_exit);
+MODULE_DESCRIPTION("HMM (Heterogeneous Memory Management) test module");
MODULE_LICENSE("GPL");
diff --git a/lib/test_ida.c b/lib/test_ida.c
index 072a49897e71..c80155a1956d 100644
--- a/lib/test_ida.c
+++ b/lib/test_ida.c
@@ -214,4 +214,5 @@ static void ida_exit(void)
module_init(ida_checks);
module_exit(ida_exit);
MODULE_AUTHOR("Matthew Wilcox <willy@infradead.org>");
+MODULE_DESCRIPTION("Test the IDA API");
MODULE_LICENSE("GPL");
diff --git a/lib/test_kmod.c b/lib/test_kmod.c
index 1eec3b7ac67c..064ed0fce75a 100644
--- a/lib/test_kmod.c
+++ b/lib/test_kmod.c
@@ -1223,4 +1223,5 @@ static void __exit test_kmod_exit(void)
module_exit(test_kmod_exit);
MODULE_AUTHOR("Luis R. Rodriguez <mcgrof@kernel.org>");
+MODULE_DESCRIPTION("kmod stress test driver");
MODULE_LICENSE("GPL");
diff --git a/lib/test_kprobes.c b/lib/test_kprobes.c
index 0648f7154f5c..b7582010125c 100644
--- a/lib/test_kprobes.c
+++ b/lib/test_kprobes.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * test_kprobes.c - simple sanity test for *probes
+ * test_kprobes.c - simple sanity test for k*probes
*
* Copyright IBM Corp. 2008
*/
@@ -400,4 +400,5 @@ static struct kunit_suite kprobes_test_suite = {
kunit_test_suites(&kprobes_test_suite);
+MODULE_DESCRIPTION("simple sanity test for k*probes");
MODULE_LICENSE("GPL");
diff --git a/lib/test_linear_ranges.c b/lib/test_linear_ranges.c
index c18f9c0f1f25..f482be00f1bc 100644
--- a/lib/test_linear_ranges.c
+++ b/lib/test_linear_ranges.c
@@ -216,4 +216,5 @@ static struct kunit_suite range_test_module = {
kunit_test_suites(&range_test_module);
+MODULE_DESCRIPTION("KUnit test for the linear_ranges helper");
MODULE_LICENSE("GPL");
diff --git a/lib/test_list_sort.c b/lib/test_list_sort.c
index cc5f335f29b5..30879abc8a42 100644
--- a/lib/test_list_sort.c
+++ b/lib/test_list_sort.c
@@ -119,4 +119,5 @@ static struct kunit_suite list_sort_suite = {
kunit_test_suites(&list_sort_suite);
+MODULE_DESCRIPTION("list_sort() KUnit test suite");
MODULE_LICENSE("GPL");
diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
index 399380db449c..31561e0e1a0d 100644
--- a/lib/test_maple_tree.c
+++ b/lib/test_maple_tree.c
@@ -3946,4 +3946,5 @@ static void __exit maple_tree_harvest(void)
module_init(maple_tree_seed);
module_exit(maple_tree_harvest);
MODULE_AUTHOR("Liam R. Howlett <Liam.Howlett@Oracle.com>");
+MODULE_DESCRIPTION("maple tree API test module");
MODULE_LICENSE("GPL");
diff --git a/lib/test_memcat_p.c b/lib/test_memcat_p.c
index 849c477d49d0..7e0797a6bebf 100644
--- a/lib/test_memcat_p.c
+++ b/lib/test_memcat_p.c
@@ -112,4 +112,5 @@ static void __exit test_memcat_p_exit(void)
module_init(test_memcat_p_init);
module_exit(test_memcat_p_exit);
+MODULE_DESCRIPTION("Test cases for memcat_p() in lib/memcat_p.c");
MODULE_LICENSE("GPL");
diff --git a/lib/test_meminit.c b/lib/test_meminit.c
index 0dc173849a54..6298f66c964b 100644
--- a/lib/test_meminit.c
+++ b/lib/test_meminit.c
@@ -436,4 +436,5 @@ static int __init test_meminit_init(void)
}
module_init(test_meminit_init);
+MODULE_DESCRIPTION("Test cases for SL[AOU]B/page initialization at alloc/free time");
MODULE_LICENSE("GPL");
diff --git a/lib/test_min_heap.c b/lib/test_min_heap.c
index 7b01b4387cfb..64c877e73b64 100644
--- a/lib/test_min_heap.c
+++ b/lib/test_min_heap.c
@@ -11,17 +11,19 @@
#include <linux/printk.h>
#include <linux/random.h>
-static __init bool less_than(const void *lhs, const void *rhs)
+DEFINE_MIN_HEAP(int, min_heap_test);
+
+static __init bool less_than(const void *lhs, const void *rhs, void __always_unused *args)
{
return *(int *)lhs < *(int *)rhs;
}
-static __init bool greater_than(const void *lhs, const void *rhs)
+static __init bool greater_than(const void *lhs, const void *rhs, void __always_unused *args)
{
return *(int *)lhs > *(int *)rhs;
}
-static __init void swap_ints(void *lhs, void *rhs)
+static __init void swap_ints(void *lhs, void *rhs, void __always_unused *args)
{
int temp = *(int *)lhs;
@@ -30,7 +32,7 @@ static __init void swap_ints(void *lhs, void *rhs)
}
static __init int pop_verify_heap(bool min_heap,
- struct min_heap *heap,
+ struct min_heap_test *heap,
const struct min_heap_callbacks *funcs)
{
int *values = heap->data;
@@ -38,7 +40,7 @@ static __init int pop_verify_heap(bool min_heap,
int last;
last = values[0];
- min_heap_pop(heap, funcs);
+ min_heap_pop(heap, funcs, NULL);
while (heap->nr > 0) {
if (min_heap) {
if (last > values[0]) {
@@ -54,7 +56,7 @@ static __init int pop_verify_heap(bool min_heap,
}
}
last = values[0];
- min_heap_pop(heap, funcs);
+ min_heap_pop(heap, funcs, NULL);
}
return err;
}
@@ -63,20 +65,19 @@ static __init int test_heapify_all(bool min_heap)
{
int values[] = { 3, 1, 2, 4, 0x8000000, 0x7FFFFFF, 0,
-3, -1, -2, -4, 0x8000000, 0x7FFFFFF };
- struct min_heap heap = {
+ struct min_heap_test heap = {
.data = values,
.nr = ARRAY_SIZE(values),
.size = ARRAY_SIZE(values),
};
struct min_heap_callbacks funcs = {
- .elem_size = sizeof(int),
.less = min_heap ? less_than : greater_than,
.swp = swap_ints,
};
int i, err;
/* Test with known set of values. */
- min_heapify_all(&heap, &funcs);
+ min_heapify_all(&heap, &funcs, NULL);
err = pop_verify_heap(min_heap, &heap, &funcs);
@@ -85,7 +86,7 @@ static __init int test_heapify_all(bool min_heap)
for (i = 0; i < heap.nr; i++)
values[i] = get_random_u32();
- min_heapify_all(&heap, &funcs);
+ min_heapify_all(&heap, &funcs, NULL);
err += pop_verify_heap(min_heap, &heap, &funcs);
return err;
@@ -96,13 +97,12 @@ static __init int test_heap_push(bool min_heap)
const int data[] = { 3, 1, 2, 4, 0x80000000, 0x7FFFFFFF, 0,
-3, -1, -2, -4, 0x80000000, 0x7FFFFFFF };
int values[ARRAY_SIZE(data)];
- struct min_heap heap = {
+ struct min_heap_test heap = {
.data = values,
.nr = 0,
.size = ARRAY_SIZE(values),
};
struct min_heap_callbacks funcs = {
- .elem_size = sizeof(int),
.less = min_heap ? less_than : greater_than,
.swp = swap_ints,
};
@@ -110,14 +110,14 @@ static __init int test_heap_push(bool min_heap)
/* Test with known set of values copied from data. */
for (i = 0; i < ARRAY_SIZE(data); i++)
- min_heap_push(&heap, &data[i], &funcs);
+ min_heap_push(&heap, &data[i], &funcs, NULL);
err = pop_verify_heap(min_heap, &heap, &funcs);
/* Test with randomly generated values. */
while (heap.nr < heap.size) {
temp = get_random_u32();
- min_heap_push(&heap, &temp, &funcs);
+ min_heap_push(&heap, &temp, &funcs, NULL);
}
err += pop_verify_heap(min_heap, &heap, &funcs);
@@ -129,13 +129,12 @@ static __init int test_heap_pop_push(bool min_heap)
const int data[] = { 3, 1, 2, 4, 0x80000000, 0x7FFFFFFF, 0,
-3, -1, -2, -4, 0x80000000, 0x7FFFFFFF };
int values[ARRAY_SIZE(data)];
- struct min_heap heap = {
+ struct min_heap_test heap = {
.data = values,
.nr = 0,
.size = ARRAY_SIZE(values),
};
struct min_heap_callbacks funcs = {
- .elem_size = sizeof(int),
.less = min_heap ? less_than : greater_than,
.swp = swap_ints,
};
@@ -144,28 +143,62 @@ static __init int test_heap_pop_push(bool min_heap)
/* Fill values with data to pop and replace. */
temp = min_heap ? 0x80000000 : 0x7FFFFFFF;
for (i = 0; i < ARRAY_SIZE(data); i++)
- min_heap_push(&heap, &temp, &funcs);
+ min_heap_push(&heap, &temp, &funcs, NULL);
/* Test with known set of values copied from data. */
for (i = 0; i < ARRAY_SIZE(data); i++)
- min_heap_pop_push(&heap, &data[i], &funcs);
+ min_heap_pop_push(&heap, &data[i], &funcs, NULL);
err = pop_verify_heap(min_heap, &heap, &funcs);
heap.nr = 0;
for (i = 0; i < ARRAY_SIZE(data); i++)
- min_heap_push(&heap, &temp, &funcs);
+ min_heap_push(&heap, &temp, &funcs, NULL);
/* Test with randomly generated values. */
for (i = 0; i < ARRAY_SIZE(data); i++) {
temp = get_random_u32();
- min_heap_pop_push(&heap, &temp, &funcs);
+ min_heap_pop_push(&heap, &temp, &funcs, NULL);
}
err += pop_verify_heap(min_heap, &heap, &funcs);
return err;
}
+static __init int test_heap_del(bool min_heap)
+{
+ int values[] = { 3, 1, 2, 4, 0x8000000, 0x7FFFFFF, 0,
+ -3, -1, -2, -4, 0x8000000, 0x7FFFFFF };
+ struct min_heap_test heap;
+ struct min_heap_callbacks funcs = {
+ .less = min_heap ? less_than : greater_than,
+ .swp = swap_ints,
+ };
+ int i, err;
+
+ min_heap_init(&heap, values, ARRAY_SIZE(values));
+ heap.nr = ARRAY_SIZE(values);
+
+ /* Test with known set of values. */
+ min_heapify_all(&heap, &funcs, NULL);
+ for (i = 0; i < ARRAY_SIZE(values) / 2; i++)
+ min_heap_del(&heap, get_random_u32() % heap.nr, &funcs, NULL);
+ err = pop_verify_heap(min_heap, &heap, &funcs);
+
+ /* Test with randomly generated values. */
+ heap.nr = ARRAY_SIZE(values);
+ for (i = 0; i < heap.nr; i++)
+ values[i] = get_random_u32();
+ min_heapify_all(&heap, &funcs, NULL);
+
+ for (i = 0; i < ARRAY_SIZE(values) / 2; i++)
+ min_heap_del(&heap, get_random_u32() % heap.nr, &funcs, NULL);
+ err += pop_verify_heap(min_heap, &heap, &funcs);
+
+ return err;
+}
+
static int __init test_min_heap_init(void)
{
int err = 0;
@@ -176,6 +209,8 @@ static int __init test_min_heap_init(void)
err += test_heap_push(false);
err += test_heap_pop_push(true);
err += test_heap_pop_push(false);
+ err += test_heap_del(true);
+ err += test_heap_del(false);
if (err) {
pr_err("test failed with %d errors\n", err);
return -EINVAL;
@@ -191,4 +226,5 @@ static void __exit test_min_heap_exit(void)
}
module_exit(test_min_heap_exit);
+MODULE_DESCRIPTION("Test cases for the min max heap");
MODULE_LICENSE("GPL");
diff --git a/lib/test_module.c b/lib/test_module.c
index debd19e35198..3d1b29b74807 100644
--- a/lib/test_module.c
+++ b/lib/test_module.c
@@ -31,4 +31,5 @@ static void __exit test_module_exit(void)
module_exit(test_module_exit);
MODULE_AUTHOR("Kees Cook <keescook@chromium.org>");
+MODULE_DESCRIPTION("module loading subsystem test module");
MODULE_LICENSE("GPL");
diff --git a/lib/test_objagg.c b/lib/test_objagg.c
index c0c957c50635..d34df4306b87 100644
--- a/lib/test_objagg.c
+++ b/lib/test_objagg.c
@@ -60,7 +60,7 @@ static struct objagg_obj *world_obj_get(struct world *world,
if (!world->key_refs[key_id_index(key_id)]) {
world->objagg_objs[key_id_index(key_id)] = objagg_obj;
} else if (world->objagg_objs[key_id_index(key_id)] != objagg_obj) {
- pr_err("Key %u: God another object for the same key.\n",
+ pr_err("Key %u: Got another object for the same key.\n",
key_id);
err = -EINVAL;
goto err_key_id_check;
diff --git a/lib/test_printf.c b/lib/test_printf.c
index 69b6a5e177f2..965cb6f28527 100644
--- a/lib/test_printf.c
+++ b/lib/test_printf.c
@@ -824,4 +824,5 @@ static void __init selftest(void)
KSTM_MODULE_LOADERS(test_printf);
MODULE_AUTHOR("Rasmus Villemoes <linux@rasmusvillemoes.dk>");
+MODULE_DESCRIPTION("Test cases for printf facility");
MODULE_LICENSE("GPL");
diff --git a/lib/test_ref_tracker.c b/lib/test_ref_tracker.c
index 49970a7c96f3..b983ceb12afc 100644
--- a/lib/test_ref_tracker.c
+++ b/lib/test_ref_tracker.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Referrence tracker self test.
+ * Reference tracker self test.
*
* Copyright (c) 2021 Eric Dumazet <edumazet@google.com>
*/
@@ -112,4 +112,5 @@ static void __exit test_ref_tracker_exit(void)
module_init(test_ref_tracker_init);
module_exit(test_ref_tracker_exit);
+MODULE_DESCRIPTION("Reference tracker self test");
MODULE_LICENSE("GPL v2");
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index 42b585208249..c63db03ebb9d 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -811,4 +811,5 @@ static void __exit test_rht_exit(void)
module_init(test_rht_init);
module_exit(test_rht_exit);
+MODULE_DESCRIPTION("Resizable, Scalable, Concurrent Hash Table test module");
MODULE_LICENSE("GPL v2");
diff --git a/lib/test_scanf.c b/lib/test_scanf.c
index a2707af2951a..7257b1768545 100644
--- a/lib/test_scanf.c
+++ b/lib/test_scanf.c
@@ -810,4 +810,5 @@ static void __init selftest(void)
KSTM_MODULE_LOADERS(test_scanf);
MODULE_AUTHOR("Richard Fitzgerald <rf@opensource.cirrus.com>");
+MODULE_DESCRIPTION("Test cases for sscanf facility");
MODULE_LICENSE("GPL v2");
diff --git a/lib/test_sort.c b/lib/test_sort.c
index be02e3a098cf..cd4a338d1153 100644
--- a/lib/test_sort.c
+++ b/lib/test_sort.c
@@ -29,7 +29,19 @@ static void test_sort(struct kunit *test)
sort(a, TEST_LEN, sizeof(*a), cmpint, NULL);
- for (i = 0; i < TEST_LEN-1; i++)
+ for (i = 0; i < TEST_LEN - 1; i++)
+ KUNIT_ASSERT_LE(test, a[i], a[i + 1]);
+
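+ /*
+ * Refill with a deterministic pseudo-random pattern and sort again,
+ * this time with one element fewer.
+ */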
+ r = 48;
+
+ for (i = 0; i < TEST_LEN - 1; i++) {
+ r = (r * 725861) % 6599;
+ a[i] = r;
+ }
+
+ sort(a, TEST_LEN - 1, sizeof(*a), cmpint, NULL);
+
+ for (i = 0; i < TEST_LEN - 2; i++)
KUNIT_ASSERT_LE(test, a[i], a[i + 1]);
}
@@ -45,4 +57,5 @@ static struct kunit_suite sort_test_suite = {
kunit_test_suites(&sort_test_suite);
+MODULE_DESCRIPTION("sort() KUnit test suite");
MODULE_LICENSE("GPL");
diff --git a/lib/test_static_key_base.c b/lib/test_static_key_base.c
index 5089a2e2bdd8..9f507672afa5 100644
--- a/lib/test_static_key_base.c
+++ b/lib/test_static_key_base.c
@@ -57,4 +57,5 @@ module_init(test_static_key_base_init);
module_exit(test_static_key_base_exit);
MODULE_AUTHOR("Jason Baron <jbaron@akamai.com>");
+MODULE_DESCRIPTION("Kernel module to support testing static keys");
MODULE_LICENSE("GPL");
diff --git a/lib/test_static_keys.c b/lib/test_static_keys.c
index 42daa74be029..00c715f30df9 100644
--- a/lib/test_static_keys.c
+++ b/lib/test_static_keys.c
@@ -236,4 +236,5 @@ module_init(test_static_key_init);
module_exit(test_static_key_exit);
MODULE_AUTHOR("Jason Baron <jbaron@akamai.com>");
+MODULE_DESCRIPTION("Kernel module for testing static keys");
MODULE_LICENSE("GPL");
diff --git a/lib/test_sysctl.c b/lib/test_sysctl.c
index 9321d850931f..b6696fa1d426 100644
--- a/lib/test_sysctl.c
+++ b/lib/test_sysctl.c
@@ -280,4 +280,5 @@ static void __exit test_sysctl_exit(void)
module_exit(test_sysctl_exit);
MODULE_AUTHOR("Luis R. Rodriguez <mcgrof@kernel.org>");
+MODULE_DESCRIPTION("proc sysctl test driver");
MODULE_LICENSE("GPL");
diff --git a/lib/test_ubsan.c b/lib/test_ubsan.c
index c288df9372ed..5d7b10e98610 100644
--- a/lib/test_ubsan.c
+++ b/lib/test_ubsan.c
@@ -156,4 +156,5 @@ static void __exit test_ubsan_exit(void)
module_exit(test_ubsan_exit);
MODULE_AUTHOR("Jinbum Park <jinb.park7@gmail.com>");
+MODULE_DESCRIPTION("UBSAN unit test");
MODULE_LICENSE("GPL v2");
diff --git a/lib/test_user_copy.c b/lib/test_user_copy.c
deleted file mode 100644
index 5ff04d8fe971..000000000000
--- a/lib/test_user_copy.c
+++ /dev/null
@@ -1,331 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Kernel module for testing copy_to/from_user infrastructure.
- *
- * Copyright 2013 Google Inc. All Rights Reserved
- *
- * Authors:
- * Kees Cook <keescook@chromium.org>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/mman.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <linux/vmalloc.h>
-
-/*
- * Several 32-bit architectures support 64-bit {get,put}_user() calls.
- * As there doesn't appear to be anything that can safely determine
- * their capability at compile-time, we just have to opt-out certain archs.
- */
-#if BITS_PER_LONG == 64 || (!(defined(CONFIG_ARM) && !defined(MMU)) && \
- !defined(CONFIG_M68K) && \
- !defined(CONFIG_MICROBLAZE) && \
- !defined(CONFIG_NIOS2) && \
- !defined(CONFIG_PPC32) && \
- !defined(CONFIG_SUPERH))
-# define TEST_U64
-#endif
-
-#define test(condition, msg, ...) \
-({ \
- int cond = (condition); \
- if (cond) \
- pr_warn("[%d] " msg "\n", __LINE__, ##__VA_ARGS__); \
- cond; \
-})
-
-static bool is_zeroed(void *from, size_t size)
-{
- return memchr_inv(from, 0x0, size) == NULL;
-}
-
-static int test_check_nonzero_user(char *kmem, char __user *umem, size_t size)
-{
- int ret = 0;
- size_t start, end, i, zero_start, zero_end;
-
- if (test(size < 2 * PAGE_SIZE, "buffer too small"))
- return -EINVAL;
-
- /*
- * We want to cross a page boundary to exercise the code more
- * effectively. We also don't want to make the size we scan too large,
- * otherwise the test can take a long time and cause soft lockups. So
- * scan a 1024 byte region across the page boundary.
- */
- size = 1024;
- start = PAGE_SIZE - (size / 2);
-
- kmem += start;
- umem += start;
-
- zero_start = size / 4;
- zero_end = size - zero_start;
-
- /*
- * We conduct a series of check_nonzero_user() tests on a block of
- * memory with the following byte-pattern (trying every possible
- * [start,end] pair):
- *
- * [ 00 ff 00 ff ... 00 00 00 00 ... ff 00 ff 00 ]
- *
- * And we verify that check_nonzero_user() acts identically to
- * memchr_inv().
- */
-
- memset(kmem, 0x0, size);
- for (i = 1; i < zero_start; i += 2)
- kmem[i] = 0xff;
- for (i = zero_end; i < size; i += 2)
- kmem[i] = 0xff;
-
- ret |= test(copy_to_user(umem, kmem, size),
- "legitimate copy_to_user failed");
-
- for (start = 0; start <= size; start++) {
- for (end = start; end <= size; end++) {
- size_t len = end - start;
- int retval = check_zeroed_user(umem + start, len);
- int expected = is_zeroed(kmem + start, len);
-
- ret |= test(retval != expected,
- "check_nonzero_user(=%d) != memchr_inv(=%d) mismatch (start=%zu, end=%zu)",
- retval, expected, start, end);
- }
- }
-
- return ret;
-}
-
-static int test_copy_struct_from_user(char *kmem, char __user *umem,
- size_t size)
-{
- int ret = 0;
- char *umem_src = NULL, *expected = NULL;
- size_t ksize, usize;
-
- umem_src = kmalloc(size, GFP_KERNEL);
- ret = test(umem_src == NULL, "kmalloc failed");
- if (ret)
- goto out_free;
-
- expected = kmalloc(size, GFP_KERNEL);
- ret = test(expected == NULL, "kmalloc failed");
- if (ret)
- goto out_free;
-
- /* Fill umem with a fixed byte pattern. */
- memset(umem_src, 0x3e, size);
- ret |= test(copy_to_user(umem, umem_src, size),
- "legitimate copy_to_user failed");
-
- /* Check basic case -- (usize == ksize). */
- ksize = size;
- usize = size;
-
- memcpy(expected, umem_src, ksize);
-
- memset(kmem, 0x0, size);
- ret |= test(copy_struct_from_user(kmem, ksize, umem, usize),
- "copy_struct_from_user(usize == ksize) failed");
- ret |= test(memcmp(kmem, expected, ksize),
- "copy_struct_from_user(usize == ksize) gives unexpected copy");
-
- /* Old userspace case -- (usize < ksize). */
- ksize = size;
- usize = size / 2;
-
- memcpy(expected, umem_src, usize);
- memset(expected + usize, 0x0, ksize - usize);
-
- memset(kmem, 0x0, size);
- ret |= test(copy_struct_from_user(kmem, ksize, umem, usize),
- "copy_struct_from_user(usize < ksize) failed");
- ret |= test(memcmp(kmem, expected, ksize),
- "copy_struct_from_user(usize < ksize) gives unexpected copy");
-
- /* New userspace (-E2BIG) case -- (usize > ksize). */
- ksize = size / 2;
- usize = size;
-
- memset(kmem, 0x0, size);
- ret |= test(copy_struct_from_user(kmem, ksize, umem, usize) != -E2BIG,
- "copy_struct_from_user(usize > ksize) didn't give E2BIG");
-
- /* New userspace (success) case -- (usize > ksize). */
- ksize = size / 2;
- usize = size;
-
- memcpy(expected, umem_src, ksize);
- ret |= test(clear_user(umem + ksize, usize - ksize),
- "legitimate clear_user failed");
-
- memset(kmem, 0x0, size);
- ret |= test(copy_struct_from_user(kmem, ksize, umem, usize),
- "copy_struct_from_user(usize > ksize) failed");
- ret |= test(memcmp(kmem, expected, ksize),
- "copy_struct_from_user(usize > ksize) gives unexpected copy");
-
-out_free:
- kfree(expected);
- kfree(umem_src);
- return ret;
-}
-
-static int __init test_user_copy_init(void)
-{
- int ret = 0;
- char *kmem;
- char __user *usermem;
- char *bad_usermem;
- unsigned long user_addr;
- u8 val_u8;
- u16 val_u16;
- u32 val_u32;
-#ifdef TEST_U64
- u64 val_u64;
-#endif
-
- kmem = kmalloc(PAGE_SIZE * 2, GFP_KERNEL);
- if (!kmem)
- return -ENOMEM;
-
- user_addr = vm_mmap(NULL, 0, PAGE_SIZE * 2,
- PROT_READ | PROT_WRITE | PROT_EXEC,
- MAP_ANONYMOUS | MAP_PRIVATE, 0);
- if (user_addr >= (unsigned long)(TASK_SIZE)) {
- pr_warn("Failed to allocate user memory\n");
- kfree(kmem);
- return -ENOMEM;
- }
-
- usermem = (char __user *)user_addr;
- bad_usermem = (char *)user_addr;
-
- /*
- * Legitimate usage: none of these copies should fail.
- */
- memset(kmem, 0x3a, PAGE_SIZE * 2);
- ret |= test(copy_to_user(usermem, kmem, PAGE_SIZE),
- "legitimate copy_to_user failed");
- memset(kmem, 0x0, PAGE_SIZE);
- ret |= test(copy_from_user(kmem, usermem, PAGE_SIZE),
- "legitimate copy_from_user failed");
- ret |= test(memcmp(kmem, kmem + PAGE_SIZE, PAGE_SIZE),
- "legitimate usercopy failed to copy data");
-
-#define test_legit(size, check) \
- do { \
- val_##size = check; \
- ret |= test(put_user(val_##size, (size __user *)usermem), \
- "legitimate put_user (" #size ") failed"); \
- val_##size = 0; \
- ret |= test(get_user(val_##size, (size __user *)usermem), \
- "legitimate get_user (" #size ") failed"); \
- ret |= test(val_##size != check, \
- "legitimate get_user (" #size ") failed to do copy"); \
- if (val_##size != check) { \
- pr_info("0x%llx != 0x%llx\n", \
- (unsigned long long)val_##size, \
- (unsigned long long)check); \
- } \
- } while (0)
-
- test_legit(u8, 0x5a);
- test_legit(u16, 0x5a5b);
- test_legit(u32, 0x5a5b5c5d);
-#ifdef TEST_U64
- test_legit(u64, 0x5a5b5c5d6a6b6c6d);
-#endif
-#undef test_legit
-
- /* Test usage of check_nonzero_user(). */
- ret |= test_check_nonzero_user(kmem, usermem, 2 * PAGE_SIZE);
- /* Test usage of copy_struct_from_user(). */
- ret |= test_copy_struct_from_user(kmem, usermem, 2 * PAGE_SIZE);
-
- /*
- * Invalid usage: none of these copies should succeed.
- */
-
- /* Prepare kernel memory with check values. */
- memset(kmem, 0x5a, PAGE_SIZE);
- memset(kmem + PAGE_SIZE, 0, PAGE_SIZE);
-
- /* Reject kernel-to-kernel copies through copy_from_user(). */
- ret |= test(!copy_from_user(kmem, (char __user *)(kmem + PAGE_SIZE),
- PAGE_SIZE),
- "illegal all-kernel copy_from_user passed");
-
- /* Destination half of buffer should have been zeroed. */
- ret |= test(memcmp(kmem + PAGE_SIZE, kmem, PAGE_SIZE),
- "zeroing failure for illegal all-kernel copy_from_user");
-
-#if 0
- /*
- * When running with SMAP/PAN/etc, this will Oops the kernel
- * due to the zeroing of userspace memory on failure. This needs
- * to be tested in LKDTM instead, since this test module does not
- * expect to explode.
- */
- ret |= test(!copy_from_user(bad_usermem, (char __user *)kmem,
- PAGE_SIZE),
- "illegal reversed copy_from_user passed");
-#endif
- ret |= test(!copy_to_user((char __user *)kmem, kmem + PAGE_SIZE,
- PAGE_SIZE),
- "illegal all-kernel copy_to_user passed");
- ret |= test(!copy_to_user((char __user *)kmem, bad_usermem,
- PAGE_SIZE),
- "illegal reversed copy_to_user passed");
-
-#define test_illegal(size, check) \
- do { \
- val_##size = (check); \
- ret |= test(!get_user(val_##size, (size __user *)kmem), \
- "illegal get_user (" #size ") passed"); \
- ret |= test(val_##size != (size)0, \
- "zeroing failure for illegal get_user (" #size ")"); \
- if (val_##size != (size)0) { \
- pr_info("0x%llx != 0\n", \
- (unsigned long long)val_##size); \
- } \
- ret |= test(!put_user(val_##size, (size __user *)kmem), \
- "illegal put_user (" #size ") passed"); \
- } while (0)
-
- test_illegal(u8, 0x5a);
- test_illegal(u16, 0x5a5b);
- test_illegal(u32, 0x5a5b5c5d);
-#ifdef TEST_U64
- test_illegal(u64, 0x5a5b5c5d6a6b6c6d);
-#endif
-#undef test_illegal
-
- vm_munmap(user_addr, PAGE_SIZE * 2);
- kfree(kmem);
-
- if (ret == 0) {
- pr_info("tests passed.\n");
- return 0;
- }
-
- return -EINVAL;
-}
-
-module_init(test_user_copy_init);
-
-static void __exit test_user_copy_exit(void)
-{
- pr_info("unloaded.\n");
-}
-
-module_exit(test_user_copy_exit);
-
-MODULE_AUTHOR("Kees Cook <keescook@chromium.org>");
-MODULE_LICENSE("GPL");
diff --git a/lib/test_uuid.c b/lib/test_uuid.c
index cd819c397dc7..0124fad5d72c 100644
--- a/lib/test_uuid.c
+++ b/lib/test_uuid.c
@@ -130,4 +130,5 @@ static void __exit test_uuid_exit(void)
module_exit(test_uuid_exit);
MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_DESCRIPTION("Test cases for lib/uuid.c module");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index ebe2af2e072d..d5c5cbba33ed 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -744,15 +744,20 @@ static noinline void check_xa_multi_store_adv_add(struct xarray *xa,
do {
xas_lock_irq(&xas);
-
xas_store(&xas, p);
- XA_BUG_ON(xa, xas_error(&xas));
- XA_BUG_ON(xa, xa_load(xa, index) != p);
-
xas_unlock_irq(&xas);
+ /*
+ * In our selftest the only failure we can expect is running out
+ * of memory, as we're not mimicking the entire page cache, so
+ * verify that's the only error we can run into here. The
+ * xas_nomem() call that follows will fix that condition for us
+ * so the loop can chug on.
+ */
+ XA_BUG_ON(xa, xas_error(&xas) && xas_error(&xas) != -ENOMEM);
} while (xas_nomem(&xas, GFP_KERNEL));
XA_BUG_ON(xa, xas_error(&xas));
+ XA_BUG_ON(xa, xa_load(xa, index) != p);
}
/* mimics page_cache_delete() */
@@ -1783,9 +1788,11 @@ static void check_split_1(struct xarray *xa, unsigned long index,
unsigned int order, unsigned int new_order)
{
XA_STATE_ORDER(xas, xa, index, new_order);
- unsigned int i;
+ unsigned int i, found;
+ void *entry;
xa_store_order(xa, index, order, xa, GFP_KERNEL);
+ xa_set_mark(xa, index, XA_MARK_1);
xas_split_alloc(&xas, xa, order, GFP_KERNEL);
xas_lock(&xas);
@@ -1802,6 +1809,16 @@ static void check_split_1(struct xarray *xa, unsigned long index,
xa_set_mark(xa, index, XA_MARK_0);
XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));
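+
+ /*
+ * Splitting one order-@order entry into order-@new_order entries
+ * leaves 1 << (order - new_order) of them, each of which should
+ * have inherited the XA_MARK_1 set on the original entry above.
+ */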
+ xas_set_order(&xas, index, 0);
+ found = 0;
+ rcu_read_lock();
+ xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_1) {
+ found++;
+ XA_BUG_ON(xa, xa_is_internal(entry));
+ }
+ rcu_read_unlock();
+ XA_BUG_ON(xa, found != 1 << (order - new_order));
+
xa_destroy(xa);
}
@@ -1984,6 +2001,97 @@ static noinline void check_get_order(struct xarray *xa)
}
}
+static noinline void check_xas_get_order(struct xarray *xa)
+{
+ XA_STATE(xas, xa, 0);
+
+ unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
+ unsigned int order;
+ unsigned long i, j;
+
+ for (order = 0; order < max_order; order++) {
+ for (i = 0; i < 10; i++) {
+ xas_set_order(&xas, i << order, order);
+ do {
+ xas_lock(&xas);
+ xas_store(&xas, xa_mk_value(i));
+ xas_unlock(&xas);
+ } while (xas_nomem(&xas, GFP_KERNEL));
+
+ for (j = i << order; j < (i + 1) << order; j++) {
+ xas_set_order(&xas, j, 0);
+ rcu_read_lock();
+ xas_load(&xas);
+ XA_BUG_ON(xa, xas_get_order(&xas) != order);
+ rcu_read_unlock();
+ }
+
+ xas_lock(&xas);
+ xas_set_order(&xas, i << order, order);
+ xas_store(&xas, NULL);
+ xas_unlock(&xas);
+ }
+ }
+}
+
+static noinline void check_xas_conflict_get_order(struct xarray *xa)
+{
+ XA_STATE(xas, xa, 0);
+
+ void *entry;
+ int only_once;
+ unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
+ unsigned int order;
+ unsigned long i, j, k;
+
+ for (order = 0; order < max_order; order++) {
+ for (i = 0; i < 10; i++) {
+ xas_set_order(&xas, i << order, order);
+ do {
+ xas_lock(&xas);
+ xas_store(&xas, xa_mk_value(i));
+ xas_unlock(&xas);
+ } while (xas_nomem(&xas, GFP_KERNEL));
+
+ /*
+ * Ensure xas_get_order works with xas_for_each_conflict.
+ */
+ j = i << order;
+ for (k = 0; k < order; k++) {
+ only_once = 0;
+ xas_set_order(&xas, j + (1 << k), k);
+ xas_lock(&xas);
+ xas_for_each_conflict(&xas, entry) {
+ XA_BUG_ON(xa, entry != xa_mk_value(i));
+ XA_BUG_ON(xa, xas_get_order(&xas) != order);
+ only_once++;
+ }
+ XA_BUG_ON(xa, only_once != 1);
+ xas_unlock(&xas);
+ }
+
+ if (order < max_order - 1) {
+ only_once = 0;
+ xas_set_order(&xas, (i & ~1UL) << order, order + 1);
+ xas_lock(&xas);
+ xas_for_each_conflict(&xas, entry) {
+ XA_BUG_ON(xa, entry != xa_mk_value(i));
+ XA_BUG_ON(xa, xas_get_order(&xas) != order);
+ only_once++;
+ }
+ XA_BUG_ON(xa, only_once != 1);
+ xas_unlock(&xas);
+ }
+
+ xas_set_order(&xas, i << order, order);
+ xas_lock(&xas);
+ xas_store(&xas, NULL);
+ xas_unlock(&xas);
+ }
+ }
+}
+
+
static noinline void check_destroy(struct xarray *xa)
{
unsigned long index;
@@ -2035,6 +2143,8 @@ static int xarray_checks(void)
check_multi_store(&array);
check_multi_store_advanced(&array);
check_get_order(&array);
+ check_xas_get_order(&array);
+ check_xas_conflict_get_order(&array);
check_xa_alloc();
check_find(&array);
check_find_entry(&array);
@@ -2063,4 +2173,5 @@ static void xarray_exit(void)
module_init(xarray_checks);
module_exit(xarray_exit);
MODULE_AUTHOR("Matthew Wilcox <willy@infradead.org>");
+MODULE_DESCRIPTION("XArray API test module");
MODULE_LICENSE("GPL");
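
An aside on the store-retry idiom that check_xa_multi_store_adv_add() and both new tests lean on: xas_store() can fail with -ENOMEM under the lock, and xas_nomem() then allocates the missing node memory and requests a retry. A minimal sketch of the pattern (assumes struct xarray *xa, unsigned long index and void *entry are in scope):

    XA_STATE(xas, xa, index);

    do {
            xas_lock(&xas);                 /* xas_store() needs the xa_lock */
            xas_store(&xas, entry);
            xas_unlock(&xas);
            /*
             * xas_nomem() returns true only after allocating the memory the
             * failed store needed, in which case the store is retried.
             */
    } while (xas_nomem(&xas, GFP_KERNEL));
    XA_BUG_ON(xa, xas_error(&xas));         /* anything but the handled -ENOMEM */
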
diff --git a/lib/ts_bm.c b/lib/ts_bm.c
index e5f30f9177df..eed5967238c5 100644
--- a/lib/ts_bm.c
+++ b/lib/ts_bm.c
@@ -216,6 +216,7 @@ static void __exit exit_bm(void)
textsearch_unregister(&bm_ops);
}
+MODULE_DESCRIPTION("Boyer-Moore text search implementation");
MODULE_LICENSE("GPL");
module_init(init_bm);
diff --git a/lib/ts_fsm.c b/lib/ts_fsm.c
index 64fd9015ad80..053615f4fcd7 100644
--- a/lib/ts_fsm.c
+++ b/lib/ts_fsm.c
@@ -331,6 +331,7 @@ static void __exit exit_fsm(void)
textsearch_unregister(&fsm_ops);
}
+MODULE_DESCRIPTION("naive finite state machine text search");
MODULE_LICENSE("GPL");
module_init(init_fsm);
diff --git a/lib/ts_kmp.c b/lib/ts_kmp.c
index c77a3d537f24..5520dc28255a 100644
--- a/lib/ts_kmp.c
+++ b/lib/ts_kmp.c
@@ -147,6 +147,7 @@ static void __exit exit_kmp(void)
textsearch_unregister(&kmp_ops);
}
+MODULE_DESCRIPTION("Knuth-Morris-Pratt text search implementation");
MODULE_LICENSE("GPL");
module_init(init_kmp);
diff --git a/lib/ubsan.h b/lib/ubsan.h
index 0abbbac8700d..07e37d4429b4 100644
--- a/lib/ubsan.h
+++ b/lib/ubsan.h
@@ -43,7 +43,7 @@ enum {
struct type_descriptor {
u16 type_kind;
u16 type_info;
- char type_name[1];
+ char type_name[];
};
struct source_location {
@@ -124,19 +124,32 @@ typedef s64 s_max;
typedef u64 u_max;
#endif
-void __ubsan_handle_add_overflow(void *data, void *lhs, void *rhs);
-void __ubsan_handle_sub_overflow(void *data, void *lhs, void *rhs);
-void __ubsan_handle_mul_overflow(void *data, void *lhs, void *rhs);
-void __ubsan_handle_negate_overflow(void *_data, void *old_val);
-void __ubsan_handle_divrem_overflow(void *_data, void *lhs, void *rhs);
-void __ubsan_handle_type_mismatch(struct type_mismatch_data *data, void *ptr);
-void __ubsan_handle_type_mismatch_v1(void *_data, void *ptr);
-void __ubsan_handle_out_of_bounds(void *_data, void *index);
-void __ubsan_handle_shift_out_of_bounds(void *_data, void *lhs, void *rhs);
-void __ubsan_handle_builtin_unreachable(void *_data);
-void __ubsan_handle_load_invalid_value(void *_data, void *val);
-void __ubsan_handle_alignment_assumption(void *_data, unsigned long ptr,
- unsigned long align,
- unsigned long offset);
+/*
+ * When generating Runtime Calls, Clang doesn't respect the -mregparm=3
+ * option used on i386: https://github.com/llvm/llvm-project/issues/89670
+ * Fix this for earlier Clang versions by forcing the calling convention
+ * to use non-register arguments.
+ */
+#if defined(CONFIG_X86_32) && \
+ defined(CONFIG_CC_IS_CLANG) && CONFIG_CLANG_VERSION < 190000
+# define ubsan_linkage asmlinkage
+#else
+# define ubsan_linkage
+#endif
+
+void ubsan_linkage __ubsan_handle_add_overflow(void *data, void *lhs, void *rhs);
+void ubsan_linkage __ubsan_handle_sub_overflow(void *data, void *lhs, void *rhs);
+void ubsan_linkage __ubsan_handle_mul_overflow(void *data, void *lhs, void *rhs);
+void ubsan_linkage __ubsan_handle_negate_overflow(void *_data, void *old_val);
+void ubsan_linkage __ubsan_handle_divrem_overflow(void *_data, void *lhs, void *rhs);
+void ubsan_linkage __ubsan_handle_type_mismatch(struct type_mismatch_data *data, void *ptr);
+void ubsan_linkage __ubsan_handle_type_mismatch_v1(void *_data, void *ptr);
+void ubsan_linkage __ubsan_handle_out_of_bounds(void *_data, void *index);
+void ubsan_linkage __ubsan_handle_shift_out_of_bounds(void *_data, void *lhs, void *rhs);
+void ubsan_linkage __ubsan_handle_builtin_unreachable(void *_data);
+void ubsan_linkage __ubsan_handle_load_invalid_value(void *_data, void *val);
+void ubsan_linkage __ubsan_handle_alignment_assumption(void *_data, unsigned long ptr,
+ unsigned long align,
+ unsigned long offset);
#endif
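
For reference, asmlinkage on 32-bit x86 forces all arguments onto the stack, which matches what the affected Clang versions emit at the UBSan call sites. A sketch of what the macro effectively expands to there, assuming the usual x86 definition of asmlinkage:

    /* i386: pass everything on the stack, overriding -mregparm=3 */
    void __attribute__((regparm(0)))
    __ubsan_handle_out_of_bounds(void *_data, void *index);
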
diff --git a/lib/usercopy.c b/lib/usercopy.c
index d29fe29c6849..7b17b83c8042 100644
--- a/lib/usercopy.c
+++ b/lib/usercopy.c
@@ -1,46 +1,29 @@
// SPDX-License-Identifier: GPL-2.0
-#include <linux/bitops.h>
+#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <linux/export.h>
#include <linux/fault-inject-usercopy.h>
#include <linux/instrumented.h>
-#include <linux/uaccess.h>
+#include <linux/kernel.h>
#include <linux/nospec.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/wordpart.h>
/* out-of-line parts */
-#ifndef INLINE_COPY_FROM_USER
+#if !defined(INLINE_COPY_FROM_USER) || defined(CONFIG_RUST)
unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n)
{
- unsigned long res = n;
- might_fault();
- if (!should_fail_usercopy() && likely(access_ok(from, n))) {
- /*
- * Ensure that bad access_ok() speculation will not
- * lead to nasty side effects *after* the copy is
- * finished:
- */
- barrier_nospec();
- instrument_copy_from_user_before(to, from, n);
- res = raw_copy_from_user(to, from, n);
- instrument_copy_from_user_after(to, from, n, res);
- }
- if (unlikely(res))
- memset(to + (n - res), 0, res);
- return res;
+ return _inline_copy_from_user(to, from, n);
}
EXPORT_SYMBOL(_copy_from_user);
#endif
-#ifndef INLINE_COPY_TO_USER
+#if !defined(INLINE_COPY_TO_USER) || defined(CONFIG_RUST)
unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n)
{
- might_fault();
- if (should_fail_usercopy())
- return n;
- if (likely(access_ok(to, n))) {
- instrument_copy_to_user(to, from, n);
- n = raw_copy_to_user(to, from, n);
- }
- return n;
+ return _inline_copy_to_user(to, from, n);
}
EXPORT_SYMBOL(_copy_to_user);
#endif
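
The deleted bodies above are not lost: they move into _inline_copy_from_user() and _inline_copy_to_user() in the uaccess headers, so the inline, out-of-line, and Rust-visible paths share one definition. A sketch of the from-user side, reconstructed from the removed lines (the real helper may differ in detail):

    static inline unsigned long
    _inline_copy_from_user(void *to, const void __user *from, unsigned long n)
    {
            unsigned long res = n;

            might_fault();
            if (!should_fail_usercopy() && likely(access_ok(from, n))) {
                    /* Don't let bad access_ok() speculation leak past the copy. */
                    barrier_nospec();
                    instrument_copy_from_user_before(to, from, n);
                    res = raw_copy_from_user(to, from, n);
                    instrument_copy_from_user_after(to, from, n, res);
            }
            /* Zero the uncopied tail so callers never see stale data. */
            if (unlikely(res))
                    memset(to + (n - res), 0, res);
            return res;
    }
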
diff --git a/lib/usercopy_kunit.c b/lib/usercopy_kunit.c
new file mode 100644
index 000000000000..77fa00a13df7
--- /dev/null
+++ b/lib/usercopy_kunit.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Kernel module for testing copy_to/from_user infrastructure.
+ *
+ * Copyright 2013 Google Inc. All Rights Reserved
+ *
+ * Authors:
+ * Kees Cook <keescook@chromium.org>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/mman.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <kunit/test.h>
+
+/*
+ * Several 32-bit architectures support 64-bit {get,put}_user() calls.
+ * As there doesn't appear to be anything that can safely determine
+ * their capability at compile-time, we just have to opt out certain archs.
+ */
+#if BITS_PER_LONG == 64 || (!(defined(CONFIG_ARM) && !defined(MMU)) && \
+ !defined(CONFIG_M68K) && \
+ !defined(CONFIG_MICROBLAZE) && \
+ !defined(CONFIG_NIOS2) && \
+ !defined(CONFIG_PPC32) && \
+ !defined(CONFIG_SUPERH))
+# define TEST_U64
+#endif
+
+struct usercopy_test_priv {
+ char *kmem;
+ char __user *umem;
+ size_t size;
+};
+
+static bool is_zeroed(void *from, size_t size)
+{
+ return memchr_inv(from, 0x0, size) == NULL;
+}
+
+/* Test usage of check_nonzero_user(). */
+static void usercopy_test_check_nonzero_user(struct kunit *test)
+{
+ size_t start, end, i, zero_start, zero_end;
+ struct usercopy_test_priv *priv = test->priv;
+ char __user *umem = priv->umem;
+ char *kmem = priv->kmem;
+ size_t size = priv->size;
+
+ KUNIT_ASSERT_GE_MSG(test, size, 2 * PAGE_SIZE, "buffer too small");
+
+ /*
+ * We want to cross a page boundary to exercise the code more
+ * effectively. We also don't want to make the size we scan too large,
+ * otherwise the test can take a long time and cause soft lockups. So
+ * scan a 1024 byte region across the page boundary.
+ */
+ size = 1024;
+ start = PAGE_SIZE - (size / 2);
+
+ kmem += start;
+ umem += start;
+
+ zero_start = size / 4;
+ zero_end = size - zero_start;
+
+ /*
+ * We conduct a series of check_nonzero_user() tests on a block of
+ * memory with the following byte-pattern (trying every possible
+ * [start,end] pair):
+ *
+ * [ 00 ff 00 ff ... 00 00 00 00 ... ff 00 ff 00 ]
+ *
+ * And we verify that check_nonzero_user() acts identically to
+ * memchr_inv().
+ */
+
+ memset(kmem, 0x0, size);
+ for (i = 1; i < zero_start; i += 2)
+ kmem[i] = 0xff;
+ for (i = zero_end; i < size; i += 2)
+ kmem[i] = 0xff;
+
+ KUNIT_EXPECT_EQ_MSG(test, copy_to_user(umem, kmem, size), 0,
+ "legitimate copy_to_user failed");
+
+ for (start = 0; start <= size; start++) {
+ for (end = start; end <= size; end++) {
+ size_t len = end - start;
+ int retval = check_zeroed_user(umem + start, len);
+ int expected = is_zeroed(kmem + start, len);
+
+ KUNIT_ASSERT_EQ_MSG(test, retval, expected,
+ "check_nonzero_user(=%d) != memchr_inv(=%d) mismatch (start=%zu, end=%zu)",
+ retval, expected, start, end);
+ }
+ }
+}
+
+/* Test usage of copy_struct_from_user(). */
+static void usercopy_test_copy_struct_from_user(struct kunit *test)
+{
+ char *umem_src = NULL, *expected = NULL;
+ struct usercopy_test_priv *priv = test->priv;
+ char __user *umem = priv->umem;
+ char *kmem = priv->kmem;
+ size_t size = priv->size;
+ size_t ksize, usize;
+
+ umem_src = kunit_kmalloc(test, size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, umem_src);
+
+ expected = kunit_kmalloc(test, size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, expected);
+
+ /* Fill umem with a fixed byte pattern. */
+ memset(umem_src, 0x3e, size);
+ KUNIT_ASSERT_EQ_MSG(test, copy_to_user(umem, umem_src, size), 0,
+ "legitimate copy_to_user failed");
+
+ /* Check basic case -- (usize == ksize). */
+ ksize = size;
+ usize = size;
+
+ memcpy(expected, umem_src, ksize);
+
+ memset(kmem, 0x0, size);
+ KUNIT_EXPECT_EQ_MSG(test, copy_struct_from_user(kmem, ksize, umem, usize), 0,
+ "copy_struct_from_user(usize == ksize) failed");
+ KUNIT_EXPECT_MEMEQ_MSG(test, kmem, expected, ksize,
+ "copy_struct_from_user(usize == ksize) gives unexpected copy");
+
+ /* Old userspace case -- (usize < ksize). */
+ ksize = size;
+ usize = size / 2;
+
+ memcpy(expected, umem_src, usize);
+ memset(expected + usize, 0x0, ksize - usize);
+
+ memset(kmem, 0x0, size);
+ KUNIT_EXPECT_EQ_MSG(test, copy_struct_from_user(kmem, ksize, umem, usize), 0,
+ "copy_struct_from_user(usize < ksize) failed");
+ KUNIT_EXPECT_MEMEQ_MSG(test, kmem, expected, ksize,
+ "copy_struct_from_user(usize < ksize) gives unexpected copy");
+
+ /* New userspace (-E2BIG) case -- (usize > ksize). */
+ ksize = size / 2;
+ usize = size;
+
+ memset(kmem, 0x0, size);
+ KUNIT_EXPECT_EQ_MSG(test, copy_struct_from_user(kmem, ksize, umem, usize), -E2BIG,
+ "copy_struct_from_user(usize > ksize) didn't give E2BIG");
+
+ /* New userspace (success) case -- (usize > ksize). */
+ ksize = size / 2;
+ usize = size;
+
+ memcpy(expected, umem_src, ksize);
+ KUNIT_EXPECT_EQ_MSG(test, clear_user(umem + ksize, usize - ksize), 0,
+ "legitimate clear_user failed");
+
+ memset(kmem, 0x0, size);
+ KUNIT_EXPECT_EQ_MSG(test, copy_struct_from_user(kmem, ksize, umem, usize), 0,
+ "copy_struct_from_user(usize > ksize) failed");
+ KUNIT_EXPECT_MEMEQ_MSG(test, kmem, expected, ksize,
+ "copy_struct_from_user(usize > ksize) gives unexpected copy");
+}
+
+/*
+ * Legitimate usage: none of these copies should fail.
+ */
+static void usercopy_test_valid(struct kunit *test)
+{
+ struct usercopy_test_priv *priv = test->priv;
+ char __user *usermem = priv->umem;
+ char *kmem = priv->kmem;
+
+ memset(kmem, 0x3a, PAGE_SIZE * 2);
+ KUNIT_EXPECT_EQ_MSG(test, 0, copy_to_user(usermem, kmem, PAGE_SIZE),
+ "legitimate copy_to_user failed");
+ memset(kmem, 0x0, PAGE_SIZE);
+ KUNIT_EXPECT_EQ_MSG(test, 0, copy_from_user(kmem, usermem, PAGE_SIZE),
+ "legitimate copy_from_user failed");
+ KUNIT_EXPECT_MEMEQ_MSG(test, kmem, kmem + PAGE_SIZE, PAGE_SIZE,
+ "legitimate usercopy failed to copy data");
+
+#define test_legit(size, check) \
+ do { \
+ size val_##size = (check); \
+ KUNIT_EXPECT_EQ_MSG(test, 0, \
+ put_user(val_##size, (size __user *)usermem), \
+ "legitimate put_user (" #size ") failed"); \
+ val_##size = 0; \
+ KUNIT_EXPECT_EQ_MSG(test, 0, \
+ get_user(val_##size, (size __user *)usermem), \
+ "legitimate get_user (" #size ") failed"); \
+ KUNIT_EXPECT_EQ_MSG(test, val_##size, check, \
+ "legitimate get_user (" #size ") failed to do copy"); \
+ } while (0)
+
+ test_legit(u8, 0x5a);
+ test_legit(u16, 0x5a5b);
+ test_legit(u32, 0x5a5b5c5d);
+#ifdef TEST_U64
+ test_legit(u64, 0x5a5b5c5d6a6b6c6d);
+#endif
+#undef test_legit
+}
+
+/*
+ * Invalid usage: none of these copies should succeed.
+ */
+static void usercopy_test_invalid(struct kunit *test)
+{
+ struct usercopy_test_priv *priv = test->priv;
+ char __user *usermem = priv->umem;
+ char *bad_usermem = (char *)usermem;
+ char *kmem = priv->kmem;
+ u64 *kmem_u64 = (u64 *)kmem;
+
+ if (IS_ENABLED(CONFIG_ALTERNATE_USER_ADDRESS_SPACE) ||
+ !IS_ENABLED(CONFIG_MMU)) {
+ kunit_skip(test, "Testing for kernel/userspace address confusion is only sensible on architectures with a shared address space");
+ return;
+ }
+
+ /* Prepare kernel memory with check values. */
+ memset(kmem, 0x5a, PAGE_SIZE);
+ memset(kmem + PAGE_SIZE, 0, PAGE_SIZE);
+
+ /* Reject kernel-to-kernel copies through copy_from_user(). */
+ KUNIT_EXPECT_NE_MSG(test, copy_from_user(kmem, (char __user *)(kmem + PAGE_SIZE),
+ PAGE_SIZE), 0,
+ "illegal all-kernel copy_from_user passed");
+
+ /* Destination half of buffer should have been zeroed. */
+ KUNIT_EXPECT_MEMEQ_MSG(test, kmem + PAGE_SIZE, kmem, PAGE_SIZE,
+ "zeroing failure for illegal all-kernel copy_from_user");
+
+#if 0
+ /*
+ * When running with SMAP/PAN/etc, this will Oops the kernel
+ * due to the zeroing of userspace memory on failure. This needs
+ * to be tested in LKDTM instead, since this test module does not
+ * expect to explode.
+ */
+ KUNIT_EXPECT_NE_MSG(test, copy_from_user(bad_usermem, (char __user *)kmem,
+ PAGE_SIZE), 0,
+ "illegal reversed copy_from_user passed");
+#endif
+ KUNIT_EXPECT_NE_MSG(test, copy_to_user((char __user *)kmem, kmem + PAGE_SIZE,
+ PAGE_SIZE), 0,
+ "illegal all-kernel copy_to_user passed");
+
+ KUNIT_EXPECT_NE_MSG(test, copy_to_user((char __user *)kmem, bad_usermem,
+ PAGE_SIZE), 0,
+ "illegal reversed copy_to_user passed");
+
+#define test_illegal(size, check) \
+ do { \
+ size val_##size = (check); \
+ /* get_user() */ \
+ KUNIT_EXPECT_NE_MSG(test, get_user(val_##size, (size __user *)kmem), 0, \
+ "illegal get_user (" #size ") passed"); \
+ KUNIT_EXPECT_EQ_MSG(test, val_##size, 0, \
+ "zeroing failure for illegal get_user (" #size ")"); \
+ /* put_user() */ \
+ *kmem_u64 = 0xF09FA4AFF09FA4AF; \
+ KUNIT_EXPECT_NE_MSG(test, put_user(val_##size, (size __user *)kmem), 0, \
+ "illegal put_user (" #size ") passed"); \
+ KUNIT_EXPECT_EQ_MSG(test, *kmem_u64, 0xF09FA4AFF09FA4AF, \
+ "illegal put_user (" #size ") wrote to kernel memory!"); \
+ } while (0)
+
+ test_illegal(u8, 0x5a);
+ test_illegal(u16, 0x5a5b);
+ test_illegal(u32, 0x5a5b5c5d);
+#ifdef TEST_U64
+ test_illegal(u64, 0x5a5b5c5d6a6b6c6d);
+#endif
+#undef test_illegal
+}
+
+static int usercopy_test_init(struct kunit *test)
+{
+ struct usercopy_test_priv *priv;
+ unsigned long user_addr;
+
+ if (!IS_ENABLED(CONFIG_MMU)) {
+ kunit_skip(test, "Userspace allocation testing not available on non-MMU systems");
+ return 0;
+ }
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
+ test->priv = priv;
+ priv->size = PAGE_SIZE * 2;
+
+ priv->kmem = kunit_kmalloc(test, priv->size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->kmem);
+
+ user_addr = kunit_vm_mmap(test, NULL, 0, priv->size,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_ANONYMOUS | MAP_PRIVATE, 0);
+ KUNIT_ASSERT_NE_MSG(test, user_addr, 0,
+ "Could not create userspace mm");
+ KUNIT_ASSERT_LT_MSG(test, user_addr, (unsigned long)TASK_SIZE,
+ "Failed to allocate user memory");
+ priv->umem = (char __user *)user_addr;
+
+ return 0;
+}
+
+static struct kunit_case usercopy_test_cases[] = {
+ KUNIT_CASE(usercopy_test_valid),
+ KUNIT_CASE(usercopy_test_invalid),
+ KUNIT_CASE(usercopy_test_check_nonzero_user),
+ KUNIT_CASE(usercopy_test_copy_struct_from_user),
+ {}
+};
+
+static struct kunit_suite usercopy_test_suite = {
+ .name = "usercopy",
+ .init = usercopy_test_init,
+ .test_cases = usercopy_test_cases,
+};
+
+kunit_test_suites(&usercopy_test_suite);
+MODULE_AUTHOR("Kees Cook <kees@kernel.org>");
+MODULE_DESCRIPTION("Kernel module for testing copy_to/from_user infrastructure");
+MODULE_LICENSE("GPL");
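
With the conversion from a module_init() self-test to KUnit, the suite can be exercised through the usual KUnit tooling, e.g. (assuming a kunitconfig that enables CONFIG_USERCOPY_KUNIT_TEST):

    ./tools/testing/kunit/kunit.py run usercopy
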
diff --git a/lib/vdso/Kconfig b/lib/vdso/Kconfig
index d883ac299508..82fe827af542 100644
--- a/lib/vdso/Kconfig
+++ b/lib/vdso/Kconfig
@@ -30,4 +30,16 @@ config GENERIC_VDSO_TIME_NS
Selected by architectures which support time namespaces in the
VDSO
+config GENERIC_VDSO_OVERFLOW_PROTECT
+ bool
+ help
+ Select to add multiplication overflow protection to the VDSO
+ time getter functions for the price of an extra conditional
+ in the hotpath.
+
endif
+
+config VDSO_GETRANDOM
+ bool
+ help
+ Selected by architectures that support vDSO getrandom().
diff --git a/lib/vdso/getrandom.c b/lib/vdso/getrandom.c
new file mode 100644
index 000000000000..b230f0b10832
--- /dev/null
+++ b/lib/vdso/getrandom.c
@@ -0,0 +1,251 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022-2024 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include <linux/cache.h>
+#include <linux/kernel.h>
+#include <linux/time64.h>
+#include <vdso/datapage.h>
+#include <vdso/getrandom.h>
+#include <asm/vdso/getrandom.h>
+#include <asm/vdso/vsyscall.h>
+#include <asm/unaligned.h>
+#include <uapi/linux/mman.h>
+
+#define MEMCPY_AND_ZERO_SRC(type, dst, src, len) do { \
+ while (len >= sizeof(type)) { \
+ __put_unaligned_t(type, __get_unaligned_t(type, src), dst); \
+ __put_unaligned_t(type, 0, src); \
+ dst += sizeof(type); \
+ src += sizeof(type); \
+ len -= sizeof(type); \
+ } \
+} while (0)
+
+static void memcpy_and_zero_src(void *dst, void *src, size_t len)
+{
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
+ if (IS_ENABLED(CONFIG_64BIT))
+ MEMCPY_AND_ZERO_SRC(u64, dst, src, len);
+ MEMCPY_AND_ZERO_SRC(u32, dst, src, len);
+ MEMCPY_AND_ZERO_SRC(u16, dst, src, len);
+ }
+ MEMCPY_AND_ZERO_SRC(u8, dst, src, len);
+}
+
+/**
+ * __cvdso_getrandom_data - Generic vDSO implementation of getrandom() syscall.
+ * @rng_info: Describes state of kernel RNG, memory shared with kernel.
+ * @buffer: Destination buffer to fill with random bytes.
+ * @len: Size of @buffer in bytes.
+ * @flags: Zero or more GRND_* flags.
+ * @opaque_state: Pointer to an opaque state area.
+ * @opaque_len: Length of opaque state area.
+ *
+ * This implements a "fast key erasure" RNG using ChaCha20, in the same way that the kernel's
+ * getrandom() syscall does. It periodically reseeds its key from the kernel's RNG, at the same
+ * schedule that the kernel's RNG is reseeded. If the kernel's RNG is not ready, then this always
+ * calls into the syscall.
+ *
+ * If @buffer, @len, and @flags are 0, and @opaque_len is ~0UL, then @opaque_state is populated
+ * with a struct vgetrandom_opaque_params and the function returns 0; if it does not return 0,
+ * this function should not be used.
+ *
+ * @opaque_state *must* be allocated by calling mmap(2) using the mmap_prot and mmap_flags fields
+ * from the struct vgetrandom_opaque_params, and states must not straddle pages. Unless external
+ * locking is used, one state must be allocated per thread, as it is not safe to call this function
+ * concurrently with the same @opaque_state. However, it is safe to call this using the same
+ * @opaque_state that is shared between main code and signal handling code, within the same thread.
+ *
+ * Returns: The number of random bytes written to @buffer, or a negative value indicating an error.
+ */
+static __always_inline ssize_t
+__cvdso_getrandom_data(const struct vdso_rng_data *rng_info, void *buffer, size_t len,
+ unsigned int flags, void *opaque_state, size_t opaque_len)
+{
+ ssize_t ret = min_t(size_t, INT_MAX & PAGE_MASK /* = MAX_RW_COUNT */, len);
+ struct vgetrandom_state *state = opaque_state;
+ size_t batch_len, nblocks, orig_len = len;
+ bool in_use, have_retried = false;
+ unsigned long current_generation;
+ void *orig_buffer = buffer;
+ u32 counter[2] = { 0 };
+
+ if (unlikely(opaque_len == ~0UL && !buffer && !len && !flags)) {
+ *(struct vgetrandom_opaque_params *)opaque_state = (struct vgetrandom_opaque_params) {
+ .size_of_opaque_state = sizeof(*state),
+ .mmap_prot = PROT_READ | PROT_WRITE,
+ .mmap_flags = MAP_DROPPABLE | MAP_ANONYMOUS
+ };
+ return 0;
+ }
+
+ /* The state must not straddle a page, since pages can be zeroed at any time. */
+ if (unlikely(((unsigned long)opaque_state & ~PAGE_MASK) + sizeof(*state) > PAGE_SIZE))
+ return -EFAULT;
+
+	/* If the caller passes the wrong size, which might happen due to CRIU, fall back. */
+ if (unlikely(opaque_len != sizeof(*state)))
+ goto fallback_syscall;
+
+ /*
+ * If the kernel's RNG is not yet ready, then it's not possible to provide random bytes from
+ * userspace, because A) the various @flags require this to block, or not, depending on
+ * various factors unavailable to userspace, and B) the kernel's behavior before the RNG is
+ * ready is to reseed from the entropy pool at every invocation.
+ */
+ if (unlikely(!READ_ONCE(rng_info->is_ready)))
+ goto fallback_syscall;
+
+ /*
+ * This condition is checked after @rng_info->is_ready, because before the kernel's RNG is
+ * initialized, the @flags parameter may require this to block or return an error, even when
+ * len is zero.
+ */
+ if (unlikely(!len))
+ return 0;
+
+ /*
+ * @state->in_use is basic reentrancy protection against this running in a signal handler
+ * with the same @opaque_state, but obviously not atomic wrt multiple CPUs or more than one
+ * level of reentrancy. If a signal interrupts this after reading @state->in_use, but before
+ * writing @state->in_use, there is still no race, because the signal handler will run to
+ * its completion before execution returns here.
+ */
+ in_use = READ_ONCE(state->in_use);
+ if (unlikely(in_use))
+		/* The syscall simply fills the buffer and does not touch @state, so fall back. */
+ goto fallback_syscall;
+ WRITE_ONCE(state->in_use, true);
+
+retry_generation:
+ /*
+ * @rng_info->generation must always be read here, as it serializes @state->key with the
+ * kernel's RNG reseeding schedule.
+ */
+ current_generation = READ_ONCE(rng_info->generation);
+
+ /*
+ * If @state->generation doesn't match the kernel RNG's generation, then it means the
+ * kernel's RNG has reseeded, and so @state->key is reseeded as well.
+ */
+ if (unlikely(state->generation != current_generation)) {
+ /*
+ * Write the generation before filling the key, in case of fork. If there is a fork
+ * just after this line, the parent and child will get different random bytes from
+ * the syscall, which is good. However, were this line to occur after the getrandom
+ * syscall, then both child and parent could have the same bytes and the same
+ * generation counter, so the fork would not be detected. Therefore, write
+ * @state->generation before the call to the getrandom syscall.
+ */
+ WRITE_ONCE(state->generation, current_generation);
+
+ /*
+ * Prevent the syscall from being reordered wrt current_generation. Pairs with the
+ * smp_store_release(&_vdso_rng_data.generation) in random.c.
+ */
+ smp_rmb();
+
+ /* Reseed @state->key using fresh bytes from the kernel. */
+ if (getrandom_syscall(state->key, sizeof(state->key), 0) != sizeof(state->key)) {
+ /*
+ * If the syscall failed to refresh the key, then @state->key is now
+ * invalid, so invalidate the generation so that it is not used again, and
+			 * fall back to using the syscall entirely.
+ */
+ WRITE_ONCE(state->generation, 0);
+
+ /*
+ * Set @state->in_use to false only after the last write to @state in the
+ * line above.
+ */
+ WRITE_ONCE(state->in_use, false);
+
+ goto fallback_syscall;
+ }
+
+ /*
+ * Set @state->pos to beyond the end of the batch, so that the batch is refilled
+ * using the new key.
+ */
+ state->pos = sizeof(state->batch);
+ }
+
+	/* Set len to the total number of bytes this function is allowed to read, i.e. ret. */
+ len = ret;
+more_batch:
+ /*
+ * First use bytes out of @state->batch, which may have been filled by the last call to this
+ * function.
+ */
+ batch_len = min_t(size_t, sizeof(state->batch) - state->pos, len);
+ if (batch_len) {
+ /* Zeroing at the same time as memcpying helps preserve forward secrecy. */
+ memcpy_and_zero_src(buffer, state->batch + state->pos, batch_len);
+ state->pos += batch_len;
+ buffer += batch_len;
+ len -= batch_len;
+ }
+
+ if (!len) {
+ /* Prevent the loop from being reordered wrt ->generation. */
+ barrier();
+
+ /*
+ * Since @rng_info->generation will never be 0, re-read @state->generation, rather
+ * than using the local current_generation variable, to learn whether a fork
+ * occurred or if @state was zeroed due to memory pressure. Primarily, though, this
+ * indicates whether the kernel's RNG has reseeded, in which case generate a new key
+ * and start over.
+ */
+ if (unlikely(READ_ONCE(state->generation) != READ_ONCE(rng_info->generation))) {
+ /*
+ * Prevent this from looping forever in case of low memory or racing with a
+ * user force-reseeding the kernel's RNG using the ioctl.
+ */
+ if (have_retried) {
+ WRITE_ONCE(state->in_use, false);
+ goto fallback_syscall;
+ }
+
+ have_retried = true;
+ buffer = orig_buffer;
+ goto retry_generation;
+ }
+
+ /*
+ * Set @state->in_use to false only when there will be no more reads or writes of
+ * @state.
+ */
+ WRITE_ONCE(state->in_use, false);
+ return ret;
+ }
+
+ /* Generate blocks of RNG output directly into @buffer while there's enough room left. */
+ nblocks = len / CHACHA_BLOCK_SIZE;
+ if (nblocks) {
+ __arch_chacha20_blocks_nostack(buffer, state->key, counter, nblocks);
+ buffer += nblocks * CHACHA_BLOCK_SIZE;
+ len -= nblocks * CHACHA_BLOCK_SIZE;
+ }
+
+ BUILD_BUG_ON(sizeof(state->batch_key) % CHACHA_BLOCK_SIZE != 0);
+
+ /* Refill the batch and overwrite the key, in order to preserve forward secrecy. */
+ __arch_chacha20_blocks_nostack(state->batch_key, state->key, counter,
+ sizeof(state->batch_key) / CHACHA_BLOCK_SIZE);
+
+ /* Since the batch was just refilled, set the position back to 0 to indicate a full batch. */
+ state->pos = 0;
+ goto more_batch;
+
+fallback_syscall:
+ return getrandom_syscall(orig_buffer, orig_len, flags);
+}
+
+static __always_inline ssize_t
+__cvdso_getrandom(void *buffer, size_t len, unsigned int flags, void *opaque_state, size_t opaque_len)
+{
+ return __cvdso_getrandom_data(__arch_get_vdso_rng_data(), buffer, len, flags, opaque_state, opaque_len);
+}
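
A hedged userspace-side sketch of the calling convention documented above; vgetrandom() stands in for the architecture's vDSO entry point and is not part of this patch:

    struct vgetrandom_opaque_params params;
    unsigned char buf[256];
    void *state;

    /* Ask the vDSO how the per-thread state must be allocated. */
    if (vgetrandom(NULL, 0, 0, &params, ~0UL) != 0)
            /* vDSO path unusable: use the getrandom(2) syscall instead */;

    state = mmap(NULL, params.size_of_opaque_state, params.mmap_prot,
                 params.mmap_flags, -1, 0);

    /* One state per thread; afterwards it behaves like getrandom(2). */
    ssize_t ret = vgetrandom(buf, sizeof(buf), 0, state,
                             params.size_of_opaque_state);
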
diff --git a/lib/vdso/gettimeofday.c b/lib/vdso/gettimeofday.c
index ce2f69552003..c01eaafd8041 100644
--- a/lib/vdso/gettimeofday.c
+++ b/lib/vdso/gettimeofday.c
@@ -5,15 +5,23 @@
#include <vdso/datapage.h>
#include <vdso/helpers.h>
-#ifndef vdso_calc_delta
-/*
- * Default implementation which works for all sane clocksources. That
- * obviously excludes x86/TSC.
- */
-static __always_inline
-u64 vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult)
+#ifndef vdso_calc_ns
+
+#ifdef VDSO_DELTA_NOMASK
+# define VDSO_DELTA_MASK(vd) ULLONG_MAX
+#else
+# define VDSO_DELTA_MASK(vd) (vd->mask)
+#endif
+
+#ifdef CONFIG_GENERIC_VDSO_OVERFLOW_PROTECT
+static __always_inline bool vdso_delta_ok(const struct vdso_data *vd, u64 delta)
+{
+ return delta < vd->max_cycles;
+}
+#else
+static __always_inline bool vdso_delta_ok(const struct vdso_data *vd, u64 delta)
{
- return ((cycles - last) & mask) * mult;
+ return true;
}
#endif
@@ -24,6 +32,21 @@ static __always_inline u64 vdso_shift_ns(u64 ns, u32 shift)
}
#endif
+/*
+ * Default implementation which works for all sane clocksources. That
+ * obviously excludes x86/TSC.
+ */
+static __always_inline u64 vdso_calc_ns(const struct vdso_data *vd, u64 cycles, u64 base)
+{
+ u64 delta = (cycles - vd->cycle_last) & VDSO_DELTA_MASK(vd);
+
+ if (likely(vdso_delta_ok(vd, delta)))
+ return vdso_shift_ns((delta * vd->mult) + base, vd->shift);
+
+ return mul_u64_u32_add_u64_shr(delta, vd->mult, base, vd->shift);
+}
+#endif /* vdso_calc_ns */
+
#ifndef __arch_vdso_hres_capable
static inline bool __arch_vdso_hres_capable(void)
{
@@ -49,10 +72,10 @@ static inline bool vdso_cycles_ok(u64 cycles)
static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
struct __kernel_timespec *ts)
{
- const struct vdso_data *vd;
const struct timens_offset *offs = &vdns->offset[clk];
const struct vdso_timestamp *vdso_ts;
- u64 cycles, last, ns;
+ const struct vdso_data *vd;
+ u64 cycles, ns;
u32 seq;
s64 sec;
@@ -73,10 +96,7 @@ static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_
cycles = __arch_get_hw_counter(vd->clock_mode, vd);
if (unlikely(!vdso_cycles_ok(cycles)))
return -1;
- ns = vdso_ts->nsec;
- last = vd->cycle_last;
- ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
- ns = vdso_shift_ns(ns, vd->shift);
+ ns = vdso_calc_ns(vd, cycles, vdso_ts->nsec);
sec = vdso_ts->sec;
} while (unlikely(vdso_read_retry(vd, seq)));
@@ -111,7 +131,7 @@ static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
struct __kernel_timespec *ts)
{
const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
- u64 cycles, last, sec, ns;
+ u64 cycles, sec, ns;
u32 seq;
/* Allows to compile the high resolution parts out */
@@ -120,14 +140,14 @@ static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
do {
/*
- * Open coded to handle VDSO_CLOCKMODE_TIMENS. Time namespace
- * enabled tasks have a special VVAR page installed which
- * has vd->seq set to 1 and vd->clock_mode set to
- * VDSO_CLOCKMODE_TIMENS. For non time namespace affected tasks
- * this does not affect performance because if vd->seq is
- * odd, i.e. a concurrent update is in progress the extra
- * check for vd->clock_mode is just a few extra
- * instructions while spin waiting for vd->seq to become
+ * Open coded function vdso_read_begin() to handle
+ * VDSO_CLOCKMODE_TIMENS. Time namespace enabled tasks have a
+ * special VVAR page installed which has vd->seq set to 1 and
+ * vd->clock_mode set to VDSO_CLOCKMODE_TIMENS. For non time
+ * namespace affected tasks this does not affect performance
+ * because if vd->seq is odd, i.e. a concurrent update is in
+ * progress the extra check for vd->clock_mode is just a few
+ * extra instructions while spin waiting for vd->seq to become
* even again.
*/
while (unlikely((seq = READ_ONCE(vd->seq)) & 1)) {
@@ -144,10 +164,7 @@ static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
cycles = __arch_get_hw_counter(vd->clock_mode, vd);
if (unlikely(!vdso_cycles_ok(cycles)))
return -1;
- ns = vdso_ts->nsec;
- last = vd->cycle_last;
- ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
- ns = vdso_shift_ns(ns, vd->shift);
+ ns = vdso_calc_ns(vd, cycles, vdso_ts->nsec);
sec = vdso_ts->sec;
} while (unlikely(vdso_read_retry(vd, seq)));
@@ -206,8 +223,8 @@ static __always_inline int do_coarse(const struct vdso_data *vd, clockid_t clk,
do {
/*
- * Open coded to handle VDSO_CLOCK_TIMENS. See comment in
- * do_hres().
+ * Open coded function vdso_read_begin() to handle
+ * VDSO_CLOCK_TIMENS. See comment in do_hres().
*/
while ((seq = READ_ONCE(vd->seq)) & 1) {
if (IS_ENABLED(CONFIG_TIME_NS) &&
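
The slow path's mul_u64_u32_add_u64_shr() computes the same nanosecond value without the 64-bit overflow the fast path risks once delta exceeds max_cycles. On targets with 128-bit arithmetic it is equivalent to the following sketch (the kernel also carries a portable 64-bit fallback):

    static inline u64 mul_u64_u32_add_u64_shr(u64 a, u32 mul, u64 b,
                                              unsigned int shift)
    {
            /* (a * mul + b) >> shift, with a 128-bit intermediate */
            return (u64)(((unsigned __int128)a * mul + b) >> shift);
    }
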
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 552738f14275..2d71b1115916 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -966,13 +966,13 @@ char *bdev_name(char *buf, char *end, struct block_device *bdev,
hd = bdev->bd_disk;
buf = string(buf, end, hd->disk_name, spec);
- if (bdev->bd_partno) {
+ if (bdev_is_partition(bdev)) {
if (isdigit(hd->disk_name[strlen(hd->disk_name)-1])) {
if (buf < end)
*buf = 'p';
buf++;
}
- buf = number(buf, end, bdev->bd_partno, spec);
+ buf = number(buf, end, bdev_partno(bdev), spec);
}
return buf;
}
@@ -1080,7 +1080,7 @@ char *resource_string(char *buf, char *end, struct resource *res,
#define FLAG_BUF_SIZE (2 * sizeof(res->flags))
#define DECODED_BUF_SIZE sizeof("[mem - 64bit pref window disabled]")
#define RAW_BUF_SIZE sizeof("[mem - flags 0x]")
- char sym[max(2*RSRC_BUF_SIZE + DECODED_BUF_SIZE,
+ char sym[MAX(2*RSRC_BUF_SIZE + DECODED_BUF_SIZE,
2*RSRC_BUF_SIZE + FLAG_BUF_SIZE + RAW_BUF_SIZE)];
char *p = sym, *pend = sym + sizeof(sym);
diff --git a/lib/xarray.c b/lib/xarray.c
index 39f07bfc4dcc..32d4bac8c94c 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -200,7 +200,8 @@ static void *xas_start(struct xa_state *xas)
return entry;
}
-static void *xas_descend(struct xa_state *xas, struct xa_node *node)
+static __always_inline void *xas_descend(struct xa_state *xas,
+ struct xa_node *node)
{
unsigned int offset = get_offset(xas->xa_index, node);
void *entry = xa_entry(xas->xa, node, offset);
@@ -969,8 +970,22 @@ static unsigned int node_get_marks(struct xa_node *node, unsigned int offset)
return marks;
}
+static inline void node_mark_slots(struct xa_node *node, unsigned int sibs,
+ xa_mark_t mark)
+{
+ int i;
+
+ if (sibs == 0)
+ node_mark_all(node, mark);
+ else {
+ for (i = 0; i < XA_CHUNK_SIZE; i += sibs + 1)
+ node_set_mark(node, i, mark);
+ }
+}
+
static void node_set_marks(struct xa_node *node, unsigned int offset,
- struct xa_node *child, unsigned int marks)
+ struct xa_node *child, unsigned int sibs,
+ unsigned int marks)
{
xa_mark_t mark = XA_MARK_0;
@@ -978,7 +993,7 @@ static void node_set_marks(struct xa_node *node, unsigned int offset,
if (marks & (1 << (__force unsigned int)mark)) {
node_set_mark(node, offset, mark);
if (child)
- node_mark_all(child, mark);
+ node_mark_slots(child, sibs, mark);
}
if (mark == XA_MARK_MAX)
break;
@@ -1077,7 +1092,8 @@ void xas_split(struct xa_state *xas, void *entry, unsigned int order)
child->nr_values = xa_is_value(entry) ?
XA_CHUNK_SIZE : 0;
RCU_INIT_POINTER(child->parent, node);
- node_set_marks(node, offset, child, marks);
+ node_set_marks(node, offset, child, xas->xa_sibs,
+ marks);
rcu_assign_pointer(node->slots[offset],
xa_mk_node(child));
if (xa_is_value(curr))
@@ -1086,7 +1102,7 @@ void xas_split(struct xa_state *xas, void *entry, unsigned int order)
} else {
unsigned int canon = offset - xas->xa_sibs;
- node_set_marks(node, canon, NULL, marks);
+ node_set_marks(node, canon, NULL, 0, marks);
rcu_assign_pointer(node->slots[canon], entry);
while (offset > canon)
rcu_assign_pointer(node->slots[offset--],
@@ -1750,39 +1766,52 @@ unlock:
EXPORT_SYMBOL(xa_store_range);
/**
- * xa_get_order() - Get the order of an entry.
- * @xa: XArray.
- * @index: Index of the entry.
+ * xas_get_order() - Get the order of an entry.
+ * @xas: XArray operation state.
+ *
+ * Called after xas_load(); the xas should not be in an error state.
*
* Return: A number between 0 and 63 indicating the order of the entry.
*/
-int xa_get_order(struct xarray *xa, unsigned long index)
+int xas_get_order(struct xa_state *xas)
{
- XA_STATE(xas, xa, index);
- void *entry;
int order = 0;
- rcu_read_lock();
- entry = xas_load(&xas);
-
- if (!entry)
- goto unlock;
-
- if (!xas.xa_node)
- goto unlock;
+ if (!xas->xa_node)
+ return 0;
for (;;) {
- unsigned int slot = xas.xa_offset + (1 << order);
+ unsigned int slot = xas->xa_offset + (1 << order);
if (slot >= XA_CHUNK_SIZE)
break;
- if (!xa_is_sibling(xas.xa_node->slots[slot]))
+ if (!xa_is_sibling(xa_entry(xas->xa, xas->xa_node, slot)))
break;
order++;
}
- order += xas.xa_node->shift;
-unlock:
+ order += xas->xa_node->shift;
+ return order;
+}
+EXPORT_SYMBOL_GPL(xas_get_order);
+
+/**
+ * xa_get_order() - Get the order of an entry.
+ * @xa: XArray.
+ * @index: Index of the entry.
+ *
+ * Return: A number between 0 and 63 indicating the order of the entry.
+ */
+int xa_get_order(struct xarray *xa, unsigned long index)
+{
+ XA_STATE(xas, xa, index);
+ int order = 0;
+ void *entry;
+
+ rcu_read_lock();
+ entry = xas_load(&xas);
+ if (entry)
+ order = xas_get_order(&xas);
rcu_read_unlock();
return order;
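
The point of factoring out xas_get_order() is that a caller which already holds the xa_lock and has a loaded xa_state no longer pays for xa_get_order()'s extra rcu_read_lock() and xas_load(). A hypothetical caller, for illustration only:

    /* Order of the entry at @index, for a walk already done under xa_lock. */
    static unsigned int entry_order_locked(struct xarray *xa, unsigned long index)
    {
            XA_STATE(xas, xa, index);
            unsigned int order = 0;
            void *entry;

            xas_lock(&xas);
            entry = xas_load(&xas);
            if (entry && !xas_error(&xas))
                    order = xas_get_order(&xas);    /* no second lookup needed */
            xas_unlock(&xas);
            return order;
    }
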
diff --git a/lib/zlib_deflate/deflate_syms.c b/lib/zlib_deflate/deflate_syms.c
index 24b740b99678..68941a2350ea 100644
--- a/lib/zlib_deflate/deflate_syms.c
+++ b/lib/zlib_deflate/deflate_syms.c
@@ -17,4 +17,5 @@ EXPORT_SYMBOL(zlib_deflate);
EXPORT_SYMBOL(zlib_deflateInit2);
EXPORT_SYMBOL(zlib_deflateEnd);
EXPORT_SYMBOL(zlib_deflateReset);
+MODULE_DESCRIPTION("Data compression using the deflation algorithm");
MODULE_LICENSE("GPL");
diff --git a/lib/zlib_dfltcc/dfltcc.h b/lib/zlib_dfltcc/dfltcc.h
index b96232bdd44d..0f2a16d7a48a 100644
--- a/lib/zlib_dfltcc/dfltcc.h
+++ b/lib/zlib_dfltcc/dfltcc.h
@@ -80,6 +80,7 @@ struct dfltcc_param_v0 {
uint8_t csb[1152];
};
+static_assert(offsetof(struct dfltcc_param_v0, csb) == 384);
static_assert(sizeof(struct dfltcc_param_v0) == 1536);
#define CVT_CRC32 0
diff --git a/lib/zlib_dfltcc/dfltcc_util.h b/lib/zlib_dfltcc/dfltcc_util.h
index 4a46b5009f0d..10509270d822 100644
--- a/lib/zlib_dfltcc/dfltcc_util.h
+++ b/lib/zlib_dfltcc/dfltcc_util.h
@@ -2,6 +2,8 @@
#ifndef DFLTCC_UTIL_H
#define DFLTCC_UTIL_H
+#include "dfltcc.h"
+#include <linux/kmsan-checks.h>
#include <linux/zutil.h>
/*
@@ -20,6 +22,7 @@ typedef enum {
#define DFLTCC_CMPR 2
#define DFLTCC_XPND 4
#define HBT_CIRCULAR (1 << 7)
+#define DFLTCC_FN_MASK ((1 << 7) - 1)
#define HB_BITS 15
#define HB_SIZE (1 << HB_BITS)
@@ -34,6 +37,7 @@ static inline dfltcc_cc dfltcc(
)
{
Byte *t2 = op1 ? *op1 : NULL;
+ unsigned char *orig_t2 = t2;
size_t t3 = len1 ? *len1 : 0;
const Byte *t4 = op2 ? *op2 : NULL;
size_t t5 = len2 ? *len2 : 0;
@@ -59,6 +63,30 @@ static inline dfltcc_cc dfltcc(
: "cc", "memory");
t2 = r2; t3 = r3; t4 = r4; t5 = r5;
+ /*
+ * Unpoison the parameter block and the output buffer.
+ * This is a no-op in non-KMSAN builds.
+ */
+ switch (fn & DFLTCC_FN_MASK) {
+ case DFLTCC_QAF:
+ kmsan_unpoison_memory(param, sizeof(struct dfltcc_qaf_param));
+ break;
+ case DFLTCC_GDHT:
+ kmsan_unpoison_memory(param, offsetof(struct dfltcc_param_v0, csb));
+ break;
+ case DFLTCC_CMPR:
+ kmsan_unpoison_memory(param, sizeof(struct dfltcc_param_v0));
+ kmsan_unpoison_memory(
+ orig_t2,
+ t2 - orig_t2 +
+ (((struct dfltcc_param_v0 *)param)->sbb == 0 ? 0 : 1));
+ break;
+ case DFLTCC_XPND:
+ kmsan_unpoison_memory(param, sizeof(struct dfltcc_param_v0));
+ kmsan_unpoison_memory(orig_t2, t2 - orig_t2);
+ break;
+ }
+
if (op1)
*op1 = t2;
if (len1)