Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug                        |  27
-rw-r--r--  lib/Kconfig.kasan                        |  39
-rw-r--r--  lib/Makefile                             |  11
-rw-r--r--  lib/bitmap.c                             |   4
-rw-r--r--  lib/bootconfig.c                         |  33
-rw-r--r--  lib/crc64.c                              |   2
-rw-r--r--  lib/decompress_bunzip2.c                 |   2
-rw-r--r--  lib/decompress_unlzma.c                  |   6
-rw-r--r--  lib/ioremap.c                            | 287
-rw-r--r--  lib/iov_iter.c                           |   3
-rw-r--r--  lib/kstrtox.c                            |  12
-rw-r--r--  lib/livepatch/Makefile                   |   4
-rw-r--r--  lib/livepatch/test_klp_callbacks_busy.c  |  37
-rw-r--r--  lib/livepatch/test_klp_shadow_vars.c     | 240
-rw-r--r--  lib/math/rational.c                      |   2
-rw-r--r--  lib/mpi/mpiutil.c                        |   6
-rw-r--r--  lib/percpu_counter.c                     |  19
-rw-r--r--  lib/rbtree.c                             |   2
-rw-r--r--  lib/test_bitmap.c                        |  58
-rw-r--r--  lib/test_bitops.c                        |  18
-rw-r--r--  lib/test_bits.c                          |  75
-rw-r--r--  lib/test_kasan.c                         |  87
-rw-r--r--  lib/test_kmod.c                          |   2
-rw-r--r--  lib/test_lockup.c                        |   6
-rw-r--r--  lib/ts_bm.c                              |   2
-rw-r--r--  lib/xxhash.c                             |   2
-rw-r--r--  lib/xz/xz_crc32.c                        |   2
-rw-r--r--  lib/xz/xz_dec_bcj.c                      |   2
-rw-r--r--  lib/xz/xz_dec_lzma2.c                    |   2
-rw-r--r--  lib/xz/xz_lzma2.h                        |   2
-rw-r--r--  lib/xz/xz_stream.h                       |   2
31 files changed, 492 insertions(+), 504 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index a164785c3b48..e068c3c7189a 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -365,6 +365,17 @@ config SECTION_MISMATCH_WARN_ONLY
If unsure, say Y.
+config DEBUG_FORCE_FUNCTION_ALIGN_32B
+ bool "Force all function address 32B aligned" if EXPERT
+ help
+ There are cases that a commit from one domain changes the function
+ address alignment of other domains, and cause magic performance
+ bump (regression or improvement). Enable this option will help to
+ verify if the bump is caused by function alignment changes, while
+ it will slightly increase the kernel size and affect icache usage.
+
+ It is mainly for debug and performance tuning use.
+
#
# Select this config option from the architecture Kconfig, if it
# is preferred to always offer frame pointers as a config
@@ -906,7 +917,7 @@ config PANIC_TIMEOUT
int "panic timeout"
default 0
help
- Set the timeout value (in seconds) until a reboot occurs when the
+ Set the timeout value (in seconds) until a reboot occurs when
the kernel panics. If n = 0, then we wait forever. A timeout
value n > 0 will wait n seconds before rebooting, while a timeout
value n < 0 will reboot immediately.
@@ -1067,6 +1078,7 @@ config WQ_WATCHDOG
config TEST_LOCKUP
tristate "Test module to generate lockups"
+ depends on m
help
This builds the "test_lockup" module that helps to make sure
that watchdogs and lockup detectors are working properly.
@@ -2203,7 +2215,7 @@ config LIST_KUNIT_TEST
and associated macros.
KUnit tests run during boot and output the results to the debug log
- in TAP format (http://testanything.org/). Only useful for kernel devs
+ in TAP format (https://testanything.org/). Only useful for kernel devs
running the KUnit test harness, and not intended for inclusion into a
production build.
@@ -2224,6 +2236,17 @@ config LINEAR_RANGES_TEST
If unsure, say N.
+config BITS_TEST
+ tristate "KUnit test for bits.h"
+ depends on KUNIT
+ help
+ This builds the bits unit test.
+ Tests the logic of macros defined in bits.h.
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
config TEST_UDELAY
tristate "udelay test driver"
help
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index 34b84bcbd3d9..047b53dbfd58 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -18,7 +18,7 @@ config CC_HAS_KASAN_SW_TAGS
config CC_HAS_WORKING_NOSANITIZE_ADDRESS
def_bool !CC_IS_GCC || GCC_VERSION >= 80300
-config KASAN
+menuconfig KASAN
bool "KASAN: runtime memory debugger"
depends on (HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC) || \
(HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS)
@@ -29,9 +29,10 @@ config KASAN
designed to find out-of-bounds accesses and use-after-free bugs.
See Documentation/dev-tools/kasan.rst for details.
+if KASAN
+
choice
prompt "KASAN mode"
- depends on KASAN
default KASAN_GENERIC
help
KASAN has two modes: generic KASAN (similar to userspace ASan,
@@ -39,6 +40,7 @@ choice
software tag-based KASAN (a version based on software memory
tagging, arm64 only, similar to userspace HWASan, enabled with
CONFIG_KASAN_SW_TAGS).
+
Both generic and tag-based KASAN are strictly debugging features.
config KASAN_GENERIC
@@ -50,16 +52,18 @@ config KASAN_GENERIC
select STACKDEPOT
help
Enables generic KASAN mode.
- Supported in both GCC and Clang. With GCC it requires version 4.9.2
- or later for basic support and version 5.0 or later for detection of
- out-of-bounds accesses for stack and global variables and for inline
- instrumentation mode (CONFIG_KASAN_INLINE). With Clang it requires
- version 3.7.0 or later and it doesn't support detection of
- out-of-bounds accesses for global variables yet.
+
+ This mode is supported in both GCC and Clang. With GCC it requires
+ version 8.3.0 or later. With Clang it requires version 7.0.0 or
+ later, but detection of out-of-bounds accesses for global variables
+ is supported only since Clang 11.
+
This mode consumes about 1/8th of available memory at kernel start
and introduces an overhead of ~x1.5 for the rest of the allocations.
The performance slowdown is ~x3.
+
For better error detection enable CONFIG_STACKTRACE.
+
Currently CONFIG_KASAN_GENERIC doesn't work with CONFIG_DEBUG_SLAB
(the resulting kernel does not boot).
@@ -72,15 +76,19 @@ config KASAN_SW_TAGS
select STACKDEPOT
help
Enables software tag-based KASAN mode.
+
This mode requires Top Byte Ignore support by the CPU and therefore
- is only supported for arm64.
- This mode requires Clang version 7.0.0 or later.
+ is only supported for arm64. This mode requires Clang version 7.0.0
+ or later.
+
This mode consumes about 1/16th of available memory at kernel start
and introduces an overhead of ~20% for the rest of the allocations.
This mode may potentially introduce problems relating to pointer
casting and comparison, as it embeds tags into the top byte of each
pointer.
+
For better error detection enable CONFIG_STACKTRACE.
+
Currently CONFIG_KASAN_SW_TAGS doesn't work with CONFIG_DEBUG_SLAB
(the resulting kernel does not boot).
@@ -88,7 +96,6 @@ endchoice
choice
prompt "Instrumentation type"
- depends on KASAN
default KASAN_OUTLINE
config KASAN_OUTLINE
@@ -107,13 +114,11 @@ config KASAN_INLINE
memory accesses. This is faster than outline (in some workloads
it gives about x2 boost over outline instrumentation), but
makes the kernel's .text size much bigger.
- For CONFIG_KASAN_GENERIC this requires GCC 5.0 or later.
endchoice
config KASAN_STACK_ENABLE
bool "Enable stack instrumentation (unsafe)" if CC_IS_CLANG && !COMPILE_TEST
- depends on KASAN
help
The LLVM stack address sanitizer has a known problem that
causes excessive stack usage in a lot of functions, see
@@ -134,7 +139,7 @@ config KASAN_STACK
config KASAN_S390_4_LEVEL_PAGING
bool "KASan: use 4-level paging"
- depends on KASAN && S390
+ depends on S390
help
Compiling the kernel with KASan disables automatic 3-level vs
4-level paging selection. 3-level paging is used by default (up
@@ -151,7 +156,7 @@ config KASAN_SW_TAGS_IDENTIFY
config KASAN_VMALLOC
bool "Back mappings in vmalloc space with real shadow memory"
- depends on KASAN && HAVE_ARCH_KASAN_VMALLOC
+ depends on HAVE_ARCH_KASAN_VMALLOC
help
By default, the shadow region for vmalloc space is the read-only
zero page. This means that KASAN cannot detect errors involving
@@ -164,8 +169,10 @@ config KASAN_VMALLOC
config TEST_KASAN
tristate "Module for testing KASAN for bug detection"
- depends on m && KASAN
+ depends on m
help
This is a test module doing various nasty things like
out of bounds accesses, use after free. It is useful for testing
kernel debugging features like KASAN.
+
+endif # KASAN
diff --git a/lib/Makefile b/lib/Makefile
index 435f7f13b8aa..e290fc5707ea 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -3,10 +3,7 @@
# Makefile for some libs needed in the kernel.
#
-ifdef CONFIG_FUNCTION_TRACER
-ORIG_CFLAGS := $(KBUILD_CFLAGS)
-KBUILD_CFLAGS = $(subst $(CC_FLAGS_FTRACE),,$(ORIG_CFLAGS))
-endif
+ccflags-remove-$(CONFIG_FUNCTION_TRACER) += $(CC_FLAGS_FTRACE)
# These files are disabled because they produce lots of non-interesting and/or
# flaky coverage that is not a function of syscall inputs. For example,
@@ -22,7 +19,7 @@ KCOV_INSTRUMENT_fault-inject.o := n
ifdef CONFIG_AMD_MEM_ENCRYPT
KASAN_SANITIZE_string.o := n
-CFLAGS_string.o := $(call cc-option, -fno-stack-protector)
+CFLAGS_string.o := -fno-stack-protector
endif
# Used by KCSAN while enabled, avoid recursion.
@@ -37,7 +34,6 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
nmi_backtrace.o nodemask.o win_minmax.o memcat_p.o
lib-$(CONFIG_PRINTK) += dump_stack.o
-lib-$(CONFIG_MMU) += ioremap.o
lib-$(CONFIG_SMP) += cpumask.o
lib-y += kobject.o klist.o
@@ -325,7 +321,7 @@ endif
UBSAN_SANITIZE_ubsan.o := n
KASAN_SANITIZE_ubsan.o := n
KCSAN_SANITIZE_ubsan.o := n
-CFLAGS_ubsan.o := $(call cc-option, -fno-stack-protector) $(DISABLE_STACKLEAK_PLUGIN)
+CFLAGS_ubsan.o := -fno-stack-protector $(DISABLE_STACKLEAK_PLUGIN)
obj-$(CONFIG_SBITMAP) += sbitmap.o
@@ -346,3 +342,4 @@ obj-$(CONFIG_PLDMFW) += pldmfw/
# KUnit tests
obj-$(CONFIG_LIST_KUNIT_TEST) += list-test.o
obj-$(CONFIG_LINEAR_RANGES_TEST) += test_linear_ranges.o
+obj-$(CONFIG_BITS_TEST) += test_bits.o
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 0364452b1617..c13d859bc7ab 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -212,13 +212,13 @@ void bitmap_cut(unsigned long *dst, const unsigned long *src,
unsigned long keep = 0, carry;
int i;
- memmove(dst, src, len * sizeof(*dst));
-
if (first % BITS_PER_LONG) {
keep = src[first / BITS_PER_LONG] &
(~0UL >> (BITS_PER_LONG - first % BITS_PER_LONG));
}
+ memmove(dst, src, len * sizeof(*dst));
+
while (cut--) {
for (i = first / BITS_PER_LONG; i < len; i++) {
if (i < len - 1)
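
The reorder above matters because bitmap_cut() tolerates overlapping dst
and src: the "keep" bits must be read from src before the memmove() can
clobber them. A minimal sketch of the call, reusing the 0xaa vector from
the test_bitmap.c hunk further below (names and context assumed):

	DECLARE_BITMAP(map, 8) = { 0xaaUL };	/* 0b10101010 */

	/* cut 3 bits starting at bit 0; higher bits shift down */
	bitmap_cut(map, map, 0, 3, 8);		/* dst == src is allowed */
	/* map[0] == 0x15UL, i.e. 0b00010101 */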
diff --git a/lib/bootconfig.c b/lib/bootconfig.c
index 912ef4921398..a5f701161f6b 100644
--- a/lib/bootconfig.c
+++ b/lib/bootconfig.c
@@ -329,22 +329,30 @@ const char * __init xbc_node_find_next_key_value(struct xbc_node *root,
/* XBC parse and tree build */
+static int __init xbc_init_node(struct xbc_node *node, char *data, u32 flag)
+{
+ unsigned long offset = data - xbc_data;
+
+ if (WARN_ON(offset >= XBC_DATA_MAX))
+ return -EINVAL;
+
+ node->data = (u16)offset | flag;
+ node->child = 0;
+ node->next = 0;
+
+ return 0;
+}
+
static struct xbc_node * __init xbc_add_node(char *data, u32 flag)
{
struct xbc_node *node;
- unsigned long offset;
if (xbc_node_num == XBC_NODE_MAX)
return NULL;
node = &xbc_nodes[xbc_node_num++];
- offset = data - xbc_data;
- node->data = (u16)offset;
- if (WARN_ON(offset >= XBC_DATA_MAX))
+ if (xbc_init_node(node, data, flag) < 0)
return NULL;
- node->data |= flag;
- node->child = 0;
- node->next = 0;
return node;
}
@@ -603,7 +611,9 @@ static int __init xbc_parse_kv(char **k, char *v, int op)
if (c < 0)
return c;
- if (!xbc_add_sibling(v, XBC_VALUE))
+ if (op == ':' && child) {
+ xbc_init_node(child, v, XBC_VALUE);
+ } else if (!xbc_add_sibling(v, XBC_VALUE))
return -ENOMEM;
if (c == ',') { /* Array */
@@ -787,7 +797,7 @@ int __init xbc_init(char *buf, const char **emsg, int *epos)
p = buf;
do {
- q = strpbrk(p, "{}=+;\n#");
+ q = strpbrk(p, "{}=+;:\n#");
if (!q) {
p = skip_spaces(p);
if (*p != '\0')
@@ -798,9 +808,12 @@ int __init xbc_init(char *buf, const char **emsg, int *epos)
c = *q;
*q++ = '\0';
switch (c) {
+ case ':':
case '+':
if (*q++ != '=') {
- ret = xbc_parse_error("Wrong '+' operator",
+ ret = xbc_parse_error(c == '+' ?
+ "Wrong '+' operator" :
+ "Wrong ':' operator",
q - 2);
break;
}
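
Together these hunks add a ':=' operator to the bootconfig grammar: ':'
joins the delimiter set, must be followed by '=', and xbc_parse_kv()
re-initializes the existing value node via xbc_init_node() rather than
appending a sibling. A minimal sketch of exercising it, assuming the
xbc_init() signature above and that ':=' re-assigns an already-defined
key (inferred from xbc_parse_kv()):

	static char bootconfig[] __initdata =
		"feature.param = 1\n"
		"feature.param := 2\n";	/* assumed: replaces the value above */
	const char *emsg;
	int epos;

	if (xbc_init(bootconfig, &emsg, &epos) < 0)
		pr_err("bootconfig: %s at %d\n", emsg, epos);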
diff --git a/lib/crc64.c b/lib/crc64.c
index f8928ce28280..47cfa054827f 100644
--- a/lib/crc64.c
+++ b/lib/crc64.c
@@ -4,7 +4,7 @@
*
* This is a basic crc64 implementation following ECMA-182 specification,
* which can be found from,
- * http://www.ecma-international.org/publications/standards/Ecma-182.htm
+ * https://www.ecma-international.org/publications/standards/Ecma-182.htm
*
* Dr. Ross N. Williams has a great document to introduce the idea of CRC
* algorithm, here the CRC64 code is also inspired by the table-driven
diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
index 7c4932eed748..f9628f3924ce 100644
--- a/lib/decompress_bunzip2.c
+++ b/lib/decompress_bunzip2.c
@@ -34,7 +34,7 @@
Phone (337) 232-1234 or 1-800-738-2226
Fax (337) 232-1297
- http://www.hospiceacadiana.com/
+ https://www.hospiceacadiana.com/
Manuel
*/
diff --git a/lib/decompress_unlzma.c b/lib/decompress_unlzma.c
index ed7a1fd819f2..1cf409ef8d04 100644
--- a/lib/decompress_unlzma.c
+++ b/lib/decompress_unlzma.c
@@ -8,7 +8,7 @@
*implementation for lzma.
*Copyright (C) 2006 Aurelien Jacobs < aurel@gnuage.org >
*
- *Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/)
+ *Based on LzmaDecode.c from the LZMA SDK 4.22 (https://www.7-zip.org/)
*Copyright (C) 1999-2005 Igor Pavlov
*
*Copyrights of the parts, see headers below.
@@ -56,7 +56,7 @@ static long long INIT read_int(unsigned char *ptr, int size)
/* Small range coder implementation for lzma.
*Copyright (C) 2006 Aurelien Jacobs < aurel@gnuage.org >
*
- *Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/)
+ *Based on LzmaDecode.c from the LZMA SDK 4.22 (https://www.7-zip.org/)
*Copyright (c) 1999-2005 Igor Pavlov
*/
@@ -213,7 +213,7 @@ rc_bit_tree_decode(struct rc *rc, uint16_t *p, int num_levels, int *symbol)
* Small lzma deflate implementation.
* Copyright (C) 2006 Aurelien Jacobs < aurel@gnuage.org >
*
- * Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/)
+ * Based on LzmaDecode.c from the LZMA SDK 4.22 (https://www.7-zip.org/)
* Copyright (C) 1999-2005 Igor Pavlov
*/
diff --git a/lib/ioremap.c b/lib/ioremap.c
deleted file mode 100644
index 5ee3526f71b8..000000000000
--- a/lib/ioremap.c
+++ /dev/null
@@ -1,287 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Re-map IO memory to kernel address space so that we can access it.
- * This is needed for high PCI addresses that aren't mapped in the
- * 640k-1MB IO memory area on PC's
- *
- * (C) Copyright 1995 1996 Linus Torvalds
- */
-#include <linux/vmalloc.h>
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <linux/io.h>
-#include <linux/export.h>
-#include <asm/cacheflush.h>
-
-#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
-static int __read_mostly ioremap_p4d_capable;
-static int __read_mostly ioremap_pud_capable;
-static int __read_mostly ioremap_pmd_capable;
-static int __read_mostly ioremap_huge_disabled;
-
-static int __init set_nohugeiomap(char *str)
-{
- ioremap_huge_disabled = 1;
- return 0;
-}
-early_param("nohugeiomap", set_nohugeiomap);
-
-void __init ioremap_huge_init(void)
-{
- if (!ioremap_huge_disabled) {
- if (arch_ioremap_p4d_supported())
- ioremap_p4d_capable = 1;
- if (arch_ioremap_pud_supported())
- ioremap_pud_capable = 1;
- if (arch_ioremap_pmd_supported())
- ioremap_pmd_capable = 1;
- }
-}
-
-static inline int ioremap_p4d_enabled(void)
-{
- return ioremap_p4d_capable;
-}
-
-static inline int ioremap_pud_enabled(void)
-{
- return ioremap_pud_capable;
-}
-
-static inline int ioremap_pmd_enabled(void)
-{
- return ioremap_pmd_capable;
-}
-
-#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
-static inline int ioremap_p4d_enabled(void) { return 0; }
-static inline int ioremap_pud_enabled(void) { return 0; }
-static inline int ioremap_pmd_enabled(void) { return 0; }
-#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
-
-static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
- unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
- pgtbl_mod_mask *mask)
-{
- pte_t *pte;
- u64 pfn;
-
- pfn = phys_addr >> PAGE_SHIFT;
- pte = pte_alloc_kernel_track(pmd, addr, mask);
- if (!pte)
- return -ENOMEM;
- do {
- BUG_ON(!pte_none(*pte));
- set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
- pfn++;
- } while (pte++, addr += PAGE_SIZE, addr != end);
- *mask |= PGTBL_PTE_MODIFIED;
- return 0;
-}
-
-static int ioremap_try_huge_pmd(pmd_t *pmd, unsigned long addr,
- unsigned long end, phys_addr_t phys_addr,
- pgprot_t prot)
-{
- if (!ioremap_pmd_enabled())
- return 0;
-
- if ((end - addr) != PMD_SIZE)
- return 0;
-
- if (!IS_ALIGNED(addr, PMD_SIZE))
- return 0;
-
- if (!IS_ALIGNED(phys_addr, PMD_SIZE))
- return 0;
-
- if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
- return 0;
-
- return pmd_set_huge(pmd, phys_addr, prot);
-}
-
-static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
- unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
- pgtbl_mod_mask *mask)
-{
- pmd_t *pmd;
- unsigned long next;
-
- pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
- if (!pmd)
- return -ENOMEM;
- do {
- next = pmd_addr_end(addr, end);
-
- if (ioremap_try_huge_pmd(pmd, addr, next, phys_addr, prot)) {
- *mask |= PGTBL_PMD_MODIFIED;
- continue;
- }
-
- if (ioremap_pte_range(pmd, addr, next, phys_addr, prot, mask))
- return -ENOMEM;
- } while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
- return 0;
-}
-
-static int ioremap_try_huge_pud(pud_t *pud, unsigned long addr,
- unsigned long end, phys_addr_t phys_addr,
- pgprot_t prot)
-{
- if (!ioremap_pud_enabled())
- return 0;
-
- if ((end - addr) != PUD_SIZE)
- return 0;
-
- if (!IS_ALIGNED(addr, PUD_SIZE))
- return 0;
-
- if (!IS_ALIGNED(phys_addr, PUD_SIZE))
- return 0;
-
- if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
- return 0;
-
- return pud_set_huge(pud, phys_addr, prot);
-}
-
-static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
- unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
- pgtbl_mod_mask *mask)
-{
- pud_t *pud;
- unsigned long next;
-
- pud = pud_alloc_track(&init_mm, p4d, addr, mask);
- if (!pud)
- return -ENOMEM;
- do {
- next = pud_addr_end(addr, end);
-
- if (ioremap_try_huge_pud(pud, addr, next, phys_addr, prot)) {
- *mask |= PGTBL_PUD_MODIFIED;
- continue;
- }
-
- if (ioremap_pmd_range(pud, addr, next, phys_addr, prot, mask))
- return -ENOMEM;
- } while (pud++, phys_addr += (next - addr), addr = next, addr != end);
- return 0;
-}
-
-static int ioremap_try_huge_p4d(p4d_t *p4d, unsigned long addr,
- unsigned long end, phys_addr_t phys_addr,
- pgprot_t prot)
-{
- if (!ioremap_p4d_enabled())
- return 0;
-
- if ((end - addr) != P4D_SIZE)
- return 0;
-
- if (!IS_ALIGNED(addr, P4D_SIZE))
- return 0;
-
- if (!IS_ALIGNED(phys_addr, P4D_SIZE))
- return 0;
-
- if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
- return 0;
-
- return p4d_set_huge(p4d, phys_addr, prot);
-}
-
-static inline int ioremap_p4d_range(pgd_t *pgd, unsigned long addr,
- unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
- pgtbl_mod_mask *mask)
-{
- p4d_t *p4d;
- unsigned long next;
-
- p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
- if (!p4d)
- return -ENOMEM;
- do {
- next = p4d_addr_end(addr, end);
-
- if (ioremap_try_huge_p4d(p4d, addr, next, phys_addr, prot)) {
- *mask |= PGTBL_P4D_MODIFIED;
- continue;
- }
-
- if (ioremap_pud_range(p4d, addr, next, phys_addr, prot, mask))
- return -ENOMEM;
- } while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
- return 0;
-}
-
-int ioremap_page_range(unsigned long addr,
- unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
-{
- pgd_t *pgd;
- unsigned long start;
- unsigned long next;
- int err;
- pgtbl_mod_mask mask = 0;
-
- might_sleep();
- BUG_ON(addr >= end);
-
- start = addr;
- pgd = pgd_offset_k(addr);
- do {
- next = pgd_addr_end(addr, end);
- err = ioremap_p4d_range(pgd, addr, next, phys_addr, prot,
- &mask);
- if (err)
- break;
- } while (pgd++, phys_addr += (next - addr), addr = next, addr != end);
-
- flush_cache_vmap(start, end);
-
- if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
- arch_sync_kernel_mappings(start, end);
-
- return err;
-}
-
-#ifdef CONFIG_GENERIC_IOREMAP
-void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot)
-{
- unsigned long offset, vaddr;
- phys_addr_t last_addr;
- struct vm_struct *area;
-
- /* Disallow wrap-around or zero size */
- last_addr = addr + size - 1;
- if (!size || last_addr < addr)
- return NULL;
-
- /* Page-align mappings */
- offset = addr & (~PAGE_MASK);
- addr -= offset;
- size = PAGE_ALIGN(size + offset);
-
- area = get_vm_area_caller(size, VM_IOREMAP,
- __builtin_return_address(0));
- if (!area)
- return NULL;
- vaddr = (unsigned long)area->addr;
-
- if (ioremap_page_range(vaddr, vaddr + size, addr, __pgprot(prot))) {
- free_vm_area(area);
- return NULL;
- }
-
- return (void __iomem *)(vaddr + offset);
-}
-EXPORT_SYMBOL(ioremap_prot);
-
-void iounmap(volatile void __iomem *addr)
-{
- vunmap((void *)((unsigned long)addr & PAGE_MASK));
-}
-EXPORT_SYMBOL(iounmap);
-#endif /* CONFIG_GENERIC_IOREMAP */
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index bf538c2bec77..5e40786c8f12 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
+#include <crypto/hash.h>
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/uio.h>
@@ -1567,7 +1568,7 @@ EXPORT_SYMBOL(csum_and_copy_to_iter);
size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
struct iov_iter *i)
{
-#ifdef CONFIG_CRYPTO
+#ifdef CONFIG_CRYPTO_HASH
struct ahash_request *hash = hashp;
struct scatterlist sg;
size_t copied;
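
The narrower #ifdef matches what the code needs: hashp is really a
struct ahash_request, which only exists with CONFIG_CRYPTO_HASH. A rough
sketch of a caller, assuming "sha256" is available, buf/len/iter are in
scope, and with error handling elided:

	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
	size_t n;

	ahash_request_set_callback(req, 0, NULL, NULL);
	crypto_ahash_init(req);
	n = hash_and_copy_to_iter(buf, len, req, &iter);	/* hashp == req */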
diff --git a/lib/kstrtox.c b/lib/kstrtox.c
index 1006bf70bf74..a14ccf905055 100644
--- a/lib/kstrtox.c
+++ b/lib/kstrtox.c
@@ -115,8 +115,7 @@ static int _kstrtoull(const char *s, unsigned int base, unsigned long long *res)
* @res: Where to write the result of the conversion on success.
*
* Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
- * Used as a replacement for the obsolete simple_strtoull. Return code must
- * be checked.
+ * Preferred over simple_strtoull(). Return code must be checked.
*/
int kstrtoull(const char *s, unsigned int base, unsigned long long *res)
{
@@ -139,8 +138,7 @@ EXPORT_SYMBOL(kstrtoull);
* @res: Where to write the result of the conversion on success.
*
* Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
- * Used as a replacement for the obsolete simple_strtoull. Return code must
- * be checked.
+ * Preferred over simple_strtoll(). Return code must be checked.
*/
int kstrtoll(const char *s, unsigned int base, long long *res)
{
@@ -211,8 +209,7 @@ EXPORT_SYMBOL(_kstrtol);
* @res: Where to write the result of the conversion on success.
*
* Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
- * Used as a replacement for the obsolete simple_strtoull. Return code must
- * be checked.
+ * Preferred over simple_strtoul(). Return code must be checked.
*/
int kstrtouint(const char *s, unsigned int base, unsigned int *res)
{
@@ -242,8 +239,7 @@ EXPORT_SYMBOL(kstrtouint);
* @res: Where to write the result of the conversion on success.
*
* Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
- * Used as a replacement for the obsolete simple_strtoull. Return code must
- * be checked.
+ * Preferred over simple_strtol(). Return code must be checked.
*/
int kstrtoint(const char *s, unsigned int base, int *res)
{
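
All four doc-comment rewrites end on the same point: unlike the
simple_strto*() family, the kstrto*() return code must be checked. A
typical call site, sketched with names assumed:

	unsigned int val;
	int err;

	err = kstrtouint(buf, 10, &val);	/* base 0 would auto-detect */
	if (err)
		return err;			/* -ERANGE or -EINVAL */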
diff --git a/lib/livepatch/Makefile b/lib/livepatch/Makefile
index 295b94bff370..dcc912b3478f 100644
--- a/lib/livepatch/Makefile
+++ b/lib/livepatch/Makefile
@@ -12,7 +12,3 @@ obj-$(CONFIG_TEST_LIVEPATCH) += test_klp_atomic_replace.o \
test_klp_state.o \
test_klp_state2.o \
test_klp_state3.o
-
-# Target modules to be livepatched require CC_FLAGS_FTRACE
-CFLAGS_test_klp_callbacks_busy.o += $(CC_FLAGS_FTRACE)
-CFLAGS_test_klp_callbacks_mod.o += $(CC_FLAGS_FTRACE)
diff --git a/lib/livepatch/test_klp_callbacks_busy.c b/lib/livepatch/test_klp_callbacks_busy.c
index 40beddf8a0e2..7ac845f65be5 100644
--- a/lib/livepatch/test_klp_callbacks_busy.c
+++ b/lib/livepatch/test_klp_callbacks_busy.c
@@ -5,34 +5,53 @@
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
-static int sleep_secs;
-module_param(sleep_secs, int, 0644);
-MODULE_PARM_DESC(sleep_secs, "sleep_secs (default=0)");
+/* load/run-time control from sysfs writer */
+static bool block_transition;
+module_param(block_transition, bool, 0644);
+MODULE_PARM_DESC(block_transition, "block_transition (default=false)");
static void busymod_work_func(struct work_struct *work);
-static DECLARE_DELAYED_WORK(work, busymod_work_func);
+static DECLARE_WORK(work, busymod_work_func);
static void busymod_work_func(struct work_struct *work)
{
- pr_info("%s, sleeping %d seconds ...\n", __func__, sleep_secs);
- msleep(sleep_secs * 1000);
+ pr_info("%s enter\n", __func__);
+
+ while (READ_ONCE(block_transition)) {
+ /*
+ * Busy-wait until the sysfs writer has acknowledged a
+ * blocked transition and clears the flag.
+ */
+ msleep(20);
+ }
+
pr_info("%s exit\n", __func__);
}
static int test_klp_callbacks_busy_init(void)
{
pr_info("%s\n", __func__);
- schedule_delayed_work(&work,
- msecs_to_jiffies(1000 * 0));
+ schedule_work(&work);
+
+ if (!block_transition) {
+ /*
+ * Serialize output: print all messages from the work
+ * function before returning from init().
+ */
+ flush_work(&work);
+ }
+
return 0;
}
static void test_klp_callbacks_busy_exit(void)
{
- cancel_delayed_work_sync(&work);
+ WRITE_ONCE(block_transition, false);
+ flush_work(&work);
pr_info("%s\n", __func__);
}
diff --git a/lib/livepatch/test_klp_shadow_vars.c b/lib/livepatch/test_klp_shadow_vars.c
index f0b5a1d24e55..b99116490858 100644
--- a/lib/livepatch/test_klp_shadow_vars.c
+++ b/lib/livepatch/test_klp_shadow_vars.c
@@ -109,8 +109,7 @@ static void shadow_free(void *obj, unsigned long id, klp_shadow_dtor_t dtor)
static void shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor)
{
klp_shadow_free_all(id, dtor);
- pr_info("klp_%s(id=0x%lx, dtor=PTR%d)\n",
- __func__, id, ptr_id(dtor));
+ pr_info("klp_%s(id=0x%lx, dtor=PTR%d)\n", __func__, id, ptr_id(dtor));
}
@@ -124,12 +123,16 @@ static int shadow_ctor(void *obj, void *shadow_data, void *ctor_data)
return -EINVAL;
*sv = *var;
- pr_info("%s: PTR%d -> PTR%d\n",
- __func__, ptr_id(sv), ptr_id(*var));
+ pr_info("%s: PTR%d -> PTR%d\n", __func__, ptr_id(sv), ptr_id(*var));
return 0;
}
+/*
+ * With more than one item to free in the list, the freeing order is not
+ * deterministic, so shadow_dtor is not passed to shadow_free_all(): its
+ * messages would come out in nondeterministic order and make the test
+ * fail (see pass 6).
+ */
static void shadow_dtor(void *obj, void *shadow_data)
{
int **sv = shadow_data;
@@ -138,132 +141,153 @@ static void shadow_dtor(void *obj, void *shadow_data)
__func__, ptr_id(obj), ptr_id(sv));
}
-static int test_klp_shadow_vars_init(void)
-{
- void *obj = THIS_MODULE;
- int id = 0x1234;
- gfp_t gfp_flags = GFP_KERNEL;
+/* number of objects we simulate that need shadow vars */
+#define NUM_OBJS 3
- int var1, var2, var3, var4;
- int *pv1, *pv2, *pv3, *pv4;
- int **sv1, **sv2, **sv3, **sv4;
+/* dynamically created obj fields have the following shadow var id values */
+#define SV_ID1 0x1234
+#define SV_ID2 0x1235
- int **sv;
+/*
+ * The main test case adds/removes new fields (shadow vars) to/from each
+ * of these test structure instances. The commented-out fields at the end
+ * of the struct stand for shadow variables that may be added to and
+ * removed from the struct during execution.
+ */
+struct test_object {
+ /* add anything below here; at least one member avoids an empty struct */
+ struct shadow_ptr sp;
- pv1 = &var1;
- pv2 = &var2;
- pv3 = &var3;
- pv4 = &var4;
+ /* these represent shadow vars added and removed with SV_ID{1,2} */
+ /* char nfield1; */
+ /* int nfield2; */
+};
+
+static int test_klp_shadow_vars_init(void)
+{
+ struct test_object objs[NUM_OBJS];
+ char nfields1[NUM_OBJS], *pnfields1[NUM_OBJS], **sv1[NUM_OBJS];
+ char *pndup[NUM_OBJS];
+ int nfields2[NUM_OBJS], *pnfields2[NUM_OBJS], **sv2[NUM_OBJS];
+ void **sv;
+ int ret;
+ int i;
ptr_id(NULL);
- ptr_id(pv1);
- ptr_id(pv2);
- ptr_id(pv3);
- ptr_id(pv4);
/*
* With an empty shadow variable hash table, expect not to find
* any matches.
*/
- sv = shadow_get(obj, id);
+ sv = shadow_get(&objs[0], SV_ID1);
if (!sv)
pr_info(" got expected NULL result\n");
- /*
- * Allocate a few shadow variables with different <obj> and <id>.
- */
- sv1 = shadow_alloc(obj, id, sizeof(pv1), gfp_flags, shadow_ctor, &pv1);
- if (!sv1)
- return -ENOMEM;
-
- sv2 = shadow_alloc(obj + 1, id, sizeof(pv2), gfp_flags, shadow_ctor, &pv2);
- if (!sv2)
- return -ENOMEM;
-
- sv3 = shadow_alloc(obj, id + 1, sizeof(pv3), gfp_flags, shadow_ctor, &pv3);
- if (!sv3)
- return -ENOMEM;
-
- /*
- * Verify we can find our new shadow variables and that they point
- * to expected data.
- */
- sv = shadow_get(obj, id);
- if (!sv)
- return -EINVAL;
- if (sv == sv1 && *sv1 == pv1)
- pr_info(" got expected PTR%d -> PTR%d result\n",
- ptr_id(sv1), ptr_id(*sv1));
-
- sv = shadow_get(obj + 1, id);
- if (!sv)
- return -EINVAL;
- if (sv == sv2 && *sv2 == pv2)
- pr_info(" got expected PTR%d -> PTR%d result\n",
- ptr_id(sv2), ptr_id(*sv2));
- sv = shadow_get(obj, id + 1);
- if (!sv)
- return -EINVAL;
- if (sv == sv3 && *sv3 == pv3)
- pr_info(" got expected PTR%d -> PTR%d result\n",
- ptr_id(sv3), ptr_id(*sv3));
-
- /*
- * Allocate or get a few more, this time with the same <obj>, <id>.
- * The second invocation should return the same shadow var.
- */
- sv4 = shadow_get_or_alloc(obj + 2, id, sizeof(pv4), gfp_flags, shadow_ctor, &pv4);
- if (!sv4)
- return -ENOMEM;
-
- sv = shadow_get_or_alloc(obj + 2, id, sizeof(pv4), gfp_flags, shadow_ctor, &pv4);
- if (!sv)
- return -EINVAL;
- if (sv == sv4 && *sv4 == pv4)
- pr_info(" got expected PTR%d -> PTR%d result\n",
- ptr_id(sv4), ptr_id(*sv4));
-
- /*
- * Free the <obj=*, id> shadow variables and check that we can no
- * longer find them.
- */
- shadow_free(obj, id, shadow_dtor); /* sv1 */
- sv = shadow_get(obj, id);
- if (!sv)
- pr_info(" got expected NULL result\n");
+ /* pass 1: init & alloc a char+int pair of svars for each objs */
+ for (i = 0; i < NUM_OBJS; i++) {
+ pnfields1[i] = &nfields1[i];
+ ptr_id(pnfields1[i]);
+
+ if (i % 2) {
+ sv1[i] = shadow_alloc(&objs[i], SV_ID1,
+ sizeof(pnfields1[i]), GFP_KERNEL,
+ shadow_ctor, &pnfields1[i]);
+ } else {
+ sv1[i] = shadow_get_or_alloc(&objs[i], SV_ID1,
+ sizeof(pnfields1[i]), GFP_KERNEL,
+ shadow_ctor, &pnfields1[i]);
+ }
+ if (!sv1[i]) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ pnfields2[i] = &nfields2[i];
+ ptr_id(pnfields2[i]);
+ sv2[i] = shadow_alloc(&objs[i], SV_ID2, sizeof(pnfields2[i]),
+ GFP_KERNEL, shadow_ctor, &pnfields2[i]);
+ if (!sv2[i]) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
- shadow_free(obj + 1, id, shadow_dtor); /* sv2 */
- sv = shadow_get(obj + 1, id);
- if (!sv)
- pr_info(" got expected NULL result\n");
+ /* pass 2: verify we find allocated svars and where they point to */
+ for (i = 0; i < NUM_OBJS; i++) {
+ /* check the "char" svar for all objects */
+ sv = shadow_get(&objs[i], SV_ID1);
+ if (!sv) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if ((char **)sv == sv1[i] && *sv1[i] == pnfields1[i])
+ pr_info(" got expected PTR%d -> PTR%d result\n",
+ ptr_id(sv1[i]), ptr_id(*sv1[i]));
+
+ /* check the "int" svar for all objects */
+ sv = shadow_get(&objs[i], SV_ID2);
+ if (!sv) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if ((int **)sv == sv2[i] && *sv2[i] == pnfields2[i])
+ pr_info(" got expected PTR%d -> PTR%d result\n",
+ ptr_id(sv2[i]), ptr_id(*sv2[i]));
+ }
- shadow_free(obj + 2, id, shadow_dtor); /* sv4 */
- sv = shadow_get(obj + 2, id);
- if (!sv)
- pr_info(" got expected NULL result\n");
+ /* pass 3: verify that 'get_or_alloc' returns already allocated svars */
+ for (i = 0; i < NUM_OBJS; i++) {
+ pndup[i] = &nfields1[i];
+ ptr_id(pndup[i]);
+
+ sv = shadow_get_or_alloc(&objs[i], SV_ID1, sizeof(pndup[i]),
+ GFP_KERNEL, shadow_ctor, &pndup[i]);
+ if (!sv) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if ((char **)sv == sv1[i] && *sv1[i] == pnfields1[i])
+ pr_info(" got expected PTR%d -> PTR%d result\n",
+ ptr_id(sv1[i]), ptr_id(*sv1[i]));
+ }
- /*
- * We should still find an <id+1> variable.
- */
- sv = shadow_get(obj, id + 1);
- if (!sv)
- return -EINVAL;
- if (sv == sv3 && *sv3 == pv3)
- pr_info(" got expected PTR%d -> PTR%d result\n",
- ptr_id(sv3), ptr_id(*sv3));
+ /* pass 4: free <objs[*], SV_ID1> pairs of svars, verify removal */
+ for (i = 0; i < NUM_OBJS; i++) {
+ shadow_free(&objs[i], SV_ID1, shadow_dtor); /* 'char' pairs */
+ sv = shadow_get(&objs[i], SV_ID1);
+ if (!sv)
+ pr_info(" got expected NULL result\n");
+ }
- /*
- * Free all the <id+1> variables, too.
- */
- shadow_free_all(id + 1, shadow_dtor); /* sv3 */
- sv = shadow_get(obj, id);
- if (!sv)
- pr_info(" shadow_get() got expected NULL result\n");
+ /* pass 5: check we still find <objs[*], SV_ID2> svar pairs */
+ for (i = 0; i < NUM_OBJS; i++) {
+ sv = shadow_get(&objs[i], SV_ID2); /* 'int' pairs */
+ if (!sv) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if ((int **)sv == sv2[i] && *sv2[i] == pnfields2[i])
+ pr_info(" got expected PTR%d -> PTR%d result\n",
+ ptr_id(sv2[i]), ptr_id(*sv2[i]));
+ }
+ /* pass 6: free all the <objs[*], SV_ID2> svar pairs too. */
+ shadow_free_all(SV_ID2, NULL); /* 'int' pairs */
+ for (i = 0; i < NUM_OBJS; i++) {
+ sv = shadow_get(&objs[i], SV_ID2);
+ if (!sv)
+ pr_info(" got expected NULL result\n");
+ }
free_ptr_list();
return 0;
+out:
+ shadow_free_all(SV_ID1, NULL); /* 'char' pairs */
+ shadow_free_all(SV_ID2, NULL); /* 'int' pairs */
+ free_ptr_list();
+
+ return ret;
}
static void test_klp_shadow_vars_exit(void)
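
For reference, the shadow_*() helpers in this test are thin logging
wrappers around the klp_shadow API, so the calls underneath look roughly
like this (a sketch; obj, ctor_data, and the dtor are assumed):

	int **sv;

	sv = klp_shadow_alloc(obj, SV_ID1, sizeof(int *), GFP_KERNEL,
			      shadow_ctor, &ctor_data);	/* new <obj, id> pair */
	sv = klp_shadow_get(obj, SV_ID1);		/* NULL if not attached */
	klp_shadow_free(obj, SV_ID1, shadow_dtor);	/* free one pair */
	klp_shadow_free_all(SV_ID1, shadow_dtor);	/* free all with this id */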
diff --git a/lib/math/rational.c b/lib/math/rational.c
index 31fb27db2deb..df75c8809693 100644
--- a/lib/math/rational.c
+++ b/lib/math/rational.c
@@ -27,7 +27,7 @@
* with the fractional part size described in given_denominator.
*
* for theoretical background, see:
- * http://en.wikipedia.org/wiki/Continued_fraction
+ * https://en.wikipedia.org/wiki/Continued_fraction
*/
void rational_best_approximation(
diff --git a/lib/mpi/mpiutil.c b/lib/mpi/mpiutil.c
index 20ed0f766787..4cd2b335cb7f 100644
--- a/lib/mpi/mpiutil.c
+++ b/lib/mpi/mpiutil.c
@@ -69,7 +69,7 @@ void mpi_free_limb_space(mpi_ptr_t a)
if (!a)
return;
- kzfree(a);
+ kfree_sensitive(a);
}
void mpi_assign_limb_space(MPI a, mpi_ptr_t ap, unsigned nlimbs)
@@ -95,7 +95,7 @@ int mpi_resize(MPI a, unsigned nlimbs)
if (!p)
return -ENOMEM;
memcpy(p, a->d, a->alloced * sizeof(mpi_limb_t));
- kzfree(a->d);
+ kfree_sensitive(a->d);
a->d = p;
} else {
a->d = kcalloc(nlimbs, sizeof(mpi_limb_t), GFP_KERNEL);
@@ -112,7 +112,7 @@ void mpi_free(MPI a)
return;
if (a->flags & 4)
- kzfree(a->d);
+ kfree_sensitive(a->d);
else
mpi_free_limb_space(a->d);
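
kzfree() was renamed treewide to kfree_sensitive(); the behavior is
unchanged: the buffer is zeroed (via memzero_explicit()) before being
freed, which is the point for key material such as these MPI limbs. A
sketch of the pattern, with names assumed:

	u8 *secret = kmalloc(32, GFP_KERNEL);

	if (!secret)
		return -ENOMEM;
	get_random_bytes(secret, 32);
	/* ... use the key ... */
	kfree_sensitive(secret);	/* zeroes, then kfree() */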
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index a66595ba5543..a2345de90e93 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -99,6 +99,25 @@ void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
EXPORT_SYMBOL(percpu_counter_add_batch);
/*
+ * For a percpu_counter with a big batch, the deviation of its count can
+ * be large, and there are cases where that deviation must be reduced,
+ * e.g. when the counter's batch is decreased at runtime for better
+ * accuracy. This can be achieved by running this sync function on each
+ * CPU.
+ */
+void percpu_counter_sync(struct percpu_counter *fbc)
+{
+ unsigned long flags;
+ s64 count;
+
+ raw_spin_lock_irqsave(&fbc->lock, flags);
+ count = __this_cpu_read(*fbc->counters);
+ fbc->count += count;
+ __this_cpu_sub(*fbc->counters, count);
+ raw_spin_unlock_irqrestore(&fbc->lock, flags);
+}
+EXPORT_SYMBOL(percpu_counter_sync);
+
+/*
* Add up all the per-cpu counts, return the result. This is a more accurate
* but much slower version of percpu_counter_read_positive()
*/
diff --git a/lib/rbtree.c b/lib/rbtree.c
index 8545872e61db..c4ac5c2421f2 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -13,7 +13,7 @@
#include <linux/export.h>
/*
- * red-black trees properties: http://en.wikipedia.org/wiki/Rbtree
+ * red-black trees properties: https://en.wikipedia.org/wiki/Rbtree
*
* 1) A node is either red or black
* 2) The root is black
diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
index 6b13150667f5..df903c53952b 100644
--- a/lib/test_bitmap.c
+++ b/lib/test_bitmap.c
@@ -610,6 +610,63 @@ static void __init test_for_each_set_clump8(void)
expect_eq_clump8(start, CLUMP_EXP_NUMBITS, clump_exp, &clump);
}
+struct test_bitmap_cut {
+ unsigned int first;
+ unsigned int cut;
+ unsigned int nbits;
+ unsigned long in[4];
+ unsigned long expected[4];
+};
+
+static struct test_bitmap_cut test_cut[] = {
+ { 0, 0, 8, { 0x0000000aUL, }, { 0x0000000aUL, }, },
+ { 0, 0, 32, { 0xdadadeadUL, }, { 0xdadadeadUL, }, },
+ { 0, 3, 8, { 0x000000aaUL, }, { 0x00000015UL, }, },
+ { 3, 3, 8, { 0x000000aaUL, }, { 0x00000012UL, }, },
+ { 0, 1, 32, { 0xa5a5a5a5UL, }, { 0x52d2d2d2UL, }, },
+ { 0, 8, 32, { 0xdeadc0deUL, }, { 0x00deadc0UL, }, },
+ { 1, 1, 32, { 0x5a5a5a5aUL, }, { 0x2d2d2d2cUL, }, },
+ { 0, 15, 32, { 0xa5a5a5a5UL, }, { 0x00014b4bUL, }, },
+ { 0, 16, 32, { 0xa5a5a5a5UL, }, { 0x0000a5a5UL, }, },
+ { 15, 15, 32, { 0xa5a5a5a5UL, }, { 0x000125a5UL, }, },
+ { 15, 16, 32, { 0xa5a5a5a5UL, }, { 0x0000a5a5UL, }, },
+ { 16, 15, 32, { 0xa5a5a5a5UL, }, { 0x0001a5a5UL, }, },
+
+ { BITS_PER_LONG, BITS_PER_LONG, BITS_PER_LONG,
+ { 0xa5a5a5a5UL, 0xa5a5a5a5UL, },
+ { 0xa5a5a5a5UL, 0xa5a5a5a5UL, },
+ },
+ { 1, BITS_PER_LONG - 1, BITS_PER_LONG,
+ { 0xa5a5a5a5UL, 0xa5a5a5a5UL, },
+ { 0x00000001UL, 0x00000001UL, },
+ },
+
+ { 0, BITS_PER_LONG * 2, BITS_PER_LONG * 2 + 1,
+ { 0xa5a5a5a5UL, 0x00000001UL, 0x00000001UL, 0x00000001UL },
+ { 0x00000001UL, },
+ },
+ { 16, BITS_PER_LONG * 2 + 1, BITS_PER_LONG * 2 + 1 + 16,
+ { 0x0000ffffUL, 0x5a5a5a5aUL, 0x5a5a5a5aUL, 0x5a5a5a5aUL },
+ { 0x2d2dffffUL, },
+ },
+};
+
+static void __init test_bitmap_cut(void)
+{
+ unsigned long b[5], *in = &b[1], *out = &b[0]; /* Partial overlap */
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(test_cut); i++) {
+ struct test_bitmap_cut *t = &test_cut[i];
+
+ memcpy(in, t->in, sizeof(t->in));
+
+ bitmap_cut(out, in, t->first, t->cut, t->nbits);
+
+ expect_eq_bitmap(t->expected, out, t->nbits);
+ }
+}
+
static void __init selftest(void)
{
test_zero_clear();
@@ -623,6 +680,7 @@ static void __init selftest(void)
test_bitmap_parselist_user();
test_mem_optimisations();
test_for_each_set_clump8();
+ test_bitmap_cut();
}
KSTM_MODULE_LOADERS(test_bitmap);
diff --git a/lib/test_bitops.c b/lib/test_bitops.c
index ced25e3a779b..471141ddd691 100644
--- a/lib/test_bitops.c
+++ b/lib/test_bitops.c
@@ -52,9 +52,9 @@ static unsigned long order_comb_long[][2] = {
static int __init test_bitops_startup(void)
{
- int i;
+ int i, bit_set;
- pr_warn("Loaded test module\n");
+ pr_info("Starting bitops test\n");
set_bit(BITOPS_4, g_bitmap);
set_bit(BITOPS_7, g_bitmap);
set_bit(BITOPS_11, g_bitmap);
@@ -81,12 +81,8 @@ static int __init test_bitops_startup(void)
order_comb_long[i][0]);
}
#endif
- return 0;
-}
-static void __exit test_bitops_unstartup(void)
-{
- int bit_set;
+ barrier();
clear_bit(BITOPS_4, g_bitmap);
clear_bit(BITOPS_7, g_bitmap);
@@ -98,7 +94,13 @@ static void __exit test_bitops_unstartup(void)
if (bit_set != BITOPS_LAST)
pr_err("ERROR: FOUND SET BIT %d\n", bit_set);
- pr_warn("Unloaded test module\n");
+ pr_info("Completed bitops test\n");
+
+ return 0;
+}
+
+static void __exit test_bitops_unstartup(void)
+{
}
module_init(test_bitops_startup);
diff --git a/lib/test_bits.c b/lib/test_bits.c
new file mode 100644
index 000000000000..c9368a2314e7
--- /dev/null
+++ b/lib/test_bits.c
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Test cases for functions and macros in bits.h
+ */
+
+#include <kunit/test.h>
+#include <linux/bits.h>
+
+static void genmask_test(struct kunit *test)
+{
+ KUNIT_EXPECT_EQ(test, 1ul, GENMASK(0, 0));
+ KUNIT_EXPECT_EQ(test, 3ul, GENMASK(1, 0));
+ KUNIT_EXPECT_EQ(test, 6ul, GENMASK(2, 1));
+ KUNIT_EXPECT_EQ(test, 0xFFFFFFFFul, GENMASK(31, 0));
+
+#ifdef TEST_GENMASK_FAILURES
+ /* these should fail compilation */
+ GENMASK(0, 1);
+ GENMASK(0, 10);
+ GENMASK(9, 10);
+#endif
+}
+
+static void genmask_ull_test(struct kunit *test)
+{
+ KUNIT_EXPECT_EQ(test, 1ull, GENMASK_ULL(0, 0));
+ KUNIT_EXPECT_EQ(test, 3ull, GENMASK_ULL(1, 0));
+ KUNIT_EXPECT_EQ(test, 0x000000ffffe00000ull, GENMASK_ULL(39, 21));
+ KUNIT_EXPECT_EQ(test, 0xffffffffffffffffull, GENMASK_ULL(63, 0));
+
+#ifdef TEST_GENMASK_FAILURES
+ /* these should fail compilation */
+ GENMASK_ULL(0, 1);
+ GENMASK_ULL(0, 10);
+ GENMASK_ULL(9, 10);
+#endif
+}
+
+static void genmask_input_check_test(struct kunit *test)
+{
+ unsigned int x, y;
+ int z, w;
+
+ /* Unknown input */
+ KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(x, 0));
+ KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(0, x));
+ KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(x, y));
+
+ KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(z, 0));
+ KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(0, z));
+ KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(z, w));
+
+ /* Valid input */
+ KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(1, 1));
+ KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(39, 21));
+}
+
+static struct kunit_case bits_test_cases[] = {
+ KUNIT_CASE(genmask_test),
+ KUNIT_CASE(genmask_ull_test),
+ KUNIT_CASE(genmask_input_check_test),
+ {}
+};
+
+static struct kunit_suite bits_test_suite = {
+ .name = "bits-test",
+ .test_cases = bits_test_cases,
+};
+kunit_test_suite(bits_test_suite);
+
+MODULE_LICENSE("GPL");
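
As the expectations above encode, GENMASK(h, l) sets bits l through h
inclusive, and GENMASK_INPUT_CHECK() can only reject h < l when both are
compile-time constants. One worked instance as a sketch:

	/* bits 21..39 set: (BIT_ULL(40) - 1) & ~(BIT_ULL(21) - 1) */
	u64 mask = GENMASK_ULL(39, 21);		/* 0x000000ffffe00000ull */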
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index dc2c6a51d11a..53e953bb1d1d 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -23,6 +23,10 @@
#include <asm/page.h>
+#include "../mm/kasan/kasan.h"
+
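+/*
+ * Presumably needed because software tag-based KASAN tags memory in
+ * KASAN_SHADOW_SCALE_SIZE-byte granules: an access still inside the
+ * object's last granule goes unreported, so the tests below step a
+ * full granule past the end in that mode.
+ */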
+#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_SHADOW_SCALE_SIZE)
+
/*
* We assign some test results to these globals to make sure the tests
* are not eliminated as dead code.
@@ -48,7 +52,8 @@ static noinline void __init kmalloc_oob_right(void)
return;
}
- ptr[size] = 'x';
+ ptr[size + OOB_TAG_OFF] = 'x';
+
kfree(ptr);
}
@@ -100,7 +105,8 @@ static noinline void __init kmalloc_pagealloc_oob_right(void)
return;
}
- ptr[size] = 0;
+ ptr[size + OOB_TAG_OFF] = 0;
+
kfree(ptr);
}
@@ -170,7 +176,8 @@ static noinline void __init kmalloc_oob_krealloc_more(void)
return;
}
- ptr2[size2] = 'x';
+ ptr2[size2 + OOB_TAG_OFF] = 'x';
+
kfree(ptr2);
}
@@ -188,7 +195,9 @@ static noinline void __init kmalloc_oob_krealloc_less(void)
kfree(ptr1);
return;
}
- ptr2[size2] = 'x';
+
+ ptr2[size2 + OOB_TAG_OFF] = 'x';
+
kfree(ptr2);
}
@@ -224,7 +233,8 @@ static noinline void __init kmalloc_oob_memset_2(void)
return;
}
- memset(ptr+7, 0, 2);
+ memset(ptr + 7 + OOB_TAG_OFF, 0, 2);
+
kfree(ptr);
}
@@ -240,7 +250,8 @@ static noinline void __init kmalloc_oob_memset_4(void)
return;
}
- memset(ptr+5, 0, 4);
+ memset(ptr + 5 + OOB_TAG_OFF, 0, 4);
+
kfree(ptr);
}
@@ -257,7 +268,8 @@ static noinline void __init kmalloc_oob_memset_8(void)
return;
}
- memset(ptr+1, 0, 8);
+ memset(ptr + 1 + OOB_TAG_OFF, 0, 8);
+
kfree(ptr);
}
@@ -273,7 +285,8 @@ static noinline void __init kmalloc_oob_memset_16(void)
return;
}
- memset(ptr+1, 0, 16);
+ memset(ptr + 1 + OOB_TAG_OFF, 0, 16);
+
kfree(ptr);
}
@@ -289,7 +302,8 @@ static noinline void __init kmalloc_oob_in_memset(void)
return;
}
- memset(ptr, 0, size+5);
+ memset(ptr, 0, size + 5 + OOB_TAG_OFF);
+
kfree(ptr);
}
@@ -423,7 +437,8 @@ static noinline void __init kmem_cache_oob(void)
return;
}
- *p = p[size];
+ *p = p[size + OOB_TAG_OFF];
+
kmem_cache_free(cache, p);
kmem_cache_destroy(cache);
}
@@ -473,7 +488,7 @@ static noinline void __init kasan_global_oob(void)
static noinline void __init kasan_stack_oob(void)
{
char stack_array[10];
- volatile int i = 0;
+ volatile int i = OOB_TAG_OFF;
char *p = &stack_array[ARRAY_SIZE(stack_array) + i];
pr_info("out-of-bounds on stack\n");
@@ -520,25 +535,25 @@ static noinline void __init copy_user_test(void)
}
pr_info("out-of-bounds in copy_from_user()\n");
- unused = copy_from_user(kmem, usermem, size + 1);
+ unused = copy_from_user(kmem, usermem, size + 1 + OOB_TAG_OFF);
pr_info("out-of-bounds in copy_to_user()\n");
- unused = copy_to_user(usermem, kmem, size + 1);
+ unused = copy_to_user(usermem, kmem, size + 1 + OOB_TAG_OFF);
pr_info("out-of-bounds in __copy_from_user()\n");
- unused = __copy_from_user(kmem, usermem, size + 1);
+ unused = __copy_from_user(kmem, usermem, size + 1 + OOB_TAG_OFF);
pr_info("out-of-bounds in __copy_to_user()\n");
- unused = __copy_to_user(usermem, kmem, size + 1);
+ unused = __copy_to_user(usermem, kmem, size + 1 + OOB_TAG_OFF);
pr_info("out-of-bounds in __copy_from_user_inatomic()\n");
- unused = __copy_from_user_inatomic(kmem, usermem, size + 1);
+ unused = __copy_from_user_inatomic(kmem, usermem, size + 1 + OOB_TAG_OFF);
pr_info("out-of-bounds in __copy_to_user_inatomic()\n");
- unused = __copy_to_user_inatomic(usermem, kmem, size + 1);
+ unused = __copy_to_user_inatomic(usermem, kmem, size + 1 + OOB_TAG_OFF);
pr_info("out-of-bounds in strncpy_from_user()\n");
- unused = strncpy_from_user(kmem, usermem, size + 1);
+ unused = strncpy_from_user(kmem, usermem, size + 1 + OOB_TAG_OFF);
vm_munmap((unsigned long)usermem, PAGE_SIZE);
kfree(kmem);
@@ -766,15 +781,15 @@ static noinline void __init kmalloc_double_kzfree(void)
char *ptr;
size_t size = 16;
- pr_info("double-free (kzfree)\n");
+ pr_info("double-free (kfree_sensitive)\n");
ptr = kmalloc(size, GFP_KERNEL);
if (!ptr) {
pr_err("Allocation failed\n");
return;
}
- kzfree(ptr);
- kzfree(ptr);
+ kfree_sensitive(ptr);
+ kfree_sensitive(ptr);
}
#ifdef CONFIG_KASAN_VMALLOC
@@ -801,6 +816,35 @@ static noinline void __init vmalloc_oob(void)
static void __init vmalloc_oob(void) {}
#endif
+static struct kasan_rcu_info {
+ int i;
+ struct rcu_head rcu;
+} *global_rcu_ptr;
+
+static noinline void __init kasan_rcu_reclaim(struct rcu_head *rp)
+{
+ struct kasan_rcu_info *fp = container_of(rp,
+ struct kasan_rcu_info, rcu);
+
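+ /* freeing fp first and then writing fp->i is the deliberate use-after-free */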
+ kfree(fp);
+ fp->i = 1;
+}
+
+static noinline void __init kasan_rcu_uaf(void)
+{
+ struct kasan_rcu_info *ptr;
+
+ pr_info("use-after-free in kasan_rcu_reclaim\n");
+ ptr = kmalloc(sizeof(struct kasan_rcu_info), GFP_KERNEL);
+ if (!ptr) {
+ pr_err("Allocation failed\n");
+ return;
+ }
+
+ global_rcu_ptr = rcu_dereference_protected(ptr, NULL);
+ call_rcu(&global_rcu_ptr->rcu, kasan_rcu_reclaim);
+}
+
static int __init kmalloc_tests_init(void)
{
/*
@@ -848,6 +892,7 @@ static int __init kmalloc_tests_init(void)
kasan_bitops();
kmalloc_double_kzfree();
vmalloc_oob();
+ kasan_rcu_uaf();
kasan_restore_multi_shot(multishot);
diff --git a/lib/test_kmod.c b/lib/test_kmod.c
index e651c37d56db..eab52770070d 100644
--- a/lib/test_kmod.c
+++ b/lib/test_kmod.c
@@ -745,7 +745,7 @@ static int trigger_config_run_type(struct kmod_test_device *test_dev,
break;
case TEST_KMOD_FS_TYPE:
kfree_const(config->test_fs);
- config->test_driver = NULL;
+ config->test_fs = NULL;
copied = config_copy_test_fs(config, test_str,
strlen(test_str));
break;
diff --git a/lib/test_lockup.c b/lib/test_lockup.c
index ff26f36d729f..f1a020bcc763 100644
--- a/lib/test_lockup.c
+++ b/lib/test_lockup.c
@@ -400,7 +400,7 @@ static void test_lockup(bool master)
test_unlock(master, true);
}
-DEFINE_PER_CPU(struct work_struct, test_works);
+static DEFINE_PER_CPU(struct work_struct, test_works);
static void test_work_fn(struct work_struct *work)
{
@@ -512,8 +512,8 @@ static int __init test_lockup_init(void)
if (test_file_path[0]) {
test_file = filp_open(test_file_path, O_RDONLY, 0);
if (IS_ERR(test_file)) {
- pr_err("cannot find file_path\n");
- return -EINVAL;
+ pr_err("failed to open %s: %ld\n", test_file_path, PTR_ERR(test_file));
+ return PTR_ERR(test_file);
}
test_inode = file_inode(test_file);
} else if (test_lock_inode ||
diff --git a/lib/ts_bm.c b/lib/ts_bm.c
index 277cb4417ac2..4cf250031f0f 100644
--- a/lib/ts_bm.c
+++ b/lib/ts_bm.c
@@ -11,7 +11,7 @@
* [1] A Fast String Searching Algorithm, R.S. Boyer and Moore.
* Communications of the Association for Computing Machinery,
* 20(10), 1977, pp. 762-772.
- * http://www.cs.utexas.edu/users/moore/publications/fstrpos.pdf
+ * https://www.cs.utexas.edu/users/moore/publications/fstrpos.pdf
*
* [2] Handbook of Exact String Matching Algorithms, Thierry Lecroq, 2004
* http://www-igm.univ-mlv.fr/~lecroq/string/string.pdf
diff --git a/lib/xxhash.c b/lib/xxhash.c
index aa61e2a3802f..d5bb9ff10607 100644
--- a/lib/xxhash.c
+++ b/lib/xxhash.c
@@ -34,7 +34,7 @@
* ("BSD").
*
* You can contact the author at:
- * - xxHash homepage: http://cyan4973.github.io/xxHash/
+ * - xxHash homepage: https://cyan4973.github.io/xxHash/
* - xxHash source repository: https://github.com/Cyan4973/xxHash
*/
diff --git a/lib/xz/xz_crc32.c b/lib/xz/xz_crc32.c
index 912aae5fa09e..88a2c35e1b59 100644
--- a/lib/xz/xz_crc32.c
+++ b/lib/xz/xz_crc32.c
@@ -2,7 +2,7 @@
* CRC32 using the polynomial from IEEE-802.3
*
* Authors: Lasse Collin <lasse.collin@tukaani.org>
- * Igor Pavlov <http://7-zip.org/>
+ * Igor Pavlov <https://7-zip.org/>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
diff --git a/lib/xz/xz_dec_bcj.c b/lib/xz/xz_dec_bcj.c
index a768e6d28bbb..72ddac6ef2ec 100644
--- a/lib/xz/xz_dec_bcj.c
+++ b/lib/xz/xz_dec_bcj.c
@@ -2,7 +2,7 @@
* Branch/Call/Jump (BCJ) filter decoders
*
* Authors: Lasse Collin <lasse.collin@tukaani.org>
- * Igor Pavlov <http://7-zip.org/>
+ * Igor Pavlov <https://7-zip.org/>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
diff --git a/lib/xz/xz_dec_lzma2.c b/lib/xz/xz_dec_lzma2.c
index 156f26fdc4c9..9f336bc07ed6 100644
--- a/lib/xz/xz_dec_lzma2.c
+++ b/lib/xz/xz_dec_lzma2.c
@@ -2,7 +2,7 @@
* LZMA2 decoder
*
* Authors: Lasse Collin <lasse.collin@tukaani.org>
- * Igor Pavlov <http://7-zip.org/>
+ * Igor Pavlov <https://7-zip.org/>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
diff --git a/lib/xz/xz_lzma2.h b/lib/xz/xz_lzma2.h
index 071d67bee9f5..92d852d4f87a 100644
--- a/lib/xz/xz_lzma2.h
+++ b/lib/xz/xz_lzma2.h
@@ -2,7 +2,7 @@
* LZMA2 definitions
*
* Authors: Lasse Collin <lasse.collin@tukaani.org>
- * Igor Pavlov <http://7-zip.org/>
+ * Igor Pavlov <https://7-zip.org/>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
diff --git a/lib/xz/xz_stream.h b/lib/xz/xz_stream.h
index 66cb5a7055ec..430bb3a0d195 100644
--- a/lib/xz/xz_stream.h
+++ b/lib/xz/xz_stream.h
@@ -19,7 +19,7 @@
/*
* See the .xz file format specification at
- * http://tukaani.org/xz/xz-file-format.txt
+ * https://tukaani.org/xz/xz-file-format.txt
* to understand the container format.
*/