Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig               |   3
-rw-r--r--  lib/Kconfig.debug         |   6
-rw-r--r--  lib/Kconfig.kasan         |   2
-rw-r--r--  lib/Kconfig.kgdb          |   2
-rw-r--r--  lib/Makefile              |   1
-rw-r--r--  lib/assoc_array.c         |   2
-rw-r--r--  lib/buildid.c             |   6
-rw-r--r--  lib/devres.c              | 208
-rw-r--r--  lib/dhry_1.c              |   2
-rw-r--r--  lib/dhry_run.c            |   1
-rw-r--r--  lib/dump_stack.c          |  16
-rw-r--r--  lib/dynamic_debug.c       |   7
-rw-r--r--  lib/flex_proportions.c    |  77
-rw-r--r--  lib/fonts/Kconfig         |   3
-rw-r--r--  lib/fw_table.c            |  15
-rw-r--r--  lib/generic-radix-tree.c  |  35
-rw-r--r--  lib/kobject_uevent.c      |  24
-rw-r--r--  lib/maple_tree.c          |   6
-rw-r--r--  lib/math/div64.c          |  15
-rw-r--r--  lib/pci_iomap.c           | 180
-rw-r--r--  lib/raid6/Makefile        |   2
-rw-r--r--  lib/sort.c                |  20
-rw-r--r--  lib/stackdepot.c          |  65
-rw-r--r--  lib/stackinit_kunit.c     |   2
-rw-r--r--  lib/test_kmod.c           |   6
-rw-r--r--  lib/test_vmalloc.c        |  11
-rw-r--r--  lib/test_xarray.c         | 230
27 files changed, 350 insertions, 597 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 5ddda7c2ed9b..4557bb8a5256 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -70,9 +70,6 @@ source "lib/math/Kconfig"
config NO_GENERIC_PCI_IOPORT_MAP
bool
-config GENERIC_PCI_IOMAP
- bool
-
config GENERIC_IOMAP
bool
select GENERIC_PCI_IOMAP
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 2164f066e7b6..f4a12980a8da 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -2085,7 +2085,7 @@ config KCOV
depends on ARCH_HAS_KCOV
depends on CC_HAS_SANCOV_TRACE_PC || GCC_PLUGINS
depends on !ARCH_WANTS_NO_INSTR || HAVE_NOINSTR_HACK || \
- GCC_VERSION >= 120000 || CLANG_VERSION >= 130000
+ GCC_VERSION >= 120000 || CC_IS_CLANG
select DEBUG_FS
select GCC_PLUGIN_SANCOV if !CC_HAS_SANCOV_TRACE_PC
select OBJTOOL if HAVE_NOINSTR_HACK
@@ -2127,7 +2127,7 @@ config KCOV_IRQ_AREA_SIZE
menuconfig RUNTIME_TESTING_MENU
bool "Runtime Testing"
- def_bool y
+ default y
if RUNTIME_TESTING_MENU
@@ -2142,7 +2142,7 @@ config TEST_DHRY
To run the benchmark, it needs to be enabled explicitly, either from
the kernel command line (when built-in), or from userspace (when
- built-in or modular.
+ built-in or modular).
Run once during kernel boot:
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index e6eda054ab27..98016e137b7f 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -158,7 +158,7 @@ config KASAN_STACK
out-of-bounds bugs in stack variables.
With Clang, stack instrumentation has a problem that causes excessive
- stack usage, see https://bugs.llvm.org/show_bug.cgi?id=38809. Thus,
+ stack usage, see https://llvm.org/pr38809. Thus,
with Clang, this option is deemed unsafe.
This option is always disabled when compile-testing with Clang to
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
index 3b9a44008433..b5c0e6576749 100644
--- a/lib/Kconfig.kgdb
+++ b/lib/Kconfig.kgdb
@@ -43,7 +43,7 @@ config KGDB_SERIAL_CONSOLE
tristate "KGDB: use kgdb over the serial console"
select CONSOLE_POLL
select MAGIC_SYSRQ
- depends on TTY && HW_CONSOLE
+ depends on TTY && VT
default y
help
Share a serial console with kgdb. Sysrq-g must be used
diff --git a/lib/Makefile b/lib/Makefile
index 363852afa200..ffc6b2341b45 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -152,7 +152,6 @@ CFLAGS_debug_info.o += $(call cc-option, -femit-struct-debug-detailed=any)
obj-y += math/ crypto/
obj-$(CONFIG_GENERIC_IOMAP) += iomap.o
-obj-$(CONFIG_GENERIC_PCI_IOMAP) += pci_iomap.o
obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o
obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o
obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index ca0b4f360c1a..388e656ac974 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -938,7 +938,7 @@ static bool assoc_array_insert_mid_shortcut(struct assoc_array_edit *edit,
edit->leaf_p = &new_n0->slots[0];
pr_devel("<--%s() = ok [split shortcut]\n", __func__);
- return edit;
+ return true;
}
/**
diff --git a/lib/buildid.c b/lib/buildid.c
index e3a7acdeef0e..898301b49eb6 100644
--- a/lib/buildid.c
+++ b/lib/buildid.c
@@ -140,7 +140,7 @@ int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id,
return -EFAULT; /* page not mapped */
ret = -EINVAL;
- page_addr = kmap_atomic(page);
+ page_addr = kmap_local_page(page);
ehdr = (Elf32_Ehdr *)page_addr;
/* compare magic x7f "ELF" */
@@ -156,7 +156,7 @@ int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id,
else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64)
ret = get_build_id_64(page_addr, build_id, size);
out:
- kunmap_atomic(page_addr);
+ kunmap_local(page_addr);
put_page(page);
return ret;
}
@@ -174,7 +174,7 @@ int build_id_parse_buf(const void *buf, unsigned char *build_id, u32 buf_size)
return parse_build_id_buf(build_id, NULL, buf, buf_size);
}
-#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) || IS_ENABLED(CONFIG_CRASH_CORE)
+#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) || IS_ENABLED(CONFIG_VMCORE_INFO)
unsigned char vmlinux_build_id[BUILD_ID_SIZE_MAX] __ro_after_init;
/**
diff --git a/lib/devres.c b/lib/devres.c
index c44f104b58d5..fe0c63caeb68 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/device.h>
#include <linux/err.h>
-#include <linux/pci.h>
#include <linux/io.h>
#include <linux/gfp.h>
#include <linux/export.h>
@@ -311,212 +311,6 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
EXPORT_SYMBOL(devm_ioport_unmap);
#endif /* CONFIG_HAS_IOPORT_MAP */
-#ifdef CONFIG_PCI
-/*
- * PCI iomap devres
- */
-#define PCIM_IOMAP_MAX PCI_STD_NUM_BARS
-
-struct pcim_iomap_devres {
- void __iomem *table[PCIM_IOMAP_MAX];
-};
-
-static void pcim_iomap_release(struct device *gendev, void *res)
-{
- struct pci_dev *dev = to_pci_dev(gendev);
- struct pcim_iomap_devres *this = res;
- int i;
-
- for (i = 0; i < PCIM_IOMAP_MAX; i++)
- if (this->table[i])
- pci_iounmap(dev, this->table[i]);
-}
-
-/**
- * pcim_iomap_table - access iomap allocation table
- * @pdev: PCI device to access iomap table for
- *
- * Access iomap allocation table for @dev. If iomap table doesn't
- * exist and @pdev is managed, it will be allocated. All iomaps
- * recorded in the iomap table are automatically unmapped on driver
- * detach.
- *
- * This function might sleep when the table is first allocated but can
- * be safely called without context and guaranteed to succeed once
- * allocated.
- */
-void __iomem * const *pcim_iomap_table(struct pci_dev *pdev)
-{
- struct pcim_iomap_devres *dr, *new_dr;
-
- dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
- if (dr)
- return dr->table;
-
- new_dr = devres_alloc_node(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL,
- dev_to_node(&pdev->dev));
- if (!new_dr)
- return NULL;
- dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
- return dr->table;
-}
-EXPORT_SYMBOL(pcim_iomap_table);
-
-/**
- * pcim_iomap - Managed pcim_iomap()
- * @pdev: PCI device to iomap for
- * @bar: BAR to iomap
- * @maxlen: Maximum length of iomap
- *
- * Managed pci_iomap(). Map is automatically unmapped on driver
- * detach.
- */
-void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
-{
- void __iomem **tbl;
-
- BUG_ON(bar >= PCIM_IOMAP_MAX);
-
- tbl = (void __iomem **)pcim_iomap_table(pdev);
- if (!tbl || tbl[bar]) /* duplicate mappings not allowed */
- return NULL;
-
- tbl[bar] = pci_iomap(pdev, bar, maxlen);
- return tbl[bar];
-}
-EXPORT_SYMBOL(pcim_iomap);
-
-/**
- * pcim_iounmap - Managed pci_iounmap()
- * @pdev: PCI device to iounmap for
- * @addr: Address to unmap
- *
- * Managed pci_iounmap(). @addr must have been mapped using pcim_iomap().
- */
-void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
-{
- void __iomem **tbl;
- int i;
-
- pci_iounmap(pdev, addr);
-
- tbl = (void __iomem **)pcim_iomap_table(pdev);
- BUG_ON(!tbl);
-
- for (i = 0; i < PCIM_IOMAP_MAX; i++)
- if (tbl[i] == addr) {
- tbl[i] = NULL;
- return;
- }
- WARN_ON(1);
-}
-EXPORT_SYMBOL(pcim_iounmap);
-
-/**
- * pcim_iomap_regions - Request and iomap PCI BARs
- * @pdev: PCI device to map IO resources for
- * @mask: Mask of BARs to request and iomap
- * @name: Name used when requesting regions
- *
- * Request and iomap regions specified by @mask.
- */
-int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name)
-{
- void __iomem * const *iomap;
- int i, rc;
-
- iomap = pcim_iomap_table(pdev);
- if (!iomap)
- return -ENOMEM;
-
- for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
- unsigned long len;
-
- if (!(mask & (1 << i)))
- continue;
-
- rc = -EINVAL;
- len = pci_resource_len(pdev, i);
- if (!len)
- goto err_inval;
-
- rc = pci_request_region(pdev, i, name);
- if (rc)
- goto err_inval;
-
- rc = -ENOMEM;
- if (!pcim_iomap(pdev, i, 0))
- goto err_region;
- }
-
- return 0;
-
- err_region:
- pci_release_region(pdev, i);
- err_inval:
- while (--i >= 0) {
- if (!(mask & (1 << i)))
- continue;
- pcim_iounmap(pdev, iomap[i]);
- pci_release_region(pdev, i);
- }
-
- return rc;
-}
-EXPORT_SYMBOL(pcim_iomap_regions);
-
-/**
- * pcim_iomap_regions_request_all - Request all BARs and iomap specified ones
- * @pdev: PCI device to map IO resources for
- * @mask: Mask of BARs to iomap
- * @name: Name used when requesting regions
- *
- * Request all PCI BARs and iomap regions specified by @mask.
- */
-int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
- const char *name)
-{
- int request_mask = ((1 << 6) - 1) & ~mask;
- int rc;
-
- rc = pci_request_selected_regions(pdev, request_mask, name);
- if (rc)
- return rc;
-
- rc = pcim_iomap_regions(pdev, mask, name);
- if (rc)
- pci_release_selected_regions(pdev, request_mask);
- return rc;
-}
-EXPORT_SYMBOL(pcim_iomap_regions_request_all);
-
-/**
- * pcim_iounmap_regions - Unmap and release PCI BARs
- * @pdev: PCI device to map IO resources for
- * @mask: Mask of BARs to unmap and release
- *
- * Unmap and release regions specified by @mask.
- */
-void pcim_iounmap_regions(struct pci_dev *pdev, int mask)
-{
- void __iomem * const *iomap;
- int i;
-
- iomap = pcim_iomap_table(pdev);
- if (!iomap)
- return;
-
- for (i = 0; i < PCIM_IOMAP_MAX; i++) {
- if (!(mask & (1 << i)))
- continue;
-
- pcim_iounmap(pdev, iomap[i]);
- pci_release_region(pdev, i);
- }
-}
-EXPORT_SYMBOL(pcim_iounmap_regions);
-#endif /* CONFIG_PCI */
-
static void devm_arch_phys_ac_add_release(struct device *dev, void *res)
{
arch_phys_wc_del(*((int *)res));
diff --git a/lib/dhry_1.c b/lib/dhry_1.c
index 08edbbb19f57..ca6c87232c58 100644
--- a/lib/dhry_1.c
+++ b/lib/dhry_1.c
@@ -277,7 +277,7 @@ int dhry(int n)
dhry_assert_string_eq(Str_1_Loc, "DHRYSTONE PROGRAM, 1'ST STRING");
dhry_assert_string_eq(Str_2_Loc, "DHRYSTONE PROGRAM, 2'ND STRING");
- User_Time = ktime_to_ms(ktime_sub(End_Time, Begin_Time));
+ User_Time = ktime_ms_delta(End_Time, Begin_Time);
kfree(Ptr_Glob);
kfree(Next_Ptr_Glob);
diff --git a/lib/dhry_run.c b/lib/dhry_run.c
index f15ac666e9d3..e6a279dabf84 100644
--- a/lib/dhry_run.c
+++ b/lib/dhry_run.c
@@ -10,7 +10,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/mutex.h>
#include <linux/smp.h>
#define DHRY_VAX 1757
diff --git a/lib/dump_stack.c b/lib/dump_stack.c
index 83471e81501a..222c6d6c8281 100644
--- a/lib/dump_stack.c
+++ b/lib/dump_stack.c
@@ -96,15 +96,25 @@ static void __dump_stack(const char *log_lvl)
*/
asmlinkage __visible void dump_stack_lvl(const char *log_lvl)
{
+ bool in_panic = this_cpu_in_panic();
unsigned long flags;
/*
* Permit this cpu to perform nested stack dumps while serialising
- * against other CPUs
+ * against other CPUs, unless this CPU is in panic.
+ *
+ * When in panic, non-panic CPUs are not permitted to store new
+ * printk messages so there is no need to synchronize the output.
+ * This avoids potential deadlock in panic() if another CPU is
+ * holding and unable to release the printk_cpu_sync.
*/
- printk_cpu_sync_get_irqsave(flags);
+ if (!in_panic)
+ printk_cpu_sync_get_irqsave(flags);
+
__dump_stack(log_lvl);
- printk_cpu_sync_put_irqrestore(flags);
+
+ if (!in_panic)
+ printk_cpu_sync_put_irqrestore(flags);
}
EXPORT_SYMBOL(dump_stack_lvl);
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 6fba6423cc10..c78f335fa981 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -640,10 +640,9 @@ static int param_set_dyndbg_classnames(const char *instr, const struct kernel_pa
int cls_id, totct = 0;
bool wanted;
- cl_str = tmp = kstrdup(instr, GFP_KERNEL);
- p = strchr(cl_str, '\n');
- if (p)
- *p = '\0';
+ cl_str = tmp = kstrdup_and_replace(instr, '\n', '\0', GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
/* start with previously set state-bits, then modify */
curr_bits = old_bits = *dcp->bits;
diff --git a/lib/flex_proportions.c b/lib/flex_proportions.c
index 83332fefa6f4..84ecccddc771 100644
--- a/lib/flex_proportions.c
+++ b/lib/flex_proportions.c
@@ -84,83 +84,6 @@ bool fprop_new_period(struct fprop_global *p, int periods)
}
/*
- * ---- SINGLE ----
- */
-
-int fprop_local_init_single(struct fprop_local_single *pl)
-{
- pl->events = 0;
- pl->period = 0;
- raw_spin_lock_init(&pl->lock);
- return 0;
-}
-
-void fprop_local_destroy_single(struct fprop_local_single *pl)
-{
-}
-
-static void fprop_reflect_period_single(struct fprop_global *p,
- struct fprop_local_single *pl)
-{
- unsigned int period = p->period;
- unsigned long flags;
-
- /* Fast path - period didn't change */
- if (pl->period == period)
- return;
- raw_spin_lock_irqsave(&pl->lock, flags);
- /* Someone updated pl->period while we were spinning? */
- if (pl->period >= period) {
- raw_spin_unlock_irqrestore(&pl->lock, flags);
- return;
- }
- /* Aging zeroed our fraction? */
- if (period - pl->period < BITS_PER_LONG)
- pl->events >>= period - pl->period;
- else
- pl->events = 0;
- pl->period = period;
- raw_spin_unlock_irqrestore(&pl->lock, flags);
-}
-
-/* Event of type pl happened */
-void __fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl)
-{
- fprop_reflect_period_single(p, pl);
- pl->events++;
- percpu_counter_add(&p->events, 1);
-}
-
-/* Return fraction of events of type pl */
-void fprop_fraction_single(struct fprop_global *p,
- struct fprop_local_single *pl,
- unsigned long *numerator, unsigned long *denominator)
-{
- unsigned int seq;
- s64 num, den;
-
- do {
- seq = read_seqcount_begin(&p->sequence);
- fprop_reflect_period_single(p, pl);
- num = pl->events;
- den = percpu_counter_read_positive(&p->events);
- } while (read_seqcount_retry(&p->sequence, seq));
-
- /*
- * Make fraction <= 1 and denominator > 0 even in presence of percpu
- * counter errors
- */
- if (den <= num) {
- if (num)
- den = num;
- else
- den = 1;
- }
- *denominator = den;
- *numerator = num;
-}
-
-/*
* ---- PERCPU ----
*/
#define PROP_BATCH (8*(1+ilog2(nr_cpu_ids)))
diff --git a/lib/fonts/Kconfig b/lib/fonts/Kconfig
index 7ee468ef21ec..7e945fdcbf11 100644
--- a/lib/fonts/Kconfig
+++ b/lib/fonts/Kconfig
@@ -98,7 +98,8 @@ config FONT_10x18
config FONT_SUN8x16
bool "Sparc console 8x16 font"
- depends on (FRAMEBUFFER_CONSOLE && (FONTS || SPARC)) || BOOTX_TEXT
+ depends on (FRAMEBUFFER_CONSOLE && (FONTS || SPARC)) || \
+ BOOTX_TEXT || EARLYFB
help
This is the high resolution console font for Sun machines. Say Y.
diff --git a/lib/fw_table.c b/lib/fw_table.c
index c3569d2ba503..16291814450e 100644
--- a/lib/fw_table.c
+++ b/lib/fw_table.c
@@ -127,6 +127,7 @@ static __init_or_fwtbl_lib int call_handler(struct acpi_subtable_proc *proc,
*
* @id: table id (for debugging purposes)
* @table_size: size of the root table
+ * @max_length: maximum size of the table (ignore if 0)
* @table_header: where does the table start?
* @proc: array of acpi_subtable_proc struct containing entry id
* and associated handler with it
@@ -148,18 +149,21 @@ static __init_or_fwtbl_lib int call_handler(struct acpi_subtable_proc *proc,
int __init_or_fwtbl_lib
acpi_parse_entries_array(char *id, unsigned long table_size,
union fw_table_header *table_header,
+ unsigned long max_length,
struct acpi_subtable_proc *proc,
int proc_num, unsigned int max_entries)
{
- unsigned long table_end, subtable_len, entry_len;
+ unsigned long table_len, table_end, subtable_len, entry_len;
struct acpi_subtable_entry entry;
enum acpi_subtable_type type;
int count = 0;
int i;
type = acpi_get_subtable_type(id);
- table_end = (unsigned long)table_header +
- acpi_table_get_length(type, table_header);
+ table_len = acpi_table_get_length(type, table_header);
+ if (max_length && max_length < table_len)
+ table_len = max_length;
+ table_end = (unsigned long)table_header + table_len;
/* Parse all entries looking for a match. */
@@ -208,7 +212,8 @@ int __init_or_fwtbl_lib
cdat_table_parse(enum acpi_cdat_type type,
acpi_tbl_entry_handler_arg handler_arg,
void *arg,
- struct acpi_table_cdat *table_header)
+ struct acpi_table_cdat *table_header,
+ unsigned long length)
{
struct acpi_subtable_proc proc = {
.id = type,
@@ -222,6 +227,6 @@ cdat_table_parse(enum acpi_cdat_type type,
return acpi_parse_entries_array(ACPI_SIG_CDAT,
sizeof(struct acpi_table_cdat),
(union fw_table_header *)table_header,
- &proc, 1, 0);
+ length, &proc, 1, 0);
}
EXPORT_SYMBOL_FWTBL_LIB(cdat_table_parse);
diff --git a/lib/generic-radix-tree.c b/lib/generic-radix-tree.c
index 41f1bcdc4488..aaefb9b678c8 100644
--- a/lib/generic-radix-tree.c
+++ b/lib/generic-radix-tree.c
@@ -5,7 +5,7 @@
#include <linux/gfp.h>
#include <linux/kmemleak.h>
-#define GENRADIX_ARY (PAGE_SIZE / sizeof(struct genradix_node *))
+#define GENRADIX_ARY (GENRADIX_NODE_SIZE / sizeof(struct genradix_node *))
#define GENRADIX_ARY_SHIFT ilog2(GENRADIX_ARY)
struct genradix_node {
@@ -14,13 +14,13 @@ struct genradix_node {
struct genradix_node *children[GENRADIX_ARY];
/* Leaf: */
- u8 data[PAGE_SIZE];
+ u8 data[GENRADIX_NODE_SIZE];
};
};
static inline int genradix_depth_shift(unsigned depth)
{
- return PAGE_SHIFT + GENRADIX_ARY_SHIFT * depth;
+ return GENRADIX_NODE_SHIFT + GENRADIX_ARY_SHIFT * depth;
}
/*
@@ -33,7 +33,7 @@ static inline size_t genradix_depth_size(unsigned depth)
/* depth that's needed for a genradix that can address up to ULONG_MAX: */
#define GENRADIX_MAX_DEPTH \
- DIV_ROUND_UP(BITS_PER_LONG - PAGE_SHIFT, GENRADIX_ARY_SHIFT)
+ DIV_ROUND_UP(BITS_PER_LONG - GENRADIX_NODE_SHIFT, GENRADIX_ARY_SHIFT)
#define GENRADIX_DEPTH_MASK \
((unsigned long) (roundup_pow_of_two(GENRADIX_MAX_DEPTH + 1) - 1))
@@ -79,23 +79,12 @@ EXPORT_SYMBOL(__genradix_ptr);
static inline struct genradix_node *genradix_alloc_node(gfp_t gfp_mask)
{
- struct genradix_node *node;
-
- node = (struct genradix_node *)__get_free_page(gfp_mask|__GFP_ZERO);
-
- /*
- * We're using pages (not slab allocations) directly for kernel data
- * structures, so we need to explicitly inform kmemleak of them in order
- * to avoid false positive memory leak reports.
- */
- kmemleak_alloc(node, PAGE_SIZE, 1, gfp_mask);
- return node;
+ return kzalloc(GENRADIX_NODE_SIZE, gfp_mask);
}
static inline void genradix_free_node(struct genradix_node *node)
{
- kmemleak_free(node);
- free_page((unsigned long)node);
+ kfree(node);
}
/*
@@ -200,7 +189,7 @@ restart:
i++;
iter->offset = round_down(iter->offset + objs_per_ptr,
objs_per_ptr);
- iter->pos = (iter->offset >> PAGE_SHIFT) *
+ iter->pos = (iter->offset >> GENRADIX_NODE_SHIFT) *
objs_per_page;
if (i == GENRADIX_ARY)
goto restart;
@@ -209,7 +198,7 @@ restart:
n = n->children[i];
}
- return &n->data[iter->offset & (PAGE_SIZE - 1)];
+ return &n->data[iter->offset & (GENRADIX_NODE_SIZE - 1)];
}
EXPORT_SYMBOL(__genradix_iter_peek);
@@ -235,7 +224,7 @@ restart:
if (ilog2(iter->offset) >= genradix_depth_shift(level)) {
iter->offset = genradix_depth_size(level);
- iter->pos = (iter->offset >> PAGE_SHIFT) * objs_per_page;
+ iter->pos = (iter->offset >> GENRADIX_NODE_SHIFT) * objs_per_page;
iter->offset -= obj_size_plus_page_remainder;
iter->pos--;
@@ -251,7 +240,7 @@ restart:
size_t objs_per_ptr = genradix_depth_size(level);
iter->offset = round_down(iter->offset, objs_per_ptr);
- iter->pos = (iter->offset >> PAGE_SHIFT) * objs_per_page;
+ iter->pos = (iter->offset >> GENRADIX_NODE_SHIFT) * objs_per_page;
if (!iter->offset)
return NULL;
@@ -267,7 +256,7 @@ restart:
n = n->children[i];
}
- return &n->data[iter->offset & (PAGE_SIZE - 1)];
+ return &n->data[iter->offset & (GENRADIX_NODE_SIZE - 1)];
}
EXPORT_SYMBOL(__genradix_iter_peek_prev);
@@ -289,7 +278,7 @@ int __genradix_prealloc(struct __genradix *radix, size_t size,
{
size_t offset;
- for (offset = 0; offset < size; offset += PAGE_SIZE)
+ for (offset = 0; offset < size; offset += GENRADIX_NODE_SIZE)
if (!__genradix_ptr_alloc(radix, offset, gfp_mask))
return -ENOMEM;
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index fb9a2f06dd1e..03b427e2707e 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -30,7 +30,7 @@
#include <net/net_namespace.h>
-u64 uevent_seqnum;
+atomic64_t uevent_seqnum;
#ifdef CONFIG_UEVENT_HELPER
char uevent_helper[UEVENT_HELPER_PATH_LEN] = CONFIG_UEVENT_HELPER_PATH;
#endif
@@ -42,10 +42,9 @@ struct uevent_sock {
#ifdef CONFIG_NET
static LIST_HEAD(uevent_sock_list);
-#endif
-
-/* This lock protects uevent_seqnum and uevent_sock_list */
+/* This lock protects uevent_sock_list */
static DEFINE_MUTEX(uevent_sock_mutex);
+#endif
/* the strings here must match the enum in include/linux/kobject.h */
static const char *kobject_actions[] = {
@@ -315,6 +314,7 @@ static int uevent_net_broadcast_untagged(struct kobj_uevent_env *env,
int retval = 0;
/* send netlink message */
+ mutex_lock(&uevent_sock_mutex);
list_for_each_entry(ue_sk, &uevent_sock_list, list) {
struct sock *uevent_sock = ue_sk->sk;
@@ -334,6 +334,7 @@ static int uevent_net_broadcast_untagged(struct kobj_uevent_env *env,
if (retval == -ENOBUFS || retval == -ESRCH)
retval = 0;
}
+ mutex_unlock(&uevent_sock_mutex);
consume_skb(skb);
return retval;
@@ -583,16 +584,14 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
break;
}
- mutex_lock(&uevent_sock_mutex);
/* we will send an event, so request a new sequence number */
- retval = add_uevent_var(env, "SEQNUM=%llu", ++uevent_seqnum);
- if (retval) {
- mutex_unlock(&uevent_sock_mutex);
+ retval = add_uevent_var(env, "SEQNUM=%llu",
+ atomic64_inc_return(&uevent_seqnum));
+ if (retval)
goto exit;
- }
+
retval = kobject_uevent_net_broadcast(kobj, env, action_string,
devpath);
- mutex_unlock(&uevent_sock_mutex);
#ifdef CONFIG_UEVENT_HELPER
/* call uevent_helper, usually only enabled during early boot */
@@ -688,7 +687,8 @@ static int uevent_net_broadcast(struct sock *usk, struct sk_buff *skb,
int ret;
/* bump and prepare sequence number */
- ret = snprintf(buf, sizeof(buf), "SEQNUM=%llu", ++uevent_seqnum);
+ ret = snprintf(buf, sizeof(buf), "SEQNUM=%llu",
+ atomic64_inc_return(&uevent_seqnum));
if (ret < 0 || (size_t)ret >= sizeof(buf))
return -ENOMEM;
ret++;
@@ -742,9 +742,7 @@ static int uevent_net_rcv_skb(struct sk_buff *skb, struct nlmsghdr *nlh,
return -EPERM;
}
- mutex_lock(&uevent_sock_mutex);
ret = uevent_net_broadcast(net->uevent_sock->sk, skb, extack);
- mutex_unlock(&uevent_sock_mutex);
return ret;
}
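
The kobject_uevent changes above replace the mutex-serialised ++uevent_seqnum with atomic64_inc_return(), so sequence numbers stay unique without holding uevent_sock_mutex across the broadcast. A minimal standalone illustration of the same idea in plain C11 (userspace sketch, not kernel code; the variable name merely mirrors the kernel's):

#include <stdatomic.h>
#include <stdio.h>

/* Mirrors the role of the kernel's uevent_seqnum; illustration only. */
static atomic_ullong uevent_seqnum;

static unsigned long long next_seqnum(void)
{
	/* atomic_fetch_add returns the old value, so +1 matches atomic64_inc_return() */
	return atomic_fetch_add(&uevent_seqnum, 1) + 1;
}

int main(void)
{
	printf("%llu\n", next_seqnum());	/* 1 */
	printf("%llu\n", next_seqnum());	/* 2 */
	return 0;
}
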
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index af0970288727..55e1b35bf877 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -1307,8 +1307,8 @@ static inline void mas_free(struct ma_state *mas, struct maple_enode *used)
}
/*
- * mas_node_count() - Check if enough nodes are allocated and request more if
- * there is not enough nodes.
+ * mas_node_count_gfp() - Check if enough nodes are allocated and request more
+ * if there is not enough nodes.
* @mas: The maple state
* @count: The number of nodes needed
* @gfp: the gfp flags
@@ -2271,8 +2271,6 @@ bool mast_spanning_rebalance(struct maple_subtree_state *mast)
struct ma_state l_tmp = *mast->orig_l;
unsigned char depth = 0;
- r_tmp = *mast->orig_r;
- l_tmp = *mast->orig_l;
do {
mas_ascend(mast->orig_r);
mas_ascend(mast->orig_l);
diff --git a/lib/math/div64.c b/lib/math/div64.c
index 55a81782e271..191761b1b623 100644
--- a/lib/math/div64.c
+++ b/lib/math/div64.c
@@ -22,6 +22,7 @@
#include <linux/export.h>
#include <linux/math.h>
#include <linux/math64.h>
+#include <linux/minmax.h>
#include <linux/log2.h>
/* Not needed on 64bit architectures */
@@ -191,6 +192,20 @@ u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 c)
/* can a * b overflow ? */
if (ilog2(a) + ilog2(b) > 62) {
/*
+ * Note that the algorithm after the if block below might lose
+ * some precision and the result is more exact for b > a. So
+ * exchange a and b if a is bigger than b.
+ *
+ * For example with a = 43980465100800, b = 100000000, c = 1000000000
+ * the below calculation doesn't modify b at all because div == 0
+ * and then shift becomes 45 + 26 - 62 = 9 and so the result
+ * becomes 4398035251080. However with a and b swapped the exact
+ * result is calculated (i.e. 4398046510080).
+ */
+ if (a > b)
+ swap(a, b);
+
+ /*
* (b * a) / c is equal to
*
* (b / c) * a +
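
The new comment above quotes concrete numbers for the precision issue. As a minimal standalone check (plain C using 128-bit arithmetic as ground truth, not the kernel's mul_u64_u64_div_u64() path), the exact quotient for a = 43980465100800, b = 100000000, c = 1000000000 is indeed 4398046510080:

#include <stdio.h>

int main(void)
{
	unsigned long long a = 43980465100800ULL;
	unsigned long long b = 100000000ULL;
	unsigned long long c = 1000000000ULL;
	/* __int128 is a GCC/Clang extension; the 128-bit product cannot overflow here */
	unsigned __int128 exact = (unsigned __int128)a * b / c;

	printf("%llu\n", (unsigned long long)exact);	/* 4398046510080 */
	return 0;
}
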
diff --git a/lib/pci_iomap.c b/lib/pci_iomap.c
deleted file mode 100644
index ce39ce9f3526..000000000000
--- a/lib/pci_iomap.c
+++ /dev/null
@@ -1,180 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Implement the default iomap interfaces
- *
- * (C) Copyright 2004 Linus Torvalds
- */
-#include <linux/pci.h>
-#include <linux/io.h>
-
-#include <linux/export.h>
-
-#ifdef CONFIG_PCI
-/**
- * pci_iomap_range - create a virtual mapping cookie for a PCI BAR
- * @dev: PCI device that owns the BAR
- * @bar: BAR number
- * @offset: map memory at the given offset in BAR
- * @maxlen: max length of the memory to map
- *
- * Using this function you will get a __iomem address to your device BAR.
- * You can access it using ioread*() and iowrite*(). These functions hide
- * the details if this is a MMIO or PIO address space and will just do what
- * you expect from them in the correct way.
- *
- * @maxlen specifies the maximum length to map. If you want to get access to
- * the complete BAR from offset to the end, pass %0 here.
- * */
-void __iomem *pci_iomap_range(struct pci_dev *dev,
- int bar,
- unsigned long offset,
- unsigned long maxlen)
-{
- resource_size_t start = pci_resource_start(dev, bar);
- resource_size_t len = pci_resource_len(dev, bar);
- unsigned long flags = pci_resource_flags(dev, bar);
-
- if (len <= offset || !start)
- return NULL;
- len -= offset;
- start += offset;
- if (maxlen && len > maxlen)
- len = maxlen;
- if (flags & IORESOURCE_IO)
- return __pci_ioport_map(dev, start, len);
- if (flags & IORESOURCE_MEM)
- return ioremap(start, len);
- /* What? */
- return NULL;
-}
-EXPORT_SYMBOL(pci_iomap_range);
-
-/**
- * pci_iomap_wc_range - create a virtual WC mapping cookie for a PCI BAR
- * @dev: PCI device that owns the BAR
- * @bar: BAR number
- * @offset: map memory at the given offset in BAR
- * @maxlen: max length of the memory to map
- *
- * Using this function you will get a __iomem address to your device BAR.
- * You can access it using ioread*() and iowrite*(). These functions hide
- * the details if this is a MMIO or PIO address space and will just do what
- * you expect from them in the correct way. When possible write combining
- * is used.
- *
- * @maxlen specifies the maximum length to map. If you want to get access to
- * the complete BAR from offset to the end, pass %0 here.
- * */
-void __iomem *pci_iomap_wc_range(struct pci_dev *dev,
- int bar,
- unsigned long offset,
- unsigned long maxlen)
-{
- resource_size_t start = pci_resource_start(dev, bar);
- resource_size_t len = pci_resource_len(dev, bar);
- unsigned long flags = pci_resource_flags(dev, bar);
-
-
- if (flags & IORESOURCE_IO)
- return NULL;
-
- if (len <= offset || !start)
- return NULL;
-
- len -= offset;
- start += offset;
- if (maxlen && len > maxlen)
- len = maxlen;
-
- if (flags & IORESOURCE_MEM)
- return ioremap_wc(start, len);
-
- /* What? */
- return NULL;
-}
-EXPORT_SYMBOL_GPL(pci_iomap_wc_range);
-
-/**
- * pci_iomap - create a virtual mapping cookie for a PCI BAR
- * @dev: PCI device that owns the BAR
- * @bar: BAR number
- * @maxlen: length of the memory to map
- *
- * Using this function you will get a __iomem address to your device BAR.
- * You can access it using ioread*() and iowrite*(). These functions hide
- * the details if this is a MMIO or PIO address space and will just do what
- * you expect from them in the correct way.
- *
- * @maxlen specifies the maximum length to map. If you want to get access to
- * the complete BAR without checking for its length first, pass %0 here.
- * */
-void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
-{
- return pci_iomap_range(dev, bar, 0, maxlen);
-}
-EXPORT_SYMBOL(pci_iomap);
-
-/**
- * pci_iomap_wc - create a virtual WC mapping cookie for a PCI BAR
- * @dev: PCI device that owns the BAR
- * @bar: BAR number
- * @maxlen: length of the memory to map
- *
- * Using this function you will get a __iomem address to your device BAR.
- * You can access it using ioread*() and iowrite*(). These functions hide
- * the details if this is a MMIO or PIO address space and will just do what
- * you expect from them in the correct way. When possible write combining
- * is used.
- *
- * @maxlen specifies the maximum length to map. If you want to get access to
- * the complete BAR without checking for its length first, pass %0 here.
- * */
-void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
-{
- return pci_iomap_wc_range(dev, bar, 0, maxlen);
-}
-EXPORT_SYMBOL_GPL(pci_iomap_wc);
-
-/*
- * pci_iounmap() somewhat illogically comes from lib/iomap.c for the
- * CONFIG_GENERIC_IOMAP case, because that's the code that knows about
- * the different IOMAP ranges.
- *
- * But if the architecture does not use the generic iomap code, and if
- * it has _not_ defined it's own private pci_iounmap function, we define
- * it here.
- *
- * NOTE! This default implementation assumes that if the architecture
- * support ioport mapping (HAS_IOPORT_MAP), the ioport mapping will
- * be fixed to the range [ PCI_IOBASE, PCI_IOBASE+IO_SPACE_LIMIT [,
- * and does not need unmapping with 'ioport_unmap()'.
- *
- * If you have different rules for your architecture, you need to
- * implement your own pci_iounmap() that knows the rules for where
- * and how IO vs MEM get mapped.
- *
- * This code is odd, and the ARCH_HAS/ARCH_WANTS #define logic comes
- * from legacy <asm-generic/io.h> header file behavior. In particular,
- * it would seem to make sense to do the iounmap(p) for the non-IO-space
- * case here regardless, but that's not what the old header file code
- * did. Probably incorrectly, but this is meant to be bug-for-bug
- * compatible.
- */
-#if defined(ARCH_WANTS_GENERIC_PCI_IOUNMAP)
-
-void pci_iounmap(struct pci_dev *dev, void __iomem *p)
-{
-#ifdef ARCH_HAS_GENERIC_IOPORT_MAP
- uintptr_t start = (uintptr_t) PCI_IOBASE;
- uintptr_t addr = (uintptr_t) p;
-
- if (addr >= start && addr < start + IO_SPACE_LIMIT)
- return;
- iounmap(p);
-#endif
-}
-EXPORT_SYMBOL(pci_iounmap);
-
-#endif /* ARCH_WANTS_GENERIC_PCI_IOUNMAP */
-
-#endif /* CONFIG_PCI */
diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile
index 1c5420ff254e..385a94aa0b99 100644
--- a/lib/raid6/Makefile
+++ b/lib/raid6/Makefile
@@ -21,7 +21,7 @@ altivec_flags += -isystem $(shell $(CC) -print-file-name=include)
ifdef CONFIG_CC_IS_CLANG
# clang ppc port does not yet support -maltivec when -msoft-float is
# enabled. A future release of clang will resolve this
-# https://bugs.llvm.org/show_bug.cgi?id=31177
+# https://llvm.org/pr31177
CFLAGS_REMOVE_altivec1.o += -msoft-float
CFLAGS_REMOVE_altivec2.o += -msoft-float
CFLAGS_REMOVE_altivec4.o += -msoft-float
diff --git a/lib/sort.c b/lib/sort.c
index b399bf10d675..a0509088f82a 100644
--- a/lib/sort.c
+++ b/lib/sort.c
@@ -215,6 +215,7 @@ void sort_r(void *base, size_t num, size_t size,
/* pre-scale counters for performance */
size_t n = num * size, a = (num/2) * size;
const unsigned int lsbit = size & -size; /* Used to find parent */
+ size_t shift = 0;
if (!a) /* num < 2 || size == 0 */
return;
@@ -242,12 +243,21 @@ void sort_r(void *base, size_t num, size_t size,
for (;;) {
size_t b, c, d;
- if (a) /* Building heap: sift down --a */
- a -= size;
- else if (n -= size) /* Sorting: Extract root to --n */
+ if (a) /* Building heap: sift down a */
+ a -= size << shift;
+ else if (n > 3 * size) { /* Sorting: Extract two largest elements */
+ n -= size;
do_swap(base, base + n, size, swap_func, priv);
- else /* Sort complete */
+ shift = do_cmp(base + size, base + 2 * size, cmp_func, priv) <= 0;
+ a = size << shift;
+ n -= size;
+ do_swap(base + a, base + n, size, swap_func, priv);
+ } else if (n > size) { /* Sorting: Extract root */
+ n -= size;
+ do_swap(base, base + n, size, swap_func, priv);
+ } else { /* Sort complete */
break;
+ }
/*
* Sift element at "a" down into heap. This is the
@@ -262,7 +272,7 @@ void sort_r(void *base, size_t num, size_t size,
* average, 3/4 worst-case.)
*/
for (b = a; c = 2*b + size, (d = c + size) < n;)
- b = do_cmp(base + c, base + d, cmp_func, priv) >= 0 ? c : d;
+ b = do_cmp(base + c, base + d, cmp_func, priv) > 0 ? c : d;
if (d == n) /* Special case last leaf with no sibling */
b = c;
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 4a7055a63d9f..af6cc19a2003 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -36,53 +36,11 @@
#include <linux/memblock.h>
#include <linux/kasan-enabled.h>
-#define DEPOT_HANDLE_BITS (sizeof(depot_stack_handle_t) * 8)
-
-#define DEPOT_POOL_ORDER 2 /* Pool size order, 4 pages */
-#define DEPOT_POOL_SIZE (1LL << (PAGE_SHIFT + DEPOT_POOL_ORDER))
-#define DEPOT_STACK_ALIGN 4
-#define DEPOT_OFFSET_BITS (DEPOT_POOL_ORDER + PAGE_SHIFT - DEPOT_STACK_ALIGN)
-#define DEPOT_POOL_INDEX_BITS (DEPOT_HANDLE_BITS - DEPOT_OFFSET_BITS - \
- STACK_DEPOT_EXTRA_BITS)
#define DEPOT_POOLS_CAP 8192
+/* The pool_index is offset by 1 so the first record does not have a 0 handle. */
#define DEPOT_MAX_POOLS \
- (((1LL << (DEPOT_POOL_INDEX_BITS)) < DEPOT_POOLS_CAP) ? \
- (1LL << (DEPOT_POOL_INDEX_BITS)) : DEPOT_POOLS_CAP)
-
-/* Compact structure that stores a reference to a stack. */
-union handle_parts {
- depot_stack_handle_t handle;
- struct {
- u32 pool_index : DEPOT_POOL_INDEX_BITS;
- u32 offset : DEPOT_OFFSET_BITS;
- u32 extra : STACK_DEPOT_EXTRA_BITS;
- };
-};
-
-struct stack_record {
- struct list_head hash_list; /* Links in the hash table */
- u32 hash; /* Hash in hash table */
- u32 size; /* Number of stored frames */
- union handle_parts handle; /* Constant after initialization */
- refcount_t count;
- union {
- unsigned long entries[CONFIG_STACKDEPOT_MAX_FRAMES]; /* Frames */
- struct {
- /*
- * An important invariant of the implementation is to
- * only place a stack record onto the freelist iff its
- * refcount is zero. Because stack records with a zero
- * refcount are never considered as valid, it is safe to
- * union @entries and freelist management state below.
- * Conversely, as soon as an entry is off the freelist
- * and its refcount becomes non-zero, the below must not
- * be accessed until being placed back on the freelist.
- */
- struct list_head free_list; /* Links in the freelist */
- unsigned long rcu_state; /* RCU cookie */
- };
- };
-};
+ (((1LL << (DEPOT_POOL_INDEX_BITS)) - 1 < DEPOT_POOLS_CAP) ? \
+ (1LL << (DEPOT_POOL_INDEX_BITS)) - 1 : DEPOT_POOLS_CAP)
static bool stack_depot_disabled;
static bool __stack_depot_early_init_requested __initdata = IS_ENABLED(CONFIG_STACKDEPOT_ALWAYS_INIT);
@@ -372,7 +330,7 @@ static struct stack_record *depot_pop_free_pool(void **prealloc, size_t size)
stack = current_pool + pool_offset;
/* Pre-initialize handle once. */
- stack->handle.pool_index = pool_index;
+ stack->handle.pool_index = pool_index + 1;
stack->handle.offset = pool_offset >> DEPOT_STACK_ALIGN;
stack->handle.extra = 0;
INIT_LIST_HEAD(&stack->hash_list);
@@ -483,18 +441,19 @@ static struct stack_record *depot_fetch_stack(depot_stack_handle_t handle)
const int pools_num_cached = READ_ONCE(pools_num);
union handle_parts parts = { .handle = handle };
void *pool;
+ u32 pool_index = parts.pool_index - 1;
size_t offset = parts.offset << DEPOT_STACK_ALIGN;
struct stack_record *stack;
lockdep_assert_not_held(&pool_lock);
- if (parts.pool_index > pools_num_cached) {
+ if (pool_index >= pools_num_cached) {
WARN(1, "pool index %d out of bounds (%d) for stack id %08x\n",
- parts.pool_index, pools_num_cached, handle);
+ pool_index, pools_num_cached, handle);
return NULL;
}
- pool = stack_pools[parts.pool_index];
+ pool = stack_pools[pool_index];
if (WARN_ON(!pool))
return NULL;
@@ -728,6 +687,14 @@ depot_stack_handle_t stack_depot_save(unsigned long *entries,
}
EXPORT_SYMBOL_GPL(stack_depot_save);
+struct stack_record *__stack_depot_get_stack_record(depot_stack_handle_t handle)
+{
+ if (!handle)
+ return NULL;
+
+ return depot_fetch_stack(handle);
+}
+
unsigned int stack_depot_fetch(depot_stack_handle_t handle,
unsigned long **entries)
{
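
The stackdepot change above biases the stored pool_index by +1 so that the very first record (pool 0, offset 0, extra 0) no longer encodes to a handle of 0, keeping 0 reserved as the invalid handle. A minimal standalone sketch with assumed field widths (not the kernel's DEPOT_POOL_INDEX_BITS / DEPOT_OFFSET_BITS / STACK_DEPOT_EXTRA_BITS layout):

#include <assert.h>
#include <stdint.h>

/* Field widths are assumptions chosen only to add up to 32 bits. */
union handle_parts {
	uint32_t handle;
	struct {
		uint32_t pool_index : 16;	/* stored biased by +1 */
		uint32_t offset     : 11;
		uint32_t extra      : 5;
	};
};

int main(void)
{
	union handle_parts h = { .handle = 0 };

	h.pool_index = 0 + 1;	/* record actually lives in pool 0 */
	h.offset = 0;
	h.extra = 0;

	assert(h.handle != 0);		/* first record no longer encodes to handle 0 */
	assert(h.pool_index - 1 == 0);	/* decoding subtracts the bias again */
	return 0;
}
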
diff --git a/lib/stackinit_kunit.c b/lib/stackinit_kunit.c
index dc3c68f46f0a..3bc14d1ee816 100644
--- a/lib/stackinit_kunit.c
+++ b/lib/stackinit_kunit.c
@@ -417,7 +417,7 @@ static noinline int leaf_switch_2_none(unsigned long sp, bool fill,
* These are expected to fail for most configurations because neither
* GCC nor Clang have a way to perform initialization of variables in
* non-code areas (i.e. in a switch statement before the first "case").
- * https://bugs.llvm.org/show_bug.cgi?id=44916
+ * https://llvm.org/pr44916
*/
DEFINE_TEST_DRIVER(switch_1_none, uint64_t, SCALAR, ALWAYS_FAIL);
DEFINE_TEST_DRIVER(switch_2_none, uint64_t, SCALAR, ALWAYS_FAIL);
diff --git a/lib/test_kmod.c b/lib/test_kmod.c
index 43d9dfd57ab7..1eec3b7ac67c 100644
--- a/lib/test_kmod.c
+++ b/lib/test_kmod.c
@@ -58,11 +58,14 @@ static int num_test_devs;
* @need_mod_put for your tests case.
*/
enum kmod_test_case {
+ /* private: */
__TEST_KMOD_INVALID = 0,
+ /* public: */
TEST_KMOD_DRIVER,
TEST_KMOD_FS_TYPE,
+ /* private: */
__TEST_KMOD_MAX,
};
@@ -82,6 +85,7 @@ struct kmod_test_device;
* @ret_sync: return value if request_module() is used, sync request for
* @TEST_KMOD_DRIVER
* @fs_sync: return value of get_fs_type() for @TEST_KMOD_FS_TYPE
+ * @task_sync: kthread's task_struct or %NULL if not running
* @thread_idx: thread ID
* @test_dev: test device test is being performed under
* @need_mod_put: Some tests (get_fs_type() is one) requires putting the module
@@ -108,7 +112,7 @@ struct kmod_test_device_info {
* @dev: pointer to misc_dev's own struct device
* @config_mutex: protects configuration of test
* @trigger_mutex: the test trigger can only be fired once at a time
- * @thread_lock: protects @done count, and the @info per each thread
+ * @thread_mutex: protects @done count, and the @info per each thread
* @done: number of threads which have completed or failed
* @test_is_oom: when we run out of memory, use this to halt moving forward
* @kthreads_done: completion used to signal when all work is done
diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c
index 3718d9886407..4ddf769861ff 100644
--- a/lib/test_vmalloc.c
+++ b/lib/test_vmalloc.c
@@ -117,7 +117,7 @@ static int align_shift_alloc_test(void)
int i;
for (i = 0; i < BITS_PER_LONG; i++) {
- align = ((unsigned long) 1) << i;
+ align = 1UL << i;
ptr = __vmalloc_node(PAGE_SIZE, align, GFP_KERNEL|__GFP_ZERO, 0,
__builtin_return_address(0));
@@ -501,7 +501,7 @@ static int test_func(void *private)
}
static int
-init_test_configurtion(void)
+init_test_configuration(void)
{
/*
* A maximum number of workers is defined as hard-coded
@@ -531,7 +531,7 @@ static void do_concurrent_test(void)
/*
* Set some basic configurations plus sanity check.
*/
- ret = init_test_configurtion();
+ ret = init_test_configuration();
if (ret < 0)
return;
@@ -600,12 +600,7 @@ static int vmalloc_test_init(void)
return -EAGAIN; /* Fail will directly unload the module */
}
-static void vmalloc_test_exit(void)
-{
-}
-
module_init(vmalloc_test_init)
-module_exit(vmalloc_test_exit)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Uladzislau Rezki");
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index e77d4856442c..ebe2af2e072d 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -423,6 +423,59 @@ static noinline void check_cmpxchg(struct xarray *xa)
XA_BUG_ON(xa, !xa_empty(xa));
}
+static noinline void check_cmpxchg_order(struct xarray *xa)
+{
+#ifdef CONFIG_XARRAY_MULTI
+ void *FIVE = xa_mk_value(5);
+ unsigned int i, order = 3;
+
+ XA_BUG_ON(xa, xa_store_order(xa, 0, order, FIVE, GFP_KERNEL));
+
+ /* Check entry FIVE has the order saved */
+ XA_BUG_ON(xa, xa_get_order(xa, xa_to_value(FIVE)) != order);
+
+ /* Check all the tied indexes have the same entry and order */
+ for (i = 0; i < (1 << order); i++) {
+ XA_BUG_ON(xa, xa_load(xa, i) != FIVE);
+ XA_BUG_ON(xa, xa_get_order(xa, i) != order);
+ }
+
+ /* Ensure that nothing is stored at index '1 << order' */
+ XA_BUG_ON(xa, xa_load(xa, 1 << order) != NULL);
+
+ /*
+ * Additionally, keep the node information and the order at
+ * '1 << order'
+ */
+ XA_BUG_ON(xa, xa_store_order(xa, 1 << order, order, FIVE, GFP_KERNEL));
+ for (i = (1 << order); i < (1 << order) + (1 << order) - 1; i++) {
+ XA_BUG_ON(xa, xa_load(xa, i) != FIVE);
+ XA_BUG_ON(xa, xa_get_order(xa, i) != order);
+ }
+
+ /* Conditionally replace FIVE entry at index '0' with NULL */
+ XA_BUG_ON(xa, xa_cmpxchg(xa, 0, FIVE, NULL, GFP_KERNEL) != FIVE);
+
+ /* Verify the order is lost at FIVE (and old) entries */
+ XA_BUG_ON(xa, xa_get_order(xa, xa_to_value(FIVE)) != 0);
+
+ /* Verify the order and entries are lost in all the tied indexes */
+ for (i = 0; i < (1 << order); i++) {
+ XA_BUG_ON(xa, xa_load(xa, i) != NULL);
+ XA_BUG_ON(xa, xa_get_order(xa, i) != 0);
+ }
+
+ /* Verify node and order are kept at '1 << order' */
+ for (i = (1 << order); i < (1 << order) + (1 << order) - 1; i++) {
+ XA_BUG_ON(xa, xa_load(xa, i) != FIVE);
+ XA_BUG_ON(xa, xa_get_order(xa, i) != order);
+ }
+
+ xa_store_order(xa, 0, BITS_PER_LONG - 1, NULL, GFP_KERNEL);
+ XA_BUG_ON(xa, !xa_empty(xa));
+#endif
+}
+
static noinline void check_reserve(struct xarray *xa)
{
void *entry;
@@ -674,6 +727,181 @@ static noinline void check_multi_store(struct xarray *xa)
#endif
}
+#ifdef CONFIG_XARRAY_MULTI
+/* mimics page cache __filemap_add_folio() */
+static noinline void check_xa_multi_store_adv_add(struct xarray *xa,
+ unsigned long index,
+ unsigned int order,
+ void *p)
+{
+ XA_STATE(xas, xa, index);
+ unsigned int nrpages = 1UL << order;
+
+	/* users are responsible for index alignment to the order when adding */
+ XA_BUG_ON(xa, index & (nrpages - 1));
+
+ xas_set_order(&xas, index, order);
+
+ do {
+ xas_lock_irq(&xas);
+
+ xas_store(&xas, p);
+ XA_BUG_ON(xa, xas_error(&xas));
+ XA_BUG_ON(xa, xa_load(xa, index) != p);
+
+ xas_unlock_irq(&xas);
+ } while (xas_nomem(&xas, GFP_KERNEL));
+
+ XA_BUG_ON(xa, xas_error(&xas));
+}
+
+/* mimics page_cache_delete() */
+static noinline void check_xa_multi_store_adv_del_entry(struct xarray *xa,
+ unsigned long index,
+ unsigned int order)
+{
+ XA_STATE(xas, xa, index);
+
+ xas_set_order(&xas, index, order);
+ xas_store(&xas, NULL);
+ xas_init_marks(&xas);
+}
+
+static noinline void check_xa_multi_store_adv_delete(struct xarray *xa,
+ unsigned long index,
+ unsigned int order)
+{
+ xa_lock_irq(xa);
+ check_xa_multi_store_adv_del_entry(xa, index, order);
+ xa_unlock_irq(xa);
+}
+
+/* mimics page cache filemap_get_entry() */
+static noinline void *test_get_entry(struct xarray *xa, unsigned long index)
+{
+ XA_STATE(xas, xa, index);
+ void *p;
+ static unsigned int loops = 0;
+
+ rcu_read_lock();
+repeat:
+ xas_reset(&xas);
+ p = xas_load(&xas);
+ if (xas_retry(&xas, p))
+ goto repeat;
+ rcu_read_unlock();
+
+ /*
+ * This is not part of the page cache, this selftest is pretty
+ * aggressive and does not want to trust the xarray API but rather
+ * test it, and for order 20 (4 GiB block size) we can loop over
+ * over a million entries which can cause a soft lockup. Page cache
+ * APIs won't be stupid, proper page cache APIs loop over the proper
+ * order so when using a larger order we skip shared entries.
+ */
+ if (++loops % XA_CHECK_SCHED == 0)
+ schedule();
+
+ return p;
+}
+
+static unsigned long some_val = 0xdeadbeef;
+static unsigned long some_val_2 = 0xdeaddead;
+
+/* mimics the page cache usage */
+static noinline void check_xa_multi_store_adv(struct xarray *xa,
+ unsigned long pos,
+ unsigned int order)
+{
+ unsigned int nrpages = 1UL << order;
+ unsigned long index, base, next_index, next_next_index;
+ unsigned int i;
+
+ index = pos >> PAGE_SHIFT;
+ base = round_down(index, nrpages);
+ next_index = round_down(base + nrpages, nrpages);
+ next_next_index = round_down(next_index + nrpages, nrpages);
+
+ check_xa_multi_store_adv_add(xa, base, order, &some_val);
+
+ for (i = 0; i < nrpages; i++)
+ XA_BUG_ON(xa, test_get_entry(xa, base + i) != &some_val);
+
+ XA_BUG_ON(xa, test_get_entry(xa, next_index) != NULL);
+
+ /* Use order 0 for the next item */
+ check_xa_multi_store_adv_add(xa, next_index, 0, &some_val_2);
+ XA_BUG_ON(xa, test_get_entry(xa, next_index) != &some_val_2);
+
+ /* Remove the next item */
+ check_xa_multi_store_adv_delete(xa, next_index, 0);
+
+ /* Now use order for a new pointer */
+ check_xa_multi_store_adv_add(xa, next_index, order, &some_val_2);
+
+ for (i = 0; i < nrpages; i++)
+ XA_BUG_ON(xa, test_get_entry(xa, next_index + i) != &some_val_2);
+
+ check_xa_multi_store_adv_delete(xa, next_index, order);
+ check_xa_multi_store_adv_delete(xa, base, order);
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ /* starting fresh again */
+
+ /* let's test some holes now */
+
+ /* hole at base and next_next */
+ check_xa_multi_store_adv_add(xa, next_index, order, &some_val_2);
+
+ for (i = 0; i < nrpages; i++)
+ XA_BUG_ON(xa, test_get_entry(xa, base + i) != NULL);
+
+ for (i = 0; i < nrpages; i++)
+ XA_BUG_ON(xa, test_get_entry(xa, next_index + i) != &some_val_2);
+
+ for (i = 0; i < nrpages; i++)
+ XA_BUG_ON(xa, test_get_entry(xa, next_next_index + i) != NULL);
+
+ check_xa_multi_store_adv_delete(xa, next_index, order);
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ /* hole at base and next */
+
+ check_xa_multi_store_adv_add(xa, next_next_index, order, &some_val_2);
+
+ for (i = 0; i < nrpages; i++)
+ XA_BUG_ON(xa, test_get_entry(xa, base + i) != NULL);
+
+ for (i = 0; i < nrpages; i++)
+ XA_BUG_ON(xa, test_get_entry(xa, next_index + i) != NULL);
+
+ for (i = 0; i < nrpages; i++)
+ XA_BUG_ON(xa, test_get_entry(xa, next_next_index + i) != &some_val_2);
+
+ check_xa_multi_store_adv_delete(xa, next_next_index, order);
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+#endif
+
+static noinline void check_multi_store_advanced(struct xarray *xa)
+{
+#ifdef CONFIG_XARRAY_MULTI
+ unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
+ unsigned long end = ULONG_MAX/2;
+ unsigned long pos, i;
+
+ /*
+ * About 117 million tests below.
+ */
+ for (pos = 7; pos < end; pos = (pos * pos) + 564) {
+ for (i = 0; i < max_order; i++) {
+ check_xa_multi_store_adv(xa, pos, i);
+ check_xa_multi_store_adv(xa, pos + 157, i);
+ }
+ }
+#endif
+}
+
static noinline void check_xa_alloc_1(struct xarray *xa, unsigned int base)
{
int i;
@@ -1801,9 +2029,11 @@ static int xarray_checks(void)
check_xas_erase(&array);
check_insert(&array);
check_cmpxchg(&array);
+ check_cmpxchg_order(&array);
check_reserve(&array);
check_reserve(&xa0);
check_multi_store(&array);
+ check_multi_store_advanced(&array);
check_get_order(&array);
check_xa_alloc();
check_find(&array);