Diffstat (limited to 'lib')
-rw-r--r-- | lib/Kconfig.debug      |  23
-rw-r--r-- | lib/Kconfig.ubsan      |  10
-rw-r--r-- | lib/Makefile           |   8
-rw-r--r-- | lib/clz_ctz.c          |  32
-rw-r--r-- | lib/cpumask.c          |   5
-rw-r--r-- | lib/genalloc.c         |   2
-rw-r--r-- | lib/iov_iter.c         |  45
-rw-r--r-- | lib/list_debug.c       |  16
-rw-r--r-- | lib/locking-selftest.c | 135
-rw-r--r-- | lib/maple_tree.c       |  10
-rw-r--r-- | lib/radix-tree.c       |   1
-rw-r--r-- | lib/sbitmap.c          |  15
-rw-r--r-- | lib/scatterlist.c      |   2
-rw-r--r-- | lib/test_bitmap.c      |   8
-rw-r--r-- | lib/test_maple_tree.c  |   5
15 files changed, 134 insertions(+), 183 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 1b3894e861f2..a8a1b0ac8b22 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1200,7 +1200,7 @@ config WQ_CPU_INTENSIVE_REPORT
 	help
 	  Say Y here to enable reporting of concurrency-managed per-cpu work
 	  items that hog CPUs for longer than
-	  workqueue.cpu_intensive_threshold_us. Workqueue automatically
+	  workqueue.cpu_intensive_thresh_us. Workqueue automatically
 	  detects and excludes them from concurrency management to prevent
 	  them from stalling other per-cpu work items. Occassional
 	  triggering may not necessarily indicate a problem. Repeated
@@ -1673,10 +1673,15 @@ menu "Debug kernel data structures"
 
 config DEBUG_LIST
 	bool "Debug linked list manipulation"
-	depends on DEBUG_KERNEL || BUG_ON_DATA_CORRUPTION
+	depends on DEBUG_KERNEL
+	select LIST_HARDENED
 	help
-	  Enable this to turn on extended checks in the linked-list
-	  walking routines.
+	  Enable this to turn on extended checks in the linked-list walking
+	  routines.
+
+	  This option trades better quality error reports for performance, and
+	  is more suitable for kernel debugging. If you care about performance,
+	  you should only enable CONFIG_LIST_HARDENED instead.
 
 	  If unsure, say N.
 
@@ -1710,16 +1715,6 @@ config DEBUG_NOTIFIERS
 	  This is a relatively cheap check but if you care about maximum
 	  performance, say N.
 
-config BUG_ON_DATA_CORRUPTION
-	bool "Trigger a BUG when data corruption is detected"
-	select DEBUG_LIST
-	help
-	  Select this option if the kernel should BUG when it encounters
-	  data corruption in kernel memory structures when they get checked
-	  for validity.
-
-	  If unsure, say N.
-
 config DEBUG_MAPLE_TREE
 	bool "Debug maple trees"
 	depends on DEBUG_KERNEL
diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
index efae7e011956..59e21bfec188 100644
--- a/lib/Kconfig.ubsan
+++ b/lib/Kconfig.ubsan
@@ -13,7 +13,7 @@ menuconfig UBSAN
 if UBSAN
 
 config UBSAN_TRAP
-	bool "On Sanitizer warnings, abort the running kernel code"
+	bool "Abort on Sanitizer warnings (smaller kernel but less verbose)"
 	depends on !COMPILE_TEST
 	help
 	  Building kernels with Sanitizer features enabled tends to grow
@@ -26,6 +26,14 @@ config UBSAN_TRAP
 	  the system. For some system builders this is an acceptable
 	  trade-off.
 
+	  Also note that selecting Y will cause your kernel to Oops
+	  with an "illegal instruction" error with no further details
+	  when a UBSAN violation occurs. (Except on arm64, which will
+	  report which Sanitizer failed.) This may make it hard to
+	  determine whether an Oops was caused by UBSAN or to figure
+	  out the details of a UBSAN violation. It makes the kernel log
+	  output less useful for bug reports.
+
 config CC_HAS_UBSAN_BOUNDS_STRICT
 	def_bool $(cc-option,-fsanitize=bounds-strict)
 	help
diff --git a/lib/Makefile b/lib/Makefile
index 42d307ade225..d1397785ec16 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -82,7 +82,13 @@ obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o
 obj-$(CONFIG_TEST_DYNAMIC_DEBUG) += test_dynamic_debug.o
 obj-$(CONFIG_TEST_PRINTF) += test_printf.o
 obj-$(CONFIG_TEST_SCANF) += test_scanf.o
+
 obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o
+ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_KASAN),yy)
+# FIXME: Clang breaks test_bitmap_const_eval when KASAN and GCOV are enabled
+GCOV_PROFILE_test_bitmap.o := n
+endif
+
 obj-$(CONFIG_TEST_UUID) += test_uuid.o
 obj-$(CONFIG_TEST_XARRAY) += test_xarray.o
 obj-$(CONFIG_TEST_MAPLE_TREE) += test_maple_tree.o
@@ -161,7 +167,7 @@ obj-$(CONFIG_BTREE) += btree.o
 obj-$(CONFIG_INTERVAL_TREE) += interval_tree.o
 obj-$(CONFIG_ASSOCIATIVE_ARRAY) += assoc_array.o
 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
+obj-$(CONFIG_LIST_HARDENED) += list_debug.o
 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
 obj-$(CONFIG_BITREVERSE) += bitrev.o
diff --git a/lib/clz_ctz.c b/lib/clz_ctz.c
index 0d3a686b5ba2..fb8c0c5c2bd2 100644
--- a/lib/clz_ctz.c
+++ b/lib/clz_ctz.c
@@ -28,36 +28,16 @@ int __weak __clzsi2(int val)
 }
 EXPORT_SYMBOL(__clzsi2);
 
-int __weak __clzdi2(long val);
-int __weak __ctzdi2(long val);
-#if BITS_PER_LONG == 32
-
-int __weak __clzdi2(long val)
+int __weak __clzdi2(u64 val);
+int __weak __clzdi2(u64 val)
 {
-	return 32 - fls((int)val);
+	return 64 - fls64(val);
 }
 EXPORT_SYMBOL(__clzdi2);
 
-int __weak __ctzdi2(long val)
+int __weak __ctzdi2(u64 val);
+int __weak __ctzdi2(u64 val)
 {
-	return __ffs((u32)val);
+	return __ffs64(val);
 }
 EXPORT_SYMBOL(__ctzdi2);
-
-#elif BITS_PER_LONG == 64
-
-int __weak __clzdi2(long val)
-{
-	return 64 - fls64((u64)val);
-}
-EXPORT_SYMBOL(__clzdi2);
-
-int __weak __ctzdi2(long val)
-{
-	return __ffs64((u64)val);
-}
-EXPORT_SYMBOL(__ctzdi2);
-
-#else
-#error BITS_PER_LONG not 32 or 64
-#endif
diff --git a/lib/cpumask.c b/lib/cpumask.c
index de356f16773a..a7fd02b5ae26 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -45,6 +45,7 @@ EXPORT_SYMBOL(cpumask_next_wrap);
  * alloc_cpumask_var_node - allocate a struct cpumask on a given node
  * @mask: pointer to cpumask_var_t where the cpumask is returned
  * @flags: GFP_ flags
+ * @node: memory node from which to allocate or %NUMA_NO_NODE
  *
  * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
  * a nop returning a constant 1 (in <linux/cpumask.h>)
@@ -157,7 +158,9 @@ EXPORT_SYMBOL(cpumask_local_spread);
 static DEFINE_PER_CPU(int, distribute_cpu_mask_prev);
 
 /**
- * cpumask_any_and_distribute - Return an arbitrary cpu within srcp1 & srcp2.
+ * cpumask_any_and_distribute - Return an arbitrary cpu within src1p & src2p.
+ * @src1p: first &cpumask for intersection
+ * @src2p: second &cpumask for intersection
  *
  * Iterated calls using the same srcp1 and srcp2 will be distributed within
  * their intersection.
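
Note on the clz_ctz.c hunk above: per the libgcc ABI, __clzdi2() and __ctzdi2() take a 64-bit argument even on 32-bit targets, so the old BITS_PER_LONG == 32 variants operated on a truncated value; the rewrite uses fls64()/__ffs64() unconditionally. A minimal userspace sketch of the identities the new code relies on (fls64()/__ffs64() are modeled here with GCC builtins; the test value is made up for illustration):

    /*
     * Userspace sketch, not kernel code: checks that "64 - fls64(val)"
     * counts leading zeros and "__ffs64(val)" counts trailing zeros,
     * which is what the patched __clzdi2()/__ctzdi2() return. Both are
     * undefined for val == 0, as in libgcc.
     */
    #include <assert.h>
    #include <stdint.h>

    static int fls64(uint64_t val)              /* highest set bit, 1-based */
    {
            return val ? 64 - __builtin_clzll(val) : 0;
    }

    static unsigned long __ffs64(uint64_t val)  /* lowest set bit, 0-based */
    {
            return __builtin_ctzll(val);
    }

    int main(void)
    {
            uint64_t val = 1ULL << 31;          /* only bit 31 set */

            assert(64 - fls64(val) == 32);      /* __clzdi2(): 32 leading zeros */
            assert(__ffs64(val) == 31);         /* __ctzdi2(): 31 trailing zeros */
            return 0;
    }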
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 0c883d6fbd44..6c644f954bc5 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -895,7 +895,7 @@ struct gen_pool *of_gen_pool_get(struct device_node *np,
 
 		of_property_read_string(np_pool, "label", &name);
 		if (!name)
-			name = np_pool->name;
+			name = of_node_full_name(np_pool);
 	}
 	if (pdev)
 		pool = gen_pool_get(&pdev->dev, name);
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index b667b1e2f688..424737045b97 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -566,24 +566,37 @@ size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
 }
 EXPORT_SYMBOL(iov_iter_zero);
 
-size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t bytes,
-				  struct iov_iter *i)
+size_t copy_page_from_iter_atomic(struct page *page, size_t offset,
+				  size_t bytes, struct iov_iter *i)
 {
-	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
-	if (!page_copy_sane(page, offset, bytes)) {
-		kunmap_atomic(kaddr);
+	size_t n, copied = 0;
+
+	if (!page_copy_sane(page, offset, bytes))
 		return 0;
-	}
-	if (WARN_ON_ONCE(!i->data_source)) {
-		kunmap_atomic(kaddr);
+	if (WARN_ON_ONCE(!i->data_source))
 		return 0;
-	}
-	iterate_and_advance(i, bytes, base, len, off,
-		copyin(p + off, base, len),
-		memcpy_from_iter(i, p + off, base, len)
-	)
-	kunmap_atomic(kaddr);
-	return bytes;
+
+	do {
+		char *p;
+
+		n = bytes - copied;
+		if (PageHighMem(page)) {
+			page += offset / PAGE_SIZE;
+			offset %= PAGE_SIZE;
+			n = min_t(size_t, n, PAGE_SIZE - offset);
+		}
+
+		p = kmap_atomic(page) + offset;
+		iterate_and_advance(i, n, base, len, off,
+			copyin(p + off, base, len),
+			memcpy_from_iter(i, p + off, base, len)
+		)
+		kunmap_atomic(p);
+		copied += n;
+		offset += n;
+	} while (PageHighMem(page) && copied != bytes && n > 0);
+
+	return copied;
 }
 EXPORT_SYMBOL(copy_page_from_iter_atomic);
 
@@ -1349,7 +1362,7 @@ uaccess_end:
 	return ret;
 }
 
-static int copy_iovec_from_user(struct iovec *iov,
+static __noclone int copy_iovec_from_user(struct iovec *iov,
 		const struct iovec __user *uiov, unsigned long nr_segs)
 {
 	int ret = -EFAULT;
diff --git a/lib/list_debug.c b/lib/list_debug.c
index d98d43f80958..db602417febf 100644
--- a/lib/list_debug.c
+++ b/lib/list_debug.c
@@ -2,7 +2,8 @@
  * Copyright 2006, Red Hat, Inc., Dave Jones
  * Released under the General Public License (GPL).
  *
- * This file contains the linked list validation for DEBUG_LIST.
+ * This file contains the linked list validation and error reporting for
+ * LIST_HARDENED and DEBUG_LIST.
  */
 
 #include <linux/export.h>
@@ -17,8 +18,9 @@
  * attempt).
  */
 
-bool __list_add_valid(struct list_head *new, struct list_head *prev,
-		      struct list_head *next)
+__list_valid_slowpath
+bool __list_add_valid_or_report(struct list_head *new, struct list_head *prev,
+				struct list_head *next)
 {
 	if (CHECK_DATA_CORRUPTION(prev == NULL,
			"list_add corruption. prev is NULL.\n") ||
@@ -37,9 +39,10 @@ bool __list_add_valid(struct list_head *new, struct list_head *prev,
 
 	return true;
 }
-EXPORT_SYMBOL(__list_add_valid);
+EXPORT_SYMBOL(__list_add_valid_or_report);
 
-bool __list_del_entry_valid(struct list_head *entry)
+__list_valid_slowpath
+bool __list_del_entry_valid_or_report(struct list_head *entry)
 {
 	struct list_head *prev, *next;
 
@@ -65,6 +68,5 @@ bool __list_del_entry_valid(struct list_head *entry)
 		return false;
 
 	return true;
-
 }
-EXPORT_SYMBOL(__list_del_entry_valid);
+EXPORT_SYMBOL(__list_del_entry_valid_or_report);
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index 8d24279fad05..6f6a5fc85b42 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -2506,94 +2506,29 @@ static void fs_reclaim_tests(void)
 	pr_cont("\n");
 }
 
-#define __guard(cleanup) __maybe_unused __attribute__((__cleanup__(cleanup)))
+/* Defines guard classes to create contexts */
+DEFINE_LOCK_GUARD_0(HARDIRQ, HARDIRQ_ENTER(), HARDIRQ_EXIT())
+DEFINE_LOCK_GUARD_0(NOTTHREADED_HARDIRQ,
+		    do {
+			    local_irq_disable();
+			    __irq_enter();
+			    WARN_ON(!in_irq());
+		    } while(0), HARDIRQ_EXIT())
+DEFINE_LOCK_GUARD_0(SOFTIRQ, SOFTIRQ_ENTER(), SOFTIRQ_EXIT())
+
+/* Define RCU guards, should go away when RCU has its own guard definitions */
+DEFINE_LOCK_GUARD_0(RCU, rcu_read_lock(), rcu_read_unlock())
+DEFINE_LOCK_GUARD_0(RCU_BH, rcu_read_lock_bh(), rcu_read_unlock_bh())
+DEFINE_LOCK_GUARD_0(RCU_SCHED, rcu_read_lock_sched(), rcu_read_unlock_sched())
 
-static void hardirq_exit(int *_)
-{
-	HARDIRQ_EXIT();
-}
-
-#define HARDIRQ_CONTEXT(name, ...)					\
-	int hardirq_guard_##name __guard(hardirq_exit);			\
-	HARDIRQ_ENTER();
-
-#define NOTTHREADED_HARDIRQ_CONTEXT(name, ...)				\
-	int notthreaded_hardirq_guard_##name __guard(hardirq_exit);	\
-	local_irq_disable();						\
-	__irq_enter();							\
-	WARN_ON(!in_irq());
-
-static void softirq_exit(int *_)
-{
-	SOFTIRQ_EXIT();
-}
-
-#define SOFTIRQ_CONTEXT(name, ...)					\
-	int softirq_guard_##name __guard(softirq_exit);			\
-	SOFTIRQ_ENTER();
-
-static void rcu_exit(int *_)
-{
-	rcu_read_unlock();
-}
-
-#define RCU_CONTEXT(name, ...)						\
-	int rcu_guard_##name __guard(rcu_exit);				\
-	rcu_read_lock();
-
-static void rcu_bh_exit(int *_)
-{
-	rcu_read_unlock_bh();
-}
-
-#define RCU_BH_CONTEXT(name, ...)					\
-	int rcu_bh_guard_##name __guard(rcu_bh_exit);			\
-	rcu_read_lock_bh();
-
-static void rcu_sched_exit(int *_)
-{
-	rcu_read_unlock_sched();
-}
-
-#define RCU_SCHED_CONTEXT(name, ...)					\
-	int rcu_sched_guard_##name __guard(rcu_sched_exit);		\
-	rcu_read_lock_sched();
-
-static void raw_spinlock_exit(raw_spinlock_t **lock)
-{
-	raw_spin_unlock(*lock);
-}
-
-#define RAW_SPINLOCK_CONTEXT(name, lock)				\
-	raw_spinlock_t *raw_spinlock_guard_##name __guard(raw_spinlock_exit) = &(lock);	\
-	raw_spin_lock(&(lock));
-
-static void spinlock_exit(spinlock_t **lock)
-{
-	spin_unlock(*lock);
-}
-
-#define SPINLOCK_CONTEXT(name, lock)					\
-	spinlock_t *spinlock_guard_##name __guard(spinlock_exit) = &(lock);	\
-	spin_lock(&(lock));
-
-static void mutex_exit(struct mutex **lock)
-{
-	mutex_unlock(*lock);
-}
-
-#define MUTEX_CONTEXT(name, lock)					\
-	struct mutex *mutex_guard_##name __guard(mutex_exit) = &(lock);	\
-	mutex_lock(&(lock));
 
 #define GENERATE_2_CONTEXT_TESTCASE(outer, outer_lock, inner, inner_lock)	\
 									\
 static void __maybe_unused inner##_in_##outer(void)			\
 {									\
-	outer##_CONTEXT(_, outer_lock);					\
-	{								\
-		inner##_CONTEXT(_, inner_lock);				\
-	}								\
+	/* Relies the reversed clean-up ordering: inner first */	\
+	guard(outer)(outer_lock);					\
+	guard(inner)(inner_lock);					\
 }
 
 /*
@@ -2632,21 +2567,21 @@ GENERATE_2_CONTEXT_TESTCASE(SOFTIRQ, , inner, inner_lock)		\
 GENERATE_2_CONTEXT_TESTCASE(RCU, , inner, inner_lock)			\
 GENERATE_2_CONTEXT_TESTCASE(RCU_BH, , inner, inner_lock)		\
 GENERATE_2_CONTEXT_TESTCASE(RCU_SCHED, , inner, inner_lock)		\
-GENERATE_2_CONTEXT_TESTCASE(RAW_SPINLOCK, raw_lock_A, inner, inner_lock)	\
-GENERATE_2_CONTEXT_TESTCASE(SPINLOCK, lock_A, inner, inner_lock)	\
-GENERATE_2_CONTEXT_TESTCASE(MUTEX, mutex_A, inner, inner_lock)
+GENERATE_2_CONTEXT_TESTCASE(raw_spinlock, &raw_lock_A, inner, inner_lock)	\
+GENERATE_2_CONTEXT_TESTCASE(spinlock, &lock_A, inner, inner_lock)	\
+GENERATE_2_CONTEXT_TESTCASE(mutex, &mutex_A, inner, inner_lock)
 
 GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(RCU, )
-GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(RAW_SPINLOCK, raw_lock_B)
-GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(SPINLOCK, lock_B)
-GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(MUTEX, mutex_B)
+GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(raw_spinlock, &raw_lock_B)
+GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(spinlock, &lock_B)
+GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(mutex, &mutex_B)
 
 /* the outer context allows all kinds of preemption */
 #define DO_CONTEXT_TESTCASE_OUTER_PREEMPTIBLE(outer)			\
 	dotest(RCU_in_##outer, SUCCESS, LOCKTYPE_RWLOCK);		\
-	dotest(RAW_SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN);	\
-	dotest(SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN);		\
-	dotest(MUTEX_in_##outer, SUCCESS, LOCKTYPE_MUTEX);		\
+	dotest(raw_spinlock_in_##outer, SUCCESS, LOCKTYPE_SPIN);	\
+	dotest(spinlock_in_##outer, SUCCESS, LOCKTYPE_SPIN);		\
+	dotest(mutex_in_##outer, SUCCESS, LOCKTYPE_MUTEX);		\
 
 /*
  * the outer context only allows the preemption introduced by spinlock_t (which
@@ -2654,16 +2589,16 @@ GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(MUTEX, mutex_B)
  */
 #define DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(outer)		\
 	dotest(RCU_in_##outer, SUCCESS, LOCKTYPE_RWLOCK);		\
-	dotest(RAW_SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN);	\
-	dotest(SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN);		\
-	dotest(MUTEX_in_##outer, FAILURE, LOCKTYPE_MUTEX);		\
+	dotest(raw_spinlock_in_##outer, SUCCESS, LOCKTYPE_SPIN);	\
+	dotest(spinlock_in_##outer, SUCCESS, LOCKTYPE_SPIN);		\
+	dotest(mutex_in_##outer, FAILURE, LOCKTYPE_MUTEX);		\
 
 /* the outer doesn't allows any kind of preemption */
 #define DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(outer)		\
 	dotest(RCU_in_##outer, SUCCESS, LOCKTYPE_RWLOCK);		\
-	dotest(RAW_SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN);	\
-	dotest(SPINLOCK_in_##outer, FAILURE, LOCKTYPE_SPIN);		\
-	dotest(MUTEX_in_##outer, FAILURE, LOCKTYPE_MUTEX);		\
+	dotest(raw_spinlock_in_##outer, SUCCESS, LOCKTYPE_SPIN);	\
+	dotest(spinlock_in_##outer, FAILURE, LOCKTYPE_SPIN);		\
+	dotest(mutex_in_##outer, FAILURE, LOCKTYPE_MUTEX);		\
 
 static void wait_context_tests(void)
 {
@@ -2697,15 +2632,15 @@ static void wait_context_tests(void)
 	pr_cont("\n");
 
 	print_testname("in RAW_SPINLOCK context");
-	DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(RAW_SPINLOCK);
+	DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(raw_spinlock);
 	pr_cont("\n");
 
 	print_testname("in SPINLOCK context");
-	DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(SPINLOCK);
+	DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(spinlock);
 	pr_cont("\n");
 
 	print_testname("in MUTEX context");
-	DO_CONTEXT_TESTCASE_OUTER_PREEMPTIBLE(MUTEX);
+	DO_CONTEXT_TESTCASE_OUTER_PREEMPTIBLE(mutex);
 	pr_cont("\n");
 }
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index bfffbb7cab26..f723024e1426 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -3692,7 +3692,8 @@ static inline int mas_root_expand(struct ma_state *mas, void *entry)
 	mas->offset = slot;
 	pivots[slot] = mas->last;
 	if (mas->last != ULONG_MAX)
-		slot++;
+		pivots[++slot] = ULONG_MAX;
+
 	mas->depth = 1;
 	mas_set_height(mas);
 	ma_set_meta(node, maple_leaf_64, 0, slot);
@@ -4264,6 +4265,10 @@ static inline unsigned char mas_wr_new_end(struct ma_wr_state *wr_mas)
  * mas_wr_append: Attempt to append
  * @wr_mas: the maple write state
  *
+ * This is currently unsafe in rcu mode since the end of the node may be cached
+ * by readers while the node contents may be updated which could result in
+ * inaccurate information.
+ *
  * Return: True if appended, false otherwise
  */
 static inline bool mas_wr_append(struct ma_wr_state *wr_mas)
@@ -4273,6 +4278,9 @@ static inline bool mas_wr_append(struct ma_wr_state *wr_mas)
 	struct ma_state *mas = wr_mas->mas;
 	unsigned char node_pivots = mt_pivots[wr_mas->type];
 
+	if (mt_in_rcu(mas->tree))
+		return false;
+
 	if (mas->offset != wr_mas->node_end)
 		return false;
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 1a31065b2036..976b9bd02a1b 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -1136,7 +1136,6 @@ static void set_iter_tags(struct radix_tree_iter *iter,
 void __rcu **radix_tree_iter_resume(void __rcu **slot,
 					struct radix_tree_iter *iter)
 {
-	slot++;
 	iter->index = __radix_tree_iter_add(iter, 1);
 	iter->next_index = iter->index;
 	iter->tags = 0;
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index eff4e42c425a..d0a5081dfd12 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -550,7 +550,7 @@ EXPORT_SYMBOL_GPL(sbitmap_queue_min_shallow_depth);
 
 static void __sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr)
 {
-	int i, wake_index;
+	int i, wake_index, woken;
 
 	if (!atomic_read(&sbq->ws_active))
 		return;
@@ -567,13 +567,12 @@ static void __sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr)
 		 */
 		wake_index = sbq_index_inc(wake_index);
 
-		/*
-		 * It is sufficient to wake up at least one waiter to
-		 * guarantee forward progress.
-		 */
-		if (waitqueue_active(&ws->wait) &&
-		    wake_up_nr(&ws->wait, nr))
-			break;
+		if (waitqueue_active(&ws->wait)) {
+			woken = wake_up_nr(&ws->wait, nr);
+			if (woken == nr)
+				break;
+			nr -= woken;
+		}
 	}
 
 	if (wake_index != atomic_read(&sbq->wake_index))
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index e86231a44c3d..c65566b4dc66 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -1148,7 +1148,7 @@ static ssize_t extract_user_to_sg(struct iov_iter *iter,
 
 failed:
 	while (sgtable->nents > sgtable->orig_nents)
-		put_page(sg_page(&sgtable->sgl[--sgtable->nents]));
+		unpin_user_page(sg_page(&sgtable->sgl[--sgtable->nents]));
 	return res;
 }
diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
index 187f5b2db4cf..f2ea9f30c7c5 100644
--- a/lib/test_bitmap.c
+++ b/lib/test_bitmap.c
@@ -1161,6 +1161,10 @@ static void __init test_bitmap_print_buf(void)
 	}
 }
 
+/*
+ * FIXME: Clang breaks compile-time evaluations when KASAN and GCOV are enabled.
+ * To workaround it, GCOV is force-disabled in Makefile for this configuration.
+ */
 static void __init test_bitmap_const_eval(void)
 {
 	DECLARE_BITMAP(bitmap, BITS_PER_LONG);
@@ -1186,11 +1190,7 @@ static void __init test_bitmap_const_eval(void)
 	 * the compiler is fixed.
 	 */
 	bitmap_clear(bitmap, 0, BITS_PER_LONG);
-#if defined(__s390__) && defined(__clang__)
-	if (!const_test_bit(7, bitmap))
-#else
 	if (!test_bit(7, bitmap))
-#endif
 		bitmap_set(bitmap, 5, 2);
 
 	/* Equals to `unsigned long bitopvar = BIT(20)` */
diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
index 9939be34e516..8d4c92cbdd0c 100644
--- a/lib/test_maple_tree.c
+++ b/lib/test_maple_tree.c
@@ -1898,13 +1898,16 @@ static noinline void __init next_prev_test(struct maple_tree *mt)
 						725};
 	static const unsigned long level2_32[] = { 1747, 2000, 1750, 1755,
 						1760, 1765};
+	unsigned long last_index;
 
 	if (MAPLE_32BIT) {
 		nr_entries = 500;
 		level2 = level2_32;
+		last_index = 0x138e;
 	} else {
 		nr_entries = 200;
 		level2 = level2_64;
+		last_index = 0x7d6;
 	}
 
 	for (i = 0; i <= nr_entries; i++)
@@ -2011,7 +2014,7 @@ static noinline void __init next_prev_test(struct maple_tree *mt)
 
 	val = mas_next(&mas, ULONG_MAX);
 	MT_BUG_ON(mt, val != NULL);
-	MT_BUG_ON(mt, mas.index != 0x7d6);
+	MT_BUG_ON(mt, mas.index != last_index);
 	MT_BUG_ON(mt, mas.last != ULONG_MAX);
 
 	val = mas_prev(&mas, 0);
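
Note on the sbitmap.c hunks above: the old loop stopped after the first wait queue where wake_up_nr() woke anyone, so a queue holding fewer than nr waiters silently swallowed the remaining wakeup budget. The new loop subtracts the number actually woken and carries the remainder to the next queue. A standalone sketch of that accounting, with a plain linear scan standing in for sbq_index_inc() and hypothetical waiter counts:

    #include <stdio.h>

    #define WS_COUNT 4

    /* stand-in for the per-queue waiter counts; values are made up */
    static int waiters[WS_COUNT] = { 1, 0, 2, 5 };

    /* wakes up to nr waiters on queue idx, returns how many were woken */
    static int wake_up_nr(int idx, int nr)
    {
            int woken = waiters[idx] < nr ? waiters[idx] : nr;

            waiters[idx] -= woken;
            return woken;
    }

    int main(void)
    {
            int nr = 4;     /* wakeup budget, as in __sbitmap_queue_wake_up(sbq, nr) */

            for (int i = 0; i < WS_COUNT; i++) {
                    int woken = wake_up_nr(i, nr);

                    if (woken == nr) {      /* budget fully spent */
                            nr = 0;
                            break;
                    }
                    nr -= woken;            /* carry the remainder to the next queue */
            }
            printf("unspent wakeups: %d\n", nr);    /* prints 0 here */
            return 0;
    }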