author     Linus Torvalds <torvalds@linux-foundation.org>  2023-07-09 00:30:25 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2023-07-09 00:30:25 +0300
commit     946c6b59c56dc6e7d8364a8959cb36bf6d10bc37 (patch)
tree       0f83317cecca8b249630dbfd6c1a63aa7dcb1884 /mm
parent     fb49c455323ff8319a123dd312be9082c49a23a5 (diff)
parent     8ba388c06bc8056935ec1814b2689bfb42f3b89a (diff)
download   linux-946c6b59c56dc6e7d8364a8959cb36bf6d10bc37.tar.xz
Merge tag 'mm-hotfixes-stable-2023-07-08-10-43' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull hotfixes from Andrew Morton:
 "16 hotfixes. Six are cc:stable and the remainder address post-6.4
  issues"

The merge undoes the disabling of the CONFIG_PER_VMA_LOCK feature, since
it was all hopefully fixed in mainline.

* tag 'mm-hotfixes-stable-2023-07-08-10-43' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  lib: dhry: fix sleeping allocations inside non-preemptable section
  kasan, slub: fix HW_TAGS zeroing with slub_debug
  kasan: fix type cast in memory_is_poisoned_n
  mailmap: add entries for Heiko Stuebner
  mailmap: update manpage link
  bootmem: remove the vmemmap pages from kmemleak in free_bootmem_page
  MAINTAINERS: add linux-next info
  mailmap: add Markus Schneider-Pargmann
  writeback: account the number of pages written back
  mm: call arch_swap_restore() from do_swap_page()
  squashfs: fix cache race with migration
  mm/hugetlb.c: fix a bug within a BUG(): inconsistent pte comparison
  docs: update ocfs2-devel mailing list address
  MAINTAINERS: update ocfs2-devel mailing list address
  mm: disable CONFIG_PER_VMA_LOCK until its fixed
  fork: lock VMAs of the parent process when forking
Diffstat (limited to 'mm')
 -rw-r--r--  mm/hugetlb.c         |  7
 -rw-r--r--  mm/kasan/generic.c   |  3
 -rw-r--r--  mm/kasan/kasan.h     | 12
 -rw-r--r--  mm/memory.c          |  7
 -rw-r--r--  mm/page-writeback.c  |  8
 -rw-r--r--  mm/slab.h            | 16
 6 files changed, 34 insertions, 19 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index bce28cca73a1..64a3239b6407 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -7246,7 +7246,12 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
 			pte = (pte_t *)pmd_alloc(mm, pud, addr);
 		}
 	}
-	BUG_ON(pte && pte_present(ptep_get(pte)) && !pte_huge(ptep_get(pte)));
+
+	if (pte) {
+		pte_t pteval = ptep_get_lockless(pte);
+
+		BUG_ON(pte_present(pteval) && !pte_huge(pteval));
+	}
 
 	return pte;
 }
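The old assertion read the pte twice via ptep_get(), so a concurrent update between the two reads could make the present and huge checks see two different values and trip the BUG() spuriously; the fix snapshots the pte once with ptep_get_lockless(). A rough userspace sketch of the pattern, using C11 atomics as a stand-in for pte access (the flag bits and names are illustrative, not the real pte layout):

	#include <assert.h>
	#include <stdatomic.h>

	#define PTE_PRESENT 0x1UL	/* illustrative flag bits */
	#define PTE_HUGE    0x2UL

	static _Atomic unsigned long pte_slot;

	/* Racy: two independent loads; a writer between them can
	 * produce an "impossible" mix of old and new bits. */
	static void check_racy(void)
	{
		assert(!((atomic_load(&pte_slot) & PTE_PRESENT) &&
			 !(atomic_load(&pte_slot) & PTE_HUGE)));
	}

	/* Fixed: snapshot once, then test the single consistent value. */
	static void check_consistent(void)
	{
		unsigned long pteval = atomic_load(&pte_slot);

		assert(!((pteval & PTE_PRESENT) && !(pteval & PTE_HUGE)));
	}

	int main(void)
	{
		atomic_store(&pte_slot, PTE_PRESENT | PTE_HUGE);
		check_racy();
		check_consistent();
		return 0;
	}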
diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
index 5b4c97baa656..4d837ab83f08 100644
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -130,9 +130,10 @@ static __always_inline bool memory_is_poisoned_n(const void *addr, size_t size)
 	if (unlikely(ret)) {
 		const void *last_byte = addr + size - 1;
 		s8 *last_shadow = (s8 *)kasan_mem_to_shadow(last_byte);
+		s8 last_accessible_byte = (unsigned long)last_byte & KASAN_GRANULE_MASK;
 
 		if (unlikely(ret != (unsigned long)last_shadow ||
-			((long)last_byte & KASAN_GRANULE_MASK) >= *last_shadow))
+			     last_accessible_byte >= *last_shadow))
 			return true;
 	}
 	return false;
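In generic KASAN, each shadow byte covers an 8-byte granule: 0 means the whole granule is accessible, and N in 1..7 means only the first N bytes are. The check compares the in-granule offset of the access's last byte against that shadow value; the fix materializes the offset in an s8 so the cast matches the shadow type. A small userspace sketch of the partial-granule check (constants assumed here, not taken from the kernel headers):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define GRANULE_SIZE 8			/* generic-mode granule */
	#define GRANULE_MASK (GRANULE_SIZE - 1)

	/* shadow == 0: whole granule accessible;
	 * shadow == N (1..7): only the first N bytes are accessible. */
	static bool last_byte_poisoned(uintptr_t last_byte, int8_t shadow)
	{
		int8_t last_accessible_byte = last_byte & GRANULE_MASK;

		return shadow != 0 && last_accessible_byte >= shadow;
	}

	int main(void)
	{
		/* access ends at offset 5, but only 4 bytes are valid */
		printf("%d\n", last_byte_poisoned(5, 4));	/* 1: poisoned */
		/* access ends at offset 3, within the 4 valid bytes */
		printf("%d\n", last_byte_poisoned(3, 4));	/* 0: ok */
		return 0;
	}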
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index b799f11e45dc..2e973b36fe07 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -466,18 +466,6 @@ static inline void kasan_unpoison(const void *addr, size_t size, bool init)
 	if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
 		return;
 
-	/*
-	 * Explicitly initialize the memory with the precise object size to
-	 * avoid overwriting the slab redzone. This disables initialization in
-	 * the arch code and may thus lead to performance penalty. This penalty
-	 * does not affect production builds, as slab redzones are not enabled
-	 * there.
-	 */
-	if (__slub_debug_enabled() &&
-	    init && ((unsigned long)size & KASAN_GRANULE_MASK)) {
-		init = false;
-		memzero_explicit((void *)addr, size);
-	}
 	size = round_up(size, KASAN_GRANULE_SIZE);
 
 	hw_set_mem_tag_range((void *)addr, size, tag, init);
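This deletes the slub_debug special case from kasan_unpoison(); the decision moves up into slab_post_alloc_hook() (see the mm/slab.h hunk below). The underlying problem: HW_TAGS initialization works on whole tag granules, so zero-initializing the rounded-up size of an unaligned object spills into the redzone that slub_debug places right after it. A tiny illustration of the overlap arithmetic (the object size is made up; arm64 MTE uses a 16-byte tag granule):

	#include <stdio.h>

	int main(void)
	{
		unsigned long object_size = 24;	/* assumed unaligned object */
		unsigned long granule = 16;	/* MTE tag granule size */
		unsigned long rounded = (object_size + granule - 1) &
					~(granule - 1);

		/* Bytes 24..31 belong to the slub_debug redzone, yet
		 * tag-based init of the rounded size would zero them too. */
		printf("init covers %lu bytes, spilling %lu into the redzone\n",
		       rounded, rounded - object_size);
		return 0;
	}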
diff --git a/mm/memory.c b/mm/memory.c
index 0ae594703021..01f39e8144ef 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3951,6 +3951,13 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 	}
 
 	/*
+	 * Some architectures may have to restore extra metadata to the page
+	 * when reading from swap. This metadata may be indexed by swap entry
+	 * so this must be called before swap_free().
+	 */
+	arch_swap_restore(entry, folio);
+
+	/*
 	 * Remove the swap entry and conditionally try to free up the swapcache.
 	 * We're already holding a reference on the page but haven't mapped it
 	 * yet.
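The ordering is the whole point here: architectures such as arm64 with MTE stash per-page metadata (memory tags) keyed by the swap entry at swap-out, and swap_free() may release that slot for reuse. A contrived userspace sketch of why restore-before-free matters (the table and function names are hypothetical stand-ins, not the kernel API):

	#include <stdio.h>

	/* Hypothetical per-slot metadata, keyed by swap entry. */
	static int saved_tags[128];

	static int restore_metadata(unsigned long entry)
	{
		return saved_tags[entry];	/* must run before the free */
	}

	static void free_swap_slot(unsigned long entry)
	{
		saved_tags[entry] = 0;		/* slot may be reused now */
	}

	int main(void)
	{
		unsigned long entry = 42;

		saved_tags[entry] = 7;			/* set at "swap-out" */
		int tags = restore_metadata(entry);	/* restore first ... */
		free_swap_slot(entry);			/* ... then free */
		printf("restored tags: %d\n", tags);
		return 0;
	}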
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 1d17fb1ec863..d3f42009bb70 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2434,6 +2434,7 @@ int write_cache_pages(struct address_space *mapping,
 
 		for (i = 0; i < nr_folios; i++) {
 			struct folio *folio = fbatch.folios[i];
+			unsigned long nr;
 
 			done_index = folio->index;
 
@@ -2471,6 +2472,7 @@ continue_unlock:
 
 			trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
 			error = writepage(folio, wbc, data);
+			nr = folio_nr_pages(folio);
 			if (unlikely(error)) {
 				/*
 				 * Handle errors according to the type of
@@ -2489,8 +2491,7 @@ continue_unlock:
 					error = 0;
 				} else if (wbc->sync_mode != WB_SYNC_ALL) {
 					ret = error;
-					done_index = folio->index +
-						folio_nr_pages(folio);
+					done_index = folio->index + nr;
 					done = 1;
 					break;
 				}
@@ -2504,7 +2505,8 @@ continue_unlock:
 			 * keep going until we have written all the pages
 			 * we tagged for writeback prior to entering this loop.
 			 */
-			if (--wbc->nr_to_write <= 0 &&
+			wbc->nr_to_write -= nr;
+			if (wbc->nr_to_write <= 0 &&
 			    wbc->sync_mode == WB_SYNC_NONE) {
 				done = 1;
 				break;
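With large folios, a single writepage call can write back many pages, so decrementing nr_to_write by one per folio undercounts and lets the loop write far more than the caller asked for; nr is captured right after the writepage call, while the folio is still safely referenced. A reduced model of the accounting change (stand-in types, not the kernel's):

	#include <stdio.h>

	struct folio { unsigned int order; };

	/* A folio of order N spans 2^N pages. */
	static long folio_nr_pages(const struct folio *folio)
	{
		return 1L << folio->order;
	}

	int main(void)
	{
		struct folio batch[] = { {2}, {0}, {3} };	/* 4+1+8 pages */
		long nr_to_write = 10;				/* caller budget */

		for (unsigned int i = 0; i < 3; i++) {
			long nr = folio_nr_pages(&batch[i]);

			/* old: nr_to_write--; charged 1 page per folio */
			nr_to_write -= nr;	/* new: charge every page */
			if (nr_to_write <= 0)
				break;
		}
		/* prints -3: the budget is exhausted after three folios */
		printf("remaining budget: %ld\n", nr_to_write);
		return 0;
	}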
diff --git a/mm/slab.h b/mm/slab.h
index 6a5633b25eb5..9c0e09d0f81f 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -723,6 +723,7 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s,
 					unsigned int orig_size)
 {
 	unsigned int zero_size = s->object_size;
+	bool kasan_init = init;
 	size_t i;
 
 	flags &= gfp_allowed_mask;
@@ -740,6 +741,17 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s,
 		zero_size = orig_size;
 
 	/*
+	 * When slub_debug is enabled, avoid memory initialization integrated
+	 * into KASAN and instead zero out the memory via the memset below with
+	 * the proper size. Otherwise, KASAN might overwrite SLUB redzones and
+	 * cause false-positive reports. This does not lead to a performance
+	 * penalty on production builds, as slub_debug is not intended to be
+	 * enabled there.
+	 */
+	if (__slub_debug_enabled())
+		kasan_init = false;
+
+	/*
 	 * As memory initialization might be integrated into KASAN,
 	 * kasan_slab_alloc and initialization memset must be
 	 * kept together to avoid discrepancies in behavior.
@@ -747,8 +759,8 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s,
 	 * As p[i] might get tagged, memset and kmemleak hook come after KASAN.
 	 */
 	for (i = 0; i < size; i++) {
-		p[i] = kasan_slab_alloc(s, p[i], flags, init);
-		if (p[i] && init && !kasan_has_integrated_init())
+		p[i] = kasan_slab_alloc(s, p[i], flags, kasan_init);
+		if (p[i] && init && (!kasan_init || !kasan_has_integrated_init()))
 			memset(p[i], 0, zero_size);
 		kmemleak_alloc_recursive(p[i], s->object_size, 1,
 					 s->flags, flags);
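This is the other half of the kasan.h removal above: with slub_debug enabled, kasan_init is forced off so KASAN's integrated, granule-rounded zeroing never runs, and the explicit memset of the precise zero_size does the initialization instead. A compact model of the resulting decision, with booleans standing in for the kernel predicates:

	#include <stdbool.h>
	#include <stdio.h>

	/* Does the explicit memset run? Mirrors the condition in the hunk:
	 * init && (!kasan_init || !kasan_has_integrated_init()). */
	static bool memset_zeroes(bool init, bool kasan_init, bool integrated)
	{
		return init && (!kasan_init || !integrated);
	}

	int main(void)
	{
		/* slub_debug on: kasan_init forced false -> memset zeroes. */
		printf("%d\n", memset_zeroes(true, false, true));	/* 1 */
		/* slub_debug off with HW_TAGS: KASAN zeroes -> no memset. */
		printf("%d\n", memset_zeroes(true, true, true));	/* 0 */
		return 0;
	}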