From aa5fe31b6b59210cb4ea28a59e68781f48eeca74 Mon Sep 17 00:00:00 2001
From: Zi Yan
Date: Wed, 13 Sep 2023 16:12:48 -0400
Subject: mips: use nth_page() in place of direct struct page manipulation

__flush_dcache_pages() is called during hugetlb migration via
migrate_pages() -> migrate_hugetlbs() -> unmap_and_move_huge_page() ->
move_to_new_folio() -> flush_dcache_folio().  With hugetlb and without
sparsemem vmemmap, struct page is not guaranteed to be contiguous beyond
a section, so use nth_page() instead of direct pointer arithmetic.

Without the fix, a wrong address might be used for the data cache page
flush.  No bug has been reported; the fix comes from code inspection.

Link: https://lkml.kernel.org/r/20230913201248.452081-6-zi.yan@sent.com
Fixes: 15fa3e8e3269 ("mips: implement the new page table range API")
Signed-off-by: Zi Yan
Cc: David Hildenbrand
Cc: Matthew Wilcox (Oracle)
Cc: Mike Kravetz
Cc: Mike Rapoport (IBM)
Cc: Muchun Song
Cc: Thomas Bogendoerfer
Cc:
Signed-off-by: Andrew Morton
---
 arch/mips/mm/cache.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/mips')

diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 02042100e267..7f830634dbe7 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -117,7 +117,7 @@ void __flush_dcache_pages(struct page *page, unsigned int nr)
          * get faulted into the tlb (and thus flushed) anyways.
          */
         for (i = 0; i < nr; i++) {
-                addr = (unsigned long)kmap_local_page(page + i);
+                addr = (unsigned long)kmap_local_page(nth_page(page, i));
                 flush_data_cache_page(addr);
                 kunmap_local((void *)addr);
         }
--
cgit v1.2.3


From 55d2a0bd5eadaade850efa9d3a7ffbb0aeb67198 Mon Sep 17 00:00:00 2001
From: Baolin Wang
Date: Mon, 18 Sep 2023 14:31:42 +0800
Subject: mm: add statistics for PUD level pagetable

Recently, we found that cross-die access to pagetable pages on ARM64
machines can cause performance fluctuations in our business.  Currently
there are no PMU events available to track this situation on our ARM64
machines, so accurate pagetable accounting can help to analyze this
issue, but the PUD level pagetable accounting is still missing.

So introduce pagetable_pud_ctor/dtor() to provide accurate PUD pagetable
accounting, and convert the architectures which use the generic PUD
pagetable allocation to add the corresponding PUD pagetable accounting.
Moreover, this patch marks the PUD level pagetable with the PG_table
flag, which will help to do sanity validation in unpoison_memory().
On my test machine, I can see more pagetable statistics after the patch
with the page-types tool:

Before patch:
              flags      page-count       MB  symbolic-flags                                 long-symbolic-flags
 0x0000000004000000           27326      106  __________________________g_________________  pgtable

After patch:
 0x0000000004000000           27541      107  __________________________g_________________  pgtable

Link: https://lkml.kernel.org/r/876c71c03a7e69c17722a690e3225a4f7b172fb2.1695017383.git.baolin.wang@linux.alibaba.com
Signed-off-by: Baolin Wang
Acked-by: Mike Rapoport (IBM)
Acked-by: Vishal Moola (Oracle)
Cc: Andy Lutomirski
Cc: Aneesh Kumar K.V
Cc: Arnd Bergmann
Cc: Borislav Petkov
Cc: Catalin Marinas
Cc: Dave Hansen
Cc: Huacai Chen
Cc: Ingo Molnar
Cc: Matthew Wilcox (Oracle)
Cc: Nicholas Piggin
Cc: Peter Zijlstra
Cc: Thomas Bogendoerfer
Cc: Thomas Gleixner
Cc: Will Deacon
Signed-off-by: Andrew Morton
---
 arch/arm64/include/asm/tlb.h         |  5 ++++-
 arch/loongarch/include/asm/pgalloc.h |  1 +
 arch/mips/include/asm/pgalloc.h      |  1 +
 arch/x86/mm/pgtable.c                |  3 +++
 include/asm-generic/pgalloc.h        |  7 ++++++-
 include/linux/mm.h                   | 16 ++++++++++++++++
 6 files changed, 31 insertions(+), 2 deletions(-)

(limited to 'arch/mips')

diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index 2c29239d05c3..846c563689a8 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -96,7 +96,10 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
 static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp,
                                   unsigned long addr)
 {
-        tlb_remove_ptdesc(tlb, virt_to_ptdesc(pudp));
+        struct ptdesc *ptdesc = virt_to_ptdesc(pudp);
+
+        pagetable_pud_dtor(ptdesc);
+        tlb_remove_ptdesc(tlb, ptdesc);
 }
 #endif

diff --git a/arch/loongarch/include/asm/pgalloc.h b/arch/loongarch/include/asm/pgalloc.h
index 79470f0b4f1d..4e2d6b7ca2ee 100644
--- a/arch/loongarch/include/asm/pgalloc.h
+++ b/arch/loongarch/include/asm/pgalloc.h
@@ -84,6 +84,7 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
         if (!ptdesc)
                 return NULL;

+        pagetable_pud_ctor(ptdesc);
         pud = ptdesc_address(ptdesc);
         pud_init(pud);

diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
index 40e40a7eb94a..f4440edcd8fe 100644
--- a/arch/mips/include/asm/pgalloc.h
+++ b/arch/mips/include/asm/pgalloc.h
@@ -95,6 +95,7 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
         if (!ptdesc)
                 return NULL;

+        pagetable_pud_ctor(ptdesc);
         pud = ptdesc_address(ptdesc);
         pud_init(pud);

diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 9deadf517f14..0cbc1b8e8e3d 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -76,6 +76,9 @@ void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
 #if CONFIG_PGTABLE_LEVELS > 3
 void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
 {
+        struct ptdesc *ptdesc = virt_to_ptdesc(pud);
+
+        pagetable_pud_dtor(ptdesc);
         paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
         paravirt_tlb_remove_table(tlb, virt_to_page(pud));
 }

diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h
index c75d4a753849..879e5f8aa5e9 100644
--- a/include/asm-generic/pgalloc.h
+++ b/include/asm-generic/pgalloc.h
@@ -169,6 +169,8 @@ static inline pud_t *__pud_alloc_one(struct mm_struct *mm, unsigned long addr)
         ptdesc = pagetable_alloc(gfp, 0);
         if (!ptdesc)
                 return NULL;
+
+        pagetable_pud_ctor(ptdesc);
         return ptdesc_address(ptdesc);
 }

@@ -190,8 +192,11 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)

 static inline void __pud_free(struct mm_struct *mm, pud_t *pud)
 {
+        struct ptdesc *ptdesc = virt_to_ptdesc(pud);
+
         BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
-        pagetable_free(virt_to_ptdesc(pud));
+        pagetable_pud_dtor(ptdesc);
+        pagetable_free(ptdesc);
 }

 #ifndef __HAVE_ARCH_PUD_FREE

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 31dc25d3f6b5..126b54b45442 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -3049,6 +3049,22 @@ static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
         return ptl;
 }

+static inline void pagetable_pud_ctor(struct ptdesc *ptdesc)
+{
+        struct folio *folio = ptdesc_folio(ptdesc);
+
+        __folio_set_pgtable(folio);
+        lruvec_stat_add_folio(folio, NR_PAGETABLE);
+}
+
+static inline void pagetable_pud_dtor(struct ptdesc *ptdesc)
+{
+        struct folio *folio = ptdesc_folio(ptdesc);
+
+        __folio_clear_pgtable(folio);
+        lruvec_stat_sub_folio(folio, NR_PAGETABLE);
+}
+
 extern void __init pagecache_init(void);
 extern void free_initmem(void);
--
cgit v1.2.3


From 8da36b26e3d8640364a9e60e0b5c3fa3f55d298b Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)"
Date: Wed, 4 Oct 2023 17:53:10 +0100
Subject: mips: implement xor_unlock_is_negative_byte

Inspired by the mips test_and_change_bit(), this will surely be more
efficient than the generic one defined in filemap.c.

Link: https://lkml.kernel.org/r/20231004165317.1061855-11-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle)
Cc: Albert Ou
Cc: Alexander Gordeev
Cc: Andreas Dilger
Cc: Christian Borntraeger
Cc: Christophe Leroy
Cc: Geert Uytterhoeven
Cc: Heiko Carstens
Cc: Ivan Kokshaysky
Cc: Matt Turner
Cc: Michael Ellerman
Cc: Nicholas Piggin
Cc: Palmer Dabbelt
Cc: Paul Walmsley
Cc: Richard Henderson
Cc: Sven Schnelle
Cc: "Theodore Ts'o"
Cc: Thomas Bogendoerfer
Cc: Vasily Gorbik
Signed-off-by: Andrew Morton
---
 arch/mips/include/asm/bitops.h | 26 +++++++++++++++++++++++++-
 arch/mips/lib/bitops.c         | 14 ++++++++++++++
 2 files changed, 39 insertions(+), 1 deletion(-)

(limited to 'arch/mips')

diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
index b4bf754f7db3..d98a05c478f4 100644
--- a/arch/mips/include/asm/bitops.h
+++ b/arch/mips/include/asm/bitops.h
@@ -73,7 +73,8 @@ int __mips_test_and_clear_bit(unsigned long nr, volatile unsigned long *addr);
 int __mips_test_and_change_bit(unsigned long nr,
                 volatile unsigned long *addr);
-
+bool __mips_xor_is_negative_byte(unsigned long mask,
+                volatile unsigned long *addr);

 /*
  * set_bit - Atomically set a bit in memory
@@ -279,6 +280,29 @@ static inline int test_and_change_bit(unsigned long nr,
         return res;
 }

+static inline bool xor_unlock_is_negative_byte(unsigned long mask,
+                volatile unsigned long *p)
+{
+        unsigned long orig;
+        bool res;
+
+        smp_mb__before_atomic();
+
+        if (!kernel_uses_llsc) {
+                res = __mips_xor_is_negative_byte(mask, p);
+        } else {
+                orig = __test_bit_op(*p, "%0",
+                                     "xor\t%1, %0, %3",
+                                     "ir"(mask));
+                res = (orig & BIT(7)) != 0;
+        }
+
+        smp_llsc_mb();
+
+        return res;
+}
+#define xor_unlock_is_negative_byte xor_unlock_is_negative_byte
+
 #undef __bit_op
 #undef __test_bit_op

diff --git a/arch/mips/lib/bitops.c b/arch/mips/lib/bitops.c
index 116d0bd8b2ae..00aee98e9d54 100644
--- a/arch/mips/lib/bitops.c
+++ b/arch/mips/lib/bitops.c
@@ -146,3 +146,17 @@ int __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
         return res;
 }
 EXPORT_SYMBOL(__mips_test_and_change_bit);
+
+bool __mips_xor_is_negative_byte(unsigned long mask,
+                volatile unsigned long *addr)
+{
+        unsigned long flags;
+        unsigned long data;
+
+        raw_local_irq_save(flags);
+        data = *addr;
+        *addr = data ^ mask;
+        raw_local_irq_restore(flags);
+
+        return (data & BIT(7)) != 0;
+}
--
cgit v1.2.3


From f12fb73b74fd23ca33e3f95fb996f295eeae1da7 Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)"
Date: Wed, 4 Oct 2023 17:53:14 +0100
Subject: mm: delete checks for xor_unlock_is_negative_byte()

Architectures which don't define their own use the one in
asm-generic/bitops/lock.h.  Get rid of all the ifdefs around "maybe we
don't have it".

Link: https://lkml.kernel.org/r/20231004165317.1061855-15-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle)
Acked-by: Geert Uytterhoeven
Cc: Albert Ou
Cc: Alexander Gordeev
Cc: Andreas Dilger
Cc: Christian Borntraeger
Cc: Christophe Leroy
Cc: Heiko Carstens
Cc: Ivan Kokshaysky
Cc: Matt Turner
Cc: Michael Ellerman
Cc: Nicholas Piggin
Cc: Palmer Dabbelt
Cc: Paul Walmsley
Cc: Richard Henderson
Cc: Sven Schnelle
Cc: "Theodore Ts'o"
Cc: Thomas Bogendoerfer
Cc: Vasily Gorbik
Signed-off-by: Andrew Morton
---
 arch/alpha/include/asm/bitops.h                |  1 -
 arch/m68k/include/asm/bitops.h                 |  1 -
 arch/mips/include/asm/bitops.h                 |  1 -
 arch/riscv/include/asm/bitops.h                |  1 -
 include/asm-generic/bitops/instrumented-lock.h |  5 -----
 include/asm-generic/bitops/lock.h              |  1 -
 kernel/kcsan/kcsan_test.c                      |  3 ---
 kernel/kcsan/selftest.c                        |  3 ---
 mm/filemap.c                                   | 30 +-------------------------
 mm/kasan/kasan_test.c                          |  3 ---
 10 files changed, 1 insertion(+), 48 deletions(-)

(limited to 'arch/mips')

diff --git a/arch/alpha/include/asm/bitops.h b/arch/alpha/include/asm/bitops.h
index b50ad6b83e85..3e33621922c3 100644
--- a/arch/alpha/include/asm/bitops.h
+++ b/arch/alpha/include/asm/bitops.h
@@ -305,7 +305,6 @@ static inline bool xor_unlock_is_negative_byte(unsigned long mask,
         return (old & BIT(7)) != 0;
 }
-#define xor_unlock_is_negative_byte xor_unlock_is_negative_byte

 /*
  * ffz = Find First Zero in word.  Undefined if no zero exists,
diff --git a/arch/m68k/include/asm/bitops.h b/arch/m68k/include/asm/bitops.h
index 80ee36095905..14c64a6f1217 100644
--- a/arch/m68k/include/asm/bitops.h
+++ b/arch/m68k/include/asm/bitops.h
@@ -339,7 +339,6 @@ static inline bool xor_unlock_is_negative_byte(unsigned long mask,
         return result;
 #endif
 }
-#define xor_unlock_is_negative_byte xor_unlock_is_negative_byte

 /*
  * The true 68020 and more advanced processors support the "bfffo"

diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
index d98a05c478f4..89f73d1a4ea4 100644
--- a/arch/mips/include/asm/bitops.h
+++ b/arch/mips/include/asm/bitops.h
@@ -301,7 +301,6 @@ static inline bool xor_unlock_is_negative_byte(unsigned long mask,
         return res;
 }
-#define xor_unlock_is_negative_byte xor_unlock_is_negative_byte

 #undef __bit_op
 #undef __test_bit_op

diff --git a/arch/riscv/include/asm/bitops.h b/arch/riscv/include/asm/bitops.h
index 15e3044298a2..65f6eee4ab8d 100644
--- a/arch/riscv/include/asm/bitops.h
+++ b/arch/riscv/include/asm/bitops.h
@@ -202,7 +202,6 @@ static inline bool xor_unlock_is_negative_byte(unsigned long mask,
                 : "memory");
         return (res & BIT(7)) != 0;
 }
-#define xor_unlock_is_negative_byte xor_unlock_is_negative_byte

 #undef __test_and_op_bit
 #undef __op_bit

diff --git a/include/asm-generic/bitops/instrumented-lock.h b/include/asm-generic/bitops/instrumented-lock.h
index e8ea3aeda9a9..542d3727ee4e 100644
--- a/include/asm-generic/bitops/instrumented-lock.h
+++ b/include/asm-generic/bitops/instrumented-lock.h
@@ -58,7 +58,6 @@ static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr)
         return arch_test_and_set_bit_lock(nr, addr);
 }

-#if defined(arch_xor_unlock_is_negative_byte)
 /**
  * xor_unlock_is_negative_byte - XOR a single byte in memory and test if
  * it is negative, for unlock.
@@ -80,8 +79,4 @@ static inline bool xor_unlock_is_negative_byte(unsigned long mask,
         instrument_atomic_write(addr, sizeof(long));
         return arch_xor_unlock_is_negative_byte(mask, addr);
 }
-/* Let everybody know we have it. */
-#define xor_unlock_is_negative_byte xor_unlock_is_negative_byte
-#endif
-
 #endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_LOCK_H */

diff --git a/include/asm-generic/bitops/lock.h b/include/asm-generic/bitops/lock.h
index 6a638e89d130..14d4ec8c5152 100644
--- a/include/asm-generic/bitops/lock.h
+++ b/include/asm-generic/bitops/lock.h
@@ -75,7 +75,6 @@ static inline bool arch_xor_unlock_is_negative_byte(unsigned long mask,
         old = raw_atomic_long_fetch_xor_release(mask, (atomic_long_t *)p);
         return !!(old & BIT(7));
 }
-#define arch_xor_unlock_is_negative_byte arch_xor_unlock_is_negative_byte
 #endif

 #include

diff --git a/kernel/kcsan/kcsan_test.c b/kernel/kcsan/kcsan_test.c
index 1333d23ac4ef..015586217875 100644
--- a/kernel/kcsan/kcsan_test.c
+++ b/kernel/kcsan/kcsan_test.c
@@ -699,12 +699,9 @@ static void test_barrier_nothreads(struct kunit *test)
         KCSAN_EXPECT_RW_BARRIER(spin_unlock(&test_spinlock), true);
         KCSAN_EXPECT_RW_BARRIER(mutex_lock(&test_mutex), false);
         KCSAN_EXPECT_RW_BARRIER(mutex_unlock(&test_mutex), true);
-
-#ifdef xor_unlock_is_negative_byte
         KCSAN_EXPECT_READ_BARRIER(xor_unlock_is_negative_byte(1, &test_var), true);
         KCSAN_EXPECT_WRITE_BARRIER(xor_unlock_is_negative_byte(1, &test_var), true);
         KCSAN_EXPECT_RW_BARRIER(xor_unlock_is_negative_byte(1, &test_var), true);
-#endif
         kcsan_nestable_atomic_end();
 }

diff --git a/kernel/kcsan/selftest.c b/kernel/kcsan/selftest.c
index 619be7417420..84a1200271af 100644
--- a/kernel/kcsan/selftest.c
+++ b/kernel/kcsan/selftest.c
@@ -227,12 +227,9 @@ static bool __init test_barrier(void)
         KCSAN_CHECK_RW_BARRIER(arch_spin_unlock(&arch_spinlock));
         spin_lock(&test_spinlock);
         KCSAN_CHECK_RW_BARRIER(spin_unlock(&test_spinlock));
-
-#ifdef xor_unlock_is_negative_byte
         KCSAN_CHECK_RW_BARRIER(xor_unlock_is_negative_byte(1, &test_var));
         KCSAN_CHECK_READ_BARRIER(xor_unlock_is_negative_byte(1, &test_var));
         KCSAN_CHECK_WRITE_BARRIER(xor_unlock_is_negative_byte(1, &test_var));
-#endif
         kcsan_nestable_atomic_end();

         return ret;

diff --git a/mm/filemap.c b/mm/filemap.c
index c637863f4643..458377e9a184 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1482,34 +1482,6 @@ void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter)
 }
 EXPORT_SYMBOL_GPL(folio_add_wait_queue);

-#ifdef xor_unlock_is_negative_byte
-#define clear_bit_unlock_is_negative_byte(nr, p)        \
-        xor_unlock_is_negative_byte(1 << nr, p)
-#endif
-
-#ifndef clear_bit_unlock_is_negative_byte
-
-/*
- * PG_waiters is the high bit in the same byte as PG_lock.
- *
- * On x86 (and on many other architectures), we can clear PG_lock and
- * test the sign bit at the same time.  But if the architecture does
- * not support that special operation, we just do this all by hand
- * instead.
- *
- * The read of PG_waiters has to be after (or concurrently with) PG_locked
- * being cleared, but a memory barrier should be unnecessary since it is
- * in the same byte as PG_locked.
- */
-static inline bool clear_bit_unlock_is_negative_byte(long nr, volatile void *mem)
-{
-        clear_bit_unlock(nr, mem);
-        /* smp_mb__after_atomic(); */
-        return test_bit(PG_waiters, mem);
-}
-
-#endif
-
 /**
  * folio_unlock - Unlock a locked folio.
  * @folio: The folio.
@@ -1525,7 +1497,7 @@ void folio_unlock(struct folio *folio)
         BUILD_BUG_ON(PG_waiters != 7);
         BUILD_BUG_ON(PG_locked > 7);
         VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
-        if (clear_bit_unlock_is_negative_byte(PG_locked, folio_flags(folio, 0)))
+        if (xor_unlock_is_negative_byte(1 << PG_locked, folio_flags(folio, 0)))
                 folio_wake_bit(folio, PG_locked);
 }
 EXPORT_SYMBOL(folio_unlock);

diff --git a/mm/kasan/kasan_test.c b/mm/kasan/kasan_test.c
index 821c9ea0473a..8281eb42464b 100644
--- a/mm/kasan/kasan_test.c
+++ b/mm/kasan/kasan_test.c
@@ -1098,12 +1098,9 @@ static void kasan_bitops_test_and_modify(struct kunit *test, int nr, void *addr)
         KUNIT_EXPECT_KASAN_FAIL(test, test_and_change_bit(nr, addr));
         KUNIT_EXPECT_KASAN_FAIL(test, __test_and_change_bit(nr, addr));
         KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = test_bit(nr, addr));
-
-#if defined(xor_unlock_is_negative_byte)
         if (nr < 7)
                 KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result =
                                 xor_unlock_is_negative_byte(1 << nr, addr));
-#endif
 }

 static void kasan_bitops_generic(struct kunit *test)
--
cgit v1.2.3
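
As background for the folio_unlock() change above, here is a minimal
stand-alone C sketch of the xor_unlock_is_negative_byte() contract these
patches standardise on: atomically XOR the lock bit away with release
ordering and report whether bit 7 (the waiters bit) was set in the old
value.  This is not kernel code; it assumes the GCC/Clang __atomic
builtins in place of the kernel's atomic helpers, and the PG_LOCKED_BIT /
PG_WAITERS_BIT names and values are illustrative stand-ins for PG_locked
and PG_waiters.

    #include <stdbool.h>
    #include <stdio.h>

    #define PG_LOCKED_BIT   0UL     /* illustrative stand-in for PG_locked */
    #define PG_WAITERS_BIT  7UL     /* illustrative stand-in for PG_waiters */

    static bool xor_unlock_is_negative_byte(unsigned long mask,
                                            volatile unsigned long *p)
    {
            /* Atomically XOR the mask in with release ordering; the old
             * value tells us whether the waiters bit (bit 7) was set. */
            unsigned long old = __atomic_fetch_xor(p, mask, __ATOMIC_RELEASE);

            return (old & (1UL << PG_WAITERS_BIT)) != 0;
    }

    int main(void)
    {
            /* A flags word that is "locked" and has a waiter queued. */
            volatile unsigned long flags =
                    (1UL << PG_LOCKED_BIT) | (1UL << PG_WAITERS_BIT);

            /* Clearing the lock bit also tells us a waiter needs waking. */
            bool waiters = xor_unlock_is_negative_byte(1UL << PG_LOCKED_BIT,
                                                       &flags);

            printf("waiters pending: %s, flags now: %#lx\n",
                   waiters ? "yes" : "no", (unsigned long)flags);
            return 0;
    }

Compiled and run, this prints that waiters are pending and that only bit 7
remains set, mirroring the check folio_unlock() performs before calling
folio_wake_bit().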