author     David Hildenbrand <david@redhat.com>            2022-03-25 04:13:56 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2022-03-25 05:06:51 +0300
commit     7f7609175ff2339a2cffa48ba2474ef928d7eafd (patch)
tree       9fcd1debb0cf0d2e0e815bb3cb8f289cb38a6339 /mm
parent     55c62fa7c53368a9011cd1276c001a1469078c6a (diff)
download   linux-7f7609175ff2339a2cffa48ba2474ef928d7eafd.tar.xz
mm/huge_memory: remove stale locking logic from __split_huge_pmd()
Let's remove the stale logic that was required for reuse_swap_page().

[akpm@linux-foundation.org: simplification, per Yang Shi]
Link: https://lkml.kernel.org/r/20220131162940.210846-10-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: David Rientjes <rientjes@google.com>
Cc: Don Dutile <ddutile@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jann Horn <jannh@google.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Liang Zhang <zhangliang5@huawei.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Mike Rapoport <rppt@linux.ibm.com>
Cc: Nadav Amit <nadav.amit@gmail.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Yang Shi <shy828301@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
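For orientation, the simplified control flow after this patch, condensed from the hunks below, looks roughly like this. Parts of the function that the hunks do not show (the second line of the signature, the range end argument, the early folio checks, and the notifier setup/teardown) are reconstructed from context or elided in comments, so treat it as a sketch rather than the verbatim source:

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct folio *folio)
{
	spinlock_t *ptl;
	struct mmu_notifier_range range;

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
				address & HPAGE_PMD_MASK,
				(address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE);
	/* ... notifier start, pmd_lock(), and the folio checks that may
	 * "goto out" are elided here ... */

	/*
	 * With reuse_swap_page() gone, there is no need to take (or retry
	 * for) the folio lock of an anonymous folio before splitting; any
	 * huge, devmap, or migration-entry PMD is split directly under ptl.
	 */
	if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) ||
	    is_pmd_migration_entry(*pmd))
		__split_huge_pmd_locked(vma, pmd, range.start, freeze);

out:
	spin_unlock(ptl);
	/* ... mmu_notifier range end handling, see the comment at the end of
	 * the second hunk ... */
}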
Diffstat (limited to 'mm')
-rw-r--r--  mm/huge_memory.c  40
1 file changed, 4 insertions, 36 deletions
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index cc580d90117b..2fe38212e07c 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2133,8 +2133,6 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 {
 	spinlock_t *ptl;
 	struct mmu_notifier_range range;
-	bool do_unlock_folio = false;
-	pmd_t _pmd;
 
 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
 				address & HPAGE_PMD_MASK,
@@ -2153,42 +2151,12 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 			goto out;
 	}
 
-repeat:
-	if (pmd_trans_huge(*pmd)) {
-		if (!folio) {
-			folio = page_folio(pmd_page(*pmd));
-			/*
-			 * An anonymous page must be locked, to ensure that a
-			 * concurrent reuse_swap_page() sees stable mapcount;
-			 * but reuse_swap_page() is not used on shmem or file,
-			 * and page lock must not be taken when zap_pmd_range()
-			 * calls __split_huge_pmd() while i_mmap_lock is held.
-			 */
-			if (folio_test_anon(folio)) {
-				if (unlikely(!folio_trylock(folio))) {
-					folio_get(folio);
-					_pmd = *pmd;
-					spin_unlock(ptl);
-					folio_lock(folio);
-					spin_lock(ptl);
-					if (unlikely(!pmd_same(*pmd, _pmd))) {
-						folio_unlock(folio);
-						folio_put(folio);
-						folio = NULL;
-						goto repeat;
-					}
-					folio_put(folio);
-				}
-				do_unlock_folio = true;
-			}
-		}
-	} else if (!(pmd_devmap(*pmd) || is_pmd_migration_entry(*pmd)))
-		goto out;
-	__split_huge_pmd_locked(vma, pmd, range.start, freeze);
+	if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) ||
+	    is_pmd_migration_entry(*pmd))
+		__split_huge_pmd_locked(vma, pmd, range.start, freeze);
+
 out:
 	spin_unlock(ptl);
-	if (do_unlock_folio)
-		folio_unlock(folio);
 	/*
 	 * No need to double call mmu_notifier->invalidate_range() callback.
 	 * They are 3 cases to consider inside __split_huge_pmd_locked():