author    Lokesh Gidra <lokeshgidra@google.com>       2024-02-15 21:27:56 +0300
committer Andrew Morton <akpm@linux-foundation.org>   2024-02-23 02:27:20 +0300
commit    867a43a34ff8a38772212045262b2c9b77807ea3 (patch)
tree      90ef57890d1a59cd4aa66bbb05d32d781f4651ce /mm/huge_memory.c
parent    32af81af2f6f4c23b1b4ff68410e91da660af102 (diff)
userfaultfd: use per-vma locks in userfaultfd operations
All userfaultfd operations, except write-protect, opportunistically use
per-vma locks to lock vmas.  On failure, attempt again inside mmap_lock
critical section.

Write-protect operation requires mmap_lock as it iterates over multiple
vmas.

Link: https://lkml.kernel.org/r/20240215182756.3448972-5-lokeshgidra@google.com
Signed-off-by: Lokesh Gidra <lokeshgidra@google.com>
Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Brian Geffon <bgeffon@google.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jann Horn <jannh@google.com>
Cc: Kalesh Singh <kaleshsingh@google.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Nicolas Geoffray <ngeoffray@google.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Tim Murray <timmurray@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
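For illustration, a minimal sketch of the "per-vma lock first, mmap_lock on
failure" pattern the message describes, assuming a hypothetical helper name;
it is not the uffd locking helper this series adds in mm/userfaultfd.c.
lock_vma_under_rcu(), vma_end_read(), vma_lookup() and
mmap_read_lock()/mmap_read_unlock() are existing kernel APIs:

#include <linux/mm.h>

/*
 * Sketch only: opportunistically take the per-vma read lock; if that
 * fails (vma being modified or not found in the RCU walk), fall back
 * to looking the vma up under the mmap_lock read side.  The real uffd
 * helpers additionally validate that the vma is registered with
 * userfaultfd before using it.
 */
static struct vm_area_struct *uffd_lock_vma_sketch(struct mm_struct *mm,
						   unsigned long address)
{
	struct vm_area_struct *vma;

	/* Fast path: per-vma read lock, no mmap_lock involvement. */
	vma = lock_vma_under_rcu(mm, address);
	if (vma)
		return vma;	/* release with vma_end_read(vma) */

	/* Slow path: retry inside the mmap_lock critical section. */
	mmap_read_lock(mm);
	vma = vma_lookup(mm, address);
	if (!vma)
		mmap_read_unlock(mm);
	return vma;		/* release with mmap_read_unlock(mm) */
}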
Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r--  mm/huge_memory.c  5
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 14888b15121e..28341a5067fb 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2158,7 +2158,7 @@ unlock:
 
 #ifdef CONFIG_USERFAULTFD
 /*
- * The PT lock for src_pmd and the mmap_lock for reading are held by
+ * The PT lock for src_pmd and dst_vma/src_vma (for reading) are locked by
  * the caller, but it must return after releasing the page_table_lock.
  * Just move the page from src_pmd to dst_pmd if possible.
  * Return zero if succeeded in moving the page, -EAGAIN if it needs to be
@@ -2181,7 +2181,8 @@ int move_pages_huge_pmd(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, pm
 	src_ptl = pmd_lockptr(mm, src_pmd);
 
 	lockdep_assert_held(src_ptl);
-	mmap_assert_locked(mm);
+	vma_assert_locked(src_vma);
+	vma_assert_locked(dst_vma);
 
 	/* Sanity checks before the operation */
 	if (WARN_ON_ONCE(!pmd_none(dst_pmdval)) || WARN_ON_ONCE(src_addr & ~HPAGE_PMD_MASK) ||
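The hunk above swaps the single mmap_assert_locked(mm) for per-vma assertions
on both vmas: the caller is now expected to hold read locks on src_vma and
dst_vma (plus the src_pmd page-table lock) rather than the mmap_lock.  A
minimal caller-side sketch of that contract follows, assuming the argument
order of the move_pages_huge_pmd() definition in this file and a hypothetical
wrapper name; it is not the actual call site in mm/userfaultfd.c:

/*
 * Sketch of the contract asserted above: both vmas are already
 * read-locked by the caller, the source PMD's page-table lock is
 * taken around the call, and move_pages_huge_pmd() releases that
 * PT lock itself before returning; the vma locks stay with the caller.
 */
static int move_huge_pmd_sketch(struct mm_struct *mm,
				struct vm_area_struct *dst_vma,
				struct vm_area_struct *src_vma,
				pmd_t *dst_pmd, pmd_t *src_pmd,
				unsigned long dst_addr, unsigned long src_addr)
{
	pmd_t dst_pmdval = pmdp_get_lockless(dst_pmd);
	spinlock_t *ptl;

	/* Caller holds the per-vma read locks on both vmas. */
	vma_assert_locked(src_vma);
	vma_assert_locked(dst_vma);

	ptl = pmd_trans_huge_lock(src_pmd, src_vma);	/* PT lock for src_pmd */
	if (!ptl)
		return -EAGAIN;		/* not (or no longer) a huge pmd */

	/* Returns with the PT lock already dropped. */
	return move_pages_huge_pmd(mm, dst_pmd, src_pmd, dst_pmdval,
				   dst_vma, src_vma, dst_addr, src_addr);
}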