author    Hugh Dickins <hughd@google.com>  2023-08-21 22:51:20 +0300
committer Andrew Morton <akpm@linux-foundation.org>  2023-08-25 02:20:15 +0300
commit    a98460494b16db9c377e55bc13e5407a0eb79fe8
tree      ac30c81bdfda0782d86b08c566bbf0ae7dc914d3 /mm
parent    6c1419730822fe991fc15bfd7059f6872a71a7af
mm/khugepaged: fix collapse_pte_mapped_thp() versus uffd

Jann Horn demonstrated how userfaultfd ioctl UFFDIO_COPY into a private
shmem mapping can add valid PTEs to a page table which
collapse_pte_mapped_thp() thought it had emptied: page lock on the huge
page is enough to protect against WP faults (which find the PTE has been
cleared), but not enough to protect against userfaultfd.  "BUG: Bad
rss-counter state" followed.

retract_page_tables() protects against this by checking !vma->anon_vma;
but we know that MADV_COLLAPSE needs to be able to work on private shmem
mappings, even those with an anon_vma prepared for another part of the
mapping; and we know that MADV_COLLAPSE needs to work on shared shmem
mappings which are userfaultfd_armed().  Whether it needs to work on
private shmem mappings which are userfaultfd_armed(), I'm not so sure:
but assume that it does.

Just for this case, take the pmd_lock() two steps earlier: not because
it gives any protection against this case itself, but because ptlock
nests inside it, and it's the dropping of ptlock which let the bug in.
In other cases, continue to minimize the pmd_lock() hold time.

Link: https://lkml.kernel.org/r/4d31abf5-56c0-9f3d-d12f-c9317936691@google.com
Fixes: 1043173eb5eb ("mm/khugepaged: collapse_pte_mapped_thp() with mmap_read_lock()")
Signed-off-by: Hugh Dickins <hughd@google.com>
Reported-by: Jann Horn <jannh@google.com>
Closes: https://lore.kernel.org/linux-mm/CAG48ez0FxiRC4d3VTu_a9h=rg5FW-kYD5Rg5xo_RDBM0LTTqZQ@mail.gmail.com/
Acked-by: Peter Xu <peterx@redhat.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
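
[Editor's illustration] For concreteness, a userspace sketch of the racing
ingredients: MADV_COLLAPSE on a private shmem mapping in one thread,
UFFDIO_COPY into the same range in another.  This is a minimal shape
sketch, not Jann Horn's actual reproducer: the HPAGE/PAGE constants,
the setup order and the timing are assumptions, real triggering needs
much more careful orchestration, and error checking is elided.

/* sketch-reproducer.c: shape of the race only; assumptions as above. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <pthread.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef MADV_COLLAPSE
#define MADV_COLLAPSE	25		/* in uapi headers since Linux 6.1 */
#endif

#define HPAGE	(2UL << 20)		/* assume 2MB PMD-sized huge pages */
#define PAGE	4096UL			/* assume 4K base pages */

static int uffd;
static char *map;

static void *copier(void *arg)
{
	static char src[PAGE];
	struct uffdio_copy copy = {
		.dst = (unsigned long)map,	/* page-aligned destination */
		.src = (unsigned long)src,
		.len = PAGE,
	};

	(void)arg;
	/* On a private VMA, mfill_atomic_pte() inserts an as-if-COWed
	 * PTE into the page table which, on an unfixed kernel,
	 * collapse_pte_mapped_thp() believes it has already emptied. */
	ioctl(uffd, UFFDIO_COPY, &copy);
	return NULL;
}

int main(void)
{
	struct uffdio_api api = { .api = UFFD_API };
	struct uffdio_register reg;
	void *reserve;
	pthread_t t;
	int memfd;

	memfd = memfd_create("thp", 0);
	ftruncate(memfd, HPAGE);

	/* Reserve 2*HPAGE so the shmem mapping can be HPAGE-aligned. */
	reserve = mmap(NULL, 2 * HPAGE, PROT_NONE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	map = (char *)(((unsigned long)reserve + HPAGE - 1) & ~(HPAGE - 1));

	/* Private shmem mapping: the case the commit message singles out. */
	map = mmap(map, HPAGE, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_FIXED, memfd, 0);
	memset(map, 1, HPAGE);		/* populate the pte-mapped range */

	uffd = syscall(__NR_userfaultfd, O_CLOEXEC);
	ioctl(uffd, UFFDIO_API, &api);
	memset(&reg, 0, sizeof(reg));
	reg.range.start = (unsigned long)map;
	reg.range.len = HPAGE;
	reg.mode = UFFDIO_REGISTER_MODE_MISSING;
	ioctl(uffd, UFFDIO_REGISTER, &reg);	/* VMA now userfaultfd_armed() */

	madvise(map, HPAGE, MADV_DONTNEED);	/* leave missing PTEs behind */

	pthread_create(&t, NULL, copier, NULL);
	madvise(map, HPAGE, MADV_COLLAPSE);	/* races collapse_pte_mapped_thp() */
	pthread_join(&t, NULL);
	return 0;
}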
Diffstat (limited to 'mm')
-rw-r--r--  mm/khugepaged.c | 38
1 file changed, 29 insertions(+), 9 deletions(-)
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 40d43eccdee8..d5650541083a 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1476,7 +1476,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
struct page *hpage;
pte_t *start_pte, *pte;
pmd_t *pmd, pgt_pmd;
- spinlock_t *pml, *ptl;
+ spinlock_t *pml = NULL, *ptl;
int nr_ptes = 0, result = SCAN_FAIL;
int i;
@@ -1572,9 +1572,25 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
haddr, haddr + HPAGE_PMD_SIZE);
mmu_notifier_invalidate_range_start(&range);
notified = true;
- start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
+
+ /*
+ * pmd_lock covers a wider range than ptl, and (if split from mm's
+ * page_table_lock) ptl nests inside pml. The less time we hold pml,
+ * the better; but userfaultfd's mfill_atomic_pte() on a private VMA
+ * inserts a valid as-if-COWed PTE without even looking up page cache.
+ * So page lock of hpage does not protect from it, so we must not drop
+ * ptl before pgt_pmd is removed, so uffd private needs pml taken now.
+ */
+ if (userfaultfd_armed(vma) && !(vma->vm_flags & VM_SHARED))
+ pml = pmd_lock(mm, pmd);
+
+ start_pte = pte_offset_map_nolock(mm, pmd, haddr, &ptl);
if (!start_pte) /* mmap_lock + page lock should prevent this */
goto abort;
+ if (!pml)
+ spin_lock(ptl);
+ else if (ptl != pml)
+ spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
/* step 2: clear page table and adjust rmap */
for (i = 0, addr = haddr, pte = start_pte;
@@ -1608,7 +1624,9 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
nr_ptes++;
}
- pte_unmap_unlock(start_pte, ptl);
+ pte_unmap(start_pte);
+ if (!pml)
+ spin_unlock(ptl);
/* step 3: set proper refcount and mm_counters. */
if (nr_ptes) {
@@ -1616,12 +1634,12 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
add_mm_counter(mm, mm_counter_file(hpage), -nr_ptes);
}
- /* step 4: remove page table */
-
- /* Huge page lock is still held, so page table must remain empty */
- pml = pmd_lock(mm, pmd);
- if (ptl != pml)
- spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
+ /* step 4: remove empty page table */
+ if (!pml) {
+ pml = pmd_lock(mm, pmd);
+ if (ptl != pml)
+ spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
+ }
pgt_pmd = pmdp_collapse_flush(vma, haddr, pmd);
pmdp_get_lockless_sync();
if (ptl != pml)
@@ -1648,6 +1666,8 @@ abort:
}
if (start_pte)
pte_unmap_unlock(start_pte, ptl);
+ if (pml && pml != ptl)
+ spin_unlock(pml);
if (notified)
mmu_notifier_invalidate_range_end(&range);
drop_hpage:
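
[Editor's illustration] The lock ordering the patch establishes is easier
to see outside the diff.  Below is a hedged userspace model, not kernel
code: pthread mutexes stand in for pmd_lock (pml) and the pte lock (ptl),
and split ptlocks are assumed, i.e. ptl != pml.  In the uffd-armed private
case pml is taken before ptl and ptl is never dropped between step 2 and
step 4; in every other case ptl is dropped after step 2 and pml is only
taken around step 4.

/* lock-order-model.c: ordering model only; assumptions as above. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t pml = PTHREAD_MUTEX_INITIALIZER;	/* pmd_lock */
static pthread_mutex_t ptl = PTHREAD_MUTEX_INITIALIZER;	/* pte lock */

static void collapse_model(bool uffd_private)
{
	/* uffd private: take pml first (ptl nests inside it), so ptl is
	 * never dropped between emptying the ptes and removing the page
	 * table -- closing the UFFDIO_COPY window. */
	if (uffd_private)
		pthread_mutex_lock(&pml);

	pthread_mutex_lock(&ptl);
	/* step 2: clear page table and adjust rmap (under ptl) */
	if (!uffd_private)
		pthread_mutex_unlock(&ptl);	/* minimize pml hold time */

	/* step 3: set proper refcount and mm_counters */

	/* step 4: remove the still-empty page table */
	if (!uffd_private) {
		pthread_mutex_lock(&pml);
		pthread_mutex_lock(&ptl);	/* spin_lock_nested() in the patch */
	}
	/* pgt_pmd = pmdp_collapse_flush(vma, haddr, pmd); */
	pthread_mutex_unlock(&ptl);
	pthread_mutex_unlock(&pml);
}

int main(void)
{
	collapse_model(false);	/* common case: pml only around step 4 */
	collapse_model(true);	/* uffd-armed private VMA: pml held throughout */
	puts("uffd case never drops ptl between step 2 and step 4");
	return 0;
}

The asymmetry is deliberate: holding pml for the whole collapse would
close the window in every case, but the commit message asks to keep
minimizing pmd_lock() hold time wherever the extra protection is not
needed.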