author    Matthew Wilcox (Oracle) <willy@infradead.org>    2023-10-06 22:53:15 +0300
committer Andrew Morton <akpm@linux-foundation.org>        2023-10-19 00:34:14 +0300
commit    4ed4379881aa62588aba6442a9f362a8cf7624e6 (patch)
tree      a373d55dedf8fe2cdf03abc18ad29dd381d084d0 /mm/memory.c
parent    164b06f238b986317131e6b61b2f22aabcbc2cc0 (diff)
mm: handle shared faults under the VMA lock
There are many implementations of ->fault and some of them depend on
mmap_lock being held.  All vm_ops that implement ->map_pages() end up
calling filemap_fault(), which I have audited to be sure it does not rely
on mmap_lock.  So (for now) key off ->map_pages existing as a flag to
indicate that it's safe to call ->fault while only holding the vma lock.

Link: https://lkml.kernel.org/r/20231006195318.4087158-4-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
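For context, a minimal illustrative sketch (not part of this patch) of the kind of
vm_operations_struct the heuristic targets: per the message above, every vm_ops that
provides ->map_pages ends up calling filemap_fault(), so a file-backed mapping of the
shape below is what the new ->map_pages check treats as safe while holding only the
per-VMA lock. The struct name is hypothetical; the callbacks mirror the kernel's
generic_file_vm_ops.

#include <linux/mm.h>

/*
 * Illustrative only: file-backed vm_ops of this shape (->map_pages set,
 * ->fault backed by filemap_fault()) are what the new check accepts
 * while holding only the per-VMA lock.
 */
static const struct vm_operations_struct example_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= filemap_page_mkwrite,
};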
Diffstat (limited to 'mm/memory.c')
-rw-r--r--  mm/memory.c  22
1 file changed, 18 insertions(+), 4 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index c437d3562d18..7b240a67f207 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3042,6 +3042,21 @@ static inline void wp_page_reuse(struct vm_fault *vmf)
 	count_vm_event(PGREUSE);
 }
 
+/*
+ * We could add a bitflag somewhere, but for now, we know that all
+ * vm_ops that have a ->map_pages have been audited and don't need
+ * the mmap_lock to be held.
+ */
+static inline vm_fault_t vmf_can_call_fault(const struct vm_fault *vmf)
+{
+	struct vm_area_struct *vma = vmf->vma;
+
+	if (vma->vm_ops->map_pages || !(vmf->flags & FAULT_FLAG_VMA_LOCK))
+		return 0;
+	vma_end_read(vma);
+	return VM_FAULT_RETRY;
+}
+
 static vm_fault_t vmf_anon_prepare(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
@@ -4669,10 +4684,9 @@ static vm_fault_t do_shared_fault(struct vm_fault *vmf)
 	vm_fault_t ret, tmp;
 	struct folio *folio;
 
-	if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
-		vma_end_read(vma);
-		return VM_FAULT_RETRY;
-	}
+	ret = vmf_can_call_fault(vmf);
+	if (ret)
+		return ret;
 
 	ret = __do_fault(vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
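For reference, a hedged caller-side sketch (not from this patch) of the contract the
helper relies on: returning VM_FAULT_RETRY after vma_end_read() sends the fault back
to the architecture's mmap_lock path. The function name below is hypothetical and the
flow is modelled on existing per-VMA-lock fault handling; details vary by architecture.

#include <linux/mm.h>

/*
 * Hypothetical sketch of the fallback contract: try the fault under the
 * per-VMA lock first; a VM_FAULT_RETRY result (as produced by
 * vmf_can_call_fault()) tells the caller to retry with mmap_lock held.
 */
static vm_fault_t example_try_vma_locked_fault(struct mm_struct *mm,
					       unsigned long address,
					       unsigned int flags,
					       struct pt_regs *regs)
{
	struct vm_area_struct *vma;
	vm_fault_t fault;

	vma = lock_vma_under_rcu(mm, address);
	if (!vma)
		return VM_FAULT_RETRY;	/* no per-VMA lock; use mmap_lock */

	fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
		vma_end_read(vma);

	return fault;	/* VM_FAULT_RETRY => caller retries under mmap_lock */
}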