author		Ralph Campbell <rcampbell@nvidia.com>	2019-08-24 01:17:53 +0300
committer	Jason Gunthorpe <jgg@mellanox.com>	2019-08-28 01:27:07 +0300
commit		c18ce674d548c00faa6b7e760bacbaf1f39315f3 (patch)
tree		33ca4bae9d6c21f40ba4a095fb80294c968943eb /mm/hmm.c
parent		6c64f2bbe79cf3b770ac60ae79442322bd76d55e (diff)
download	linux-c18ce674d548c00faa6b7e760bacbaf1f39315f3.tar.xz
mm/hmm: hmm_range_fault() infinite loop
Normally, callers to handle_mm_fault() are supposed to check the
vma->vm_flags first. hmm_range_fault() checks for VM_READ but doesn't
check for VM_WRITE if the caller requests a page to be faulted in with
write permission (via the hmm_range.pfns[] value). If the vma is write
protected, this can result in an infinite loop:

  hmm_range_fault()
    walk_page_range()
      ...
      hmm_vma_walk_hole()
        hmm_vma_walk_hole_()
          hmm_vma_do_fault()
            handle_mm_fault(FAULT_FLAG_WRITE)
            /* returns VM_FAULT_WRITE */
          /* returns -EBUSY */
        /* returns -EBUSY */
      /* returns -EBUSY */
    /* loops on -EBUSY and range->valid */

Prevent this by checking for vma->vm_flags & VM_WRITE before calling
handle_mm_fault().

Link: https://lore.kernel.org/r/20190823221753.2514-3-rcampbell@nvidia.com
Signed-off-by: Ralph Campbell <rcampbell@nvidia.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jason Gunthorpe <jgg@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
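To make the failure mode concrete, here is a paraphrased sketch of the
retry structure in hmm_range_fault(), simplified from the call chain
above (not the literal kernel source; variable names are illustrative):

	long ret;

	do {
		ret = walk_page_range(range->start, range->end, &mm_walk);
		/*
		 * hmm_vma_walk_hole_() returns -EBUSY after raising the
		 * fault so that the walk is retried and the freshly
		 * faulted PTE is picked up. But handle_mm_fault() with
		 * FAULT_FLAG_WRITE on a vma lacking VM_WRITE can never
		 * install a writable PTE, so every retry faults again
		 * and returns -EBUSY again: with range->valid still
		 * true, this loop never terminates.
		 */
	} while (ret == -EBUSY && range->valid);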
Diffstat (limited to 'mm/hmm.c')
-rw-r--r--	mm/hmm.c	3
1 file changed, 3 insertions(+), 0 deletions(-)
diff --git a/mm/hmm.c b/mm/hmm.c
index 29371485fe94..4882b83aeccb 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -292,6 +292,9 @@ static int hmm_vma_walk_hole_(unsigned long addr, unsigned long end,
 	hmm_vma_walk->last = addr;
 	i = (addr - range->start) >> PAGE_SHIFT;
 
+	if (write_fault && walk->vma && !(walk->vma->vm_flags & VM_WRITE))
+		return -EPERM;
+
 	for (; addr < end; addr += PAGE_SIZE, i++) {
 		pfns[i] = range->values[HMM_PFN_NONE];
 		if (fault || write_fault) {
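With this check in place, the permission mismatch surfaces as an error
instead of an endless retry: hmm_vma_walk_hole_() returns -EPERM,
walk_page_range() propagates it, and since it is not -EBUSY,
hmm_range_fault() returns it to the caller. A hypothetical driver-side
sketch of the caller-visible change follows (the setup, the flags[]
encoding, and the exact hmm_range_fault() signature are assumptions;
the API was in flux across releases in this period):

	/*
	 * Request the page with write permission via range.pfns[], as
	 * described in the commit message above (encoding illustrative).
	 */
	range.pfns[0] = range.flags[HMM_PFN_VALID] | range.flags[HMM_PFN_WRITE];

	ret = hmm_range_fault(&range, 0);
	if (ret == -EPERM) {
		/*
		 * The vma is write protected: before this fix the call
		 * would spin on -EBUSY forever; now the driver can fail
		 * the request or fall back to read-only access.
		 */
	}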