author    Alex Williamson <alex.williamson@redhat.com>  2020-09-07 13:17:20 +0300
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2020-09-09 20:04:28 +0300
commit    6df210762f80f174affd2bbcb346f2f0b233546c (patch)
tree      4b6fe00c57f1cbcc4b184cc137ef5ae71ea258a4 /drivers/vfio
parent    43eadb9e37a2b0888d38e1ebe5d2206928a38e3b (diff)
download  linux-6df210762f80f174affd2bbcb346f2f0b233546c.tar.xz
vfio/type1: Support faulting PFNMAP vmas
commit 41311242221e3482b20bfed10fa4d9db98d87016 upstream.

With conversion to follow_pfn(), DMA mapping a PFNMAP range depends on
the range being faulted into the vma.  Add support to manually provide
that, in the same way as done on KVM with hva_to_pfn_remapped().

Reviewed-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
[Ajay: Regenerated the patch for v4.19]
Signed-off-by: Ajay Kaher <akaher@vmware.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
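For context, a hedged sketch of the user-space scenario this change serves: a PFNMAP vma typically comes from mmap()ing a device region through a VFIO device fd (the driver backs it with remap_pfn_range()), and that virtual address is then handed to the type1 backend via VFIO_IOMMU_MAP_DMA on the container fd. The helper name dma_map_pfnmap_region, the container/device fds, and the region offset/size/iova below are illustrative placeholders, not part of the patch; only the <linux/vfio.h> structure and ioctls are real.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <linux/vfio.h>

/* Illustrative helper, not from the kernel tree or from this patch. */
static int dma_map_pfnmap_region(int container_fd, int device_fd,
				 off_t region_offset, size_t size,
				 uint64_t iova)
{
	struct vfio_iommu_type1_dma_map map;
	void *vaddr;

	/*
	 * The device driver's mmap handler typically installs this range
	 * with remap_pfn_range(), so the resulting vma is VM_PFNMAP and
	 * has no struct pages behind it.
	 */
	vaddr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		     device_fd, region_offset);
	if (vaddr == MAP_FAILED)
		return -1;

	/*
	 * Before this patch the type1 backend only succeeded if the PTEs
	 * were already present, so callers had to pre-touch the range,
	 * e.g.:
	 *
	 *	for (size_t off = 0; off < size; off += 4096)
	 *		(void)((volatile uint8_t *)vaddr)[off];
	 *
	 * With follow_fault_pfn() the kernel faults the range in itself.
	 */

	memset(&map, 0, sizeof(map));
	map.argsz = sizeof(map);
	map.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
	map.vaddr = (uintptr_t)vaddr;
	map.iova  = iova;
	map.size  = size;

	return ioctl(container_fd, VFIO_IOMMU_MAP_DMA, &map);
}

Before this change, the MAP_DMA ioctl on such a range could fail unless the pages had already been faulted in (for example by a pre-touch loop like the one in the comment); afterwards the kernel faults the range in on demand.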
Diffstat (limited to 'drivers/vfio')
-rw-r--r--	drivers/vfio/vfio_iommu_type1.c	36
1 file changed, 33 insertions(+), 3 deletions(-)
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 52083b710b87..05d8553635ee 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -343,6 +343,32 @@ static int put_pfn(unsigned long pfn, int prot)
 	return 0;
 }
 
+static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
+			    unsigned long vaddr, unsigned long *pfn,
+			    bool write_fault)
+{
+	int ret;
+
+	ret = follow_pfn(vma, vaddr, pfn);
+	if (ret) {
+		bool unlocked = false;
+
+		ret = fixup_user_fault(NULL, mm, vaddr,
+				       FAULT_FLAG_REMOTE |
+				       (write_fault ? FAULT_FLAG_WRITE : 0),
+				       &unlocked);
+		if (unlocked)
+			return -EAGAIN;
+
+		if (ret)
+			return ret;
+
+		ret = follow_pfn(vma, vaddr, pfn);
+	}
+
+	return ret;
+}
+
 static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
 			 int prot, unsigned long *pfn)
 {
@@ -382,12 +408,16 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
 
 	down_read(&mm->mmap_sem);
 
+retry:
 	vma = find_vma_intersection(mm, vaddr, vaddr + 1);
 
 	if (vma && vma->vm_flags & VM_PFNMAP) {
-		if (!follow_pfn(vma, vaddr, pfn) &&
-		    is_invalid_reserved_pfn(*pfn))
-			ret = 0;
+		ret = follow_fault_pfn(vma, mm, vaddr, pfn, prot & IOMMU_WRITE);
+		if (ret == -EAGAIN)
+			goto retry;
+
+		if (!ret && !is_invalid_reserved_pfn(*pfn))
+			ret = -EFAULT;
 	}
 
 	up_read(&mm->mmap_sem);
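A note on the retry path above: fixup_user_fault() sets 'unlocked' when it had to drop and re-acquire mmap_sem to handle the fault, and in that window the vma returned by find_vma_intersection() may have changed or gone away. follow_fault_pfn() therefore turns that case into -EAGAIN, and vaddr_get_pfn() jumps back to the retry label to repeat the lookup under the re-held semaphore rather than trusting a potentially stale vma pointer.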