author      Jean-Philippe Brucker <jean-philippe@linaro.org>    2023-05-15 14:39:50 +0300
committer   Greg Kroah-Hartman <gregkh@linuxfoundation.org>     2023-07-19 17:21:20 +0300
commit      2128318c91303d07689d47414c129dd7976129f0 (patch)
tree        aa6bc24e160f9c3680a0f66a70efde0bbcf4f170 /drivers/iommu
parent      0f2c11ccfdcc97b42b78bf407176d220cb990ada (diff)
download    linux-2128318c91303d07689d47414c129dd7976129f0.tar.xz
iommu/virtio: Return size mapped for a detached domain
[ Upstream commit 7061b6af34686e7e2364b7240cfb061293218f2d ]

When map() is called on a detached domain, the domain does not exist in
the device so we do not send a MAP request, but we do update the
internal mapping tree, to be replayed on the next attach. Since this
constitutes a successful iommu_map() call, return *mapped in this case
too.

Fixes: 7e62edd7a33a ("iommu/virtio: Add map/unmap_pages() callbacks implementation")
Signed-off-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/20230515113946.1017624-3-jean-philippe@linaro.org
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Signed-off-by: Sasha Levin <sashal@kernel.org>
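Why reporting *mapped matters even though no MAP request goes out: callers such as the iommu core's mapping loop advance the IOVA and the remaining size by whatever the driver reports back through *mapped, so a call that returns success but reports zero bytes can stall the caller. The snippet below is a minimal, self-contained sketch of that caller pattern, not kernel code; fake_map_pages() and the loop are illustrative stand-ins that assume the post-patch behaviour of always reporting the size on success.

#include <stdio.h>
#include <stddef.h>

/* Hypothetical stand-in for a map_pages() callback: even when there is
 * no device to notify, a successful call reports the size it handled. */
static int fake_map_pages(unsigned long iova, size_t size, size_t *mapped)
{
        printf("recorded mapping at iova=0x%lx, size=%zu\n", iova, size);
        *mapped = size;     /* post-patch behaviour */
        return 0;
}

int main(void)
{
        unsigned long iova = 0x1000;
        size_t remaining = 4 * 4096;

        while (remaining) {
                size_t mapped = 0;
                int ret = fake_map_pages(iova, 4096, &mapped);

                if (ret)
                        return 1;
                /* If a successful call left mapped at 0, this loop
                 * would never make progress. */
                iova += mapped;
                remaining -= mapped;
        }
        printf("all pages reported as mapped\n");
        return 0;
}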
Diffstat (limited to 'drivers/iommu')
-rw-r--r--    drivers/iommu/virtio-iommu.c    33
1 file changed, 17 insertions(+), 16 deletions(-)
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index fe02ac772b65..fd86ccb709ec 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -834,25 +834,26 @@ static int viommu_map_pages(struct iommu_domain *domain, unsigned long iova,
         if (ret)
                 return ret;
 
-        map = (struct virtio_iommu_req_map) {
-                .head.type      = VIRTIO_IOMMU_T_MAP,
-                .domain         = cpu_to_le32(vdomain->id),
-                .virt_start     = cpu_to_le64(iova),
-                .phys_start     = cpu_to_le64(paddr),
-                .virt_end       = cpu_to_le64(end),
-                .flags          = cpu_to_le32(flags),
-        };
-
-        if (!vdomain->nr_endpoints)
-                return 0;
+        if (vdomain->nr_endpoints) {
+                map = (struct virtio_iommu_req_map) {
+                        .head.type      = VIRTIO_IOMMU_T_MAP,
+                        .domain         = cpu_to_le32(vdomain->id),
+                        .virt_start     = cpu_to_le64(iova),
+                        .phys_start     = cpu_to_le64(paddr),
+                        .virt_end       = cpu_to_le64(end),
+                        .flags          = cpu_to_le32(flags),
+                };
 
-        ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
-        if (ret)
-                viommu_del_mappings(vdomain, iova, end);
-        else if (mapped)
+                ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
+                if (ret) {
+                        viommu_del_mappings(vdomain, iova, end);
+                        return ret;
+                }
+        }
+        if (mapped)
                 *mapped = size;
 
-        return ret;
+        return 0;
 }
 
 static size_t viommu_unmap_pages(struct iommu_domain *domain, unsigned long iova,
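The commit message explains that a mapping created while the domain is detached is only recorded, to be replayed on the next attach. Below is a rough, self-contained model of that record-then-replay idea; names such as record_mapping() and attach_endpoint() are illustrative, not the driver's, and the real driver keeps its mappings in an interval tree and sends a MAP request per entry when an endpoint attaches.

#include <stdio.h>
#include <stdlib.h>

struct mapping {
        unsigned long iova;
        unsigned long paddr;
        size_t size;
        struct mapping *next;
};

static struct mapping *stored;   /* mappings recorded while detached */
static int nr_endpoints;         /* 0 means the domain is detached   */

static int record_mapping(unsigned long iova, unsigned long paddr, size_t size)
{
        struct mapping *m = malloc(sizeof(*m));

        if (!m)
                return -1;
        m->iova = iova;
        m->paddr = paddr;
        m->size = size;
        m->next = stored;
        stored = m;

        if (!nr_endpoints)
                return 0;      /* detached: record only, send nothing yet */
        printf("MAP request: iova=0x%lx size=%zu\n", iova, size);
        return 0;
}

static void attach_endpoint(void)
{
        nr_endpoints++;
        /* Replay every recorded mapping to the newly attached endpoint. */
        for (struct mapping *m = stored; m; m = m->next)
                printf("replay MAP: iova=0x%lx size=%zu\n", m->iova, m->size);
}

int main(void)
{
        record_mapping(0x1000, 0x80000, 4096);  /* domain still detached */
        attach_endpoint();                      /* recorded mapping replayed here */
        return 0;
}

In both this model and the driver, the map call is treated as successful whether or not a request was sent, which is exactly why the patch also reports the mapped size in the detached case.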