author      Will Deacon <will@kernel.org>    2019-07-02 18:43:48 +0300
committer   Will Deacon <will@kernel.org>    2019-07-24 15:35:27 +0300
commit      a7d20dc19d9ea7012227be5144353012ffa3ddc4 (patch)
tree        1068bf66db02fd09120ab41353378e648b6e781f /drivers/vfio
parent      298f78895b081911e0b3605f07d79ebd3d4cf7b0 (diff)
download    linux-a7d20dc19d9ea7012227be5144353012ffa3ddc4.tar.xz
iommu: Introduce struct iommu_iotlb_gather for batching TLB flushes
To permit batching of TLB flushes across multiple calls to the IOMMU driver's ->unmap() implementation, introduce a new structure for tracking the address range to be flushed and the granularity at which the flushing is required. This is hooked into the IOMMU API and its callers are updated to make use of the new structure. Subsequent patches will plumb this into the IOMMU drivers as well, but for now the gathering information is ignored.

Signed-off-by: Will Deacon <will@kernel.org>
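As a rough sketch of the pattern this enables, the snippet below gathers several unmaps into a single TLB flush, using only the calls visible in this patch (iommu_iotlb_gather_init(), iommu_unmap_fast() and iommu_tlb_sync()). The helper unmap_range_batched() and its parameters are illustrative assumptions, not code from this commit:

#include <linux/iommu.h>

/*
 * Illustrative helper (not from this commit): unmap [start, end) from
 * "domain" and issue one deferred TLB invalidation at the end, rather
 * than one invalidation per unmap call.
 */
static void unmap_range_batched(struct iommu_domain *domain,
				unsigned long start, unsigned long end)
{
	struct iommu_iotlb_gather gather;
	unsigned long iova = start;

	iommu_iotlb_gather_init(&gather);

	while (iova < end) {
		size_t unmapped;

		/*
		 * Tear down the mappings and record the affected range
		 * in "gather"; no TLB invalidation is issued yet.
		 */
		unmapped = iommu_unmap_fast(domain, iova, end - iova,
					    &gather);
		if (!unmapped)
			break;
		iova += unmapped;
	}

	/* One sync flushes everything gathered above. */
	iommu_tlb_sync(domain, &gather);
}

This mirrors the VFIO change below: vfio_unmap_unpin() initialises the gather structure once, threads it through unmap_unpin_fast() into iommu_unmap_fast(), and vfio_sync_unpin() performs the deferred iommu_tlb_sync().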
Diffstat (limited to 'drivers/vfio')
-rw-r--r--  drivers/vfio/vfio_iommu_type1.c | 26 +++++++++++++++++---------
1 file changed, 17 insertions(+), 9 deletions(-)
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index fad7fd8c167c..ad830abe1021 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -650,12 +650,13 @@ unpin_exit:
}
static long vfio_sync_unpin(struct vfio_dma *dma, struct vfio_domain *domain,
- struct list_head *regions)
+ struct list_head *regions,
+ struct iommu_iotlb_gather *iotlb_gather)
{
long unlocked = 0;
struct vfio_regions *entry, *next;
- iommu_tlb_sync(domain->domain);
+ iommu_tlb_sync(domain->domain, iotlb_gather);
list_for_each_entry_safe(entry, next, regions, list) {
unlocked += vfio_unpin_pages_remote(dma,
@@ -685,13 +686,15 @@ static size_t unmap_unpin_fast(struct vfio_domain *domain,
struct vfio_dma *dma, dma_addr_t *iova,
size_t len, phys_addr_t phys, long *unlocked,
struct list_head *unmapped_list,
- int *unmapped_cnt)
+ int *unmapped_cnt,
+ struct iommu_iotlb_gather *iotlb_gather)
{
size_t unmapped = 0;
struct vfio_regions *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (entry) {
- unmapped = iommu_unmap_fast(domain->domain, *iova, len);
+ unmapped = iommu_unmap_fast(domain->domain, *iova, len,
+ iotlb_gather);
if (!unmapped) {
kfree(entry);
@@ -711,8 +714,8 @@ static size_t unmap_unpin_fast(struct vfio_domain *domain,
* or in case of errors.
*/
if (*unmapped_cnt >= VFIO_IOMMU_TLB_SYNC_MAX || !unmapped) {
- *unlocked += vfio_sync_unpin(dma, domain,
- unmapped_list);
+ *unlocked += vfio_sync_unpin(dma, domain, unmapped_list,
+ iotlb_gather);
*unmapped_cnt = 0;
}
@@ -743,6 +746,7 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
dma_addr_t iova = dma->iova, end = dma->iova + dma->size;
struct vfio_domain *domain, *d;
LIST_HEAD(unmapped_region_list);
+ struct iommu_iotlb_gather iotlb_gather;
int unmapped_region_cnt = 0;
long unlocked = 0;
@@ -767,6 +771,7 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
cond_resched();
}
+ iommu_iotlb_gather_init(&iotlb_gather);
while (iova < end) {
size_t unmapped, len;
phys_addr_t phys, next;
@@ -795,7 +800,8 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
*/
unmapped = unmap_unpin_fast(domain, dma, &iova, len, phys,
&unlocked, &unmapped_region_list,
- &unmapped_region_cnt);
+ &unmapped_region_cnt,
+ &iotlb_gather);
if (!unmapped) {
unmapped = unmap_unpin_slow(domain, dma, &iova, len,
phys, &unlocked);
@@ -806,8 +812,10 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
dma->iommu_mapped = false;
- if (unmapped_region_cnt)
- unlocked += vfio_sync_unpin(dma, domain, &unmapped_region_list);
+ if (unmapped_region_cnt) {
+ unlocked += vfio_sync_unpin(dma, domain, &unmapped_region_list,
+ &iotlb_gather);
+ }
if (do_accounting) {
vfio_lock_acct(dma, -unlocked, true);