From eb5ecd1a40e2098f805fb63cb07817ac48826e40 Mon Sep 17 00:00:00 2001
From: Suravee Suthikulpanit
Date: Wed, 21 Feb 2018 14:19:45 +0700
Subject: iommu/amd: Add support for fast IOTLB flushing

Since the AMD IOMMU driver currently flushes all TLB entries when the
page size is more than one, use the same interface for both
iommu_ops.flush_iotlb_all() and iommu_ops.iotlb_sync().

Cc: Joerg Roedel
Signed-off-by: Suravee Suthikulpanit
Signed-off-by: Joerg Roedel
---
 drivers/iommu/amd_iommu.c | 19 ++++++++++++++++---
 1 file changed, 16 insertions(+), 3 deletions(-)

diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 14efeb306a9f..997a947ddc3b 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -3056,9 +3056,6 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
 	unmap_size = iommu_unmap_page(domain, iova, page_size);
 	mutex_unlock(&domain->api_lock);
 
-	domain_flush_tlb_pde(domain);
-	domain_flush_complete(domain);
-
 	return unmap_size;
 }
 
@@ -3176,6 +3173,19 @@ static bool amd_iommu_is_attach_deferred(struct iommu_domain *domain,
 	return dev_data->defer_attach;
 }
 
+static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
+{
+	struct protection_domain *dom = to_pdomain(domain);
+
+	domain_flush_tlb_pde(dom);
+	domain_flush_complete(dom);
+}
+
+static void amd_iommu_iotlb_range_add(struct iommu_domain *domain,
+				      unsigned long iova, size_t size)
+{
+}
+
 const struct iommu_ops amd_iommu_ops = {
 	.capable = amd_iommu_capable,
 	.domain_alloc = amd_iommu_domain_alloc,
@@ -3194,6 +3204,9 @@ const struct iommu_ops amd_iommu_ops = {
 	.apply_resv_region = amd_iommu_apply_resv_region,
 	.is_attach_deferred = amd_iommu_is_attach_deferred,
 	.pgsize_bitmap = AMD_IOMMU_PGSIZES,
+	.flush_iotlb_all = amd_iommu_flush_iotlb_all,
+	.iotlb_range_add = amd_iommu_iotlb_range_add,
+	.iotlb_sync = amd_iommu_flush_iotlb_all,
 };
 
 /*****************************************************************************
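
A note on the resulting flow (not part of the patch): amd_iommu_unmap()
no longer flushes inline; invalidation is deferred to the new callbacks,
so callers on the fast-unmap path can batch unmaps and issue one flush
at the end. Below is a minimal sketch under the IOMMU core interfaces of
this kernel generation (iommu_unmap_fast(), iommu_tlb_range_add(), and
iommu_tlb_sync() from include/linux/iommu.h); the function
example_unmap_range() is hypothetical:

#include <linux/iommu.h>

/*
 * Hypothetical caller: tear down a range without per-call flushing,
 * then issue a single deferred invalidation.  On an AMD IOMMU domain,
 * iommu_tlb_sync() lands in amd_iommu_flush_iotlb_all(), i.e. a full
 * domain_flush_tlb_pde() + domain_flush_complete(); the range_add
 * callback is intentionally a no-op because the whole TLB is flushed
 * anyway.
 */
static void example_unmap_range(struct iommu_domain *domain,
				unsigned long iova, size_t size)
{
	size_t unmapped = iommu_unmap_fast(domain, iova, size);

	iommu_tlb_range_add(domain, iova, unmapped);
	iommu_tlb_sync(domain);
}

Plain iommu_unmap() in this core already performs the range_add and
sync steps internally, so existing callers keep their flushing
semantics after this patch.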