author     Linus Torvalds <torvalds@linux-foundation.org>   2016-08-01 14:25:10 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>   2016-08-01 14:25:10 +0300
commit     dd9671172a06830071c8edb31fb2176f222a2c6e (patch)
tree       a0c343374f9586a7a2afec1a3a7be745d2939ae1 /drivers/iommu/dmar.c
parent     77d9ada23f207ec3d6258985c882f4fb653693f1 (diff)
parent     f360d3241f5557f241d55b959e6e65070e77992e (diff)
download   linux-dd9671172a06830071c8edb31fb2176f222a2c6e.tar.xz
Merge tag 'iommu-updates-v4.8' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
Pull IOMMU updates from Joerg Roedel:

 - big-endian support and preparation for deferred probing for the
   Exynos IOMMU driver

 - simplifications in iommu-group id handling

 - support for Mediatek generation one IOMMU hardware

 - conversion of the AMD IOMMU driver to use the generic IOVA
   allocator. This driver now also benefits from the recent scalability
   improvements in the IOVA code.

 - preparations to use generic DMA mapping code in the Rockchip IOMMU
   driver

 - device tree adaptation and conversion to use generic page-table code
   for the MSM IOMMU driver

 - an iova_to_phys optimization in the ARM-SMMU driver to greatly
   improve page-table teardown performance with VFIO

 - various other small fixes and conversions

* tag 'iommu-updates-v4.8' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (59 commits)
  iommu/amd: Initialize dma-ops domains with 3-level page-table
  iommu/amd: Update Alias-DTE in update_device_table()
  iommu/vt-d: Return error code in domain_context_mapping_one()
  iommu/amd: Use container_of to get dma_ops_domain
  iommu/amd: Flush iova queue before releasing dma_ops_domain
  iommu/amd: Handle IOMMU_DOMAIN_DMA in ops->domain_free call-back
  iommu/amd: Use dev_data->domain in get_domain()
  iommu/amd: Optimize map_sg and unmap_sg
  iommu/amd: Introduce dir2prot() helper
  iommu/amd: Implement timeout to flush unmap queues
  iommu/amd: Implement flush queue
  iommu/amd: Allow NULL pointer parameter for domain_flush_complete()
  iommu/amd: Set up data structures for flush queue
  iommu/amd: Remove align-parameter from __map_single()
  iommu/amd: Remove other remains of old address allocator
  iommu/amd: Make use of the generic IOVA allocator
  iommu/amd: Remove special mapping code for dma_ops path
  iommu/amd: Pass gfp-flags to iommu_map_page()
  iommu/amd: Implement apply_dm_region call-back
  iommu/amd: Create a list of reserved iova addresses
  ...
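The AMD IOMMU conversion mentioned above moves the driver onto the generic IOVA allocator (alloc_iova()/free_iova() from <linux/iova.h>) instead of its private address allocator. As a rough, hedged illustration of what such an allocator does, the sketch below is a self-contained userspace model, not the kernel implementation: all names here (iova_domain_model, model_alloc_iova, model_free_iova) are invented for this example, and the strategy is simplified to handing out page-frame ranges top-down below a caller-supplied limit, kept in a small sorted array.

/*
 * Userspace model of an IOVA range allocator: allocations are carved from
 * the highest free hole at or below a limit pfn, loosely mirroring how a
 * DMA-address allocator hands out ranges below a device's DMA limit.
 */
#include <stdio.h>
#include <string.h>

#define MAX_RANGES 64

struct iova_range {
	unsigned long pfn_lo;	/* first page frame of the allocation */
	unsigned long pfn_hi;	/* last page frame of the allocation */
};

struct iova_domain_model {
	struct iova_range ranges[MAX_RANGES];	/* sorted, highest range first */
	int nr_ranges;
};

/* Allocate 'size' pages from the highest free hole at or below 'limit_pfn'. */
static long model_alloc_iova(struct iova_domain_model *d,
			     unsigned long size, unsigned long limit_pfn)
{
	unsigned long hi = limit_pfn;
	int i;

	for (i = 0; i <= d->nr_ranges; i++) {
		unsigned long lo = (i < d->nr_ranges) ? d->ranges[i].pfn_hi + 1 : 0;

		if (hi >= lo && hi - lo + 1 >= size) {
			struct iova_range r = { hi - size + 1, hi };

			if (d->nr_ranges == MAX_RANGES)
				return -1;
			/* Keep the array sorted by shifting lower ranges down. */
			memmove(&d->ranges[i + 1], &d->ranges[i],
				(size_t)(d->nr_ranges - i) * sizeof(r));
			d->ranges[i] = r;
			d->nr_ranges++;
			return (long)r.pfn_lo;
		}
		if (i < d->nr_ranges) {
			if (d->ranges[i].pfn_lo == 0)
				break;		/* nothing left below this range */
			hi = d->ranges[i].pfn_lo - 1;
		}
	}
	return -1;	/* no hole of the requested size below limit_pfn */
}

/* Release a previously allocated range identified by its starting pfn. */
static void model_free_iova(struct iova_domain_model *d, unsigned long pfn_lo)
{
	int i;

	for (i = 0; i < d->nr_ranges; i++) {
		if (d->ranges[i].pfn_lo == pfn_lo) {
			memmove(&d->ranges[i], &d->ranges[i + 1],
				(size_t)(d->nr_ranges - i - 1) * sizeof(d->ranges[0]));
			d->nr_ranges--;
			return;
		}
	}
}

int main(void)
{
	struct iova_domain_model dom = { .nr_ranges = 0 };
	/* pfn 0xfffff is the last 4 KiB page below the 4 GiB boundary. */
	long a = model_alloc_iova(&dom, 8, 0xfffff);
	long b = model_alloc_iova(&dom, 4, 0xfffff);	/* lands just below 'a' */

	printf("a = %#lx, b = %#lx\n", (unsigned long)a, (unsigned long)b);
	model_free_iova(&dom, (unsigned long)a);
	printf("ranges still allocated: %d\n", dom.nr_ranges);
	return 0;
}

The real allocator tracks ranges in a red-black tree and adds caching for scalability; the model only shows the allocate-below-a-limit/free contract that the converted driver relies on.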
Diffstat (limited to 'drivers/iommu/dmar.c')
-rw-r--r--   drivers/iommu/dmar.c | 21
 1 file changed, 14 insertions, 7 deletions
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 7330a66e2b7e..58470f5ced04 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -241,8 +241,20 @@ int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
 		if (!dmar_match_pci_path(info, scope->bus, path, level))
 			continue;
 
-		if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT) ^
-		    (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL)) {
+		/*
+		 * We expect devices with endpoint scope to have normal PCI
+		 * headers, and devices with bridge scope to have bridge PCI
+		 * headers. However PCI NTB devices may be listed in the
+		 * DMAR table with bridge scope, even though they have a
+		 * normal PCI header. NTB devices are identified by class
+		 * "BRIDGE_OTHER" (0680h) - we don't declare a scope mismatch
+		 * for this special case.
+		 */
+		if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
+		     info->dev->hdr_type != PCI_HEADER_TYPE_NORMAL) ||
+		    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
+		     (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
+		      info->dev->class >> 8 != PCI_CLASS_BRIDGE_OTHER))) {
 			pr_warn("Device scope type does not match for %s\n",
 				pci_name(info->dev));
 			return -EINVAL;
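For readers following the hunk above outside the kernel tree, here is a hedged standalone sketch of the same scope-matching rule. The enums, struct dev_model, CLASS_BRIDGE_OTHER and scope_mismatch() are illustrative stand-ins for the kernel's ACPI_DMAR_SCOPE_TYPE_*, struct pci_dev fields, PCI_CLASS_BRIDGE_OTHER and the inline check in dmar_insert_dev_scope(); only the shape of the predicate, including the BRIDGE_OTHER (0680h) exception for NTB devices, mirrors the patch.

/*
 * Standalone model of the new scope check: endpoint scope requires a normal
 * PCI header; bridge scope requires a bridge header, except that a device
 * with a normal header and class BRIDGE_OTHER (an NTB) is tolerated.
 */
#include <stdbool.h>
#include <stdio.h>

enum scope_type { SCOPE_ENDPOINT, SCOPE_BRIDGE };
enum hdr_type   { HDR_NORMAL, HDR_BRIDGE };

#define CLASS_BRIDGE_OTHER 0x0680	/* class/subclass word for "other bridge" */

struct dev_model {
	enum hdr_type hdr_type;		/* models pci_dev->hdr_type */
	unsigned int  class_word;	/* models pci_dev->class >> 8 */
};

/* Returns true when the DMAR scope entry and the PCI header disagree. */
static bool scope_mismatch(enum scope_type scope, const struct dev_model *dev)
{
	if (scope == SCOPE_ENDPOINT && dev->hdr_type != HDR_NORMAL)
		return true;
	if (scope == SCOPE_BRIDGE && dev->hdr_type == HDR_NORMAL &&
	    dev->class_word != CLASS_BRIDGE_OTHER)
		return true;
	return false;
}

int main(void)
{
	struct dev_model ntb = { .hdr_type = HDR_NORMAL, .class_word = 0x0680 };
	struct dev_model nic = { .hdr_type = HDR_NORMAL, .class_word = 0x0200 };

	printf("NTB under bridge scope: mismatch=%d\n",
	       scope_mismatch(SCOPE_BRIDGE, &ntb));	/* 0: accepted */
	printf("NIC under bridge scope: mismatch=%d\n",
	       scope_mismatch(SCOPE_BRIDGE, &nic));	/* 1: still flagged */
	return 0;
}

With these inputs the NTB device is accepted under bridge scope, while an ordinary endpoint-class device under bridge scope is still reported as a mismatch, matching the old XOR check's behaviour for everything except the NTB case.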
@@ -1155,8 +1167,6 @@ static int qi_check_fault(struct intel_iommu *iommu, int index)
 			(unsigned long long)qi->desc[index].high);
 		memcpy(&qi->desc[index], &qi->desc[wait_index],
 			sizeof(struct qi_desc));
-		__iommu_flush_cache(iommu, &qi->desc[index],
-				sizeof(struct qi_desc));
 		writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
 		return -EINVAL;
 	}
@@ -1231,9 +1241,6 @@ restart:
 
 	hw[wait_index] = wait_desc;
 
-	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
-	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));
-
 	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
 	qi->free_cnt -= 2;