Diffstat (limited to 'drivers/iommu/intel/iommu.c')
-rw-r--r--  drivers/iommu/intel/iommu.c | 279
1 file changed, 172 insertions(+), 107 deletions(-)
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 06b00b5363d8..ee0932307d64 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -44,10 +44,10 @@
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
-#include <trace/events/intel_iommu.h>
#include "../irq_remapping.h"
#include "pasid.h"
+#include "cap_audit.h"
#define ROOT_SIZE VTD_PAGE_SIZE
#define CONTEXT_SIZE VTD_PAGE_SIZE
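
The new "cap_audit.h" header centralizes the cross-IOMMU capability checks that this patch deletes from iommu.c further down. A rough sketch of the interface the rest of this diff relies on — all of these names appear in the patch, but the exact prototypes are assumptions, since cap_audit.h itself is not shown here:

enum cap_audit_type {
	CAP_AUDIT_STATIC_DMAR,		/* boot-time audit of all DRHD units */
	CAP_AUDIT_HOTPLUG_DMAR,		/* audit of one hot-added DRHD unit */
};

int intel_cap_audit(enum cap_audit_type type, struct intel_iommu *iommu);
bool scalable_mode_support(void);	/* all active IOMMUs support scalable mode */
bool pasid_mode_support(void);		/* all active IOMMUs support PASID */
bool intel_cap_flts_sanity(void);	/* all support first-level translation */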
@@ -316,8 +316,18 @@ struct dmar_atsr_unit {
u8 include_all:1; /* include all ports */
};
+struct dmar_satc_unit {
+ struct list_head list; /* list of SATC units */
+ struct acpi_dmar_header *hdr; /* ACPI header */
+ struct dmar_dev_scope *devices; /* target devices */
+ struct intel_iommu *iommu; /* the corresponding iommu */
+ int devices_cnt; /* target device count */
+ u8 atc_required:1; /* ATS is required */
+};
+
static LIST_HEAD(dmar_atsr_units);
static LIST_HEAD(dmar_rmrr_units);
+static LIST_HEAD(dmar_satc_units);
#define for_each_rmrr_units(rmrr) \
list_for_each_entry(rmrr, &dmar_rmrr_units, list)
@@ -1017,8 +1027,11 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
- if (domain_use_first_level(domain))
+ if (domain_use_first_level(domain)) {
pteval |= DMA_FL_PTE_XD | DMA_FL_PTE_US;
+ if (domain->domain.type == IOMMU_DOMAIN_DMA)
+ pteval |= DMA_FL_PTE_ACCESS;
+ }
if (cmpxchg64(&pte->val, 0ULL, pteval))
/* Someone else set it while we were thinking; use theirs. */
free_pgtable_page(tmp_page);
@@ -1861,25 +1874,7 @@ static void free_dmar_iommu(struct intel_iommu *iommu)
*/
static bool first_level_by_default(void)
{
- struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu;
- static int first_level_support = -1;
-
- if (likely(first_level_support != -1))
- return first_level_support;
-
- first_level_support = 1;
-
- rcu_read_lock();
- for_each_active_iommu(iommu, drhd) {
- if (!sm_supported(iommu) || !ecap_flts(iommu->ecap)) {
- first_level_support = 0;
- break;
- }
- }
- rcu_read_unlock();
-
- return first_level_support;
+ return scalable_mode_support() && intel_cap_flts_sanity();
}
static struct dmar_domain *alloc_domain(int flags)
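
The cached, lazily computed answer is gone: first_level_by_default() now simply asks the capability-audit code, which checks all IOMMUs once up front. Semantically the new check is equivalent to the removed loop; a sketch of what intel_cap_flts_sanity() must amount to (an assumption — its body lives in cap_audit.c, which is not part of this diff, and scalable_mode_support() already covers the sm_supported() half):

/* Hypothetical equivalent of the loop removed above. */
bool intel_cap_flts_sanity(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	bool ret = true;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (!ecap_flts(iommu->ecap)) {
			ret = false;
			break;
		}
	}
	rcu_read_unlock();

	return ret;
}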
@@ -2298,9 +2293,9 @@ static int
__domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
unsigned long phys_pfn, unsigned long nr_pages, int prot)
{
- struct dma_pte *first_pte = NULL, *pte = NULL;
unsigned int largepage_lvl = 0;
unsigned long lvl_pages = 0;
+ struct dma_pte *pte = NULL;
phys_addr_t pteval;
u64 attr;
@@ -2310,9 +2305,16 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
return -EINVAL;
attr = prot & (DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP);
- if (domain_use_first_level(domain))
+ if (domain_use_first_level(domain)) {
attr |= DMA_FL_PTE_PRESENT | DMA_FL_PTE_XD | DMA_FL_PTE_US;
+ if (domain->domain.type == IOMMU_DOMAIN_DMA) {
+ attr |= DMA_FL_PTE_ACCESS;
+ if (prot & DMA_PTE_WRITE)
+ attr |= DMA_FL_PTE_DIRTY;
+ }
+ }
+
pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | attr;
while (nr_pages > 0) {
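
For first-level (scalable-mode) tables, which use the CPU page-table format, the Access bit — and the Dirty bit for writable mappings — is now preset on DMA-API domains, so the hardware never has to update A/D bits during a walk. Unrolled for one concrete case (flag names are from this file; this is just the hunk above written out, not new code):

/* A writable DMA-API mapping on a first-level leaf PTE ends up with: */
u64 attr = DMA_PTE_READ | DMA_PTE_WRITE		  /* from prot */
	 | DMA_FL_PTE_PRESENT | DMA_FL_PTE_US	  /* present, user */
	 | DMA_FL_PTE_XD			  /* never executable */
	 | DMA_FL_PTE_ACCESS | DMA_FL_PTE_DIRTY;  /* A/D bits preset */
phys_addr_t pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | attr;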
@@ -2322,7 +2324,7 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
largepage_lvl = hardware_largepage_caps(domain, iov_pfn,
phys_pfn, nr_pages);
- first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
+ pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
if (!pte)
return -ENOMEM;
/* It is a large page */
@@ -2383,34 +2385,14 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
* recalculate 'pte' and switch back to smaller pages for the
* end of the mapping, if the trailing size is not enough to
* use another superpage (i.e. nr_pages < lvl_pages).
+ *
+ * We leave clflush for the leaf pte changes to iotlb_sync_map()
+ * callback.
*/
pte++;
if (!nr_pages || first_pte_in_page(pte) ||
- (largepage_lvl > 1 && nr_pages < lvl_pages)) {
- domain_flush_cache(domain, first_pte,
- (void *)pte - (void *)first_pte);
+ (largepage_lvl > 1 && nr_pages < lvl_pages))
pte = NULL;
- }
- }
-
- return 0;
-}
-
-static int
-domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
- unsigned long phys_pfn, unsigned long nr_pages, int prot)
-{
- int iommu_id, ret;
- struct intel_iommu *iommu;
-
- /* Do the real mapping first */
- ret = __domain_mapping(domain, iov_pfn, phys_pfn, nr_pages, prot);
- if (ret)
- return ret;
-
- for_each_domain_iommu(iommu_id, domain) {
- iommu = g_iommus[iommu_id];
- __mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
}
return 0;
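
domain_mapping() disappears because its two jobs are now split: __domain_mapping() only writes PTEs, while the cache flush and the per-IOMMU notification move into the new iotlb_sync_map callback added at the end of this patch. A simplified sketch of the resulting call order, assuming the generic IOMMU core of this era — this is not code from the patch, and the real __iommu_map() also validates alignment and iterates over pgsize_bitmap:

static int iommu_map_sketch(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t size, int prot)
{
	const struct iommu_ops *ops = domain->ops;
	int ret;

	/* 1. Write the page-table entries; no clflush, no notification. */
	ret = ops->map(domain, iova, paddr, size, prot, GFP_KERNEL);
	if (ret)
		return ret;

	/* 2. One batched flush + notification for the whole range. */
	if (ops->iotlb_sync_map)
		ops->iotlb_sync_map(domain, iova, size);

	return 0;
}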
@@ -3197,6 +3179,10 @@ static int __init init_dmars(void)
goto error;
}
+ ret = intel_cap_audit(CAP_AUDIT_STATIC_DMAR, NULL);
+ if (ret)
+ goto free_iommu;
+
for_each_iommu(iommu, drhd) {
if (drhd->ignored) {
iommu_disable_translation(iommu);
@@ -3740,6 +3726,57 @@ int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
return 0;
}
+static struct dmar_satc_unit *dmar_find_satc(struct acpi_dmar_satc *satc)
+{
+ struct dmar_satc_unit *satcu;
+ struct acpi_dmar_satc *tmp;
+
+ list_for_each_entry_rcu(satcu, &dmar_satc_units, list,
+ dmar_rcu_check()) {
+ tmp = (struct acpi_dmar_satc *)satcu->hdr;
+ if (satc->segment != tmp->segment)
+ continue;
+ if (satc->header.length != tmp->header.length)
+ continue;
+ if (memcmp(satc, tmp, satc->header.length) == 0)
+ return satcu;
+ }
+
+ return NULL;
+}
+
+int dmar_parse_one_satc(struct acpi_dmar_header *hdr, void *arg)
+{
+ struct acpi_dmar_satc *satc;
+ struct dmar_satc_unit *satcu;
+
+ if (system_state >= SYSTEM_RUNNING && !intel_iommu_enabled)
+ return 0;
+
+ satc = container_of(hdr, struct acpi_dmar_satc, header);
+ satcu = dmar_find_satc(satc);
+ if (satcu)
+ return 0;
+
+ satcu = kzalloc(sizeof(*satcu) + hdr->length, GFP_KERNEL);
+ if (!satcu)
+ return -ENOMEM;
+
+ satcu->hdr = (void *)(satcu + 1);
+ memcpy(satcu->hdr, hdr, hdr->length);
+ satcu->atc_required = satc->flags & 0x1;
+ satcu->devices = dmar_alloc_dev_scope((void *)(satc + 1),
+ (void *)satc + satc->header.length,
+ &satcu->devices_cnt);
+ if (satcu->devices_cnt && !satcu->devices) {
+ kfree(satcu);
+ return -ENOMEM;
+ }
+ list_add_rcu(&satcu->list, &dmar_satc_units);
+
+ return 0;
+}
+
static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
{
int sp, ret;
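
dmar_parse_one_satc() above copies the raw ACPI SATC (SoC Integrated Address Translation Cache) entry and records whether ATS is required for the listed devices (flags bit 0). For reference, the ACPICA layout the parsing assumes — reproduced here from the ACPICA headers as an aid, not part of the patch:

struct acpi_dmar_satc {
	struct acpi_dmar_header header;
	u8 flags;	/* bit 0: ATC required */
	u8 reserved;
	u16 segment;	/* PCI segment number */
};
/* Device-scope entries follow the fixed part, which is why the parser
 * walks from (satc + 1) up to (void *)satc + satc->header.length. */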
@@ -3748,6 +3785,10 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
if (g_iommus[iommu->seq_id])
return 0;
+ ret = intel_cap_audit(CAP_AUDIT_HOTPLUG_DMAR, iommu);
+ if (ret)
+ goto out;
+
if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
pr_warn("%s: Doesn't support hardware pass through.\n",
iommu->name);
@@ -3843,6 +3884,7 @@ static void intel_iommu_free_dmars(void)
{
struct dmar_rmrr_unit *rmrru, *rmrr_n;
struct dmar_atsr_unit *atsru, *atsr_n;
+ struct dmar_satc_unit *satcu, *satc_n;
list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
list_del(&rmrru->list);
@@ -3854,6 +3896,11 @@ static void intel_iommu_free_dmars(void)
list_del(&atsru->list);
intel_iommu_free_atsr(atsru);
}
+ list_for_each_entry_safe(satcu, satc_n, &dmar_satc_units, list) {
+ list_del(&satcu->list);
+ dmar_free_dev_scope(&satcu->devices, &satcu->devices_cnt);
+ kfree(satcu);
+ }
}
int dmar_find_matched_atsr_unit(struct pci_dev *dev)
@@ -3905,8 +3952,10 @@ int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
int ret;
struct dmar_rmrr_unit *rmrru;
struct dmar_atsr_unit *atsru;
+ struct dmar_satc_unit *satcu;
struct acpi_dmar_atsr *atsr;
struct acpi_dmar_reserved_memory *rmrr;
+ struct acpi_dmar_satc *satc;
if (!intel_iommu_enabled && system_state >= SYSTEM_RUNNING)
return 0;
@@ -3947,6 +3996,23 @@ int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
break;
}
}
+ list_for_each_entry(satcu, &dmar_satc_units, list) {
+ satc = container_of(satcu->hdr, struct acpi_dmar_satc, header);
+ if (info->event == BUS_NOTIFY_ADD_DEVICE) {
+ ret = dmar_insert_dev_scope(info, (void *)(satc + 1),
+ (void *)satc + satc->header.length,
+ satc->segment, satcu->devices,
+ satcu->devices_cnt);
+ if (ret > 0)
+ break;
+ else if (ret < 0)
+ return ret;
+ } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
+ if (dmar_remove_dev_scope(info, satc->segment,
+ satcu->devices, satcu->devices_cnt))
+ break;
+ }
+ }
return 0;
}
@@ -4290,6 +4356,9 @@ int __init intel_iommu_init(void)
if (list_empty(&dmar_atsr_units))
pr_info("No ATSR found\n");
+ if (list_empty(&dmar_satc_units))
+ pr_info("No SATC found\n");
+
if (dmar_map_gfx)
intel_iommu_gfx_mapped = 1;
@@ -4943,7 +5012,6 @@ static int intel_iommu_map(struct iommu_domain *domain,
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
u64 max_addr;
int prot = 0;
- int ret;
if (iommu_prot & IOMMU_READ)
prot |= DMA_PTE_READ;
@@ -4969,9 +5037,8 @@ static int intel_iommu_map(struct iommu_domain *domain,
/* Round up size to next multiple of PAGE_SIZE, if it and
the low bits of hpa would take us onto the next page */
size = aligned_nrpages(hpa, size);
- ret = domain_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
- hpa >> VTD_PAGE_SHIFT, size, prot);
- return ret;
+ return __domain_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
+ hpa >> VTD_PAGE_SHIFT, size, prot);
}
static size_t intel_iommu_unmap(struct iommu_domain *domain,
@@ -5040,60 +5107,6 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
return phys;
}
-static inline bool scalable_mode_support(void)
-{
- struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu;
- bool ret = true;
-
- rcu_read_lock();
- for_each_active_iommu(iommu, drhd) {
- if (!sm_supported(iommu)) {
- ret = false;
- break;
- }
- }
- rcu_read_unlock();
-
- return ret;
-}
-
-static inline bool iommu_pasid_support(void)
-{
- struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu;
- bool ret = true;
-
- rcu_read_lock();
- for_each_active_iommu(iommu, drhd) {
- if (!pasid_supported(iommu)) {
- ret = false;
- break;
- }
- }
- rcu_read_unlock();
-
- return ret;
-}
-
-static inline bool nested_mode_support(void)
-{
- struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu;
- bool ret = true;
-
- rcu_read_lock();
- for_each_active_iommu(iommu, drhd) {
- if (!sm_supported(iommu) || !ecap_nest(iommu->ecap)) {
- ret = false;
- break;
- }
- }
- rcu_read_unlock();
-
- return ret;
-}
-
static bool intel_iommu_capable(enum iommu_cap cap)
{
if (cap == IOMMU_CAP_CACHE_COHERENCY)
@@ -5334,7 +5347,7 @@ intel_iommu_dev_has_feat(struct device *dev, enum iommu_dev_features feat)
int ret;
if (!dev_is_pci(dev) || dmar_disabled ||
- !scalable_mode_support() || !iommu_pasid_support())
+ !scalable_mode_support() || !pasid_mode_support())
return false;
ret = pci_pasid_features(to_pci_dev(dev));
@@ -5508,6 +5521,57 @@ static bool risky_device(struct pci_dev *pdev)
return false;
}
+static void clflush_sync_map(struct dmar_domain *domain, unsigned long clf_pfn,
+ unsigned long clf_pages)
+{
+ struct dma_pte *first_pte = NULL, *pte = NULL;
+ unsigned long lvl_pages = 0;
+ int level = 0;
+
+ while (clf_pages > 0) {
+ if (!pte) {
+ level = 0;
+ pte = pfn_to_dma_pte(domain, clf_pfn, &level);
+ if (WARN_ON(!pte))
+ return;
+ first_pte = pte;
+ lvl_pages = lvl_to_nr_pages(level);
+ }
+
+ if (WARN_ON(!lvl_pages || clf_pages < lvl_pages))
+ return;
+
+ clf_pages -= lvl_pages;
+ clf_pfn += lvl_pages;
+ pte++;
+
+ if (!clf_pages || first_pte_in_page(pte) ||
+ (level > 1 && clf_pages < lvl_pages)) {
+ domain_flush_cache(domain, first_pte,
+ (void *)pte - (void *)first_pte);
+ pte = NULL;
+ }
+ }
+}
+
+static void intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
+{
+ struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+ unsigned long pages = aligned_nrpages(iova, size);
+ unsigned long pfn = iova >> VTD_PAGE_SHIFT;
+ struct intel_iommu *iommu;
+ int iommu_id;
+
+ if (!dmar_domain->iommu_coherency)
+ clflush_sync_map(dmar_domain, pfn, pages);
+
+ for_each_domain_iommu(iommu_id, dmar_domain) {
+ iommu = g_iommus[iommu_id];
+ __mapping_notify_one(iommu, dmar_domain, pfn, pages);
+ }
+}
+
const struct iommu_ops intel_iommu_ops = {
.capable = intel_iommu_capable,
.domain_alloc = intel_iommu_domain_alloc,
@@ -5520,6 +5584,7 @@ const struct iommu_ops intel_iommu_ops = {
.aux_detach_dev = intel_iommu_aux_detach_device,
.aux_get_pasid = intel_iommu_aux_get_pasid,
.map = intel_iommu_map,
+ .iotlb_sync_map = intel_iommu_iotlb_sync_map,
.unmap = intel_iommu_unmap,
.flush_iotlb_all = intel_flush_iotlb_all,
.iotlb_sync = intel_iommu_tlb_sync,
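
Taken together, .map and .iotlb_sync_map turn the old interleaved per-PTE-page flushes into a single batched pass per mapping request. A usage sketch from a hypothetical caller (generic IOMMU API of this era; SZ_4M, alignment, and 4 KiB pages are assumed for the arithmetic):

#include <linux/iommu.h>
#include <linux/pci.h>
#include <linux/sizes.h>

static int example_map(unsigned long iova, phys_addr_t paddr)
{
	struct iommu_domain *dom = iommu_domain_alloc(&pci_bus_type);

	if (!dom)
		return -ENOMEM;

	/*
	 * One call: __domain_mapping() writes the PTEs, then
	 * intel_iommu_iotlb_sync_map() flushes and notifies once.
	 * With 8-byte PTEs, an aligned 4 MiB mapping touches 1024
	 * leaf PTEs (two 4 KiB PTE pages), i.e. two clflush ranges,
	 * now issued at the end instead of between PTE writes.
	 */
	return iommu_map(dom, iova, paddr, SZ_4M, IOMMU_READ | IOMMU_WRITE);
}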