Diffstat (limited to 'drivers/iommu/amd/iommu.c')
-rw-r--r--  drivers/iommu/amd/iommu.c | 302
1 file changed, 189 insertions(+), 113 deletions(-)
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index fb727f5b0b82..1fdf37e04215 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -42,6 +42,7 @@
#include "amd_iommu.h"
#include "../dma-iommu.h"
#include "../irq_remapping.h"
+#include "../iommu-pages.h"
#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
@@ -89,6 +90,21 @@ static inline bool pdom_is_v2_pgtbl_mode(struct protection_domain *pdom)
return (pdom && (pdom->pd_mode == PD_MODE_V2));
}
+static inline bool pdom_is_in_pt_mode(struct protection_domain *pdom)
+{
+ return (pdom->domain.type == IOMMU_DOMAIN_IDENTITY);
+}
+
+/*
+ * We cannot support PASID with an existing v1 page table in the same
+ * domain, since it would have to be nested. However, an existing domain
+ * with a v2 page table or in passthrough mode can be used for PASID.
+ */
+static inline bool pdom_is_sva_capable(struct protection_domain *pdom)
+{
+ return pdom_is_v2_pgtbl_mode(pdom) || pdom_is_in_pt_mode(pdom);
+}
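
/*
 * Editorial sketch (not part of the patch): how the helpers above
 * classify a protection domain for SVA.
 *
 *   IOMMU_DOMAIN_IDENTITY (pt mode)  -> SVA-capable; a GCR3 table can be
 *                                       installed directly in the DTE
 *   PD_MODE_V2 page table            -> SVA-capable; GCR3[0] carries the
 *                                       domain's own page table
 *   PD_MODE_V1 page table            -> not SVA-capable, since the
 *                                       PASID/GCR3 path would have to be
 *                                       nested on top of the v1 table
 */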
+
static inline int get_acpihid_device_id(struct device *dev,
struct acpihid_map_entry **entry)
{
@@ -179,11 +195,6 @@ static struct amd_iommu *rlookup_amd_iommu(struct device *dev)
return __rlookup_amd_iommu(seg, PCI_SBDF_TO_DEVID(devid));
}
-static struct protection_domain *to_pdomain(struct iommu_domain *dom)
-{
- return container_of(dom, struct protection_domain, domain);
-}
-
static struct iommu_dev_data *alloc_dev_data(struct amd_iommu *iommu, u16 devid)
{
struct iommu_dev_data *dev_data;
@@ -384,7 +395,7 @@ static inline void pdev_disable_cap_ats(struct pci_dev *pdev)
}
}
-int amd_iommu_pdev_enable_cap_pri(struct pci_dev *pdev)
+static inline int pdev_enable_cap_pri(struct pci_dev *pdev)
{
struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
int ret = -EINVAL;
@@ -392,6 +403,9 @@ int amd_iommu_pdev_enable_cap_pri(struct pci_dev *pdev)
if (dev_data->pri_enabled)
return 0;
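+ /* PRI requires ATS, so do not enable PRI on a device without ATS */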
+ if (!dev_data->ats_enabled)
+ return 0;
+
if (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) {
/*
* First reset the PRI state of the device.
@@ -408,7 +422,7 @@ int amd_iommu_pdev_enable_cap_pri(struct pci_dev *pdev)
return ret;
}
-void amd_iommu_pdev_disable_cap_pri(struct pci_dev *pdev)
+static inline void pdev_disable_cap_pri(struct pci_dev *pdev)
{
struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
@@ -450,15 +464,14 @@ static void pdev_enable_caps(struct pci_dev *pdev)
{
pdev_enable_cap_ats(pdev);
pdev_enable_cap_pasid(pdev);
- amd_iommu_pdev_enable_cap_pri(pdev);
-
+ pdev_enable_cap_pri(pdev);
}
static void pdev_disable_caps(struct pci_dev *pdev)
{
pdev_disable_cap_ats(pdev);
pdev_disable_cap_pasid(pdev);
- amd_iommu_pdev_disable_cap_pri(pdev);
+ pdev_disable_cap_pri(pdev);
}
/*
@@ -818,59 +831,6 @@ static void iommu_poll_events(struct amd_iommu *iommu)
writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
}
-static void iommu_poll_ppr_log(struct amd_iommu *iommu)
-{
- u32 head, tail;
-
- if (iommu->ppr_log == NULL)
- return;
-
- head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
- tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
-
- while (head != tail) {
- volatile u64 *raw;
- u64 entry[2];
- int i;
-
- raw = (u64 *)(iommu->ppr_log + head);
-
- /*
- * Hardware bug: Interrupt may arrive before the entry is
- * written to memory. If this happens we need to wait for the
- * entry to arrive.
- */
- for (i = 0; i < LOOP_TIMEOUT; ++i) {
- if (PPR_REQ_TYPE(raw[0]) != 0)
- break;
- udelay(1);
- }
-
- /* Avoid memcpy function-call overhead */
- entry[0] = raw[0];
- entry[1] = raw[1];
-
- /*
- * To detect the hardware errata 733 we need to clear the
- * entry back to zero. This issue does not exist on SNP
- * enabled system. Also this buffer is not writeable on
- * SNP enabled system.
- */
- if (!amd_iommu_snp_en)
- raw[0] = raw[1] = 0UL;
-
- /* Update head pointer of hardware ring-buffer */
- head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
- writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
-
- /* TODO: PPR Handler will be added when we add IOPF support */
-
- /* Refresh ring-buffer information */
- head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
- tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
- }
-}
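
/*
 * Editorial sketch: the head/tail ring protocol the removed poller
 * implements (its role is taken over by amd_iommu_poll_ppr_log(), which
 * the IRQ thread below now calls). Names here are generic placeholders,
 * not real registers: hardware produces entries and advances TAIL,
 * software consumes at HEAD and writes HEAD back to release each slot.
 *
 *	head = readl(mmio + RING_HEAD);
 *	tail = readl(mmio + RING_TAIL);
 *	while (head != tail) {
 *		consume(ring + head);
 *		head = (head + ENTRY_SIZE) % RING_SIZE;
 *		writel(head, mmio + RING_HEAD);	// release the slot
 *		tail = readl(mmio + RING_TAIL);	// pick up new entries
 *	}
 */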
-
#ifdef CONFIG_IRQ_REMAP
static int (*iommu_ga_log_notifier)(u32);
@@ -991,7 +951,7 @@ irqreturn_t amd_iommu_int_thread_pprlog(int irq, void *data)
{
amd_iommu_handle_irq(data, "PPR", MMIO_STATUS_PPR_INT_MASK,
MMIO_STATUS_PPR_OVERFLOW_MASK,
- iommu_poll_ppr_log, amd_iommu_restart_ppr_log);
+ amd_iommu_poll_ppr_log, amd_iommu_restart_ppr_log);
return IRQ_HANDLED;
}
@@ -1664,15 +1624,14 @@ void amd_iommu_domain_update(struct protection_domain *domain)
amd_iommu_domain_flush_all(domain);
}
-int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
- int status, int tag)
+int amd_iommu_complete_ppr(struct device *dev, u32 pasid, int status, int tag)
{
struct iommu_dev_data *dev_data;
struct amd_iommu *iommu;
struct iommu_cmd cmd;
- dev_data = dev_iommu_priv_get(&pdev->dev);
- iommu = get_amd_iommu_from_dev(&pdev->dev);
+ dev_data = dev_iommu_priv_get(dev);
+ iommu = get_amd_iommu_from_dev(dev);
build_complete_ppr(&cmd, dev_data->devid, pasid, status,
tag, dev_data->pri_tlp);
@@ -1728,7 +1687,7 @@ static void free_gcr3_tbl_level1(u64 *tbl)
ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK);
- free_page((unsigned long)ptr);
+ iommu_free_page(ptr);
}
}
@@ -1761,7 +1720,7 @@ static void free_gcr3_table(struct gcr3_tbl_info *gcr3_info)
/* Free per device domain ID */
domain_id_free(gcr3_info->domid);
- free_page((unsigned long)gcr3_info->gcr3_tbl);
+ iommu_free_page(gcr3_info->gcr3_tbl);
gcr3_info->gcr3_tbl = NULL;
}
@@ -1796,7 +1755,7 @@ static int setup_gcr3_table(struct gcr3_tbl_info *gcr3_info,
/* Allocate per device domain ID */
gcr3_info->domid = domain_id_alloc();
- gcr3_info->gcr3_tbl = alloc_pgtable_page(nid, GFP_ATOMIC);
+ gcr3_info->gcr3_tbl = iommu_alloc_page_node(nid, GFP_ATOMIC);
if (gcr3_info->gcr3_tbl == NULL) {
domain_id_free(gcr3_info->domid);
return -ENOMEM;
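
/*
 * Editorial note on the allocator conversions above: page-table memory
 * now goes through the shared iommu-pages helpers (hence the new
 * ../iommu-pages.h include) instead of the raw page allocator. The
 * pattern, in isolation:
 *
 *	// before                           // after
 *	p = (void *)get_zeroed_page(gfp);   p = iommu_alloc_page_node(nid, gfp);
 *	free_page((unsigned long)p);        iommu_free_page(p);
 *
 * Both helpers return a zeroed page; the _node variant additionally lets
 * the caller ask for NUMA-local memory.
 */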
@@ -2002,10 +1961,78 @@ static void clear_dte_entry(struct amd_iommu *iommu, u16 devid)
amd_iommu_apply_erratum_63(iommu, devid);
}
+/* Update and flush DTE for the given device */
+void amd_iommu_dev_update_dte(struct iommu_dev_data *dev_data, bool set)
+{
+ struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev);
+
+ if (set)
+ set_dte_entry(iommu, dev_data);
+ else
+ clear_dte_entry(iommu, dev_data->devid);
+
+ clone_aliases(iommu, dev_data->dev);
+ device_flush_dte(dev_data);
+ iommu_completion_wait(iommu);
+}
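
/*
 * Editorial note: later hunks use this helper from both directions of
 * the attach path:
 *
 *	amd_iommu_dev_update_dte(dev_data, true);	// do_attach(): install DTE
 *	amd_iommu_dev_update_dte(dev_data, false);	// do_detach(): clear DTE
 */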
+
+/*
+ * If the domain is SVA capable, initialize the GCR3 table. If the domain
+ * is also in v2 page table mode, update GCR3[0].
+ */
+static int init_gcr3_table(struct iommu_dev_data *dev_data,
+ struct protection_domain *pdom)
+{
+ struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
+ int max_pasids = dev_data->max_pasids;
+ int ret = 0;
+
+ /*
+ * If the domain is in passthrough (pt) mode, set up the GCR3 table
+ * only if the device is PASID capable.
+ */
+ if (pdom_is_in_pt_mode(pdom) && !pdev_pasid_supported(dev_data))
+ return ret;
+
+ /*
+ * By default, set up the GCR3 table to support the maximum number
+ * of PASIDs supported by the device/IOMMU.
+ */
+ ret = setup_gcr3_table(&dev_data->gcr3_info, iommu,
+ max_pasids > 0 ? max_pasids : 1);
+ if (ret)
+ return ret;
+
+ /* Set up GCR3[0] only if the domain uses v2 page table mode */
+ if (!pdom_is_v2_pgtbl_mode(pdom))
+ return ret;
+
+ ret = update_gcr3(dev_data, 0, iommu_virt_to_phys(pdom->iop.pgd), true);
+ if (ret)
+ free_gcr3_table(&dev_data->gcr3_info);
+
+ return ret;
+}
+
+static void destroy_gcr3_table(struct iommu_dev_data *dev_data,
+ struct protection_domain *pdom)
+{
+ struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
+
+ if (pdom_is_v2_pgtbl_mode(pdom))
+ update_gcr3(dev_data, 0, 0, false);
+
+ if (gcr3_info->gcr3_tbl == NULL)
+ return;
+
+ free_gcr3_table(gcr3_info);
+}
+
static int do_attach(struct iommu_dev_data *dev_data,
struct protection_domain *domain)
{
struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
+ struct pci_dev *pdev;
int ret = 0;
/* Update data structures */
@@ -2020,26 +2047,29 @@ static int do_attach(struct iommu_dev_data *dev_data,
domain->dev_iommu[iommu->index] += 1;
domain->dev_cnt += 1;
- /* Init GCR3 table and update device table */
- if (domain->pd_mode == PD_MODE_V2) {
- /* By default, setup GCR3 table to support single PASID */
- ret = setup_gcr3_table(&dev_data->gcr3_info, iommu, 1);
+ pdev = dev_is_pci(dev_data->dev) ? to_pci_dev(dev_data->dev) : NULL;
+ if (pdom_is_sva_capable(domain)) {
+ ret = init_gcr3_table(dev_data, domain);
if (ret)
return ret;
- ret = update_gcr3(dev_data, 0,
- iommu_virt_to_phys(domain->iop.pgd), true);
- if (ret) {
- free_gcr3_table(&dev_data->gcr3_info);
- return ret;
+ if (pdev) {
+ pdev_enable_caps(pdev);
+
+ /*
+ * The device can continue to function even if IOPF
+ * enablement fails, so in the error path just
+ * disable the device's PRI support.
+ */
+ if (amd_iommu_iopf_add_device(iommu, dev_data))
+ pdev_disable_cap_pri(pdev);
}
+ } else if (pdev) {
+ pdev_enable_cap_ats(pdev);
}
/* Update device table */
- set_dte_entry(iommu, dev_data);
- clone_aliases(iommu, dev_data->dev);
-
- device_flush_dte(dev_data);
+ amd_iommu_dev_update_dte(dev_data, true);
return ret;
}
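
/*
 * Editorial summary of the attach flow implemented above:
 *
 *   SVA-capable domain (v2 page table or passthrough):
 *     init_gcr3_table(); for a PCI device also enable ATS/PASID/PRI and
 *     register the IOPF handler, dropping back to PRI disabled if IOPF
 *     registration fails
 *   other domains (v1 page table):
 *     for a PCI device enable ATS only
 *   all paths:
 *     install and flush the DTE via amd_iommu_dev_update_dte(dev_data, true)
 */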
@@ -2050,19 +2080,15 @@ static void do_detach(struct iommu_dev_data *dev_data)
struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
/* Clear GCR3 table */
- if (domain->pd_mode == PD_MODE_V2) {
- update_gcr3(dev_data, 0, 0, false);
- free_gcr3_table(&dev_data->gcr3_info);
- }
+ if (pdom_is_sva_capable(domain))
+ destroy_gcr3_table(dev_data, domain);
/* Update data structures */
dev_data->domain = NULL;
list_del(&dev_data->list);
- clear_dte_entry(iommu, dev_data->devid);
- clone_aliases(iommu, dev_data->dev);
- /* Flush the DTE entry */
- device_flush_dte(dev_data);
+ /* Clear DTE and flush the entry */
+ amd_iommu_dev_update_dte(dev_data, false);
/* Flush IOTLB and wait for the flushes to finish */
amd_iommu_domain_flush_all(domain);
@@ -2094,9 +2120,6 @@ static int attach_device(struct device *dev,
goto out;
}
- if (dev_is_pci(dev))
- pdev_enable_caps(to_pci_dev(dev));
-
ret = do_attach(dev_data, domain);
out:
@@ -2112,12 +2135,11 @@ out:
*/
static void detach_device(struct device *dev)
{
- struct protection_domain *domain;
- struct iommu_dev_data *dev_data;
+ struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
+ struct protection_domain *domain = dev_data->domain;
+ struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
unsigned long flags;
-
- dev_data = dev_iommu_priv_get(dev);
- domain = dev_data->domain;
+ bool ppr = dev_data->ppr;
spin_lock_irqsave(&domain->lock, flags);
@@ -2132,8 +2154,19 @@ static void detach_device(struct device *dev)
if (WARN_ON(!dev_data->domain))
goto out;
+ if (ppr) {
+ iopf_queue_flush_dev(dev);
+
+ /* Update it here so that the change is reflected in the DTE */
+ dev_data->ppr = false;
+ }
+
do_detach(dev_data);
+ /* Remove IOPF handler */
+ if (ppr)
+ amd_iommu_iopf_remove_device(iommu, dev_data);
+
if (dev_is_pci(dev))
pdev_disable_caps(to_pci_dev(dev));
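
/*
 * Editorial summary of the teardown ordering above for a PPR-enabled
 * device:
 *
 *   1. iopf_queue_flush_dev()          - drain in-flight page faults
 *   2. dev_data->ppr = false           - so the rewritten DTE drops PPR
 *   3. do_detach()                     - clear and flush the DTE
 *   4. amd_iommu_iopf_remove_device()  - tear down the IOPF handler
 *   5. pdev_disable_caps()             - disable ATS/PASID/PRI again
 */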
@@ -2147,6 +2180,7 @@ static struct iommu_device *amd_iommu_probe_device(struct device *dev)
{
struct iommu_device *iommu_dev;
struct amd_iommu *iommu;
+ struct iommu_dev_data *dev_data;
int ret;
if (!check_device(dev))
@@ -2173,18 +2207,22 @@ static struct iommu_device *amd_iommu_probe_device(struct device *dev)
iommu_dev = &iommu->iommu;
}
+ /*
+ * If both the IOMMU and the device support PASID, max_pasids will
+ * hold the maximum number of supported PASIDs; otherwise it stays zero.
+ */
+ dev_data = dev_iommu_priv_get(dev);
+ if (amd_iommu_pasid_supported() && dev_is_pci(dev) &&
+ pdev_pasid_supported(dev_data)) {
+ dev_data->max_pasids = min_t(u32, iommu->iommu.max_pasids,
+ pci_max_pasids(to_pci_dev(dev)));
+ }
+
iommu_completion_wait(iommu);
return iommu_dev;
}
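
/*
 * Editorial cross-reference: max_pasids computed here bounds the GCR3
 * table size in init_gcr3_table() above, which passes
 * "max_pasids > 0 ? max_pasids : 1" to setup_gcr3_table(). A device
 * without PASID support attached to a v2-mode domain therefore still
 * gets a single-entry GCR3 table for GCR3[0].
 */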
-static void amd_iommu_probe_finalize(struct device *dev)
-{
- /* Domains are initialized for this device - have a look what we ended up with */
- set_dma_ops(dev, NULL);
- iommu_setup_dma_ops(dev, 0, U64_MAX);
-}
-
static void amd_iommu_release_device(struct device *dev)
{
struct amd_iommu *iommu;
@@ -2236,7 +2274,7 @@ static void cleanup_domain(struct protection_domain *domain)
WARN_ON(domain->dev_cnt != 0);
}
-static void protection_domain_free(struct protection_domain *domain)
+void protection_domain_free(struct protection_domain *domain)
{
if (!domain)
return;
@@ -2245,7 +2283,7 @@ static void protection_domain_free(struct protection_domain *domain)
free_io_pgtable_ops(&domain->iop.iop.ops);
if (domain->iop.root)
- free_page((unsigned long)domain->iop.root);
+ iommu_free_page(domain->iop.root);
if (domain->id)
domain_id_free(domain->id);
@@ -2260,7 +2298,7 @@ static int protection_domain_init_v1(struct protection_domain *domain, int mode)
BUG_ON(mode < PAGE_MODE_NONE || mode > PAGE_MODE_6_LEVEL);
if (mode != PAGE_MODE_NONE) {
- pt_root = (void *)get_zeroed_page(GFP_KERNEL);
+ pt_root = iommu_alloc_page(GFP_KERNEL);
if (!pt_root)
return -ENOMEM;
}
@@ -2279,7 +2317,7 @@ static int protection_domain_init_v2(struct protection_domain *pdom)
return 0;
}
-static struct protection_domain *protection_domain_alloc(unsigned int type)
+struct protection_domain *protection_domain_alloc(unsigned int type)
{
struct io_pgtable_ops *pgtbl_ops;
struct protection_domain *domain;
@@ -2296,11 +2334,13 @@ static struct protection_domain *protection_domain_alloc(unsigned int type)
spin_lock_init(&domain->lock);
INIT_LIST_HEAD(&domain->dev_list);
+ INIT_LIST_HEAD(&domain->dev_data_list);
domain->nid = NUMA_NO_NODE;
switch (type) {
/* No need to allocate io pgtable ops in passthrough mode */
case IOMMU_DOMAIN_IDENTITY:
+ case IOMMU_DOMAIN_SVA:
return domain;
case IOMMU_DOMAIN_DMA:
pgtable = amd_iommu_pgtable;
@@ -2420,7 +2460,7 @@ amd_iommu_domain_alloc_user(struct device *dev, u32 flags,
return do_iommu_domain_alloc(type, dev, flags);
}
-static void amd_iommu_domain_free(struct iommu_domain *dom)
+void amd_iommu_domain_free(struct iommu_domain *dom)
{
struct protection_domain *domain;
unsigned long flags;
@@ -2785,18 +2825,54 @@ static const struct iommu_dirty_ops amd_dirty_ops = {
.read_and_clear_dirty = amd_iommu_read_and_clear_dirty,
};
+static int amd_iommu_dev_enable_feature(struct device *dev,
+ enum iommu_dev_features feat)
+{
+ int ret = 0;
+
+ switch (feat) {
+ case IOMMU_DEV_FEAT_IOPF:
+ case IOMMU_DEV_FEAT_SVA:
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static int amd_iommu_dev_disable_feature(struct device *dev,
+ enum iommu_dev_features feat)
+{
+ int ret = 0;
+
+ switch (feat) {
+ case IOMMU_DEV_FEAT_IOPF:
+ case IOMMU_DEV_FEAT_SVA:
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
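
/*
 * Editorial note: the two callbacks above only validate the requested
 * feature; the actual IOPF/SVA machinery is switched on and off in the
 * attach/detach paths (pdev_enable_caps(), amd_iommu_iopf_add_device()
 * and their counterparts).
 */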
+
const struct iommu_ops amd_iommu_ops = {
.capable = amd_iommu_capable,
.domain_alloc = amd_iommu_domain_alloc,
.domain_alloc_user = amd_iommu_domain_alloc_user,
+ .domain_alloc_sva = amd_iommu_domain_alloc_sva,
.probe_device = amd_iommu_probe_device,
.release_device = amd_iommu_release_device,
- .probe_finalize = amd_iommu_probe_finalize,
.device_group = amd_iommu_device_group,
.get_resv_regions = amd_iommu_get_resv_regions,
.is_attach_deferred = amd_iommu_is_attach_deferred,
.pgsize_bitmap = AMD_IOMMU_PGSIZES,
.def_domain_type = amd_iommu_def_domain_type,
+ .dev_enable_feat = amd_iommu_dev_enable_feature,
+ .dev_disable_feat = amd_iommu_dev_disable_feature,
+ .remove_dev_pasid = amd_iommu_remove_dev_pasid,
+ .page_response = amd_iommu_page_response,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = amd_iommu_attach_device,
.map_pages = amd_iommu_map_pages,