Diffstat (limited to 'drivers/iommu')
-rw-r--r-- drivers/iommu/Makefile | 2
-rw-r--r-- drivers/iommu/amd/init.c | 88
-rw-r--r-- drivers/iommu/amd/iommu.c | 15
-rw-r--r-- drivers/iommu/amd/iommu_v2.c | 5
-rw-r--r-- drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c | 104
-rw-r--r-- drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 32
-rw-r--r-- drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h | 23
-rw-r--r-- drivers/iommu/arm/arm-smmu/arm-smmu-impl.c | 3
-rw-r--r-- drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c | 91
-rw-r--r-- drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c | 157
-rw-r--r-- drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h | 21
-rw-r--r-- drivers/iommu/arm/arm-smmu/arm-smmu.c | 3
-rw-r--r-- drivers/iommu/arm/arm-smmu/qcom_iommu.c | 21
-rw-r--r-- drivers/iommu/exynos-iommu.c | 26
-rw-r--r-- drivers/iommu/fsl_pamu.c | 8
-rw-r--r-- drivers/iommu/fsl_pamu_domain.c | 4
-rw-r--r-- drivers/iommu/intel/dmar.c | 8
-rw-r--r-- drivers/iommu/intel/iommu.c | 113
-rw-r--r-- drivers/iommu/intel/iommu.h | 22
-rw-r--r-- drivers/iommu/intel/pasid.c | 6
-rw-r--r-- drivers/iommu/intel/svm.c | 164
-rw-r--r-- drivers/iommu/io-pgfault.c | 77
-rw-r--r-- drivers/iommu/io-pgtable-arm-v7s.c | 41
-rw-r--r-- drivers/iommu/io-pgtable-arm.c | 42
-rw-r--r-- drivers/iommu/iommu-sva-lib.c | 71
-rw-r--r-- drivers/iommu/iommu-sva.c | 240
-rw-r--r-- drivers/iommu/iommu-sva.h (renamed from drivers/iommu/iommu-sva-lib.h) | 14
-rw-r--r-- drivers/iommu/iommu.c | 329
-rw-r--r-- drivers/iommu/ipmmu-vmsa.c | 20
-rw-r--r-- drivers/iommu/msm_iommu.c | 18
-rw-r--r-- drivers/iommu/mtk_iommu.c | 153
-rw-r--r-- drivers/iommu/mtk_iommu_v1.c | 30
-rw-r--r-- drivers/iommu/omap-iommu.c | 6
-rw-r--r-- drivers/iommu/rockchip-iommu.c | 10
-rw-r--r-- drivers/iommu/s390-iommu.c | 377
-rw-r--r-- drivers/iommu/sprd-iommu.c | 29
-rw-r--r-- drivers/iommu/sun50i-iommu.c | 89
-rw-r--r-- drivers/iommu/tegra-gart.c | 2
-rw-r--r-- drivers/iommu/virtio-iommu.c | 7
39 files changed, 1515 insertions(+), 956 deletions(-)
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index cc9f381013c3..7fbf6a337662 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -28,6 +28,6 @@ obj-$(CONFIG_FSL_PAMU) += fsl_pamu.o fsl_pamu_domain.o
obj-$(CONFIG_S390_IOMMU) += s390-iommu.o
obj-$(CONFIG_HYPERV_IOMMU) += hyperv-iommu.o
obj-$(CONFIG_VIRTIO_IOMMU) += virtio-iommu.o
-obj-$(CONFIG_IOMMU_SVA) += iommu-sva-lib.o io-pgfault.o
+obj-$(CONFIG_IOMMU_SVA) += iommu-sva.o io-pgfault.o
obj-$(CONFIG_SPRD_IOMMU) += sprd-iommu.o
obj-$(CONFIG_APPLE_DART) += apple-dart.o
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 1a2d425bf568..467b194975b3 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -85,7 +85,7 @@
#define LOOP_TIMEOUT 2000000
-#define IVRS_GET_SBDF_ID(seg, bus, dev, fd) (((seg & 0xffff) << 16) | ((bus & 0xff) << 8) \
+#define IVRS_GET_SBDF_ID(seg, bus, dev, fn) (((seg & 0xffff) << 16) | ((bus & 0xff) << 8) \
| ((dev & 0x1f) << 3) | (fn & 0x7))
/*
@@ -3402,18 +3402,24 @@ static int __init parse_amd_iommu_options(char *str)
static int __init parse_ivrs_ioapic(char *str)
{
u32 seg = 0, bus, dev, fn;
- int ret, id, i;
+ int id, i;
u32 devid;
- ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
- if (ret != 4) {
- ret = sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn);
- if (ret != 5) {
- pr_err("Invalid command line: ivrs_ioapic%s\n", str);
- return 1;
- }
+ if (sscanf(str, "=%d@%x:%x.%x", &id, &bus, &dev, &fn) == 4 ||
+ sscanf(str, "=%d@%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5)
+ goto found;
+
+ if (sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn) == 4 ||
+ sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5) {
+ pr_warn("ivrs_ioapic%s option format deprecated; use ivrs_ioapic=%d@%04x:%02x:%02x.%d instead\n",
+ str, id, seg, bus, dev, fn);
+ goto found;
}
+ pr_err("Invalid command line: ivrs_ioapic%s\n", str);
+ return 1;
+
+found:
if (early_ioapic_map_size == EARLY_MAP_SIZE) {
pr_err("Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n",
str);
@@ -3434,18 +3440,24 @@ static int __init parse_ivrs_ioapic(char *str)
static int __init parse_ivrs_hpet(char *str)
{
u32 seg = 0, bus, dev, fn;
- int ret, id, i;
+ int id, i;
u32 devid;
- ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
- if (ret != 4) {
- ret = sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn);
- if (ret != 5) {
- pr_err("Invalid command line: ivrs_hpet%s\n", str);
- return 1;
- }
+ if (sscanf(str, "=%d@%x:%x.%x", &id, &bus, &dev, &fn) == 4 ||
+ sscanf(str, "=%d@%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5)
+ goto found;
+
+ if (sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn) == 4 ||
+ sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5) {
+ pr_warn("ivrs_hpet%s option format deprecated; use ivrs_hpet=%d@%04x:%02x:%02x.%d instead\n",
+ str, id, seg, bus, dev, fn);
+ goto found;
}
+ pr_err("Invalid command line: ivrs_hpet%s\n", str);
+ return 1;
+
+found:
if (early_hpet_map_size == EARLY_MAP_SIZE) {
pr_err("Early HPET map overflow - ignoring ivrs_hpet%s\n",
str);
@@ -3466,19 +3478,36 @@ static int __init parse_ivrs_hpet(char *str)
static int __init parse_ivrs_acpihid(char *str)
{
u32 seg = 0, bus, dev, fn;
- char *hid, *uid, *p;
+ char *hid, *uid, *p, *addr;
char acpiid[ACPIHID_UID_LEN + ACPIHID_HID_LEN] = {0};
- int ret, i;
-
- ret = sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid);
- if (ret != 4) {
- ret = sscanf(str, "[%x:%x:%x.%x]=%s", &seg, &bus, &dev, &fn, acpiid);
- if (ret != 5) {
- pr_err("Invalid command line: ivrs_acpihid(%s)\n", str);
- return 1;
+ int i;
+
+ addr = strchr(str, '@');
+ if (!addr) {
+ if (sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid) == 4 ||
+ sscanf(str, "[%x:%x:%x.%x]=%s", &seg, &bus, &dev, &fn, acpiid) == 5) {
+ pr_warn("ivrs_acpihid%s option format deprecated; use ivrs_acpihid=%s@%04x:%02x:%02x.%d instead\n",
+ str, acpiid, seg, bus, dev, fn);
+ goto found;
}
+ goto not_found;
}
+ /* We have the '@', make it the terminator to get just the acpiid */
+ *addr++ = 0;
+
+ if (sscanf(str, "=%s", acpiid) != 1)
+ goto not_found;
+
+ if (sscanf(addr, "%x:%x.%x", &bus, &dev, &fn) == 3 ||
+ sscanf(addr, "%x:%x:%x.%x", &seg, &bus, &dev, &fn) == 4)
+ goto found;
+
+not_found:
+ pr_err("Invalid command line: ivrs_acpihid%s\n", str);
+ return 1;
+
+found:
p = acpiid;
hid = strsep(&p, ":");
uid = p;
@@ -3488,6 +3517,13 @@ static int __init parse_ivrs_acpihid(char *str)
return 1;
}
+ /*
+ * Ignore leading zeroes after ':', so e.g., AMDI0095:00
+ * will match AMDI0095:0 in the second strcmp in acpi_dev_hid_uid_match
+ */
+ while (*uid == '0' && *(uid + 1))
+ uid++;
+
i = early_acpihid_map_size++;
memcpy(early_acpihid_map[i].hid, hid, strlen(hid));
memcpy(early_acpihid_map[i].uid, uid, strlen(uid));
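The three option parsers above end up with the same shape: try the new =<id>@[<seg>:]<bus>:<dev>.<fn> syntax first, fall back to the deprecated bracket syntax with a warning, and reject everything else. A standalone userspace sketch of that cascade; printf stands in for pr_warn()/pr_err(), and the sscanf patterns are lifted from parse_ivrs_ioapic():

#include <stdio.h>

static int parse(const char *str)
{
	unsigned int seg = 0, bus, dev, fn;
	int id;

	/* new syntax: ivrs_ioapic=<id>@[<seg>:]<bus>:<dev>.<fn> */
	if (sscanf(str, "=%d@%x:%x.%x", &id, &bus, &dev, &fn) == 4 ||
	    sscanf(str, "=%d@%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5)
		goto found;

	/* deprecated syntax: ivrs_ioapic[<id>]=[<seg>:]<bus>:<dev>.<fn> */
	if (sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn) == 4 ||
	    sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5) {
		printf("deprecated; use ivrs_ioapic=%d@%04x:%02x:%02x.%d\n",
		       id, seg, bus, dev, fn);
		goto found;
	}
	printf("invalid: ivrs_ioapic%s\n", str);
	return 1;

found:
	printf("ioapic[%d] -> %04x:%02x:%02x.%d\n", id, seg, bus, dev, fn);
	return 0;
}

int main(void)
{
	parse("=10@0000:00:14.0");	/* new format, with segment */
	parse("[10]=00:14.0");		/* old format, warns */
	parse("=nonsense");		/* rejected */
	return 0;
}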
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index d3b39d0416fa..5c381cc3658e 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -767,7 +767,7 @@ EXPORT_SYMBOL(amd_iommu_register_ga_log_notifier);
static void iommu_poll_ga_log(struct amd_iommu *iommu)
{
- u32 head, tail, cnt = 0;
+ u32 head, tail;
if (iommu->ga_log == NULL)
return;
@@ -780,7 +780,6 @@ static void iommu_poll_ga_log(struct amd_iommu *iommu)
u64 log_entry;
raw = (u64 *)(iommu->ga_log + head);
- cnt++;
/* Avoid memcpy function-call overhead */
log_entry = *raw;
@@ -2155,21 +2154,13 @@ static void amd_iommu_detach_device(struct iommu_domain *dom,
static int amd_iommu_attach_device(struct iommu_domain *dom,
struct device *dev)
{
+ struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
struct protection_domain *domain = to_pdomain(dom);
- struct iommu_dev_data *dev_data;
- struct amd_iommu *iommu;
+ struct amd_iommu *iommu = rlookup_amd_iommu(dev);
int ret;
- if (!check_device(dev))
- return -EINVAL;
-
- dev_data = dev_iommu_priv_get(dev);
dev_data->defer_attach = false;
- iommu = rlookup_amd_iommu(dev);
- if (!iommu)
- return -EINVAL;
-
if (dev_data->domain)
detach_device(dev);
diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c
index 6a1f02c62dff..864e4ffb6aa9 100644
--- a/drivers/iommu/amd/iommu_v2.c
+++ b/drivers/iommu/amd/iommu_v2.c
@@ -587,6 +587,7 @@ out_drop_state:
put_device_state(dev_state);
out:
+ pci_dev_put(pdev);
return ret;
}
@@ -639,7 +640,9 @@ int amd_iommu_bind_pasid(struct pci_dev *pdev, u32 pasid,
if (pasid_state->mm == NULL)
goto out_free;
- mmu_notifier_register(&pasid_state->mn, mm);
+ ret = mmu_notifier_register(&pasid_state->mn, mm);
+ if (ret)
+ goto out_free;
ret = set_pasid_state(dev_state, pasid_state, pasid);
if (ret)
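Both fixes here follow a rule that recurs later in this series (dmar_dev_scope_init(), has_external_pci(), the PRQ loop in intel/svm.c): a reference taken with pci_get_*() must be dropped on every exit path, and a registration that can fail must have its return value checked. A minimal userspace model of the get/put balance:

#include <stdio.h>

struct obj { int refs; };

static void get_obj(struct obj *o) { o->refs++; }
static void put_obj(struct obj *o) { o->refs--; }

static int do_work(struct obj *dev, int fail)
{
	int ret = 0;

	get_obj(dev);
	if (fail) {
		ret = -1;
		goto out;	/* a bare "return ret" here would leak a ref */
	}
	/* ... use dev ... */
out:
	put_obj(dev);		/* the added pci_dev_put() equivalent */
	return ret;
}

int main(void)
{
	struct obj dev = { 0 };

	do_work(&dev, 1);
	do_work(&dev, 0);
	printf("refcount after both paths: %d\n", dev.refs);	/* 0 */
	return 0;
}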
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
index 5968a568aae2..a5a63b1c947e 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
@@ -10,7 +10,7 @@
#include <linux/slab.h>
#include "arm-smmu-v3.h"
-#include "../../iommu-sva-lib.h"
+#include "../../iommu-sva.h"
#include "../../io-pgtable-arm.h"
struct arm_smmu_mmu_notifier {
@@ -344,11 +344,6 @@ __arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
if (!bond)
return ERR_PTR(-ENOMEM);
- /* Allocate a PASID for this mm if necessary */
- ret = iommu_sva_alloc_pasid(mm, 1, (1U << master->ssid_bits) - 1);
- if (ret)
- goto err_free_bond;
-
bond->mm = mm;
bond->sva.dev = dev;
refcount_set(&bond->refs, 1);
@@ -367,42 +362,6 @@ err_free_bond:
return ERR_PTR(ret);
}
-struct iommu_sva *
-arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm, void *drvdata)
-{
- struct iommu_sva *handle;
- struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
- struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
-
- if (smmu_domain->stage != ARM_SMMU_DOMAIN_S1)
- return ERR_PTR(-EINVAL);
-
- mutex_lock(&sva_lock);
- handle = __arm_smmu_sva_bind(dev, mm);
- mutex_unlock(&sva_lock);
- return handle;
-}
-
-void arm_smmu_sva_unbind(struct iommu_sva *handle)
-{
- struct arm_smmu_bond *bond = sva_to_bond(handle);
-
- mutex_lock(&sva_lock);
- if (refcount_dec_and_test(&bond->refs)) {
- list_del(&bond->list);
- arm_smmu_mmu_notifier_put(bond->smmu_mn);
- kfree(bond);
- }
- mutex_unlock(&sva_lock);
-}
-
-u32 arm_smmu_sva_get_pasid(struct iommu_sva *handle)
-{
- struct arm_smmu_bond *bond = sva_to_bond(handle);
-
- return bond->mm->pasid;
-}
-
bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
{
unsigned long reg, fld;
@@ -550,3 +509,64 @@ void arm_smmu_sva_notifier_synchronize(void)
*/
mmu_notifier_synchronize();
}
+
+void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t id)
+{
+ struct mm_struct *mm = domain->mm;
+ struct arm_smmu_bond *bond = NULL, *t;
+ struct arm_smmu_master *master = dev_iommu_priv_get(dev);
+
+ mutex_lock(&sva_lock);
+ list_for_each_entry(t, &master->bonds, list) {
+ if (t->mm == mm) {
+ bond = t;
+ break;
+ }
+ }
+
+ if (!WARN_ON(!bond) && refcount_dec_and_test(&bond->refs)) {
+ list_del(&bond->list);
+ arm_smmu_mmu_notifier_put(bond->smmu_mn);
+ kfree(bond);
+ }
+ mutex_unlock(&sva_lock);
+}
+
+static int arm_smmu_sva_set_dev_pasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t id)
+{
+ int ret = 0;
+ struct iommu_sva *handle;
+ struct mm_struct *mm = domain->mm;
+
+ mutex_lock(&sva_lock);
+ handle = __arm_smmu_sva_bind(dev, mm);
+ if (IS_ERR(handle))
+ ret = PTR_ERR(handle);
+ mutex_unlock(&sva_lock);
+
+ return ret;
+}
+
+static void arm_smmu_sva_domain_free(struct iommu_domain *domain)
+{
+ kfree(domain);
+}
+
+static const struct iommu_domain_ops arm_smmu_sva_domain_ops = {
+ .set_dev_pasid = arm_smmu_sva_set_dev_pasid,
+ .free = arm_smmu_sva_domain_free
+};
+
+struct iommu_domain *arm_smmu_sva_domain_alloc(void)
+{
+ struct iommu_domain *domain;
+
+ domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+ if (!domain)
+ return NULL;
+ domain->ops = &arm_smmu_sva_domain_ops;
+
+ return domain;
+}
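arm_smmu_sva_remove_dev_pasid() above drops one reference on the bond for the given mm and only tears it down on the last put, so multiple set_dev_pasid() calls against the same mm share a single bond. A userspace model of that lifetime; it assumes __arm_smmu_sva_bind() keeps its existing-bond lookup, which this hunk does not show:

#include <stdio.h>
#include <stdlib.h>

struct bond {
	const void *mm;		/* stands in for struct mm_struct * */
	int refs;		/* stands in for refcount_t */
	struct bond *next;
};

static struct bond *bonds;	/* stands in for master->bonds */

/* models __arm_smmu_sva_bind(): reuse the bond for an already-bound mm */
static struct bond *bind(const void *mm)
{
	struct bond *b;

	for (b = bonds; b; b = b->next) {
		if (b->mm == mm) {
			b->refs++;
			return b;
		}
	}
	b = calloc(1, sizeof(*b));
	if (!b)
		return NULL;
	b->mm = mm;
	b->refs = 1;
	b->next = bonds;
	bonds = b;
	return b;
}

/* models arm_smmu_sva_remove_dev_pasid(): free on the last reference */
static void remove_dev_pasid(const void *mm)
{
	struct bond **p, *b;

	for (p = &bonds; (b = *p); p = &b->next) {
		if (b->mm == mm)
			break;
	}
	if (!b)			/* WARN_ON(!bond) in the real code */
		return;
	if (--b->refs == 0) {	/* refcount_dec_and_test() */
		*p = b->next;	/* list_del() */
		free(b);	/* kfree() */
	}
}

int main(void)
{
	int mm;

	bind(&mm);
	bind(&mm);		/* second PASID attach, same mm */
	remove_dev_pasid(&mm);	/* refs 2 -> 1, bond kept */
	remove_dev_pasid(&mm);	/* last ref, bond freed */
	printf("bond list empty: %s\n", bonds ? "no" : "yes");
	return 0;
}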
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 6d5df91c5c46..ab160198edd6 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -29,7 +29,7 @@
#include "arm-smmu-v3.h"
#include "../../dma-iommu.h"
-#include "../../iommu-sva-lib.h"
+#include "../../iommu-sva.h"
static bool disable_bypass = true;
module_param(disable_bypass, bool, 0444);
@@ -2009,6 +2009,9 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
struct arm_smmu_domain *smmu_domain;
+ if (type == IOMMU_DOMAIN_SVA)
+ return arm_smmu_sva_domain_alloc();
+
if (type != IOMMU_DOMAIN_UNMANAGED &&
type != IOMMU_DOMAIN_DMA &&
type != IOMMU_DOMAIN_DMA_FQ &&
@@ -2430,23 +2433,14 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
goto out_unlock;
}
} else if (smmu_domain->smmu != smmu) {
- dev_err(dev,
- "cannot attach to SMMU %s (upstream of %s)\n",
- dev_name(smmu_domain->smmu->dev),
- dev_name(smmu->dev));
- ret = -ENXIO;
+ ret = -EINVAL;
goto out_unlock;
} else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1 &&
master->ssid_bits != smmu_domain->s1_cfg.s1cdmax) {
- dev_err(dev,
- "cannot attach to incompatible domain (%u SSID bits != %u)\n",
- smmu_domain->s1_cfg.s1cdmax, master->ssid_bits);
ret = -EINVAL;
goto out_unlock;
} else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1 &&
smmu_domain->stall_enabled != master->stall_enabled) {
- dev_err(dev, "cannot attach to stall-%s domain\n",
- smmu_domain->stall_enabled ? "enabled" : "disabled");
ret = -EINVAL;
goto out_unlock;
}
@@ -2838,6 +2832,17 @@ static int arm_smmu_def_domain_type(struct device *dev)
return 0;
}
+static void arm_smmu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
+{
+ struct iommu_domain *domain;
+
+ domain = iommu_get_domain_for_dev_pasid(dev, pasid, IOMMU_DOMAIN_SVA);
+ if (WARN_ON(IS_ERR(domain)) || !domain)
+ return;
+
+ arm_smmu_sva_remove_dev_pasid(domain, dev, pasid);
+}
+
static struct iommu_ops arm_smmu_ops = {
.capable = arm_smmu_capable,
.domain_alloc = arm_smmu_domain_alloc,
@@ -2846,11 +2851,9 @@ static struct iommu_ops arm_smmu_ops = {
.device_group = arm_smmu_device_group,
.of_xlate = arm_smmu_of_xlate,
.get_resv_regions = arm_smmu_get_resv_regions,
+ .remove_dev_pasid = arm_smmu_remove_dev_pasid,
.dev_enable_feat = arm_smmu_dev_enable_feature,
.dev_disable_feat = arm_smmu_dev_disable_feature,
- .sva_bind = arm_smmu_sva_bind,
- .sva_unbind = arm_smmu_sva_unbind,
- .sva_get_pasid = arm_smmu_sva_get_pasid,
.page_response = arm_smmu_page_response,
.def_domain_type = arm_smmu_def_domain_type,
.pgsize_bitmap = -1UL, /* Restricted during device attach */
@@ -3543,6 +3546,7 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
/* SID/SSID sizes */
smmu->ssid_bits = FIELD_GET(IDR1_SSIDSIZE, reg);
smmu->sid_bits = FIELD_GET(IDR1_SIDSIZE, reg);
+ smmu->iommu.max_pasids = 1UL << smmu->ssid_bits;
/*
* If the SMMU supports fewer bits than would fill a single L2 stream
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index cd48590ada30..8d772ea8a583 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -754,11 +754,10 @@ bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master);
int arm_smmu_master_enable_sva(struct arm_smmu_master *master);
int arm_smmu_master_disable_sva(struct arm_smmu_master *master);
bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master);
-struct iommu_sva *arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm,
- void *drvdata);
-void arm_smmu_sva_unbind(struct iommu_sva *handle);
-u32 arm_smmu_sva_get_pasid(struct iommu_sva *handle);
void arm_smmu_sva_notifier_synchronize(void);
+struct iommu_domain *arm_smmu_sva_domain_alloc(void);
+void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t id);
#else /* CONFIG_ARM_SMMU_V3_SVA */
static inline bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
{
@@ -790,19 +789,17 @@ static inline bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master
return false;
}
-static inline struct iommu_sva *
-arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm, void *drvdata)
+static inline void arm_smmu_sva_notifier_synchronize(void) {}
+
+static inline struct iommu_domain *arm_smmu_sva_domain_alloc(void)
{
- return ERR_PTR(-ENODEV);
+ return NULL;
}
-static inline void arm_smmu_sva_unbind(struct iommu_sva *handle) {}
-
-static inline u32 arm_smmu_sva_get_pasid(struct iommu_sva *handle)
+static inline void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain,
+ struct device *dev,
+ ioasid_t id)
{
- return IOMMU_PASID_INVALID;
}
-
-static inline void arm_smmu_sva_notifier_synchronize(void) {}
#endif /* CONFIG_ARM_SMMU_V3_SVA */
#endif /* _ARM_SMMU_V3_H */
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c b/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c
index 658f3cc83278..9dc772f2cbb2 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c
@@ -136,6 +136,9 @@ int arm_mmu500_reset(struct arm_smmu_device *smmu)
reg = arm_smmu_cb_read(smmu, i, ARM_SMMU_CB_ACTLR);
reg &= ~ARM_MMU500_ACTLR_CPRE;
arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_ACTLR, reg);
+ reg = arm_smmu_cb_read(smmu, i, ARM_SMMU_CB_ACTLR);
+ if (reg & ARM_MMU500_ACTLR_CPRE)
+ dev_warn_once(smmu->dev, "Failed to disable prefetcher [errata #841119 and #826419], check ACR.CACHE_LOCK\n");
}
return 0;
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c
index 6eed8e67a0ca..74e9ef2fd580 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c
@@ -10,16 +10,6 @@
#include "arm-smmu.h"
#include "arm-smmu-qcom.h"
-enum qcom_smmu_impl_reg_offset {
- QCOM_SMMU_TBU_PWR_STATUS,
- QCOM_SMMU_STATS_SYNC_INV_TBU_ACK,
- QCOM_SMMU_MMU2QSS_AND_SAFE_WAIT_CNTR,
-};
-
-struct qcom_smmu_config {
- const u32 *reg_offset;
-};
-
void qcom_smmu_tlb_sync_debug(struct arm_smmu_device *smmu)
{
int ret;
@@ -59,84 +49,3 @@ void qcom_smmu_tlb_sync_debug(struct arm_smmu_device *smmu)
tbu_pwr_status, sync_inv_ack, sync_inv_progress);
}
}
-
-/* Implementation Defined Register Space 0 register offsets */
-static const u32 qcom_smmu_impl0_reg_offset[] = {
- [QCOM_SMMU_TBU_PWR_STATUS] = 0x2204,
- [QCOM_SMMU_STATS_SYNC_INV_TBU_ACK] = 0x25dc,
- [QCOM_SMMU_MMU2QSS_AND_SAFE_WAIT_CNTR] = 0x2670,
-};
-
-static const struct qcom_smmu_config qcm2290_smmu_cfg = {
- .reg_offset = qcom_smmu_impl0_reg_offset,
-};
-
-static const struct qcom_smmu_config sc7180_smmu_cfg = {
- .reg_offset = qcom_smmu_impl0_reg_offset,
-};
-
-static const struct qcom_smmu_config sc7280_smmu_cfg = {
- .reg_offset = qcom_smmu_impl0_reg_offset,
-};
-
-static const struct qcom_smmu_config sc8180x_smmu_cfg = {
- .reg_offset = qcom_smmu_impl0_reg_offset,
-};
-
-static const struct qcom_smmu_config sc8280xp_smmu_cfg = {
- .reg_offset = qcom_smmu_impl0_reg_offset,
-};
-
-static const struct qcom_smmu_config sm6125_smmu_cfg = {
- .reg_offset = qcom_smmu_impl0_reg_offset,
-};
-
-static const struct qcom_smmu_config sm6350_smmu_cfg = {
- .reg_offset = qcom_smmu_impl0_reg_offset,
-};
-
-static const struct qcom_smmu_config sm8150_smmu_cfg = {
- .reg_offset = qcom_smmu_impl0_reg_offset,
-};
-
-static const struct qcom_smmu_config sm8250_smmu_cfg = {
- .reg_offset = qcom_smmu_impl0_reg_offset,
-};
-
-static const struct qcom_smmu_config sm8350_smmu_cfg = {
- .reg_offset = qcom_smmu_impl0_reg_offset,
-};
-
-static const struct qcom_smmu_config sm8450_smmu_cfg = {
- .reg_offset = qcom_smmu_impl0_reg_offset,
-};
-
-static const struct of_device_id __maybe_unused qcom_smmu_impl_debug_match[] = {
- { .compatible = "qcom,msm8998-smmu-v2" },
- { .compatible = "qcom,qcm2290-smmu-500", .data = &qcm2290_smmu_cfg },
- { .compatible = "qcom,sc7180-smmu-500", .data = &sc7180_smmu_cfg },
- { .compatible = "qcom,sc7280-smmu-500", .data = &sc7280_smmu_cfg},
- { .compatible = "qcom,sc8180x-smmu-500", .data = &sc8180x_smmu_cfg },
- { .compatible = "qcom,sc8280xp-smmu-500", .data = &sc8280xp_smmu_cfg },
- { .compatible = "qcom,sdm630-smmu-v2" },
- { .compatible = "qcom,sdm845-smmu-500" },
- { .compatible = "qcom,sm6125-smmu-500", .data = &sm6125_smmu_cfg},
- { .compatible = "qcom,sm6350-smmu-500", .data = &sm6350_smmu_cfg},
- { .compatible = "qcom,sm8150-smmu-500", .data = &sm8150_smmu_cfg },
- { .compatible = "qcom,sm8250-smmu-500", .data = &sm8250_smmu_cfg },
- { .compatible = "qcom,sm8350-smmu-500", .data = &sm8350_smmu_cfg },
- { .compatible = "qcom,sm8450-smmu-500", .data = &sm8450_smmu_cfg },
- { }
-};
-
-const void *qcom_smmu_impl_data(struct arm_smmu_device *smmu)
-{
- const struct of_device_id *match;
- const struct device_node *np = smmu->dev->of_node;
-
- match = of_match_node(qcom_smmu_impl_debug_match, np);
- if (!match)
- return NULL;
-
- return match->data;
-}
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
index b2708de25ea3..91d404deb115 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -361,6 +361,8 @@ static int qcom_sdm845_smmu500_reset(struct arm_smmu_device *smmu)
{
int ret;
+ arm_mmu500_reset(smmu);
+
/*
* To address performance degradation in non-real time clients,
* such as USB and UFS, turn off wait-for-safe on sdm845 based boards,
@@ -374,41 +376,67 @@ static int qcom_sdm845_smmu500_reset(struct arm_smmu_device *smmu)
return ret;
}
-static int qcom_smmu500_reset(struct arm_smmu_device *smmu)
-{
- const struct device_node *np = smmu->dev->of_node;
-
- arm_mmu500_reset(smmu);
-
- if (of_device_is_compatible(np, "qcom,sdm845-smmu-500"))
- return qcom_sdm845_smmu500_reset(smmu);
+static const struct arm_smmu_impl qcom_smmu_v2_impl = {
+ .init_context = qcom_smmu_init_context,
+ .cfg_probe = qcom_smmu_cfg_probe,
+ .def_domain_type = qcom_smmu_def_domain_type,
+ .write_s2cr = qcom_smmu_write_s2cr,
+ .tlb_sync = qcom_smmu_tlb_sync,
+};
- return 0;
-}
+static const struct arm_smmu_impl qcom_smmu_500_impl = {
+ .init_context = qcom_smmu_init_context,
+ .cfg_probe = qcom_smmu_cfg_probe,
+ .def_domain_type = qcom_smmu_def_domain_type,
+ .reset = arm_mmu500_reset,
+ .write_s2cr = qcom_smmu_write_s2cr,
+ .tlb_sync = qcom_smmu_tlb_sync,
+};
-static const struct arm_smmu_impl qcom_smmu_impl = {
+static const struct arm_smmu_impl sdm845_smmu_500_impl = {
.init_context = qcom_smmu_init_context,
.cfg_probe = qcom_smmu_cfg_probe,
.def_domain_type = qcom_smmu_def_domain_type,
- .reset = qcom_smmu500_reset,
+ .reset = qcom_sdm845_smmu500_reset,
.write_s2cr = qcom_smmu_write_s2cr,
.tlb_sync = qcom_smmu_tlb_sync,
};
-static const struct arm_smmu_impl qcom_adreno_smmu_impl = {
+static const struct arm_smmu_impl qcom_adreno_smmu_v2_impl = {
+ .init_context = qcom_adreno_smmu_init_context,
+ .def_domain_type = qcom_smmu_def_domain_type,
+ .alloc_context_bank = qcom_adreno_smmu_alloc_context_bank,
+ .write_sctlr = qcom_adreno_smmu_write_sctlr,
+ .tlb_sync = qcom_smmu_tlb_sync,
+};
+
+static const struct arm_smmu_impl qcom_adreno_smmu_500_impl = {
.init_context = qcom_adreno_smmu_init_context,
.def_domain_type = qcom_smmu_def_domain_type,
- .reset = qcom_smmu500_reset,
+ .reset = arm_mmu500_reset,
.alloc_context_bank = qcom_adreno_smmu_alloc_context_bank,
.write_sctlr = qcom_adreno_smmu_write_sctlr,
.tlb_sync = qcom_smmu_tlb_sync,
};
static struct arm_smmu_device *qcom_smmu_create(struct arm_smmu_device *smmu,
- const struct arm_smmu_impl *impl)
+ const struct qcom_smmu_match_data *data)
{
+ const struct device_node *np = smmu->dev->of_node;
+ const struct arm_smmu_impl *impl;
struct qcom_smmu *qsmmu;
+ if (!data)
+ return ERR_PTR(-EINVAL);
+
+ if (np && of_device_is_compatible(np, "qcom,adreno-smmu"))
+ impl = data->adreno_impl;
+ else
+ impl = data->impl;
+
+ if (!impl)
+ return smmu;
+
/* Check to make sure qcom_scm has finished probing */
if (!qcom_scm_is_available())
return ERR_PTR(-EPROBE_DEFER);
@@ -418,27 +446,77 @@ static struct arm_smmu_device *qcom_smmu_create(struct arm_smmu_device *smmu,
return ERR_PTR(-ENOMEM);
qsmmu->smmu.impl = impl;
- qsmmu->cfg = qcom_smmu_impl_data(smmu);
+ qsmmu->cfg = data->cfg;
return &qsmmu->smmu;
}
+/* Implementation Defined Register Space 0 register offsets */
+static const u32 qcom_smmu_impl0_reg_offset[] = {
+ [QCOM_SMMU_TBU_PWR_STATUS] = 0x2204,
+ [QCOM_SMMU_STATS_SYNC_INV_TBU_ACK] = 0x25dc,
+ [QCOM_SMMU_MMU2QSS_AND_SAFE_WAIT_CNTR] = 0x2670,
+};
+
+static const struct qcom_smmu_config qcom_smmu_impl0_cfg = {
+ .reg_offset = qcom_smmu_impl0_reg_offset,
+};
+
+/*
+ * It is not yet possible to use MDP SMMU with the bypass quirk on the msm8996,
+ * there are not enough context banks.
+ */
+static const struct qcom_smmu_match_data msm8996_smmu_data = {
+ .impl = NULL,
+ .adreno_impl = &qcom_adreno_smmu_v2_impl,
+};
+
+static const struct qcom_smmu_match_data qcom_smmu_v2_data = {
+ .impl = &qcom_smmu_v2_impl,
+ .adreno_impl = &qcom_adreno_smmu_v2_impl,
+};
+
+static const struct qcom_smmu_match_data sdm845_smmu_500_data = {
+ .impl = &sdm845_smmu_500_impl,
+ /*
+ * No need for adreno impl here. On sdm845 the Adreno SMMU is handled
+ * by the separate sdm845-smmu-v2 device.
+ */
+ /* Also no debug configuration. */
+};
+
+static const struct qcom_smmu_match_data qcom_smmu_500_impl0_data = {
+ .impl = &qcom_smmu_500_impl,
+ .adreno_impl = &qcom_adreno_smmu_500_impl,
+ .cfg = &qcom_smmu_impl0_cfg,
+};
+
+/*
+ * Do not add any more qcom,SOC-smmu-500 entries to this list, unless they need
+ * special handling and can not be covered by the qcom,smmu-500 entry.
+ */
static const struct of_device_id __maybe_unused qcom_smmu_impl_of_match[] = {
- { .compatible = "qcom,msm8998-smmu-v2" },
- { .compatible = "qcom,qcm2290-smmu-500" },
- { .compatible = "qcom,sc7180-smmu-500" },
- { .compatible = "qcom,sc7280-smmu-500" },
- { .compatible = "qcom,sc8180x-smmu-500" },
- { .compatible = "qcom,sc8280xp-smmu-500" },
- { .compatible = "qcom,sdm630-smmu-v2" },
- { .compatible = "qcom,sdm845-smmu-500" },
- { .compatible = "qcom,sm6125-smmu-500" },
- { .compatible = "qcom,sm6350-smmu-500" },
- { .compatible = "qcom,sm6375-smmu-500" },
- { .compatible = "qcom,sm8150-smmu-500" },
- { .compatible = "qcom,sm8250-smmu-500" },
- { .compatible = "qcom,sm8350-smmu-500" },
- { .compatible = "qcom,sm8450-smmu-500" },
+ { .compatible = "qcom,msm8996-smmu-v2", .data = &msm8996_smmu_data },
+ { .compatible = "qcom,msm8998-smmu-v2", .data = &qcom_smmu_v2_data },
+ { .compatible = "qcom,qcm2290-smmu-500", .data = &qcom_smmu_500_impl0_data },
+ { .compatible = "qcom,qdu1000-smmu-500", .data = &qcom_smmu_500_impl0_data },
+ { .compatible = "qcom,sc7180-smmu-500", .data = &qcom_smmu_500_impl0_data },
+ { .compatible = "qcom,sc7280-smmu-500", .data = &qcom_smmu_500_impl0_data },
+ { .compatible = "qcom,sc8180x-smmu-500", .data = &qcom_smmu_500_impl0_data },
+ { .compatible = "qcom,sc8280xp-smmu-500", .data = &qcom_smmu_500_impl0_data },
+ { .compatible = "qcom,sdm630-smmu-v2", .data = &qcom_smmu_v2_data },
+ { .compatible = "qcom,sdm845-smmu-v2", .data = &qcom_smmu_v2_data },
+ { .compatible = "qcom,sdm845-smmu-500", .data = &sdm845_smmu_500_data },
+ { .compatible = "qcom,sm6115-smmu-500", .data = &qcom_smmu_500_impl0_data},
+ { .compatible = "qcom,sm6125-smmu-500", .data = &qcom_smmu_500_impl0_data },
+ { .compatible = "qcom,sm6350-smmu-v2", .data = &qcom_smmu_v2_data },
+ { .compatible = "qcom,sm6350-smmu-500", .data = &qcom_smmu_500_impl0_data },
+ { .compatible = "qcom,sm6375-smmu-500", .data = &qcom_smmu_500_impl0_data },
+ { .compatible = "qcom,sm8150-smmu-500", .data = &qcom_smmu_500_impl0_data },
+ { .compatible = "qcom,sm8250-smmu-500", .data = &qcom_smmu_500_impl0_data },
+ { .compatible = "qcom,sm8350-smmu-500", .data = &qcom_smmu_500_impl0_data },
+ { .compatible = "qcom,sm8450-smmu-500", .data = &qcom_smmu_500_impl0_data },
+ { .compatible = "qcom,smmu-500", .data = &qcom_smmu_500_impl0_data },
{ }
};
@@ -453,26 +531,19 @@ static struct acpi_platform_list qcom_acpi_platlist[] = {
struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu)
{
const struct device_node *np = smmu->dev->of_node;
+ const struct of_device_id *match;
#ifdef CONFIG_ACPI
if (np == NULL) {
/* Match platform for ACPI boot */
if (acpi_match_platform_list(qcom_acpi_platlist) >= 0)
- return qcom_smmu_create(smmu, &qcom_smmu_impl);
+ return qcom_smmu_create(smmu, &qcom_smmu_500_impl0_data);
}
#endif
- /*
- * Do not change this order of implementation, i.e., first adreno
- * smmu impl and then apss smmu since we can have both implementing
- * arm,mmu-500 in which case we will miss setting adreno smmu specific
- * features if the order is changed.
- */
- if (of_device_is_compatible(np, "qcom,adreno-smmu"))
- return qcom_smmu_create(smmu, &qcom_adreno_smmu_impl);
-
- if (of_match_node(qcom_smmu_impl_of_match, np))
- return qcom_smmu_create(smmu, &qcom_smmu_impl);
+ match = of_match_node(qcom_smmu_impl_of_match, np);
+ if (match)
+ return qcom_smmu_create(smmu, match->data);
return smmu;
}
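After this rework the implementation choice is pure table data: qcom_smmu_create() uses data->adreno_impl for nodes also compatible with qcom,adreno-smmu and data->impl otherwise, and a NULL pointer (as for msm8996's non-Adreno SMMU) means plain arm-smmu behaviour. A compact model of that selection:

#include <stdio.h>

/* stands in for struct qcom_smmu_match_data */
struct match_data {
	const char *impl;
	const char *adreno_impl;
};

static const char *pick_impl(const struct match_data *data, int is_adreno)
{
	if (!data)
		return "error";		/* -EINVAL in qcom_smmu_create() */
	return is_adreno ? data->adreno_impl : data->impl;
}

int main(void)
{
	/* msm8996: Adreno-only; no impl for the MDP SMMU (see comment above) */
	const struct match_data msm8996 = { NULL, "qcom_adreno_smmu_v2_impl" };
	const char *impl;

	printf("adreno node: %s\n", pick_impl(&msm8996, 1));

	impl = pick_impl(&msm8996, 0);
	printf("other node:  %s\n", impl ? impl : "plain arm-smmu (NULL impl)");
	return 0;
}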
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h
index 99ec8f8629a0..593910567b88 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h
@@ -14,15 +14,26 @@ struct qcom_smmu {
u32 stall_enabled;
};
+enum qcom_smmu_impl_reg_offset {
+ QCOM_SMMU_TBU_PWR_STATUS,
+ QCOM_SMMU_STATS_SYNC_INV_TBU_ACK,
+ QCOM_SMMU_MMU2QSS_AND_SAFE_WAIT_CNTR,
+};
+
+struct qcom_smmu_config {
+ const u32 *reg_offset;
+};
+
+struct qcom_smmu_match_data {
+ const struct qcom_smmu_config *cfg;
+ const struct arm_smmu_impl *impl;
+ const struct arm_smmu_impl *adreno_impl;
+};
+
#ifdef CONFIG_ARM_SMMU_QCOM_DEBUG
void qcom_smmu_tlb_sync_debug(struct arm_smmu_device *smmu);
-const void *qcom_smmu_impl_data(struct arm_smmu_device *smmu);
#else
static inline void qcom_smmu_tlb_sync_debug(struct arm_smmu_device *smmu) { }
-static inline const void *qcom_smmu_impl_data(struct arm_smmu_device *smmu)
-{
- return NULL;
-}
#endif
#endif /* _ARM_SMMU_QCOM_H */
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index 30dab1418e3f..719fbca1fe52 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -1150,9 +1150,6 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
* different SMMUs.
*/
if (smmu_domain->smmu != smmu) {
- dev_err(dev,
- "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
- dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
ret = -EINVAL;
goto rpm_put;
}
diff --git a/drivers/iommu/arm/arm-smmu/qcom_iommu.c b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
index 3869c3ecda8c..270c3d9128ba 100644
--- a/drivers/iommu/arm/arm-smmu/qcom_iommu.c
+++ b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
@@ -381,13 +381,8 @@ static int qcom_iommu_attach_dev(struct iommu_domain *domain, struct device *dev
* Sanity check the domain. We don't support domains across
* different IOMMUs.
*/
- if (qcom_domain->iommu != qcom_iommu) {
- dev_err(dev, "cannot attach to IOMMU %s while already "
- "attached to domain on IOMMU %s\n",
- dev_name(qcom_domain->iommu->dev),
- dev_name(qcom_iommu->dev));
+ if (qcom_domain->iommu != qcom_iommu)
return -EINVAL;
- }
return 0;
}
@@ -415,7 +410,8 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de
}
static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+ phys_addr_t paddr, size_t pgsize, size_t pgcount,
+ int prot, gfp_t gfp, size_t *mapped)
{
int ret;
unsigned long flags;
@@ -426,13 +422,14 @@ static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova,
return -ENODEV;
spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
- ret = ops->map(ops, iova, paddr, size, prot, GFP_ATOMIC);
+ ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot, GFP_ATOMIC, mapped);
spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);
return ret;
}
static size_t qcom_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
- size_t size, struct iommu_iotlb_gather *gather)
+ size_t pgsize, size_t pgcount,
+ struct iommu_iotlb_gather *gather)
{
size_t ret;
unsigned long flags;
@@ -449,7 +446,7 @@ static size_t qcom_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
*/
pm_runtime_get_sync(qcom_domain->iommu->dev);
spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
- ret = ops->unmap(ops, iova, size, gather);
+ ret = ops->unmap_pages(ops, iova, pgsize, pgcount, gather);
spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);
pm_runtime_put_sync(qcom_domain->iommu->dev);
@@ -587,8 +584,8 @@ static const struct iommu_ops qcom_iommu_ops = {
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = qcom_iommu_attach_dev,
.detach_dev = qcom_iommu_detach_dev,
- .map = qcom_iommu_map,
- .unmap = qcom_iommu_unmap,
+ .map_pages = qcom_iommu_map,
+ .unmap_pages = qcom_iommu_unmap,
.flush_iotlb_all = qcom_iommu_flush_iotlb_all,
.iotlb_sync = qcom_iommu_iotlb_sync,
.iova_to_phys = qcom_iommu_iova_to_phys,
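The map_pages()/unmap_pages() hooks adopted above differ from the removed single-page ops in two ways: one call covers pgcount pages of pgsize bytes each, and progress must be reported through *mapped even on partial failure so the core can unwind. A sketch of the expected loop shape; the failing fifth page is invented for illustration:

#include <stdio.h>
#include <stddef.h>

static int map_pages(unsigned long iova, unsigned long paddr,
		     size_t pgsize, size_t pgcount, size_t *mapped)
{
	size_t i;

	for (i = 0; i < pgcount; i++) {
		if (i == 4)		/* pretend a PTE allocation failed */
			return -12;	/* -ENOMEM */
		/* a real implementation installs one PTE here */
		iova += pgsize;
		paddr += pgsize;
		*mapped += pgsize;	/* progress visible to the caller */
	}
	return 0;
}

int main(void)
{
	size_t mapped = 0;
	int ret = map_pages(0x1000, 0x80000000UL, 0x1000, 16, &mapped);

	printf("ret=%d, mapped=%zu bytes before failure\n", ret, mapped);
	return 0;
}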
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 45fd4850bacb..b0cde2211987 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -708,10 +708,6 @@ static int exynos_sysmmu_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = iommu_device_register(&data->iommu, &exynos_iommu_ops, dev);
- if (ret)
- goto err_iommu_register;
-
platform_set_drvdata(pdev, data);
if (PG_ENT_SHIFT < 0) {
@@ -743,11 +739,13 @@ static int exynos_sysmmu_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
+ ret = iommu_device_register(&data->iommu, &exynos_iommu_ops, dev);
+ if (ret)
+ goto err_dma_set_mask;
+
return 0;
err_dma_set_mask:
- iommu_device_unregister(&data->iommu);
-err_iommu_register:
iommu_device_sysfs_remove(&data->iommu);
return ret;
}
@@ -1432,12 +1430,6 @@ static int __init exynos_iommu_init(void)
return -ENOMEM;
}
- ret = platform_driver_register(&exynos_sysmmu_driver);
- if (ret) {
- pr_err("%s: Failed to register driver\n", __func__);
- goto err_reg_driver;
- }
-
zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL);
if (zero_lv2_table == NULL) {
pr_err("%s: Failed to allocate zero level2 page table\n",
@@ -1446,10 +1438,16 @@ static int __init exynos_iommu_init(void)
goto err_zero_lv2;
}
+ ret = platform_driver_register(&exynos_sysmmu_driver);
+ if (ret) {
+ pr_err("%s: Failed to register driver\n", __func__);
+ goto err_reg_driver;
+ }
+
return 0;
-err_zero_lv2:
- platform_driver_unregister(&exynos_sysmmu_driver);
err_reg_driver:
+ platform_driver_unregister(&exynos_sysmmu_driver);
+err_zero_lv2:
kmem_cache_destroy(lv2table_kmem_cache);
return ret;
}
diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c
index 0d03f837a5d4..05d820fb1d0b 100644
--- a/drivers/iommu/fsl_pamu.c
+++ b/drivers/iommu/fsl_pamu.c
@@ -211,7 +211,7 @@ int pamu_config_ppaace(int liodn, u32 omi, u32 stashid, int prot)
ppaace->op_encode.index_ot.omi = omi;
} else if (~omi != 0) {
pr_debug("bad operation mapping index: %d\n", omi);
- return -EINVAL;
+ return -ENODEV;
}
/* configure stash id */
@@ -779,7 +779,7 @@ static int fsl_pamu_probe(struct platform_device *pdev)
of_get_address(dev->of_node, 0, &size, NULL);
irq = irq_of_parse_and_map(dev->of_node, 0);
- if (irq == NO_IRQ) {
+ if (!irq) {
dev_warn(dev, "no interrupts listed in PAMU node\n");
goto error;
}
@@ -868,7 +868,7 @@ static int fsl_pamu_probe(struct platform_device *pdev)
ret = create_csd(ppaact_phys, mem_size, csd_port_id);
if (ret) {
dev_err(dev, "could not create coherence subdomain\n");
- return ret;
+ goto error;
}
}
@@ -903,7 +903,7 @@ static int fsl_pamu_probe(struct platform_device *pdev)
return 0;
error:
- if (irq != NO_IRQ)
+ if (irq)
free_irq(irq, data);
kfree_sensitive(data);
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c
index fa20f4b03e12..4408ac3c49b6 100644
--- a/drivers/iommu/fsl_pamu_domain.c
+++ b/drivers/iommu/fsl_pamu_domain.c
@@ -258,7 +258,7 @@ static int fsl_pamu_attach_device(struct iommu_domain *domain,
liodn = of_get_property(dev->of_node, "fsl,liodn", &len);
if (!liodn) {
pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
- return -EINVAL;
+ return -ENODEV;
}
spin_lock_irqsave(&dma_domain->domain_lock, flags);
@@ -267,7 +267,7 @@ static int fsl_pamu_attach_device(struct iommu_domain *domain,
if (liodn[i] >= PAACE_NUMBER_ENTRIES) {
pr_debug("Invalid liodn %d, attach device failed for %pOF\n",
liodn[i], dev->of_node);
- ret = -EINVAL;
+ ret = -ENODEV;
break;
}
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index 5a8f780e7ffd..b00a0ceb2d13 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -820,6 +820,7 @@ int __init dmar_dev_scope_init(void)
info = dmar_alloc_pci_notify_info(dev,
BUS_NOTIFY_ADD_DEVICE);
if (!info) {
+ pci_dev_put(dev);
return dmar_dev_scope_status;
} else {
dmar_pci_bus_add_dev(info);
@@ -1105,6 +1106,13 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
raw_spin_lock_init(&iommu->register_lock);
/*
+ * A value of N in PSS field of eCap register indicates hardware
+ * supports PASID field of N+1 bits.
+ */
+ if (pasid_supported(iommu))
+ iommu->iommu.max_pasids = 2UL << ecap_pss(iommu->ecap);
+
+ /*
* This is only for hotplug; at boot time intel_iommu_enabled won't
* be set yet. When intel_iommu_init() runs, it registers the units
* present at boot time, then sets intel_iommu_enabled.
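The PSS arithmetic is easy to invert by accident: PSS = N encodes N + 1 PASID bits, hence 2^(N+1) PASIDs, which is exactly what 2UL << ecap_pss() computes. A quick standalone check; PSS is taken as eCap bits 39:35 per the VT-d specification, and the register value below is made up:

#include <stdio.h>

#define ECAP_PSS(e)	(((e) >> 35) & 0x1f)

int main(void)
{
	unsigned long long ecap = 0x13ULL << 35;	/* encodes PSS = 19 */
	unsigned int pss = ECAP_PSS(ecap);

	/* PSS = N means N + 1 PASID bits, i.e. 2^(N+1) PASIDs */
	printf("PSS=%u -> max_pasids=%lu (2^%u)\n",
	       pss, 2UL << pss, pss + 1);
	return 0;
}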
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index a3db7ac3d60c..5fa52a03069f 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -27,7 +27,7 @@
#include "iommu.h"
#include "../dma-iommu.h"
#include "../irq_remapping.h"
-#include "../iommu-sva-lib.h"
+#include "../iommu-sva.h"
#include "pasid.h"
#include "cap_audit.h"
@@ -1379,6 +1379,24 @@ static void domain_update_iotlb(struct dmar_domain *domain)
spin_unlock_irqrestore(&domain->lock, flags);
}
+/*
+ * The extra devTLB flush quirk impacts those QAT devices with PCI device
+ * IDs ranging from 0x4940 to 0x4943. It is exempted from risky_device()
+ * check because it applies only to the built-in QAT devices and it doesn't
+ * grant additional privileges.
+ */
+#define BUGGY_QAT_DEVID_MASK 0x4940
+static bool dev_needs_extra_dtlb_flush(struct pci_dev *pdev)
+{
+ if (pdev->vendor != PCI_VENDOR_ID_INTEL)
+ return false;
+
+ if ((pdev->device & 0xfffc) != BUGGY_QAT_DEVID_MASK)
+ return false;
+
+ return true;
+}
+
static void iommu_enable_pci_caps(struct device_domain_info *info)
{
struct pci_dev *pdev;
@@ -1461,6 +1479,7 @@ static void __iommu_flush_dev_iotlb(struct device_domain_info *info,
qdep = info->ats_qdep;
qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
qdep, addr, mask);
+ quirk_extra_dev_tlb_flush(info, addr, mask, PASID_RID2PASID, qdep);
}
static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
@@ -3808,8 +3827,10 @@ static inline bool has_external_pci(void)
struct pci_dev *pdev = NULL;
for_each_pci_dev(pdev)
- if (pdev->external_facing)
+ if (pdev->external_facing) {
+ pci_dev_put(pdev);
return true;
+ }
return false;
}
@@ -4167,6 +4188,8 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
return domain;
case IOMMU_DOMAIN_IDENTITY:
return &si_domain->domain;
+ case IOMMU_DOMAIN_SVA:
+ return intel_svm_domain_alloc();
default:
return NULL;
}
@@ -4192,19 +4215,15 @@ static int prepare_domain_attach_device(struct iommu_domain *domain,
return -ENODEV;
if (dmar_domain->force_snooping && !ecap_sc_support(iommu->ecap))
- return -EOPNOTSUPP;
+ return -EINVAL;
/* check if this iommu agaw is sufficient for max mapped address */
addr_width = agaw_to_width(iommu->agaw);
if (addr_width > cap_mgaw(iommu->cap))
addr_width = cap_mgaw(iommu->cap);
- if (dmar_domain->max_addr > (1LL << addr_width)) {
- dev_err(dev, "%s: iommu width (%d) is not "
- "sufficient for the mapped address (%llx)\n",
- __func__, addr_width, dmar_domain->max_addr);
- return -EFAULT;
- }
+ if (dmar_domain->max_addr > (1LL << addr_width))
+ return -EINVAL;
dmar_domain->gaw = addr_width;
/*
@@ -4481,9 +4500,10 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
if (dev_is_pci(dev)) {
if (ecap_dev_iotlb_support(iommu->ecap) &&
pci_ats_supported(pdev) &&
- dmar_ats_supported(pdev, iommu))
+ dmar_ats_supported(pdev, iommu)) {
info->ats_supported = 1;
-
+ info->dtlb_extra_inval = dev_needs_extra_dtlb_flush(pdev);
+ }
if (sm_supported(iommu)) {
if (pasid_supported(iommu)) {
int features = pci_pasid_features(pdev);
@@ -4712,6 +4732,28 @@ static void intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
__mapping_notify_one(info->iommu, dmar_domain, pfn, pages);
}
+static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
+{
+ struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
+ struct iommu_domain *domain;
+
+ /* Domain type specific cleanup: */
+ domain = iommu_get_domain_for_dev_pasid(dev, pasid, 0);
+ if (domain) {
+ switch (domain->type) {
+ case IOMMU_DOMAIN_SVA:
+ intel_svm_remove_dev_pasid(dev, pasid);
+ break;
+ default:
+ /* should never reach here */
+ WARN_ON(1);
+ break;
+ }
+ }
+
+ intel_pasid_tear_down_entry(iommu, dev, pasid, false);
+}
+
const struct iommu_ops intel_iommu_ops = {
.capable = intel_iommu_capable,
.domain_alloc = intel_iommu_domain_alloc,
@@ -4724,11 +4766,9 @@ const struct iommu_ops intel_iommu_ops = {
.dev_disable_feat = intel_iommu_dev_disable_feat,
.is_attach_deferred = intel_iommu_is_attach_deferred,
.def_domain_type = device_def_domain_type,
+ .remove_dev_pasid = intel_iommu_remove_dev_pasid,
.pgsize_bitmap = SZ_4K,
#ifdef CONFIG_INTEL_IOMMU_SVM
- .sva_bind = intel_svm_bind,
- .sva_unbind = intel_svm_unbind,
- .sva_get_pasid = intel_svm_get_pasid,
.page_response = intel_svm_page_response,
#endif
.default_domain_ops = &(const struct iommu_domain_ops) {
@@ -4932,3 +4972,48 @@ static void __init check_tylersburg_isoch(void)
pr_warn("Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
vtisochctrl);
}
+
+/*
+ * Here we deal with a device TLB defect where device may inadvertently issue ATS
+ * invalidation completion before posted writes initiated with translated address
+ * that utilized translations matching the invalidation address range, violating
+ * the invalidation completion ordering.
+ * Therefore, any use cases that cannot guarantee DMA is stopped before unmap is
+ * vulnerable to this defect. In other words, any dTLB invalidation initiated not
+ * under the control of the trusted/privileged host device driver must use this
+ * quirk.
+ * Device TLBs are invalidated under the following six conditions:
+ * 1. Device driver does DMA API unmap IOVA
+ * 2. Device driver unbind a PASID from a process, sva_unbind_device()
+ * 3. PASID is torn down, after PASID cache is flushed. e.g. process
+ * exit_mmap() due to crash
+ * 4. Under SVA usage, called by mmu_notifier.invalidate_range() where
+ * VM has to free pages that were unmapped
+ * 5. Userspace driver unmaps a DMA buffer
+ * 6. Cache invalidation in vSVA usage (upcoming)
+ *
+ * For #1 and #2, device drivers are responsible for stopping DMA traffic
+ * before unmap/unbind. For #3, iommu driver gets mmu_notifier to
+ * invalidate TLB the same way as normal user unmap which will use this quirk.
+ * The dTLB invalidation after PASID cache flush does not need this quirk.
+ *
+ * As a reminder, #6 will *NEED* this quirk as we enable nested translation.
+ */
+void quirk_extra_dev_tlb_flush(struct device_domain_info *info,
+ unsigned long address, unsigned long mask,
+ u32 pasid, u16 qdep)
+{
+ u16 sid;
+
+ if (likely(!info->dtlb_extra_inval))
+ return;
+
+ sid = PCI_DEVID(info->bus, info->devfn);
+ if (pasid == PASID_RID2PASID) {
+ qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
+ qdep, address, mask);
+ } else {
+ qi_flush_dev_iotlb_pasid(info->iommu, sid, info->pfsid,
+ pasid, qdep, address, mask);
+ }
+}
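The dev_needs_extra_dtlb_flush() test earlier in this file masks off the two low bits of the device ID before comparing, so exactly the four IDs 0x4940 through 0x4943 are caught. A standalone check:

#include <stdio.h>

#define BUGGY_QAT_DEVID_MASK 0x4940

int main(void)
{
	unsigned int id;

	/* masking off the low two bits catches exactly 0x4940..0x4943 */
	for (id = 0x493e; id <= 0x4945; id++)
		printf("0x%04x: %s\n", id,
		       (id & 0xfffc) == BUGGY_QAT_DEVID_MASK ?
		       "extra dTLB flush" : "unaffected");
	return 0;
}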
diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
index 30b0d72aeb6c..9ab4b1b27d2a 100644
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -480,8 +480,6 @@ enum {
#define VTD_FLAG_IRQ_REMAP_PRE_ENABLED (1 << 1)
#define VTD_FLAG_SVM_CAPABLE (1 << 2)
-extern int intel_iommu_sm;
-
#define sm_supported(iommu) (intel_iommu_sm && ecap_smts((iommu)->ecap))
#define pasid_supported(iommu) (sm_supported(iommu) && \
ecap_pasid((iommu)->ecap))
@@ -618,6 +616,7 @@ struct device_domain_info {
u8 pri_enabled:1;
u8 ats_supported:1;
u8 ats_enabled:1;
+ u8 dtlb_extra_inval:1; /* Quirk for devices need extra flush */
u8 ats_qdep;
struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
struct intel_iommu *iommu; /* IOMMU used by this device */
@@ -723,6 +722,9 @@ void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
u32 pasid, u16 qdep, u64 addr,
unsigned int size_order);
+void quirk_extra_dev_tlb_flush(struct device_domain_info *info,
+ unsigned long address, unsigned long pages,
+ u32 pasid, u16 qdep);
void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did, u64 granu,
u32 pasid);
@@ -745,12 +747,10 @@ struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn);
extern void intel_svm_check(struct intel_iommu *iommu);
extern int intel_svm_enable_prq(struct intel_iommu *iommu);
extern int intel_svm_finish_prq(struct intel_iommu *iommu);
-struct iommu_sva *intel_svm_bind(struct device *dev, struct mm_struct *mm,
- void *drvdata);
-void intel_svm_unbind(struct iommu_sva *handle);
-u32 intel_svm_get_pasid(struct iommu_sva *handle);
int intel_svm_page_response(struct device *dev, struct iommu_fault_event *evt,
struct iommu_page_response *msg);
+struct iommu_domain *intel_svm_domain_alloc(void);
+void intel_svm_remove_dev_pasid(struct device *dev, ioasid_t pasid);
struct intel_svm_dev {
struct list_head list;
@@ -775,6 +775,14 @@ struct intel_svm {
};
#else
static inline void intel_svm_check(struct intel_iommu *iommu) {}
+static inline struct iommu_domain *intel_svm_domain_alloc(void)
+{
+ return NULL;
+}
+
+static inline void intel_svm_remove_dev_pasid(struct device *dev, ioasid_t pasid)
+{
+}
#endif
#ifdef CONFIG_INTEL_IOMMU_DEBUGFS
@@ -790,6 +798,7 @@ struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
extern const struct iommu_ops intel_iommu_ops;
#ifdef CONFIG_INTEL_IOMMU
+extern int intel_iommu_sm;
extern int iommu_calculate_agaw(struct intel_iommu *iommu);
extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
extern int dmar_disabled;
@@ -805,6 +814,7 @@ static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
}
#define dmar_disabled (1)
#define intel_iommu_enabled (0)
+#define intel_iommu_sm (0)
#endif
static inline const char *decode_prq_descriptor(char *str, size_t size,
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index e13d7e5273e1..fb3c7020028d 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -101,8 +101,10 @@ int intel_pasid_alloc_table(struct device *dev)
might_sleep();
info = dev_iommu_priv_get(dev);
- if (WARN_ON(!info || !dev_is_pci(dev) || info->pasid_table))
- return -EINVAL;
+ if (WARN_ON(!info || !dev_is_pci(dev)))
+ return -ENODEV;
+ if (WARN_ON(info->pasid_table))
+ return -EEXIST;
pasid_table = kzalloc(sizeof(*pasid_table), GFP_KERNEL);
if (!pasid_table)
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index 7d08eb034f2d..c76b66263467 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -24,7 +24,7 @@
#include "iommu.h"
#include "pasid.h"
#include "perf.h"
-#include "../iommu-sva-lib.h"
+#include "../iommu-sva.h"
#include "trace.h"
static irqreturn_t prq_event_thread(int irq, void *d);
@@ -184,10 +184,13 @@ static void __flush_svm_range_dev(struct intel_svm *svm,
return;
qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, address, pages, ih);
- if (info->ats_enabled)
+ if (info->ats_enabled) {
qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid,
svm->pasid, sdev->qdep, address,
order_base_2(pages));
+ quirk_extra_dev_tlb_flush(info, address, order_base_2(pages),
+ svm->pasid, sdev->qdep);
+ }
}
static void intel_flush_svm_range_dev(struct intel_svm *svm,
@@ -296,19 +299,9 @@ out:
return 0;
}
-static int intel_svm_alloc_pasid(struct device *dev, struct mm_struct *mm,
- unsigned int flags)
-{
- ioasid_t max_pasid = dev_is_pci(dev) ?
- pci_max_pasids(to_pci_dev(dev)) : intel_pasid_max_id;
-
- return iommu_sva_alloc_pasid(mm, PASID_MIN, max_pasid - 1);
-}
-
static struct iommu_sva *intel_svm_bind_mm(struct intel_iommu *iommu,
struct device *dev,
- struct mm_struct *mm,
- unsigned int flags)
+ struct mm_struct *mm)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_svm_dev *sdev;
@@ -324,22 +317,18 @@ static struct iommu_sva *intel_svm_bind_mm(struct intel_iommu *iommu,
svm->pasid = mm->pasid;
svm->mm = mm;
- svm->flags = flags;
INIT_LIST_HEAD_RCU(&svm->devs);
- if (!(flags & SVM_FLAG_SUPERVISOR_MODE)) {
- svm->notifier.ops = &intel_mmuops;
- ret = mmu_notifier_register(&svm->notifier, mm);
- if (ret) {
- kfree(svm);
- return ERR_PTR(ret);
- }
+ svm->notifier.ops = &intel_mmuops;
+ ret = mmu_notifier_register(&svm->notifier, mm);
+ if (ret) {
+ kfree(svm);
+ return ERR_PTR(ret);
}
ret = pasid_private_add(svm->pasid, svm);
if (ret) {
- if (svm->notifier.ops)
- mmu_notifier_unregister(&svm->notifier, mm);
+ mmu_notifier_unregister(&svm->notifier, mm);
kfree(svm);
return ERR_PTR(ret);
}
@@ -374,9 +363,7 @@ static struct iommu_sva *intel_svm_bind_mm(struct intel_iommu *iommu,
}
/* Setup the pasid table: */
- sflags = (flags & SVM_FLAG_SUPERVISOR_MODE) ?
- PASID_FLAG_SUPERVISOR_MODE : 0;
- sflags |= cpu_feature_enabled(X86_FEATURE_LA57) ? PASID_FLAG_FL5LP : 0;
+ sflags = cpu_feature_enabled(X86_FEATURE_LA57) ? PASID_FLAG_FL5LP : 0;
ret = intel_pasid_setup_first_level(iommu, dev, mm->pgd, mm->pasid,
FLPT_DEFAULT_DID, sflags);
if (ret)
@@ -390,8 +377,7 @@ free_sdev:
kfree(sdev);
free_svm:
if (list_empty(&svm->devs)) {
- if (svm->notifier.ops)
- mmu_notifier_unregister(&svm->notifier, mm);
+ mmu_notifier_unregister(&svm->notifier, mm);
pasid_private_remove(mm->pasid);
kfree(svm);
}
@@ -745,12 +731,16 @@ bad_req:
* If prq is to be handled outside iommu driver via receiver of
* the fault notifiers, we skip the page response here.
*/
- if (!pdev || intel_svm_prq_report(iommu, &pdev->dev, req))
- handle_bad_prq_event(iommu, req, QI_RESP_INVALID);
+ if (!pdev)
+ goto bad_req;
- trace_prq_report(iommu, &pdev->dev, req->qw_0, req->qw_1,
- req->priv_data[0], req->priv_data[1],
- iommu->prq_seq_number++);
+ if (intel_svm_prq_report(iommu, &pdev->dev, req))
+ handle_bad_prq_event(iommu, req, QI_RESP_INVALID);
+ else
+ trace_prq_report(iommu, &pdev->dev, req->qw_0, req->qw_1,
+ req->priv_data[0], req->priv_data[1],
+ iommu->prq_seq_number++);
+ pci_dev_put(pdev);
prq_advance:
head = (head + sizeof(*req)) & PRQ_RING_MASK;
}
@@ -780,67 +770,6 @@ prq_advance:
return IRQ_RETVAL(handled);
}
-struct iommu_sva *intel_svm_bind(struct device *dev, struct mm_struct *mm, void *drvdata)
-{
- struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
- unsigned int flags = 0;
- struct iommu_sva *sva;
- int ret;
-
- if (drvdata)
- flags = *(unsigned int *)drvdata;
-
- if (flags & SVM_FLAG_SUPERVISOR_MODE) {
- if (!ecap_srs(iommu->ecap)) {
- dev_err(dev, "%s: Supervisor PASID not supported\n",
- iommu->name);
- return ERR_PTR(-EOPNOTSUPP);
- }
-
- if (mm) {
- dev_err(dev, "%s: Supervisor PASID with user provided mm\n",
- iommu->name);
- return ERR_PTR(-EINVAL);
- }
-
- mm = &init_mm;
- }
-
- mutex_lock(&pasid_mutex);
- ret = intel_svm_alloc_pasid(dev, mm, flags);
- if (ret) {
- mutex_unlock(&pasid_mutex);
- return ERR_PTR(ret);
- }
-
- sva = intel_svm_bind_mm(iommu, dev, mm, flags);
- mutex_unlock(&pasid_mutex);
-
- return sva;
-}
-
-void intel_svm_unbind(struct iommu_sva *sva)
-{
- struct intel_svm_dev *sdev = to_intel_svm_dev(sva);
-
- mutex_lock(&pasid_mutex);
- intel_svm_unbind_mm(sdev->dev, sdev->pasid);
- mutex_unlock(&pasid_mutex);
-}
-
-u32 intel_svm_get_pasid(struct iommu_sva *sva)
-{
- struct intel_svm_dev *sdev;
- u32 pasid;
-
- mutex_lock(&pasid_mutex);
- sdev = to_intel_svm_dev(sva);
- pasid = sdev->pasid;
- mutex_unlock(&pasid_mutex);
-
- return pasid;
-}
-
int intel_svm_page_response(struct device *dev,
struct iommu_fault_event *evt,
struct iommu_page_response *msg)
@@ -911,3 +840,50 @@ int intel_svm_page_response(struct device *dev,
out:
return ret;
}
+
+void intel_svm_remove_dev_pasid(struct device *dev, ioasid_t pasid)
+{
+ mutex_lock(&pasid_mutex);
+ intel_svm_unbind_mm(dev, pasid);
+ mutex_unlock(&pasid_mutex);
+}
+
+static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t pasid)
+{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+ struct intel_iommu *iommu = info->iommu;
+ struct mm_struct *mm = domain->mm;
+ struct iommu_sva *sva;
+ int ret = 0;
+
+ mutex_lock(&pasid_mutex);
+ sva = intel_svm_bind_mm(iommu, dev, mm);
+ if (IS_ERR(sva))
+ ret = PTR_ERR(sva);
+ mutex_unlock(&pasid_mutex);
+
+ return ret;
+}
+
+static void intel_svm_domain_free(struct iommu_domain *domain)
+{
+ kfree(to_dmar_domain(domain));
+}
+
+static const struct iommu_domain_ops intel_svm_domain_ops = {
+ .set_dev_pasid = intel_svm_set_dev_pasid,
+ .free = intel_svm_domain_free
+};
+
+struct iommu_domain *intel_svm_domain_alloc(void)
+{
+ struct dmar_domain *domain;
+
+ domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+ if (!domain)
+ return NULL;
+ domain->domain.ops = &intel_svm_domain_ops;
+
+ return &domain->domain;
+}
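This driver and the SMMUv3 one now publish the same minimal SVA domain shape: an allocator returning a domain whose ops carry only set_dev_pasid() and free(). A userspace model of that ops-table pattern; the names are invented, and the core would reach these hooks through iommu_attach_device_pasid() and iommu_domain_free():

#include <stdio.h>
#include <stdlib.h>

struct domain;

struct domain_ops {
	int (*set_dev_pasid)(struct domain *d, int pasid);
	void (*free)(struct domain *d);
};

struct domain {
	const struct domain_ops *ops;
	void *mm;			/* stands in for domain->mm */
};

static int sva_set_dev_pasid(struct domain *d, int pasid)
{
	printf("bind mm %p to pasid %d\n", d->mm, pasid);
	return 0;
}

static void sva_free(struct domain *d)
{
	free(d);
}

static const struct domain_ops sva_ops = {
	.set_dev_pasid	= sva_set_dev_pasid,
	.free		= sva_free,
};

static struct domain *sva_domain_alloc(void *mm)
{
	struct domain *d = calloc(1, sizeof(*d));

	if (!d)
		return NULL;
	d->ops = &sva_ops;
	d->mm = mm;
	return d;
}

int main(void)
{
	int mm;
	struct domain *d = sva_domain_alloc(&mm);

	if (!d)
		return 1;
	d->ops->set_dev_pasid(d, 5);	/* core: iommu_attach_device_pasid() */
	d->ops->free(d);		/* core: iommu_domain_free() */
	return 0;
}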
diff --git a/drivers/iommu/io-pgfault.c b/drivers/iommu/io-pgfault.c
index 1df8c1dcae77..e5b8b9110c13 100644
--- a/drivers/iommu/io-pgfault.c
+++ b/drivers/iommu/io-pgfault.c
@@ -11,7 +11,7 @@
#include <linux/slab.h>
#include <linux/workqueue.h>
-#include "iommu-sva-lib.h"
+#include "iommu-sva.h"
/**
* struct iopf_queue - IO Page Fault queue
@@ -69,69 +69,18 @@ static int iopf_complete_group(struct device *dev, struct iopf_fault *iopf,
return iommu_page_response(dev, &resp);
}
-static enum iommu_page_response_code
-iopf_handle_single(struct iopf_fault *iopf)
-{
- vm_fault_t ret;
- struct mm_struct *mm;
- struct vm_area_struct *vma;
- unsigned int access_flags = 0;
- unsigned int fault_flags = FAULT_FLAG_REMOTE;
- struct iommu_fault_page_request *prm = &iopf->fault.prm;
- enum iommu_page_response_code status = IOMMU_PAGE_RESP_INVALID;
-
- if (!(prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID))
- return status;
-
- mm = iommu_sva_find(prm->pasid);
- if (IS_ERR_OR_NULL(mm))
- return status;
-
- mmap_read_lock(mm);
-
- vma = find_extend_vma(mm, prm->addr);
- if (!vma)
- /* Unmapped area */
- goto out_put_mm;
-
- if (prm->perm & IOMMU_FAULT_PERM_READ)
- access_flags |= VM_READ;
-
- if (prm->perm & IOMMU_FAULT_PERM_WRITE) {
- access_flags |= VM_WRITE;
- fault_flags |= FAULT_FLAG_WRITE;
- }
-
- if (prm->perm & IOMMU_FAULT_PERM_EXEC) {
- access_flags |= VM_EXEC;
- fault_flags |= FAULT_FLAG_INSTRUCTION;
- }
-
- if (!(prm->perm & IOMMU_FAULT_PERM_PRIV))
- fault_flags |= FAULT_FLAG_USER;
-
- if (access_flags & ~vma->vm_flags)
- /* Access fault */
- goto out_put_mm;
-
- ret = handle_mm_fault(vma, prm->addr, fault_flags, NULL);
- status = ret & VM_FAULT_ERROR ? IOMMU_PAGE_RESP_INVALID :
- IOMMU_PAGE_RESP_SUCCESS;
-
-out_put_mm:
- mmap_read_unlock(mm);
- mmput(mm);
-
- return status;
-}
-
-static void iopf_handle_group(struct work_struct *work)
+static void iopf_handler(struct work_struct *work)
{
struct iopf_group *group;
+ struct iommu_domain *domain;
struct iopf_fault *iopf, *next;
enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS;
group = container_of(work, struct iopf_group, work);
+ domain = iommu_get_domain_for_dev_pasid(group->dev,
+ group->last_fault.fault.prm.pasid, 0);
+ if (!domain || !domain->iopf_handler)
+ status = IOMMU_PAGE_RESP_INVALID;
list_for_each_entry_safe(iopf, next, &group->faults, list) {
/*
@@ -139,7 +88,8 @@ static void iopf_handle_group(struct work_struct *work)
* faults in the group if there is an error.
*/
if (status == IOMMU_PAGE_RESP_SUCCESS)
- status = iopf_handle_single(iopf);
+ status = domain->iopf_handler(&iopf->fault,
+ domain->fault_data);
if (!(iopf->fault.prm.flags &
IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE))
@@ -181,6 +131,13 @@ static void iopf_handle_group(struct work_struct *work)
* request completes, outstanding faults will have been dealt with by the time
* the PASID is freed.
*
+ * Any valid page fault will eventually be routed to an iommu domain, and the
+ * page fault handler installed there will be called. Users of this handling
+ * framework must guarantee that the iommu domain is only freed after the
+ * device has stopped generating page faults (or the iommu hardware has been
+ * set to block the page faults) and all pending page faults have been
+ * flushed.
+ *
* Return: 0 on success and <0 on error.
*/
int iommu_queue_iopf(struct iommu_fault *fault, void *cookie)
@@ -235,7 +192,7 @@ int iommu_queue_iopf(struct iommu_fault *fault, void *cookie)
group->last_fault.fault = *fault;
INIT_LIST_HEAD(&group->faults);
list_add(&group->last_fault.list, &group->faults);
- INIT_WORK(&group->work, iopf_handle_group);
+ INIT_WORK(&group->work, iopf_handler);
/* See if we have partial faults for this group */
list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) {
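For orientation, this is roughly how a driver now wires faults into the reworked path. The helper below is a hypothetical sketch (my_driver_enable_iopf() is not real code); the two iommu_* calls are the existing framework entry points, and iopf_handler() above does the domain lookup and dispatch.

#include <linux/iommu.h>

/* Hypothetical driver hook: route page requests into the iopf queue. */
static int my_driver_enable_iopf(struct iopf_queue *queue, struct device *dev)
{
	int ret;

	ret = iopf_queue_add_device(queue, dev);	/* per-device PRI state */
	if (ret)
		return ret;

	/*
	 * Every fault reported for @dev now lands in iommu_queue_iopf(),
	 * and iopf_handler() above resolves the domain by PASID and calls
	 * domain->iopf_handler.
	 */
	return iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);
}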
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index ba3115fd0f86..75f244a3e12d 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -564,8 +564,7 @@ static int arm_v7s_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
iova += pgsize;
paddr += pgsize;
- if (mapped)
- *mapped += pgsize;
+ *mapped += pgsize;
}
/*
* Synchronise all PTE updates for the new mapping before there's
@@ -576,12 +575,6 @@ static int arm_v7s_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
return ret;
}
-static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
-{
- return arm_v7s_map_pages(ops, iova, paddr, size, 1, prot, gfp, NULL);
-}
-
static void arm_v7s_free_pgtable(struct io_pgtable *iop)
{
struct arm_v7s_io_pgtable *data = io_pgtable_to_data(iop);
@@ -764,12 +757,6 @@ static size_t arm_v7s_unmap_pages(struct io_pgtable_ops *ops, unsigned long iova
return unmapped;
}
-static size_t arm_v7s_unmap(struct io_pgtable_ops *ops, unsigned long iova,
- size_t size, struct iommu_iotlb_gather *gather)
-{
- return arm_v7s_unmap_pages(ops, iova, size, 1, gather);
-}
-
static phys_addr_t arm_v7s_iova_to_phys(struct io_pgtable_ops *ops,
unsigned long iova)
{
@@ -842,9 +829,7 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
goto out_free_data;
data->iop.ops = (struct io_pgtable_ops) {
- .map = arm_v7s_map,
.map_pages = arm_v7s_map_pages,
- .unmap = arm_v7s_unmap,
.unmap_pages = arm_v7s_unmap_pages,
.iova_to_phys = arm_v7s_iova_to_phys,
};
@@ -954,6 +939,7 @@ static int __init arm_v7s_do_selftests(void)
};
unsigned int iova, size, iova_start;
unsigned int i, loopnr = 0;
+ size_t mapped;
selftest_running = true;
@@ -984,15 +970,16 @@ static int __init arm_v7s_do_selftests(void)
iova = 0;
for_each_set_bit(i, &cfg.pgsize_bitmap, BITS_PER_LONG) {
size = 1UL << i;
- if (ops->map(ops, iova, iova, size, IOMMU_READ |
- IOMMU_WRITE |
- IOMMU_NOEXEC |
- IOMMU_CACHE, GFP_KERNEL))
+ if (ops->map_pages(ops, iova, iova, size, 1,
+ IOMMU_READ | IOMMU_WRITE |
+ IOMMU_NOEXEC | IOMMU_CACHE,
+ GFP_KERNEL, &mapped))
return __FAIL(ops);
/* Overlapping mappings */
- if (!ops->map(ops, iova, iova + size, size,
- IOMMU_READ | IOMMU_NOEXEC, GFP_KERNEL))
+ if (!ops->map_pages(ops, iova, iova + size, size, 1,
+ IOMMU_READ | IOMMU_NOEXEC, GFP_KERNEL,
+ &mapped))
return __FAIL(ops);
if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
@@ -1007,11 +994,12 @@ static int __init arm_v7s_do_selftests(void)
size = 1UL << __ffs(cfg.pgsize_bitmap);
while (i < loopnr) {
iova_start = i * SZ_16M;
- if (ops->unmap(ops, iova_start + size, size, NULL) != size)
+ if (ops->unmap_pages(ops, iova_start + size, size, 1, NULL) != size)
return __FAIL(ops);
/* Remap of partial unmap */
- if (ops->map(ops, iova_start + size, size, size, IOMMU_READ, GFP_KERNEL))
+ if (ops->map_pages(ops, iova_start + size, size, size, 1,
+ IOMMU_READ, GFP_KERNEL, &mapped))
return __FAIL(ops);
if (ops->iova_to_phys(ops, iova_start + size + 42)
@@ -1025,14 +1013,15 @@ static int __init arm_v7s_do_selftests(void)
for_each_set_bit(i, &cfg.pgsize_bitmap, BITS_PER_LONG) {
size = 1UL << i;
- if (ops->unmap(ops, iova, size, NULL) != size)
+ if (ops->unmap_pages(ops, iova, size, 1, NULL) != size)
return __FAIL(ops);
if (ops->iova_to_phys(ops, iova + 42))
return __FAIL(ops);
/* Remap full block */
- if (ops->map(ops, iova, iova, size, IOMMU_WRITE, GFP_KERNEL))
+ if (ops->map_pages(ops, iova, iova, size, 1, IOMMU_WRITE,
+ GFP_KERNEL, &mapped))
return __FAIL(ops);
if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
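The selftest conversions above all follow the same pattern, so here is the contract in isolation, as a hedged sketch (demo_map_span() is illustrative, not driver API): map_pages() maps pgcount pages of pgsize bytes in one call, and on failure *mapped reports how many bytes actually went in so the caller can unwind.

static int demo_map_span(struct io_pgtable_ops *ops, unsigned long iova,
			 phys_addr_t paddr, size_t pgsize, size_t pgcount)
{
	size_t mapped = 0;
	int ret;

	ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount,
			     IOMMU_READ | IOMMU_WRITE, GFP_KERNEL, &mapped);
	if (ret && mapped)	/* unwind partial progress */
		ops->unmap_pages(ops, iova, pgsize, mapped / pgsize, NULL);
	return ret;
}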
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 0ba817e86346..72dcdd468cf3 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -360,7 +360,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
max_entries = ARM_LPAE_PTES_PER_TABLE(data) - map_idx_start;
num_entries = min_t(int, pgcount, max_entries);
ret = arm_lpae_init_pte(data, iova, paddr, prot, lvl, num_entries, ptep);
- if (!ret && mapped)
+ if (!ret)
*mapped += num_entries * size;
return ret;
@@ -496,13 +496,6 @@ static int arm_lpae_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
return ret;
}
-static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
- phys_addr_t paddr, size_t size, int iommu_prot, gfp_t gfp)
-{
- return arm_lpae_map_pages(ops, iova, paddr, size, 1, iommu_prot, gfp,
- NULL);
-}
-
static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
arm_lpae_iopte *ptep)
{
@@ -682,12 +675,6 @@ static size_t arm_lpae_unmap_pages(struct io_pgtable_ops *ops, unsigned long iov
data->start_level, ptep);
}
-static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
- size_t size, struct iommu_iotlb_gather *gather)
-{
- return arm_lpae_unmap_pages(ops, iova, size, 1, gather);
-}
-
static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
unsigned long iova)
{
@@ -799,9 +786,7 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
data->pgd_bits = va_bits - (data->bits_per_level * (levels - 1));
data->iop.ops = (struct io_pgtable_ops) {
- .map = arm_lpae_map,
.map_pages = arm_lpae_map_pages,
- .unmap = arm_lpae_unmap,
.unmap_pages = arm_lpae_unmap_pages,
.iova_to_phys = arm_lpae_iova_to_phys,
};
@@ -1176,7 +1161,7 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
int i, j;
unsigned long iova;
- size_t size;
+ size_t size, mapped;
struct io_pgtable_ops *ops;
selftest_running = true;
@@ -1209,15 +1194,16 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
size = 1UL << j;
- if (ops->map(ops, iova, iova, size, IOMMU_READ |
- IOMMU_WRITE |
- IOMMU_NOEXEC |
- IOMMU_CACHE, GFP_KERNEL))
+ if (ops->map_pages(ops, iova, iova, size, 1,
+ IOMMU_READ | IOMMU_WRITE |
+ IOMMU_NOEXEC | IOMMU_CACHE,
+ GFP_KERNEL, &mapped))
return __FAIL(ops, i);
/* Overlapping mappings */
- if (!ops->map(ops, iova, iova + size, size,
- IOMMU_READ | IOMMU_NOEXEC, GFP_KERNEL))
+ if (!ops->map_pages(ops, iova, iova + size, size, 1,
+ IOMMU_READ | IOMMU_NOEXEC,
+ GFP_KERNEL, &mapped))
return __FAIL(ops, i);
if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
@@ -1228,11 +1214,12 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
/* Partial unmap */
size = 1UL << __ffs(cfg->pgsize_bitmap);
- if (ops->unmap(ops, SZ_1G + size, size, NULL) != size)
+ if (ops->unmap_pages(ops, SZ_1G + size, size, 1, NULL) != size)
return __FAIL(ops, i);
/* Remap of partial unmap */
- if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ, GFP_KERNEL))
+ if (ops->map_pages(ops, SZ_1G + size, size, size, 1,
+ IOMMU_READ, GFP_KERNEL, &mapped))
return __FAIL(ops, i);
if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
@@ -1243,14 +1230,15 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
size = 1UL << j;
- if (ops->unmap(ops, iova, size, NULL) != size)
+ if (ops->unmap_pages(ops, iova, size, 1, NULL) != size)
return __FAIL(ops, i);
if (ops->iova_to_phys(ops, iova + 42))
return __FAIL(ops, i);
/* Remap full block */
- if (ops->map(ops, iova, iova, size, IOMMU_WRITE, GFP_KERNEL))
+ if (ops->map_pages(ops, iova, iova, size, 1,
+ IOMMU_WRITE, GFP_KERNEL, &mapped))
return __FAIL(ops, i);
if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
diff --git a/drivers/iommu/iommu-sva-lib.c b/drivers/iommu/iommu-sva-lib.c
deleted file mode 100644
index 106506143896..000000000000
--- a/drivers/iommu/iommu-sva-lib.c
+++ /dev/null
@@ -1,71 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Helpers for IOMMU drivers implementing SVA
- */
-#include <linux/mutex.h>
-#include <linux/sched/mm.h>
-
-#include "iommu-sva-lib.h"
-
-static DEFINE_MUTEX(iommu_sva_lock);
-static DECLARE_IOASID_SET(iommu_sva_pasid);
-
-/**
- * iommu_sva_alloc_pasid - Allocate a PASID for the mm
- * @mm: the mm
- * @min: minimum PASID value (inclusive)
- * @max: maximum PASID value (inclusive)
- *
- * Try to allocate a PASID for this mm, or take a reference to the existing one
- * provided it fits within the [@min, @max] range. On success the PASID is
- * available in mm->pasid and will be available for the lifetime of the mm.
- *
- * Returns 0 on success and < 0 on error.
- */
-int iommu_sva_alloc_pasid(struct mm_struct *mm, ioasid_t min, ioasid_t max)
-{
- int ret = 0;
- ioasid_t pasid;
-
- if (min == INVALID_IOASID || max == INVALID_IOASID ||
- min == 0 || max < min)
- return -EINVAL;
-
- mutex_lock(&iommu_sva_lock);
- /* Is a PASID already associated with this mm? */
- if (pasid_valid(mm->pasid)) {
- if (mm->pasid < min || mm->pasid >= max)
- ret = -EOVERFLOW;
- goto out;
- }
-
- pasid = ioasid_alloc(&iommu_sva_pasid, min, max, mm);
- if (!pasid_valid(pasid))
- ret = -ENOMEM;
- else
- mm_pasid_set(mm, pasid);
-out:
- mutex_unlock(&iommu_sva_lock);
- return ret;
-}
-EXPORT_SYMBOL_GPL(iommu_sva_alloc_pasid);
-
-/* ioasid_find getter() requires a void * argument */
-static bool __mmget_not_zero(void *mm)
-{
- return mmget_not_zero(mm);
-}
-
-/**
- * iommu_sva_find() - Find mm associated to the given PASID
- * @pasid: Process Address Space ID assigned to the mm
- *
- * On success a reference to the mm is taken, and must be released with mmput().
- *
- * Returns the mm corresponding to this PASID, or an error if not found.
- */
-struct mm_struct *iommu_sva_find(ioasid_t pasid)
-{
- return ioasid_find(&iommu_sva_pasid, pasid, __mmget_not_zero);
-}
-EXPORT_SYMBOL_GPL(iommu_sva_find);
diff --git a/drivers/iommu/iommu-sva.c b/drivers/iommu/iommu-sva.c
new file mode 100644
index 000000000000..24bf9b2b58aa
--- /dev/null
+++ b/drivers/iommu/iommu-sva.c
@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Helpers for IOMMU drivers implementing SVA
+ */
+#include <linux/mutex.h>
+#include <linux/sched/mm.h>
+#include <linux/iommu.h>
+
+#include "iommu-sva.h"
+
+static DEFINE_MUTEX(iommu_sva_lock);
+static DECLARE_IOASID_SET(iommu_sva_pasid);
+
+/**
+ * iommu_sva_alloc_pasid - Allocate a PASID for the mm
+ * @mm: the mm
+ * @min: minimum PASID value (inclusive)
+ * @max: maximum PASID value (inclusive)
+ *
+ * Try to allocate a PASID for this mm, or take a reference to the existing one
+ * provided it fits within the [@min, @max] range. On success the PASID is
+ * available in mm->pasid and will be available for the lifetime of the mm.
+ *
+ * Returns 0 on success and < 0 on error.
+ */
+int iommu_sva_alloc_pasid(struct mm_struct *mm, ioasid_t min, ioasid_t max)
+{
+ int ret = 0;
+ ioasid_t pasid;
+
+ if (min == INVALID_IOASID || max == INVALID_IOASID ||
+ min == 0 || max < min)
+ return -EINVAL;
+
+ mutex_lock(&iommu_sva_lock);
+ /* Is a PASID already associated with this mm? */
+ if (pasid_valid(mm->pasid)) {
+ if (mm->pasid < min || mm->pasid >= max)
+ ret = -EOVERFLOW;
+ goto out;
+ }
+
+ pasid = ioasid_alloc(&iommu_sva_pasid, min, max, mm);
+ if (!pasid_valid(pasid))
+ ret = -ENOMEM;
+ else
+ mm_pasid_set(mm, pasid);
+out:
+ mutex_unlock(&iommu_sva_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_sva_alloc_pasid);
+
+/* ioasid_find getter() requires a void * argument */
+static bool __mmget_not_zero(void *mm)
+{
+ return mmget_not_zero(mm);
+}
+
+/**
+ * iommu_sva_find() - Find mm associated to the given PASID
+ * @pasid: Process Address Space ID assigned to the mm
+ *
+ * On success a reference to the mm is taken, and must be released with mmput().
+ *
+ * Returns the mm corresponding to this PASID, or an error if not found.
+ */
+struct mm_struct *iommu_sva_find(ioasid_t pasid)
+{
+ return ioasid_find(&iommu_sva_pasid, pasid, __mmget_not_zero);
+}
+EXPORT_SYMBOL_GPL(iommu_sva_find);
+
+/**
+ * iommu_sva_bind_device() - Bind a process address space to a device
+ * @dev: the device
+ * @mm: the mm to bind, caller must hold a reference to mm_users
+ *
+ * Create a bond between device and address space, allowing the device to
+ * access the mm using the PASID returned by iommu_sva_get_pasid(). If a
+ * bond already exists between @dev and @mm, an additional internal
+ * reference is taken. The caller must call iommu_sva_unbind_device()
+ * to release each reference.
+ *
+ * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to
+ * initialize the required SVA features.
+ *
+ * On error, returns an ERR_PTR value.
+ */
+struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
+{
+ struct iommu_domain *domain;
+ struct iommu_sva *handle;
+ ioasid_t max_pasids;
+ int ret;
+
+ max_pasids = dev->iommu->max_pasids;
+ if (!max_pasids)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ /* Allocate mm->pasid if necessary. */
+ ret = iommu_sva_alloc_pasid(mm, 1, max_pasids - 1);
+ if (ret)
+ return ERR_PTR(ret);
+
+ handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+ if (!handle)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_lock(&iommu_sva_lock);
+ /* Search for an existing domain. */
+ domain = iommu_get_domain_for_dev_pasid(dev, mm->pasid,
+ IOMMU_DOMAIN_SVA);
+ if (IS_ERR(domain)) {
+ ret = PTR_ERR(domain);
+ goto out_unlock;
+ }
+
+ if (domain) {
+ domain->users++;
+ goto out;
+ }
+
+ /* Allocate a new domain and set it on device pasid. */
+ domain = iommu_sva_domain_alloc(dev, mm);
+ if (!domain) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+
+ ret = iommu_attach_device_pasid(domain, dev, mm->pasid);
+ if (ret)
+ goto out_free_domain;
+ domain->users = 1;
+out:
+ mutex_unlock(&iommu_sva_lock);
+ handle->dev = dev;
+ handle->domain = domain;
+
+ return handle;
+
+out_free_domain:
+ iommu_domain_free(domain);
+out_unlock:
+ mutex_unlock(&iommu_sva_lock);
+ kfree(handle);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(iommu_sva_bind_device);
+
+/**
+ * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device
+ * @handle: the handle returned by iommu_sva_bind_device()
+ *
+ * Put reference to a bond between device and address space. The device should
+ * not be issuing any more transactions for this PASID. All outstanding page
+ * requests for this PASID must have been flushed to the IOMMU.
+ */
+void iommu_sva_unbind_device(struct iommu_sva *handle)
+{
+ struct iommu_domain *domain = handle->domain;
+ ioasid_t pasid = domain->mm->pasid;
+ struct device *dev = handle->dev;
+
+ mutex_lock(&iommu_sva_lock);
+ if (--domain->users == 0) {
+ iommu_detach_device_pasid(domain, dev, pasid);
+ iommu_domain_free(domain);
+ }
+ mutex_unlock(&iommu_sva_lock);
+ kfree(handle);
+}
+EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);
+
+u32 iommu_sva_get_pasid(struct iommu_sva *handle)
+{
+ struct iommu_domain *domain = handle->domain;
+
+ return domain->mm->pasid;
+}
+EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);
+
+/*
+ * I/O page fault handler for SVA
+ */
+enum iommu_page_response_code
+iommu_sva_handle_iopf(struct iommu_fault *fault, void *data)
+{
+ vm_fault_t ret;
+ struct vm_area_struct *vma;
+ struct mm_struct *mm = data;
+ unsigned int access_flags = 0;
+ unsigned int fault_flags = FAULT_FLAG_REMOTE;
+ struct iommu_fault_page_request *prm = &fault->prm;
+ enum iommu_page_response_code status = IOMMU_PAGE_RESP_INVALID;
+
+ if (!(prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID))
+ return status;
+
+ if (!mmget_not_zero(mm))
+ return status;
+
+ mmap_read_lock(mm);
+
+ vma = find_extend_vma(mm, prm->addr);
+ if (!vma)
+ /* Unmapped area */
+ goto out_put_mm;
+
+ if (prm->perm & IOMMU_FAULT_PERM_READ)
+ access_flags |= VM_READ;
+
+ if (prm->perm & IOMMU_FAULT_PERM_WRITE) {
+ access_flags |= VM_WRITE;
+ fault_flags |= FAULT_FLAG_WRITE;
+ }
+
+ if (prm->perm & IOMMU_FAULT_PERM_EXEC) {
+ access_flags |= VM_EXEC;
+ fault_flags |= FAULT_FLAG_INSTRUCTION;
+ }
+
+ if (!(prm->perm & IOMMU_FAULT_PERM_PRIV))
+ fault_flags |= FAULT_FLAG_USER;
+
+ if (access_flags & ~vma->vm_flags)
+ /* Access fault */
+ goto out_put_mm;
+
+ ret = handle_mm_fault(vma, prm->addr, fault_flags, NULL);
+ status = ret & VM_FAULT_ERROR ? IOMMU_PAGE_RESP_INVALID :
+ IOMMU_PAGE_RESP_SUCCESS;
+
+out_put_mm:
+ mmap_read_unlock(mm);
+ mmput(mm);
+
+ return status;
+}
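End to end, a consumer of the refactored SVA API looks roughly like the sketch below. The demo function and the device-quiescing step are placeholders; the three iommu_sva_* calls are the ones defined above, and IOMMU_DEV_FEAT_SVA is assumed to be enabled already.

static int demo_sva_use(struct device *dev, struct mm_struct *mm)
{
	struct iommu_sva *handle;
	u32 pasid;

	handle = iommu_sva_bind_device(dev, mm);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	pasid = iommu_sva_get_pasid(handle);	/* == mm->pasid */
	/* program the PASID into the device and submit work ... */

	/* stop issuing transactions for this PASID, then: */
	iommu_sva_unbind_device(handle);
	return 0;
}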
diff --git a/drivers/iommu/iommu-sva-lib.h b/drivers/iommu/iommu-sva.h
index 8909ea1094e3..7215a761b962 100644
--- a/drivers/iommu/iommu-sva-lib.h
+++ b/drivers/iommu/iommu-sva.h
@@ -2,8 +2,8 @@
/*
* SVA library for IOMMU drivers
*/
-#ifndef _IOMMU_SVA_LIB_H
-#define _IOMMU_SVA_LIB_H
+#ifndef _IOMMU_SVA_H
+#define _IOMMU_SVA_H
#include <linux/ioasid.h>
#include <linux/mm_types.h>
@@ -26,6 +26,8 @@ int iopf_queue_flush_dev(struct device *dev);
struct iopf_queue *iopf_queue_alloc(const char *name);
void iopf_queue_free(struct iopf_queue *queue);
int iopf_queue_discard_partial(struct iopf_queue *queue);
+enum iommu_page_response_code
+iommu_sva_handle_iopf(struct iommu_fault *fault, void *data);
#else /* CONFIG_IOMMU_SVA */
static inline int iommu_queue_iopf(struct iommu_fault *fault, void *cookie)
@@ -63,5 +65,11 @@ static inline int iopf_queue_discard_partial(struct iopf_queue *queue)
{
return -ENODEV;
}
+
+static inline enum iommu_page_response_code
+iommu_sva_handle_iopf(struct iommu_fault *fault, void *data)
+{
+ return IOMMU_PAGE_RESP_INVALID;
+}
#endif /* CONFIG_IOMMU_SVA */
-#endif /* _IOMMU_SVA_LIB_H */
+#endif /* _IOMMU_SVA_H */
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 65a3b3d886dc..7c99d8eb3182 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -21,6 +21,7 @@
#include <linux/idr.h>
#include <linux/err.h>
#include <linux/pci.h>
+#include <linux/pci-ats.h>
#include <linux/bitops.h>
#include <linux/platform_device.h>
#include <linux/property.h>
@@ -28,9 +29,12 @@
#include <linux/module.h>
#include <linux/cc_platform.h>
#include <trace/events/iommu.h>
+#include <linux/sched/mm.h>
#include "dma-iommu.h"
+#include "iommu-sva.h"
+
static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);
@@ -42,6 +46,7 @@ struct iommu_group {
struct kobject kobj;
struct kobject *devices_kobj;
struct list_head devices;
+ struct xarray pasid_array;
struct mutex mutex;
void *iommu_data;
void (*iommu_data_release)(void *iommu_data);
@@ -278,18 +283,46 @@ static void dev_iommu_free(struct device *dev)
kfree(param);
}
+static u32 dev_iommu_get_max_pasids(struct device *dev)
+{
+ u32 max_pasids = 0, bits = 0;
+ int ret;
+
+ if (dev_is_pci(dev)) {
+ ret = pci_max_pasids(to_pci_dev(dev));
+ if (ret > 0)
+ max_pasids = ret;
+ } else {
+ ret = device_property_read_u32(dev, "pasid-num-bits", &bits);
+ if (!ret)
+ max_pasids = 1UL << bits;
+ }
+
+ return min_t(u32, max_pasids, dev->iommu->iommu_dev->max_pasids);
+}
+
static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
{
const struct iommu_ops *ops = dev->bus->iommu_ops;
struct iommu_device *iommu_dev;
struct iommu_group *group;
+ static DEFINE_MUTEX(iommu_probe_device_lock);
int ret;
if (!ops)
return -ENODEV;
-
- if (!dev_iommu_get(dev))
- return -ENOMEM;
+ /*
+ * Serialise to avoid races between IOMMU drivers registering in
+ * parallel and/or the "replay" calls from ACPI/OF code via client
+ * driver probe. Once the latter have been cleaned up we should
+ * probably be able to use device_lock() here to minimise the scope,
+ * but for now enforcing a simple global ordering is fine.
+ */
+ mutex_lock(&iommu_probe_device_lock);
+ if (!dev_iommu_get(dev)) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
if (!try_module_get(ops->owner)) {
ret = -EINVAL;
@@ -303,17 +336,21 @@ static int __iommu_probe_device(struct device *dev, struct list_head *group_list
}
dev->iommu->iommu_dev = iommu_dev;
+ dev->iommu->max_pasids = dev_iommu_get_max_pasids(dev);
group = iommu_group_get_for_dev(dev);
if (IS_ERR(group)) {
ret = PTR_ERR(group);
goto out_release;
}
- iommu_group_put(group);
+ mutex_lock(&group->mutex);
if (group_list && !group->default_domain && list_empty(&group->entry))
list_add_tail(&group->entry, group_list);
+ mutex_unlock(&group->mutex);
+ iommu_group_put(group);
+ mutex_unlock(&iommu_probe_device_lock);
iommu_device_link(iommu_dev, dev);
return 0;
@@ -328,6 +365,9 @@ out_module_put:
err_free:
dev_iommu_free(dev);
+err_unlock:
+ mutex_unlock(&iommu_probe_device_lock);
+
return ret;
}
@@ -703,6 +743,7 @@ struct iommu_group *iommu_group_alloc(void)
mutex_init(&group->mutex);
INIT_LIST_HEAD(&group->devices);
INIT_LIST_HEAD(&group->entry);
+ xa_init(&group->pasid_array);
ret = ida_alloc(&iommu_group_ida, GFP_KERNEL);
if (ret < 0) {
@@ -1799,11 +1840,11 @@ int bus_iommu_probe(struct bus_type *bus)
return ret;
list_for_each_entry_safe(group, next, &group_list, entry) {
+ mutex_lock(&group->mutex);
+
/* Remove item from the list */
list_del_init(&group->entry);
- mutex_lock(&group->mutex);
-
/* Try to allocate default domain */
probe_alloc_default_domain(bus, group);
@@ -1912,6 +1953,8 @@ EXPORT_SYMBOL_GPL(iommu_domain_alloc);
void iommu_domain_free(struct iommu_domain *domain)
{
+ if (domain->type == IOMMU_DOMAIN_SVA)
+ mmdrop(domain->mm);
iommu_put_dma_cookie(domain);
domain->ops->free(domain);
}
@@ -1949,6 +1992,18 @@ static int __iommu_attach_device(struct iommu_domain *domain,
return ret;
}
+/**
+ * iommu_attach_device - Attach an IOMMU domain to a device
+ * @domain: IOMMU domain to attach
+ * @dev: Device that will be attached
+ *
+ * Returns 0 on success and error code on failure
+ *
+ * Note that EINVAL can be treated as a soft failure, indicating
+ * that the configuration of the domain is incompatible with
+ * the device. In this case, attaching a different domain to the
+ * device may succeed.
+ */
int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
struct iommu_group *group;
@@ -2075,6 +2130,18 @@ static int __iommu_attach_group(struct iommu_domain *domain,
return ret;
}
+/**
+ * iommu_attach_group - Attach an IOMMU domain to an IOMMU group
+ * @domain: IOMMU domain to attach
+ * @group: IOMMU group that will be attached
+ *
+ * Returns 0 on success and error code on failure
+ *
+ * Note that EINVAL can be treated as a soft failure, indicating
+ * that the configuration of the domain is incompatible with
+ * the group. In this case, attaching a different domain to the
+ * group may succeed.
+ */
int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
{
int ret;
@@ -2726,98 +2793,6 @@ int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
}
EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);
-/**
- * iommu_sva_bind_device() - Bind a process address space to a device
- * @dev: the device
- * @mm: the mm to bind, caller must hold a reference to it
- * @drvdata: opaque data pointer to pass to bind callback
- *
- * Create a bond between device and address space, allowing the device to access
- * the mm using the returned PASID. If a bond already exists between @device and
- * @mm, it is returned and an additional reference is taken. Caller must call
- * iommu_sva_unbind_device() to release each reference.
- *
- * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to
- * initialize the required SVA features.
- *
- * On error, returns an ERR_PTR value.
- */
-struct iommu_sva *
-iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
-{
- struct iommu_group *group;
- struct iommu_sva *handle = ERR_PTR(-EINVAL);
- const struct iommu_ops *ops = dev_iommu_ops(dev);
-
- if (!ops->sva_bind)
- return ERR_PTR(-ENODEV);
-
- group = iommu_group_get(dev);
- if (!group)
- return ERR_PTR(-ENODEV);
-
- /* Ensure device count and domain don't change while we're binding */
- mutex_lock(&group->mutex);
-
- /*
- * To keep things simple, SVA currently doesn't support IOMMU groups
- * with more than one device. Existing SVA-capable systems are not
- * affected by the problems that required IOMMU groups (lack of ACS
- * isolation, device ID aliasing and other hardware issues).
- */
- if (iommu_group_device_count(group) != 1)
- goto out_unlock;
-
- handle = ops->sva_bind(dev, mm, drvdata);
-
-out_unlock:
- mutex_unlock(&group->mutex);
- iommu_group_put(group);
-
- return handle;
-}
-EXPORT_SYMBOL_GPL(iommu_sva_bind_device);
-
-/**
- * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device
- * @handle: the handle returned by iommu_sva_bind_device()
- *
- * Put reference to a bond between device and address space. The device should
- * not be issuing any more transaction for this PASID. All outstanding page
- * requests for this PASID must have been flushed to the IOMMU.
- */
-void iommu_sva_unbind_device(struct iommu_sva *handle)
-{
- struct iommu_group *group;
- struct device *dev = handle->dev;
- const struct iommu_ops *ops = dev_iommu_ops(dev);
-
- if (!ops->sva_unbind)
- return;
-
- group = iommu_group_get(dev);
- if (!group)
- return;
-
- mutex_lock(&group->mutex);
- ops->sva_unbind(handle);
- mutex_unlock(&group->mutex);
-
- iommu_group_put(group);
-}
-EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);
-
-u32 iommu_sva_get_pasid(struct iommu_sva *handle)
-{
- const struct iommu_ops *ops = dev_iommu_ops(handle->dev);
-
- if (!ops->sva_get_pasid)
- return IOMMU_PASID_INVALID;
-
- return ops->sva_get_pasid(handle);
-}
-EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);
-
/*
* Changes the default domain of an iommu group that has *only* one device
*
@@ -3087,7 +3062,8 @@ int iommu_device_use_default_domain(struct device *dev)
mutex_lock(&group->mutex);
if (group->owner_cnt) {
- if (group->owner || !iommu_is_default_domain(group)) {
+ if (group->owner || !iommu_is_default_domain(group) ||
+ !xa_empty(&group->pasid_array)) {
ret = -EBUSY;
goto unlock_out;
}
@@ -3118,7 +3094,7 @@ void iommu_device_unuse_default_domain(struct device *dev)
return;
mutex_lock(&group->mutex);
- if (!WARN_ON(!group->owner_cnt))
+ if (!WARN_ON(!group->owner_cnt || !xa_empty(&group->pasid_array)))
group->owner_cnt--;
mutex_unlock(&group->mutex);
@@ -3166,7 +3142,8 @@ int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner)
ret = -EPERM;
goto unlock_out;
} else {
- if (group->domain && group->domain != group->default_domain) {
+ if ((group->domain && group->domain != group->default_domain) ||
+ !xa_empty(&group->pasid_array)) {
ret = -EBUSY;
goto unlock_out;
}
@@ -3200,7 +3177,8 @@ void iommu_group_release_dma_owner(struct iommu_group *group)
int ret;
mutex_lock(&group->mutex);
- if (WARN_ON(!group->owner_cnt || !group->owner))
+ if (WARN_ON(!group->owner_cnt || !group->owner ||
+ !xa_empty(&group->pasid_array)))
goto unlock_out;
group->owner_cnt = 0;
@@ -3231,3 +3209,150 @@ bool iommu_group_dma_owner_claimed(struct iommu_group *group)
return user;
}
EXPORT_SYMBOL_GPL(iommu_group_dma_owner_claimed);
+
+static int __iommu_set_group_pasid(struct iommu_domain *domain,
+ struct iommu_group *group, ioasid_t pasid)
+{
+ struct group_device *device;
+ int ret = 0;
+
+ list_for_each_entry(device, &group->devices, list) {
+ ret = domain->ops->set_dev_pasid(domain, device->dev, pasid);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static void __iommu_remove_group_pasid(struct iommu_group *group,
+ ioasid_t pasid)
+{
+ struct group_device *device;
+ const struct iommu_ops *ops;
+
+ list_for_each_entry(device, &group->devices, list) {
+ ops = dev_iommu_ops(device->dev);
+ ops->remove_dev_pasid(device->dev, pasid);
+ }
+}
+
+/*
+ * iommu_attach_device_pasid() - Attach a domain to pasid of device
+ * @domain: the iommu domain.
+ * @dev: the attached device.
+ * @pasid: the pasid of the device.
+ *
+ * Return: 0 on success, or an error.
+ */
+int iommu_attach_device_pasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t pasid)
+{
+ struct iommu_group *group;
+ void *curr;
+ int ret;
+
+ if (!domain->ops->set_dev_pasid)
+ return -EOPNOTSUPP;
+
+ group = iommu_group_get(dev);
+ if (!group)
+ return -ENODEV;
+
+ mutex_lock(&group->mutex);
+ curr = xa_cmpxchg(&group->pasid_array, pasid, NULL, domain, GFP_KERNEL);
+ if (curr) {
+ ret = xa_err(curr) ? : -EBUSY;
+ goto out_unlock;
+ }
+
+ ret = __iommu_set_group_pasid(domain, group, pasid);
+ if (ret) {
+ __iommu_remove_group_pasid(group, pasid);
+ xa_erase(&group->pasid_array, pasid);
+ }
+out_unlock:
+ mutex_unlock(&group->mutex);
+ iommu_group_put(group);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_attach_device_pasid);
+
+/*
+ * iommu_detach_device_pasid() - Detach the domain from pasid of device
+ * @domain: the iommu domain.
+ * @dev: the attached device.
+ * @pasid: the pasid of the device.
+ *
+ * The @domain must have been attached to @pasid of the @dev with
+ * iommu_attach_device_pasid().
+ */
+void iommu_detach_device_pasid(struct iommu_domain *domain, struct device *dev,
+ ioasid_t pasid)
+{
+ struct iommu_group *group = iommu_group_get(dev);
+
+ mutex_lock(&group->mutex);
+ __iommu_remove_group_pasid(group, pasid);
+ WARN_ON(xa_erase(&group->pasid_array, pasid) != domain);
+ mutex_unlock(&group->mutex);
+
+ iommu_group_put(group);
+}
+EXPORT_SYMBOL_GPL(iommu_detach_device_pasid);
+
+/*
+ * iommu_get_domain_for_dev_pasid() - Retrieve domain for @pasid of @dev
+ * @dev: the queried device
+ * @pasid: the pasid of the device
+ * @type: matched domain type, 0 for any match
+ *
+ * This is a variant of iommu_get_domain_for_dev(). It returns the existing
+ * domain attached to @pasid of @dev. Callers must hold a lock around this
+ * function and around both iommu_attach/detach_dev_pasid() whenever a domain
+ * of the given @type is being manipulated. This API does not internally
+ * resolve races with attach/detach.
+ *
+ * Return: attached domain on success, NULL otherwise.
+ */
+struct iommu_domain *iommu_get_domain_for_dev_pasid(struct device *dev,
+ ioasid_t pasid,
+ unsigned int type)
+{
+ struct iommu_domain *domain;
+ struct iommu_group *group;
+
+ group = iommu_group_get(dev);
+ if (!group)
+ return NULL;
+
+ xa_lock(&group->pasid_array);
+ domain = xa_load(&group->pasid_array, pasid);
+ if (type && domain && domain->type != type)
+ domain = ERR_PTR(-EBUSY);
+ xa_unlock(&group->pasid_array);
+ iommu_group_put(group);
+
+ return domain;
+}
+EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev_pasid);
+
+struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
+ struct mm_struct *mm)
+{
+ const struct iommu_ops *ops = dev_iommu_ops(dev);
+ struct iommu_domain *domain;
+
+ domain = ops->domain_alloc(IOMMU_DOMAIN_SVA);
+ if (!domain)
+ return NULL;
+
+ domain->type = IOMMU_DOMAIN_SVA;
+ mmgrab(mm);
+ domain->mm = mm;
+ domain->iopf_handler = iommu_sva_handle_iopf;
+ domain->fault_data = mm;
+
+ return domain;
+}
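The pieces added above compose as follows. This is a sketch mirroring what iommu_sva_bind_device() does internally (the wrapper name is illustrative and skips the reference counting the real code performs).

static int demo_attach_sva_domain(struct device *dev, struct mm_struct *mm)
{
	struct iommu_domain *domain;
	int ret;

	domain = iommu_sva_domain_alloc(dev, mm);	/* mmgrab()s the mm */
	if (!domain)
		return -ENOMEM;

	/* stores the domain in the group's pasid_array, calls set_dev_pasid */
	ret = iommu_attach_device_pasid(domain, dev, mm->pasid);
	if (ret) {
		iommu_domain_free(domain);		/* mmdrop()s the mm */
		return ret;
	}

	/* ... use it; tear down in reverse order when done: */
	iommu_detach_device_pasid(domain, dev, mm->pasid);
	iommu_domain_free(domain);
	return 0;
}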
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 3b30c0752274..a003bd5fc65c 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -628,8 +628,6 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
* Something is wrong, we can't attach two devices using
* different IOMMUs to the same domain.
*/
- dev_err(dev, "Can't attach IPMMU %s to domain on IPMMU %s\n",
- dev_name(mmu->dev), dev_name(domain->mmu->dev));
ret = -EINVAL;
} else
dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id);
@@ -661,22 +659,22 @@ static void ipmmu_detach_device(struct iommu_domain *io_domain,
}
static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+ phys_addr_t paddr, size_t pgsize, size_t pgcount,
+ int prot, gfp_t gfp, size_t *mapped)
{
struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
- if (!domain)
- return -ENODEV;
-
- return domain->iop->map(domain->iop, iova, paddr, size, prot, gfp);
+ return domain->iop->map_pages(domain->iop, iova, paddr, pgsize, pgcount,
+ prot, gfp, mapped);
}
static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
- size_t size, struct iommu_iotlb_gather *gather)
+ size_t pgsize, size_t pgcount,
+ struct iommu_iotlb_gather *gather)
{
struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
- return domain->iop->unmap(domain->iop, iova, size, gather);
+ return domain->iop->unmap_pages(domain->iop, iova, pgsize, pgcount, gather);
}
static void ipmmu_flush_iotlb_all(struct iommu_domain *io_domain)
@@ -879,8 +877,8 @@ static const struct iommu_ops ipmmu_ops = {
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = ipmmu_attach_device,
.detach_dev = ipmmu_detach_device,
- .map = ipmmu_map,
- .unmap = ipmmu_unmap,
+ .map_pages = ipmmu_map,
+ .unmap_pages = ipmmu_unmap,
.flush_iotlb_all = ipmmu_flush_iotlb_all,
.iotlb_sync = ipmmu_iotlb_sync,
.iova_to_phys = ipmmu_iova_to_phys,
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 16179a9a7283..c60624910872 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -471,14 +471,16 @@ fail:
}
static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t pa, size_t len, int prot, gfp_t gfp)
+ phys_addr_t pa, size_t pgsize, size_t pgcount,
+ int prot, gfp_t gfp, size_t *mapped)
{
struct msm_priv *priv = to_msm_priv(domain);
unsigned long flags;
int ret;
spin_lock_irqsave(&priv->pgtlock, flags);
- ret = priv->iop->map(priv->iop, iova, pa, len, prot, GFP_ATOMIC);
+ ret = priv->iop->map_pages(priv->iop, iova, pa, pgsize, pgcount, prot,
+ GFP_ATOMIC, mapped);
spin_unlock_irqrestore(&priv->pgtlock, flags);
return ret;
@@ -493,16 +495,18 @@ static void msm_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
}
static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
- size_t len, struct iommu_iotlb_gather *gather)
+ size_t pgsize, size_t pgcount,
+ struct iommu_iotlb_gather *gather)
{
struct msm_priv *priv = to_msm_priv(domain);
unsigned long flags;
+ size_t ret;
spin_lock_irqsave(&priv->pgtlock, flags);
- len = priv->iop->unmap(priv->iop, iova, len, gather);
+ ret = priv->iop->unmap_pages(priv->iop, iova, pgsize, pgcount, gather);
spin_unlock_irqrestore(&priv->pgtlock, flags);
- return len;
+ return ret;
}
static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -679,8 +683,8 @@ static struct iommu_ops msm_iommu_ops = {
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = msm_iommu_attach_dev,
.detach_dev = msm_iommu_detach_dev,
- .map = msm_iommu_map,
- .unmap = msm_iommu_unmap,
+ .map_pages = msm_iommu_map,
+ .unmap_pages = msm_iommu_unmap,
/*
* Nothing is needed here, the barrier to guarantee
* completion of the tlb sync operation is implicitly
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 2ab2ecfe01f8..2badd6acfb23 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -108,8 +108,12 @@
#define F_MMU_INT_ID_SUB_COMM_ID(a) (((a) >> 7) & 0x3)
#define F_MMU_INT_ID_COMM_ID_EXT(a) (((a) >> 10) & 0x7)
#define F_MMU_INT_ID_SUB_COMM_ID_EXT(a) (((a) >> 7) & 0x7)
+/* Macros for the 5-bit port ID field (default) */
#define F_MMU_INT_ID_LARB_ID(a) (((a) >> 7) & 0x7)
#define F_MMU_INT_ID_PORT_ID(a) (((a) >> 2) & 0x1f)
+/* Macros for the 6-bit port ID field */
+#define F_MMU_INT_ID_LARB_ID_WID_6(a) (((a) >> 8) & 0x7)
+#define F_MMU_INT_ID_PORT_ID_WID_6(a) (((a) >> 2) & 0x3f)
#define MTK_PROTECT_PA_ALIGN 256
#define MTK_IOMMU_BANK_SZ 0x1000
@@ -139,6 +143,7 @@
#define IFA_IOMMU_PCIE_SUPPORT BIT(16)
#define PGTABLE_PA_35_EN BIT(17)
#define TF_PORT_TO_ADDR_MT8173 BIT(18)
+#define INT_ID_PORT_WIDTH_6 BIT(19)
#define MTK_IOMMU_HAS_FLAG_MASK(pdata, _x, mask) \
((((pdata)->flags) & (mask)) == (_x))
@@ -165,6 +170,7 @@ enum mtk_iommu_plat {
M4U_MT8186,
M4U_MT8192,
M4U_MT8195,
+ M4U_MT8365,
};
struct mtk_iommu_iova_region {
@@ -223,10 +229,7 @@ struct mtk_iommu_data {
struct device *smicomm_dev;
struct mtk_iommu_bank_data *bank;
-
- struct dma_iommu_mapping *mapping; /* For mtk_iommu_v1.c */
struct regmap *pericfg;
-
struct mutex mutex; /* Protect m4u_group/m4u_dom above */
/*
@@ -441,20 +444,25 @@ static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
fault_pa |= (u64)pa34_32 << 32;
if (MTK_IOMMU_IS_TYPE(plat_data, MTK_IOMMU_TYPE_MM)) {
- fault_port = F_MMU_INT_ID_PORT_ID(regval);
if (MTK_IOMMU_HAS_FLAG(plat_data, HAS_SUB_COMM_2BITS)) {
fault_larb = F_MMU_INT_ID_COMM_ID(regval);
sub_comm = F_MMU_INT_ID_SUB_COMM_ID(regval);
+ fault_port = F_MMU_INT_ID_PORT_ID(regval);
} else if (MTK_IOMMU_HAS_FLAG(plat_data, HAS_SUB_COMM_3BITS)) {
fault_larb = F_MMU_INT_ID_COMM_ID_EXT(regval);
sub_comm = F_MMU_INT_ID_SUB_COMM_ID_EXT(regval);
+ fault_port = F_MMU_INT_ID_PORT_ID(regval);
+ } else if (MTK_IOMMU_HAS_FLAG(plat_data, INT_ID_PORT_WIDTH_6)) {
+ fault_port = F_MMU_INT_ID_PORT_ID_WID_6(regval);
+ fault_larb = F_MMU_INT_ID_LARB_ID_WID_6(regval);
} else {
+ fault_port = F_MMU_INT_ID_PORT_ID(regval);
fault_larb = F_MMU_INT_ID_LARB_ID(regval);
}
fault_larb = data->plat_data->larbid_remap[fault_larb][sub_comm];
}
- if (report_iommu_fault(&dom->domain, bank->parent_dev, fault_iova,
+ if (!dom || report_iommu_fault(&dom->domain, bank->parent_dev, fault_iova,
write ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ)) {
dev_err_ratelimited(
bank->parent_dev,
@@ -609,7 +617,7 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom,
dom->iop = alloc_io_pgtable_ops(ARM_V7S, &dom->cfg, data);
if (!dom->iop) {
dev_err(data->dev, "Failed to alloc io pgtable\n");
- return -EINVAL;
+ return -ENOMEM;
}
/* Update our support page sizes bitmap */
@@ -668,7 +676,7 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain,
ret = mtk_iommu_domain_finalise(dom, frstdata, region_id);
if (ret) {
mutex_unlock(&dom->mutex);
- return -ENODEV;
+ return ret;
}
dom->bank = &data->bank[bankid];
}
@@ -711,7 +719,8 @@ static void mtk_iommu_detach_device(struct iommu_domain *domain,
}
static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+ phys_addr_t paddr, size_t pgsize, size_t pgcount,
+ int prot, gfp_t gfp, size_t *mapped)
{
struct mtk_iommu_domain *dom = to_mtk_domain(domain);
@@ -720,17 +729,17 @@ static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
paddr |= BIT_ULL(32);
/* Synchronize with the tlb_lock */
- return dom->iop->map(dom->iop, iova, paddr, size, prot, gfp);
+ return dom->iop->map_pages(dom->iop, iova, paddr, pgsize, pgcount, prot, gfp, mapped);
}
static size_t mtk_iommu_unmap(struct iommu_domain *domain,
- unsigned long iova, size_t size,
+ unsigned long iova, size_t pgsize, size_t pgcount,
struct iommu_iotlb_gather *gather)
{
struct mtk_iommu_domain *dom = to_mtk_domain(domain);
- iommu_iotlb_gather_add_range(gather, iova, size);
- return dom->iop->unmap(dom->iop, iova, size, gather);
+ iommu_iotlb_gather_add_range(gather, iova, pgsize * pgcount);
+ return dom->iop->unmap_pages(dom->iop, iova, pgsize, pgcount, gather);
}
static void mtk_iommu_flush_iotlb_all(struct iommu_domain *domain)
@@ -938,8 +947,8 @@ static const struct iommu_ops mtk_iommu_ops = {
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = mtk_iommu_attach_device,
.detach_dev = mtk_iommu_detach_device,
- .map = mtk_iommu_map,
- .unmap = mtk_iommu_unmap,
+ .map_pages = mtk_iommu_map,
+ .unmap_pages = mtk_iommu_unmap,
.flush_iotlb_all = mtk_iommu_flush_iotlb_all,
.iotlb_sync = mtk_iommu_iotlb_sync,
.iotlb_sync_map = mtk_iommu_sync_map,
@@ -1043,21 +1052,26 @@ static const struct component_master_ops mtk_iommu_com_ops = {
static int mtk_iommu_mm_dts_parse(struct device *dev, struct component_match **match,
struct mtk_iommu_data *data)
{
- struct device_node *larbnode, *smicomm_node, *smi_subcomm_node;
- struct platform_device *plarbdev;
+ struct device_node *larbnode, *frst_avail_smicomm_node = NULL;
+ struct platform_device *plarbdev, *pcommdev;
struct device_link *link;
int i, larb_nr, ret;
larb_nr = of_count_phandle_with_args(dev->of_node, "mediatek,larbs", NULL);
if (larb_nr < 0)
return larb_nr;
+ if (larb_nr == 0 || larb_nr > MTK_LARB_NR_MAX)
+ return -EINVAL;
for (i = 0; i < larb_nr; i++) {
+ struct device_node *smicomm_node, *smi_subcomm_node;
u32 id;
larbnode = of_parse_phandle(dev->of_node, "mediatek,larbs", i);
- if (!larbnode)
- return -EINVAL;
+ if (!larbnode) {
+ ret = -EINVAL;
+ goto err_larbdev_put;
+ }
if (!of_device_is_available(larbnode)) {
of_node_put(larbnode);
@@ -1067,48 +1081,91 @@ static int mtk_iommu_mm_dts_parse(struct device *dev, struct component_match **m
ret = of_property_read_u32(larbnode, "mediatek,larb-id", &id);
 if (ret) /* The id is consecutive if this property is absent */
id = i;
+ if (id >= MTK_LARB_NR_MAX) {
+ of_node_put(larbnode);
+ ret = -EINVAL;
+ goto err_larbdev_put;
+ }
plarbdev = of_find_device_by_node(larbnode);
+ of_node_put(larbnode);
if (!plarbdev) {
- of_node_put(larbnode);
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_larbdev_put;
}
- if (!plarbdev->dev.driver) {
- of_node_put(larbnode);
- return -EPROBE_DEFER;
+ if (data->larb_imu[id].dev) {
+ platform_device_put(plarbdev);
+ ret = -EEXIST;
+ goto err_larbdev_put;
}
data->larb_imu[id].dev = &plarbdev->dev;
- component_match_add_release(dev, match, component_release_of,
- component_compare_of, larbnode);
+ if (!plarbdev->dev.driver) {
+ ret = -EPROBE_DEFER;
+ goto err_larbdev_put;
+ }
+
+ /* Get the smi-(sub)-common dev for each larb. */
+ smi_subcomm_node = of_parse_phandle(larbnode, "mediatek,smi", 0);
+ if (!smi_subcomm_node) {
+ ret = -EINVAL;
+ goto err_larbdev_put;
+ }
+
+ /*
+ * There may be two levels of smi-common: the node is a smi-sub-common
+ * if it has another mediatek,smi property, otherwise it is the
+ * smi-common.
+ */
+ smicomm_node = of_parse_phandle(smi_subcomm_node, "mediatek,smi", 0);
+ if (smicomm_node)
+ of_node_put(smi_subcomm_node);
+ else
+ smicomm_node = smi_subcomm_node;
+
+ /*
+ * All the larbs that connect to one IOMMU must connect with the same
+ * smi-common.
+ */
+ if (!frst_avail_smicomm_node) {
+ frst_avail_smicomm_node = smicomm_node;
+ } else if (frst_avail_smicomm_node != smicomm_node) {
+ dev_err(dev, "mediatek,smi property is inconsistent @larb%d.", id);
+ of_node_put(smicomm_node);
+ ret = -EINVAL;
+ goto err_larbdev_put;
+ } else {
+ of_node_put(smicomm_node);
+ }
+
+ component_match_add(dev, match, component_compare_dev, &plarbdev->dev);
+ platform_device_put(plarbdev);
}
- /* Get smi-(sub)-common dev from the last larb. */
- smi_subcomm_node = of_parse_phandle(larbnode, "mediatek,smi", 0);
- if (!smi_subcomm_node)
+ if (!frst_avail_smicomm_node)
return -EINVAL;
- /*
- * It may have two level smi-common. the node is smi-sub-common if it
- * has a new mediatek,smi property. otherwise it is smi-commmon.
- */
- smicomm_node = of_parse_phandle(smi_subcomm_node, "mediatek,smi", 0);
- if (smicomm_node)
- of_node_put(smi_subcomm_node);
- else
- smicomm_node = smi_subcomm_node;
-
- plarbdev = of_find_device_by_node(smicomm_node);
- of_node_put(smicomm_node);
- data->smicomm_dev = &plarbdev->dev;
+ pcommdev = of_find_device_by_node(frst_avail_smicomm_node);
+ of_node_put(frst_avail_smicomm_node);
+ if (!pcommdev)
+ return -ENODEV;
+ data->smicomm_dev = &pcommdev->dev;
link = device_link_add(data->smicomm_dev, dev,
DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
+ platform_device_put(pcommdev);
if (!link) {
dev_err(dev, "Unable to link %s.\n", dev_name(data->smicomm_dev));
return -EINVAL;
}
return 0;
+
+err_larbdev_put:
+ for (i = MTK_LARB_NR_MAX - 1; i >= 0; i--) {
+ if (!data->larb_imu[i].dev)
+ continue;
+ put_device(data->larb_imu[i].dev);
+ }
+ return ret;
}
static int mtk_iommu_probe(struct platform_device *pdev)
@@ -1173,6 +1230,8 @@ static int mtk_iommu_probe(struct platform_device *pdev)
banks_num = data->plat_data->banks_num;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
if (resource_size(res) < banks_num * MTK_IOMMU_BANK_SZ) {
dev_err(dev, "banknr %d. res %pR is not enough.\n", banks_num, res);
return -EINVAL;
@@ -1516,6 +1575,17 @@ static const struct mtk_iommu_plat_data mt8195_data_vpp = {
{4, MTK_INVALID_LARBID, MTK_INVALID_LARBID, MTK_INVALID_LARBID, 6}},
};
+static const struct mtk_iommu_plat_data mt8365_data = {
+ .m4u_plat = M4U_MT8365,
+ .flags = RESET_AXI | INT_ID_PORT_WIDTH_6,
+ .inv_sel_reg = REG_MMU_INV_SEL_GEN1,
+ .banks_num = 1,
+ .banks_enable = {true},
+ .iova_region = single_domain,
+ .iova_region_nr = ARRAY_SIZE(single_domain),
+ .larbid_remap = {{0}, {1}, {2}, {3}, {4}, {5}}, /* Linear mapping. */
+};
+
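To illustrate what INT_ID_PORT_WIDTH_6 changes in the interrupt decode above, a worked example with a made-up register value:

/*
 * Hypothetical regval = 0x2a4 (binary 10 1010 0100):
 *   5-bit layout (default): larb = (0x2a4 >> 7) & 0x7  = 5
 *                           port = (0x2a4 >> 2) & 0x1f = 9
 *   6-bit layout (mt8365):  larb = (0x2a4 >> 8) & 0x7  = 2
 *                           port = (0x2a4 >> 2) & 0x3f = 41
 */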
static const struct of_device_id mtk_iommu_of_ids[] = {
{ .compatible = "mediatek,mt2712-m4u", .data = &mt2712_data},
{ .compatible = "mediatek,mt6779-m4u", .data = &mt6779_data},
@@ -1528,6 +1598,7 @@ static const struct of_device_id mtk_iommu_of_ids[] = {
{ .compatible = "mediatek,mt8195-iommu-infra", .data = &mt8195_data_infra},
{ .compatible = "mediatek,mt8195-iommu-vdo", .data = &mt8195_data_vdo},
{ .compatible = "mediatek,mt8195-iommu-vpp", .data = &mt8195_data_vpp},
+ { .compatible = "mediatek,mt8365-m4u", .data = &mt8365_data},
{}
};
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index 6e0e65831eb7..69682ee068d2 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -327,44 +327,42 @@ static void mtk_iommu_v1_detach_device(struct iommu_domain *domain, struct devic
}
static int mtk_iommu_v1_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+ phys_addr_t paddr, size_t pgsize, size_t pgcount,
+ int prot, gfp_t gfp, size_t *mapped)
{
struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain);
- unsigned int page_num = size >> MT2701_IOMMU_PAGE_SHIFT;
unsigned long flags;
unsigned int i;
u32 *pgt_base_iova = dom->pgt_va + (iova >> MT2701_IOMMU_PAGE_SHIFT);
u32 pabase = (u32)paddr;
- int map_size = 0;
spin_lock_irqsave(&dom->pgtlock, flags);
- for (i = 0; i < page_num; i++) {
- if (pgt_base_iova[i]) {
- memset(pgt_base_iova, 0, i * sizeof(u32));
+ for (i = 0; i < pgcount; i++) {
+ if (pgt_base_iova[i])
break;
- }
pgt_base_iova[i] = pabase | F_DESC_VALID | F_DESC_NONSEC;
pabase += MT2701_IOMMU_PAGE_SIZE;
- map_size += MT2701_IOMMU_PAGE_SIZE;
}
spin_unlock_irqrestore(&dom->pgtlock, flags);
- mtk_iommu_v1_tlb_flush_range(dom->data, iova, size);
+ *mapped = i * MT2701_IOMMU_PAGE_SIZE;
+ mtk_iommu_v1_tlb_flush_range(dom->data, iova, *mapped);
- return map_size == size ? 0 : -EEXIST;
+ return i == pgcount ? 0 : -EEXIST;
}
static size_t mtk_iommu_v1_unmap(struct iommu_domain *domain, unsigned long iova,
- size_t size, struct iommu_iotlb_gather *gather)
+ size_t pgsize, size_t pgcount,
+ struct iommu_iotlb_gather *gather)
{
struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain);
unsigned long flags;
u32 *pgt_base_iova = dom->pgt_va + (iova >> MT2701_IOMMU_PAGE_SHIFT);
- unsigned int page_num = size >> MT2701_IOMMU_PAGE_SHIFT;
+ size_t size = pgcount * MT2701_IOMMU_PAGE_SIZE;
spin_lock_irqsave(&dom->pgtlock, flags);
- memset(pgt_base_iova, 0, page_num * sizeof(u32));
+ memset(pgt_base_iova, 0, pgcount * sizeof(u32));
spin_unlock_irqrestore(&dom->pgtlock, flags);
mtk_iommu_v1_tlb_flush_range(dom->data, iova, size);
@@ -586,13 +584,13 @@ static const struct iommu_ops mtk_iommu_v1_ops = {
.release_device = mtk_iommu_v1_release_device,
.def_domain_type = mtk_iommu_v1_def_domain_type,
.device_group = generic_device_group,
- .pgsize_bitmap = ~0UL << MT2701_IOMMU_PAGE_SHIFT,
+ .pgsize_bitmap = MT2701_IOMMU_PAGE_SIZE,
.owner = THIS_MODULE,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = mtk_iommu_v1_attach_device,
.detach_dev = mtk_iommu_v1_detach_device,
- .map = mtk_iommu_v1_map,
- .unmap = mtk_iommu_v1_unmap,
+ .map_pages = mtk_iommu_v1_map,
+ .unmap_pages = mtk_iommu_v1_unmap,
.iova_to_phys = mtk_iommu_v1_iova_to_phys,
.free = mtk_iommu_v1_domain_free,
}
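With pgsize_bitmap narrowed to the single MT2701 page size, the IOMMU core now expresses a larger request as one call with a page count instead of a loop of single-page map() calls. Schematically (an illustrative helper calling the domain op directly, 4 KiB pages assumed):

/*
 * A 64 KiB mapping arrives at the driver as a single
 * map_pages(pgsize = SZ_4K, pgcount = 16) invocation.
 */
static int demo_map_64k(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr)
{
	size_t mapped = 0;

	return domain->ops->map_pages(domain, iova, paddr, SZ_4K, 16,
				      IOMMU_READ | IOMMU_WRITE,
				      GFP_KERNEL, &mapped);
}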
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 07ee2600113c..2fd7702c6709 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -1414,7 +1414,7 @@ static int omap_iommu_attach_init(struct device *dev,
odomain->num_iommus = omap_iommu_count(dev);
if (!odomain->num_iommus)
- return -EINVAL;
+ return -ENODEV;
odomain->iommus = kcalloc(odomain->num_iommus, sizeof(*iommu),
GFP_ATOMIC);
@@ -1464,7 +1464,7 @@ omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
if (!arch_data || !arch_data->iommu_dev) {
dev_err(dev, "device doesn't have an associated iommu\n");
- return -EINVAL;
+ return -ENODEV;
}
spin_lock(&omap_domain->lock);
@@ -1472,7 +1472,7 @@ omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
/* only a single client device can be attached to a domain */
if (omap_domain->dev) {
dev_err(dev, "iommu domain is already attached\n");
- ret = -EBUSY;
+ ret = -EINVAL;
goto out;
}
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index a3fc59b814ab..a68eadd64f38 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -280,19 +280,17 @@ static u32 rk_mk_pte(phys_addr_t page, int prot)
* 11:9 - Page address bit 34:32
* 8:4 - Page address bit 39:35
* 3 - Security
- * 2 - Readable
- * 1 - Writable
+ * 2 - Writable
+ * 1 - Readable
* 0 - 1 if Page @ Page address is valid
*/
-#define RK_PTE_PAGE_READABLE_V2 BIT(2)
-#define RK_PTE_PAGE_WRITABLE_V2 BIT(1)
static u32 rk_mk_pte_v2(phys_addr_t page, int prot)
{
u32 flags = 0;
- flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE_V2 : 0;
- flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE_V2 : 0;
+ flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0;
+ flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE : 0;
return rk_mk_dte_v2(page) | flags;
}
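A quick check of the corrected v2 permission bits, assuming RK_PTE_PAGE_READABLE is BIT(1) and RK_PTE_PAGE_WRITABLE is BIT(2) as the layout comment above implies:

/*
 * rk_mk_pte_v2() after the fix:
 *   prot = IOMMU_READ               -> flags = BIT(1)  (Readable)
 *   prot = IOMMU_WRITE              -> flags = BIT(2)  (Writable)
 *   prot = IOMMU_READ | IOMMU_WRITE -> flags = BIT(1) | BIT(2)
 * The removed *_V2 macros had these two bits transposed.
 */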
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
index 3c071782f6f1..ed33c6cce083 100644
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -10,28 +10,18 @@
#include <linux/iommu.h>
#include <linux/iommu-helper.h>
#include <linux/sizes.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
#include <asm/pci_dma.h>
-/*
- * Physically contiguous memory regions can be mapped with 4 KiB alignment,
- * we allow all page sizes that are an order of 4KiB (no special large page
- * support so far).
- */
-#define S390_IOMMU_PGSIZES (~0xFFFUL)
-
static const struct iommu_ops s390_iommu_ops;
struct s390_domain {
struct iommu_domain domain;
struct list_head devices;
unsigned long *dma_table;
- spinlock_t dma_table_lock;
spinlock_t list_lock;
-};
-
-struct s390_domain_device {
- struct list_head list;
- struct zpci_dev *zdev;
+ struct rcu_head rcu;
};
static struct s390_domain *to_s390_domain(struct iommu_domain *dom)
@@ -67,119 +57,125 @@ static struct iommu_domain *s390_domain_alloc(unsigned domain_type)
kfree(s390_domain);
return NULL;
}
+ s390_domain->domain.geometry.force_aperture = true;
+ s390_domain->domain.geometry.aperture_start = 0;
+ s390_domain->domain.geometry.aperture_end = ZPCI_TABLE_SIZE_RT - 1;
- spin_lock_init(&s390_domain->dma_table_lock);
spin_lock_init(&s390_domain->list_lock);
- INIT_LIST_HEAD(&s390_domain->devices);
+ INIT_LIST_HEAD_RCU(&s390_domain->devices);
return &s390_domain->domain;
}
-static void s390_domain_free(struct iommu_domain *domain)
+static void s390_iommu_rcu_free_domain(struct rcu_head *head)
{
- struct s390_domain *s390_domain = to_s390_domain(domain);
+ struct s390_domain *s390_domain = container_of(head, struct s390_domain, rcu);
dma_cleanup_tables(s390_domain->dma_table);
kfree(s390_domain);
}
+static void s390_domain_free(struct iommu_domain *domain)
+{
+ struct s390_domain *s390_domain = to_s390_domain(domain);
+
+ rcu_read_lock();
+ WARN_ON(!list_empty(&s390_domain->devices));
+ rcu_read_unlock();
+
+ call_rcu(&s390_domain->rcu, s390_iommu_rcu_free_domain);
+}
+
+static void __s390_iommu_detach_device(struct zpci_dev *zdev)
+{
+ struct s390_domain *s390_domain = zdev->s390_domain;
+ unsigned long flags;
+
+ if (!s390_domain)
+ return;
+
+ spin_lock_irqsave(&s390_domain->list_lock, flags);
+ list_del_rcu(&zdev->iommu_list);
+ spin_unlock_irqrestore(&s390_domain->list_lock, flags);
+
+ zpci_unregister_ioat(zdev, 0);
+ zdev->s390_domain = NULL;
+ zdev->dma_table = NULL;
+}
+
static int s390_iommu_attach_device(struct iommu_domain *domain,
struct device *dev)
{
struct s390_domain *s390_domain = to_s390_domain(domain);
struct zpci_dev *zdev = to_zpci_dev(dev);
- struct s390_domain_device *domain_device;
unsigned long flags;
- int cc, rc;
+ u8 status;
+ int cc;
if (!zdev)
return -ENODEV;
- domain_device = kzalloc(sizeof(*domain_device), GFP_KERNEL);
- if (!domain_device)
- return -ENOMEM;
-
- if (zdev->dma_table && !zdev->s390_domain) {
- cc = zpci_dma_exit_device(zdev);
- if (cc) {
- rc = -EIO;
- goto out_free;
- }
- }
+ if (WARN_ON(domain->geometry.aperture_start > zdev->end_dma ||
+ domain->geometry.aperture_end < zdev->start_dma))
+ return -EINVAL;
if (zdev->s390_domain)
- zpci_unregister_ioat(zdev, 0);
+ __s390_iommu_detach_device(zdev);
+ else if (zdev->dma_table)
+ zpci_dma_exit_device(zdev);
- zdev->dma_table = s390_domain->dma_table;
cc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
- virt_to_phys(zdev->dma_table));
- if (cc) {
- rc = -EIO;
- goto out_restore;
- }
+ virt_to_phys(s390_domain->dma_table), &status);
+ /*
+ * If the device is undergoing error recovery the reset code
+ * will re-establish the new domain.
+ */
+ if (cc && status != ZPCI_PCI_ST_FUNC_NOT_AVAIL)
+ return -EIO;
+ zdev->dma_table = s390_domain->dma_table;
- spin_lock_irqsave(&s390_domain->list_lock, flags);
- /* First device defines the DMA range limits */
- if (list_empty(&s390_domain->devices)) {
- domain->geometry.aperture_start = zdev->start_dma;
- domain->geometry.aperture_end = zdev->end_dma;
- domain->geometry.force_aperture = true;
- /* Allow only devices with identical DMA range limits */
- } else if (domain->geometry.aperture_start != zdev->start_dma ||
- domain->geometry.aperture_end != zdev->end_dma) {
- rc = -EINVAL;
- spin_unlock_irqrestore(&s390_domain->list_lock, flags);
- goto out_restore;
- }
- domain_device->zdev = zdev;
zdev->s390_domain = s390_domain;
- list_add(&domain_device->list, &s390_domain->devices);
+
+ spin_lock_irqsave(&s390_domain->list_lock, flags);
+ list_add_rcu(&zdev->iommu_list, &s390_domain->devices);
spin_unlock_irqrestore(&s390_domain->list_lock, flags);
return 0;
-
-out_restore:
- if (!zdev->s390_domain) {
- zpci_dma_init_device(zdev);
- } else {
- zdev->dma_table = zdev->s390_domain->dma_table;
- zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
- virt_to_phys(zdev->dma_table));
- }
-out_free:
- kfree(domain_device);
-
- return rc;
}
static void s390_iommu_detach_device(struct iommu_domain *domain,
struct device *dev)
{
- struct s390_domain *s390_domain = to_s390_domain(domain);
struct zpci_dev *zdev = to_zpci_dev(dev);
- struct s390_domain_device *domain_device, *tmp;
- unsigned long flags;
- int found = 0;
- if (!zdev)
- return;
+ WARN_ON(zdev->s390_domain != to_s390_domain(domain));
- spin_lock_irqsave(&s390_domain->list_lock, flags);
- list_for_each_entry_safe(domain_device, tmp, &s390_domain->devices,
- list) {
- if (domain_device->zdev == zdev) {
- list_del(&domain_device->list);
- kfree(domain_device);
- found = 1;
- break;
- }
+ __s390_iommu_detach_device(zdev);
+ zpci_dma_init_device(zdev);
+}
+
+static void s390_iommu_get_resv_regions(struct device *dev,
+ struct list_head *list)
+{
+ struct zpci_dev *zdev = to_zpci_dev(dev);
+ struct iommu_resv_region *region;
+
+ if (zdev->start_dma) {
+ region = iommu_alloc_resv_region(0, zdev->start_dma, 0,
+ IOMMU_RESV_RESERVED, GFP_KERNEL);
+ if (!region)
+ return;
+ list_add_tail(&region->list, list);
}
- spin_unlock_irqrestore(&s390_domain->list_lock, flags);
- if (found && (zdev->s390_domain == s390_domain)) {
- zdev->s390_domain = NULL;
- zpci_unregister_ioat(zdev, 0);
- zpci_dma_init_device(zdev);
+ if (zdev->end_dma < ZPCI_TABLE_SIZE_RT - 1) {
+ region = iommu_alloc_resv_region(zdev->end_dma + 1,
+ ZPCI_TABLE_SIZE_RT - zdev->end_dma - 1,
+ 0, IOMMU_RESV_RESERVED, GFP_KERNEL);
+ if (!region)
+ return;
+ list_add_tail(&region->list, list);
}
}
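A worked example of the reserved regions (the DMA window values here are illustrative, not from any real device):

/*
 * For a hypothetical zdev with start_dma = 0x100000000 and
 * end_dma = 0x1ffffffff, the code above reserves:
 *   [0, start_dma - 1]                    i.e. [0x0, 0xffffffff]
 *   [end_dma + 1, ZPCI_TABLE_SIZE_RT - 1] i.e. from 0x200000000 up
 * so the DMA API only allocates IOVAs inside the device's window.
 */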
@@ -192,55 +188,88 @@ static struct iommu_device *s390_iommu_probe_device(struct device *dev)
zdev = to_zpci_dev(dev);
+ if (zdev->start_dma > zdev->end_dma ||
+ zdev->start_dma > ZPCI_TABLE_SIZE_RT - 1)
+ return ERR_PTR(-EINVAL);
+
+ if (zdev->end_dma > ZPCI_TABLE_SIZE_RT - 1)
+ zdev->end_dma = ZPCI_TABLE_SIZE_RT - 1;
+
return &zdev->iommu_dev;
}
static void s390_iommu_release_device(struct device *dev)
{
struct zpci_dev *zdev = to_zpci_dev(dev);
- struct iommu_domain *domain;
/*
- * This is a workaround for a scenario where the IOMMU API common code
- * "forgets" to call the detach_dev callback: After binding a device
- * to vfio-pci and completing the VFIO_SET_IOMMU ioctl (which triggers
- * the attach_dev), removing the device via
- * "echo 1 > /sys/bus/pci/devices/.../remove" won't trigger detach_dev,
- * only release_device will be called via the BUS_NOTIFY_REMOVED_DEVICE
- * notifier.
- *
- * So let's call detach_dev from here if it hasn't been called before.
+ * release_device is expected to detach any domain currently attached
+ * to this device, while that domain itself stays attached to the
+ * other devices in the group.
*/
- if (zdev && zdev->s390_domain) {
- domain = iommu_get_domain_for_dev(dev);
- if (domain)
- s390_iommu_detach_device(domain, dev);
+ if (zdev)
+ __s390_iommu_detach_device(zdev);
+}
+
+static void s390_iommu_flush_iotlb_all(struct iommu_domain *domain)
+{
+ struct s390_domain *s390_domain = to_s390_domain(domain);
+ struct zpci_dev *zdev;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
+ zpci_refresh_trans((u64)zdev->fh << 32, zdev->start_dma,
+ zdev->end_dma - zdev->start_dma + 1);
}
+ rcu_read_unlock();
}
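
Note the locking split: attach publishes a device with list_add_rcu()
under list_lock, while flush paths such as the one above only take
rcu_read_lock(), so TLB flushes never contend with attach/detach. A
self-contained sketch of that pattern (all demo_* names hypothetical):

#include <linux/list.h>
#include <linux/printk.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>

struct demo_domain {
	spinlock_t lock;		/* serializes writers only */
	struct list_head devices;
};

struct demo_dev {
	struct list_head list;
};

static void demo_attach(struct demo_domain *d, struct demo_dev *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&d->lock, flags);
	list_add_rcu(&dev->list, &d->devices);	/* publish to readers */
	spin_unlock_irqrestore(&d->lock, flags);
}

static void demo_flush_all(struct demo_domain *d)
{
	struct demo_dev *dev;

	rcu_read_lock();			/* readers take no lock */
	list_for_each_entry_rcu(dev, &d->devices, list)
		pr_debug("flush one device\n");	/* stand-in for zpci_refresh_trans() */
	rcu_read_unlock();
}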
-static int s390_iommu_update_trans(struct s390_domain *s390_domain,
- phys_addr_t pa, dma_addr_t dma_addr,
- size_t size, int flags)
+static void s390_iommu_iotlb_sync(struct iommu_domain *domain,
+ struct iommu_iotlb_gather *gather)
{
- struct s390_domain_device *domain_device;
- phys_addr_t page_addr = pa & PAGE_MASK;
- dma_addr_t start_dma_addr = dma_addr;
- unsigned long irq_flags, nr_pages, i;
- unsigned long *entry;
- int rc = 0;
+ struct s390_domain *s390_domain = to_s390_domain(domain);
+ size_t size = gather->end - gather->start + 1;
+ struct zpci_dev *zdev;
- if (dma_addr < s390_domain->domain.geometry.aperture_start ||
- dma_addr + size > s390_domain->domain.geometry.aperture_end)
- return -EINVAL;
+	/* If nothing was ever added to the gather there is nothing to flush */
+ if (!gather->end)
+ return;
- nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
- if (!nr_pages)
- return 0;
+ rcu_read_lock();
+ list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
+ zpci_refresh_trans((u64)zdev->fh << 32, gather->start,
+ size);
+ }
+ rcu_read_unlock();
+}
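
iotlb_sync consumes whatever range unmap_pages accumulated through
iommu_iotlb_gather_add_range(). Seen from a caller, the deferred-flush
contract is roughly the following sketch (demo_unmap_batched() is a
hypothetical name, error handling elided):

#include <linux/iommu.h>

/* Batch an unmap and flush the IOTLB once, not once per page */
static void demo_unmap_batched(struct iommu_domain *domain,
			       unsigned long iova, size_t size)
{
	struct iommu_iotlb_gather gather;

	iommu_iotlb_gather_init(&gather);
	iommu_unmap_fast(domain, iova, size, &gather);
	iommu_iotlb_sync(domain, &gather);	/* ends up in ->iotlb_sync() */
}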
+
+static void s390_iommu_iotlb_sync_map(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
+{
+ struct s390_domain *s390_domain = to_s390_domain(domain);
+ struct zpci_dev *zdev;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
+ if (!zdev->tlb_refresh)
+ continue;
+ zpci_refresh_trans((u64)zdev->fh << 32,
+ iova, size);
+ }
+ rcu_read_unlock();
+}
+
+static int s390_iommu_validate_trans(struct s390_domain *s390_domain,
+ phys_addr_t pa, dma_addr_t dma_addr,
+ unsigned long nr_pages, int flags)
+{
+ phys_addr_t page_addr = pa & PAGE_MASK;
+ unsigned long *entry;
+ unsigned long i;
+ int rc;
- spin_lock_irqsave(&s390_domain->dma_table_lock, irq_flags);
for (i = 0; i < nr_pages; i++) {
entry = dma_walk_cpu_trans(s390_domain->dma_table, dma_addr);
- if (!entry) {
+ if (unlikely(!entry)) {
rc = -ENOMEM;
goto undo_cpu_trans;
}
@@ -249,47 +278,70 @@ static int s390_iommu_update_trans(struct s390_domain *s390_domain,
dma_addr += PAGE_SIZE;
}
- spin_lock(&s390_domain->list_lock);
- list_for_each_entry(domain_device, &s390_domain->devices, list) {
- rc = zpci_refresh_trans((u64) domain_device->zdev->fh << 32,
- start_dma_addr, nr_pages * PAGE_SIZE);
- if (rc)
+ return 0;
+
+undo_cpu_trans:
+ while (i-- > 0) {
+ dma_addr -= PAGE_SIZE;
+ entry = dma_walk_cpu_trans(s390_domain->dma_table,
+ dma_addr);
+ if (!entry)
break;
+ dma_update_cpu_trans(entry, 0, ZPCI_PTE_INVALID);
}
- spin_unlock(&s390_domain->list_lock);
-undo_cpu_trans:
- if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) {
- flags = ZPCI_PTE_INVALID;
- while (i-- > 0) {
- page_addr -= PAGE_SIZE;
- dma_addr -= PAGE_SIZE;
- entry = dma_walk_cpu_trans(s390_domain->dma_table,
- dma_addr);
- if (!entry)
- break;
- dma_update_cpu_trans(entry, page_addr, flags);
+ return rc;
+}
+
+static int s390_iommu_invalidate_trans(struct s390_domain *s390_domain,
+ dma_addr_t dma_addr, unsigned long nr_pages)
+{
+ unsigned long *entry;
+ unsigned long i;
+ int rc = 0;
+
+ for (i = 0; i < nr_pages; i++) {
+ entry = dma_walk_cpu_trans(s390_domain->dma_table, dma_addr);
+ if (unlikely(!entry)) {
+ rc = -EINVAL;
+ break;
}
+ dma_update_cpu_trans(entry, 0, ZPCI_PTE_INVALID);
+ dma_addr += PAGE_SIZE;
}
- spin_unlock_irqrestore(&s390_domain->dma_table_lock, irq_flags);
return rc;
}
-static int s390_iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+static int s390_iommu_map_pages(struct iommu_domain *domain,
+ unsigned long iova, phys_addr_t paddr,
+ size_t pgsize, size_t pgcount,
+ int prot, gfp_t gfp, size_t *mapped)
{
struct s390_domain *s390_domain = to_s390_domain(domain);
+ size_t size = pgcount << __ffs(pgsize);
int flags = ZPCI_PTE_VALID, rc = 0;
+ if (pgsize != SZ_4K)
+ return -EINVAL;
+
+ if (iova < s390_domain->domain.geometry.aperture_start ||
+ (iova + size - 1) > s390_domain->domain.geometry.aperture_end)
+ return -EINVAL;
+
+ if (!IS_ALIGNED(iova | paddr, pgsize))
+ return -EINVAL;
+
if (!(prot & IOMMU_READ))
return -EINVAL;
if (!(prot & IOMMU_WRITE))
flags |= ZPCI_TABLE_PROTECTED;
- rc = s390_iommu_update_trans(s390_domain, paddr, iova,
- size, flags);
+ rc = s390_iommu_validate_trans(s390_domain, paddr, iova,
+ pgcount, flags);
+ if (!rc)
+ *mapped = size;
return rc;
}
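
Since the core hands over a single power-of-two pgsize per call,
__ffs(pgsize) is its log2 and the byte count falls out of a shift.
A worked example, assuming SZ_4K pages:

/* pgsize has exactly one bit set, so __ffs() returns its log2 */
size_t pgsize  = SZ_4K;				/* bit 12, __ffs() == 12 */
size_t pgcount = 8;
size_t size    = pgcount << __ffs(pgsize);	/* 8 << 12 == 0x8000, 32 KiB */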
@@ -298,7 +350,8 @@ static phys_addr_t s390_iommu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
struct s390_domain *s390_domain = to_s390_domain(domain);
- unsigned long *sto, *pto, *rto, flags;
+ unsigned long *rto, *sto, *pto;
+ unsigned long ste, pte, rte;
unsigned int rtx, sx, px;
phys_addr_t phys = 0;
@@ -311,38 +364,40 @@ static phys_addr_t s390_iommu_iova_to_phys(struct iommu_domain *domain,
px = calc_px(iova);
rto = s390_domain->dma_table;
- spin_lock_irqsave(&s390_domain->dma_table_lock, flags);
- if (rto && reg_entry_isvalid(rto[rtx])) {
- sto = get_rt_sto(rto[rtx]);
- if (sto && reg_entry_isvalid(sto[sx])) {
- pto = get_st_pto(sto[sx]);
- if (pto && pt_entry_isvalid(pto[px]))
- phys = pto[px] & ZPCI_PTE_ADDR_MASK;
+ rte = READ_ONCE(rto[rtx]);
+ if (reg_entry_isvalid(rte)) {
+ sto = get_rt_sto(rte);
+ ste = READ_ONCE(sto[sx]);
+ if (reg_entry_isvalid(ste)) {
+ pto = get_st_pto(ste);
+ pte = READ_ONCE(pto[px]);
+ if (pt_entry_isvalid(pte))
+ phys = pte & ZPCI_PTE_ADDR_MASK;
}
}
- spin_unlock_irqrestore(&s390_domain->dma_table_lock, flags);
return phys;
}
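
The walk above trades dma_table_lock for a lockless descent: every
level is sampled exactly once with READ_ONCE() and validated before it
is dereferenced. A sketch of the shape, reduced to one level and
assuming the updater publishes each entry with a single aligned store
(demo_lookup() and its valid bit are illustrative only):

#include <linux/compiler.h>	/* READ_ONCE() */

static unsigned long demo_lookup(unsigned long *table, unsigned int idx)
{
	unsigned long entry = READ_ONCE(table[idx]);	/* one snapshot */

	if (!(entry & 1))	/* assumed valid bit, for illustration */
		return 0;
	return entry & ~1UL;
}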
-static size_t s390_iommu_unmap(struct iommu_domain *domain,
- unsigned long iova, size_t size,
- struct iommu_iotlb_gather *gather)
+static size_t s390_iommu_unmap_pages(struct iommu_domain *domain,
+ unsigned long iova,
+ size_t pgsize, size_t pgcount,
+ struct iommu_iotlb_gather *gather)
{
struct s390_domain *s390_domain = to_s390_domain(domain);
- int flags = ZPCI_PTE_INVALID;
- phys_addr_t paddr;
+ size_t size = pgcount << __ffs(pgsize);
int rc;
- paddr = s390_iommu_iova_to_phys(domain, iova);
- if (!paddr)
+ if (WARN_ON(iova < s390_domain->domain.geometry.aperture_start ||
+ (iova + size - 1) > s390_domain->domain.geometry.aperture_end))
return 0;
- rc = s390_iommu_update_trans(s390_domain, paddr, iova,
- size, flags);
+ rc = s390_iommu_invalidate_trans(s390_domain, iova, pgcount);
if (rc)
return 0;
+ iommu_iotlb_gather_add_range(gather, iova, size);
+
return size;
}
@@ -380,12 +435,16 @@ static const struct iommu_ops s390_iommu_ops = {
.probe_device = s390_iommu_probe_device,
.release_device = s390_iommu_release_device,
.device_group = generic_device_group,
- .pgsize_bitmap = S390_IOMMU_PGSIZES,
+ .pgsize_bitmap = SZ_4K,
+ .get_resv_regions = s390_iommu_get_resv_regions,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = s390_iommu_attach_device,
.detach_dev = s390_iommu_detach_device,
- .map = s390_iommu_map,
- .unmap = s390_iommu_unmap,
+ .map_pages = s390_iommu_map_pages,
+ .unmap_pages = s390_iommu_unmap_pages,
+ .flush_iotlb_all = s390_iommu_flush_iotlb_all,
+ .iotlb_sync = s390_iommu_iotlb_sync,
+ .iotlb_sync_map = s390_iommu_iotlb_sync_map,
.iova_to_phys = s390_iommu_iova_to_phys,
.free = s390_domain_free,
}
diff --git a/drivers/iommu/sprd-iommu.c b/drivers/iommu/sprd-iommu.c
index fadd2c907222..219bfa11f7f4 100644
--- a/drivers/iommu/sprd-iommu.c
+++ b/drivers/iommu/sprd-iommu.c
@@ -237,10 +237,8 @@ static int sprd_iommu_attach_device(struct iommu_domain *domain,
struct sprd_iommu_domain *dom = to_sprd_domain(domain);
size_t pgt_size = sprd_iommu_pgt_size(domain);
- if (dom->sdev) {
- pr_err("There's already a device attached to this domain.\n");
+ if (dom->sdev)
return -EINVAL;
- }
dom->pgt_va = dma_alloc_coherent(sdev->dev, pgt_size, &dom->pgt_pa, GFP_KERNEL);
if (!dom->pgt_va)
@@ -273,10 +271,11 @@ static void sprd_iommu_detach_device(struct iommu_domain *domain,
}
static int sprd_iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+ phys_addr_t paddr, size_t pgsize, size_t pgcount,
+ int prot, gfp_t gfp, size_t *mapped)
{
struct sprd_iommu_domain *dom = to_sprd_domain(domain);
- unsigned int page_num = size >> SPRD_IOMMU_PAGE_SHIFT;
+ size_t size = pgcount * SPRD_IOMMU_PAGE_SIZE;
unsigned long flags;
unsigned int i;
u32 *pgt_base_iova;
@@ -298,35 +297,37 @@ static int sprd_iommu_map(struct iommu_domain *domain, unsigned long iova,
pgt_base_iova = dom->pgt_va + ((iova - start) >> SPRD_IOMMU_PAGE_SHIFT);
spin_lock_irqsave(&dom->pgtlock, flags);
- for (i = 0; i < page_num; i++) {
+ for (i = 0; i < pgcount; i++) {
pgt_base_iova[i] = pabase >> SPRD_IOMMU_PAGE_SHIFT;
pabase += SPRD_IOMMU_PAGE_SIZE;
}
spin_unlock_irqrestore(&dom->pgtlock, flags);
+ *mapped = size;
return 0;
}
static size_t sprd_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
- size_t size, struct iommu_iotlb_gather *iotlb_gather)
+ size_t pgsize, size_t pgcount,
+ struct iommu_iotlb_gather *iotlb_gather)
{
struct sprd_iommu_domain *dom = to_sprd_domain(domain);
unsigned long flags;
u32 *pgt_base_iova;
- unsigned int page_num = size >> SPRD_IOMMU_PAGE_SHIFT;
+ size_t size = pgcount * SPRD_IOMMU_PAGE_SIZE;
unsigned long start = domain->geometry.aperture_start;
unsigned long end = domain->geometry.aperture_end;
if (iova < start || (iova + size) > (end + 1))
- return -EINVAL;
+ return 0;
pgt_base_iova = dom->pgt_va + ((iova - start) >> SPRD_IOMMU_PAGE_SHIFT);
spin_lock_irqsave(&dom->pgtlock, flags);
- memset(pgt_base_iova, 0, page_num * sizeof(u32));
+ memset(pgt_base_iova, 0, pgcount * sizeof(u32));
spin_unlock_irqrestore(&dom->pgtlock, flags);
- return 0;
+ return size;
}
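
Returning 0 instead of -EINVAL here is not cosmetic: unmap_pages
returns a size_t, so a negative errno would wrap to a huge unsigned
value and read as a wildly successful unmap. For instance, on a 64-bit
build:

#include <linux/errno.h>

size_t ret = (size_t)-EINVAL;	/* 0xffffffffffffffea, not an error code */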
static void sprd_iommu_sync_map(struct iommu_domain *domain,
@@ -409,13 +410,13 @@ static const struct iommu_ops sprd_iommu_ops = {
.probe_device = sprd_iommu_probe_device,
.device_group = sprd_iommu_device_group,
.of_xlate = sprd_iommu_of_xlate,
- .pgsize_bitmap = ~0UL << SPRD_IOMMU_PAGE_SHIFT,
+ .pgsize_bitmap = SPRD_IOMMU_PAGE_SIZE,
.owner = THIS_MODULE,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = sprd_iommu_attach_device,
.detach_dev = sprd_iommu_detach_device,
- .map = sprd_iommu_map,
- .unmap = sprd_iommu_unmap,
+ .map_pages = sprd_iommu_map,
+ .unmap_pages = sprd_iommu_unmap,
.iotlb_sync_map = sprd_iommu_sync_map,
.iotlb_sync = sprd_iommu_sync,
.iova_to_phys = sprd_iommu_iova_to_phys,
diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c
index cd9b74ee24de..5b585eace3d4 100644
--- a/drivers/iommu/sun50i-iommu.c
+++ b/drivers/iommu/sun50i-iommu.c
@@ -27,6 +27,7 @@
#include <linux/types.h>
#define IOMMU_RESET_REG 0x010
+#define IOMMU_RESET_RELEASE_ALL 0xffffffff
#define IOMMU_ENABLE_REG 0x020
#define IOMMU_ENABLE_ENABLE BIT(0)
@@ -92,6 +93,8 @@
#define NUM_PT_ENTRIES 256
#define PT_SIZE (NUM_PT_ENTRIES * PT_ENTRY_SIZE)
+#define SPAGE_SIZE 4096
+
struct sun50i_iommu {
struct iommu_device iommu;
@@ -270,7 +273,7 @@ static u32 sun50i_mk_pte(phys_addr_t page, int prot)
enum sun50i_iommu_aci aci;
u32 flags = 0;
- if (prot & (IOMMU_READ | IOMMU_WRITE))
+ if ((prot & (IOMMU_READ | IOMMU_WRITE)) == (IOMMU_READ | IOMMU_WRITE))
aci = SUN50I_IOMMU_ACI_RD_WR;
else if (prot & IOMMU_READ)
aci = SUN50I_IOMMU_ACI_RD;
@@ -294,6 +297,62 @@ static void sun50i_table_flush(struct sun50i_iommu_domain *sun50i_domain,
dma_sync_single_for_device(iommu->dev, dma, size, DMA_TO_DEVICE);
}
+static void sun50i_iommu_zap_iova(struct sun50i_iommu *iommu,
+ unsigned long iova)
+{
+ u32 reg;
+ int ret;
+
+ iommu_write(iommu, IOMMU_TLB_IVLD_ADDR_REG, iova);
+ iommu_write(iommu, IOMMU_TLB_IVLD_ADDR_MASK_REG, GENMASK(31, 12));
+ iommu_write(iommu, IOMMU_TLB_IVLD_ENABLE_REG,
+ IOMMU_TLB_IVLD_ENABLE_ENABLE);
+
+ ret = readl_poll_timeout_atomic(iommu->base + IOMMU_TLB_IVLD_ENABLE_REG,
+ reg, !reg, 1, 2000);
+ if (ret)
+ dev_warn(iommu->dev, "TLB invalidation timed out!\n");
+}
+
+static void sun50i_iommu_zap_ptw_cache(struct sun50i_iommu *iommu,
+ unsigned long iova)
+{
+ u32 reg;
+ int ret;
+
+ iommu_write(iommu, IOMMU_PC_IVLD_ADDR_REG, iova);
+ iommu_write(iommu, IOMMU_PC_IVLD_ENABLE_REG,
+ IOMMU_PC_IVLD_ENABLE_ENABLE);
+
+ ret = readl_poll_timeout_atomic(iommu->base + IOMMU_PC_IVLD_ENABLE_REG,
+ reg, !reg, 1, 2000);
+ if (ret)
+ dev_warn(iommu->dev, "PTW cache invalidation timed out!\n");
+}
+
+static void sun50i_iommu_zap_range(struct sun50i_iommu *iommu,
+ unsigned long iova, size_t size)
+{
+ assert_spin_locked(&iommu->iommu_lock);
+
+ iommu_write(iommu, IOMMU_AUTO_GATING_REG, 0);
+
+ sun50i_iommu_zap_iova(iommu, iova);
+ sun50i_iommu_zap_iova(iommu, iova + SPAGE_SIZE);
+ if (size > SPAGE_SIZE) {
+ sun50i_iommu_zap_iova(iommu, iova + size);
+ sun50i_iommu_zap_iova(iommu, iova + size + SPAGE_SIZE);
+ }
+ sun50i_iommu_zap_ptw_cache(iommu, iova);
+ sun50i_iommu_zap_ptw_cache(iommu, iova + SZ_1M);
+ if (size > SZ_1M) {
+ sun50i_iommu_zap_ptw_cache(iommu, iova + size);
+ sun50i_iommu_zap_ptw_cache(iommu, iova + size + SZ_1M);
+ }
+
+ iommu_write(iommu, IOMMU_AUTO_GATING_REG, IOMMU_AUTO_GATING_ENABLE);
+}
+
static int sun50i_iommu_flush_all_tlb(struct sun50i_iommu *iommu)
{
u32 reg;
@@ -343,6 +402,18 @@ static void sun50i_iommu_flush_iotlb_all(struct iommu_domain *domain)
spin_unlock_irqrestore(&iommu->iommu_lock, flags);
}
+static void sun50i_iommu_iotlb_sync_map(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
+{
+ struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
+ struct sun50i_iommu *iommu = sun50i_domain->iommu;
+ unsigned long flags;
+
+ spin_lock_irqsave(&iommu->iommu_lock, flags);
+ sun50i_iommu_zap_range(iommu, iova, size);
+ spin_unlock_irqrestore(&iommu->iommu_lock, flags);
+}
+
static void sun50i_iommu_iotlb_sync(struct iommu_domain *domain,
struct iommu_iotlb_gather *gather)
{
@@ -511,7 +582,7 @@ static u32 *sun50i_dte_get_page_table(struct sun50i_iommu_domain *sun50i_domain,
sun50i_iommu_free_page_table(iommu, drop_pt);
}
- sun50i_table_flush(sun50i_domain, page_table, PT_SIZE);
+ sun50i_table_flush(sun50i_domain, page_table, NUM_PT_ENTRIES);
sun50i_table_flush(sun50i_domain, dte_addr, 1);
return page_table;
@@ -601,7 +672,6 @@ static struct iommu_domain *sun50i_iommu_domain_alloc(unsigned type)
struct sun50i_iommu_domain *sun50i_domain;
if (type != IOMMU_DOMAIN_DMA &&
- type != IOMMU_DOMAIN_IDENTITY &&
type != IOMMU_DOMAIN_UNMANAGED)
return NULL;
@@ -766,6 +836,7 @@ static const struct iommu_ops sun50i_iommu_ops = {
.attach_dev = sun50i_iommu_attach_device,
.detach_dev = sun50i_iommu_detach_device,
.flush_iotlb_all = sun50i_iommu_flush_iotlb_all,
+ .iotlb_sync_map = sun50i_iommu_iotlb_sync_map,
.iotlb_sync = sun50i_iommu_iotlb_sync,
.iova_to_phys = sun50i_iommu_iova_to_phys,
.map = sun50i_iommu_map,
@@ -785,6 +856,8 @@ static void sun50i_iommu_report_fault(struct sun50i_iommu *iommu,
report_iommu_fault(iommu->domain, iommu->dev, iova, prot);
else
dev_err(iommu->dev, "Page fault while iommu not attached to any domain?\n");
+
+ sun50i_iommu_zap_range(iommu, iova, SPAGE_SIZE);
}
static phys_addr_t sun50i_iommu_handle_pt_irq(struct sun50i_iommu *iommu,
@@ -868,8 +941,8 @@ static phys_addr_t sun50i_iommu_handle_perm_irq(struct sun50i_iommu *iommu)
static irqreturn_t sun50i_iommu_irq(int irq, void *dev_id)
{
+ u32 status, l1_status, l2_status, resets;
struct sun50i_iommu *iommu = dev_id;
- u32 status;
spin_lock(&iommu->iommu_lock);
@@ -879,6 +952,9 @@ static irqreturn_t sun50i_iommu_irq(int irq, void *dev_id)
return IRQ_NONE;
}
+ l1_status = iommu_read(iommu, IOMMU_L1PG_INT_REG);
+ l2_status = iommu_read(iommu, IOMMU_L2PG_INT_REG);
+
if (status & IOMMU_INT_INVALID_L2PG)
sun50i_iommu_handle_pt_irq(iommu,
IOMMU_INT_ERR_ADDR_L2_REG,
@@ -892,8 +968,9 @@ static irqreturn_t sun50i_iommu_irq(int irq, void *dev_id)
iommu_write(iommu, IOMMU_INT_CLR_REG, status);
- iommu_write(iommu, IOMMU_RESET_REG, ~status);
- iommu_write(iommu, IOMMU_RESET_REG, status);
+ resets = (status | l1_status | l2_status) & IOMMU_INT_MASTER_MASK;
+ iommu_write(iommu, IOMMU_RESET_REG, ~resets);
+ iommu_write(iommu, IOMMU_RESET_REG, IOMMU_RESET_RELEASE_ALL);
spin_unlock(&iommu->iommu_lock);
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index e5ca3cf1a949..ed53279d1106 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -112,7 +112,7 @@ static int gart_iommu_attach_dev(struct iommu_domain *domain,
spin_lock(&gart->dom_lock);
if (gart->active_domain && gart->active_domain != domain) {
- ret = -EBUSY;
+ ret = -EINVAL;
} else if (dev_iommu_priv_get(dev) != domain) {
dev_iommu_priv_set(dev, domain);
gart->active_domain = domain;
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index 8b1b5c270e50..5b8fe9bfa9a5 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -670,7 +670,7 @@ static int viommu_domain_finalise(struct viommu_endpoint *vdev,
dev_err(vdev->dev,
"granule 0x%lx larger than system page size 0x%lx\n",
viommu_page_size, PAGE_SIZE);
- return -EINVAL;
+ return -ENODEV;
}
ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain,
@@ -697,7 +697,7 @@ static int viommu_domain_finalise(struct viommu_endpoint *vdev,
if (ret) {
ida_free(&viommu->domain_ids, vdomain->id);
vdomain->viommu = NULL;
- return -EOPNOTSUPP;
+ return ret;
}
}
@@ -734,8 +734,7 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
*/
ret = viommu_domain_finalise(vdev, domain);
} else if (vdomain->viommu != vdev->viommu) {
- dev_err(dev, "cannot attach to foreign vIOMMU\n");
- ret = -EXDEV;
+ ret = -EINVAL;
}
mutex_unlock(&vdomain->mutex);
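
Dropping the dev_err() and the exotic -EXDEV follows the convention
that attach_dev returns a plain -EINVAL when device and domain are
merely incompatible, leaving logging and recovery to the caller. A
hypothetical caller reacting to that could look like:

#include <linux/iommu.h>

/* Treat -EINVAL as "incompatible", not "broken": retry with a fresh domain */
static int demo_attach_with_fallback(struct iommu_domain *domain,
				     struct iommu_group *group,
				     struct device *dev)
{
	struct iommu_domain *fallback;
	int ret;

	ret = iommu_attach_group(domain, group);
	if (ret != -EINVAL)
		return ret;

	fallback = iommu_domain_alloc(dev->bus);
	if (!fallback)
		return -ENOMEM;
	ret = iommu_attach_group(fallback, group);
	if (ret)
		iommu_domain_free(fallback);
	return ret;
}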