Diffstat (limited to 'drivers/iommu/iommu.c')
-rw-r--r--	drivers/iommu/iommu.c	171
1 file changed, 85 insertions(+), 86 deletions(-)
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 5f6a85aea501..b3f847b25b4f 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -371,6 +371,30 @@ err_unlock:
return ret;
}
+static bool iommu_is_attach_deferred(struct device *dev)
+{
+ const struct iommu_ops *ops = dev_iommu_ops(dev);
+
+ if (ops->is_attach_deferred)
+ return ops->is_attach_deferred(dev);
+
+ return false;
+}
+
+static int iommu_group_do_dma_first_attach(struct device *dev, void *data)
+{
+ struct iommu_domain *domain = data;
+
+ lockdep_assert_held(&dev->iommu_group->mutex);
+
+ if (iommu_is_attach_deferred(dev)) {
+ dev->iommu->attach_deferred = 1;
+ return 0;
+ }
+
+ return __iommu_attach_device(domain, dev);
+}
+
int iommu_probe_device(struct device *dev)
{
const struct iommu_ops *ops;
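A device opts into deferred attach through the is_attach_deferred() op; with this change the core latches the answer in dev->iommu->attach_deferred at first-attach time instead of re-querying the driver later. A minimal driver-side sketch, with hypothetical mydrv_* names (the in-tree user of this mechanism is the kexec/crash-dump path, where the device may still be using DMA mappings set up by the previous kernel):

	/* Defer the first attach while the device may still be using DMA
	 * mappings programmed by the previous kernel; the core records the
	 * deferral and performs the real attach later, from
	 * iommu_deferred_attach(), on first DMA API use.
	 */
	static bool mydrv_is_attach_deferred(struct device *dev)
	{
		return is_kdump_kernel();	/* from <linux/crash_dump.h> */
	}

	static const struct iommu_ops mydrv_ops = {
		.is_attach_deferred	= mydrv_is_attach_deferred,
		/* ... remaining ops ... */
	};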
@@ -401,7 +425,7 @@ int iommu_probe_device(struct device *dev)
* attach the default domain.
*/
if (group->default_domain && !group->owner) {
- ret = __iommu_attach_device(group->default_domain, dev);
+ ret = iommu_group_do_dma_first_attach(dev, group->default_domain);
if (ret) {
mutex_unlock(&group->mutex);
iommu_group_put(group);
@@ -774,12 +798,16 @@ struct iommu_group *iommu_group_alloc(void)
ret = iommu_group_create_file(group,
&iommu_group_attr_reserved_regions);
- if (ret)
+ if (ret) {
+ kobject_put(group->devices_kobj);
return ERR_PTR(ret);
+ }
ret = iommu_group_create_file(group, &iommu_group_attr_type);
- if (ret)
+ if (ret) {
+ kobject_put(group->devices_kobj);
return ERR_PTR(ret);
+ }
pr_debug("Allocated group %d\n", group->id);
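The added kobject_put() calls fix leaks in the two error paths above: the group's memory and its IDA slot are owned by devices_kobj once the kobject is set up, so bailing out with a bare ERR_PTR() left both behind. A generic sketch of the lifetime pattern (illustrative myobj_* names; iommu_group_release() in this file plays the same role for the group):

	#include <linux/idr.h>
	#include <linux/kobject.h>
	#include <linux/slab.h>

	struct myobj {
		struct kobject kobj;
		int id;
	};

	static DEFINE_IDA(myobj_ida);

	/* Once the kobject is live, all cleanup funnels through its release
	 * callback, so every error path unwinds with kobject_put() rather
	 * than a piecemeal kfree(). */
	static void myobj_release(struct kobject *kobj)
	{
		struct myobj *obj = container_of(kobj, struct myobj, kobj);

		ida_free(&myobj_ida, obj->id);
		kfree(obj);
	}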
@@ -930,7 +958,7 @@ map_end:
if (map_size) {
ret = iommu_map(domain, addr - map_size,
addr - map_size, map_size,
- entry->prot);
+ entry->prot, GFP_KERNEL);
if (ret)
goto out;
map_size = 0;
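iommu_create_device_direct_mappings() runs in process context, so GFP_KERNEL is the right choice here; after this series the caller always states what its context allows rather than picking a differently named wrapper. Assuming domain, iova, paddr, size and prot in scope:

	/* Sleepable context: page-table allocations may block. */
	ret = iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);

	/* Atomic context (e.g. under a spinlock): no blocking allocations. */
	ret = iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);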
@@ -947,16 +975,6 @@ out:
return ret;
}
-static bool iommu_is_attach_deferred(struct device *dev)
-{
- const struct iommu_ops *ops = dev_iommu_ops(dev);
-
- if (ops->is_attach_deferred)
- return ops->is_attach_deferred(dev);
-
- return false;
-}
-
/**
* iommu_group_add_device - add a device to an iommu group
* @group: the group into which to add the device (reference should be held)
@@ -1009,8 +1027,8 @@ rename:
mutex_lock(&group->mutex);
list_add_tail(&device->list, &group->devices);
- if (group->domain && !iommu_is_attach_deferred(dev))
- ret = __iommu_attach_device(group->domain, dev);
+ if (group->domain)
+ ret = iommu_group_do_dma_first_attach(dev, group->domain);
mutex_unlock(&group->mutex);
if (ret)
goto err_put_group;
@@ -1776,21 +1794,10 @@ static void probe_alloc_default_domain(struct bus_type *bus,
}
-static int iommu_group_do_dma_attach(struct device *dev, void *data)
-{
- struct iommu_domain *domain = data;
- int ret = 0;
-
- if (!iommu_is_attach_deferred(dev))
- ret = __iommu_attach_device(domain, dev);
-
- return ret;
-}
-
-static int __iommu_group_dma_attach(struct iommu_group *group)
+static int __iommu_group_dma_first_attach(struct iommu_group *group)
{
return __iommu_group_for_each_dev(group, group->default_domain,
- iommu_group_do_dma_attach);
+ iommu_group_do_dma_first_attach);
}
static int iommu_group_do_probe_finalize(struct device *dev, void *data)
@@ -1855,7 +1862,7 @@ int bus_iommu_probe(struct bus_type *bus)
iommu_group_create_direct_mappings(group);
- ret = __iommu_group_dma_attach(group);
+ ret = __iommu_group_dma_first_attach(group);
mutex_unlock(&group->mutex);
@@ -1987,9 +1994,11 @@ static int __iommu_attach_device(struct iommu_domain *domain,
return -ENODEV;
ret = domain->ops->attach_dev(domain, dev);
- if (!ret)
- trace_attach_device_to_domain(dev);
- return ret;
+ if (ret)
+ return ret;
+ dev->iommu->attach_deferred = 0;
+ trace_attach_device_to_domain(dev);
+ return 0;
}
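Clearing attach_deferred on the success path makes the deferral single-shot: once a real attach has gone through, iommu_deferred_attach() below becomes a no-op for that device. The flag's life cycle, as implemented by the hunks in this patch:

	/*
	 * dev->iommu->attach_deferred life cycle:
	 *
	 *   first attach:   ops->is_attach_deferred(dev) returns true
	 *                   -> flag set to 1, attach skipped
	 *   first DMA use:  iommu_deferred_attach() sees the flag
	 *                   -> __iommu_attach_device() runs, flag cleared to 0
	 *   afterwards:     flag stays 0; deferral never re-arms
	 */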
/**
@@ -2034,22 +2043,12 @@ EXPORT_SYMBOL_GPL(iommu_attach_device);
int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain)
{
- if (iommu_is_attach_deferred(dev))
+ if (dev->iommu && dev->iommu->attach_deferred)
return __iommu_attach_device(domain, dev);
return 0;
}
-static void __iommu_detach_device(struct iommu_domain *domain,
- struct device *dev)
-{
- if (iommu_is_attach_deferred(dev))
- return;
-
- domain->ops->detach_dev(domain, dev);
- trace_detach_device_from_domain(dev);
-}
-
void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
{
struct iommu_group *group;
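iommu_deferred_attach() sits on the DMA API fast paths, which is why the indirect ops query is replaced by a flag test: the common case becomes a cheap load of a bit the core already maintains. A sketch of the caller side, modeled on the pattern in dma-iommu.c (function name illustrative):

	/* Perform the postponed attach on first DMA. unlikely() reflects
	 * that few devices defer, and the flag self-clears after the first
	 * successful attach. */
	static int my_dma_prepare(struct device *dev, struct iommu_domain *domain)
	{
		if (unlikely(iommu_deferred_attach(dev, domain)))
			return -ENODEV;
		return 0;
	}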
@@ -2124,8 +2123,22 @@ static int __iommu_attach_group(struct iommu_domain *domain,
ret = __iommu_group_for_each_dev(group, domain,
iommu_group_do_attach_device);
- if (ret == 0)
+ if (ret == 0) {
group->domain = domain;
+ } else {
+ /*
+ * To recover from the case where a device within the group fails
+ * to attach to the new domain, we need to force all devices in the
+ * group back onto the old domain. The old domain is compatible
+ * with every device in the group, hence the iommu driver should
+ * always return success here.
+ */
+ struct iommu_domain *old_domain = group->domain;
+
+ group->domain = NULL;
+ WARN(__iommu_group_set_domain(group, old_domain),
+ "iommu driver failed to attach a compatible domain");
+ }
return ret;
}
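From the caller's point of view the recovery above makes iommu_attach_group() transactional: either every device in the group is attached to the new domain, or the group has been force-attached back to the domain it had before the call. An illustrative caller (error handling only):

	static int switch_group_domain(struct iommu_group *group,
				       struct iommu_domain *new_domain)
	{
		int ret;

		ret = iommu_attach_group(new_domain, group);
		if (ret)
			return ret;	/* group still on its previous domain */

		/* all devices in the group now use new_domain */
		return 0;
	}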
@@ -2154,11 +2167,12 @@ int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
}
EXPORT_SYMBOL_GPL(iommu_attach_group);
-static int iommu_group_do_detach_device(struct device *dev, void *data)
+static int iommu_group_do_set_platform_dma(struct device *dev, void *data)
{
- struct iommu_domain *domain = data;
+ const struct iommu_ops *ops = dev_iommu_ops(dev);
- __iommu_detach_device(domain, dev);
+ if (!WARN_ON(!ops->set_platform_dma_ops))
+ ops->set_platform_dma_ops(dev);
return 0;
}
@@ -2172,15 +2186,13 @@ static int __iommu_group_set_domain(struct iommu_group *group,
return 0;
/*
- * New drivers should support default domains and so the detach_dev() op
- * will never be called. Otherwise the NULL domain represents some
+ * New drivers should support default domains, so set_platform_dma()
+ * op will never be called. Otherwise the NULL domain represents some
* platform specific behavior.
*/
if (!new_domain) {
- if (WARN_ON(!group->domain->ops->detach_dev))
- return -EINVAL;
- __iommu_group_for_each_dev(group, group->domain,
- iommu_group_do_detach_device);
+ __iommu_group_for_each_dev(group, NULL,
+ iommu_group_do_set_platform_dma);
group->domain = NULL;
return 0;
}
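set_platform_dma_ops() is the successor to the removed detach_dev() op for drivers that cannot express "no translation" as an IOMMU domain: rather than detaching from a specific domain, the driver is asked to return the device to its platform-default DMA setup, which is why the callback takes only the device. A hypothetical legacy-driver sketch (mydrv_* names and the hardware op are illustrative):

	/* Restore the firmware/platform DMA configuration when the core
	 * moves the group to a NULL domain. Unlike the old detach_dev(),
	 * no domain is passed in; the driver only needs the device. */
	static void mydrv_set_platform_dma_ops(struct device *dev)
	{
		struct mydrv_dev *mdev = dev_iommu_priv_get(dev);

		mydrv_hw_disable_translation(mdev);	/* hypothetical */
	}

	static const struct iommu_ops mydrv_ops = {
		.set_platform_dma_ops	= mydrv_set_platform_dma_ops,
		/* ... remaining ops ... */
	};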
@@ -2360,34 +2372,27 @@ static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
return ret;
}
-static int _iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+int iommu_map(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
const struct iommu_domain_ops *ops = domain->ops;
int ret;
+ might_sleep_if(gfpflags_allow_blocking(gfp));
+
+ /* Discourage passing strange GFP flags */
+ if (WARN_ON_ONCE(gfp & (__GFP_COMP | __GFP_DMA | __GFP_DMA32 |
+ __GFP_HIGHMEM)))
+ return -EINVAL;
+
ret = __iommu_map(domain, iova, paddr, size, prot, gfp);
if (ret == 0 && ops->iotlb_sync_map)
ops->iotlb_sync_map(domain, iova, size);
return ret;
}
-
-int iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
-{
- might_sleep();
- return _iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
-}
EXPORT_SYMBOL_GPL(iommu_map);
-int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
-{
- return _iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);
-}
-EXPORT_SYMBOL_GPL(iommu_map_atomic);
-
static size_t __iommu_unmap_pages(struct iommu_domain *domain,
unsigned long iova, size_t size,
struct iommu_iotlb_gather *iotlb_gather)
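For callers, the migration for the hunks above is mechanical: the gfp that used to be implied by which wrapper was called is now an explicit argument, and iommu_map_atomic() is gone. Before/after, with the same arguments otherwise:

	/* Before: context encoded in the function name. */
	ret = iommu_map(domain, iova, paddr, size, prot);		/* GFP_KERNEL */
	ret = iommu_map_atomic(domain, iova, paddr, size, prot);	/* GFP_ATOMIC */

	/* After: context passed explicitly. Zone and type modifiers such
	 * as __GFP_COMP, __GFP_DMA, __GFP_DMA32 and __GFP_HIGHMEM are now
	 * rejected with -EINVAL, since they make no sense for page-table
	 * allocations. */
	ret = iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
	ret = iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);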
@@ -2477,9 +2482,9 @@ size_t iommu_unmap_fast(struct iommu_domain *domain,
}
EXPORT_SYMBOL_GPL(iommu_unmap_fast);
-static ssize_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
- struct scatterlist *sg, unsigned int nents, int prot,
- gfp_t gfp)
+ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+ struct scatterlist *sg, unsigned int nents, int prot,
+ gfp_t gfp)
{
const struct iommu_domain_ops *ops = domain->ops;
size_t len = 0, mapped = 0;
@@ -2487,6 +2492,13 @@ static ssize_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
unsigned int i = 0;
int ret;
+ might_sleep_if(gfpflags_allow_blocking(gfp));
+
+ /* Discourage passing strange GFP flags */
+ if (WARN_ON_ONCE(gfp & (__GFP_COMP | __GFP_DMA | __GFP_DMA32 |
+ __GFP_HIGHMEM)))
+ return -EINVAL;
+
while (i <= nents) {
phys_addr_t s_phys = sg_phys(sg);
@@ -2526,21 +2538,8 @@ out_err:
return ret;
}
-
-ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
- struct scatterlist *sg, unsigned int nents, int prot)
-{
- might_sleep();
- return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_KERNEL);
-}
EXPORT_SYMBOL_GPL(iommu_map_sg);
-ssize_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova,
- struct scatterlist *sg, unsigned int nents, int prot)
-{
- return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC);
-}
-
/**
* report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
* @domain: the iommu domain where the fault has happened
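iommu_map_sg() follows the same scheme as iommu_map() above: the old __iommu_map_sg() helper becomes the exported function with a gfp parameter, it gains the identical blocking check and GFP-flag policing, and iommu_map_sg_atomic() is removed. Caller migration, assuming domain, iova, sg, nents and prot in scope:

	/* Before */
	mapped = iommu_map_sg(domain, iova, sg, nents, prot);
	mapped = iommu_map_sg_atomic(domain, iova, sg, nents, prot);

	/* After: pick the gfp matching the calling context */
	mapped = iommu_map_sg(domain, iova, sg, nents, prot, GFP_KERNEL);
	mapped = iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC);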