author		Niklas Schnelle <schnelle@linux.ibm.com>	2022-11-09 17:29:01 +0300
committer	Joerg Roedel <jroedel@suse.de>	2022-11-19 12:28:16 +0300
commit		2ba8336dab5fb81452aea9c21dfc870050a017f3 (patch)
tree		28cf21630a2a43438e63a6b8001f32ebd9abc8b8 /drivers/iommu/s390-iommu.c
parent		c228f5a043370ef02867e4f0aab1bdc8422500e6 (diff)
download	linux-2ba8336dab5fb81452aea9c21dfc870050a017f3.tar.xz
iommu/s390: Use RCU to allow concurrent domain_list iteration
The s390_domain->devices list is only added to when new devices are
attached, but it is iterated through in read-only fashion for every
mapping operation as well as for I/O TLB flushes, and thus in
performance-critical code, causing contention on the
s390_domain->list_lock. Fortunately such a read-mostly linked list is a
standard use case for RCU. This change closely follows the example for
RCU-protected lists given in Documentation/RCU/listRCU.rst.

Signed-off-by: Niklas Schnelle <schnelle@linux.ibm.com>
Link: https://lore.kernel.org/r/20221109142903.4080275-4-schnelle@linux.ibm.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
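For context, here is a minimal sketch of the read-mostly RCU list pattern
from Documentation/RCU/listRCU.rst that this patch applies; the demo_*
names are illustrative only, not part of the driver:

	#include <linux/list.h>
	#include <linux/rculist.h>
	#include <linux/rcupdate.h>
	#include <linux/spinlock.h>

	struct demo_dev {
		struct list_head list;
	};

	struct demo_domain {
		struct list_head devices;	/* read-mostly, RCU-protected */
		spinlock_t list_lock;		/* serializes writers only */
	};

	/* Writer side (rare attach/detach): still serialized by the spinlock. */
	static void demo_attach(struct demo_domain *dom, struct demo_dev *dev)
	{
		unsigned long flags;

		spin_lock_irqsave(&dom->list_lock, flags);
		list_add_rcu(&dev->list, &dom->devices);
		spin_unlock_irqrestore(&dom->list_lock, flags);
	}

	/* Reader side (hot path): lockless, so concurrent readers never contend. */
	static void demo_flush_all(struct demo_domain *dom)
	{
		struct demo_dev *dev;

		rcu_read_lock();
		list_for_each_entry_rcu(dev, &dom->devices, list) {
			/* per-device flush would go here */
		}
		rcu_read_unlock();
	}

Readers pay no locking cost at all; only the rare attach/detach writers
take the spinlock, which removes the list_lock contention from the
mapping and flush paths.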
Diffstat (limited to 'drivers/iommu/s390-iommu.c')
-rw-r--r--	drivers/iommu/s390-iommu.c	44
1 file changed, 27 insertions(+), 17 deletions(-)
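The diff below also converts s390_domain_free() to a call_rcu() callback,
so the domain and its DMA tables are not freed while a lockless reader may
still be traversing the devices list. A minimal sketch of that
deferred-free idiom, again with illustrative demo_* names:

	#include <linux/container_of.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct demo_domain {
		struct rcu_head rcu;	/* embedded so container_of() can recover us */
		/* ... other fields ... */
	};

	static void demo_rcu_free(struct rcu_head *head)
	{
		struct demo_domain *dom = container_of(head, struct demo_domain, rcu);

		kfree(dom);	/* runs only after all pre-existing RCU readers finish */
	}

	static void demo_free(struct demo_domain *dom)
	{
		/* Never kfree() directly: an rcu_read_lock() reader may still hold it. */
		call_rcu(&dom->rcu, demo_rcu_free);
	}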
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
index 9771bce86e94..cf5dcbcea4e0 100644
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -10,6 +10,8 @@
#include <linux/iommu.h>
#include <linux/iommu-helper.h>
#include <linux/sizes.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
#include <asm/pci_dma.h>
static const struct iommu_ops s390_iommu_ops;
@@ -20,6 +22,7 @@ struct s390_domain {
unsigned long *dma_table;
spinlock_t dma_table_lock;
spinlock_t list_lock;
+ struct rcu_head rcu;
};
static struct s390_domain *to_s390_domain(struct iommu_domain *dom)
@@ -61,18 +64,28 @@ static struct iommu_domain *s390_domain_alloc(unsigned domain_type)
spin_lock_init(&s390_domain->dma_table_lock);
spin_lock_init(&s390_domain->list_lock);
- INIT_LIST_HEAD(&s390_domain->devices);
+ INIT_LIST_HEAD_RCU(&s390_domain->devices);
return &s390_domain->domain;
}
+static void s390_iommu_rcu_free_domain(struct rcu_head *head)
+{
+ struct s390_domain *s390_domain = container_of(head, struct s390_domain, rcu);
+
+ dma_cleanup_tables(s390_domain->dma_table);
+ kfree(s390_domain);
+}
+
static void s390_domain_free(struct iommu_domain *domain)
{
struct s390_domain *s390_domain = to_s390_domain(domain);
+ rcu_read_lock();
WARN_ON(!list_empty(&s390_domain->devices));
- dma_cleanup_tables(s390_domain->dma_table);
- kfree(s390_domain);
+ rcu_read_unlock();
+
+ call_rcu(&s390_domain->rcu, s390_iommu_rcu_free_domain);
}
static void __s390_iommu_detach_device(struct zpci_dev *zdev)
@@ -84,7 +97,7 @@ static void __s390_iommu_detach_device(struct zpci_dev *zdev)
return;
spin_lock_irqsave(&s390_domain->list_lock, flags);
- list_del_init(&zdev->iommu_list);
+ list_del_rcu(&zdev->iommu_list);
spin_unlock_irqrestore(&s390_domain->list_lock, flags);
zpci_unregister_ioat(zdev, 0);
@@ -127,7 +140,7 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
zdev->s390_domain = s390_domain;
spin_lock_irqsave(&s390_domain->list_lock, flags);
- list_add(&zdev->iommu_list, &s390_domain->devices);
+ list_add_rcu(&zdev->iommu_list, &s390_domain->devices);
spin_unlock_irqrestore(&s390_domain->list_lock, flags);
return 0;
@@ -203,14 +216,13 @@ static void s390_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
struct s390_domain *s390_domain = to_s390_domain(domain);
struct zpci_dev *zdev;
- unsigned long flags;
- spin_lock_irqsave(&s390_domain->list_lock, flags);
- list_for_each_entry(zdev, &s390_domain->devices, iommu_list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
zpci_refresh_trans((u64)zdev->fh << 32, zdev->start_dma,
zdev->end_dma - zdev->start_dma + 1);
}
- spin_unlock_irqrestore(&s390_domain->list_lock, flags);
+ rcu_read_unlock();
}
static void s390_iommu_iotlb_sync(struct iommu_domain *domain,
@@ -219,18 +231,17 @@ static void s390_iommu_iotlb_sync(struct iommu_domain *domain,
struct s390_domain *s390_domain = to_s390_domain(domain);
size_t size = gather->end - gather->start + 1;
struct zpci_dev *zdev;
- unsigned long flags;
/* If gather was never added to there is nothing to flush */
if (!gather->end)
return;
- spin_lock_irqsave(&s390_domain->list_lock, flags);
- list_for_each_entry(zdev, &s390_domain->devices, iommu_list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
zpci_refresh_trans((u64)zdev->fh << 32, gather->start,
size);
}
- spin_unlock_irqrestore(&s390_domain->list_lock, flags);
+ rcu_read_unlock();
}
static void s390_iommu_iotlb_sync_map(struct iommu_domain *domain,
@@ -238,16 +249,15 @@ static void s390_iommu_iotlb_sync_map(struct iommu_domain *domain,
{
struct s390_domain *s390_domain = to_s390_domain(domain);
struct zpci_dev *zdev;
- unsigned long flags;
- spin_lock_irqsave(&s390_domain->list_lock, flags);
- list_for_each_entry(zdev, &s390_domain->devices, iommu_list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
if (!zdev->tlb_refresh)
continue;
zpci_refresh_trans((u64)zdev->fh << 32,
iova, size);
}
- spin_unlock_irqrestore(&s390_domain->list_lock, flags);
+ rcu_read_unlock();
}
static int s390_iommu_update_trans(struct s390_domain *s390_domain,