author		Niklas Schnelle <schnelle@linux.ibm.com>	2023-09-28 17:31:37 +0300
committer	Joerg Roedel <jroedel@suse.de>			2023-10-02 09:43:00 +0300
commit		c76c067e488ccd55734c3e750799caf2c5956db6 (patch)
tree		c4489302a1c2a119afcba4b54535cb4d8c65cbee /drivers/iommu/s390-iommu.c
parent		b6f888709a155b4e748aded93afc9fa43aaa28dd (diff)
s390/pci: Use dma-iommu layer
While s390 already has a standard IOMMU driver and previous changes have
added I/O TLB flushing operations, this driver is currently only used
for user-space PCI access such as vfio-pci. For the DMA API, s390
instead uses its own implementation in arch/s390/pci/pci_dma.c, which
drives the same hardware and shares some code, but requires a complex
and fragile handover between DMA API and IOMMU API use of a device and,
despite the code sharing, still leads to significant duplication and
maintenance effort. Let's use the common code DMA API implementation
from drivers/iommu/dma-iommu.c instead, allowing us to get rid of
arch/s390/pci/pci_dma.c.

Reviewed-by: Matthew Rosato <mjrosato@linux.ibm.com>
Signed-off-by: Niklas Schnelle <schnelle@linux.ibm.com>
Link: https://lore.kernel.org/r/20230928-dma_iommu-v13-3-9e5fc4dacc36@linux.ibm.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Diffstat (limited to 'drivers/iommu/s390-iommu.c')
-rw-r--r--	drivers/iommu/s390-iommu.c | 400
1 file changed, 359 insertions(+), 41 deletions(-)
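
Most of the new code below moves the CPU-side I/O translation-table handling out of arch/s390/pci/pci_dma.c into the IOMMU driver: a DMA address is resolved by a three-level walk (region table -> segment table -> page table), with calc_rtx(), calc_sx() and calc_px() extracting the per-level table indices. The following is a minimal, self-contained user-space sketch of just that index arithmetic, assuming the usual zPCI geometry (11-bit region and segment indices, 8-bit page index, 4K pages, i.e. a 42-bit I/O address space matching ZPCI_TABLE_SIZE_RT); the constants mirror the kernel's ZPCI_* definitions but are redefined here purely for illustration.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's ZPCI_* constants (assumed
 * geometry: 2048-entry region and segment tables, 256-entry page
 * tables, 4K pages => 11 + 11 + 8 + 12 = 42 address bits).
 */
#define PAGE_SHIFT	12
#define PT_BITS		8				/* cf. ZPCI_PT_BITS */
#define TABLE_BITS	11				/* cf. ZPCI_TABLE_BITS */
#define ST_SHIFT	(PT_BITS + PAGE_SHIFT)		/* cf. ZPCI_ST_SHIFT */
#define RT_SHIFT	(ST_SHIFT + TABLE_BITS)		/* cf. ZPCI_RT_SHIFT */
#define INDEX_MASK	((1UL << TABLE_BITS) - 1)	/* cf. ZPCI_INDEX_MASK */
#define PT_MASK		((1UL << PT_BITS) - 1)		/* cf. ZPCI_PT_MASK */

int main(void)
{
	uint64_t iova = 0x23456789000ULL;	/* an arbitrary in-aperture address */

	/* The same extraction that calc_rtx()/calc_sx()/calc_px() perform */
	unsigned int rtx = (iova >> RT_SHIFT) & INDEX_MASK;
	unsigned int sx  = (iova >> ST_SHIFT) & INDEX_MASK;
	unsigned int px  = (iova >> PAGE_SHIFT) & PT_MASK;

	printf("iova 0x%llx -> rtx %u, sx %u, px %u\n",
	       (unsigned long long)iova, rtx, sx, px);
	return 0;
}

In the patch, dma_walk_cpu_trans() uses these three indices to descend the tables, populating missing levels on demand with cmpxchg() so concurrent mappers need no lock.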
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
index 560d0957f9be..bb9e48c82644 100644
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -14,16 +14,300 @@
#include <linux/rcupdate.h>
#include <asm/pci_dma.h>
+#include "dma-iommu.h"
+
static const struct iommu_ops s390_iommu_ops;
+static struct kmem_cache *dma_region_table_cache;
+static struct kmem_cache *dma_page_table_cache;
+
+static u64 s390_iommu_aperture;
+static u32 s390_iommu_aperture_factor = 1;
+
struct s390_domain {
struct iommu_domain domain;
struct list_head devices;
+ struct zpci_iommu_ctrs ctrs;
unsigned long *dma_table;
spinlock_t list_lock;
struct rcu_head rcu;
};
+static inline unsigned int calc_rtx(dma_addr_t ptr)
+{
+ return ((unsigned long)ptr >> ZPCI_RT_SHIFT) & ZPCI_INDEX_MASK;
+}
+
+static inline unsigned int calc_sx(dma_addr_t ptr)
+{
+ return ((unsigned long)ptr >> ZPCI_ST_SHIFT) & ZPCI_INDEX_MASK;
+}
+
+static inline unsigned int calc_px(dma_addr_t ptr)
+{
+ return ((unsigned long)ptr >> PAGE_SHIFT) & ZPCI_PT_MASK;
+}
+
+static inline void set_pt_pfaa(unsigned long *entry, phys_addr_t pfaa)
+{
+ *entry &= ZPCI_PTE_FLAG_MASK;
+ *entry |= (pfaa & ZPCI_PTE_ADDR_MASK);
+}
+
+static inline void set_rt_sto(unsigned long *entry, phys_addr_t sto)
+{
+ *entry &= ZPCI_RTE_FLAG_MASK;
+ *entry |= (sto & ZPCI_RTE_ADDR_MASK);
+ *entry |= ZPCI_TABLE_TYPE_RTX;
+}
+
+static inline void set_st_pto(unsigned long *entry, phys_addr_t pto)
+{
+ *entry &= ZPCI_STE_FLAG_MASK;
+ *entry |= (pto & ZPCI_STE_ADDR_MASK);
+ *entry |= ZPCI_TABLE_TYPE_SX;
+}
+
+static inline void validate_rt_entry(unsigned long *entry)
+{
+ *entry &= ~ZPCI_TABLE_VALID_MASK;
+ *entry &= ~ZPCI_TABLE_OFFSET_MASK;
+ *entry |= ZPCI_TABLE_VALID;
+ *entry |= ZPCI_TABLE_LEN_RTX;
+}
+
+static inline void validate_st_entry(unsigned long *entry)
+{
+ *entry &= ~ZPCI_TABLE_VALID_MASK;
+ *entry |= ZPCI_TABLE_VALID;
+}
+
+static inline void invalidate_pt_entry(unsigned long *entry)
+{
+ WARN_ON_ONCE((*entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_INVALID);
+ *entry &= ~ZPCI_PTE_VALID_MASK;
+ *entry |= ZPCI_PTE_INVALID;
+}
+
+static inline void validate_pt_entry(unsigned long *entry)
+{
+ WARN_ON_ONCE((*entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID);
+ *entry &= ~ZPCI_PTE_VALID_MASK;
+ *entry |= ZPCI_PTE_VALID;
+}
+
+static inline void entry_set_protected(unsigned long *entry)
+{
+ *entry &= ~ZPCI_TABLE_PROT_MASK;
+ *entry |= ZPCI_TABLE_PROTECTED;
+}
+
+static inline void entry_clr_protected(unsigned long *entry)
+{
+ *entry &= ~ZPCI_TABLE_PROT_MASK;
+ *entry |= ZPCI_TABLE_UNPROTECTED;
+}
+
+static inline int reg_entry_isvalid(unsigned long entry)
+{
+ return (entry & ZPCI_TABLE_VALID_MASK) == ZPCI_TABLE_VALID;
+}
+
+static inline int pt_entry_isvalid(unsigned long entry)
+{
+ return (entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID;
+}
+
+static inline unsigned long *get_rt_sto(unsigned long entry)
+{
+ if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RTX)
+ return phys_to_virt(entry & ZPCI_RTE_ADDR_MASK);
+ else
+ return NULL;
+}
+
+static inline unsigned long *get_st_pto(unsigned long entry)
+{
+ if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_SX)
+ return phys_to_virt(entry & ZPCI_STE_ADDR_MASK);
+ else
+ return NULL;
+}
+
+static int __init dma_alloc_cpu_table_caches(void)
+{
+ dma_region_table_cache = kmem_cache_create("PCI_DMA_region_tables",
+ ZPCI_TABLE_SIZE,
+ ZPCI_TABLE_ALIGN,
+ 0, NULL);
+ if (!dma_region_table_cache)
+ return -ENOMEM;
+
+ dma_page_table_cache = kmem_cache_create("PCI_DMA_page_tables",
+ ZPCI_PT_SIZE,
+ ZPCI_PT_ALIGN,
+ 0, NULL);
+ if (!dma_page_table_cache) {
+ kmem_cache_destroy(dma_region_table_cache);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static unsigned long *dma_alloc_cpu_table(gfp_t gfp)
+{
+ unsigned long *table, *entry;
+
+ table = kmem_cache_alloc(dma_region_table_cache, gfp);
+ if (!table)
+ return NULL;
+
+ for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++)
+ *entry = ZPCI_TABLE_INVALID;
+ return table;
+}
+
+static void dma_free_cpu_table(void *table)
+{
+ kmem_cache_free(dma_region_table_cache, table);
+}
+
+static void dma_free_page_table(void *table)
+{
+ kmem_cache_free(dma_page_table_cache, table);
+}
+
+static void dma_free_seg_table(unsigned long entry)
+{
+ unsigned long *sto = get_rt_sto(entry);
+ int sx;
+
+ for (sx = 0; sx < ZPCI_TABLE_ENTRIES; sx++)
+ if (reg_entry_isvalid(sto[sx]))
+ dma_free_page_table(get_st_pto(sto[sx]));
+
+ dma_free_cpu_table(sto);
+}
+
+static void dma_cleanup_tables(unsigned long *table)
+{
+ int rtx;
+
+ if (!table)
+ return;
+
+ for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
+ if (reg_entry_isvalid(table[rtx]))
+ dma_free_seg_table(table[rtx]);
+
+ dma_free_cpu_table(table);
+}
+
+static unsigned long *dma_alloc_page_table(gfp_t gfp)
+{
+ unsigned long *table, *entry;
+
+ table = kmem_cache_alloc(dma_page_table_cache, gfp);
+ if (!table)
+ return NULL;
+
+ for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++)
+ *entry = ZPCI_PTE_INVALID;
+ return table;
+}
+
+static unsigned long *dma_get_seg_table_origin(unsigned long *rtep, gfp_t gfp)
+{
+ unsigned long old_rte, rte;
+ unsigned long *sto;
+
+ rte = READ_ONCE(*rtep);
+ if (reg_entry_isvalid(rte)) {
+ sto = get_rt_sto(rte);
+ } else {
+ sto = dma_alloc_cpu_table(gfp);
+ if (!sto)
+ return NULL;
+
+ set_rt_sto(&rte, virt_to_phys(sto));
+ validate_rt_entry(&rte);
+ entry_clr_protected(&rte);
+
+ old_rte = cmpxchg(rtep, ZPCI_TABLE_INVALID, rte);
+ if (old_rte != ZPCI_TABLE_INVALID) {
+ /* Someone else was faster, use theirs */
+ dma_free_cpu_table(sto);
+ sto = get_rt_sto(old_rte);
+ }
+ }
+ return sto;
+}
+
+static unsigned long *dma_get_page_table_origin(unsigned long *step, gfp_t gfp)
+{
+ unsigned long old_ste, ste;
+ unsigned long *pto;
+
+ ste = READ_ONCE(*step);
+ if (reg_entry_isvalid(ste)) {
+ pto = get_st_pto(ste);
+ } else {
+ pto = dma_alloc_page_table(gfp);
+ if (!pto)
+ return NULL;
+ set_st_pto(&ste, virt_to_phys(pto));
+ validate_st_entry(&ste);
+ entry_clr_protected(&ste);
+
+ old_ste = cmpxchg(step, ZPCI_TABLE_INVALID, ste);
+ if (old_ste != ZPCI_TABLE_INVALID) {
+ /* Someone else was faster, use theirs */
+ dma_free_page_table(pto);
+ pto = get_st_pto(old_ste);
+ }
+ }
+ return pto;
+}
+
+static unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr, gfp_t gfp)
+{
+ unsigned long *sto, *pto;
+ unsigned int rtx, sx, px;
+
+ rtx = calc_rtx(dma_addr);
+ sto = dma_get_seg_table_origin(&rto[rtx], gfp);
+ if (!sto)
+ return NULL;
+
+ sx = calc_sx(dma_addr);
+ pto = dma_get_page_table_origin(&sto[sx], gfp);
+ if (!pto)
+ return NULL;
+
+ px = calc_px(dma_addr);
+ return &pto[px];
+}
+
+static void dma_update_cpu_trans(unsigned long *ptep, phys_addr_t page_addr, int flags)
+{
+ unsigned long pte;
+
+ pte = READ_ONCE(*ptep);
+ if (flags & ZPCI_PTE_INVALID) {
+ invalidate_pt_entry(&pte);
+ } else {
+ set_pt_pfaa(&pte, page_addr);
+ validate_pt_entry(&pte);
+ }
+
+ if (flags & ZPCI_TABLE_PROTECTED)
+ entry_set_protected(&pte);
+ else
+ entry_clr_protected(&pte);
+
+ xchg(ptep, pte);
+}
+
static struct s390_domain *to_s390_domain(struct iommu_domain *dom)
{
return container_of(dom, struct s390_domain, domain);
@@ -34,6 +318,8 @@ static bool s390_iommu_capable(struct device *dev, enum iommu_cap cap)
switch (cap) {
case IOMMU_CAP_CACHE_COHERENCY:
return true;
+ case IOMMU_CAP_DEFERRED_FLUSH:
+ return true;
default:
return false;
}
@@ -81,14 +367,13 @@ static void s390_domain_free(struct iommu_domain *domain)
call_rcu(&s390_domain->rcu, s390_iommu_rcu_free_domain);
}
-static void __s390_iommu_detach_device(struct zpci_dev *zdev)
+static void s390_iommu_detach_device(struct iommu_domain *domain,
+ struct device *dev)
{
- struct s390_domain *s390_domain = zdev->s390_domain;
+ struct s390_domain *s390_domain = to_s390_domain(domain);
+ struct zpci_dev *zdev = to_zpci_dev(dev);
unsigned long flags;
- if (!s390_domain)
- return;
-
spin_lock_irqsave(&s390_domain->list_lock, flags);
list_del_rcu(&zdev->iommu_list);
spin_unlock_irqrestore(&s390_domain->list_lock, flags);
@@ -115,9 +400,7 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
return -EINVAL;
if (zdev->s390_domain)
- __s390_iommu_detach_device(zdev);
- else if (zdev->dma_table)
- zpci_dma_exit_device(zdev);
+ s390_iommu_detach_device(&zdev->s390_domain->domain, dev);
cc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
virt_to_phys(s390_domain->dma_table), &status);
@@ -127,7 +410,6 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
*/
if (cc && status != ZPCI_PCI_ST_FUNC_NOT_AVAIL)
return -EIO;
- zdev->dma_table = s390_domain->dma_table;
zdev->dma_table = s390_domain->dma_table;
zdev->s390_domain = s390_domain;
@@ -139,31 +421,6 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
return 0;
}
-/*
- * Switch control over the IOMMU to S390's internal dma_api ops
- */
-static int s390_iommu_platform_attach(struct iommu_domain *platform_domain,
- struct device *dev)
-{
- struct zpci_dev *zdev = to_zpci_dev(dev);
-
- if (!zdev->s390_domain)
- return 0;
-
- __s390_iommu_detach_device(zdev);
- zpci_dma_init_device(zdev);
- return 0;
-}
-
-static struct iommu_domain_ops s390_iommu_platform_ops = {
- .attach_dev = s390_iommu_platform_attach,
-};
-
-static struct iommu_domain s390_iommu_platform_domain = {
- .type = IOMMU_DOMAIN_PLATFORM,
- .ops = &s390_iommu_platform_ops,
-};
-
static void s390_iommu_get_resv_regions(struct device *dev,
struct list_head *list)
{
@@ -216,7 +473,7 @@ static void s390_iommu_release_device(struct device *dev)
* to the device, but keep it attached to other devices in the group.
*/
if (zdev)
- __s390_iommu_detach_device(zdev);
+ s390_iommu_detach_device(&zdev->s390_domain->domain, dev);
}
static int zpci_refresh_all(struct zpci_dev *zdev)
@@ -232,6 +489,7 @@ static void s390_iommu_flush_iotlb_all(struct iommu_domain *domain)
rcu_read_lock();
list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
+ atomic64_inc(&s390_domain->ctrs.global_rpcits);
zpci_refresh_all(zdev);
}
rcu_read_unlock();
@@ -250,6 +508,7 @@ static void s390_iommu_iotlb_sync(struct iommu_domain *domain,
rcu_read_lock();
list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
+ atomic64_inc(&s390_domain->ctrs.sync_rpcits);
zpci_refresh_trans((u64)zdev->fh << 32, gather->start,
size);
}
@@ -267,6 +526,7 @@ static int s390_iommu_iotlb_sync_map(struct iommu_domain *domain,
list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
if (!zdev->tlb_refresh)
continue;
+ atomic64_inc(&s390_domain->ctrs.sync_map_rpcits);
ret = zpci_refresh_trans((u64)zdev->fh << 32,
iova, size);
/*
@@ -361,16 +621,15 @@ static int s390_iommu_map_pages(struct iommu_domain *domain,
if (!IS_ALIGNED(iova | paddr, pgsize))
return -EINVAL;
- if (!(prot & IOMMU_READ))
- return -EINVAL;
-
if (!(prot & IOMMU_WRITE))
flags |= ZPCI_TABLE_PROTECTED;
rc = s390_iommu_validate_trans(s390_domain, paddr, iova,
- pgcount, flags, gfp);
- if (!rc)
+ pgcount, flags, gfp);
+ if (!rc) {
*mapped = size;
+ atomic64_add(pgcount, &s390_domain->ctrs.mapped_pages);
+ }
return rc;
}
@@ -426,12 +685,26 @@ static size_t s390_iommu_unmap_pages(struct iommu_domain *domain,
return 0;
iommu_iotlb_gather_add_range(gather, iova, size);
+ atomic64_add(pgcount, &s390_domain->ctrs.unmapped_pages);
return size;
}
+static void s390_iommu_probe_finalize(struct device *dev)
+{
+ iommu_setup_dma_ops(dev, 0, U64_MAX);
+}
+
+struct zpci_iommu_ctrs *zpci_get_iommu_ctrs(struct zpci_dev *zdev)
+{
+ if (!zdev || !zdev->s390_domain)
+ return NULL;
+ return &zdev->s390_domain->ctrs;
+}
+
int zpci_init_iommu(struct zpci_dev *zdev)
{
+ u64 aperture_size;
int rc = 0;
rc = iommu_device_sysfs_add(&zdev->iommu_dev, NULL, NULL,
@@ -443,6 +716,12 @@ int zpci_init_iommu(struct zpci_dev *zdev)
if (rc)
goto out_sysfs;
+ zdev->start_dma = PAGE_ALIGN(zdev->start_dma);
+ aperture_size = min3(s390_iommu_aperture,
+ ZPCI_TABLE_SIZE_RT - zdev->start_dma,
+ zdev->end_dma - zdev->start_dma + 1);
+ zdev->end_dma = zdev->start_dma + aperture_size - 1;
+
return 0;
out_sysfs:
@@ -458,11 +737,50 @@ void zpci_destroy_iommu(struct zpci_dev *zdev)
iommu_device_sysfs_remove(&zdev->iommu_dev);
}
+static int __init s390_iommu_setup(char *str)
+{
+ if (!strcmp(str, "strict")) {
+ pr_warn("s390_iommu=strict deprecated; use iommu.strict=1 instead\n");
+ iommu_set_dma_strict();
+ }
+ return 1;
+}
+
+__setup("s390_iommu=", s390_iommu_setup);
+
+static int __init s390_iommu_aperture_setup(char *str)
+{
+ if (kstrtou32(str, 10, &s390_iommu_aperture_factor))
+ s390_iommu_aperture_factor = 1;
+ return 1;
+}
+
+__setup("s390_iommu_aperture=", s390_iommu_aperture_setup);
+
+static int __init s390_iommu_init(void)
+{
+ int rc;
+
+ iommu_dma_forcedac = true;
+ s390_iommu_aperture = (u64)virt_to_phys(high_memory);
+ if (!s390_iommu_aperture_factor)
+ s390_iommu_aperture = ULONG_MAX;
+ else
+ s390_iommu_aperture *= s390_iommu_aperture_factor;
+
+ rc = dma_alloc_cpu_table_caches();
+ if (rc)
+ return rc;
+
+ return rc;
+}
+subsys_initcall(s390_iommu_init);
+
static const struct iommu_ops s390_iommu_ops = {
- .default_domain = &s390_iommu_platform_domain,
.capable = s390_iommu_capable,
.domain_alloc_paging = s390_domain_alloc_paging,
.probe_device = s390_iommu_probe_device,
+ .probe_finalize = s390_iommu_probe_finalize,
.release_device = s390_iommu_release_device,
.device_group = generic_device_group,
.pgsize_bitmap = SZ_4K,