Diffstat (limited to 'drivers/iommu')
24 files changed, 625 insertions, 439 deletions
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h index 0c35018239ce..e2857109e966 100644 --- a/drivers/iommu/amd/amd_iommu.h +++ b/drivers/iommu/amd/amd_iommu.h @@ -12,13 +12,14 @@ #include "amd_iommu_types.h" irqreturn_t amd_iommu_int_thread(int irq, void *data); +irqreturn_t amd_iommu_int_thread_evtlog(int irq, void *data); +irqreturn_t amd_iommu_int_thread_pprlog(int irq, void *data); +irqreturn_t amd_iommu_int_thread_galog(int irq, void *data); irqreturn_t amd_iommu_int_handler(int irq, void *data); void amd_iommu_apply_erratum_63(struct amd_iommu *iommu, u16 devid); void amd_iommu_restart_event_logging(struct amd_iommu *iommu); void amd_iommu_restart_ga_log(struct amd_iommu *iommu); -int amd_iommu_init_devices(void); -void amd_iommu_uninit_devices(void); -void amd_iommu_init_notifier(void); +void amd_iommu_restart_ppr_log(struct amd_iommu *iommu); void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid); #ifdef CONFIG_AMD_IOMMU_DEBUGFS diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h index dc1db6167927..7dc30c2b56b3 100644 --- a/drivers/iommu/amd/amd_iommu_types.h +++ b/drivers/iommu/amd/amd_iommu_types.h @@ -120,10 +120,13 @@ #define PASID_MASK 0x0000ffff /* MMIO status bits */ -#define MMIO_STATUS_EVT_OVERFLOW_INT_MASK BIT(0) +#define MMIO_STATUS_EVT_OVERFLOW_MASK BIT(0) #define MMIO_STATUS_EVT_INT_MASK BIT(1) #define MMIO_STATUS_COM_WAIT_INT_MASK BIT(2) +#define MMIO_STATUS_EVT_RUN_MASK BIT(3) +#define MMIO_STATUS_PPR_OVERFLOW_MASK BIT(5) #define MMIO_STATUS_PPR_INT_MASK BIT(6) +#define MMIO_STATUS_PPR_RUN_MASK BIT(7) #define MMIO_STATUS_GALOG_RUN_MASK BIT(8) #define MMIO_STATUS_GALOG_OVERFLOW_MASK BIT(9) #define MMIO_STATUS_GALOG_INT_MASK BIT(10) @@ -381,15 +384,15 @@ */ #define DTE_FLAG_V BIT_ULL(0) #define DTE_FLAG_TV BIT_ULL(1) +#define DTE_FLAG_GIOV BIT_ULL(54) +#define DTE_FLAG_GV BIT_ULL(55) +#define DTE_GLX_SHIFT (56) +#define DTE_GLX_MASK (3) #define DTE_FLAG_IR BIT_ULL(61) #define DTE_FLAG_IW BIT_ULL(62) #define DTE_FLAG_IOTLB BIT_ULL(32) -#define DTE_FLAG_GIOV BIT_ULL(54) -#define DTE_FLAG_GV BIT_ULL(55) #define DTE_FLAG_MASK (0x3ffULL << 32) -#define DTE_GLX_SHIFT (56) -#define DTE_GLX_MASK (3) #define DEV_DOMID_MASK 0xffffULL #define DTE_GCR3_VAL_A(x) (((x) >> 12) & 0x00007ULL) @@ -702,12 +705,21 @@ struct amd_iommu { /* event buffer virtual address */ u8 *evt_buf; + /* Name for event log interrupt */ + unsigned char evt_irq_name[16]; + /* Base of the PPR log, if present */ u8 *ppr_log; + /* Name for PPR log interrupt */ + unsigned char ppr_irq_name[16]; + /* Base of the GA log, if present */ u8 *ga_log; + /* Name for GA log interrupt */ + unsigned char ga_irq_name[16]; + /* Tail of the GA log, if present */ u8 *ga_log_tail; diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c index ea0f1ab94178..45efb7e5d725 100644 --- a/drivers/iommu/amd/init.c +++ b/drivers/iommu/amd/init.c @@ -483,6 +483,10 @@ static void iommu_disable(struct amd_iommu *iommu) iommu_feature_disable(iommu, CONTROL_GALOG_EN); iommu_feature_disable(iommu, CONTROL_GAINT_EN); + /* Disable IOMMU PPR logging */ + iommu_feature_disable(iommu, CONTROL_PPRLOG_EN); + iommu_feature_disable(iommu, CONTROL_PPRINT_EN); + /* Disable IOMMU hardware itself */ iommu_feature_disable(iommu, CONTROL_IOMMU_EN); @@ -753,37 +757,61 @@ static int __init alloc_command_buffer(struct amd_iommu *iommu) } /* + * Interrupt handler has processed all pending events and adjusted head + * and tail pointer. 
Reset overflow mask and restart logging again. + */ +static void amd_iommu_restart_log(struct amd_iommu *iommu, const char *evt_type, + u8 cntrl_intr, u8 cntrl_log, + u32 status_run_mask, u32 status_overflow_mask) +{ + u32 status; + + status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); + if (status & status_run_mask) + return; + + pr_info_ratelimited("IOMMU %s log restarting\n", evt_type); + + iommu_feature_disable(iommu, cntrl_log); + iommu_feature_disable(iommu, cntrl_intr); + + writel(status_overflow_mask, iommu->mmio_base + MMIO_STATUS_OFFSET); + + iommu_feature_enable(iommu, cntrl_intr); + iommu_feature_enable(iommu, cntrl_log); +} + +/* * This function restarts event logging in case the IOMMU experienced * an event log buffer overflow. */ void amd_iommu_restart_event_logging(struct amd_iommu *iommu) { - iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN); - iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN); + amd_iommu_restart_log(iommu, "Event", CONTROL_EVT_INT_EN, + CONTROL_EVT_LOG_EN, MMIO_STATUS_EVT_RUN_MASK, + MMIO_STATUS_EVT_OVERFLOW_MASK); } /* * This function restarts event logging in case the IOMMU experienced - * an GA log overflow. + * GA log overflow. */ void amd_iommu_restart_ga_log(struct amd_iommu *iommu) { - u32 status; - - status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); - if (status & MMIO_STATUS_GALOG_RUN_MASK) - return; - - pr_info_ratelimited("IOMMU GA Log restarting\n"); - - iommu_feature_disable(iommu, CONTROL_GALOG_EN); - iommu_feature_disable(iommu, CONTROL_GAINT_EN); - - writel(MMIO_STATUS_GALOG_OVERFLOW_MASK, - iommu->mmio_base + MMIO_STATUS_OFFSET); + amd_iommu_restart_log(iommu, "GA", CONTROL_GAINT_EN, + CONTROL_GALOG_EN, MMIO_STATUS_GALOG_RUN_MASK, + MMIO_STATUS_GALOG_OVERFLOW_MASK); +} - iommu_feature_enable(iommu, CONTROL_GAINT_EN); - iommu_feature_enable(iommu, CONTROL_GALOG_EN); +/* + * This function restarts ppr logging in case the IOMMU experienced + * PPR log overflow. + */ +void amd_iommu_restart_ppr_log(struct amd_iommu *iommu) +{ + amd_iommu_restart_log(iommu, "PPR", CONTROL_PPRINT_EN, + CONTROL_PPRLOG_EN, MMIO_STATUS_PPR_RUN_MASK, + MMIO_STATUS_PPR_OVERFLOW_MASK); } /* @@ -906,6 +934,8 @@ static void iommu_enable_ppr_log(struct amd_iommu *iommu) if (iommu->ppr_log == NULL) return; + iommu_feature_enable(iommu, CONTROL_PPR_EN); + entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512; memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET, @@ -916,7 +946,7 @@ static void iommu_enable_ppr_log(struct amd_iommu *iommu) writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); iommu_feature_enable(iommu, CONTROL_PPRLOG_EN); - iommu_feature_enable(iommu, CONTROL_PPR_EN); + iommu_feature_enable(iommu, CONTROL_PPRINT_EN); } static void __init free_ppr_log(struct amd_iommu *iommu) @@ -2311,6 +2341,7 @@ static int intcapxt_irqdomain_alloc(struct irq_domain *domain, unsigned int virq struct irq_data *irqd = irq_domain_get_irq_data(domain, i); irqd->chip = &intcapxt_controller; + irqd->hwirq = info->hwirq; irqd->chip_data = info->data; __irq_set_handler(i, handle_edge_irq, 0, "edge"); } @@ -2337,22 +2368,14 @@ static void intcapxt_unmask_irq(struct irq_data *irqd) xt.destid_0_23 = cfg->dest_apicid & GENMASK(23, 0); xt.destid_24_31 = cfg->dest_apicid >> 24; - /** - * Current IOMMU implementation uses the same IRQ for all - * 3 IOMMU interrupts. 
- */ - writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET); - writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET); - writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET); + writeq(xt.capxt, iommu->mmio_base + irqd->hwirq); } static void intcapxt_mask_irq(struct irq_data *irqd) { struct amd_iommu *iommu = irqd->chip_data; - writeq(0, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET); - writeq(0, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET); - writeq(0, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET); + writeq(0, iommu->mmio_base + irqd->hwirq); } @@ -2415,7 +2438,8 @@ static struct irq_domain *iommu_get_irqdomain(void) return iommu_irqdomain; } -static int iommu_setup_intcapxt(struct amd_iommu *iommu) +static int __iommu_setup_intcapxt(struct amd_iommu *iommu, const char *devname, + int hwirq, irq_handler_t thread_fn) { struct irq_domain *domain; struct irq_alloc_info info; @@ -2429,6 +2453,7 @@ static int iommu_setup_intcapxt(struct amd_iommu *iommu) init_irq_alloc_info(&info, NULL); info.type = X86_IRQ_ALLOC_TYPE_AMDVI; info.data = iommu; + info.hwirq = hwirq; irq = irq_domain_alloc_irqs(domain, 1, node, &info); if (irq < 0) { @@ -2437,7 +2462,7 @@ static int iommu_setup_intcapxt(struct amd_iommu *iommu) } ret = request_threaded_irq(irq, amd_iommu_int_handler, - amd_iommu_int_thread, 0, "AMD-Vi", iommu); + thread_fn, 0, devname, iommu); if (ret) { irq_domain_free_irqs(irq, 1); irq_domain_remove(domain); @@ -2447,6 +2472,37 @@ static int iommu_setup_intcapxt(struct amd_iommu *iommu) return 0; } +static int iommu_setup_intcapxt(struct amd_iommu *iommu) +{ + int ret; + + snprintf(iommu->evt_irq_name, sizeof(iommu->evt_irq_name), + "AMD-Vi%d-Evt", iommu->index); + ret = __iommu_setup_intcapxt(iommu, iommu->evt_irq_name, + MMIO_INTCAPXT_EVT_OFFSET, + amd_iommu_int_thread_evtlog); + if (ret) + return ret; + + snprintf(iommu->ppr_irq_name, sizeof(iommu->ppr_irq_name), + "AMD-Vi%d-PPR", iommu->index); + ret = __iommu_setup_intcapxt(iommu, iommu->ppr_irq_name, + MMIO_INTCAPXT_PPR_OFFSET, + amd_iommu_int_thread_pprlog); + if (ret) + return ret; + +#ifdef CONFIG_IRQ_REMAP + snprintf(iommu->ga_irq_name, sizeof(iommu->ga_irq_name), + "AMD-Vi%d-GA", iommu->index); + ret = __iommu_setup_intcapxt(iommu, iommu->ga_irq_name, + MMIO_INTCAPXT_GALOG_OFFSET, + amd_iommu_int_thread_galog); +#endif + + return ret; +} + static int iommu_init_irq(struct amd_iommu *iommu) { int ret; @@ -2472,8 +2528,6 @@ enable_faults: iommu_feature_enable(iommu, CONTROL_EVT_INT_EN); - if (iommu->ppr_log != NULL) - iommu_feature_enable(iommu, CONTROL_PPRINT_EN); return 0; } @@ -2889,8 +2943,6 @@ static void enable_iommus_vapic(void) static void enable_iommus(void) { early_enable_iommus(); - enable_iommus_vapic(); - enable_iommus_v2(); } static void disable_iommus(void) @@ -3154,6 +3206,13 @@ static int amd_iommu_enable_interrupts(void) goto out; } + /* + * Interrupt handler is ready to process interrupts. Enable + * PPR and GA log interrupt for all IOMMUs. + */ + enable_iommus_vapic(); + enable_iommus_v2(); + out: return ret; } @@ -3233,8 +3292,6 @@ static int __init state_next(void) register_syscore_ops(&amd_iommu_syscore_ops); ret = amd_iommu_init_pci(); init_state = ret ? 
IOMMU_INIT_ERROR : IOMMU_PCI_INIT; - enable_iommus_vapic(); - enable_iommus_v2(); break; case IOMMU_PCI_INIT: ret = amd_iommu_enable_interrupts(); diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index c3b58a8389b9..56b6cf8bf03f 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -841,50 +841,27 @@ static inline void amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) { } #endif /* !CONFIG_IRQ_REMAP */ -#define AMD_IOMMU_INT_MASK \ - (MMIO_STATUS_EVT_OVERFLOW_INT_MASK | \ - MMIO_STATUS_EVT_INT_MASK | \ - MMIO_STATUS_PPR_INT_MASK | \ - MMIO_STATUS_GALOG_OVERFLOW_MASK | \ - MMIO_STATUS_GALOG_INT_MASK) - -irqreturn_t amd_iommu_int_thread(int irq, void *data) +static void amd_iommu_handle_irq(void *data, const char *evt_type, + u32 int_mask, u32 overflow_mask, + void (*int_handler)(struct amd_iommu *), + void (*overflow_handler)(struct amd_iommu *)) { struct amd_iommu *iommu = (struct amd_iommu *) data; u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); + u32 mask = int_mask | overflow_mask; - while (status & AMD_IOMMU_INT_MASK) { + while (status & mask) { /* Enable interrupt sources again */ - writel(AMD_IOMMU_INT_MASK, - iommu->mmio_base + MMIO_STATUS_OFFSET); + writel(mask, iommu->mmio_base + MMIO_STATUS_OFFSET); - if (status & MMIO_STATUS_EVT_INT_MASK) { - pr_devel("Processing IOMMU Event Log\n"); - iommu_poll_events(iommu); + if (int_handler) { + pr_devel("Processing IOMMU (ivhd%d) %s Log\n", + iommu->index, evt_type); + int_handler(iommu); } - if (status & MMIO_STATUS_PPR_INT_MASK) { - pr_devel("Processing IOMMU PPR Log\n"); - iommu_poll_ppr_log(iommu); - } - -#ifdef CONFIG_IRQ_REMAP - if (status & (MMIO_STATUS_GALOG_INT_MASK | - MMIO_STATUS_GALOG_OVERFLOW_MASK)) { - pr_devel("Processing IOMMU GA Log\n"); - iommu_poll_ga_log(iommu); - } - - if (status & MMIO_STATUS_GALOG_OVERFLOW_MASK) { - pr_info_ratelimited("IOMMU GA Log overflow\n"); - amd_iommu_restart_ga_log(iommu); - } -#endif - - if (status & MMIO_STATUS_EVT_OVERFLOW_INT_MASK) { - pr_info_ratelimited("IOMMU event log overflow\n"); - amd_iommu_restart_event_logging(iommu); - } + if ((status & overflow_mask) && overflow_handler) + overflow_handler(iommu); /* * Hardware bug: ERBT1312 @@ -901,6 +878,43 @@ irqreturn_t amd_iommu_int_thread(int irq, void *data) */ status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); } +} + +irqreturn_t amd_iommu_int_thread_evtlog(int irq, void *data) +{ + amd_iommu_handle_irq(data, "Evt", MMIO_STATUS_EVT_INT_MASK, + MMIO_STATUS_EVT_OVERFLOW_MASK, + iommu_poll_events, amd_iommu_restart_event_logging); + + return IRQ_HANDLED; +} + +irqreturn_t amd_iommu_int_thread_pprlog(int irq, void *data) +{ + amd_iommu_handle_irq(data, "PPR", MMIO_STATUS_PPR_INT_MASK, + MMIO_STATUS_PPR_OVERFLOW_MASK, + iommu_poll_ppr_log, amd_iommu_restart_ppr_log); + + return IRQ_HANDLED; +} + +irqreturn_t amd_iommu_int_thread_galog(int irq, void *data) +{ +#ifdef CONFIG_IRQ_REMAP + amd_iommu_handle_irq(data, "GA", MMIO_STATUS_GALOG_INT_MASK, + MMIO_STATUS_GALOG_OVERFLOW_MASK, + iommu_poll_ga_log, amd_iommu_restart_ga_log); +#endif + + return IRQ_HANDLED; +} + +irqreturn_t amd_iommu_int_thread(int irq, void *data) +{ + amd_iommu_int_thread_evtlog(irq, data); + amd_iommu_int_thread_pprlog(irq, data); + amd_iommu_int_thread_galog(irq, data); + return IRQ_HANDLED; } diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c index 261352a23271..c5825e0a6770 100644 --- a/drivers/iommu/amd/iommu_v2.c +++ b/drivers/iommu/amd/iommu_v2.c @@ -262,8 +262,8 @@ 
static void put_pasid_state(struct pasid_state *pasid_state) static void put_pasid_state_wait(struct pasid_state *pasid_state) { - refcount_dec(&pasid_state->count); - wait_event(pasid_state->wq, !refcount_read(&pasid_state->count)); + if (!refcount_dec_and_test(&pasid_state->count)) + wait_event(pasid_state->wq, !refcount_read(&pasid_state->count)); free_pasid_state(pasid_state); } @@ -327,6 +327,9 @@ static void free_pasid_states(struct device_state *dev_state) put_pasid_state(pasid_state); + /* Clear the pasid state so that the pasid can be re-used */ + clear_pasid_state(dev_state, pasid_state->pasid); + /* * This will call the mn_release function and * unbind the PASID diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c index 8af64b57f048..2082081402d3 100644 --- a/drivers/iommu/apple-dart.c +++ b/drivers/iommu/apple-dart.c @@ -1276,7 +1276,7 @@ static __maybe_unused int apple_dart_resume(struct device *dev) return 0; } -DEFINE_SIMPLE_DEV_PM_OPS(apple_dart_pm_ops, apple_dart_suspend, apple_dart_resume); +static DEFINE_SIMPLE_DEV_PM_OPS(apple_dart_pm_ops, apple_dart_suspend, apple_dart_resume); static const struct of_device_id apple_dart_of_match[] = { { .compatible = "apple,t8103-dart", .data = &apple_dart_hw_t8103 }, diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index ee70687f060b..e82bf1c449a3 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -2055,24 +2055,6 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type) return &smmu_domain->domain; } -static int arm_smmu_bitmap_alloc(unsigned long *map, int span) -{ - int idx, size = 1 << span; - - do { - idx = find_first_zero_bit(map, size); - if (idx == size) - return -ENOSPC; - } while (test_and_set_bit(idx, map)); - - return idx; -} - -static void arm_smmu_bitmap_free(unsigned long *map, int idx) -{ - clear_bit(idx, map); -} - static void arm_smmu_domain_free(struct iommu_domain *domain) { struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); @@ -2093,7 +2075,7 @@ static void arm_smmu_domain_free(struct iommu_domain *domain) } else { struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg; if (cfg->vmid) - arm_smmu_bitmap_free(smmu->vmid_map, cfg->vmid); + ida_free(&smmu->vmid_map, cfg->vmid); } kfree(smmu_domain); @@ -2167,7 +2149,9 @@ static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain, struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg; typeof(&pgtbl_cfg->arm_lpae_s2_cfg.vtcr) vtcr; - vmid = arm_smmu_bitmap_alloc(smmu->vmid_map, smmu->vmid_bits); + /* Reserve VMID 0 for stage-2 bypass STEs */ + vmid = ida_alloc_range(&smmu->vmid_map, 1, (1 << smmu->vmid_bits) - 1, + GFP_KERNEL); if (vmid < 0) return vmid; @@ -3098,8 +3082,8 @@ static int arm_smmu_init_strtab(struct arm_smmu_device *smmu) reg |= STRTAB_BASE_RA; smmu->strtab_cfg.strtab_base = reg; - /* Allocate the first VMID for stage-2 bypass STEs */ - set_bit(0, smmu->vmid_map); + ida_init(&smmu->vmid_map); + return 0; } @@ -3923,6 +3907,7 @@ static void arm_smmu_device_remove(struct platform_device *pdev) iommu_device_sysfs_remove(&smmu->iommu); arm_smmu_device_disable(smmu); iopf_queue_free(smmu->evtq.iopf); + ida_destroy(&smmu->vmid_map); } static void arm_smmu_device_shutdown(struct platform_device *pdev) diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h index dcab85698a4e..9915850dd4db 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h +++ 
b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h @@ -670,7 +670,7 @@ struct arm_smmu_device { #define ARM_SMMU_MAX_VMIDS (1 << 16) unsigned int vmid_bits; - DECLARE_BITMAP(vmid_map, ARM_SMMU_MAX_VMIDS); + struct ida vmid_map; unsigned int ssid_bits; unsigned int sid_bits; diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c index b5b14108e086..bb89d49adf8d 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c @@ -3,7 +3,7 @@ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. */ -#include <linux/of_device.h> +#include <linux/device.h> #include <linux/firmware/qcom/qcom_scm.h> #include <linux/ratelimit.h> diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c index c71afda79d64..7f52ac67495f 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c @@ -251,10 +251,12 @@ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = { { .compatible = "qcom,sc7280-mss-pil" }, { .compatible = "qcom,sc8180x-mdss" }, { .compatible = "qcom,sc8280xp-mdss" }, - { .compatible = "qcom,sm8150-mdss" }, - { .compatible = "qcom,sm8250-mdss" }, { .compatible = "qcom,sdm845-mdss" }, { .compatible = "qcom,sdm845-mss-pil" }, + { .compatible = "qcom,sm6350-mdss" }, + { .compatible = "qcom,sm6375-mdss" }, + { .compatible = "qcom,sm8150-mdss" }, + { .compatible = "qcom,sm8250-mdss" }, { } }; @@ -528,6 +530,7 @@ static const struct of_device_id __maybe_unused qcom_smmu_impl_of_match[] = { { .compatible = "qcom,sm6125-smmu-500", .data = &qcom_smmu_500_impl0_data }, { .compatible = "qcom,sm6350-smmu-v2", .data = &qcom_smmu_v2_data }, { .compatible = "qcom,sm6350-smmu-500", .data = &qcom_smmu_500_impl0_data }, + { .compatible = "qcom,sm6375-smmu-v2", .data = &qcom_smmu_v2_data }, { .compatible = "qcom,sm6375-smmu-500", .data = &qcom_smmu_500_impl0_data }, { .compatible = "qcom,sm8150-smmu-500", .data = &qcom_smmu_500_impl0_data }, { .compatible = "qcom,sm8250-smmu-500", .data = &qcom_smmu_500_impl0_data }, diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c index a86acd76c1df..d6d1a2a55cc0 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c @@ -29,7 +29,6 @@ #include <linux/module.h> #include <linux/of.h> #include <linux/of_address.h> -#include <linux/of_device.h> #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> diff --git a/drivers/iommu/arm/arm-smmu/qcom_iommu.c b/drivers/iommu/arm/arm-smmu/qcom_iommu.c index a503ed758ec3..775a3cbaff4e 100644 --- a/drivers/iommu/arm/arm-smmu/qcom_iommu.c +++ b/drivers/iommu/arm/arm-smmu/qcom_iommu.c @@ -22,8 +22,7 @@ #include <linux/init.h> #include <linux/mutex.h> #include <linux/of.h> -#include <linux/of_address.h> -#include <linux/of_device.h> +#include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/pm.h> #include <linux/pm_runtime.h> @@ -51,14 +50,15 @@ struct qcom_iommu_dev { struct clk_bulk_data clks[CLK_NUM]; void __iomem *local_base; u32 sec_id; - u8 num_ctxs; - struct qcom_iommu_ctx *ctxs[]; /* indexed by asid-1 */ + u8 max_asid; + struct qcom_iommu_ctx *ctxs[]; /* indexed by asid */ }; struct qcom_iommu_ctx { struct device *dev; void __iomem *base; bool secure_init; + bool secured_ctx; u8 asid; /* asid and ctx bank # are 1:1 */ struct iommu_domain *domain; }; @@ -94,7 +94,7 @@ 
static struct qcom_iommu_ctx * to_ctx(struct qcom_iommu_domain *d, unsigned asid struct qcom_iommu_dev *qcom_iommu = d->iommu; if (!qcom_iommu) return NULL; - return qcom_iommu->ctxs[asid - 1]; + return qcom_iommu->ctxs[asid]; } static inline void @@ -273,6 +273,19 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain, ctx->secure_init = true; } + /* Secured QSMMU-500/QSMMU-v2 contexts cannot be programmed */ + if (ctx->secured_ctx) { + ctx->domain = domain; + continue; + } + + /* Disable context bank before programming */ + iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0); + + /* Clear context bank fault address fault status registers */ + iommu_writel(ctx, ARM_SMMU_CB_FAR, 0); + iommu_writel(ctx, ARM_SMMU_CB_FSR, ARM_SMMU_FSR_FAULT); + /* TTBRs */ iommu_writeq(ctx, ARM_SMMU_CB_TTBR0, pgtbl_cfg.arm_lpae_s1_cfg.ttbr | @@ -527,11 +540,10 @@ static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args) qcom_iommu = platform_get_drvdata(iommu_pdev); /* make sure the asid specified in dt is valid, so we don't have - * to sanity check this elsewhere, since 'asid - 1' is used to - * index into qcom_iommu->ctxs: + * to sanity check this elsewhere: */ - if (WARN_ON(asid < 1) || - WARN_ON(asid > qcom_iommu->num_ctxs)) { + if (WARN_ON(asid > qcom_iommu->max_asid) || + WARN_ON(qcom_iommu->ctxs[asid] == NULL)) { put_device(&iommu_pdev->dev); return -EINVAL; } @@ -617,7 +629,8 @@ free_mem: static int get_asid(const struct device_node *np) { - u32 reg; + u32 reg, val; + int asid; /* read the "reg" property directly to get the relative address * of the context bank, and calculate the asid from that: @@ -625,7 +638,17 @@ static int get_asid(const struct device_node *np) if (of_property_read_u32_index(np, "reg", 0, ®)) return -ENODEV; - return reg / 0x1000; /* context banks are 0x1000 apart */ + /* + * Context banks are 0x1000 apart but, in some cases, the ASID + * number doesn't match to this logic and needs to be passed + * from the DT configuration explicitly. 
+ */ + if (!of_property_read_u32(np, "qcom,ctx-asid", &val)) + asid = val; + else + asid = reg / 0x1000; + + return asid; } static int qcom_iommu_ctx_probe(struct platform_device *pdev) @@ -633,7 +656,6 @@ static int qcom_iommu_ctx_probe(struct platform_device *pdev) struct qcom_iommu_ctx *ctx; struct device *dev = &pdev->dev; struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev->parent); - struct resource *res; int ret, irq; ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); @@ -643,19 +665,22 @@ static int qcom_iommu_ctx_probe(struct platform_device *pdev) ctx->dev = dev; platform_set_drvdata(pdev, ctx); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - ctx->base = devm_ioremap_resource(dev, res); + ctx->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(ctx->base)) return PTR_ERR(ctx->base); irq = platform_get_irq(pdev, 0); if (irq < 0) - return -ENODEV; + return irq; + + if (of_device_is_compatible(dev->of_node, "qcom,msm-iommu-v2-sec")) + ctx->secured_ctx = true; /* clear IRQs before registering fault handler, just in case the * boot-loader left us a surprise: */ - iommu_writel(ctx, ARM_SMMU_CB_FSR, iommu_readl(ctx, ARM_SMMU_CB_FSR)); + if (!ctx->secured_ctx) + iommu_writel(ctx, ARM_SMMU_CB_FSR, iommu_readl(ctx, ARM_SMMU_CB_FSR)); ret = devm_request_irq(dev, irq, qcom_iommu_fault, @@ -677,7 +702,7 @@ static int qcom_iommu_ctx_probe(struct platform_device *pdev) dev_dbg(dev, "found asid %u\n", ctx->asid); - qcom_iommu->ctxs[ctx->asid - 1] = ctx; + qcom_iommu->ctxs[ctx->asid] = ctx; return 0; } @@ -689,12 +714,14 @@ static void qcom_iommu_ctx_remove(struct platform_device *pdev) platform_set_drvdata(pdev, NULL); - qcom_iommu->ctxs[ctx->asid - 1] = NULL; + qcom_iommu->ctxs[ctx->asid] = NULL; } static const struct of_device_id ctx_of_match[] = { { .compatible = "qcom,msm-iommu-v1-ns" }, { .compatible = "qcom,msm-iommu-v1-sec" }, + { .compatible = "qcom,msm-iommu-v2-ns" }, + { .compatible = "qcom,msm-iommu-v2-sec" }, { /* sentinel */ } }; @@ -712,7 +739,8 @@ static bool qcom_iommu_has_secure_context(struct qcom_iommu_dev *qcom_iommu) struct device_node *child; for_each_child_of_node(qcom_iommu->dev->of_node, child) { - if (of_device_is_compatible(child, "qcom,msm-iommu-v1-sec")) { + if (of_device_is_compatible(child, "qcom,msm-iommu-v1-sec") || + of_device_is_compatible(child, "qcom,msm-iommu-v2-sec")) { of_node_put(child); return true; } @@ -736,11 +764,11 @@ static int qcom_iommu_device_probe(struct platform_device *pdev) for_each_child_of_node(dev->of_node, child) max_asid = max(max_asid, get_asid(child)); - qcom_iommu = devm_kzalloc(dev, struct_size(qcom_iommu, ctxs, max_asid), + qcom_iommu = devm_kzalloc(dev, struct_size(qcom_iommu, ctxs, max_asid + 1), GFP_KERNEL); if (!qcom_iommu) return -ENOMEM; - qcom_iommu->num_ctxs = max_asid; + qcom_iommu->max_asid = max_asid; qcom_iommu->dev = dev; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -856,6 +884,7 @@ static const struct dev_pm_ops qcom_iommu_pm_ops = { static const struct of_device_id qcom_iommu_of_match[] = { { .compatible = "qcom,msm-iommu-v1" }, + { .compatible = "qcom,msm-iommu-v2" }, { /* sentinel */ } }; diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index e57724163835..4b1a88f514c9 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -660,7 +660,7 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain, { struct iommu_dma_cookie *cookie = domain->iova_cookie; struct iova_domain *iovad = &cookie->iovad; - unsigned long shift, iova_len, iova = 
0; + unsigned long shift, iova_len, iova; if (cookie->type == IOMMU_DMA_MSI_COOKIE) { cookie->msi_iova += size; @@ -675,15 +675,29 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain, if (domain->geometry.force_aperture) dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end); - /* Try to get PCI devices a SAC address */ - if (dma_limit > DMA_BIT_MASK(32) && !iommu_dma_forcedac && dev_is_pci(dev)) + /* + * Try to use all the 32-bit PCI addresses first. The original SAC vs. + * DAC reasoning loses relevance with PCIe, but enough hardware and + * firmware bugs are still lurking out there that it's safest not to + * venture into the 64-bit space until necessary. + * + * If your device goes wrong after seeing the notice then likely either + * its driver is not setting DMA masks accurately, the hardware has + * some inherent bug in handling >32-bit addresses, or not all the + * expected address bits are wired up between the device and the IOMMU. + */ + if (dma_limit > DMA_BIT_MASK(32) && dev->iommu->pci_32bit_workaround) { iova = alloc_iova_fast(iovad, iova_len, DMA_BIT_MASK(32) >> shift, false); + if (iova) + goto done; - if (!iova) - iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift, - true); + dev->iommu->pci_32bit_workaround = false; + dev_notice(dev, "Using %d-bit DMA addresses\n", bits_per(dma_limit)); + } + iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift, true); +done: return (dma_addr_t)iova << shift; } diff --git a/drivers/iommu/dma-iommu.h b/drivers/iommu/dma-iommu.h index 942790009292..c829f1f82a99 100644 --- a/drivers/iommu/dma-iommu.h +++ b/drivers/iommu/dma-iommu.h @@ -17,6 +17,10 @@ int iommu_dma_init_fq(struct iommu_domain *domain); void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list); extern bool iommu_dma_forcedac; +static inline void iommu_dma_set_pci_32bit_workaround(struct device *dev) +{ + dev->iommu->pci_32bit_workaround = !iommu_dma_forcedac; +} #else /* CONFIG_IOMMU_DMA */ @@ -38,5 +42,9 @@ static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_he { } +static inline void iommu_dma_set_pci_32bit_workaround(struct device *dev) +{ +} + #endif /* CONFIG_IOMMU_DMA */ #endif /* __DMA_IOMMU_H */ diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index dbf9270e565e..dd8ff358867d 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -3749,7 +3749,6 @@ static int __init probe_acpi_namespace_devices(void) for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) { struct acpi_device_physical_node *pn; - struct iommu_group *group; struct acpi_device *adev; if (dev->bus != &acpi_bus_type) @@ -3759,12 +3758,6 @@ static int __init probe_acpi_namespace_devices(void) mutex_lock(&adev->physical_node_lock); list_for_each_entry(pn, &adev->physical_node_list, node) { - group = iommu_group_get(pn->dev); - if (group) { - iommu_group_put(group); - continue; - } - ret = iommu_probe_device(pn->dev); if (ret) break; diff --git a/drivers/iommu/iommu-sysfs.c b/drivers/iommu/iommu-sysfs.c index 99869217fbec..cbe378c34ba3 100644 --- a/drivers/iommu/iommu-sysfs.c +++ b/drivers/iommu/iommu-sysfs.c @@ -107,9 +107,6 @@ int iommu_device_link(struct iommu_device *iommu, struct device *link) { int ret; - if (!iommu || IS_ERR(iommu)) - return -ENODEV; - ret = sysfs_add_link_to_group(&iommu->dev->kobj, "devices", &link->kobj, dev_name(link)); if (ret) @@ -122,14 +119,9 @@ int iommu_device_link(struct iommu_device *iommu, struct device *link) return ret; } 
-EXPORT_SYMBOL_GPL(iommu_device_link); void iommu_device_unlink(struct iommu_device *iommu, struct device *link) { - if (!iommu || IS_ERR(iommu)) - return; - sysfs_remove_link(&link->kobj, "iommu"); sysfs_remove_link_from_group(&iommu->dev->kobj, "devices", dev_name(link)); } -EXPORT_SYMBOL_GPL(iommu_device_unlink); diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index e385a99e25e1..39601fbfd0e0 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -128,9 +128,12 @@ static int iommu_setup_default_domain(struct iommu_group *group, int target_type); static int iommu_create_device_direct_mappings(struct iommu_domain *domain, struct device *dev); -static struct iommu_group *iommu_group_get_for_dev(struct device *dev); static ssize_t iommu_group_store_type(struct iommu_group *group, const char *buf, size_t count); +static struct group_device *iommu_group_alloc_device(struct iommu_group *group, + struct device *dev); +static void __iommu_group_free_device(struct iommu_group *group, + struct group_device *grp_dev); #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \ struct iommu_group_attribute iommu_group_attr_##_name = \ @@ -334,28 +337,18 @@ static u32 dev_iommu_get_max_pasids(struct device *dev) return min_t(u32, max_pasids, dev->iommu->iommu_dev->max_pasids); } -static int __iommu_probe_device(struct device *dev, struct list_head *group_list) +/* + * Init the dev->iommu and dev->iommu_group in the struct device and get the + * driver probed + */ +static int iommu_init_device(struct device *dev, const struct iommu_ops *ops) { - const struct iommu_ops *ops = dev->bus->iommu_ops; struct iommu_device *iommu_dev; struct iommu_group *group; - static DEFINE_MUTEX(iommu_probe_device_lock); int ret; - if (!ops) - return -ENODEV; - /* - * Serialise to avoid races between IOMMU drivers registering in - * parallel and/or the "replay" calls from ACPI/OF code via client - * driver probe. Once the latter have been cleaned up we should - * probably be able to use device_lock() here to minimise the scope, - * but for now enforcing a simple global ordering is fine. 
- */ - mutex_lock(&iommu_probe_device_lock); - if (!dev_iommu_get(dev)) { - ret = -ENOMEM; - goto err_unlock; - } + if (!dev_iommu_get(dev)) + return -ENOMEM; if (!try_module_get(ops->owner)) { ret = -EINVAL; @@ -365,124 +358,184 @@ static int __iommu_probe_device(struct device *dev, struct list_head *group_list iommu_dev = ops->probe_device(dev); if (IS_ERR(iommu_dev)) { ret = PTR_ERR(iommu_dev); - goto out_module_put; + goto err_module_put; } - dev->iommu->iommu_dev = iommu_dev; - dev->iommu->max_pasids = dev_iommu_get_max_pasids(dev); - if (ops->is_attach_deferred) - dev->iommu->attach_deferred = ops->is_attach_deferred(dev); + ret = iommu_device_link(iommu_dev, dev); + if (ret) + goto err_release; - group = iommu_group_get_for_dev(dev); + group = ops->device_group(dev); + if (WARN_ON_ONCE(group == NULL)) + group = ERR_PTR(-EINVAL); if (IS_ERR(group)) { ret = PTR_ERR(group); - goto out_release; + goto err_unlink; } + dev->iommu_group = group; - mutex_lock(&group->mutex); - if (group_list && !group->default_domain && list_empty(&group->entry)) - list_add_tail(&group->entry, group_list); - mutex_unlock(&group->mutex); - iommu_group_put(group); - - mutex_unlock(&iommu_probe_device_lock); - iommu_device_link(iommu_dev, dev); - + dev->iommu->iommu_dev = iommu_dev; + dev->iommu->max_pasids = dev_iommu_get_max_pasids(dev); + if (ops->is_attach_deferred) + dev->iommu->attach_deferred = ops->is_attach_deferred(dev); return 0; -out_release: +err_unlink: + iommu_device_unlink(iommu_dev, dev); +err_release: if (ops->release_device) ops->release_device(dev); - -out_module_put: +err_module_put: module_put(ops->owner); - err_free: dev_iommu_free(dev); + return ret; +} -err_unlock: - mutex_unlock(&iommu_probe_device_lock); +static void iommu_deinit_device(struct device *dev) +{ + struct iommu_group *group = dev->iommu_group; + const struct iommu_ops *ops = dev_iommu_ops(dev); - return ret; + lockdep_assert_held(&group->mutex); + + iommu_device_unlink(dev->iommu->iommu_dev, dev); + + /* + * release_device() must stop using any attached domain on the device. + * If there are still other devices in the group they are not effected + * by this callback. + * + * The IOMMU driver must set the device to either an identity or + * blocking translation and stop using any domain pointer, as it is + * going to be freed. + */ + if (ops->release_device) + ops->release_device(dev); + + /* + * If this is the last driver to use the group then we must free the + * domains before we do the module_put(). + */ + if (list_empty(&group->devices)) { + if (group->default_domain) { + iommu_domain_free(group->default_domain); + group->default_domain = NULL; + } + if (group->blocking_domain) { + iommu_domain_free(group->blocking_domain); + group->blocking_domain = NULL; + } + group->domain = NULL; + } + + /* Caller must put iommu_group */ + dev->iommu_group = NULL; + module_put(ops->owner); + dev_iommu_free(dev); } -int iommu_probe_device(struct device *dev) +static int __iommu_probe_device(struct device *dev, struct list_head *group_list) { - const struct iommu_ops *ops; + const struct iommu_ops *ops = dev->bus->iommu_ops; struct iommu_group *group; + static DEFINE_MUTEX(iommu_probe_device_lock); + struct group_device *gdev; int ret; - ret = __iommu_probe_device(dev, NULL); - if (ret) - goto err_out; + if (!ops) + return -ENODEV; + /* + * Serialise to avoid races between IOMMU drivers registering in + * parallel and/or the "replay" calls from ACPI/OF code via client + * driver probe. 
Once the latter have been cleaned up we should + * probably be able to use device_lock() here to minimise the scope, + * but for now enforcing a simple global ordering is fine. + */ + mutex_lock(&iommu_probe_device_lock); - group = iommu_group_get(dev); - if (!group) { - ret = -ENODEV; - goto err_release; + /* Device is probed already if in a group */ + if (dev->iommu_group) { + ret = 0; + goto out_unlock; } + ret = iommu_init_device(dev, ops); + if (ret) + goto out_unlock; + + group = dev->iommu_group; + gdev = iommu_group_alloc_device(group, dev); mutex_lock(&group->mutex); + if (IS_ERR(gdev)) { + ret = PTR_ERR(gdev); + goto err_put_group; + } + /* + * The gdev must be in the list before calling + * iommu_setup_default_domain() + */ + list_add_tail(&gdev->list, &group->devices); + WARN_ON(group->default_domain && !group->domain); if (group->default_domain) iommu_create_device_direct_mappings(group->default_domain, dev); - if (group->domain) { ret = __iommu_device_set_domain(group, dev, group->domain, 0); if (ret) - goto err_unlock; - } else if (!group->default_domain) { + goto err_remove_gdev; + } else if (!group->default_domain && !group_list) { ret = iommu_setup_default_domain(group, 0); if (ret) - goto err_unlock; + goto err_remove_gdev; + } else if (!group->default_domain) { + /* + * With a group_list argument we defer the default_domain setup + * to the caller by providing a de-duplicated list of groups + * that need further setup. + */ + if (list_empty(&group->entry)) + list_add_tail(&group->entry, group_list); } - mutex_unlock(&group->mutex); - iommu_group_put(group); + mutex_unlock(&iommu_probe_device_lock); - ops = dev_iommu_ops(dev); - if (ops->probe_finalize) - ops->probe_finalize(dev); + if (dev_is_pci(dev)) + iommu_dma_set_pci_32bit_workaround(dev); return 0; -err_unlock: +err_remove_gdev: + list_del(&gdev->list); + __iommu_group_free_device(group, gdev); +err_put_group: + iommu_deinit_device(dev); mutex_unlock(&group->mutex); iommu_group_put(group); -err_release: - iommu_release_device(dev); +out_unlock: + mutex_unlock(&iommu_probe_device_lock); -err_out: return ret; - } -/* - * Remove a device from a group's device list and return the group device - * if successful. - */ -static struct group_device * -__iommu_group_remove_device(struct iommu_group *group, struct device *dev) +int iommu_probe_device(struct device *dev) { - struct group_device *device; + const struct iommu_ops *ops; + int ret; - lockdep_assert_held(&group->mutex); - for_each_group_device(group, device) { - if (device->dev == dev) { - list_del(&device->list); - return device; - } - } + ret = __iommu_probe_device(dev, NULL); + if (ret) + return ret; - return NULL; + ops = dev_iommu_ops(dev); + if (ops->probe_finalize) + ops->probe_finalize(dev); + + return 0; } -/* - * Release a device from its group and decrements the iommu group reference - * count. - */ -static void __iommu_group_release_device(struct iommu_group *group, - struct group_device *grp_dev) +static void __iommu_group_free_device(struct iommu_group *group, + struct group_device *grp_dev) { struct device *dev = grp_dev->dev; @@ -491,54 +544,57 @@ static void __iommu_group_release_device(struct iommu_group *group, trace_remove_device_from_group(group->id, dev); + /* + * If the group has become empty then ownership must have been + * released, and the current domain must be set back to NULL or + * the default domain. 
+ */ + if (list_empty(&group->devices)) + WARN_ON(group->owner_cnt || + group->domain != group->default_domain); + kfree(grp_dev->name); kfree(grp_dev); - dev->iommu_group = NULL; - kobject_put(group->devices_kobj); } -static void iommu_release_device(struct device *dev) +/* Remove the iommu_group from the struct device. */ +static void __iommu_group_remove_device(struct device *dev) { struct iommu_group *group = dev->iommu_group; struct group_device *device; - const struct iommu_ops *ops; - - if (!dev->iommu || !group) - return; - - iommu_device_unlink(dev->iommu->iommu_dev, dev); mutex_lock(&group->mutex); - device = __iommu_group_remove_device(group, dev); + for_each_group_device(group, device) { + if (device->dev != dev) + continue; - /* - * If the group has become empty then ownership must have been released, - * and the current domain must be set back to NULL or the default - * domain. - */ - if (list_empty(&group->devices)) - WARN_ON(group->owner_cnt || - group->domain != group->default_domain); + list_del(&device->list); + __iommu_group_free_device(group, device); + if (dev->iommu && dev->iommu->iommu_dev) + iommu_deinit_device(dev); + else + dev->iommu_group = NULL; + break; + } + mutex_unlock(&group->mutex); /* - * release_device() must stop using any attached domain on the device. - * If there are still other devices in the group they are not effected - * by this callback. - * - * The IOMMU driver must set the device to either an identity or - * blocking translation and stop using any domain pointer, as it is - * going to be freed. + * Pairs with the get in iommu_init_device() or + * iommu_group_add_device() */ - ops = dev_iommu_ops(dev); - if (ops->release_device) - ops->release_device(dev); - mutex_unlock(&group->mutex); + iommu_group_put(group); +} - if (device) - __iommu_group_release_device(group, device); +static void iommu_release_device(struct device *dev) +{ + struct iommu_group *group = dev->iommu_group; - module_put(ops->owner); - dev_iommu_free(dev); + if (group) + __iommu_group_remove_device(dev); + + /* Free any fwspec if no iommu_driver was ever attached */ + if (dev->iommu) + dev_iommu_free(dev); } static int __init iommu_set_def_domain_type(char *str) @@ -799,10 +855,9 @@ static void iommu_group_release(struct kobject *kobj) ida_free(&iommu_group_ida, group->id); - if (group->default_domain) - iommu_domain_free(group->default_domain); - if (group->blocking_domain) - iommu_domain_free(group->blocking_domain); + /* Domains are free'd by iommu_deinit_device() */ + WARN_ON(group->default_domain); + WARN_ON(group->blocking_domain); kfree(group->name); kfree(group); @@ -1017,22 +1072,16 @@ out: return ret; } -/** - * iommu_group_add_device - add a device to an iommu group - * @group: the group into which to add the device (reference should be held) - * @dev: the device - * - * This function is called by an iommu driver to add a device into a - * group. Adding a device increments the group reference count. 
- */ -int iommu_group_add_device(struct iommu_group *group, struct device *dev) +/* This is undone by __iommu_group_free_device() */ +static struct group_device *iommu_group_alloc_device(struct iommu_group *group, + struct device *dev) { int ret, i = 0; struct group_device *device; device = kzalloc(sizeof(*device), GFP_KERNEL); if (!device) - return -ENOMEM; + return ERR_PTR(-ENOMEM); device->dev = dev; @@ -1063,18 +1112,11 @@ rename: goto err_free_name; } - kobject_get(group->devices_kobj); - - dev->iommu_group = group; - - mutex_lock(&group->mutex); - list_add_tail(&device->list, &group->devices); - mutex_unlock(&group->mutex); trace_add_device_to_group(group->id, dev); dev_info(dev, "Adding to iommu group %d\n", group->id); - return 0; + return device; err_free_name: kfree(device->name); @@ -1083,7 +1125,32 @@ err_remove_link: err_free_device: kfree(device); dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret); - return ret; + return ERR_PTR(ret); +} + +/** + * iommu_group_add_device - add a device to an iommu group + * @group: the group into which to add the device (reference should be held) + * @dev: the device + * + * This function is called by an iommu driver to add a device into a + * group. Adding a device increments the group reference count. + */ +int iommu_group_add_device(struct iommu_group *group, struct device *dev) +{ + struct group_device *gdev; + + gdev = iommu_group_alloc_device(group, dev); + if (IS_ERR(gdev)) + return PTR_ERR(gdev); + + iommu_group_ref_get(group); + dev->iommu_group = group; + + mutex_lock(&group->mutex); + list_add_tail(&gdev->list, &group->devices); + mutex_unlock(&group->mutex); + return 0; } EXPORT_SYMBOL_GPL(iommu_group_add_device); @@ -1097,19 +1164,13 @@ EXPORT_SYMBOL_GPL(iommu_group_add_device); void iommu_group_remove_device(struct device *dev) { struct iommu_group *group = dev->iommu_group; - struct group_device *device; if (!group) return; dev_info(dev, "Removing from iommu group %d\n", group->id); - mutex_lock(&group->mutex); - device = __iommu_group_remove_device(group, dev); - mutex_unlock(&group->mutex); - - if (device) - __iommu_group_release_device(group, device); + __iommu_group_remove_device(dev); } EXPORT_SYMBOL_GPL(iommu_group_remove_device); @@ -1667,45 +1728,6 @@ iommu_group_alloc_default_domain(struct iommu_group *group, int req_type) return dom; } -/** - * iommu_group_get_for_dev - Find or create the IOMMU group for a device - * @dev: target device - * - * This function is intended to be called by IOMMU drivers and extended to - * support common, bus-defined algorithms when determining or creating the - * IOMMU group for a device. On success, the caller will hold a reference - * to the returned IOMMU group, which will already include the provided - * device. The reference should be released with iommu_group_put(). 
- */ -static struct iommu_group *iommu_group_get_for_dev(struct device *dev) -{ - const struct iommu_ops *ops = dev_iommu_ops(dev); - struct iommu_group *group; - int ret; - - group = iommu_group_get(dev); - if (group) - return group; - - group = ops->device_group(dev); - if (WARN_ON_ONCE(group == NULL)) - return ERR_PTR(-EINVAL); - - if (IS_ERR(group)) - return group; - - ret = iommu_group_add_device(group, dev); - if (ret) - goto out_put_group; - - return group; - -out_put_group: - iommu_group_put(group); - - return ERR_PTR(ret); -} - struct iommu_domain *iommu_group_default_domain(struct iommu_group *group) { return group->default_domain; @@ -1714,16 +1736,8 @@ struct iommu_domain *iommu_group_default_domain(struct iommu_group *group) static int probe_iommu_group(struct device *dev, void *data) { struct list_head *group_list = data; - struct iommu_group *group; int ret; - /* Device is probed already if in a group */ - group = iommu_group_get(dev); - if (group) { - iommu_group_put(group); - return 0; - } - ret = __iommu_probe_device(dev, group_list); if (ret == -ENODEV) ret = 0; @@ -1799,11 +1813,6 @@ int bus_iommu_probe(const struct bus_type *bus) LIST_HEAD(group_list); int ret; - /* - * This code-path does not allocate the default domain when - * creating the iommu group, so do it after the groups are - * created. - */ ret = bus_for_each_dev(bus, NULL, &group_list, probe_iommu_group); if (ret) return ret; @@ -1816,6 +1825,11 @@ int bus_iommu_probe(const struct bus_type *bus) /* Remove item from the list */ list_del_init(&group->entry); + /* + * We go to the trouble of deferred default domain creation so + * that the cross-group default domain type and the setup of the + * IOMMU_RESV_DIRECT will work correctly in non-hotpug scenarios. + */ ret = iommu_setup_default_domain(group, 0); if (ret) { mutex_unlock(&group->mutex); @@ -3221,7 +3235,7 @@ static void __iommu_release_dma_ownership(struct iommu_group *group) /** * iommu_group_release_dma_owner() - Release DMA ownership of a group - * @dev: The device + * @group: The group * * Release the DMA ownership claimed by iommu_group_claim_dma_owner(). */ @@ -3235,7 +3249,7 @@ EXPORT_SYMBOL_GPL(iommu_group_release_dma_owner); /** * iommu_device_release_dma_owner() - Release DMA ownership of a device - * @group: The device. + * @dev: The device. * * Release the DMA ownership claimed by iommu_device_claim_dma_owner(). 
*/ diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index 9f64c5c9f5b9..65ff69477c43 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -14,11 +14,12 @@ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> +#include <linux/iopoll.h> #include <linux/io-pgtable.h> #include <linux/iommu.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/of_platform.h> +#include <linux/pci.h> #include <linux/platform_device.h> #include <linux/sizes.h> #include <linux/slab.h> @@ -253,17 +254,13 @@ static void ipmmu_imuctr_write(struct ipmmu_vmsa_device *mmu, /* Wait for any pending TLB invalidations to complete */ static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain) { - unsigned int count = 0; + u32 val; - while (ipmmu_ctx_read_root(domain, IMCTR) & IMCTR_FLUSH) { - cpu_relax(); - if (++count == TLB_LOOP_TIMEOUT) { - dev_err_ratelimited(domain->mmu->dev, + if (read_poll_timeout_atomic(ipmmu_ctx_read_root, val, + !(val & IMCTR_FLUSH), 1, TLB_LOOP_TIMEOUT, + false, domain, IMCTR)) + dev_err_ratelimited(domain->mmu->dev, "TLB sync timed out -- MMU may be deadlocked\n"); - return; - } - udelay(1); - } } static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain) @@ -723,6 +720,10 @@ static bool ipmmu_device_is_allowed(struct device *dev) if (soc_device_match(soc_denylist)) return false; + /* Check whether this device is a PCI device */ + if (dev_is_pci(dev)) + return true; + /* Check whether this device can work with the IPMMU */ for (i = 0; i < ARRAY_SIZE(devices_allowlist); i++) { if (!strcmp(dev_name(dev), devices_allowlist[i])) diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index e93906d6e112..640275873a27 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -3,6 +3,7 @@ * Copyright (c) 2015-2016 MediaTek Inc. 
* Author: Yong Wu <yong.wu@mediatek.com> */ +#include <linux/arm-smccc.h> #include <linux/bitfield.h> #include <linux/bug.h> #include <linux/clk.h> @@ -27,6 +28,7 @@ #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/soc/mediatek/infracfg.h> +#include <linux/soc/mediatek/mtk_sip_svc.h> #include <asm/barrier.h> #include <soc/mediatek/smi.h> @@ -143,6 +145,7 @@ #define PGTABLE_PA_35_EN BIT(17) #define TF_PORT_TO_ADDR_MT8173 BIT(18) #define INT_ID_PORT_WIDTH_6 BIT(19) +#define CFG_IFA_MASTER_IN_ATF BIT(20) #define MTK_IOMMU_HAS_FLAG_MASK(pdata, _x, mask) \ ((((pdata)->flags) & (mask)) == (_x)) @@ -167,6 +170,7 @@ enum mtk_iommu_plat { M4U_MT8173, M4U_MT8183, M4U_MT8186, + M4U_MT8188, M4U_MT8192, M4U_MT8195, M4U_MT8365, @@ -258,6 +262,8 @@ struct mtk_iommu_data { struct device *smicomm_dev; struct mtk_iommu_bank_data *bank; + struct mtk_iommu_domain *share_dom; /* For 2 HWs share pgtable */ + struct regmap *pericfg; struct mutex mutex; /* Protect m4u_group/m4u_dom above */ @@ -577,41 +583,55 @@ static int mtk_iommu_config(struct mtk_iommu_data *data, struct device *dev, unsigned int larbid, portid; struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); const struct mtk_iommu_iova_region *region; - u32 peri_mmuen, peri_mmuen_msk; + unsigned long portid_msk = 0; + struct arm_smccc_res res; int i, ret = 0; for (i = 0; i < fwspec->num_ids; ++i) { - larbid = MTK_M4U_TO_LARB(fwspec->ids[i]); portid = MTK_M4U_TO_PORT(fwspec->ids[i]); + portid_msk |= BIT(portid); + } - if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) { - larb_mmu = &data->larb_imu[larbid]; + if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) { + /* All ports should be in the same larb. just use 0 here */ + larbid = MTK_M4U_TO_LARB(fwspec->ids[0]); + larb_mmu = &data->larb_imu[larbid]; + region = data->plat_data->iova_region + regionid; - region = data->plat_data->iova_region + regionid; + for_each_set_bit(portid, &portid_msk, 32) larb_mmu->bank[portid] = upper_32_bits(region->iova_base); - dev_dbg(dev, "%s iommu for larb(%s) port %d region %d rgn-bank %d.\n", - enable ? "enable" : "disable", dev_name(larb_mmu->dev), - portid, regionid, larb_mmu->bank[portid]); + dev_dbg(dev, "%s iommu for larb(%s) port 0x%lx region %d rgn-bank %d.\n", + enable ? "enable" : "disable", dev_name(larb_mmu->dev), + portid_msk, regionid, upper_32_bits(region->iova_base)); - if (enable) - larb_mmu->mmu |= MTK_SMI_MMU_EN(portid); - else - larb_mmu->mmu &= ~MTK_SMI_MMU_EN(portid); - } else if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA)) { - peri_mmuen_msk = BIT(portid); + if (enable) + larb_mmu->mmu |= portid_msk; + else + larb_mmu->mmu &= ~portid_msk; + } else if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA)) { + if (MTK_IOMMU_HAS_FLAG(data->plat_data, CFG_IFA_MASTER_IN_ATF)) { + arm_smccc_smc(MTK_SIP_KERNEL_IOMMU_CONTROL, + IOMMU_ATF_CMD_CONFIG_INFRA_IOMMU, + portid_msk, enable, 0, 0, 0, 0, &res); + ret = res.a0; + } else { /* PCI dev has only one output id, enable the next writing bit for PCIe */ - if (dev_is_pci(dev)) - peri_mmuen_msk |= BIT(portid + 1); + if (dev_is_pci(dev)) { + if (fwspec->num_ids != 1) { + dev_err(dev, "PCI dev can only have one port.\n"); + return -ENODEV; + } + portid_msk |= BIT(portid + 1); + } - peri_mmuen = enable ? peri_mmuen_msk : 0; ret = regmap_update_bits(data->pericfg, PERICFG_IOMMU_1, - peri_mmuen_msk, peri_mmuen); - if (ret) - dev_err(dev, "%s iommu(%s) inframaster 0x%x fail(%d).\n", - enable ? 
"enable" : "disable", - dev_name(data->dev), peri_mmuen_msk, ret); + (u32)portid_msk, enable ? (u32)portid_msk : 0); } + if (ret) + dev_err(dev, "%s iommu(%s) inframaster 0x%lx fail(%d).\n", + enable ? "enable" : "disable", + dev_name(data->dev), portid_msk, ret); } return ret; } @@ -620,15 +640,14 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom, struct mtk_iommu_data *data, unsigned int region_id) { + struct mtk_iommu_domain *share_dom = data->share_dom; const struct mtk_iommu_iova_region *region; - struct mtk_iommu_domain *m4u_dom; - - /* Always use bank0 in sharing pgtable case */ - m4u_dom = data->bank[0].m4u_dom; - if (m4u_dom) { - dom->iop = m4u_dom->iop; - dom->cfg = m4u_dom->cfg; - dom->domain.pgsize_bitmap = m4u_dom->cfg.pgsize_bitmap; + + /* Always use share domain in sharing pgtable case */ + if (MTK_IOMMU_HAS_FLAG(data->plat_data, SHARE_PGTABLE) && share_dom) { + dom->iop = share_dom->iop; + dom->cfg = share_dom->cfg; + dom->domain.pgsize_bitmap = share_dom->cfg.pgsize_bitmap; goto update_iova_region; } @@ -658,6 +677,9 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom, /* Update our support page sizes bitmap */ dom->domain.pgsize_bitmap = dom->cfg.pgsize_bitmap; + if (MTK_IOMMU_HAS_FLAG(data->plat_data, SHARE_PGTABLE)) + data->share_dom = dom; + update_iova_region: /* Update the iova region for this domain */ region = data->plat_data->iova_region + region_id; @@ -708,7 +730,9 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain, /* Data is in the frstdata in sharing pgtable case. */ frstdata = mtk_iommu_get_frst_data(hw_list); + mutex_lock(&frstdata->mutex); ret = mtk_iommu_domain_finalise(dom, frstdata, region_id); + mutex_unlock(&frstdata->mutex); if (ret) { mutex_unlock(&dom->mutex); return ret; @@ -1318,7 +1342,8 @@ static int mtk_iommu_probe(struct platform_device *pdev) dev_err_probe(dev, ret, "mm dts parse fail\n"); goto out_runtime_disable; } - } else if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA)) { + } else if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA) && + !MTK_IOMMU_HAS_FLAG(data->plat_data, CFG_IFA_MASTER_IN_ATF)) { p = data->plat_data->pericfg_comp_str; data->pericfg = syscon_regmap_lookup_by_compatible(p); if (IS_ERR(data->pericfg)) { @@ -1570,6 +1595,67 @@ static const struct mtk_iommu_plat_data mt8186_data_mm = { .iova_region_larb_msk = mt8186_larb_region_msk, }; +static const struct mtk_iommu_plat_data mt8188_data_infra = { + .m4u_plat = M4U_MT8188, + .flags = WR_THROT_EN | DCM_DISABLE | STD_AXI_MODE | PM_CLK_AO | + MTK_IOMMU_TYPE_INFRA | IFA_IOMMU_PCIE_SUPPORT | + PGTABLE_PA_35_EN | CFG_IFA_MASTER_IN_ATF, + .inv_sel_reg = REG_MMU_INV_SEL_GEN2, + .banks_num = 1, + .banks_enable = {true}, + .iova_region = single_domain, + .iova_region_nr = ARRAY_SIZE(single_domain), +}; + +static const u32 mt8188_larb_region_msk[MT8192_MULTI_REGION_NR_MAX][MTK_LARB_NR_MAX] = { + [0] = {~0, ~0, ~0, ~0}, /* Region0: all ports for larb0/1/2/3 */ + [1] = {0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, ~0, ~0, ~0}, /* Region1: larb19(21)/21(22)/23 */ + [2] = {0, 0, 0, 0, ~0, ~0, ~0, ~0, /* Region2: the other larbs. 
*/ + ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, + ~0, ~0, ~0, ~0, ~0, 0, 0, 0, + 0, ~0}, + [3] = {0}, + [4] = {[24] = BIT(0) | BIT(1)}, /* Only larb27(24) port0/1 */ + [5] = {[24] = BIT(2) | BIT(3)}, /* Only larb27(24) port2/3 */ +}; + +static const struct mtk_iommu_plat_data mt8188_data_vdo = { + .m4u_plat = M4U_MT8188, + .flags = HAS_BCLK | HAS_SUB_COMM_3BITS | OUT_ORDER_WR_EN | + WR_THROT_EN | IOVA_34_EN | SHARE_PGTABLE | + PGTABLE_PA_35_EN | MTK_IOMMU_TYPE_MM, + .hw_list = &m4ulist, + .inv_sel_reg = REG_MMU_INV_SEL_GEN2, + .banks_num = 1, + .banks_enable = {true}, + .iova_region = mt8192_multi_dom, + .iova_region_nr = ARRAY_SIZE(mt8192_multi_dom), + .iova_region_larb_msk = mt8188_larb_region_msk, + .larbid_remap = {{2}, {0}, {21}, {0}, {19}, {9, 10, + 11 /* 11a */, 25 /* 11c */}, + {13, 0, 29 /* 16b */, 30 /* 17b */, 0}, {5}}, +}; + +static const struct mtk_iommu_plat_data mt8188_data_vpp = { + .m4u_plat = M4U_MT8188, + .flags = HAS_BCLK | HAS_SUB_COMM_3BITS | OUT_ORDER_WR_EN | + WR_THROT_EN | IOVA_34_EN | SHARE_PGTABLE | + PGTABLE_PA_35_EN | MTK_IOMMU_TYPE_MM, + .hw_list = &m4ulist, + .inv_sel_reg = REG_MMU_INV_SEL_GEN2, + .banks_num = 1, + .banks_enable = {true}, + .iova_region = mt8192_multi_dom, + .iova_region_nr = ARRAY_SIZE(mt8192_multi_dom), + .iova_region_larb_msk = mt8188_larb_region_msk, + .larbid_remap = {{1}, {3}, {23}, {7}, {MTK_INVALID_LARBID}, + {12, 15, 24 /* 11b */}, {14, MTK_INVALID_LARBID, + 16 /* 16a */, 17 /* 17a */, MTK_INVALID_LARBID, + 27, 28 /* ccu0 */, MTK_INVALID_LARBID}, {4, 6}}, +}; + static const unsigned int mt8192_larb_region_msk[MT8192_MULTI_REGION_NR_MAX][MTK_LARB_NR_MAX] = { [0] = {~0, ~0}, /* Region0: larb0/1 */ [1] = {0, 0, 0, 0, ~0, ~0, 0, ~0}, /* Region1: larb4/5/7 */ @@ -1678,6 +1764,9 @@ static const struct of_device_id mtk_iommu_of_ids[] = { { .compatible = "mediatek,mt8173-m4u", .data = &mt8173_data}, { .compatible = "mediatek,mt8183-m4u", .data = &mt8183_data}, { .compatible = "mediatek,mt8186-iommu-mm", .data = &mt8186_data_mm}, /* mm: m4u */ + { .compatible = "mediatek,mt8188-iommu-infra", .data = &mt8188_data_infra}, + { .compatible = "mediatek,mt8188-iommu-vdo", .data = &mt8188_data_vdo}, + { .compatible = "mediatek,mt8188-iommu-vpp", .data = &mt8188_data_vpp}, { .compatible = "mediatek,mt8192-m4u", .data = &mt8192_data}, { .compatible = "mediatek,mt8195-iommu-infra", .data = &mt8195_data_infra}, { .compatible = "mediatek,mt8195-iommu-vdo", .data = &mt8195_data_vdo}, diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c index 40f57d293a79..157b286e36bf 100644 --- a/drivers/iommu/of_iommu.c +++ b/drivers/iommu/of_iommu.c @@ -159,7 +159,7 @@ const struct iommu_ops *of_iommu_configure(struct device *dev, * If we have reason to believe the IOMMU driver missed the initial * probe for dev, replay it to get things in order. 
 	 */
-	if (!err && dev->bus && !device_iommu_mapped(dev))
+	if (!err && dev->bus)
 		err = iommu_probe_device(dev);
 
 	/* Ignore all other errors apart from EPROBE_DEFER */
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 4054030c3237..8ff69fbf9f65 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -98,9 +98,8 @@ struct rk_iommu_ops {
 	phys_addr_t (*pt_address)(u32 dte);
 	u32 (*mk_dtentries)(dma_addr_t pt_dma);
 	u32 (*mk_ptentries)(phys_addr_t page, int prot);
-	phys_addr_t (*dte_addr_phys)(u32 addr);
-	u32 (*dma_addr_dte)(dma_addr_t dt_dma);
 	u64 dma_bit_mask;
+	gfp_t gfp_flags;
 };
 
 struct rk_iommu {
@@ -278,8 +277,8 @@ static u32 rk_mk_pte(phys_addr_t page, int prot)
 /*
  * In v2:
  * 31:12 - Page address bit 31:0
- * 11:9 - Page address bit 34:32
- * 8:4 - Page address bit 39:35
+ * 11: 8 - Page address bit 35:32
+ * 7: 4 - Page address bit 39:36
  * 3 - Security
  * 2 - Writable
  * 1 - Readable
@@ -506,7 +505,7 @@ static int rk_iommu_force_reset(struct rk_iommu *iommu)
 
 	/*
 	 * Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY
-	 * and verifying that upper 5 nybbles are read back.
+	 * and verifying that upper 5 (v1) or 7 (v2) nybbles are read back.
 	 */
 	for (i = 0; i < iommu->num_mmu; i++) {
 		dte_addr = rk_ops->pt_address(DTE_ADDR_DUMMY);
@@ -531,33 +530,6 @@ static int rk_iommu_force_reset(struct rk_iommu *iommu)
 	return 0;
 }
 
-static inline phys_addr_t rk_dte_addr_phys(u32 addr)
-{
-	return (phys_addr_t)addr;
-}
-
-static inline u32 rk_dma_addr_dte(dma_addr_t dt_dma)
-{
-	return dt_dma;
-}
-
-#define DT_HI_MASK GENMASK_ULL(39, 32)
-#define DTE_BASE_HI_MASK GENMASK(11, 4)
-#define DT_SHIFT 28
-
-static inline phys_addr_t rk_dte_addr_phys_v2(u32 addr)
-{
-	u64 addr64 = addr;
-	return (phys_addr_t)(addr64 & RK_DTE_PT_ADDRESS_MASK) |
-	       ((addr64 & DTE_BASE_HI_MASK) << DT_SHIFT);
-}
-
-static inline u32 rk_dma_addr_dte_v2(dma_addr_t dt_dma)
-{
-	return (dt_dma & RK_DTE_PT_ADDRESS_MASK) |
-	       ((dt_dma & DT_HI_MASK) >> DT_SHIFT);
-}
-
 static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
 {
 	void __iomem *base = iommu->bases[index];
@@ -577,7 +549,7 @@ static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
 	page_offset = rk_iova_page_offset(iova);
 
 	mmu_dte_addr = rk_iommu_read(base, RK_MMU_DTE_ADDR);
-	mmu_dte_addr_phys = rk_ops->dte_addr_phys(mmu_dte_addr);
+	mmu_dte_addr_phys = rk_ops->pt_address(mmu_dte_addr);
 
 	dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index);
 	dte_addr = phys_to_virt(dte_addr_phys);
@@ -756,7 +728,7 @@ static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
 	if (rk_dte_is_pt_valid(dte))
 		goto done;
 
-	page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
+	page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | rk_ops->gfp_flags);
 	if (!page_table)
 		return ERR_PTR(-ENOMEM);
 
@@ -967,7 +939,7 @@ static int rk_iommu_enable(struct rk_iommu *iommu)
 
 	for (i = 0; i < iommu->num_mmu; i++) {
 		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
-			       rk_ops->dma_addr_dte(rk_domain->dt_dma));
+			       rk_ops->mk_dtentries(rk_domain->dt_dma));
 		rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
 		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
 	}
@@ -1105,7 +1077,7 @@ static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
 	 * Each level1 (dt) and level2 (pt) table has 1024 4-byte entries.
 	 * Allocate one 4 KiB page for each table.
 	 */
-	rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
+	rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | rk_ops->gfp_flags);
 	if (!rk_domain->dt)
 		goto err_free_domain;
 
@@ -1405,18 +1377,16 @@ static struct rk_iommu_ops iommu_data_ops_v1 = {
 	.pt_address = &rk_dte_pt_address,
 	.mk_dtentries = &rk_mk_dte,
 	.mk_ptentries = &rk_mk_pte,
-	.dte_addr_phys = &rk_dte_addr_phys,
-	.dma_addr_dte = &rk_dma_addr_dte,
 	.dma_bit_mask = DMA_BIT_MASK(32),
+	.gfp_flags = GFP_DMA32,
 };
 
 static struct rk_iommu_ops iommu_data_ops_v2 = {
 	.pt_address = &rk_dte_pt_address_v2,
 	.mk_dtentries = &rk_mk_dte_v2,
 	.mk_ptentries = &rk_mk_pte_v2,
-	.dte_addr_phys = &rk_dte_addr_phys_v2,
-	.dma_addr_dte = &rk_dma_addr_dte_v2,
 	.dma_bit_mask = DMA_BIT_MASK(40),
+	.gfp_flags = 0,
 };
 
 static const struct of_device_id rk_iommu_dt_ids[] = {
diff --git a/drivers/iommu/sprd-iommu.c b/drivers/iommu/sprd-iommu.c
index 39e34fdeccda..2fa9afebd4f5 100644
--- a/drivers/iommu/sprd-iommu.c
+++ b/drivers/iommu/sprd-iommu.c
@@ -14,6 +14,7 @@
 #include <linux/mfd/syscon.h>
 #include <linux/module.h>
 #include <linux/of_platform.h>
+#include <linux/platform_device.h>
 #include <linux/regmap.h>
 #include <linux/slab.h>
 
@@ -148,6 +149,7 @@ static struct iommu_domain *sprd_iommu_domain_alloc(unsigned int domain_type)
 
 	dom->domain.geometry.aperture_start = 0;
 	dom->domain.geometry.aperture_end = SZ_256M - 1;
+	dom->domain.geometry.force_aperture = true;
 
 	return &dom->domain;
 }
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 1cbf063ccf14..e445f80d0226 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -9,7 +9,7 @@
 #include <linux/iommu.h>
 #include <linux/kernel.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/of_platform.h>
 #include <linux/pci.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index 3551ed057774..17dcd826f5c2 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -13,7 +13,7 @@
 #include <linux/interval_tree.h>
 #include <linux/iommu.h>
 #include <linux/module.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
 #include <linux/pci.h>
 #include <linux/virtio.h>
 #include <linux/virtio_config.h>