From 46efb3053f4f23357e9e29f8abaa6f801d956a0c Mon Sep 17 00:00:00 2001 From: Biju Das Date: Mon, 18 Mar 2024 08:50:41 +0000 Subject: irqchip/renesas-rzg2l: Simplify rzg2l_irqc_irq_{en,dis}able() Simplify rzg2l_irqc_irq_{en,dis}able() by moving common code to rzg2l_tint_irq_endisable(). Signed-off-by: Biju Das Signed-off-by: Thomas Gleixner --- drivers/irqchip/irq-renesas-rzg2l.c | 28 +++++++++++----------------- 1 file changed, 11 insertions(+), 17 deletions(-) (limited to 'drivers') diff --git a/drivers/irqchip/irq-renesas-rzg2l.c b/drivers/irqchip/irq-renesas-rzg2l.c index ae67fec2ab46..f6484bf15e0b 100644 --- a/drivers/irqchip/irq-renesas-rzg2l.c +++ b/drivers/irqchip/irq-renesas-rzg2l.c @@ -138,7 +138,7 @@ static void rzg2l_irqc_eoi(struct irq_data *d) irq_chip_eoi_parent(d); } -static void rzg2l_irqc_irq_disable(struct irq_data *d) +static void rzg2l_tint_irq_endisable(struct irq_data *d, bool enable) { unsigned int hw_irq = irqd_to_hwirq(d); @@ -151,30 +151,24 @@ static void rzg2l_irqc_irq_disable(struct irq_data *d) raw_spin_lock(&priv->lock); reg = readl_relaxed(priv->base + TSSR(tssr_index)); - reg &= ~(TIEN << TSSEL_SHIFT(tssr_offset)); + if (enable) + reg |= TIEN << TSSEL_SHIFT(tssr_offset); + else + reg &= ~(TIEN << TSSEL_SHIFT(tssr_offset)); writel_relaxed(reg, priv->base + TSSR(tssr_index)); raw_spin_unlock(&priv->lock); } +} + +static void rzg2l_irqc_irq_disable(struct irq_data *d) +{ + rzg2l_tint_irq_endisable(d, false); irq_chip_disable_parent(d); } static void rzg2l_irqc_irq_enable(struct irq_data *d) { - unsigned int hw_irq = irqd_to_hwirq(d); - - if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ) { - struct rzg2l_irqc_priv *priv = irq_data_to_priv(d); - u32 offset = hw_irq - IRQC_TINT_START; - u32 tssr_offset = TSSR_OFFSET(offset); - u8 tssr_index = TSSR_INDEX(offset); - u32 reg; - - raw_spin_lock(&priv->lock); - reg = readl_relaxed(priv->base + TSSR(tssr_index)); - reg |= TIEN << TSSEL_SHIFT(tssr_offset); - writel_relaxed(reg, priv->base + TSSR(tssr_index)); - raw_spin_unlock(&priv->lock); - } + rzg2l_tint_irq_endisable(d, true); irq_chip_enable_parent(d); } -- cgit v1.2.3 From 21a8f8a0eb35ceb21e2c9ddd87468bc3b5ac87c0 Mon Sep 17 00:00:00 2001 From: Anup Patel Date: Thu, 7 Mar 2024 19:33:00 +0530 Subject: irqchip: Add RISC-V incoming MSI controller early driver MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The RISC-V advanced interrupt architecture (AIA) specification defines a new MSI controller called incoming message signalled interrupt controller (IMSIC) which manages MSI on per-HART (or per-CPU) basis. It also supports IPIs as software injected MSIs. (For more details refer https://github.com/riscv/riscv-aia) Add an early irqchip driver for RISC-V IMSIC which sets up the IMSIC state and provide IPIs. 
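To make the delivery model concrete: an IMSIC interrupt file is raised by a single 32-bit MMIO write of the interrupt identity to the target HART's MSI page, which is exactly how the driver below implements IPIs. The following is a minimal illustrative sketch, not part of the patch; the msi_va pointer stands in for the ioremap()ed per-HART MSI page that the driver keeps in struct imsic_local_config, and the function name is made up for illustration.

#include <linux/io.h>

/* Raise interrupt identity 'id' at the interrupt file mapped at 'msi_va' */
static void example_imsic_raise_id(void __iomem *msi_va, u32 id)
{
	/* One relaxed 32-bit write of the identity injects the MSI */
	writel_relaxed(id, msi_va);
}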
Signed-off-by: Anup Patel Signed-off-by: Thomas Gleixner Tested-by: Björn Töpel Reviewed-by: Björn Töpel Link: https://lore.kernel.org/r/20240307140307.646078-3-apatel@ventanamicro.com --- drivers/irqchip/Kconfig | 7 + drivers/irqchip/Makefile | 1 + drivers/irqchip/irq-riscv-imsic-early.c | 201 ++++++++ drivers/irqchip/irq-riscv-imsic-state.c | 865 ++++++++++++++++++++++++++++++++ drivers/irqchip/irq-riscv-imsic-state.h | 107 ++++ include/linux/cpuhotplug.h | 1 + include/linux/irqchip/riscv-imsic.h | 87 ++++ 7 files changed, 1269 insertions(+) create mode 100644 drivers/irqchip/irq-riscv-imsic-early.c create mode 100644 drivers/irqchip/irq-riscv-imsic-state.c create mode 100644 drivers/irqchip/irq-riscv-imsic-state.h create mode 100644 include/linux/irqchip/riscv-imsic.h (limited to 'drivers') diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index 72c07a12f5e1..8610810b80da 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -540,6 +540,13 @@ config RISCV_INTC depends on RISCV select IRQ_DOMAIN_HIERARCHY +config RISCV_IMSIC + bool + depends on RISCV + select IRQ_DOMAIN_HIERARCHY + select GENERIC_IRQ_MATRIX_ALLOCATOR + select GENERIC_MSI_IRQ + config SIFIVE_PLIC bool depends on RISCV diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile index ec4a18380998..972aa9fecf96 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile @@ -95,6 +95,7 @@ obj-$(CONFIG_QCOM_MPM) += irq-qcom-mpm.o obj-$(CONFIG_CSKY_MPINTC) += irq-csky-mpintc.o obj-$(CONFIG_CSKY_APB_INTC) += irq-csky-apb-intc.o obj-$(CONFIG_RISCV_INTC) += irq-riscv-intc.o +obj-$(CONFIG_RISCV_IMSIC) += irq-riscv-imsic-state.o irq-riscv-imsic-early.o obj-$(CONFIG_SIFIVE_PLIC) += irq-sifive-plic.o obj-$(CONFIG_STARFIVE_JH8100_INTC) += irq-starfive-jh8100-intc.o obj-$(CONFIG_IMX_IRQSTEER) += irq-imx-irqsteer.o diff --git a/drivers/irqchip/irq-riscv-imsic-early.c b/drivers/irqchip/irq-riscv-imsic-early.c new file mode 100644 index 000000000000..886418ec06cb --- /dev/null +++ b/drivers/irqchip/irq-riscv-imsic-early.c @@ -0,0 +1,201 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Western Digital Corporation or its affiliates. + * Copyright (C) 2022 Ventana Micro Systems Inc. + */ + +#define pr_fmt(fmt) "riscv-imsic: " fmt +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "irq-riscv-imsic-state.h" + +static int imsic_parent_irq; + +#ifdef CONFIG_SMP +static void imsic_ipi_send(unsigned int cpu) +{ + struct imsic_local_config *local = per_cpu_ptr(imsic->global.local, cpu); + + writel_relaxed(IMSIC_IPI_ID, local->msi_va); +} + +static void imsic_ipi_starting_cpu(void) +{ + /* Enable IPIs for current CPU. */ + __imsic_id_set_enable(IMSIC_IPI_ID); +} + +static void imsic_ipi_dying_cpu(void) +{ + /* Disable IPIs for current CPU. */ + __imsic_id_clear_enable(IMSIC_IPI_ID); +} + +static int __init imsic_ipi_domain_init(void) +{ + int virq; + + /* Create IMSIC IPI multiplexing */ + virq = ipi_mux_create(IMSIC_NR_IPI, imsic_ipi_send); + if (virq <= 0) + return virq < 0 ? 
virq : -ENOMEM; + + /* Set vIRQ range */ + riscv_ipi_set_virq_range(virq, IMSIC_NR_IPI, true); + + /* Announce that IMSIC is providing IPIs */ + pr_info("%pfwP: providing IPIs using interrupt %d\n", imsic->fwnode, IMSIC_IPI_ID); + + return 0; +} +#else +static void imsic_ipi_starting_cpu(void) { } +static void imsic_ipi_dying_cpu(void) { } +static int __init imsic_ipi_domain_init(void) { return 0; } +#endif + +/* + * To handle an interrupt, we read the TOPEI CSR and write zero in one + * instruction. If TOPEI CSR is non-zero then we translate TOPEI.ID to + * Linux interrupt number and let Linux IRQ subsystem handle it. + */ +static void imsic_handle_irq(struct irq_desc *desc) +{ + struct irq_chip *chip = irq_desc_get_chip(desc); + int err, cpu = smp_processor_id(); + struct imsic_vector *vec; + unsigned long local_id; + + chained_irq_enter(chip, desc); + + while ((local_id = csr_swap(CSR_TOPEI, 0))) { + local_id >>= TOPEI_ID_SHIFT; + + if (local_id == IMSIC_IPI_ID) { + if (IS_ENABLED(CONFIG_SMP)) + ipi_mux_process(); + continue; + } + + if (unlikely(!imsic->base_domain)) + continue; + + vec = imsic_vector_from_local_id(cpu, local_id); + if (!vec) { + pr_warn_ratelimited("vector not found for local ID 0x%lx\n", local_id); + continue; + } + + err = generic_handle_domain_irq(imsic->base_domain, vec->hwirq); + if (unlikely(err)) + pr_warn_ratelimited("hwirq 0x%x mapping not found\n", vec->hwirq); + } + + chained_irq_exit(chip, desc); +} + +static int imsic_starting_cpu(unsigned int cpu) +{ + /* Mark per-CPU IMSIC state as online */ + imsic_state_online(); + + /* Enable per-CPU parent interrupt */ + enable_percpu_irq(imsic_parent_irq, irq_get_trigger_type(imsic_parent_irq)); + + /* Setup IPIs */ + imsic_ipi_starting_cpu(); + + /* + * Interrupts identities might have been enabled/disabled while + * this CPU was not running so sync-up local enable/disable state. 
+ */ + imsic_local_sync_all(); + + /* Enable local interrupt delivery */ + imsic_local_delivery(true); + + return 0; +} + +static int imsic_dying_cpu(unsigned int cpu) +{ + /* Cleanup IPIs */ + imsic_ipi_dying_cpu(); + + /* Mark per-CPU IMSIC state as offline */ + imsic_state_offline(); + + return 0; +} + +static int __init imsic_early_probe(struct fwnode_handle *fwnode) +{ + struct irq_domain *domain; + int rc; + + /* Find parent domain and register chained handler */ + domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(), DOMAIN_BUS_ANY); + if (!domain) { + pr_err("%pfwP: Failed to find INTC domain\n", fwnode); + return -ENOENT; + } + imsic_parent_irq = irq_create_mapping(domain, RV_IRQ_EXT); + if (!imsic_parent_irq) { + pr_err("%pfwP: Failed to create INTC mapping\n", fwnode); + return -ENOENT; + } + + /* Initialize IPI domain */ + rc = imsic_ipi_domain_init(); + if (rc) { + pr_err("%pfwP: Failed to initialize IPI domain\n", fwnode); + return rc; + } + + /* Setup chained handler to the parent domain interrupt */ + irq_set_chained_handler(imsic_parent_irq, imsic_handle_irq); + + /* + * Setup cpuhp state (must be done after setting imsic_parent_irq) + * + * Don't disable per-CPU IMSIC file when CPU goes offline + * because this affects IPI and the masking/unmasking of + * virtual IPIs is done via generic IPI-Mux + */ + cpuhp_setup_state(CPUHP_AP_IRQ_RISCV_IMSIC_STARTING, "irqchip/riscv/imsic:starting", + imsic_starting_cpu, imsic_dying_cpu); + + return 0; +} + +static int __init imsic_early_dt_init(struct device_node *node, struct device_node *parent) +{ + struct fwnode_handle *fwnode = &node->fwnode; + int rc; + + /* Setup IMSIC state */ + rc = imsic_setup_state(fwnode); + if (rc) { + pr_err("%pfwP: failed to setup state (error %d)\n", fwnode, rc); + return rc; + } + + /* Do early setup of IPIs */ + rc = imsic_early_probe(fwnode); + if (rc) + return rc; + + /* Ensure that OF platform device gets probed */ + of_node_clear_flag(node, OF_POPULATED); + return 0; +} + +IRQCHIP_DECLARE(riscv_imsic, "riscv,imsics", imsic_early_dt_init); diff --git a/drivers/irqchip/irq-riscv-imsic-state.c b/drivers/irqchip/irq-riscv-imsic-state.c new file mode 100644 index 000000000000..5479f872e62b --- /dev/null +++ b/drivers/irqchip/irq-riscv-imsic-state.c @@ -0,0 +1,865 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Western Digital Corporation or its affiliates. + * Copyright (C) 2022 Ventana Micro Systems Inc. 
+ */ + +#define pr_fmt(fmt) "riscv-imsic: " fmt +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "irq-riscv-imsic-state.h" + +#define IMSIC_DISABLE_EIDELIVERY 0 +#define IMSIC_ENABLE_EIDELIVERY 1 +#define IMSIC_DISABLE_EITHRESHOLD 1 +#define IMSIC_ENABLE_EITHRESHOLD 0 + +static inline void imsic_csr_write(unsigned long reg, unsigned long val) +{ + csr_write(CSR_ISELECT, reg); + csr_write(CSR_IREG, val); +} + +static inline unsigned long imsic_csr_read(unsigned long reg) +{ + csr_write(CSR_ISELECT, reg); + return csr_read(CSR_IREG); +} + +static inline unsigned long imsic_csr_read_clear(unsigned long reg, unsigned long val) +{ + csr_write(CSR_ISELECT, reg); + return csr_read_clear(CSR_IREG, val); +} + +static inline void imsic_csr_set(unsigned long reg, unsigned long val) +{ + csr_write(CSR_ISELECT, reg); + csr_set(CSR_IREG, val); +} + +static inline void imsic_csr_clear(unsigned long reg, unsigned long val) +{ + csr_write(CSR_ISELECT, reg); + csr_clear(CSR_IREG, val); +} + +struct imsic_priv *imsic; + +const struct imsic_global_config *imsic_get_global_config(void) +{ + return imsic ? &imsic->global : NULL; +} +EXPORT_SYMBOL_GPL(imsic_get_global_config); + +static bool __imsic_eix_read_clear(unsigned long id, bool pend) +{ + unsigned long isel, imask; + + isel = id / BITS_PER_LONG; + isel *= BITS_PER_LONG / IMSIC_EIPx_BITS; + isel += pend ? IMSIC_EIP0 : IMSIC_EIE0; + imask = BIT(id & (__riscv_xlen - 1)); + + return !!(imsic_csr_read_clear(isel, imask) & imask); +} + +static inline bool __imsic_id_read_clear_enabled(unsigned long id) +{ + return __imsic_eix_read_clear(id, false); +} + +static inline bool __imsic_id_read_clear_pending(unsigned long id) +{ + return __imsic_eix_read_clear(id, true); +} + +void __imsic_eix_update(unsigned long base_id, unsigned long num_id, bool pend, bool val) +{ + unsigned long id = base_id, last_id = base_id + num_id; + unsigned long i, isel, ireg; + + while (id < last_id) { + isel = id / BITS_PER_LONG; + isel *= BITS_PER_LONG / IMSIC_EIPx_BITS; + isel += pend ? IMSIC_EIP0 : IMSIC_EIE0; + + /* + * Prepare the ID mask to be programmed in the + * IMSIC EIEx and EIPx registers. These registers + * are XLEN-wide and we must not touch IDs which + * are < base_id and >= (base_id + num_id). + */ + ireg = 0; + for (i = id & (__riscv_xlen - 1); id < last_id && i < __riscv_xlen; i++) { + ireg |= BIT(i); + id++; + } + + /* + * The IMSIC EIEx and EIPx registers are indirectly + * accessed via using ISELECT and IREG CSRs so we + * need to access these CSRs without getting preempted. + * + * All existing users of this function call this + * function with local IRQs disabled so we don't + * need to do anything special here. + */ + if (val) + imsic_csr_set(isel, ireg); + else + imsic_csr_clear(isel, ireg); + } +} + +static void __imsic_local_sync(struct imsic_local_priv *lpriv) +{ + struct imsic_local_config *mlocal; + struct imsic_vector *vec, *mvec; + int i; + + lockdep_assert_held(&lpriv->lock); + + for_each_set_bit(i, lpriv->dirty_bitmap, imsic->global.nr_ids + 1) { + if (!i || i == IMSIC_IPI_ID) + goto skip; + vec = &lpriv->vectors[i]; + + if (READ_ONCE(vec->enable)) + __imsic_id_set_enable(i); + else + __imsic_id_clear_enable(i); + + /* + * If the ID was being moved to a new ID on some other CPU + * then we can get a MSI during the movement so check the + * ID pending bit and re-trigger the new ID on other CPU + * using MMIO write. 
+ */ + mvec = READ_ONCE(vec->move); + WRITE_ONCE(vec->move, NULL); + if (mvec && mvec != vec) { + if (__imsic_id_read_clear_pending(i)) { + mlocal = per_cpu_ptr(imsic->global.local, mvec->cpu); + writel_relaxed(mvec->local_id, mlocal->msi_va); + } + + imsic_vector_free(&lpriv->vectors[i]); + } + +skip: + bitmap_clear(lpriv->dirty_bitmap, i, 1); + } +} + +void imsic_local_sync_all(void) +{ + struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv); + unsigned long flags; + + raw_spin_lock_irqsave(&lpriv->lock, flags); + bitmap_fill(lpriv->dirty_bitmap, imsic->global.nr_ids + 1); + __imsic_local_sync(lpriv); + raw_spin_unlock_irqrestore(&lpriv->lock, flags); +} + +void imsic_local_delivery(bool enable) +{ + if (enable) { + imsic_csr_write(IMSIC_EITHRESHOLD, IMSIC_ENABLE_EITHRESHOLD); + imsic_csr_write(IMSIC_EIDELIVERY, IMSIC_ENABLE_EIDELIVERY); + return; + } + + imsic_csr_write(IMSIC_EIDELIVERY, IMSIC_DISABLE_EIDELIVERY); + imsic_csr_write(IMSIC_EITHRESHOLD, IMSIC_DISABLE_EITHRESHOLD); +} + +#ifdef CONFIG_SMP +static void imsic_local_timer_callback(struct timer_list *timer) +{ + struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv); + unsigned long flags; + + raw_spin_lock_irqsave(&lpriv->lock, flags); + __imsic_local_sync(lpriv); + raw_spin_unlock_irqrestore(&lpriv->lock, flags); +} + +static void __imsic_remote_sync(struct imsic_local_priv *lpriv, unsigned int cpu) +{ + lockdep_assert_held(&lpriv->lock); + + /* + * The spinlock acquire/release semantics ensure that changes + * to vector enable, vector move and dirty bitmap are visible + * to the target CPU. + */ + + /* + * We schedule a timer on the target CPU if the target CPU is not + * same as the current CPU. An offline CPU will unconditionally + * synchronize IDs through imsic_starting_cpu() when the + * CPU is brought up. + */ + if (cpu_online(cpu)) { + if (cpu == smp_processor_id()) { + __imsic_local_sync(lpriv); + return; + } + + if (!timer_pending(&lpriv->timer)) { + lpriv->timer.expires = jiffies + 1; + add_timer_on(&lpriv->timer, cpu); + } + } +} +#else +static void __imsic_remote_sync(struct imsic_local_priv *lpriv, unsigned int cpu) +{ + lockdep_assert_held(&lpriv->lock); + __imsic_local_sync(lpriv); +} +#endif + +void imsic_vector_mask(struct imsic_vector *vec) +{ + struct imsic_local_priv *lpriv; + + lpriv = per_cpu_ptr(imsic->lpriv, vec->cpu); + if (WARN_ON_ONCE(&lpriv->vectors[vec->local_id] != vec)) + return; + + /* + * This function is called through Linux irq subsystem with + * irqs disabled so no need to save/restore irq flags. + */ + + raw_spin_lock(&lpriv->lock); + + WRITE_ONCE(vec->enable, false); + bitmap_set(lpriv->dirty_bitmap, vec->local_id, 1); + __imsic_remote_sync(lpriv, vec->cpu); + + raw_spin_unlock(&lpriv->lock); +} + +void imsic_vector_unmask(struct imsic_vector *vec) +{ + struct imsic_local_priv *lpriv; + + lpriv = per_cpu_ptr(imsic->lpriv, vec->cpu); + if (WARN_ON_ONCE(&lpriv->vectors[vec->local_id] != vec)) + return; + + /* + * This function is called through Linux irq subsystem with + * irqs disabled so no need to save/restore irq flags. 
+ */ + + raw_spin_lock(&lpriv->lock); + + WRITE_ONCE(vec->enable, true); + bitmap_set(lpriv->dirty_bitmap, vec->local_id, 1); + __imsic_remote_sync(lpriv, vec->cpu); + + raw_spin_unlock(&lpriv->lock); +} + +static bool imsic_vector_move_update(struct imsic_local_priv *lpriv, struct imsic_vector *vec, + bool new_enable, struct imsic_vector *new_move) +{ + unsigned long flags; + bool enabled; + + raw_spin_lock_irqsave(&lpriv->lock, flags); + + /* Update enable and move details */ + enabled = READ_ONCE(vec->enable); + WRITE_ONCE(vec->enable, new_enable); + WRITE_ONCE(vec->move, new_move); + + /* Mark the vector as dirty and synchronize */ + bitmap_set(lpriv->dirty_bitmap, vec->local_id, 1); + __imsic_remote_sync(lpriv, vec->cpu); + + raw_spin_unlock_irqrestore(&lpriv->lock, flags); + + return enabled; +} + +void imsic_vector_move(struct imsic_vector *old_vec, struct imsic_vector *new_vec) +{ + struct imsic_local_priv *old_lpriv, *new_lpriv; + bool enabled; + + if (WARN_ON_ONCE(old_vec->cpu == new_vec->cpu)) + return; + + old_lpriv = per_cpu_ptr(imsic->lpriv, old_vec->cpu); + if (WARN_ON_ONCE(&old_lpriv->vectors[old_vec->local_id] != old_vec)) + return; + + new_lpriv = per_cpu_ptr(imsic->lpriv, new_vec->cpu); + if (WARN_ON_ONCE(&new_lpriv->vectors[new_vec->local_id] != new_vec)) + return; + + /* + * Move and re-trigger the new vector based on the pending + * state of the old vector because we might get a device + * interrupt on the old vector while device was being moved + * to the new vector. + */ + enabled = imsic_vector_move_update(old_lpriv, old_vec, false, new_vec); + imsic_vector_move_update(new_lpriv, new_vec, enabled, new_vec); +} + +#ifdef CONFIG_GENERIC_IRQ_DEBUGFS +void imsic_vector_debug_show(struct seq_file *m, struct imsic_vector *vec, int ind) +{ + struct imsic_local_priv *lpriv; + struct imsic_vector *mvec; + bool is_enabled; + + lpriv = per_cpu_ptr(imsic->lpriv, vec->cpu); + if (WARN_ON_ONCE(&lpriv->vectors[vec->local_id] != vec)) + return; + + is_enabled = imsic_vector_isenabled(vec); + mvec = imsic_vector_get_move(vec); + + seq_printf(m, "%*starget_cpu : %5u\n", ind, "", vec->cpu); + seq_printf(m, "%*starget_local_id : %5u\n", ind, "", vec->local_id); + seq_printf(m, "%*sis_reserved : %5u\n", ind, "", + (vec->local_id <= IMSIC_IPI_ID) ? 1 : 0); + seq_printf(m, "%*sis_enabled : %5u\n", ind, "", is_enabled ? 1 : 0); + seq_printf(m, "%*sis_move_pending : %5u\n", ind, "", mvec ? 
1 : 0); + if (mvec) { + seq_printf(m, "%*smove_cpu : %5u\n", ind, "", mvec->cpu); + seq_printf(m, "%*smove_local_id : %5u\n", ind, "", mvec->local_id); + } +} + +void imsic_vector_debug_show_summary(struct seq_file *m, int ind) +{ + irq_matrix_debug_show(m, imsic->matrix, ind); +} +#endif + +struct imsic_vector *imsic_vector_from_local_id(unsigned int cpu, unsigned int local_id) +{ + struct imsic_local_priv *lpriv = per_cpu_ptr(imsic->lpriv, cpu); + + if (!lpriv || imsic->global.nr_ids < local_id) + return NULL; + + return &lpriv->vectors[local_id]; +} + +struct imsic_vector *imsic_vector_alloc(unsigned int hwirq, const struct cpumask *mask) +{ + struct imsic_vector *vec = NULL; + struct imsic_local_priv *lpriv; + unsigned long flags; + unsigned int cpu; + int local_id; + + raw_spin_lock_irqsave(&imsic->matrix_lock, flags); + local_id = irq_matrix_alloc(imsic->matrix, mask, false, &cpu); + raw_spin_unlock_irqrestore(&imsic->matrix_lock, flags); + if (local_id < 0) + return NULL; + + lpriv = per_cpu_ptr(imsic->lpriv, cpu); + vec = &lpriv->vectors[local_id]; + vec->hwirq = hwirq; + vec->enable = false; + vec->move = NULL; + + return vec; +} + +void imsic_vector_free(struct imsic_vector *vec) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&imsic->matrix_lock, flags); + vec->hwirq = UINT_MAX; + irq_matrix_free(imsic->matrix, vec->cpu, vec->local_id, false); + raw_spin_unlock_irqrestore(&imsic->matrix_lock, flags); +} + +static void __init imsic_local_cleanup(void) +{ + struct imsic_local_priv *lpriv; + int cpu; + + for_each_possible_cpu(cpu) { + lpriv = per_cpu_ptr(imsic->lpriv, cpu); + + bitmap_free(lpriv->dirty_bitmap); + kfree(lpriv->vectors); + } + + free_percpu(imsic->lpriv); +} + +static int __init imsic_local_init(void) +{ + struct imsic_global_config *global = &imsic->global; + struct imsic_local_priv *lpriv; + struct imsic_vector *vec; + int cpu, i; + + /* Allocate per-CPU private state */ + imsic->lpriv = alloc_percpu(typeof(*imsic->lpriv)); + if (!imsic->lpriv) + return -ENOMEM; + + /* Setup per-CPU private state */ + for_each_possible_cpu(cpu) { + lpriv = per_cpu_ptr(imsic->lpriv, cpu); + + raw_spin_lock_init(&lpriv->lock); + + /* Allocate dirty bitmap */ + lpriv->dirty_bitmap = bitmap_zalloc(global->nr_ids + 1, GFP_KERNEL); + if (!lpriv->dirty_bitmap) + goto fail_local_cleanup; + +#ifdef CONFIG_SMP + /* Setup lazy timer for synchronization */ + timer_setup(&lpriv->timer, imsic_local_timer_callback, TIMER_PINNED); +#endif + + /* Allocate vector array */ + lpriv->vectors = kcalloc(global->nr_ids + 1, sizeof(*lpriv->vectors), + GFP_KERNEL); + if (!lpriv->vectors) + goto fail_local_cleanup; + + /* Setup vector array */ + for (i = 0; i <= global->nr_ids; i++) { + vec = &lpriv->vectors[i]; + vec->cpu = cpu; + vec->local_id = i; + vec->hwirq = UINT_MAX; + } + } + + return 0; + +fail_local_cleanup: + imsic_local_cleanup(); + return -ENOMEM; +} + +void imsic_state_online(void) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&imsic->matrix_lock, flags); + irq_matrix_online(imsic->matrix); + raw_spin_unlock_irqrestore(&imsic->matrix_lock, flags); +} + +void imsic_state_offline(void) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&imsic->matrix_lock, flags); + irq_matrix_offline(imsic->matrix); + raw_spin_unlock_irqrestore(&imsic->matrix_lock, flags); + +#ifdef CONFIG_SMP + struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv); + + raw_spin_lock_irqsave(&lpriv->lock, flags); + WARN_ON_ONCE(try_to_del_timer_sync(&lpriv->timer) < 0); + 
raw_spin_unlock_irqrestore(&lpriv->lock, flags); +#endif +} + +static int __init imsic_matrix_init(void) +{ + struct imsic_global_config *global = &imsic->global; + + raw_spin_lock_init(&imsic->matrix_lock); + imsic->matrix = irq_alloc_matrix(global->nr_ids + 1, + 0, global->nr_ids + 1); + if (!imsic->matrix) + return -ENOMEM; + + /* Reserve ID#0 because it is special and never implemented */ + irq_matrix_assign_system(imsic->matrix, 0, false); + + /* Reserve IPI ID because it is special and used internally */ + irq_matrix_assign_system(imsic->matrix, IMSIC_IPI_ID, false); + + return 0; +} + +static int __init imsic_get_parent_hartid(struct fwnode_handle *fwnode, + u32 index, unsigned long *hartid) +{ + struct of_phandle_args parent; + int rc; + + /* + * Currently, only OF fwnode is supported so extend this + * function for ACPI support. + */ + if (!is_of_node(fwnode)) + return -EINVAL; + + rc = of_irq_parse_one(to_of_node(fwnode), index, &parent); + if (rc) + return rc; + + /* + * Skip interrupts other than external interrupts for + * current privilege level. + */ + if (parent.args[0] != RV_IRQ_EXT) + return -EINVAL; + + return riscv_of_parent_hartid(parent.np, hartid); +} + +static int __init imsic_get_mmio_resource(struct fwnode_handle *fwnode, + u32 index, struct resource *res) +{ + /* + * Currently, only OF fwnode is supported so extend this + * function for ACPI support. + */ + if (!is_of_node(fwnode)) + return -EINVAL; + + return of_address_to_resource(to_of_node(fwnode), index, res); +} + +static int __init imsic_parse_fwnode(struct fwnode_handle *fwnode, + struct imsic_global_config *global, + u32 *nr_parent_irqs, + u32 *nr_mmios) +{ + unsigned long hartid; + struct resource res; + int rc; + u32 i; + + /* + * Currently, only OF fwnode is supported so extend this + * function for ACPI support. + */ + if (!is_of_node(fwnode)) + return -EINVAL; + + *nr_parent_irqs = 0; + *nr_mmios = 0; + + /* Find number of parent interrupts */ + while (!imsic_get_parent_hartid(fwnode, *nr_parent_irqs, &hartid)) + (*nr_parent_irqs)++; + if (!*nr_parent_irqs) { + pr_err("%pfwP: no parent irqs available\n", fwnode); + return -EINVAL; + } + + /* Find number of guest index bits in MSI address */ + rc = of_property_read_u32(to_of_node(fwnode), "riscv,guest-index-bits", + &global->guest_index_bits); + if (rc) + global->guest_index_bits = 0; + + /* Find number of HART index bits */ + rc = of_property_read_u32(to_of_node(fwnode), "riscv,hart-index-bits", + &global->hart_index_bits); + if (rc) { + /* Assume default value */ + global->hart_index_bits = __fls(*nr_parent_irqs); + if (BIT(global->hart_index_bits) < *nr_parent_irqs) + global->hart_index_bits++; + } + + /* Find number of group index bits */ + rc = of_property_read_u32(to_of_node(fwnode), "riscv,group-index-bits", + &global->group_index_bits); + if (rc) + global->group_index_bits = 0; + + /* + * Find first bit position of group index. + * If not specified assumed the default APLIC-IMSIC configuration. 
+ */ + rc = of_property_read_u32(to_of_node(fwnode), "riscv,group-index-shift", + &global->group_index_shift); + if (rc) + global->group_index_shift = IMSIC_MMIO_PAGE_SHIFT * 2; + + /* Find number of interrupt identities */ + rc = of_property_read_u32(to_of_node(fwnode), "riscv,num-ids", + &global->nr_ids); + if (rc) { + pr_err("%pfwP: number of interrupt identities not found\n", fwnode); + return rc; + } + + /* Find number of guest interrupt identities */ + rc = of_property_read_u32(to_of_node(fwnode), "riscv,num-guest-ids", + &global->nr_guest_ids); + if (rc) + global->nr_guest_ids = global->nr_ids; + + /* Sanity check guest index bits */ + i = BITS_PER_LONG - IMSIC_MMIO_PAGE_SHIFT; + if (i < global->guest_index_bits) { + pr_err("%pfwP: guest index bits too big\n", fwnode); + return -EINVAL; + } + + /* Sanity check HART index bits */ + i = BITS_PER_LONG - IMSIC_MMIO_PAGE_SHIFT - global->guest_index_bits; + if (i < global->hart_index_bits) { + pr_err("%pfwP: HART index bits too big\n", fwnode); + return -EINVAL; + } + + /* Sanity check group index bits */ + i = BITS_PER_LONG - IMSIC_MMIO_PAGE_SHIFT - + global->guest_index_bits - global->hart_index_bits; + if (i < global->group_index_bits) { + pr_err("%pfwP: group index bits too big\n", fwnode); + return -EINVAL; + } + + /* Sanity check group index shift */ + i = global->group_index_bits + global->group_index_shift - 1; + if (i >= BITS_PER_LONG) { + pr_err("%pfwP: group index shift too big\n", fwnode); + return -EINVAL; + } + + /* Sanity check number of interrupt identities */ + if (global->nr_ids < IMSIC_MIN_ID || + global->nr_ids >= IMSIC_MAX_ID || + (global->nr_ids & IMSIC_MIN_ID) != IMSIC_MIN_ID) { + pr_err("%pfwP: invalid number of interrupt identities\n", fwnode); + return -EINVAL; + } + + /* Sanity check number of guest interrupt identities */ + if (global->nr_guest_ids < IMSIC_MIN_ID || + global->nr_guest_ids >= IMSIC_MAX_ID || + (global->nr_guest_ids & IMSIC_MIN_ID) != IMSIC_MIN_ID) { + pr_err("%pfwP: invalid number of guest interrupt identities\n", fwnode); + return -EINVAL; + } + + /* Compute base address */ + rc = imsic_get_mmio_resource(fwnode, 0, &res); + if (rc) { + pr_err("%pfwP: first MMIO resource not found\n", fwnode); + return -EINVAL; + } + global->base_addr = res.start; + global->base_addr &= ~(BIT(global->guest_index_bits + + global->hart_index_bits + + IMSIC_MMIO_PAGE_SHIFT) - 1); + global->base_addr &= ~((BIT(global->group_index_bits) - 1) << + global->group_index_shift); + + /* Find number of MMIO register sets */ + while (!imsic_get_mmio_resource(fwnode, *nr_mmios, &res)) + (*nr_mmios)++; + + return 0; +} + +int __init imsic_setup_state(struct fwnode_handle *fwnode) +{ + u32 i, j, index, nr_parent_irqs, nr_mmios, nr_handlers = 0; + struct imsic_global_config *global; + struct imsic_local_config *local; + void __iomem **mmios_va = NULL; + struct resource *mmios = NULL; + unsigned long reloff, hartid; + phys_addr_t base_addr; + int rc, cpu; + + /* + * Only one IMSIC instance allowed in a platform for clean + * implementation of SMP IRQ affinity and per-CPU IPIs. + * + * This means on a multi-socket (or multi-die) platform we + * will have multiple MMIO regions for one IMSIC instance. 
+ */ + if (imsic) { + pr_err("%pfwP: already initialized hence ignoring\n", fwnode); + return -EALREADY; + } + + if (!riscv_isa_extension_available(NULL, SxAIA)) { + pr_err("%pfwP: AIA support not available\n", fwnode); + return -ENODEV; + } + + imsic = kzalloc(sizeof(*imsic), GFP_KERNEL); + if (!imsic) + return -ENOMEM; + imsic->fwnode = fwnode; + global = &imsic->global; + + global->local = alloc_percpu(typeof(*global->local)); + if (!global->local) { + rc = -ENOMEM; + goto out_free_priv; + } + + /* Parse IMSIC fwnode */ + rc = imsic_parse_fwnode(fwnode, global, &nr_parent_irqs, &nr_mmios); + if (rc) + goto out_free_local; + + /* Allocate MMIO resource array */ + mmios = kcalloc(nr_mmios, sizeof(*mmios), GFP_KERNEL); + if (!mmios) { + rc = -ENOMEM; + goto out_free_local; + } + + /* Allocate MMIO virtual address array */ + mmios_va = kcalloc(nr_mmios, sizeof(*mmios_va), GFP_KERNEL); + if (!mmios_va) { + rc = -ENOMEM; + goto out_iounmap; + } + + /* Parse and map MMIO register sets */ + for (i = 0; i < nr_mmios; i++) { + rc = imsic_get_mmio_resource(fwnode, i, &mmios[i]); + if (rc) { + pr_err("%pfwP: unable to parse MMIO regset %d\n", fwnode, i); + goto out_iounmap; + } + + base_addr = mmios[i].start; + base_addr &= ~(BIT(global->guest_index_bits + + global->hart_index_bits + + IMSIC_MMIO_PAGE_SHIFT) - 1); + base_addr &= ~((BIT(global->group_index_bits) - 1) << + global->group_index_shift); + if (base_addr != global->base_addr) { + rc = -EINVAL; + pr_err("%pfwP: address mismatch for regset %d\n", fwnode, i); + goto out_iounmap; + } + + mmios_va[i] = ioremap(mmios[i].start, resource_size(&mmios[i])); + if (!mmios_va[i]) { + rc = -EIO; + pr_err("%pfwP: unable to map MMIO regset %d\n", fwnode, i); + goto out_iounmap; + } + } + + /* Initialize local (or per-CPU )state */ + rc = imsic_local_init(); + if (rc) { + pr_err("%pfwP: failed to initialize local state\n", + fwnode); + goto out_iounmap; + } + + /* Configure handlers for target CPUs */ + for (i = 0; i < nr_parent_irqs; i++) { + rc = imsic_get_parent_hartid(fwnode, i, &hartid); + if (rc) { + pr_warn("%pfwP: hart ID for parent irq%d not found\n", fwnode, i); + continue; + } + + cpu = riscv_hartid_to_cpuid(hartid); + if (cpu < 0) { + pr_warn("%pfwP: invalid cpuid for parent irq%d\n", fwnode, i); + continue; + } + + /* Find MMIO location of MSI page */ + index = nr_mmios; + reloff = i * BIT(global->guest_index_bits) * + IMSIC_MMIO_PAGE_SZ; + for (j = 0; nr_mmios; j++) { + if (reloff < resource_size(&mmios[j])) { + index = j; + break; + } + + /* + * MMIO region size may not be aligned to + * BIT(global->guest_index_bits) * IMSIC_MMIO_PAGE_SZ + * if holes are present. 
+ */ + reloff -= ALIGN(resource_size(&mmios[j]), + BIT(global->guest_index_bits) * IMSIC_MMIO_PAGE_SZ); + } + if (index >= nr_mmios) { + pr_warn("%pfwP: MMIO not found for parent irq%d\n", fwnode, i); + continue; + } + + local = per_cpu_ptr(global->local, cpu); + local->msi_pa = mmios[index].start + reloff; + local->msi_va = mmios_va[index] + reloff; + + nr_handlers++; + } + + /* If no CPU handlers found then can't take interrupts */ + if (!nr_handlers) { + pr_err("%pfwP: No CPU handlers found\n", fwnode); + rc = -ENODEV; + goto out_local_cleanup; + } + + /* Initialize matrix allocator */ + rc = imsic_matrix_init(); + if (rc) { + pr_err("%pfwP: failed to create matrix allocator\n", fwnode); + goto out_local_cleanup; + } + + /* We don't need MMIO arrays anymore so let's free-up */ + kfree(mmios_va); + kfree(mmios); + + return 0; + +out_local_cleanup: + imsic_local_cleanup(); +out_iounmap: + for (i = 0; i < nr_mmios; i++) { + if (mmios_va[i]) + iounmap(mmios_va[i]); + } + kfree(mmios_va); + kfree(mmios); +out_free_local: + free_percpu(imsic->global.local); +out_free_priv: + kfree(imsic); + imsic = NULL; + return rc; +} diff --git a/drivers/irqchip/irq-riscv-imsic-state.h b/drivers/irqchip/irq-riscv-imsic-state.h new file mode 100644 index 000000000000..8ec9649d0d01 --- /dev/null +++ b/drivers/irqchip/irq-riscv-imsic-state.h @@ -0,0 +1,107 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2021 Western Digital Corporation or its affiliates. + * Copyright (C) 2022 Ventana Micro Systems Inc. + */ + +#ifndef _IRQ_RISCV_IMSIC_STATE_H +#define _IRQ_RISCV_IMSIC_STATE_H + +#include +#include +#include +#include + +#define IMSIC_IPI_ID 1 +#define IMSIC_NR_IPI 8 + +struct imsic_vector { + /* Fixed details of the vector */ + unsigned int cpu; + unsigned int local_id; + /* Details saved by driver in the vector */ + unsigned int hwirq; + /* Details accessed using local lock held */ + bool enable; + struct imsic_vector *move; +}; + +struct imsic_local_priv { + /* Local lock to protect vector enable/move variables and dirty bitmap */ + raw_spinlock_t lock; + + /* Local dirty bitmap for synchronization */ + unsigned long *dirty_bitmap; + +#ifdef CONFIG_SMP + /* Local timer for synchronization */ + struct timer_list timer; +#endif + + /* Local vector table */ + struct imsic_vector *vectors; +}; + +struct imsic_priv { + /* Device details */ + struct fwnode_handle *fwnode; + + /* Global configuration common for all HARTs */ + struct imsic_global_config global; + + /* Per-CPU state */ + struct imsic_local_priv __percpu *lpriv; + + /* State of IRQ matrix allocator */ + raw_spinlock_t matrix_lock; + struct irq_matrix *matrix; + + /* IRQ domains (created by platform driver) */ + struct irq_domain *base_domain; +}; + +extern struct imsic_priv *imsic; + +void __imsic_eix_update(unsigned long base_id, unsigned long num_id, bool pend, bool val); + +static inline void __imsic_id_set_enable(unsigned long id) +{ + __imsic_eix_update(id, 1, false, true); +} + +static inline void __imsic_id_clear_enable(unsigned long id) +{ + __imsic_eix_update(id, 1, false, false); +} + +void imsic_local_sync_all(void); +void imsic_local_delivery(bool enable); + +void imsic_vector_mask(struct imsic_vector *vec); +void imsic_vector_unmask(struct imsic_vector *vec); + +static inline bool imsic_vector_isenabled(struct imsic_vector *vec) +{ + return READ_ONCE(vec->enable); +} + +static inline struct imsic_vector *imsic_vector_get_move(struct imsic_vector *vec) +{ + return READ_ONCE(vec->move); +} + +void 
imsic_vector_move(struct imsic_vector *old_vec, struct imsic_vector *new_vec); + +struct imsic_vector *imsic_vector_from_local_id(unsigned int cpu, unsigned int local_id); + +struct imsic_vector *imsic_vector_alloc(unsigned int hwirq, const struct cpumask *mask); +void imsic_vector_free(struct imsic_vector *vector); + +void imsic_vector_debug_show(struct seq_file *m, struct imsic_vector *vec, int ind); +void imsic_vector_debug_show_summary(struct seq_file *m, int ind); + +void imsic_state_online(void); +void imsic_state_offline(void); +int imsic_setup_state(struct fwnode_handle *fwnode); + +#endif diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 35e78ddb2b37..7a5785f405b6 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -146,6 +146,7 @@ enum cpuhp_state { CPUHP_AP_IRQ_MIPS_GIC_STARTING, CPUHP_AP_IRQ_LOONGARCH_STARTING, CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING, + CPUHP_AP_IRQ_RISCV_IMSIC_STARTING, CPUHP_AP_ARM_MVEBU_COHERENCY, CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING, CPUHP_AP_PERF_X86_STARTING, diff --git a/include/linux/irqchip/riscv-imsic.h b/include/linux/irqchip/riscv-imsic.h new file mode 100644 index 000000000000..faf0b800b1b0 --- /dev/null +++ b/include/linux/irqchip/riscv-imsic.h @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2021 Western Digital Corporation or its affiliates. + * Copyright (C) 2022 Ventana Micro Systems Inc. + */ +#ifndef __LINUX_IRQCHIP_RISCV_IMSIC_H +#define __LINUX_IRQCHIP_RISCV_IMSIC_H + +#include +#include +#include + +#define IMSIC_MMIO_PAGE_SHIFT 12 +#define IMSIC_MMIO_PAGE_SZ BIT(IMSIC_MMIO_PAGE_SHIFT) +#define IMSIC_MMIO_PAGE_LE 0x00 +#define IMSIC_MMIO_PAGE_BE 0x04 + +#define IMSIC_MIN_ID 63 +#define IMSIC_MAX_ID 2048 + +#define IMSIC_EIDELIVERY 0x70 + +#define IMSIC_EITHRESHOLD 0x72 + +#define IMSIC_EIP0 0x80 +#define IMSIC_EIP63 0xbf +#define IMSIC_EIPx_BITS 32 + +#define IMSIC_EIE0 0xc0 +#define IMSIC_EIE63 0xff +#define IMSIC_EIEx_BITS 32 + +#define IMSIC_FIRST IMSIC_EIDELIVERY +#define IMSIC_LAST IMSIC_EIE63 + +#define IMSIC_MMIO_SETIPNUM_LE 0x00 +#define IMSIC_MMIO_SETIPNUM_BE 0x04 + +struct imsic_local_config { + phys_addr_t msi_pa; + void __iomem *msi_va; +}; + +struct imsic_global_config { + /* + * MSI Target Address Scheme + * + * XLEN-1 12 0 + * | | | + * ------------------------------------------------------------- + * |xxxxxx|Group Index|xxxxxxxxxxx|HART Index|Guest Index| 0 | + * ------------------------------------------------------------- + */ + + /* Bits representing Guest index, HART index, and Group index */ + u32 guest_index_bits; + u32 hart_index_bits; + u32 group_index_bits; + u32 group_index_shift; + + /* Global base address matching all target MSI addresses */ + phys_addr_t base_addr; + + /* Number of interrupt identities */ + u32 nr_ids; + + /* Number of guest interrupt identities */ + u32 nr_guest_ids; + + /* Per-CPU IMSIC addresses */ + struct imsic_local_config __percpu *local; +}; + +#ifdef CONFIG_RISCV_IMSIC + +const struct imsic_global_config *imsic_get_global_config(void); + +#else + +static inline const struct imsic_global_config *imsic_get_global_config(void) +{ + return NULL; +} + +#endif + +#endif -- cgit v1.2.3 From 027e125acdbad79e9a7274940e8bf92299b208af Mon Sep 17 00:00:00 2001 From: Anup Patel Date: Thu, 7 Mar 2024 19:33:01 +0530 Subject: irqchip/riscv-imsic: Add device MSI domain support for platform devices MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The Linux platform MSI support allows 
per-device MSI domains so add a platform irqchip driver for RISC-V IMSIC which provides a base IRQ domain with MSI parent support for platform device domains. The IMSIC platform driver assumes that the IMSIC state is already initialized by the IMSIC early driver. Signed-off-by: Anup Patel Signed-off-by: Thomas Gleixner Tested-by: Björn Töpel Reviewed-by: Björn Töpel Link: https://lore.kernel.org/r/20240307140307.646078-4-apatel@ventanamicro.com --- drivers/irqchip/Makefile | 2 +- drivers/irqchip/irq-riscv-imsic-platform.c | 343 +++++++++++++++++++++++++++++ drivers/irqchip/irq-riscv-imsic-state.h | 1 + 3 files changed, 345 insertions(+), 1 deletion(-) create mode 100644 drivers/irqchip/irq-riscv-imsic-platform.c (limited to 'drivers') diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile index 972aa9fecf96..c1390980b4eb 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile @@ -95,7 +95,7 @@ obj-$(CONFIG_QCOM_MPM) += irq-qcom-mpm.o obj-$(CONFIG_CSKY_MPINTC) += irq-csky-mpintc.o obj-$(CONFIG_CSKY_APB_INTC) += irq-csky-apb-intc.o obj-$(CONFIG_RISCV_INTC) += irq-riscv-intc.o -obj-$(CONFIG_RISCV_IMSIC) += irq-riscv-imsic-state.o irq-riscv-imsic-early.o +obj-$(CONFIG_RISCV_IMSIC) += irq-riscv-imsic-state.o irq-riscv-imsic-early.o irq-riscv-imsic-platform.o obj-$(CONFIG_SIFIVE_PLIC) += irq-sifive-plic.o obj-$(CONFIG_STARFIVE_JH8100_INTC) += irq-starfive-jh8100-intc.o obj-$(CONFIG_IMX_IRQSTEER) += irq-imx-irqsteer.o diff --git a/drivers/irqchip/irq-riscv-imsic-platform.c b/drivers/irqchip/irq-riscv-imsic-platform.c new file mode 100644 index 000000000000..35291bf90d65 --- /dev/null +++ b/drivers/irqchip/irq-riscv-imsic-platform.c @@ -0,0 +1,343 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Western Digital Corporation or its affiliates. + * Copyright (C) 2022 Ventana Micro Systems Inc. 
+ */ + +#define pr_fmt(fmt) "riscv-imsic: " fmt +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "irq-riscv-imsic-state.h" + +static bool imsic_cpu_page_phys(unsigned int cpu, unsigned int guest_index, + phys_addr_t *out_msi_pa) +{ + struct imsic_global_config *global; + struct imsic_local_config *local; + + global = &imsic->global; + local = per_cpu_ptr(global->local, cpu); + + if (BIT(global->guest_index_bits) <= guest_index) + return false; + + if (out_msi_pa) + *out_msi_pa = local->msi_pa + (guest_index * IMSIC_MMIO_PAGE_SZ); + + return true; +} + +static void imsic_irq_mask(struct irq_data *d) +{ + imsic_vector_mask(irq_data_get_irq_chip_data(d)); +} + +static void imsic_irq_unmask(struct irq_data *d) +{ + imsic_vector_unmask(irq_data_get_irq_chip_data(d)); +} + +static int imsic_irq_retrigger(struct irq_data *d) +{ + struct imsic_vector *vec = irq_data_get_irq_chip_data(d); + struct imsic_local_config *local; + + if (WARN_ON(!vec)) + return -ENOENT; + + local = per_cpu_ptr(imsic->global.local, vec->cpu); + writel_relaxed(vec->local_id, local->msi_va); + return 0; +} + +static void imsic_irq_compose_vector_msg(struct imsic_vector *vec, struct msi_msg *msg) +{ + phys_addr_t msi_addr; + + if (WARN_ON(!vec)) + return; + + if (WARN_ON(!imsic_cpu_page_phys(vec->cpu, 0, &msi_addr))) + return; + + msg->address_hi = upper_32_bits(msi_addr); + msg->address_lo = lower_32_bits(msi_addr); + msg->data = vec->local_id; +} + +static void imsic_irq_compose_msg(struct irq_data *d, struct msi_msg *msg) +{ + imsic_irq_compose_vector_msg(irq_data_get_irq_chip_data(d), msg); +} + +#ifdef CONFIG_SMP +static void imsic_msi_update_msg(struct irq_data *d, struct imsic_vector *vec) +{ + struct msi_msg msg = { }; + + imsic_irq_compose_vector_msg(vec, &msg); + irq_data_get_irq_chip(d)->irq_write_msi_msg(d, &msg); +} + +static int imsic_irq_set_affinity(struct irq_data *d, const struct cpumask *mask_val, + bool force) +{ + struct imsic_vector *old_vec, *new_vec; + struct irq_data *pd = d->parent_data; + + old_vec = irq_data_get_irq_chip_data(pd); + if (WARN_ON(!old_vec)) + return -ENOENT; + + /* If old vector cpu belongs to the target cpumask then do nothing */ + if (cpumask_test_cpu(old_vec->cpu, mask_val)) + return IRQ_SET_MASK_OK_DONE; + + /* If move is already in-flight then return failure */ + if (imsic_vector_get_move(old_vec)) + return -EBUSY; + + /* Get a new vector on the desired set of CPUs */ + new_vec = imsic_vector_alloc(old_vec->hwirq, mask_val); + if (!new_vec) + return -ENOSPC; + + /* Point device to the new vector */ + imsic_msi_update_msg(d, new_vec); + + /* Update irq descriptors with the new vector */ + pd->chip_data = new_vec; + + /* Update effective affinity of parent irq data */ + irq_data_update_effective_affinity(pd, cpumask_of(new_vec->cpu)); + + /* Move state of the old vector to the new vector */ + imsic_vector_move(old_vec, new_vec); + + return IRQ_SET_MASK_OK_DONE; +} +#endif + +static struct irq_chip imsic_irq_base_chip = { + .name = "IMSIC", + .irq_mask = imsic_irq_mask, + .irq_unmask = imsic_irq_unmask, + .irq_retrigger = imsic_irq_retrigger, + .irq_compose_msi_msg = imsic_irq_compose_msg, + .flags = IRQCHIP_SKIP_SET_WAKE | + IRQCHIP_MASK_ON_SUSPEND, +}; + +static int imsic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *args) +{ + struct imsic_vector *vec; + + /* Multi-MSI is not supported yet. 
*/ + if (nr_irqs > 1) + return -EOPNOTSUPP; + + vec = imsic_vector_alloc(virq, cpu_online_mask); + if (!vec) + return -ENOSPC; + + irq_domain_set_info(domain, virq, virq, &imsic_irq_base_chip, vec, + handle_simple_irq, NULL, NULL); + irq_set_noprobe(virq); + irq_set_affinity(virq, cpu_online_mask); + + return 0; +} + +static void imsic_irq_domain_free(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs) +{ + struct irq_data *d = irq_domain_get_irq_data(domain, virq); + + imsic_vector_free(irq_data_get_irq_chip_data(d)); + irq_domain_free_irqs_parent(domain, virq, nr_irqs); +} + +static int imsic_irq_domain_select(struct irq_domain *domain, struct irq_fwspec *fwspec, + enum irq_domain_bus_token bus_token) +{ + const struct msi_parent_ops *ops = domain->msi_parent_ops; + u32 busmask = BIT(bus_token); + + if (fwspec->fwnode != domain->fwnode || fwspec->param_count != 0) + return 0; + + /* Handle pure domain searches */ + if (bus_token == ops->bus_select_token) + return 1; + + return !!(ops->bus_select_mask & busmask); +} + +#ifdef CONFIG_GENERIC_IRQ_DEBUGFS +static void imsic_irq_debug_show(struct seq_file *m, struct irq_domain *d, + struct irq_data *irqd, int ind) +{ + if (!irqd) { + imsic_vector_debug_show_summary(m, ind); + return; + } + + imsic_vector_debug_show(m, irq_data_get_irq_chip_data(irqd), ind); +} +#endif + +static const struct irq_domain_ops imsic_base_domain_ops = { + .alloc = imsic_irq_domain_alloc, + .free = imsic_irq_domain_free, + .select = imsic_irq_domain_select, +#ifdef CONFIG_GENERIC_IRQ_DEBUGFS + .debug_show = imsic_irq_debug_show, +#endif +}; + +static bool imsic_init_dev_msi_info(struct device *dev, + struct irq_domain *domain, + struct irq_domain *real_parent, + struct msi_domain_info *info) +{ + const struct msi_parent_ops *pops = real_parent->msi_parent_ops; + + /* MSI parent domain specific settings */ + switch (real_parent->bus_token) { + case DOMAIN_BUS_NEXUS: + if (WARN_ON_ONCE(domain != real_parent)) + return false; +#ifdef CONFIG_SMP + info->chip->irq_set_affinity = imsic_irq_set_affinity; +#endif + break; + default: + WARN_ON_ONCE(1); + return false; + } + + /* Is the target supported? */ + switch (info->bus_token) { + case DOMAIN_BUS_DEVICE_MSI: + /* + * Per-device MSI should never have any MSI feature bits + * set. It's sole purpose is to create a dumb interrupt + * chip which has a device specific irq_write_msi_msg() + * callback. + */ + if (WARN_ON_ONCE(info->flags)) + return false; + + /* Core managed MSI descriptors */ + info->flags |= MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS | + MSI_FLAG_FREE_MSI_DESCS; + break; + case DOMAIN_BUS_WIRED_TO_MSI: + break; + default: + WARN_ON_ONCE(1); + return false; + } + + /* Use hierarchial chip operations re-trigger */ + info->chip->irq_retrigger = irq_chip_retrigger_hierarchy; + + /* + * Mask out the domain specific MSI feature flags which are not + * supported by the real parent. 
+ */ + info->flags &= pops->supported_flags; + + /* Enforce the required flags */ + info->flags |= pops->required_flags; + + return true; +} + +#define MATCH_PLATFORM_MSI BIT(DOMAIN_BUS_PLATFORM_MSI) + +static const struct msi_parent_ops imsic_msi_parent_ops = { + .supported_flags = MSI_GENERIC_FLAGS_MASK, + .required_flags = MSI_FLAG_USE_DEF_DOM_OPS | + MSI_FLAG_USE_DEF_CHIP_OPS, + .bus_select_token = DOMAIN_BUS_NEXUS, + .bus_select_mask = MATCH_PLATFORM_MSI, + .init_dev_msi_info = imsic_init_dev_msi_info, +}; + +int imsic_irqdomain_init(void) +{ + struct imsic_global_config *global; + + if (!imsic || !imsic->fwnode) { + pr_err("early driver not probed\n"); + return -ENODEV; + } + + if (imsic->base_domain) { + pr_err("%pfwP: irq domain already created\n", imsic->fwnode); + return -ENODEV; + } + + /* Create Base IRQ domain */ + imsic->base_domain = irq_domain_create_tree(imsic->fwnode, + &imsic_base_domain_ops, imsic); + if (!imsic->base_domain) { + pr_err("%pfwP: failed to create IMSIC base domain\n", imsic->fwnode); + return -ENOMEM; + } + imsic->base_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT; + imsic->base_domain->msi_parent_ops = &imsic_msi_parent_ops; + + irq_domain_update_bus_token(imsic->base_domain, DOMAIN_BUS_NEXUS); + + global = &imsic->global; + pr_info("%pfwP: hart-index-bits: %d, guest-index-bits: %d\n", + imsic->fwnode, global->hart_index_bits, global->guest_index_bits); + pr_info("%pfwP: group-index-bits: %d, group-index-shift: %d\n", + imsic->fwnode, global->group_index_bits, global->group_index_shift); + pr_info("%pfwP: per-CPU IDs %d at base PPN %pa\n", + imsic->fwnode, global->nr_ids, &global->base_addr); + pr_info("%pfwP: total %d interrupts available\n", + imsic->fwnode, num_possible_cpus() * (global->nr_ids - 1)); + + return 0; +} + +static int imsic_platform_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + + if (imsic && imsic->fwnode != dev->fwnode) { + dev_err(dev, "fwnode mismatch\n"); + return -ENODEV; + } + + return imsic_irqdomain_init(); +} + +static const struct of_device_id imsic_platform_match[] = { + { .compatible = "riscv,imsics" }, + {} +}; + +static struct platform_driver imsic_platform_driver = { + .driver = { + .name = "riscv-imsic", + .of_match_table = imsic_platform_match, + }, + .probe = imsic_platform_probe, +}; +builtin_platform_driver(imsic_platform_driver); diff --git a/drivers/irqchip/irq-riscv-imsic-state.h b/drivers/irqchip/irq-riscv-imsic-state.h index 8ec9649d0d01..5ae2f69b035b 100644 --- a/drivers/irqchip/irq-riscv-imsic-state.h +++ b/drivers/irqchip/irq-riscv-imsic-state.h @@ -103,5 +103,6 @@ void imsic_vector_debug_show_summary(struct seq_file *m, int ind); void imsic_state_online(void); void imsic_state_offline(void); int imsic_setup_state(struct fwnode_handle *fwnode); +int imsic_irqdomain_init(void); #endif -- cgit v1.2.3 From 5c5a71d0434093cd42d09afd4e2032c0b16a7da8 Mon Sep 17 00:00:00 2001 From: Anup Patel Date: Thu, 7 Mar 2024 19:33:02 +0530 Subject: irqchip/riscv-imsic: Add device MSI domain support for PCI devices MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The Linux PCI framework supports per-device MSI domains for PCI devices so extend the IMSIC driver to allow PCI per-device MSI domains. 
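As a hedged usage sketch, not part of the patch: once the IMSIC acts as the MSI parent, a PCI driver allocates MSI-X vectors through the usual PCI API and the per-device MSI domain built on top of the IMSIC base domain takes care of message composition and masking. The device and function names below are illustrative only.

#include <linux/pci.h>

static int example_enable_msix(struct pci_dev *pdev)
{
	int nvec;

	/* Ask the per-device MSI domain for 1..8 MSI-X vectors */
	nvec = pci_alloc_irq_vectors(pdev, 1, 8, PCI_IRQ_MSIX);
	if (nvec < 0)
		return nvec;

	/* pci_irq_vector() converts a vector index into a Linux IRQ number */
	return pci_irq_vector(pdev, 0);
}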
Signed-off-by: Anup Patel Signed-off-by: Thomas Gleixner Tested-by: Björn Töpel Reviewed-by: Björn Töpel Link: https://lore.kernel.org/r/20240307140307.646078-5-apatel@ventanamicro.com --- drivers/irqchip/Kconfig | 7 ++++++ drivers/irqchip/irq-riscv-imsic-platform.c | 35 ++++++++++++++++++++++++++++-- 2 files changed, 40 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index 8610810b80da..8f6ce419ba4c 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -547,6 +547,13 @@ config RISCV_IMSIC select GENERIC_IRQ_MATRIX_ALLOCATOR select GENERIC_MSI_IRQ +config RISCV_IMSIC_PCI + bool + depends on RISCV_IMSIC + depends on PCI + depends on PCI_MSI + default RISCV_IMSIC + config SIFIVE_PLIC bool depends on RISCV diff --git a/drivers/irqchip/irq-riscv-imsic-platform.c b/drivers/irqchip/irq-riscv-imsic-platform.c index 35291bf90d65..1e6dddfd3046 100644 --- a/drivers/irqchip/irq-riscv-imsic-platform.c +++ b/drivers/irqchip/irq-riscv-imsic-platform.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -207,6 +208,28 @@ static const struct irq_domain_ops imsic_base_domain_ops = { #endif }; +#ifdef CONFIG_RISCV_IMSIC_PCI + +static void imsic_pci_mask_irq(struct irq_data *d) +{ + pci_msi_mask_irq(d); + irq_chip_mask_parent(d); +} + +static void imsic_pci_unmask_irq(struct irq_data *d) +{ + irq_chip_unmask_parent(d); + pci_msi_unmask_irq(d); +} + +#define MATCH_PCI_MSI BIT(DOMAIN_BUS_PCI_MSI) + +#else + +#define MATCH_PCI_MSI 0 + +#endif + static bool imsic_init_dev_msi_info(struct device *dev, struct irq_domain *domain, struct irq_domain *real_parent, @@ -230,6 +253,13 @@ static bool imsic_init_dev_msi_info(struct device *dev, /* Is the target supported? */ switch (info->bus_token) { +#ifdef CONFIG_RISCV_IMSIC_PCI + case DOMAIN_BUS_PCI_DEVICE_MSI: + case DOMAIN_BUS_PCI_DEVICE_MSIX: + info->chip->irq_mask = imsic_pci_mask_irq; + info->chip->irq_unmask = imsic_pci_unmask_irq; + break; +#endif case DOMAIN_BUS_DEVICE_MSI: /* * Per-device MSI should never have any MSI feature bits @@ -269,11 +299,12 @@ static bool imsic_init_dev_msi_info(struct device *dev, #define MATCH_PLATFORM_MSI BIT(DOMAIN_BUS_PLATFORM_MSI) static const struct msi_parent_ops imsic_msi_parent_ops = { - .supported_flags = MSI_GENERIC_FLAGS_MASK, + .supported_flags = MSI_GENERIC_FLAGS_MASK | + MSI_FLAG_PCI_MSIX, .required_flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS, .bus_select_token = DOMAIN_BUS_NEXUS, - .bus_select_mask = MATCH_PLATFORM_MSI, + .bus_select_mask = MATCH_PCI_MSI | MATCH_PLATFORM_MSI, .init_dev_msi_info = imsic_init_dev_msi_info, }; -- cgit v1.2.3 From 2333df5ae51ead2188d07c99e841e159a664741e Mon Sep 17 00:00:00 2001 From: Anup Patel Date: Thu, 7 Mar 2024 19:33:04 +0530 Subject: irqchip: Add RISC-V advanced PLIC driver for direct-mode MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The RISC-V advanced interrupt architecture (AIA) specification defines advanced platform-level interrupt controller (APLIC) which has two modes of operation: 1) Direct mode and 2) MSI mode. (For more details, refer https://github.com/riscv/riscv-aia) In APLIC direct-mode, wired interrupts are forwared to CPUs (or HARTs) as a local external interrupt. Add a platform irqchip driver for the RISC-V APLIC direct-mode to support RISC-V platforms having only wired interrupts. 
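For context, a hedged sketch of how a peripheral driver consumes a wired interrupt once the APLIC direct-mode domain is registered; this is not part of the patch and all names below are illustrative. The standard platform_get_irq()/devm_request_irq() path is used, with the APLIC driver supplying mask/unmask, trigger type and EOI handling.

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t example_handler(int irq, void *dev_id)
{
	/* Device-specific acknowledgement would go here */
	return IRQ_HANDLED;
}

static int example_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;

	return devm_request_irq(&pdev->dev, irq, example_handler, 0,
				dev_name(&pdev->dev), pdev);
}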
Signed-off-by: Anup Patel Signed-off-by: Thomas Gleixner Tested-by: Björn Töpel Reviewed-by: Björn Töpel Link: https://lore.kernel.org/r/20240307140307.646078-7-apatel@ventanamicro.com --- drivers/irqchip/Kconfig | 5 + drivers/irqchip/Makefile | 1 + drivers/irqchip/irq-riscv-aplic-direct.c | 326 +++++++++++++++++++++++++++++++ drivers/irqchip/irq-riscv-aplic-main.c | 211 ++++++++++++++++++++ drivers/irqchip/irq-riscv-aplic-main.h | 44 +++++ include/linux/irqchip/riscv-aplic.h | 145 ++++++++++++++ 6 files changed, 732 insertions(+) create mode 100644 drivers/irqchip/irq-riscv-aplic-direct.c create mode 100644 drivers/irqchip/irq-riscv-aplic-main.c create mode 100644 drivers/irqchip/irq-riscv-aplic-main.h create mode 100644 include/linux/irqchip/riscv-aplic.h (limited to 'drivers') diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index 8f6ce419ba4c..f6728471f592 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -540,6 +540,11 @@ config RISCV_INTC depends on RISCV select IRQ_DOMAIN_HIERARCHY +config RISCV_APLIC + bool + depends on RISCV + select IRQ_DOMAIN_HIERARCHY + config RISCV_IMSIC bool depends on RISCV diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile index c1390980b4eb..5adbe7a6f604 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile @@ -95,6 +95,7 @@ obj-$(CONFIG_QCOM_MPM) += irq-qcom-mpm.o obj-$(CONFIG_CSKY_MPINTC) += irq-csky-mpintc.o obj-$(CONFIG_CSKY_APB_INTC) += irq-csky-apb-intc.o obj-$(CONFIG_RISCV_INTC) += irq-riscv-intc.o +obj-$(CONFIG_RISCV_APLIC) += irq-riscv-aplic-main.o irq-riscv-aplic-direct.o obj-$(CONFIG_RISCV_IMSIC) += irq-riscv-imsic-state.o irq-riscv-imsic-early.o irq-riscv-imsic-platform.o obj-$(CONFIG_SIFIVE_PLIC) += irq-sifive-plic.o obj-$(CONFIG_STARFIVE_JH8100_INTC) += irq-starfive-jh8100-intc.o diff --git a/drivers/irqchip/irq-riscv-aplic-direct.c b/drivers/irqchip/irq-riscv-aplic-direct.c new file mode 100644 index 000000000000..06bace9b7497 --- /dev/null +++ b/drivers/irqchip/irq-riscv-aplic-direct.c @@ -0,0 +1,326 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Western Digital Corporation or its affiliates. + * Copyright (C) 2022 Ventana Micro Systems Inc. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "irq-riscv-aplic-main.h" + +#define APLIC_DISABLE_IDELIVERY 0 +#define APLIC_ENABLE_IDELIVERY 1 +#define APLIC_DISABLE_ITHRESHOLD 1 +#define APLIC_ENABLE_ITHRESHOLD 0 + +struct aplic_direct { + struct aplic_priv priv; + struct irq_domain *irqdomain; + struct cpumask lmask; +}; + +struct aplic_idc { + unsigned int hart_index; + void __iomem *regs; + struct aplic_direct *direct; +}; + +static unsigned int aplic_direct_parent_irq; +static DEFINE_PER_CPU(struct aplic_idc, aplic_idcs); + +static void aplic_direct_irq_eoi(struct irq_data *d) +{ + /* + * The fasteoi_handler requires irq_eoi() callback hence + * provide a dummy handler. 
+ */ +} + +#ifdef CONFIG_SMP +static int aplic_direct_set_affinity(struct irq_data *d, const struct cpumask *mask_val, + bool force) +{ + struct aplic_priv *priv = irq_data_get_irq_chip_data(d); + struct aplic_direct *direct = container_of(priv, struct aplic_direct, priv); + struct aplic_idc *idc; + unsigned int cpu, val; + struct cpumask amask; + void __iomem *target; + + cpumask_and(&amask, &direct->lmask, mask_val); + + if (force) + cpu = cpumask_first(&amask); + else + cpu = cpumask_any_and(&amask, cpu_online_mask); + + if (cpu >= nr_cpu_ids) + return -EINVAL; + + idc = per_cpu_ptr(&aplic_idcs, cpu); + target = priv->regs + APLIC_TARGET_BASE + (d->hwirq - 1) * sizeof(u32); + val = FIELD_PREP(APLIC_TARGET_HART_IDX, idc->hart_index); + val |= FIELD_PREP(APLIC_TARGET_IPRIO, APLIC_DEFAULT_PRIORITY); + writel(val, target); + + irq_data_update_effective_affinity(d, cpumask_of(cpu)); + + return IRQ_SET_MASK_OK_DONE; +} +#endif + +static struct irq_chip aplic_direct_chip = { + .name = "APLIC-DIRECT", + .irq_mask = aplic_irq_mask, + .irq_unmask = aplic_irq_unmask, + .irq_set_type = aplic_irq_set_type, + .irq_eoi = aplic_direct_irq_eoi, +#ifdef CONFIG_SMP + .irq_set_affinity = aplic_direct_set_affinity, +#endif + .flags = IRQCHIP_SET_TYPE_MASKED | + IRQCHIP_SKIP_SET_WAKE | + IRQCHIP_MASK_ON_SUSPEND, +}; + +static int aplic_direct_irqdomain_translate(struct irq_domain *d, struct irq_fwspec *fwspec, + unsigned long *hwirq, unsigned int *type) +{ + struct aplic_priv *priv = d->host_data; + + return aplic_irqdomain_translate(fwspec, priv->gsi_base, hwirq, type); +} + +static int aplic_direct_irqdomain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + struct aplic_priv *priv = domain->host_data; + struct aplic_direct *direct = container_of(priv, struct aplic_direct, priv); + struct irq_fwspec *fwspec = arg; + irq_hw_number_t hwirq; + unsigned int type; + int i, ret; + + ret = aplic_irqdomain_translate(fwspec, priv->gsi_base, &hwirq, &type); + if (ret) + return ret; + + for (i = 0; i < nr_irqs; i++) { + irq_domain_set_info(domain, virq + i, hwirq + i, &aplic_direct_chip, + priv, handle_fasteoi_irq, NULL, NULL); + irq_set_affinity(virq + i, &direct->lmask); + } + + return 0; +} + +static const struct irq_domain_ops aplic_direct_irqdomain_ops = { + .translate = aplic_direct_irqdomain_translate, + .alloc = aplic_direct_irqdomain_alloc, + .free = irq_domain_free_irqs_top, +}; + +/* + * To handle an APLIC direct interrupts, we just read the CLAIMI register + * which will return highest priority pending interrupt and clear the + * pending bit of the interrupt. This process is repeated until CLAIMI + * register return zero value. + */ +static void aplic_direct_handle_irq(struct irq_desc *desc) +{ + struct aplic_idc *idc = this_cpu_ptr(&aplic_idcs); + struct irq_domain *irqdomain = idc->direct->irqdomain; + struct irq_chip *chip = irq_desc_get_chip(desc); + irq_hw_number_t hw_irq; + int irq; + + chained_irq_enter(chip, desc); + + while ((hw_irq = readl(idc->regs + APLIC_IDC_CLAIMI))) { + hw_irq = hw_irq >> APLIC_IDC_TOPI_ID_SHIFT; + irq = irq_find_mapping(irqdomain, hw_irq); + + if (unlikely(irq <= 0)) { + dev_warn_ratelimited(idc->direct->priv.dev, + "hw_irq %lu mapping not found\n", hw_irq); + } else { + generic_handle_irq(irq); + } + } + + chained_irq_exit(chip, desc); +} + +static void aplic_idc_set_delivery(struct aplic_idc *idc, bool en) +{ + u32 de = (en) ? APLIC_ENABLE_IDELIVERY : APLIC_DISABLE_IDELIVERY; + u32 th = (en) ? 
APLIC_ENABLE_ITHRESHOLD : APLIC_DISABLE_ITHRESHOLD; + + /* Priority must be less than threshold for interrupt triggering */ + writel(th, idc->regs + APLIC_IDC_ITHRESHOLD); + + /* Delivery must be set to 1 for interrupt triggering */ + writel(de, idc->regs + APLIC_IDC_IDELIVERY); +} + +static int aplic_direct_dying_cpu(unsigned int cpu) +{ + if (aplic_direct_parent_irq) + disable_percpu_irq(aplic_direct_parent_irq); + + return 0; +} + +static int aplic_direct_starting_cpu(unsigned int cpu) +{ + if (aplic_direct_parent_irq) { + enable_percpu_irq(aplic_direct_parent_irq, + irq_get_trigger_type(aplic_direct_parent_irq)); + } + + return 0; +} + +static int aplic_direct_parse_parent_hwirq(struct device *dev, u32 index, + u32 *parent_hwirq, unsigned long *parent_hartid) +{ + struct of_phandle_args parent; + int rc; + + /* + * Currently, only OF fwnode is supported so extend this + * function for ACPI support. + */ + if (!is_of_node(dev->fwnode)) + return -EINVAL; + + rc = of_irq_parse_one(to_of_node(dev->fwnode), index, &parent); + if (rc) + return rc; + + rc = riscv_of_parent_hartid(parent.np, parent_hartid); + if (rc) + return rc; + + *parent_hwirq = parent.args[0]; + return 0; +} + +int aplic_direct_setup(struct device *dev, void __iomem *regs) +{ + int i, j, rc, cpu, current_cpu, setup_count = 0; + struct aplic_direct *direct; + struct irq_domain *domain; + struct aplic_priv *priv; + struct aplic_idc *idc; + unsigned long hartid; + u32 v, hwirq; + + direct = devm_kzalloc(dev, sizeof(*direct), GFP_KERNEL); + if (!direct) + return -ENOMEM; + priv = &direct->priv; + + rc = aplic_setup_priv(priv, dev, regs); + if (rc) { + dev_err(dev, "failed to create APLIC context\n"); + return rc; + } + + /* Setup per-CPU IDC and target CPU mask */ + current_cpu = get_cpu(); + for (i = 0; i < priv->nr_idcs; i++) { + rc = aplic_direct_parse_parent_hwirq(dev, i, &hwirq, &hartid); + if (rc) { + dev_warn(dev, "parent irq for IDC%d not found\n", i); + continue; + } + + /* + * Skip interrupts other than external interrupts for + * current privilege level. + */ + if (hwirq != RV_IRQ_EXT) + continue; + + cpu = riscv_hartid_to_cpuid(hartid); + if (cpu < 0) { + dev_warn(dev, "invalid cpuid for IDC%d\n", i); + continue; + } + + cpumask_set_cpu(cpu, &direct->lmask); + + idc = per_cpu_ptr(&aplic_idcs, cpu); + idc->hart_index = i; + idc->regs = priv->regs + APLIC_IDC_BASE + i * APLIC_IDC_SIZE; + idc->direct = direct; + + aplic_idc_set_delivery(idc, true); + + /* + * Boot cpu might not have APLIC hart_index = 0 so check + * and update target registers of all interrupts. 
+ */ + if (cpu == current_cpu && idc->hart_index) { + v = FIELD_PREP(APLIC_TARGET_HART_IDX, idc->hart_index); + v |= FIELD_PREP(APLIC_TARGET_IPRIO, APLIC_DEFAULT_PRIORITY); + for (j = 1; j <= priv->nr_irqs; j++) + writel(v, priv->regs + APLIC_TARGET_BASE + (j - 1) * sizeof(u32)); + } + + setup_count++; + } + put_cpu(); + + /* Find parent domain and register chained handler */ + domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(), + DOMAIN_BUS_ANY); + if (!aplic_direct_parent_irq && domain) { + aplic_direct_parent_irq = irq_create_mapping(domain, RV_IRQ_EXT); + if (aplic_direct_parent_irq) { + irq_set_chained_handler(aplic_direct_parent_irq, + aplic_direct_handle_irq); + + /* + * Setup CPUHP notifier to enable parent + * interrupt on all CPUs + */ + cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, + "irqchip/riscv/aplic:starting", + aplic_direct_starting_cpu, + aplic_direct_dying_cpu); + } + } + + /* Fail if we were not able to setup IDC for any CPU */ + if (!setup_count) + return -ENODEV; + + /* Setup global config and interrupt delivery */ + aplic_init_hw_global(priv, false); + + /* Create irq domain instance for the APLIC */ + direct->irqdomain = irq_domain_create_linear(dev->fwnode, priv->nr_irqs + 1, + &aplic_direct_irqdomain_ops, priv); + if (!direct->irqdomain) { + dev_err(dev, "failed to create direct irq domain\n"); + return -ENOMEM; + } + + /* Advertise the interrupt controller */ + dev_info(dev, "%d interrupts directly connected to %d CPUs\n", + priv->nr_irqs, priv->nr_idcs); + + return 0; +} diff --git a/drivers/irqchip/irq-riscv-aplic-main.c b/drivers/irqchip/irq-riscv-aplic-main.c new file mode 100644 index 000000000000..160ff99d6979 --- /dev/null +++ b/drivers/irqchip/irq-riscv-aplic-main.c @@ -0,0 +1,211 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Western Digital Corporation or its affiliates. + * Copyright (C) 2022 Ventana Micro Systems Inc. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "irq-riscv-aplic-main.h" + +void aplic_irq_unmask(struct irq_data *d) +{ + struct aplic_priv *priv = irq_data_get_irq_chip_data(d); + + writel(d->hwirq, priv->regs + APLIC_SETIENUM); +} + +void aplic_irq_mask(struct irq_data *d) +{ + struct aplic_priv *priv = irq_data_get_irq_chip_data(d); + + writel(d->hwirq, priv->regs + APLIC_CLRIENUM); +} + +int aplic_irq_set_type(struct irq_data *d, unsigned int type) +{ + struct aplic_priv *priv = irq_data_get_irq_chip_data(d); + void __iomem *sourcecfg; + u32 val = 0; + + switch (type) { + case IRQ_TYPE_NONE: + val = APLIC_SOURCECFG_SM_INACTIVE; + break; + case IRQ_TYPE_LEVEL_LOW: + val = APLIC_SOURCECFG_SM_LEVEL_LOW; + break; + case IRQ_TYPE_LEVEL_HIGH: + val = APLIC_SOURCECFG_SM_LEVEL_HIGH; + break; + case IRQ_TYPE_EDGE_FALLING: + val = APLIC_SOURCECFG_SM_EDGE_FALL; + break; + case IRQ_TYPE_EDGE_RISING: + val = APLIC_SOURCECFG_SM_EDGE_RISE; + break; + default: + return -EINVAL; + } + + sourcecfg = priv->regs + APLIC_SOURCECFG_BASE; + sourcecfg += (d->hwirq - 1) * sizeof(u32); + writel(val, sourcecfg); + + return 0; +} + +int aplic_irqdomain_translate(struct irq_fwspec *fwspec, u32 gsi_base, + unsigned long *hwirq, unsigned int *type) +{ + if (WARN_ON(fwspec->param_count < 2)) + return -EINVAL; + if (WARN_ON(!fwspec->param[0])) + return -EINVAL; + + /* For DT, gsi_base is always zero. 
*/ + *hwirq = fwspec->param[0] - gsi_base; + *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK; + + WARN_ON(*type == IRQ_TYPE_NONE); + + return 0; +} + +void aplic_init_hw_global(struct aplic_priv *priv, bool msi_mode) +{ + u32 val; +#ifdef CONFIG_RISCV_M_MODE + u32 valh; + + if (msi_mode) { + val = lower_32_bits(priv->msicfg.base_ppn); + valh = FIELD_PREP(APLIC_xMSICFGADDRH_BAPPN, upper_32_bits(priv->msicfg.base_ppn)); + valh |= FIELD_PREP(APLIC_xMSICFGADDRH_LHXW, priv->msicfg.lhxw); + valh |= FIELD_PREP(APLIC_xMSICFGADDRH_HHXW, priv->msicfg.hhxw); + valh |= FIELD_PREP(APLIC_xMSICFGADDRH_LHXS, priv->msicfg.lhxs); + valh |= FIELD_PREP(APLIC_xMSICFGADDRH_HHXS, priv->msicfg.hhxs); + writel(val, priv->regs + APLIC_xMSICFGADDR); + writel(valh, priv->regs + APLIC_xMSICFGADDRH); + } +#endif + + /* Setup APLIC domaincfg register */ + val = readl(priv->regs + APLIC_DOMAINCFG); + val |= APLIC_DOMAINCFG_IE; + if (msi_mode) + val |= APLIC_DOMAINCFG_DM; + writel(val, priv->regs + APLIC_DOMAINCFG); + if (readl(priv->regs + APLIC_DOMAINCFG) != val) + dev_warn(priv->dev, "unable to write 0x%x in domaincfg\n", val); +} + +static void aplic_init_hw_irqs(struct aplic_priv *priv) +{ + int i; + + /* Disable all interrupts */ + for (i = 0; i <= priv->nr_irqs; i += 32) + writel(-1U, priv->regs + APLIC_CLRIE_BASE + (i / 32) * sizeof(u32)); + + /* Set interrupt type and default priority for all interrupts */ + for (i = 1; i <= priv->nr_irqs; i++) { + writel(0, priv->regs + APLIC_SOURCECFG_BASE + (i - 1) * sizeof(u32)); + writel(APLIC_DEFAULT_PRIORITY, + priv->regs + APLIC_TARGET_BASE + (i - 1) * sizeof(u32)); + } + + /* Clear APLIC domaincfg */ + writel(0, priv->regs + APLIC_DOMAINCFG); +} + +int aplic_setup_priv(struct aplic_priv *priv, struct device *dev, void __iomem *regs) +{ + struct of_phandle_args parent; + int rc; + + /* + * Currently, only OF fwnode is supported so extend this + * function for ACPI support. + */ + if (!is_of_node(dev->fwnode)) + return -EINVAL; + + /* Save device pointer and register base */ + priv->dev = dev; + priv->regs = regs; + + /* Find out number of interrupt sources */ + rc = of_property_read_u32(to_of_node(dev->fwnode), "riscv,num-sources", + &priv->nr_irqs); + if (rc) { + dev_err(dev, "failed to get number of interrupt sources\n"); + return rc; + } + + /* + * Find out number of IDCs based on parent interrupts + * + * If "msi-parent" property is present then we ignore the + * APLIC IDCs which forces the APLIC driver to use MSI mode. + */ + if (!of_property_present(to_of_node(dev->fwnode), "msi-parent")) { + while (!of_irq_parse_one(to_of_node(dev->fwnode), priv->nr_idcs, &parent)) + priv->nr_idcs++; + } + + /* Setup initial state APLIC interrupts */ + aplic_init_hw_irqs(priv); + + return 0; +} + +static int aplic_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + bool msi_mode = false; + void __iomem *regs; + int rc; + + /* Map the MMIO registers */ + regs = devm_platform_ioremap_resource(pdev, 0); + if (!regs) { + dev_err(dev, "failed map MMIO registers\n"); + return -ENOMEM; + } + + /* + * If msi-parent property is present then setup APLIC MSI + * mode otherwise setup APLIC direct mode. + */ + if (is_of_node(dev->fwnode)) + msi_mode = of_property_present(to_of_node(dev->fwnode), "msi-parent"); + if (msi_mode) + rc = -ENODEV; + else + rc = aplic_direct_setup(dev, regs); + if (rc) + dev_err(dev, "failed to setup APLIC in %s mode\n", msi_mode ? 
"MSI" : "direct"); + + return rc; +} + +static const struct of_device_id aplic_match[] = { + { .compatible = "riscv,aplic" }, + {} +}; + +static struct platform_driver aplic_driver = { + .driver = { + .name = "riscv-aplic", + .of_match_table = aplic_match, + }, + .probe = aplic_probe, +}; +builtin_platform_driver(aplic_driver); diff --git a/drivers/irqchip/irq-riscv-aplic-main.h b/drivers/irqchip/irq-riscv-aplic-main.h new file mode 100644 index 000000000000..4cfbadf37ddc --- /dev/null +++ b/drivers/irqchip/irq-riscv-aplic-main.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2021 Western Digital Corporation or its affiliates. + * Copyright (C) 2022 Ventana Micro Systems Inc. + */ + +#ifndef _IRQ_RISCV_APLIC_MAIN_H +#define _IRQ_RISCV_APLIC_MAIN_H + +#include +#include +#include +#include +#include + +#define APLIC_DEFAULT_PRIORITY 1 + +struct aplic_msicfg { + phys_addr_t base_ppn; + u32 hhxs; + u32 hhxw; + u32 lhxs; + u32 lhxw; +}; + +struct aplic_priv { + struct device *dev; + u32 gsi_base; + u32 nr_irqs; + u32 nr_idcs; + void __iomem *regs; + struct aplic_msicfg msicfg; +}; + +void aplic_irq_unmask(struct irq_data *d); +void aplic_irq_mask(struct irq_data *d); +int aplic_irq_set_type(struct irq_data *d, unsigned int type); +int aplic_irqdomain_translate(struct irq_fwspec *fwspec, u32 gsi_base, + unsigned long *hwirq, unsigned int *type); +void aplic_init_hw_global(struct aplic_priv *priv, bool msi_mode); +int aplic_setup_priv(struct aplic_priv *priv, struct device *dev, void __iomem *regs); +int aplic_direct_setup(struct device *dev, void __iomem *regs); + +#endif diff --git a/include/linux/irqchip/riscv-aplic.h b/include/linux/irqchip/riscv-aplic.h new file mode 100644 index 000000000000..ec8f7df50583 --- /dev/null +++ b/include/linux/irqchip/riscv-aplic.h @@ -0,0 +1,145 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2021 Western Digital Corporation or its affiliates. + * Copyright (C) 2022 Ventana Micro Systems Inc. 
+ */ +#ifndef __LINUX_IRQCHIP_RISCV_APLIC_H +#define __LINUX_IRQCHIP_RISCV_APLIC_H + +#include + +#define APLIC_MAX_IDC BIT(14) +#define APLIC_MAX_SOURCE 1024 + +#define APLIC_DOMAINCFG 0x0000 +#define APLIC_DOMAINCFG_RDONLY 0x80000000 +#define APLIC_DOMAINCFG_IE BIT(8) +#define APLIC_DOMAINCFG_DM BIT(2) +#define APLIC_DOMAINCFG_BE BIT(0) + +#define APLIC_SOURCECFG_BASE 0x0004 +#define APLIC_SOURCECFG_D BIT(10) +#define APLIC_SOURCECFG_CHILDIDX_MASK 0x000003ff +#define APLIC_SOURCECFG_SM_MASK 0x00000007 +#define APLIC_SOURCECFG_SM_INACTIVE 0x0 +#define APLIC_SOURCECFG_SM_DETACH 0x1 +#define APLIC_SOURCECFG_SM_EDGE_RISE 0x4 +#define APLIC_SOURCECFG_SM_EDGE_FALL 0x5 +#define APLIC_SOURCECFG_SM_LEVEL_HIGH 0x6 +#define APLIC_SOURCECFG_SM_LEVEL_LOW 0x7 + +#define APLIC_MMSICFGADDR 0x1bc0 +#define APLIC_MMSICFGADDRH 0x1bc4 +#define APLIC_SMSICFGADDR 0x1bc8 +#define APLIC_SMSICFGADDRH 0x1bcc + +#ifdef CONFIG_RISCV_M_MODE +#define APLIC_xMSICFGADDR APLIC_MMSICFGADDR +#define APLIC_xMSICFGADDRH APLIC_MMSICFGADDRH +#else +#define APLIC_xMSICFGADDR APLIC_SMSICFGADDR +#define APLIC_xMSICFGADDRH APLIC_SMSICFGADDRH +#endif + +#define APLIC_xMSICFGADDRH_L BIT(31) +#define APLIC_xMSICFGADDRH_HHXS_MASK 0x1f +#define APLIC_xMSICFGADDRH_HHXS_SHIFT 24 +#define APLIC_xMSICFGADDRH_HHXS (APLIC_xMSICFGADDRH_HHXS_MASK << \ + APLIC_xMSICFGADDRH_HHXS_SHIFT) +#define APLIC_xMSICFGADDRH_LHXS_MASK 0x7 +#define APLIC_xMSICFGADDRH_LHXS_SHIFT 20 +#define APLIC_xMSICFGADDRH_LHXS (APLIC_xMSICFGADDRH_LHXS_MASK << \ + APLIC_xMSICFGADDRH_LHXS_SHIFT) +#define APLIC_xMSICFGADDRH_HHXW_MASK 0x7 +#define APLIC_xMSICFGADDRH_HHXW_SHIFT 16 +#define APLIC_xMSICFGADDRH_HHXW (APLIC_xMSICFGADDRH_HHXW_MASK << \ + APLIC_xMSICFGADDRH_HHXW_SHIFT) +#define APLIC_xMSICFGADDRH_LHXW_MASK 0xf +#define APLIC_xMSICFGADDRH_LHXW_SHIFT 12 +#define APLIC_xMSICFGADDRH_LHXW (APLIC_xMSICFGADDRH_LHXW_MASK << \ + APLIC_xMSICFGADDRH_LHXW_SHIFT) +#define APLIC_xMSICFGADDRH_BAPPN_MASK 0xfff +#define APLIC_xMSICFGADDRH_BAPPN_SHIFT 0 +#define APLIC_xMSICFGADDRH_BAPPN (APLIC_xMSICFGADDRH_BAPPN_MASK << \ + APLIC_xMSICFGADDRH_BAPPN_SHIFT) + +#define APLIC_xMSICFGADDR_PPN_SHIFT 12 + +#define APLIC_xMSICFGADDR_PPN_HART(__lhxs) \ + (BIT(__lhxs) - 1) + +#define APLIC_xMSICFGADDR_PPN_LHX_MASK(__lhxw) \ + (BIT(__lhxw) - 1) +#define APLIC_xMSICFGADDR_PPN_LHX_SHIFT(__lhxs) \ + ((__lhxs)) +#define APLIC_xMSICFGADDR_PPN_LHX(__lhxw, __lhxs) \ + (APLIC_xMSICFGADDR_PPN_LHX_MASK(__lhxw) << \ + APLIC_xMSICFGADDR_PPN_LHX_SHIFT(__lhxs)) + +#define APLIC_xMSICFGADDR_PPN_HHX_MASK(__hhxw) \ + (BIT(__hhxw) - 1) +#define APLIC_xMSICFGADDR_PPN_HHX_SHIFT(__hhxs) \ + ((__hhxs) + APLIC_xMSICFGADDR_PPN_SHIFT) +#define APLIC_xMSICFGADDR_PPN_HHX(__hhxw, __hhxs) \ + (APLIC_xMSICFGADDR_PPN_HHX_MASK(__hhxw) << \ + APLIC_xMSICFGADDR_PPN_HHX_SHIFT(__hhxs)) + +#define APLIC_IRQBITS_PER_REG 32 + +#define APLIC_SETIP_BASE 0x1c00 +#define APLIC_SETIPNUM 0x1cdc + +#define APLIC_CLRIP_BASE 0x1d00 +#define APLIC_CLRIPNUM 0x1ddc + +#define APLIC_SETIE_BASE 0x1e00 +#define APLIC_SETIENUM 0x1edc + +#define APLIC_CLRIE_BASE 0x1f00 +#define APLIC_CLRIENUM 0x1fdc + +#define APLIC_SETIPNUM_LE 0x2000 +#define APLIC_SETIPNUM_BE 0x2004 + +#define APLIC_GENMSI 0x3000 + +#define APLIC_TARGET_BASE 0x3004 +#define APLIC_TARGET_HART_IDX_SHIFT 18 +#define APLIC_TARGET_HART_IDX_MASK 0x3fff +#define APLIC_TARGET_HART_IDX (APLIC_TARGET_HART_IDX_MASK << \ + APLIC_TARGET_HART_IDX_SHIFT) +#define APLIC_TARGET_GUEST_IDX_SHIFT 12 +#define APLIC_TARGET_GUEST_IDX_MASK 0x3f +#define APLIC_TARGET_GUEST_IDX 
(APLIC_TARGET_GUEST_IDX_MASK << \ + APLIC_TARGET_GUEST_IDX_SHIFT) +#define APLIC_TARGET_IPRIO_SHIFT 0 +#define APLIC_TARGET_IPRIO_MASK 0xff +#define APLIC_TARGET_IPRIO (APLIC_TARGET_IPRIO_MASK << \ + APLIC_TARGET_IPRIO_SHIFT) +#define APLIC_TARGET_EIID_SHIFT 0 +#define APLIC_TARGET_EIID_MASK 0x7ff +#define APLIC_TARGET_EIID (APLIC_TARGET_EIID_MASK << \ + APLIC_TARGET_EIID_SHIFT) + +#define APLIC_IDC_BASE 0x4000 +#define APLIC_IDC_SIZE 32 + +#define APLIC_IDC_IDELIVERY 0x00 + +#define APLIC_IDC_IFORCE 0x04 + +#define APLIC_IDC_ITHRESHOLD 0x08 + +#define APLIC_IDC_TOPI 0x18 +#define APLIC_IDC_TOPI_ID_SHIFT 16 +#define APLIC_IDC_TOPI_ID_MASK 0x3ff +#define APLIC_IDC_TOPI_ID (APLIC_IDC_TOPI_ID_MASK << \ + APLIC_IDC_TOPI_ID_SHIFT) +#define APLIC_IDC_TOPI_PRIO_SHIFT 0 +#define APLIC_IDC_TOPI_PRIO_MASK 0xff +#define APLIC_IDC_TOPI_PRIO (APLIC_IDC_TOPI_PRIO_MASK << \ + APLIC_IDC_TOPI_PRIO_SHIFT) + +#define APLIC_IDC_CLAIMI 0x1c + +#endif -- cgit v1.2.3 From ca8df97fe6798afbe395fc4a8e23bac0c7fbd248 Mon Sep 17 00:00:00 2001 From: Anup Patel Date: Thu, 7 Mar 2024 19:33:05 +0530 Subject: irqchip/riscv-aplic: Add support for MSI-mode MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The RISC-V advanced platform-level interrupt controller (APLIC) has two modes of operation: 1) Direct mode and 2) MSI mode. (For more details, refer https://github.com/riscv/riscv-aia) In APLIC MSI-mode, wired interrupts are forwared as message signaled interrupts (MSIs) to CPUs via IMSIC. Extend the existing APLIC irqchip driver to support MSI-mode for RISC-V platforms having both wired interrupts and MSIs. Signed-off-by: Anup Patel Signed-off-by: Thomas Gleixner Tested-by: Björn Töpel Reviewed-by: Björn Töpel Link: https://lore.kernel.org/r/20240307140307.646078-8-apatel@ventanamicro.com --- drivers/irqchip/Kconfig | 6 + drivers/irqchip/Makefile | 1 + drivers/irqchip/irq-riscv-aplic-main.c | 2 +- drivers/irqchip/irq-riscv-aplic-main.h | 8 + drivers/irqchip/irq-riscv-aplic-msi.c | 257 +++++++++++++++++++++++++++++++++ 5 files changed, 273 insertions(+), 1 deletion(-) create mode 100644 drivers/irqchip/irq-riscv-aplic-msi.c (limited to 'drivers') diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index f6728471f592..687d17e47556 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -545,6 +545,12 @@ config RISCV_APLIC depends on RISCV select IRQ_DOMAIN_HIERARCHY +config RISCV_APLIC_MSI + bool + depends on RISCV_APLIC + select GENERIC_MSI_IRQ + default RISCV_APLIC + config RISCV_IMSIC bool depends on RISCV diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile index 5adbe7a6f604..d9dc3d99aaa8 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile @@ -96,6 +96,7 @@ obj-$(CONFIG_CSKY_MPINTC) += irq-csky-mpintc.o obj-$(CONFIG_CSKY_APB_INTC) += irq-csky-apb-intc.o obj-$(CONFIG_RISCV_INTC) += irq-riscv-intc.o obj-$(CONFIG_RISCV_APLIC) += irq-riscv-aplic-main.o irq-riscv-aplic-direct.o +obj-$(CONFIG_RISCV_APLIC_MSI) += irq-riscv-aplic-msi.o obj-$(CONFIG_RISCV_IMSIC) += irq-riscv-imsic-state.o irq-riscv-imsic-early.o irq-riscv-imsic-platform.o obj-$(CONFIG_SIFIVE_PLIC) += irq-sifive-plic.o obj-$(CONFIG_STARFIVE_JH8100_INTC) += irq-starfive-jh8100-intc.o diff --git a/drivers/irqchip/irq-riscv-aplic-main.c b/drivers/irqchip/irq-riscv-aplic-main.c index 160ff99d6979..774a0c97fdab 100644 --- a/drivers/irqchip/irq-riscv-aplic-main.c +++ b/drivers/irqchip/irq-riscv-aplic-main.c @@ -187,7 +187,7 @@ static int aplic_probe(struct platform_device 
*pdev) if (is_of_node(dev->fwnode)) msi_mode = of_property_present(to_of_node(dev->fwnode), "msi-parent"); if (msi_mode) - rc = -ENODEV; + rc = aplic_msi_setup(dev, regs); else rc = aplic_direct_setup(dev, regs); if (rc) diff --git a/drivers/irqchip/irq-riscv-aplic-main.h b/drivers/irqchip/irq-riscv-aplic-main.h index 4cfbadf37ddc..4393927d8c80 100644 --- a/drivers/irqchip/irq-riscv-aplic-main.h +++ b/drivers/irqchip/irq-riscv-aplic-main.h @@ -40,5 +40,13 @@ int aplic_irqdomain_translate(struct irq_fwspec *fwspec, u32 gsi_base, void aplic_init_hw_global(struct aplic_priv *priv, bool msi_mode); int aplic_setup_priv(struct aplic_priv *priv, struct device *dev, void __iomem *regs); int aplic_direct_setup(struct device *dev, void __iomem *regs); +#ifdef CONFIG_RISCV_APLIC_MSI +int aplic_msi_setup(struct device *dev, void __iomem *regs); +#else +static inline int aplic_msi_setup(struct device *dev, void __iomem *regs) +{ + return -ENODEV; +} +#endif #endif diff --git a/drivers/irqchip/irq-riscv-aplic-msi.c b/drivers/irqchip/irq-riscv-aplic-msi.c new file mode 100644 index 000000000000..36cd04a5057b --- /dev/null +++ b/drivers/irqchip/irq-riscv-aplic-msi.c @@ -0,0 +1,257 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Western Digital Corporation or its affiliates. + * Copyright (C) 2022 Ventana Micro Systems Inc. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "irq-riscv-aplic-main.h" + +static void aplic_msi_irq_mask(struct irq_data *d) +{ + aplic_irq_mask(d); + irq_chip_mask_parent(d); +} + +static void aplic_msi_irq_unmask(struct irq_data *d) +{ + irq_chip_unmask_parent(d); + aplic_irq_unmask(d); +} + +static void aplic_msi_irq_eoi(struct irq_data *d) +{ + struct aplic_priv *priv = irq_data_get_irq_chip_data(d); + + /* + * EOI handling is required only for level-triggered interrupts + * when APLIC is in MSI mode. + */ + + switch (irqd_get_trigger_type(d)) { + case IRQ_TYPE_LEVEL_LOW: + case IRQ_TYPE_LEVEL_HIGH: + /* + * The section "4.9.2 Special consideration for level-sensitive interrupt + * sources" of the RISC-V AIA specification says: + * + * A second option is for the interrupt service routine to write the + * APLIC’s source identity number for the interrupt to the domain’s + * setipnum register just before exiting. This will cause the interrupt’s + * pending bit to be set to one again if the source is still asserting + * an interrupt, but not if the source is not asserting an interrupt. 
+ */ + writel(d->hwirq, priv->regs + APLIC_SETIPNUM_LE); + break; + } +} + +static void aplic_msi_write_msg(struct irq_data *d, struct msi_msg *msg) +{ + unsigned int group_index, hart_index, guest_index, val; + struct aplic_priv *priv = irq_data_get_irq_chip_data(d); + struct aplic_msicfg *mc = &priv->msicfg; + phys_addr_t tppn, tbppn, msg_addr; + void __iomem *target; + + /* For zeroed MSI, simply write zero into the target register */ + if (!msg->address_hi && !msg->address_lo && !msg->data) { + target = priv->regs + APLIC_TARGET_BASE; + target += (d->hwirq - 1) * sizeof(u32); + writel(0, target); + return; + } + + /* Sanity check on message data */ + WARN_ON(msg->data > APLIC_TARGET_EIID_MASK); + + /* Compute target MSI address */ + msg_addr = (((u64)msg->address_hi) << 32) | msg->address_lo; + tppn = msg_addr >> APLIC_xMSICFGADDR_PPN_SHIFT; + + /* Compute target HART Base PPN */ + tbppn = tppn; + tbppn &= ~APLIC_xMSICFGADDR_PPN_HART(mc->lhxs); + tbppn &= ~APLIC_xMSICFGADDR_PPN_LHX(mc->lhxw, mc->lhxs); + tbppn &= ~APLIC_xMSICFGADDR_PPN_HHX(mc->hhxw, mc->hhxs); + WARN_ON(tbppn != mc->base_ppn); + + /* Compute target group and hart indexes */ + group_index = (tppn >> APLIC_xMSICFGADDR_PPN_HHX_SHIFT(mc->hhxs)) & + APLIC_xMSICFGADDR_PPN_HHX_MASK(mc->hhxw); + hart_index = (tppn >> APLIC_xMSICFGADDR_PPN_LHX_SHIFT(mc->lhxs)) & + APLIC_xMSICFGADDR_PPN_LHX_MASK(mc->lhxw); + hart_index |= (group_index << mc->lhxw); + WARN_ON(hart_index > APLIC_TARGET_HART_IDX_MASK); + + /* Compute target guest index */ + guest_index = tppn & APLIC_xMSICFGADDR_PPN_HART(mc->lhxs); + WARN_ON(guest_index > APLIC_TARGET_GUEST_IDX_MASK); + + /* Update IRQ TARGET register */ + target = priv->regs + APLIC_TARGET_BASE; + target += (d->hwirq - 1) * sizeof(u32); + val = FIELD_PREP(APLIC_TARGET_HART_IDX, hart_index); + val |= FIELD_PREP(APLIC_TARGET_GUEST_IDX, guest_index); + val |= FIELD_PREP(APLIC_TARGET_EIID, msg->data); + writel(val, target); +} + +static void aplic_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc) +{ + arg->desc = desc; + arg->hwirq = (u32)desc->data.icookie.value; +} + +static int aplic_msi_translate(struct irq_domain *d, struct irq_fwspec *fwspec, + unsigned long *hwirq, unsigned int *type) +{ + struct msi_domain_info *info = d->host_data; + struct aplic_priv *priv = info->data; + + return aplic_irqdomain_translate(fwspec, priv->gsi_base, hwirq, type); +} + +static const struct msi_domain_template aplic_msi_template = { + .chip = { + .name = "APLIC-MSI", + .irq_mask = aplic_msi_irq_mask, + .irq_unmask = aplic_msi_irq_unmask, + .irq_set_type = aplic_irq_set_type, + .irq_eoi = aplic_msi_irq_eoi, +#ifdef CONFIG_SMP + .irq_set_affinity = irq_chip_set_affinity_parent, +#endif + .irq_write_msi_msg = aplic_msi_write_msg, + .flags = IRQCHIP_SET_TYPE_MASKED | + IRQCHIP_SKIP_SET_WAKE | + IRQCHIP_MASK_ON_SUSPEND, + }, + + .ops = { + .set_desc = aplic_msi_set_desc, + .msi_translate = aplic_msi_translate, + }, + + .info = { + .bus_token = DOMAIN_BUS_WIRED_TO_MSI, + .flags = MSI_FLAG_USE_DEV_FWNODE, + .handler = handle_fasteoi_irq, + .handler_name = "fasteoi", + }, +}; + +int aplic_msi_setup(struct device *dev, void __iomem *regs) +{ + const struct imsic_global_config *imsic_global; + struct aplic_priv *priv; + struct aplic_msicfg *mc; + phys_addr_t pa; + int rc; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + rc = aplic_setup_priv(priv, dev, regs); + if (rc) { + dev_err(dev, "failed to create APLIC context\n"); + return rc; + } + mc = &priv->msicfg; + + /* + * 
The APLIC outgoing MSI config registers assume target MSI + * controller to be RISC-V AIA IMSIC controller. + */ + imsic_global = imsic_get_global_config(); + if (!imsic_global) { + dev_err(dev, "IMSIC global config not found\n"); + return -ENODEV; + } + + /* Find number of guest index bits (LHXS) */ + mc->lhxs = imsic_global->guest_index_bits; + if (APLIC_xMSICFGADDRH_LHXS_MASK < mc->lhxs) { + dev_err(dev, "IMSIC guest index bits big for APLIC LHXS\n"); + return -EINVAL; + } + + /* Find number of HART index bits (LHXW) */ + mc->lhxw = imsic_global->hart_index_bits; + if (APLIC_xMSICFGADDRH_LHXW_MASK < mc->lhxw) { + dev_err(dev, "IMSIC hart index bits big for APLIC LHXW\n"); + return -EINVAL; + } + + /* Find number of group index bits (HHXW) */ + mc->hhxw = imsic_global->group_index_bits; + if (APLIC_xMSICFGADDRH_HHXW_MASK < mc->hhxw) { + dev_err(dev, "IMSIC group index bits big for APLIC HHXW\n"); + return -EINVAL; + } + + /* Find first bit position of group index (HHXS) */ + mc->hhxs = imsic_global->group_index_shift; + if (mc->hhxs < (2 * APLIC_xMSICFGADDR_PPN_SHIFT)) { + dev_err(dev, "IMSIC group index shift should be >= %d\n", + (2 * APLIC_xMSICFGADDR_PPN_SHIFT)); + return -EINVAL; + } + mc->hhxs -= (2 * APLIC_xMSICFGADDR_PPN_SHIFT); + if (APLIC_xMSICFGADDRH_HHXS_MASK < mc->hhxs) { + dev_err(dev, "IMSIC group index shift big for APLIC HHXS\n"); + return -EINVAL; + } + + /* Compute PPN base */ + mc->base_ppn = imsic_global->base_addr >> APLIC_xMSICFGADDR_PPN_SHIFT; + mc->base_ppn &= ~APLIC_xMSICFGADDR_PPN_HART(mc->lhxs); + mc->base_ppn &= ~APLIC_xMSICFGADDR_PPN_LHX(mc->lhxw, mc->lhxs); + mc->base_ppn &= ~APLIC_xMSICFGADDR_PPN_HHX(mc->hhxw, mc->hhxs); + + /* Setup global config and interrupt delivery */ + aplic_init_hw_global(priv, true); + + /* Set the APLIC device MSI domain if not available */ + if (!dev_get_msi_domain(dev)) { + /* + * The device MSI domain for OF devices is only set at the + * time of populating/creating OF device. If the device MSI + * domain is discovered later after the OF device is created + * then we need to set it explicitly before using any platform + * MSI functions. + * + * In case of APLIC device, the parent MSI domain is always + * IMSIC and the IMSIC MSI domains are created later through + * the platform driver probing so we set it explicitly here. + */ + if (is_of_node(dev->fwnode)) + of_msi_configure(dev, to_of_node(dev->fwnode)); + } + + if (!msi_create_device_irq_domain(dev, MSI_DEFAULT_DOMAIN, &aplic_msi_template, + priv->nr_irqs + 1, priv, priv)) { + dev_err(dev, "failed to create MSI irq domain\n"); + return -ENOMEM; + } + + /* Advertise the interrupt controller */ + pa = priv->msicfg.base_ppn << APLIC_xMSICFGADDR_PPN_SHIFT; + dev_info(dev, "%d interrupts forwared to MSI base %pa\n", priv->nr_irqs, &pa); + + return 0; +} -- cgit v1.2.3 From 7b6f0f278d02de5a8f49202465c6427b56c97545 Mon Sep 17 00:00:00 2001 From: Keguang Zhang Date: Mon, 11 Mar 2024 19:53:44 +0800 Subject: irqchip: Remove redundant irq_chip::name initialization Since commit 021a8ca2ba23 ("genirq/generic-chip: Fix the irq_chip name for /proc/interrupts"), the chip name of all chip types are set to the same name by irq_init_generic_chip() now. So the initialization to the same irq_chip name are no longer needed. Drop them. 
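The pattern this cleanup targets can be sketched as follows; the FOO names, irq_base and reg_base are placeholders modelled on the two drivers touched below, not code from this patch. Since irq_init_generic_chip() now copies the name passed to irq_alloc_generic_chip() into every chip type, only the allocation call needs to name the chip:

  struct irq_chip_generic *gc;

  gc = irq_alloc_generic_chip("FOO-INTC", 2, irq_base, reg_base,
                              handle_level_irq);
  if (!gc)
          return -ENOMEM;

  gc->chip_types[0].type = IRQ_TYPE_LEVEL_MASK;
  gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
  gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;

  gc->chip_types[1].type = IRQ_TYPE_EDGE_BOTH;
  gc->chip_types[1].chip.irq_ack = irq_gc_ack_set_bit;
  gc->chip_types[1].chip.irq_mask = irq_gc_mask_clr_bit;
  gc->chip_types[1].chip.irq_unmask = irq_gc_mask_set_bit;
  /* no chip_types[1].chip.name assignment needed anymore */
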
Signed-off-by: Keguang Zhang Signed-off-by: Thomas Gleixner Acked-by: Jernej Skrabec Link: https://lore.kernel.org/r/20240311115344.72567-1-keguang.zhang@gmail.com --- drivers/irqchip/irq-sunxi-nmi.c | 1 - drivers/irqchip/irq-tb10x.c | 1 - 2 files changed, 2 deletions(-) (limited to 'drivers') diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c index e760b1278143..bb92fd85e975 100644 --- a/drivers/irqchip/irq-sunxi-nmi.c +++ b/drivers/irqchip/irq-sunxi-nmi.c @@ -192,7 +192,6 @@ static int __init sunxi_sc_nmi_irq_init(struct device_node *node, gc->chip_types[0].regs.type = reg_offs->ctrl; gc->chip_types[1].type = IRQ_TYPE_EDGE_BOTH; - gc->chip_types[1].chip.name = gc->chip_types[0].chip.name; gc->chip_types[1].chip.irq_ack = irq_gc_ack_set_bit; gc->chip_types[1].chip.irq_mask = irq_gc_mask_clr_bit; gc->chip_types[1].chip.irq_unmask = irq_gc_mask_set_bit; diff --git a/drivers/irqchip/irq-tb10x.c b/drivers/irqchip/irq-tb10x.c index 680586354d12..d59bfbe8c6d0 100644 --- a/drivers/irqchip/irq-tb10x.c +++ b/drivers/irqchip/irq-tb10x.c @@ -150,7 +150,6 @@ static int __init of_tb10x_init_irq(struct device_node *ictl, gc->chip_types[0].regs.mask = AB_IRQCTL_INT_ENABLE; gc->chip_types[1].type = IRQ_TYPE_EDGE_BOTH; - gc->chip_types[1].chip.name = gc->chip_types[0].chip.name; gc->chip_types[1].chip.irq_ack = irq_gc_ack_set_bit; gc->chip_types[1].chip.irq_mask = irq_gc_mask_clr_bit; gc->chip_types[1].chip.irq_unmask = irq_gc_mask_set_bit; -- cgit v1.2.3 From 14ced4756458f2c7295f27f615d22c2b5912c733 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 27 Mar 2024 11:05:16 +0000 Subject: irqchip/riscv-aplic: Fix spelling mistake "forwared" -> "forwarded" There is a spelling mistake in a dev_info message. Fix it. Signed-off-by: Colin Ian King Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240327110516.283738-1-colin.i.king@gmail.com --- drivers/irqchip/irq-riscv-aplic-msi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/irqchip/irq-riscv-aplic-msi.c b/drivers/irqchip/irq-riscv-aplic-msi.c index 36cd04a5057b..028444af48bd 100644 --- a/drivers/irqchip/irq-riscv-aplic-msi.c +++ b/drivers/irqchip/irq-riscv-aplic-msi.c @@ -251,7 +251,7 @@ int aplic_msi_setup(struct device *dev, void __iomem *regs) /* Advertise the interrupt controller */ pa = priv->msicfg.base_ppn << APLIC_xMSICFGADDR_PPN_SHIFT; - dev_info(dev, "%d interrupts forwared to MSI base %pa\n", priv->nr_irqs, &pa); + dev_info(dev, "%d interrupts forwarded to MSI base %pa\n", priv->nr_irqs, &pa); return 0; } -- cgit v1.2.3 From ff3669a71afa06208de58d6bea1cc49d5e3fcbd1 Mon Sep 17 00:00:00 2001 From: Zenghui Yu Date: Wed, 27 Mar 2024 22:23:05 +0800 Subject: irqchip/alpine-msi: Fix off-by-one in allocation error path When alpine_msix_gic_domain_alloc() fails, there is an off-by-one in the number of interrupts to be freed. Fix it by passing the number of successfully allocated interrupts, instead of the relative index of the last allocated one. 
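The arithmetic is easy to get wrong, so a self-contained user-space sketch may help; alloc_entries() and its injected failure are invented for the example and are not kernel code. When the loop fails at index i, exactly i entries (indices 0 .. i-1) are live, so i is the count to free:

  #include <stdio.h>

  /* Pretend that setting up entry 'fail_at' fails. */
  static int alloc_entries(int nr, int fail_at)
  {
          int i;

          for (i = 0; i < nr; i++) {
                  if (i == fail_at) {
                          /*
                           * Entries 0 .. i-1 are live here, so 'i' is the
                           * number to free; 'i - 1' would leak the last
                           * successfully allocated entry.
                           */
                          printf("failed at %d: freeing %d entries\n", i, i);
                          return -1;
                  }
          }
          return 0;
  }

  int main(void)
  {
          alloc_entries(4, 2);    /* prints: failed at 2: freeing 2 entries */
          return 0;
  }

The same reasoning is what the irq_domain_free_irqs_parent(domain, virq, i) change in the hunk below implements.
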
Fixes: 3841245e8498 ("irqchip/alpine-msi: Fix freeing of interrupts on allocation error path") Signed-off-by: Zenghui Yu Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240327142305.1048-1-yuzenghui@huawei.com --- drivers/irqchip/irq-alpine-msi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/irqchip/irq-alpine-msi.c b/drivers/irqchip/irq-alpine-msi.c index 9c8b1349ee17..a1430ab60a8a 100644 --- a/drivers/irqchip/irq-alpine-msi.c +++ b/drivers/irqchip/irq-alpine-msi.c @@ -165,7 +165,7 @@ static int alpine_msix_middle_domain_alloc(struct irq_domain *domain, return 0; err_sgi: - irq_domain_free_irqs_parent(domain, virq, i - 1); + irq_domain_free_irqs_parent(domain, virq, i); alpine_msix_free_sgi(priv, sgi, nr_irqs); return err; } -- cgit v1.2.3 From b327708798809328f21da8dc14cc8883d1e8a4b3 Mon Sep 17 00:00:00 2001 From: Zenghui Yu Date: Wed, 27 Mar 2024 22:23:34 +0800 Subject: irqchip/loongson-pch-msi: Fix off-by-one on allocation error path When pch_msi_parent_domain_alloc() returns an error, there is an off-by-one in the number of interrupts to be freed. Fix it by passing the number of successfully allocated interrupts, instead of the relative index of the last allocated one. Fixes: 632dcc2c75ef ("irqchip: Add Loongson PCH MSI controller") Signed-off-by: Zenghui Yu Signed-off-by: Thomas Gleixner Reviewed-by: Jiaxun Yang Link: https://lore.kernel.org/r/20240327142334.1098-1-yuzenghui@huawei.com --- drivers/irqchip/irq-loongson-pch-msi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/irqchip/irq-loongson-pch-msi.c b/drivers/irqchip/irq-loongson-pch-msi.c index 6e1e1f011bb2..dd4d699170f4 100644 --- a/drivers/irqchip/irq-loongson-pch-msi.c +++ b/drivers/irqchip/irq-loongson-pch-msi.c @@ -136,7 +136,7 @@ static int pch_msi_middle_domain_alloc(struct irq_domain *domain, err_hwirq: pch_msi_free_hwirq(priv, hwirq, nr_irqs); - irq_domain_free_irqs_parent(domain, virq, i - 1); + irq_domain_free_irqs_parent(domain, virq, i); return err; } -- cgit v1.2.3 From a64003da0ef8e135cda678eb2c8a6f0baf4a9f35 Mon Sep 17 00:00:00 2001 From: Tiezhu Yang Date: Tue, 26 Mar 2024 20:11:30 +0800 Subject: irqchip/loongson-eiointc: Set CPU affinity only on SMP machines for LoongArch According to the code comment of "struct irq_chip", the member "irq_set_affinity" is to set the CPU affinity on SMP machines, so define and call eiointc_set_irq_affinity() only under CONFIG_SMP. 
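As a generic sketch of the resulting shape (the foo_* names are placeholders, not this driver's code), both the callback definition and the irq_chip initializer are guarded, so UP builds neither compile nor reference the affinity path:

  #ifdef CONFIG_SMP
  static int foo_set_irq_affinity(struct irq_data *d,
                                  const struct cpumask *affinity, bool force)
  {
          /* pick a CPU from 'affinity' and program the routing registers */
          return IRQ_SET_MASK_OK;
  }
  #endif

  static struct irq_chip foo_irq_chip = {
          .name                   = "FOO",
  #ifdef CONFIG_SMP
          .irq_set_affinity       = foo_set_irq_affinity,
  #endif
  };
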
Signed-off-by: Tiezhu Yang Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240326121130.16622-4-yangtiezhu@loongson.cn --- drivers/irqchip/irq-loongson-eiointc.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c index b64cbe3052e8..4f5e6d21d77d 100644 --- a/drivers/irqchip/irq-loongson-eiointc.c +++ b/drivers/irqchip/irq-loongson-eiointc.c @@ -59,6 +59,7 @@ static int cpu_to_eio_node(int cpu) return cpu_logical_map(cpu) / CORES_PER_EIO_NODE; } +#ifdef CONFIG_SMP static void eiointc_set_irq_route(int pos, unsigned int cpu, unsigned int mnode, nodemask_t *node_map) { int i, node, cpu_node, route_node; @@ -126,6 +127,7 @@ static int eiointc_set_irq_affinity(struct irq_data *d, const struct cpumask *af return IRQ_SET_MASK_OK; } +#endif static int eiointc_index(int node) { @@ -238,7 +240,9 @@ static struct irq_chip eiointc_irq_chip = { .irq_ack = eiointc_ack_irq, .irq_mask = eiointc_mask_irq, .irq_unmask = eiointc_unmask_irq, +#ifdef CONFIG_SMP .irq_set_affinity = eiointc_set_irq_affinity, +#endif }; static int eiointc_domain_alloc(struct irq_domain *domain, unsigned int virq, -- cgit v1.2.3 From 42a7d887664b02a747ef5d479f6fd01081564af8 Mon Sep 17 00:00:00 2001 From: Tiezhu Yang Date: Tue, 26 Mar 2024 20:11:29 +0800 Subject: irqchip/loongson: Select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP for IRQ_LOONGARCH_CPU An interrupt's effective affinity can only be different from its configured affinity if there are multiple CPUs. Make it clear that this option is only meaningful when SMP is enabled. Otherwise, there exists "WARNING: unmet direct dependencies detected for GENERIC_IRQ_EFFECTIVE_AFF_MASK" when make menuconfig if CONFIG_SMP is not set on LoongArch. Signed-off-by: Tiezhu Yang Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240326121130.16622-3-yangtiezhu@loongson.cn --- drivers/irqchip/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index 687d17e47556..14464716bacb 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -593,7 +593,7 @@ config IRQ_LOONGARCH_CPU bool select GENERIC_IRQ_CHIP select IRQ_DOMAIN - select GENERIC_IRQ_EFFECTIVE_AFF_MASK + select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP select LOONGSON_HTVEC select LOONGSON_LIOINTC select LOONGSON_EIOINTC -- cgit v1.2.3 From 35d77eb7b974f62aaef5a0dc72d93ddb1ada4074 Mon Sep 17 00:00:00 2001 From: Anup Patel Date: Sat, 13 Apr 2024 12:22:10 +0530 Subject: irqchip/riscv-imsic: Fix boot time update effective affinity warning Currently, the following warning is observed on the QEMU virt machine: genirq: irq_chip APLIC-MSI-d000000.aplic did not update eff. affinity mask of irq 12 The above warning is because the IMSIC driver does not set the initial value of effective affinity in the interrupt descriptor. To address this, initialize the effective affinity in imsic_irq_domain_alloc(). 
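A minimal way to picture the fix (the foo_ helper name is made up; the genirq calls are real): once the allocation path has bound the vector to a CPU, it records that CPU as the initial effective affinity so the genirq core has nothing to warn about at boot:

  /* Called from the domain's ->alloc() after the vector was bound to 'cpu'. */
  static void foo_record_effective_affinity(unsigned int virq, unsigned int cpu)
  {
          irq_data_update_effective_affinity(irq_get_irq_data(virq),
                                             cpumask_of(cpu));
  }
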
Fixes: 027e125acdba ("irqchip/riscv-imsic: Add device MSI domain support for platform devices") Signed-off-by: Anup Patel Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240413065210.315896-1-apatel@ventanamicro.com --- drivers/irqchip/irq-riscv-imsic-platform.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/irqchip/irq-riscv-imsic-platform.c b/drivers/irqchip/irq-riscv-imsic-platform.c index 1e6dddfd3046..11723a763c10 100644 --- a/drivers/irqchip/irq-riscv-imsic-platform.c +++ b/drivers/irqchip/irq-riscv-imsic-platform.c @@ -157,6 +157,7 @@ static int imsic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, handle_simple_irq, NULL, NULL); irq_set_noprobe(virq); irq_set_affinity(virq, cpu_online_mask); + irq_data_update_effective_affinity(irq_get_irq_data(virq), cpumask_of(vec->cpu)); return 0; } -- cgit v1.2.3 From 234a557e28b9142e07eae21083a04fffef83ee8d Mon Sep 17 00:00:00 2001 From: Baoqi Zhang Date: Mon, 22 Apr 2024 17:38:30 +0800 Subject: irqchip/loongson-pch-pic: Update interrupt registration policy The current code is using a fixed mapping between the LS7A interrupt source and the HT interrupt vector. This prevents the utilization of the full interrupt vector space and therefore limits the number of interrupt source in a system. Replace the fixed mapping with a dynamic mapping which allocates a vector when an interrupt source is set up. This avoids that unused sources prevent vectors from being used for other devices. Introduce a mapping table in struct pch_pic, where each interrupt source will allocate an index as a 'hwirq' number from the table in the order of application and set table value as interrupt source number. This hwirq number will be configured as vector in the HT interrupt controller. For an interrupt source, the validity period of the obtained hwirq will last until the system reset. 
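The allocate-on-first-use table can be exercised in isolation. The sketch below is plain user-space C with invented names (translate(), table[], inuse) that only mirrors the idea, not the driver: the first lookup of a source number hands out the next free hwirq, later lookups return the same one, and allocation fails once the vector space is exhausted:

  #include <stdio.h>

  #define PIC_COUNT     64
  #define UNDEF_VECTOR  255

  static unsigned char table[PIC_COUNT];  /* hwirq -> interrupt source */
  static int inuse;

  static int translate(unsigned int source)
  {
          int i;

          for (i = 0; i < inuse; i++)
                  if (table[i] == source)
                          return i;               /* already assigned */

          if (inuse >= PIC_COUNT)
                  return -1;                      /* no free vectors */

          table[inuse] = source;
          return inuse++;
  }

  int main(void)
  {
          int i;

          for (i = 0; i < PIC_COUNT; i++)
                  table[i] = UNDEF_VECTOR;

          printf("%d\n", translate(40));  /* first source seen  -> hwirq 0 */
          printf("%d\n", translate(17));  /* second source seen -> hwirq 1 */
          printf("%d\n", translate(40));  /* same source again  -> hwirq 0 */
          return 0;
  }

In the driver the table is consulted under priv->pic_lock, and the allocated hwirq, not the raw source number, is what gets programmed as the HT vector.
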
Co-developed-by: Biao Dong Signed-off-by: Biao Dong Co-developed-by: Tianyang Zhang Signed-off-by: Tianyang Zhang Signed-off-by: Baoqi Zhang Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240422093830.27212-1-zhangtianyang@loongson.cn --- drivers/irqchip/irq-loongson-pch-pic.c | 76 ++++++++++++++++++++++++++-------- 1 file changed, 59 insertions(+), 17 deletions(-) (limited to 'drivers') diff --git a/drivers/irqchip/irq-loongson-pch-pic.c b/drivers/irqchip/irq-loongson-pch-pic.c index 63db8e2172e0..cbaef65e804c 100644 --- a/drivers/irqchip/irq-loongson-pch-pic.c +++ b/drivers/irqchip/irq-loongson-pch-pic.c @@ -33,6 +33,7 @@ #define PIC_COUNT (PIC_COUNT_PER_REG * PIC_REG_COUNT) #define PIC_REG_IDX(irq_id) ((irq_id) / PIC_COUNT_PER_REG) #define PIC_REG_BIT(irq_id) ((irq_id) % PIC_COUNT_PER_REG) +#define PIC_UNDEF_VECTOR 255 static int nr_pics; @@ -46,12 +47,19 @@ struct pch_pic { u32 saved_vec_en[PIC_REG_COUNT]; u32 saved_vec_pol[PIC_REG_COUNT]; u32 saved_vec_edge[PIC_REG_COUNT]; + u8 table[PIC_COUNT]; + int inuse; }; static struct pch_pic *pch_pic_priv[MAX_IO_PICS]; struct fwnode_handle *pch_pic_handle[MAX_IO_PICS]; +static inline u8 hwirq_to_bit(struct pch_pic *priv, int hirq) +{ + return priv->table[hirq]; +} + static void pch_pic_bitset(struct pch_pic *priv, int offset, int bit) { u32 reg; @@ -80,45 +88,47 @@ static void pch_pic_mask_irq(struct irq_data *d) { struct pch_pic *priv = irq_data_get_irq_chip_data(d); - pch_pic_bitset(priv, PCH_PIC_MASK, d->hwirq); + pch_pic_bitset(priv, PCH_PIC_MASK, hwirq_to_bit(priv, d->hwirq)); irq_chip_mask_parent(d); } static void pch_pic_unmask_irq(struct irq_data *d) { struct pch_pic *priv = irq_data_get_irq_chip_data(d); + int bit = hwirq_to_bit(priv, d->hwirq); - writel(BIT(PIC_REG_BIT(d->hwirq)), - priv->base + PCH_PIC_CLR + PIC_REG_IDX(d->hwirq) * 4); + writel(BIT(PIC_REG_BIT(bit)), + priv->base + PCH_PIC_CLR + PIC_REG_IDX(bit) * 4); irq_chip_unmask_parent(d); - pch_pic_bitclr(priv, PCH_PIC_MASK, d->hwirq); + pch_pic_bitclr(priv, PCH_PIC_MASK, bit); } static int pch_pic_set_type(struct irq_data *d, unsigned int type) { struct pch_pic *priv = irq_data_get_irq_chip_data(d); + int bit = hwirq_to_bit(priv, d->hwirq); int ret = 0; switch (type) { case IRQ_TYPE_EDGE_RISING: - pch_pic_bitset(priv, PCH_PIC_EDGE, d->hwirq); - pch_pic_bitclr(priv, PCH_PIC_POL, d->hwirq); + pch_pic_bitset(priv, PCH_PIC_EDGE, bit); + pch_pic_bitclr(priv, PCH_PIC_POL, bit); irq_set_handler_locked(d, handle_edge_irq); break; case IRQ_TYPE_EDGE_FALLING: - pch_pic_bitset(priv, PCH_PIC_EDGE, d->hwirq); - pch_pic_bitset(priv, PCH_PIC_POL, d->hwirq); + pch_pic_bitset(priv, PCH_PIC_EDGE, bit); + pch_pic_bitset(priv, PCH_PIC_POL, bit); irq_set_handler_locked(d, handle_edge_irq); break; case IRQ_TYPE_LEVEL_HIGH: - pch_pic_bitclr(priv, PCH_PIC_EDGE, d->hwirq); - pch_pic_bitclr(priv, PCH_PIC_POL, d->hwirq); + pch_pic_bitclr(priv, PCH_PIC_EDGE, bit); + pch_pic_bitclr(priv, PCH_PIC_POL, bit); irq_set_handler_locked(d, handle_level_irq); break; case IRQ_TYPE_LEVEL_LOW: - pch_pic_bitclr(priv, PCH_PIC_EDGE, d->hwirq); - pch_pic_bitset(priv, PCH_PIC_POL, d->hwirq); + pch_pic_bitclr(priv, PCH_PIC_EDGE, bit); + pch_pic_bitset(priv, PCH_PIC_POL, bit); irq_set_handler_locked(d, handle_level_irq); break; default: @@ -133,11 +143,12 @@ static void pch_pic_ack_irq(struct irq_data *d) { unsigned int reg; struct pch_pic *priv = irq_data_get_irq_chip_data(d); + int bit = hwirq_to_bit(priv, d->hwirq); - reg = readl(priv->base + PCH_PIC_EDGE + PIC_REG_IDX(d->hwirq) * 4); - if (reg & 
BIT(PIC_REG_BIT(d->hwirq))) { - writel(BIT(PIC_REG_BIT(d->hwirq)), - priv->base + PCH_PIC_CLR + PIC_REG_IDX(d->hwirq) * 4); + reg = readl(priv->base + PCH_PIC_EDGE + PIC_REG_IDX(bit) * 4); + if (reg & BIT(PIC_REG_BIT(bit))) { + writel(BIT(PIC_REG_BIT(bit)), + priv->base + PCH_PIC_CLR + PIC_REG_IDX(bit) * 4); } irq_chip_ack_parent(d); } @@ -159,6 +170,8 @@ static int pch_pic_domain_translate(struct irq_domain *d, { struct pch_pic *priv = d->host_data; struct device_node *of_node = to_of_node(fwspec->fwnode); + unsigned long flags; + int i; if (of_node) { if (fwspec->param_count < 2) @@ -171,12 +184,33 @@ static int pch_pic_domain_translate(struct irq_domain *d, return -EINVAL; *hwirq = fwspec->param[0] - priv->gsi_base; + if (fwspec->param_count > 1) *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK; else *type = IRQ_TYPE_NONE; } + raw_spin_lock_irqsave(&priv->pic_lock, flags); + /* Check pic-table to confirm if the hwirq has been assigned */ + for (i = 0; i < priv->inuse; i++) { + if (priv->table[i] == *hwirq) { + *hwirq = i; + break; + } + } + if (i == priv->inuse) { + /* Assign a new hwirq in pic-table */ + if (priv->inuse >= PIC_COUNT) { + pr_err("pch-pic domain has no free vectors\n"); + raw_spin_unlock_irqrestore(&priv->pic_lock, flags); + return -EINVAL; + } + priv->table[priv->inuse] = *hwirq; + *hwirq = priv->inuse++; + } + raw_spin_unlock_irqrestore(&priv->pic_lock, flags); + return 0; } @@ -194,6 +228,9 @@ static int pch_pic_alloc(struct irq_domain *domain, unsigned int virq, if (err) return err; + /* Write vector ID */ + writeb(priv->ht_vec_base + hwirq, priv->base + PCH_INT_HTVEC(hwirq_to_bit(priv, hwirq))); + parent_fwspec.fwnode = domain->parent->fwnode; parent_fwspec.param_count = 1; parent_fwspec.param[0] = hwirq + priv->ht_vec_base; @@ -222,7 +259,7 @@ static void pch_pic_reset(struct pch_pic *priv) for (i = 0; i < PIC_COUNT; i++) { /* Write vector ID */ - writeb(priv->ht_vec_base + i, priv->base + PCH_INT_HTVEC(i)); + writeb(priv->ht_vec_base + i, priv->base + PCH_INT_HTVEC(hwirq_to_bit(priv, i))); /* Hardcode route to HT0 Lo */ writeb(1, priv->base + PCH_INT_ROUTE(i)); } @@ -284,6 +321,7 @@ static int pch_pic_init(phys_addr_t addr, unsigned long size, int vec_base, u32 gsi_base) { struct pch_pic *priv; + int i; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) @@ -294,6 +332,10 @@ static int pch_pic_init(phys_addr_t addr, unsigned long size, int vec_base, if (!priv->base) goto free_priv; + priv->inuse = 0; + for (i = 0; i < PIC_COUNT; i++) + priv->table[i] = PIC_UNDEF_VECTOR; + priv->ht_vec_base = vec_base; priv->vec_count = ((readq(priv->base) >> 48) & 0xff) + 1; priv->gsi_base = gsi_base; -- cgit v1.2.3 From 8371696a975a52eb055dcf36ac1e562bfda493cc Mon Sep 17 00:00:00 2001 From: Stefan Wahren Date: Fri, 12 Apr 2024 23:53:34 +0200 Subject: irqchip/mxs: Declare icoll_handle_irq() as static After commit 5bb578a0c1b8 ("ARM: 9298/1: Drop custom mdesc->handle_irq()") the function icoll_handle_irq() is only used within irq-mxs.c. So declare it as static to fix the warning about a missing prototype when building with W=1. 
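The warning in question is -Wmissing-prototypes: a function with external linkage is defined without a prior declaration. A tiny stand-alone file (names invented for the demo) shows the two ways to silence it, internal linkage or a prototype:

  /* demo.c, compiled with: gcc -Wmissing-prototypes -c demo.c */

  void leaky_handler(void)        /* warning: no previous prototype for 'leaky_handler' */
  {
  }

  static void local_handler(void) /* static: no external prototype expected */
  {
  }

  void entry(void);               /* a prototype (normally in a header) also works */
  void entry(void)
  {
          local_handler();        /* keep the static function referenced */
  }

Since icoll_handle_irq() has no user outside irq-mxs.c, internal linkage is the right fix here.
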
Signed-off-by: Stefan Wahren Signed-off-by: Thomas Gleixner --- drivers/irqchip/irq-mxs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c index be9680645545..d67b5da38982 100644 --- a/drivers/irqchip/irq-mxs.c +++ b/drivers/irqchip/irq-mxs.c @@ -130,7 +130,7 @@ static struct irq_chip asm9260_icoll_chip = { IRQCHIP_SKIP_SET_WAKE, }; -asmlinkage void __exception_irq_entry icoll_handle_irq(struct pt_regs *regs) +static void __exception_irq_entry icoll_handle_irq(struct pt_regs *regs) { u32 irqnr; -- cgit v1.2.3 From 8661327f80b02fbdc763e081c34ed15fbd63f810 Mon Sep 17 00:00:00 2001 From: Antonio Borneo Date: Mon, 15 Apr 2024 15:49:16 +0200 Subject: irqchip/stm32-exti: Fix minor indentation issue Commit 046a6ee2343b ("irqchip: Bulk conversion to generic_handle_domain_irq()") incorrectly added a leading space character in the line indentation. Use only TAB for indentation, removing the leading space. Signed-off-by: Antonio Borneo Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240415134926.1254428-2-antonio.borneo@foss.st.com --- drivers/irqchip/irq-stm32-exti.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c index 26a5193d0ae4..3b35f138ed3d 100644 --- a/drivers/irqchip/irq-stm32-exti.c +++ b/drivers/irqchip/irq-stm32-exti.c @@ -322,7 +322,7 @@ static void stm32_irq_handler(struct irq_desc *desc) while ((pending = stm32_exti_pending(gc))) { for_each_set_bit(n, &pending, IRQS_PER_BANK) generic_handle_domain_irq(domain, irq_base + n); - } + } } chained_irq_exit(chip, desc); -- cgit v1.2.3 From 77ec258ffa5c5f8c700fab7a0a78426904d1e6d4 Mon Sep 17 00:00:00 2001 From: Antonio Borneo Date: Mon, 15 Apr 2024 15:49:18 +0200 Subject: irqchip/stm32-exti: Map interrupts through interrupts-extended The mapping of EXTI events to its parent interrupt controller is both SoC and instance dependent. The current implementation requires adding a new mapping table to the driver's code and a new compatible for each new EXTI instance. Check for the presence of the optional interrupts-extended property and use it to map EXTI events to the parent's interrupts. For old device trees without the optional interrupts-extended property, the driver's behavior is unchanged, thus keeps backward compatibility. 
Signed-off-by: Antonio Borneo Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240415134926.1254428-4-antonio.borneo@foss.st.com --- drivers/irqchip/irq-stm32-exti.c | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) (limited to 'drivers') diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c index 3b35f138ed3d..e5714a0111e7 100644 --- a/drivers/irqchip/irq-stm32-exti.c +++ b/drivers/irqchip/irq-stm32-exti.c @@ -61,6 +61,7 @@ struct stm32_exti_host_data { struct stm32_exti_chip_data *chips_data; const struct stm32_exti_drv_data *drv_data; struct hwspinlock *hwlock; + bool dt_has_irqs_desc; /* skip internal desc_irqs array and get it from DT */ }; static struct stm32_exti_host_data *stm32_host_data; @@ -731,6 +732,23 @@ static int stm32_exti_h_domain_alloc(struct irq_domain *dm, irq_domain_set_hwirq_and_chip(dm, virq, hwirq, chip, chip_data); + if (host_data->dt_has_irqs_desc) { + struct of_phandle_args out_irq; + int ret; + + ret = of_irq_parse_one(host_data->dev->of_node, hwirq, &out_irq); + if (ret) + return ret; + /* we only support one parent, so far */ + if (of_node_to_fwnode(out_irq.np) != dm->parent->fwnode) + return -EINVAL; + + of_phandle_args_to_fwspec(out_irq.np, out_irq.args, + out_irq.args_count, &p_fwspec); + + return irq_domain_alloc_irqs_parent(dm, virq, 1, &p_fwspec); + } + if (!host_data->drv_data->desc_irqs) return -EINVAL; @@ -975,6 +993,9 @@ static int stm32_exti_probe(struct platform_device *pdev) if (ret) return ret; + if (of_property_read_bool(np, "interrupts-extended")) + host_data->dt_has_irqs_desc = true; + stm32_exti_h_syscore_init(host_data); return 0; -- cgit v1.2.3 From 06d7e914cada8fccc6e30275b5cf5cca68fc385b Mon Sep 17 00:00:00 2001 From: Antonio Borneo Date: Mon, 15 Apr 2024 15:49:19 +0200 Subject: irqchip/stm32-exti: Convert driver to standard PM All driver's dependencies for suspend/resume have been fixed long ago. There are no more reasons to use syscore PM for the part of this driver related to Cortex-A MPU. Switch to standard PM using NOIRQ_SYSTEM_SLEEP_PM_OPS, so all the registers of the interrupt controller get resumed before any irq gets enabled. A side effect of this change is to drop the only global variable 'stm32_host_data', used to keep the driver's data for syscore_ops. This makes the driver ready to support multiple EXTI instances. 
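The target shape is the regular noirq system-sleep hooks; the foo_* names below are placeholders for any interrupt controller driver, and the state lives in driver data rather than in a file-scope variable:

  static int foo_suspend_noirq(struct device *dev)
  {
          /* save controller state reached via dev_get_drvdata(dev) */
          return 0;
  }

  static int foo_resume_noirq(struct device *dev)
  {
          /* restore controller state before any interrupt is re-enabled */
          return 0;
  }

  /* Hooked up through the platform_driver's .driver.pm pointer. */
  static const struct dev_pm_ops foo_pm_ops = {
          NOIRQ_SYSTEM_SLEEP_PM_OPS(foo_suspend_noirq, foo_resume_noirq)
  };

Because the callbacks receive the struct device, dev_get_drvdata() replaces the global 'stm32_host_data' pointer that the syscore hooks needed.
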
Signed-off-by: Antonio Borneo Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240415134926.1254428-5-antonio.borneo@foss.st.com --- drivers/irqchip/irq-stm32-exti.c | 57 ++++++++++++---------------------------- 1 file changed, 17 insertions(+), 40 deletions(-) (limited to 'drivers') diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c index e5714a0111e7..ded20d9bde73 100644 --- a/drivers/irqchip/irq-stm32-exti.c +++ b/drivers/irqchip/irq-stm32-exti.c @@ -19,7 +19,7 @@ #include #include #include -#include +#include #include @@ -64,8 +64,6 @@ struct stm32_exti_host_data { bool dt_has_irqs_desc; /* skip internal desc_irqs array and get it from DT */ }; -static struct stm32_exti_host_data *stm32_host_data; - static const struct stm32_exti_bank stm32f4xx_exti_b1 = { .imr_ofst = 0x00, .emr_ofst = 0x04, @@ -622,50 +620,32 @@ static int stm32_exti_h_set_affinity(struct irq_data *d, return IRQ_SET_MASK_OK_DONE; } -static int __maybe_unused stm32_exti_h_suspend(void) +static int stm32_exti_h_suspend(struct device *dev) { + struct stm32_exti_host_data *host_data = dev_get_drvdata(dev); struct stm32_exti_chip_data *chip_data; int i; - for (i = 0; i < stm32_host_data->drv_data->bank_nr; i++) { - chip_data = &stm32_host_data->chips_data[i]; - raw_spin_lock(&chip_data->rlock); + for (i = 0; i < host_data->drv_data->bank_nr; i++) { + chip_data = &host_data->chips_data[i]; stm32_chip_suspend(chip_data, chip_data->wake_active); - raw_spin_unlock(&chip_data->rlock); } return 0; } -static void __maybe_unused stm32_exti_h_resume(void) +static int stm32_exti_h_resume(struct device *dev) { + struct stm32_exti_host_data *host_data = dev_get_drvdata(dev); struct stm32_exti_chip_data *chip_data; int i; - for (i = 0; i < stm32_host_data->drv_data->bank_nr; i++) { - chip_data = &stm32_host_data->chips_data[i]; - raw_spin_lock(&chip_data->rlock); + for (i = 0; i < host_data->drv_data->bank_nr; i++) { + chip_data = &host_data->chips_data[i]; stm32_chip_resume(chip_data, chip_data->mask_cache); - raw_spin_unlock(&chip_data->rlock); } -} -static struct syscore_ops stm32_exti_h_syscore_ops = { -#ifdef CONFIG_PM_SLEEP - .suspend = stm32_exti_h_suspend, - .resume = stm32_exti_h_resume, -#endif -}; - -static void stm32_exti_h_syscore_init(struct stm32_exti_host_data *host_data) -{ - stm32_host_data = host_data; - register_syscore_ops(&stm32_exti_h_syscore_ops); -} - -static void stm32_exti_h_syscore_deinit(void) -{ - unregister_syscore_ops(&stm32_exti_h_syscore_ops); + return 0; } static int stm32_exti_h_retrigger(struct irq_data *d) @@ -789,8 +769,6 @@ stm32_exti_host_data *stm32_exti_host_init(const struct stm32_exti_drv_data *dd, goto free_chips_data; } - stm32_host_data = host_data; - return host_data; free_chips_data: @@ -916,11 +894,6 @@ static void stm32_exti_remove_irq(void *data) irq_domain_remove(domain); } -static void stm32_exti_remove(struct platform_device *pdev) -{ - stm32_exti_h_syscore_deinit(); -} - static int stm32_exti_probe(struct platform_device *pdev) { int ret, i; @@ -934,6 +907,8 @@ static int stm32_exti_probe(struct platform_device *pdev) if (!host_data) return -ENOMEM; + dev_set_drvdata(dev, host_data); + /* check for optional hwspinlock which may be not available yet */ ret = of_hwspin_lock_get_id(np, 0); if (ret == -EPROBE_DEFER) @@ -996,8 +971,6 @@ static int stm32_exti_probe(struct platform_device *pdev) if (of_property_read_bool(np, "interrupts-extended")) host_data->dt_has_irqs_desc = true; - stm32_exti_h_syscore_init(host_data); - return 0; } @@ 
-1009,12 +982,16 @@ static const struct of_device_id stm32_exti_ids[] = { }; MODULE_DEVICE_TABLE(of, stm32_exti_ids); +static const struct dev_pm_ops stm32_exti_dev_pm_ops = { + NOIRQ_SYSTEM_SLEEP_PM_OPS(stm32_exti_h_suspend, stm32_exti_h_resume) +}; + static struct platform_driver stm32_exti_driver = { .probe = stm32_exti_probe, - .remove_new = stm32_exti_remove, .driver = { .name = "stm32_exti", .of_match_table = stm32_exti_ids, + .pm = &stm32_exti_dev_pm_ops, }, }; -- cgit v1.2.3 From c00a4cb15a90668684d60bc4ff768a23558dadb9 Mon Sep 17 00:00:00 2001 From: Antonio Borneo Date: Mon, 15 Apr 2024 15:49:20 +0200 Subject: irqchip/stm32-exti: Skip secure events Secure OS can reserve some EXTI events, marking them as "secure" by setting the corresponding bit in register SECCFGR (aka TZENR). These events cannot be used by Linux. Read the list of reserved events and check it during interrupt domain allocation. Signed-off-by: Antonio Borneo Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240415134926.1254428-6-antonio.borneo@foss.st.com --- drivers/irqchip/irq-stm32-exti.c | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) (limited to 'drivers') diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c index ded20d9bde73..c0a020aab557 100644 --- a/drivers/irqchip/irq-stm32-exti.c +++ b/drivers/irqchip/irq-stm32-exti.c @@ -36,6 +36,7 @@ struct stm32_exti_bank { u32 rpr_ofst; u32 fpr_ofst; u32 trg_ofst; + u32 seccfgr_ofst; }; #define UNDEF_REG ~0 @@ -54,10 +55,12 @@ struct stm32_exti_chip_data { u32 mask_cache; u32 rtsr_cache; u32 ftsr_cache; + u32 event_reserved; }; struct stm32_exti_host_data { void __iomem *base; + struct device *dev; struct stm32_exti_chip_data *chips_data; const struct stm32_exti_drv_data *drv_data; struct hwspinlock *hwlock; @@ -73,6 +76,7 @@ static const struct stm32_exti_bank stm32f4xx_exti_b1 = { .rpr_ofst = 0x14, .fpr_ofst = UNDEF_REG, .trg_ofst = UNDEF_REG, + .seccfgr_ofst = UNDEF_REG, }; static const struct stm32_exti_bank *stm32f4xx_exti_banks[] = { @@ -93,6 +97,7 @@ static const struct stm32_exti_bank stm32h7xx_exti_b1 = { .rpr_ofst = 0x88, .fpr_ofst = UNDEF_REG, .trg_ofst = UNDEF_REG, + .seccfgr_ofst = UNDEF_REG, }; static const struct stm32_exti_bank stm32h7xx_exti_b2 = { @@ -104,6 +109,7 @@ static const struct stm32_exti_bank stm32h7xx_exti_b2 = { .rpr_ofst = 0x98, .fpr_ofst = UNDEF_REG, .trg_ofst = UNDEF_REG, + .seccfgr_ofst = UNDEF_REG, }; static const struct stm32_exti_bank stm32h7xx_exti_b3 = { @@ -115,6 +121,7 @@ static const struct stm32_exti_bank stm32h7xx_exti_b3 = { .rpr_ofst = 0xA8, .fpr_ofst = UNDEF_REG, .trg_ofst = UNDEF_REG, + .seccfgr_ofst = UNDEF_REG, }; static const struct stm32_exti_bank *stm32h7xx_exti_banks[] = { @@ -137,6 +144,7 @@ static const struct stm32_exti_bank stm32mp1_exti_b1 = { .rpr_ofst = 0x0C, .fpr_ofst = 0x10, .trg_ofst = 0x3EC, + .seccfgr_ofst = 0x14, }; static const struct stm32_exti_bank stm32mp1_exti_b2 = { @@ -148,6 +156,7 @@ static const struct stm32_exti_bank stm32mp1_exti_b2 = { .rpr_ofst = 0x2C, .fpr_ofst = 0x30, .trg_ofst = 0x3E8, + .seccfgr_ofst = 0x34, }; static const struct stm32_exti_bank stm32mp1_exti_b3 = { @@ -159,6 +168,7 @@ static const struct stm32_exti_bank stm32mp1_exti_b3 = { .rpr_ofst = 0x4C, .fpr_ofst = 0x50, .trg_ofst = 0x3E4, + .seccfgr_ofst = 0x54, }; static const struct stm32_exti_bank *stm32mp1_exti_banks[] = { @@ -706,6 +716,12 @@ static int stm32_exti_h_domain_alloc(struct irq_domain *dm, bank = hwirq / IRQS_PER_BANK; chip_data = 
&host_data->chips_data[bank]; + /* Check if event is reserved (Secure) */ + if (chip_data->event_reserved & BIT(hwirq % IRQS_PER_BANK)) { + dev_err(host_data->dev, "event %lu is reserved, secure\n", hwirq); + return -EPERM; + } + event_trg = readl_relaxed(host_data->base + chip_data->reg_bank->trg_ofst); chip = (event_trg & BIT(hwirq % IRQS_PER_BANK)) ? &stm32_exti_h_chip : &stm32_exti_h_chip_direct; @@ -803,6 +819,10 @@ stm32_exti_chip_data *stm32_exti_chip_init(struct stm32_exti_host_data *h_data, if (stm32_bank->emr_ofst != UNDEF_REG) writel_relaxed(0, base + stm32_bank->emr_ofst); + /* reserve Secure events */ + if (stm32_bank->seccfgr_ofst != UNDEF_REG) + chip_data->event_reserved = readl_relaxed(base + stm32_bank->seccfgr_ofst); + pr_info("%pOF: bank%d\n", node, bank_idx); return chip_data; @@ -908,6 +928,7 @@ static int stm32_exti_probe(struct platform_device *pdev) return -ENOMEM; dev_set_drvdata(dev, host_data); + host_data->dev = dev; /* check for optional hwspinlock which may be not available yet */ ret = of_hwspin_lock_get_id(np, 0); -- cgit v1.2.3 From df41b65ceecac507352804d7647c480f180ff6f9 Mon Sep 17 00:00:00 2001 From: Antonio Borneo Date: Mon, 15 Apr 2024 15:49:21 +0200 Subject: irqchip/stm32-exti: Mark events reserved with RIF configuration check EXTI events availability depends on Resource Isolation Framework (RIF) configuration. RIF grants access to buses with Compartment ID (CID) filtering, secure and privilege level. It also assigns EXTI events to one or several processors (CID, Secure, Privilege). EXTI events used by Linux must be CID-filtered (EnCIDCFGR.CFEN=1) and statically assigned to CID1 (EnCIDCFR.CID=CID1). EXTI events not filling these conditions are marked as reserved and can't be used by Linux. Signed-off-by: Antonio Borneo Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240415134926.1254428-7-antonio.borneo@foss.st.com --- drivers/irqchip/irq-stm32-exti.c | 40 ++++++++++++++++++++++++++++++++++++++-- 1 file changed, 38 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c index c0a020aab557..2cc9f3b7d669 100644 --- a/drivers/irqchip/irq-stm32-exti.c +++ b/drivers/irqchip/irq-stm32-exti.c @@ -23,9 +23,22 @@ #include -#define IRQS_PER_BANK 32 +#define IRQS_PER_BANK 32 -#define HWSPNLCK_TIMEOUT 1000 /* usec */ +#define HWSPNLCK_TIMEOUT 1000 /* usec */ + +#define EXTI_EnCIDCFGR(n) (0x180 + (n) * 4) +#define EXTI_HWCFGR1 0x3f0 + +/* Register: EXTI_EnCIDCFGR(n) */ +#define EXTI_CIDCFGR_CFEN_MASK BIT(0) +#define EXTI_CIDCFGR_CID_MASK GENMASK(6, 4) +#define EXTI_CIDCFGR_CID_SHIFT 4 + +/* Register: EXTI_HWCFGR1 */ +#define EXTI_HWCFGR1_CIDWIDTH_MASK GENMASK(27, 24) + +#define EXTI_CID1 1 struct stm32_exti_bank { u32 imr_ofst; @@ -907,6 +920,27 @@ static const struct irq_domain_ops stm32_exti_h_domain_ops = { .xlate = irq_domain_xlate_twocell, }; +static void stm32_exti_check_rif(struct stm32_exti_host_data *host_data) +{ + unsigned int bank, i, event; + u32 cid, cidcfgr, hwcfgr1; + + /* quit on CID not supported */ + hwcfgr1 = readl_relaxed(host_data->base + EXTI_HWCFGR1); + if ((hwcfgr1 & EXTI_HWCFGR1_CIDWIDTH_MASK) == 0) + return; + + for (bank = 0; bank < host_data->drv_data->bank_nr; bank++) { + for (i = 0; i < IRQS_PER_BANK; i++) { + event = bank * IRQS_PER_BANK + i; + cidcfgr = readl_relaxed(host_data->base + EXTI_EnCIDCFGR(event)); + cid = (cidcfgr & EXTI_CIDCFGR_CID_MASK) >> EXTI_CIDCFGR_CID_SHIFT; + if ((cidcfgr & EXTI_CIDCFGR_CFEN_MASK) && cid != EXTI_CID1) + 
+				host_data->chips_data[bank].event_reserved |= BIT(i);
+		}
+	}
+}
+
 static void stm32_exti_remove_irq(void *data)
 {
 	struct irq_domain *domain = data;
@@ -969,6 +1003,8 @@ static int stm32_exti_probe(struct platform_device *pdev)
 	for (i = 0; i < drv_data->bank_nr; i++)
 		stm32_exti_chip_init(host_data, i, np);
+	stm32_exti_check_rif(host_data);
+
 	parent_domain = irq_find_host(of_irq_find_parent(np));
 	if (!parent_domain) {
 		dev_err(dev, "GIC interrupt-parent not found\n");
-- cgit v1.2.3


From c7cad38d37486668a448215fc92bace9c8cf747a Mon Sep 17 00:00:00 2001
From: Florian Fainelli
Date: Wed, 24 Apr 2024 10:57:32 -0700
Subject: irqchip/irq-brcmstb-l2: Avoid saving mask on shutdown

The interrupt controller shutdown path does not need to save the mask of
enabled interrupts because the next state the system is going to be in is
akin to a cold boot, or a kexec'd kernel. Saving the mask only makes sense
if the software state needs to preserve the hardware state across a system
suspend/resume cycle.

As an optimization, and given that there are systems with dozens of such
interrupt controllers, save a "slow" memory-mapped I/O read in the shutdown
path where no saving/restoring is required.

Reported-by: Tim Ross
Signed-off-by: Florian Fainelli
Signed-off-by: Thomas Gleixner
Link: https://lore.kernel.org/r/20240424175732.1526531-1-florian.fainelli@broadcom.com
---
 drivers/irqchip/irq-brcmstb-l2.c | 17 ++++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)

(limited to 'drivers')

diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
index 2b0b3175cea0..c988886917f7 100644
--- a/drivers/irqchip/irq-brcmstb-l2.c
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -118,7 +118,7 @@ out:
 	chained_irq_exit(chip, desc);
 }
-static void brcmstb_l2_intc_suspend(struct irq_data *d)
+static void __brcmstb_l2_intc_suspend(struct irq_data *d, bool save)
 {
 	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
 	struct irq_chip_type *ct = irq_data_get_chip_type(d);
@@ -127,7 +127,8 @@ static void brcmstb_l2_intc_suspend(struct irq_data *d)
 	irq_gc_lock_irqsave(gc, flags);
 	/* Save the current mask */
-	b->saved_mask = irq_reg_readl(gc, ct->regs.mask);
+	if (save)
+		b->saved_mask = irq_reg_readl(gc, ct->regs.mask);
 	if (b->can_wake) {
 		/* Program the wakeup mask */
@@ -137,6 +138,16 @@ static void brcmstb_l2_intc_suspend(struct irq_data *d)
 	irq_gc_unlock_irqrestore(gc, flags);
 }
+static void brcmstb_l2_intc_shutdown(struct irq_data *d)
+{
+	__brcmstb_l2_intc_suspend(d, false);
+}
+
+static void brcmstb_l2_intc_suspend(struct irq_data *d)
+{
+	__brcmstb_l2_intc_suspend(d, true);
+}
+
 static void brcmstb_l2_intc_resume(struct irq_data *d)
 {
 	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
@@ -252,7 +263,7 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np,
 	ct->chip.irq_suspend = brcmstb_l2_intc_suspend;
 	ct->chip.irq_resume = brcmstb_l2_intc_resume;
-	ct->chip.irq_pm_shutdown = brcmstb_l2_intc_suspend;
+	ct->chip.irq_pm_shutdown = brcmstb_l2_intc_shutdown;
 	if (data->can_wake) {
 		/* This IRQ chip can wake the system, set all child interrupts
-- cgit v1.2.3


From 6a9a52f74e3b82ff3f5398810c1b23ad497e2df5 Mon Sep 17 00:00:00 2001
From: Dawei Li
Date: Tue, 16 Apr 2024 16:54:49 +0800
Subject: irqchip/irq-bcm6345-l1: Avoid explicit cpumask allocation on stack

In general it's preferable to avoid placing cpumasks on the stack, as for
large values of NR_CPUS these can consume significant amounts of stack
space and make stack overflows more likely.
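As a rough, hypothetical illustration of that cost (the helper and its names
below are made up, not taken from this driver): struct cpumask embeds an
NR_CPUS-bit bitmap, so with CONFIG_NR_CPUS=8192 a single on-stack mask
occupies 8192 / 8 = 1024 bytes, a sizeable slice of a typical 8 to 16 KiB
kernel stack, and nested helpers multiply the cost.

  #include <linux/cpumask.h>

  /* Old-style pattern: the temporary mask alone costs 1 KiB of stack with NR_CPUS=8192. */
  static unsigned int first_cpu_in_both(const struct cpumask *a, const struct cpumask *b)
  {
  	struct cpumask tmp;

  	if (!cpumask_and(&tmp, a, b))
  		return nr_cpu_ids;	/* empty intersection */
  	return cpumask_first(&tmp);
  }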
Use cpumask_first_and_and() to avoid the need for a temporary cpumask on the stack. Signed-off-by: Dawei Li Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240416085454.3547175-3-dawei.li@shingroup.cn --- drivers/irqchip/irq-bcm6345-l1.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/irqchip/irq-bcm6345-l1.c b/drivers/irqchip/irq-bcm6345-l1.c index eb02d203c963..90daa274ef23 100644 --- a/drivers/irqchip/irq-bcm6345-l1.c +++ b/drivers/irqchip/irq-bcm6345-l1.c @@ -192,14 +192,10 @@ static int bcm6345_l1_set_affinity(struct irq_data *d, u32 mask = BIT(d->hwirq % IRQS_PER_WORD); unsigned int old_cpu = cpu_for_irq(intc, d); unsigned int new_cpu; - struct cpumask valid; unsigned long flags; bool enabled; - if (!cpumask_and(&valid, &intc->cpumask, dest)) - return -EINVAL; - - new_cpu = cpumask_any_and(&valid, cpu_online_mask); + new_cpu = cpumask_first_and_and(&intc->cpumask, dest, cpu_online_mask); if (new_cpu >= nr_cpu_ids) return -EINVAL; -- cgit v1.2.3 From fcb8af4cbcd122e33ceeadd347b8866d32035af7 Mon Sep 17 00:00:00 2001 From: Dawei Li Date: Tue, 16 Apr 2024 16:54:50 +0800 Subject: irqchip/gic-v3-its: Avoid explicit cpumask allocation on stack In general it's preferable to avoid placing cpumasks on the stack, as for large values of NR_CPUS these can consume significant amounts of stack space and make stack overflows more likely. Remove cpumask var on stack and use cpumask_any_and() to address it. Signed-off-by: Dawei Li Signed-off-by: Thomas Gleixner Reviewed-by: Marc Zyngier Link: https://lore.kernel.org/r/20240416085454.3547175-4-dawei.li@shingroup.cn --- drivers/irqchip/irq-gic-v3-its.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index fca888b36680..20f954211c61 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -3826,9 +3826,9 @@ static int its_vpe_set_affinity(struct irq_data *d, bool force) { struct its_vpe *vpe = irq_data_get_irq_chip_data(d); - struct cpumask common, *table_mask; + unsigned int from, cpu = nr_cpu_ids; + struct cpumask *table_mask; unsigned long flags; - int from, cpu; /* * Changing affinity is mega expensive, so let's be as lazy as @@ -3850,10 +3850,15 @@ static int its_vpe_set_affinity(struct irq_data *d, * If we are offered another CPU in the same GICv4.1 ITS * affinity, pick this one. Otherwise, any CPU will do. */ - if (table_mask && cpumask_and(&common, mask_val, table_mask)) - cpu = cpumask_test_cpu(from, &common) ? from : cpumask_first(&common); - else + if (table_mask) + cpu = cpumask_any_and(mask_val, table_mask); + if (cpu < nr_cpu_ids) { + if (cpumask_test_cpu(from, mask_val) && + cpumask_test_cpu(from, table_mask)) + cpu = from; + } else { cpu = cpumask_first(mask_val); + } if (from == cpu) goto out; -- cgit v1.2.3 From 2bc32db5a262cc34753cb4208b2d3043d1cd81ae Mon Sep 17 00:00:00 2001 From: Dawei Li Date: Tue, 16 Apr 2024 16:54:51 +0800 Subject: irqchip/loongson-eiointc: Avoid explicit cpumask allocation on stack In general it's preferable to avoid placing cpumasks on the stack, as for large values of NR_CPUS these can consume significant amounts of stack space and make stack overflows more likely. Use cpumask_first_and_and() to avoid the need for a temporary cpumask on the stack. 
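As a minimal sketch of the conversion these patches apply (the function and
variable names here are hypothetical; the per-driver diffs differ in detail):
the cpumask_and() into a temporary followed by a second lookup collapses into
a single cpumask_first_and_and() call, which scans the three masks together
and returns nr_cpu_ids or above when the intersection is empty.

  #include <linux/cpumask.h>
  #include <linux/errno.h>

  /* Before: needs an NR_CPUS-sized temporary on the stack. */
  static int pick_target_cpu_old(const struct cpumask *hw_mask, const struct cpumask *affinity)
  {
  	struct cpumask tmp;
  	unsigned int cpu;

  	cpumask_and(&tmp, hw_mask, affinity);
  	cpu = cpumask_first_and(&tmp, cpu_online_mask);
  	return cpu < nr_cpu_ids ? cpu : -EINVAL;
  }

  /* After: no temporary, same result. */
  static int pick_target_cpu_new(const struct cpumask *hw_mask, const struct cpumask *affinity)
  {
  	unsigned int cpu = cpumask_first_and_and(hw_mask, affinity, cpu_online_mask);

  	return cpu < nr_cpu_ids ? cpu : -EINVAL;
  }

Either way the caller keeps checking for a result of nr_cpu_ids or more to
detect an empty intersection; only the temporary mask goes away.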
Signed-off-by: Dawei Li Signed-off-by: Thomas Gleixner Reviewed-by: Yury Norov Link: https://lore.kernel.org/r/20240416085454.3547175-5-dawei.li@shingroup.cn --- drivers/irqchip/irq-loongson-eiointc.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c index 4f5e6d21d77d..c7ddebf312ad 100644 --- a/drivers/irqchip/irq-loongson-eiointc.c +++ b/drivers/irqchip/irq-loongson-eiointc.c @@ -93,19 +93,15 @@ static int eiointc_set_irq_affinity(struct irq_data *d, const struct cpumask *af unsigned int cpu; unsigned long flags; uint32_t vector, regaddr; - struct cpumask intersect_affinity; struct eiointc_priv *priv = d->domain->host_data; raw_spin_lock_irqsave(&affinity_lock, flags); - cpumask_and(&intersect_affinity, affinity, cpu_online_mask); - cpumask_and(&intersect_affinity, &intersect_affinity, &priv->cpuspan_map); - - if (cpumask_empty(&intersect_affinity)) { + cpu = cpumask_first_and_and(&priv->cpuspan_map, affinity, cpu_online_mask); + if (cpu >= nr_cpu_ids) { raw_spin_unlock_irqrestore(&affinity_lock, flags); return -EINVAL; } - cpu = cpumask_first(&intersect_affinity); vector = d->hwirq; regaddr = EIOINTC_REG_ENABLE + ((vector >> 5) << 2); -- cgit v1.2.3 From 5d650d1eba876717888a0951ed873ef0f1d8cf61 Mon Sep 17 00:00:00 2001 From: Dawei Li Date: Tue, 16 Apr 2024 16:54:52 +0800 Subject: irqchip/riscv-aplic-direct: Avoid explicit cpumask allocation on stack In general it's preferable to avoid placing cpumasks on the stack, as for large values of NR_CPUS these can consume significant amounts of stack space and make stack overflows more likely. Use cpumask_first_and_and() to avoid the need for a temporary cpumask on the stack. Signed-off-by: Dawei Li Signed-off-by: Thomas Gleixner Reviewed-by: Anup Patel Link: https://lore.kernel.org/r/20240416085454.3547175-6-dawei.li@shingroup.cn --- drivers/irqchip/irq-riscv-aplic-direct.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/irqchip/irq-riscv-aplic-direct.c b/drivers/irqchip/irq-riscv-aplic-direct.c index 06bace9b7497..4a3ffe856d6c 100644 --- a/drivers/irqchip/irq-riscv-aplic-direct.c +++ b/drivers/irqchip/irq-riscv-aplic-direct.c @@ -54,15 +54,12 @@ static int aplic_direct_set_affinity(struct irq_data *d, const struct cpumask *m struct aplic_direct *direct = container_of(priv, struct aplic_direct, priv); struct aplic_idc *idc; unsigned int cpu, val; - struct cpumask amask; void __iomem *target; - cpumask_and(&amask, &direct->lmask, mask_val); - if (force) - cpu = cpumask_first(&amask); + cpu = cpumask_first_and(&direct->lmask, mask_val); else - cpu = cpumask_any_and(&amask, cpu_online_mask); + cpu = cpumask_first_and_and(&direct->lmask, mask_val, cpu_online_mask); if (cpu >= nr_cpu_ids) return -EINVAL; -- cgit v1.2.3 From a7fb69ffd7ce438a259b2f9fbcebc62f5caf2d4f Mon Sep 17 00:00:00 2001 From: Dawei Li Date: Tue, 16 Apr 2024 16:54:53 +0800 Subject: irqchip/sifive-plic: Avoid explicit cpumask allocation on stack In general it's preferable to avoid placing cpumasks on the stack, as for large values of NR_CPUS these can consume significant amounts of stack space and make stack overflows more likely. Use cpumask_first_and_and() to avoid the need for a temporary cpumask on the stack. 
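The APLIC direct patch above and the PLIC patch below share one more detail:
a forced affinity change skips the online-mask filter, exactly as the old
code did. A hypothetical condensation of that shape (pick_affinity_target()
and its parameters are made up for illustration):

  #include <linux/cpumask.h>
  #include <linux/errno.h>

  /* Forced requests consider only the hardware mask and the requested mask;
   * normal requests additionally require the target CPU to be online. */
  static int pick_affinity_target(const struct cpumask *hw_mask,
  				 const struct cpumask *mask_val, bool force)
  {
  	unsigned int cpu;

  	if (force)
  		cpu = cpumask_first_and(hw_mask, mask_val);
  	else
  		cpu = cpumask_first_and_and(hw_mask, mask_val, cpu_online_mask);

  	return cpu < nr_cpu_ids ? cpu : -EINVAL;
  }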
Signed-off-by: Dawei Li Signed-off-by: Thomas Gleixner Reviewed-by: Anup Patel Link: https://lore.kernel.org/r/20240416085454.3547175-7-dawei.li@shingroup.cn --- drivers/irqchip/irq-sifive-plic.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c index f3d4cb9e34f7..8fb183ced1e7 100644 --- a/drivers/irqchip/irq-sifive-plic.c +++ b/drivers/irqchip/irq-sifive-plic.c @@ -164,15 +164,12 @@ static int plic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, bool force) { unsigned int cpu; - struct cpumask amask; struct plic_priv *priv = irq_data_get_irq_chip_data(d); - cpumask_and(&amask, &priv->lmask, mask_val); - if (force) - cpu = cpumask_first(&amask); + cpu = cpumask_first_and(&priv->lmask, mask_val); else - cpu = cpumask_any_and(&amask, cpu_online_mask); + cpu = cpumask_first_and_and(&priv->lmask, mask_val, cpu_online_mask); if (cpu >= nr_cpu_ids) return -EINVAL; -- cgit v1.2.3 From 6f28c4a852fab8bd759a383149dfd30511477249 Mon Sep 17 00:00:00 2001 From: Dawei Li Date: Tue, 16 Apr 2024 16:54:54 +0800 Subject: cpuidle: Avoid explicit cpumask allocation on stack In general it's preferable to avoid placing cpumasks on the stack, as for large values of NR_CPUS these can consume significant amounts of stack space and make stack overflows more likely. Use cpumask_first_and_and() and cpumask_weight_and() to avoid the need for a temporary cpumask on the stack. Signed-off-by: Dawei Li Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240416085454.3547175-8-dawei.li@shingroup.cn --- drivers/cpuidle/coupled.c | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c index 9acde71558d5..bb8761c8a42e 100644 --- a/drivers/cpuidle/coupled.c +++ b/drivers/cpuidle/coupled.c @@ -439,13 +439,8 @@ static int cpuidle_coupled_clear_pokes(int cpu) static bool cpuidle_coupled_any_pokes_pending(struct cpuidle_coupled *coupled) { - cpumask_t cpus; - int ret; - - cpumask_and(&cpus, cpu_online_mask, &coupled->coupled_cpus); - ret = cpumask_and(&cpus, &cpuidle_coupled_poke_pending, &cpus); - - return ret; + return cpumask_first_and_and(cpu_online_mask, &coupled->coupled_cpus, + &cpuidle_coupled_poke_pending) < nr_cpu_ids; } /** @@ -626,9 +621,7 @@ out: static void cpuidle_coupled_update_online_cpus(struct cpuidle_coupled *coupled) { - cpumask_t cpus; - cpumask_and(&cpus, cpu_online_mask, &coupled->coupled_cpus); - coupled->online_count = cpumask_weight(&cpus); + coupled->online_count = cpumask_weight_and(cpu_online_mask, &coupled->coupled_cpus); } /** -- cgit v1.2.3 From 382d2ffe86efb1e2fa803d2cf17e5bfc34e574f3 Mon Sep 17 00:00:00 2001 From: Guanrui Huang Date: Thu, 18 Apr 2024 14:10:53 +0800 Subject: irqchip/gic-v3-its: Remove BUG_ON in its_vpe_irq_domain_alloc This BUG_ON() is useless, because the same effect will be obtained by letting the code run its course and vm being dereferenced, triggering an exception. So just remove this check. 
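Reduced to a hypothetical two-field example (struct my_vm and count_vpes()
are made up, this is not the ITS code): a NULL pointer reaching such a
function already oopses on the first dereference, with a backtrace carrying
the same information a BUG_ON() would, so the explicit check adds nothing.

  /* Illustration only: BUG_ON(!vm) before the dereference below would be
   * redundant, because a NULL vm faults right here anyway. */
  struct my_vm {
  	int nr_vpes;
  };

  static int count_vpes(const struct my_vm *vm)
  {
  	return vm->nr_vpes;
  }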
Signed-off-by: Guanrui Huang Signed-off-by: Thomas Gleixner Reviewed-by: Zenghui Yu Acked-by: Marc Zyngier Link: https://lore.kernel.org/r/20240418061053.96803-3-guanrui.huang@linux.alibaba.com --- drivers/irqchip/irq-gic-v3-its.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers') diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 20f954211c61..98e559304b7f 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -4526,8 +4526,6 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq struct page *vprop_page; int base, nr_ids, i, err = 0; - BUG_ON(!vm); - bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids); if (!bitmap) return -ENOMEM; -- cgit v1.2.3