author     Linus Torvalds <torvalds@linux-foundation.org>  2023-02-21 21:03:48 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2023-02-21 21:03:48 +0300
commit     9e58df973d2272e6e558965e7cb32453a4b380ff (patch)
tree       a912da5b57de28bc08dcda61cc37159c43d490c3 /drivers/irqchip/irq-apple-aic.c
parent     560b80306782aee1f7d42bd929ddf010eb52121d (diff)
parent     6f3ee0e22b4c62f44b8fa3c8de6e369a4d112a75 (diff)
download   linux-9e58df973d2272e6e558965e7cb32453a4b380ff.tar.xz
Merge tag 'irq-core-2023-02-20' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull irq updates from Thomas Gleixner:
 "Updates for the interrupt subsystem:

  Core:

   - Move the interrupt affinity spreading mechanism into lib/group_cpus
     so it can be used for similar spreading requirements, e.g. in the
     block multi-queue code. This also contains a first use case in the
     block multi-queue code, which Jens asked to take along with the
     librarization (see the usage sketch below this message)

   - Improve irqdomain locking to close a number of race conditions
     which can be observed with massively parallel device driver probing

   - Enforce and document the semantics of disable_irq(), which cannot
     be invoked safely from non-sleepable context (see the second sketch
     below, after the diffstat)

   - Move the IPI multiplexing code from the Apple AIC driver into the
     core, so it can be reused by RISC-V

  Drivers:

   - Plug OF node refcounting leaks in various drivers

   - Correctly mark level-triggered interrupts in the Broadcom L2
     drivers

   - The usual small fixes and improvements

   - No new drivers, for the record!"

* tag 'irq-core-2023-02-20' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (42 commits)
  irqchip/irq-bcm7120-l2: Set IRQ_LEVEL for level triggered interrupts
  irqchip/irq-brcmstb-l2: Set IRQ_LEVEL for level triggered interrupts
  irqdomain: Switch to per-domain locking
  irqchip/mvebu-odmi: Use irq_domain_create_hierarchy()
  irqchip/loongson-pch-msi: Use irq_domain_create_hierarchy()
  irqchip/gic-v3-mbi: Use irq_domain_create_hierarchy()
  irqchip/gic-v3-its: Use irq_domain_create_hierarchy()
  irqchip/gic-v2m: Use irq_domain_create_hierarchy()
  irqchip/alpine-msi: Use irq_domain_add_hierarchy()
  x86/uv: Use irq_domain_create_hierarchy()
  x86/ioapic: Use irq_domain_create_hierarchy()
  irqdomain: Clean up irq_domain_push/pop_irq()
  irqdomain: Drop leftover brackets
  irqdomain: Drop dead domain-name assignment
  irqdomain: Drop revmap mutex
  irqdomain: Fix domain registration race
  irqdomain: Fix mapping-creation race
  irqdomain: Refactor __irq_domain_alloc_irqs()
  irqdomain: Look for existing mapping only once
  irqdomain: Drop bogus fwspec-mapping error handling
  ...
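For illustration, a minimal sketch of how a consumer could call the librarized
spreading API, loosely modeled on the block multi-queue use case mentioned
above. The example_* names and the cpu_to_queue array are hypothetical
stand-ins, not the actual blk-mq code; only group_cpus_evenly() itself is the
new library entry point.

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/group_cpus.h>
#include <linux/slab.h>

/*
 * Hypothetical consumer: spread nr_queues queues evenly across the
 * CPUs.  group_cpus_evenly() returns a kcalloc()ed array of nr_queues
 * cpumasks (NULL on allocation failure) which the caller must kfree().
 */
static int example_map_queues(unsigned int *cpu_to_queue,
                              unsigned int nr_queues)
{
        struct cpumask *masks;
        unsigned int queue;
        int cpu;

        masks = group_cpus_evenly(nr_queues);
        if (!masks)
                return -ENOMEM;

        for (queue = 0; queue < nr_queues; queue++)
                for_each_cpu(cpu, &masks[queue])
                        cpu_to_queue[cpu] = queue;

        kfree(masks);
        return 0;
}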
Diffstat (limited to 'drivers/irqchip/irq-apple-aic.c')
-rw-r--r--  drivers/irqchip/irq-apple-aic.c | 161
1 file changed, 8 insertions(+), 153 deletions(-)
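The disable_irq() semantics called out in the merge message can be summed up
in a short hedged sketch. The example_* functions and the dev_irq parameter
are hypothetical; disable_irq() and disable_irq_nosync() are the real kernel
APIs.

#include <linux/interrupt.h>

/* Hypothetical helpers; dev_irq stands in for a driver's IRQ number. */
static void example_quiesce(unsigned int dev_irq)
{
        /*
         * disable_irq() waits for in-flight handlers to finish and can
         * sleep (e.g. when the handler is threaded), so it is only
         * safe from sleepable context.
         */
        disable_irq(dev_irq);
}

static void example_quiesce_atomic(unsigned int dev_irq)
{
        /*
         * From atomic context (hardirq, spinlock held, ...), use the
         * _nosync variant: it masks the line without waiting for
         * running handlers.
         */
        disable_irq_nosync(dev_irq);
}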
diff --git a/drivers/irqchip/irq-apple-aic.c b/drivers/irqchip/irq-apple-aic.c
index ae3437f03e6c..eabb3b92965b 100644
--- a/drivers/irqchip/irq-apple-aic.c
+++ b/drivers/irqchip/irq-apple-aic.c
@@ -292,7 +292,6 @@ struct aic_irq_chip {
void __iomem *base;
void __iomem *event;
struct irq_domain *hw_domain;
- struct irq_domain *ipi_domain;
struct {
cpumask_t aff;
} *fiq_aff[AIC_NR_FIQ];
@@ -307,9 +306,6 @@ struct aic_irq_chip {
static DEFINE_PER_CPU(uint32_t, aic_fiq_unmasked);
-static DEFINE_PER_CPU(atomic_t, aic_vipi_flag);
-static DEFINE_PER_CPU(atomic_t, aic_vipi_enable);
-
static struct aic_irq_chip *aic_irqc;
static void aic_handle_ipi(struct pt_regs *regs);
@@ -751,98 +747,8 @@ static void aic_ipi_send_fast(int cpu)
isb();
}
-static void aic_ipi_mask(struct irq_data *d)
-{
- u32 irq_bit = BIT(irqd_to_hwirq(d));
-
- /* No specific ordering requirements needed here. */
- atomic_andnot(irq_bit, this_cpu_ptr(&aic_vipi_enable));
-}
-
-static void aic_ipi_unmask(struct irq_data *d)
-{
- struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);
- u32 irq_bit = BIT(irqd_to_hwirq(d));
-
- atomic_or(irq_bit, this_cpu_ptr(&aic_vipi_enable));
-
- /*
- * The atomic_or() above must complete before the atomic_read()
- * below to avoid racing aic_ipi_send_mask().
- */
- smp_mb__after_atomic();
-
- /*
- * If a pending vIPI was unmasked, raise a HW IPI to ourselves.
- * No barriers needed here since this is a self-IPI.
- */
- if (atomic_read(this_cpu_ptr(&aic_vipi_flag)) & irq_bit) {
- if (static_branch_likely(&use_fast_ipi))
- aic_ipi_send_fast(smp_processor_id());
- else
- aic_ic_write(ic, AIC_IPI_SEND, AIC_IPI_SEND_CPU(smp_processor_id()));
- }
-}
-
-static void aic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
-{
- struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);
- u32 irq_bit = BIT(irqd_to_hwirq(d));
- u32 send = 0;
- int cpu;
- unsigned long pending;
-
- for_each_cpu(cpu, mask) {
- /*
- * This sequence is the mirror of the one in aic_ipi_unmask();
- * see the comment there. Additionally, release semantics
- * ensure that the vIPI flag set is ordered after any shared
- * memory accesses that precede it. This therefore also pairs
- * with the atomic_fetch_andnot in aic_handle_ipi().
- */
- pending = atomic_fetch_or_release(irq_bit, per_cpu_ptr(&aic_vipi_flag, cpu));
-
- /*
- * The atomic_fetch_or_release() above must complete before the
- * atomic_read() below to avoid racing aic_ipi_unmask().
- */
- smp_mb__after_atomic();
-
- if (!(pending & irq_bit) &&
- (atomic_read(per_cpu_ptr(&aic_vipi_enable, cpu)) & irq_bit)) {
- if (static_branch_likely(&use_fast_ipi))
- aic_ipi_send_fast(cpu);
- else
- send |= AIC_IPI_SEND_CPU(cpu);
- }
- }
-
- /*
- * The flag writes must complete before the physical IPI is issued
- * to another CPU. This is implied by the control dependency on
- * the result of atomic_read_acquire() above, which is itself
- * already ordered after the vIPI flag write.
- */
- if (send)
- aic_ic_write(ic, AIC_IPI_SEND, send);
-}
-
-static struct irq_chip ipi_chip = {
- .name = "AIC-IPI",
- .irq_mask = aic_ipi_mask,
- .irq_unmask = aic_ipi_unmask,
- .ipi_send_mask = aic_ipi_send_mask,
-};
-
-/*
- * IPI IRQ domain
- */
-
static void aic_handle_ipi(struct pt_regs *regs)
{
- int i;
- unsigned long enabled, firing;
-
/*
* Ack the IPI. We need to order this after the AIC event read, but
* that is enforced by normal MMIO ordering guarantees.
@@ -857,27 +763,7 @@ static void aic_handle_ipi(struct pt_regs *regs)
aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_OTHER);
}
- /*
- * The mask read does not need to be ordered. Only we can change
- * our own mask anyway, so no races are possible here, as long as
- * we are properly in the interrupt handler (which is covered by
- * the barrier that is part of the top-level AIC handler's readl()).
- */
- enabled = atomic_read(this_cpu_ptr(&aic_vipi_enable));
-
- /*
- * Clear the IPIs we are about to handle. This pairs with the
- * atomic_fetch_or_release() in aic_ipi_send_mask(), and needs to be
- * ordered after the aic_ic_write() above (to avoid dropping vIPIs) and
- * before IPI handling code (to avoid races handling vIPIs before they
- * are signaled). The former is taken care of by the release semantics
- * of the write portion, while the latter is taken care of by the
- * acquire semantics of the read portion.
- */
- firing = atomic_fetch_andnot(enabled, this_cpu_ptr(&aic_vipi_flag)) & enabled;
-
- for_each_set_bit(i, &firing, AIC_NR_SWIPI)
- generic_handle_domain_irq(aic_irqc->ipi_domain, i);
+ ipi_mux_process();
/*
* No ordering needed here; at worst this just changes the timing of
@@ -887,55 +773,24 @@ static void aic_handle_ipi(struct pt_regs *regs)
aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);
}
-static int aic_ipi_alloc(struct irq_domain *d, unsigned int virq,
- unsigned int nr_irqs, void *args)
+static void aic_ipi_send_single(unsigned int cpu)
{
- int i;
-
- for (i = 0; i < nr_irqs; i++) {
- irq_set_percpu_devid(virq + i);
- irq_domain_set_info(d, virq + i, i, &ipi_chip, d->host_data,
- handle_percpu_devid_irq, NULL, NULL);
- }
-
- return 0;
-}
-
-static void aic_ipi_free(struct irq_domain *d, unsigned int virq, unsigned int nr_irqs)
-{
- /* Not freeing IPIs */
+ if (static_branch_likely(&use_fast_ipi))
+ aic_ipi_send_fast(cpu);
+ else
+ aic_ic_write(aic_irqc, AIC_IPI_SEND, AIC_IPI_SEND_CPU(cpu));
}
-static const struct irq_domain_ops aic_ipi_domain_ops = {
- .alloc = aic_ipi_alloc,
- .free = aic_ipi_free,
-};
-
static int __init aic_init_smp(struct aic_irq_chip *irqc, struct device_node *node)
{
- struct irq_domain *ipi_domain;
int base_ipi;
- ipi_domain = irq_domain_create_linear(irqc->hw_domain->fwnode, AIC_NR_SWIPI,
- &aic_ipi_domain_ops, irqc);
- if (WARN_ON(!ipi_domain))
- return -ENODEV;
-
- ipi_domain->flags |= IRQ_DOMAIN_FLAG_IPI_SINGLE;
- irq_domain_update_bus_token(ipi_domain, DOMAIN_BUS_IPI);
-
- base_ipi = __irq_domain_alloc_irqs(ipi_domain, -1, AIC_NR_SWIPI,
- NUMA_NO_NODE, NULL, false, NULL);
-
- if (WARN_ON(!base_ipi)) {
- irq_domain_remove(ipi_domain);
+ base_ipi = ipi_mux_create(AIC_NR_SWIPI, aic_ipi_send_single);
+ if (WARN_ON(base_ipi <= 0))
return -ENODEV;
- }
set_smp_ipi_range(base_ipi, AIC_NR_SWIPI);
- irqc->ipi_domain = ipi_domain;
-
return 0;
}
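The net effect of the diff above is that the driver now only implements the
three-part contract of the generic IPI multiplexer: a per-CPU send callback,
one ipi_mux_create() call at init, and an ipi_mux_process() call from the
hardware IPI handler. A minimal sketch of that contract follows; the my_*
names are hypothetical stand-ins for the hardware-specific pieces
(aic_ipi_send_single() and aic_handle_ipi() in this driver).

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/irq.h>

#define MY_NR_SWIPI     8       /* hypothetical number of virtual IPIs */

/* Per-CPU send callback handed to the mux; the body is hardware
 * specific (in this driver: aic_ipi_send_single()). */
static void my_mux_send(unsigned int cpu)
{
        /* Raise the one physical IPI targeting @cpu here. */
}

static int __init my_init_ipis(void)
{
        int base_ipi;

        /*
         * ipi_mux_create() stacks MY_NR_SWIPI virtual IPIs on top of a
         * single hardware IPI and returns the first Linux IRQ number,
         * or a negative errno on failure.
         */
        base_ipi = ipi_mux_create(MY_NR_SWIPI, my_mux_send);
        if (base_ipi <= 0)
                return -ENODEV;

        /* Hand the virtual IPI range to the arch SMP code (arm64 hook). */
        set_smp_ipi_range(base_ipi, MY_NR_SWIPI);
        return 0;
}

/* Called from the hardware IPI handler, as aic_handle_ipi() now does. */
static void my_handle_hw_ipi(void)
{
        ipi_mux_process();
}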