author    Linus Torvalds <torvalds@linux-foundation.org>  2020-03-31 03:35:14 +0300
committer Linus Torvalds <torvalds@linux-foundation.org>  2020-03-31 03:35:14 +0300
commit    2d385336afcc43732aef1d51528c03f177ecd54e (patch)
tree      fff0780aea481a225c4d3460aad286c8e95aa1e7 /drivers
parent    673b41e04a035d760bc0aff83fa9ee24fd9c2779 (diff)
parent    8a13b02a010a743ea0725e9a5454f42cddb65cf0 (diff)
download  linux-2d385336afcc43732aef1d51528c03f177ecd54e.tar.xz
Merge tag 'irq-core-2020-03-30' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull irq updates from Thomas Gleixner:
 "Updates for the interrupt subsystem:

  Treewide:

   - Cleanup of setup_irq(), which is no longer required because the
     memory allocator is available early. Most cleanup changes come
     through the various maintainer trees, so the final removal of
     setup_irq() is postponed towards the end of the merge window.

  Core:

   - Protection against unsafe invocation of interrupt handlers and
     unsafe interrupt injection, including a fixup of the offending
     PCI/AER error injection mechanism.

     Invoking interrupt handlers from arbitrary contexts, i.e. outside
     of an actual interrupt, can cause inconsistent state on the
     fragile x86 interrupt affinity changing hardware trainwreck.

  Drivers:

   - Second wave of support for the new ARM GICv4.1

   - Multi-instance support for the Xilinx and PLIC interrupt
     controllers

   - CPU-Hotplug support for PLIC

   - The obligatory new driver for the X1000 TCU

   - Enhancements, cleanups and fixes all over the place"

* tag 'irq-core-2020-03-30' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (58 commits)
  unicore32: Replace setup_irq() by request_irq()
  sh: Replace setup_irq() by request_irq()
  hexagon: Replace setup_irq() by request_irq()
  c6x: Replace setup_irq() by request_irq()
  alpha: Replace setup_irq() by request_irq()
  irqchip/gic-v4.1: Eagerly vmap vPEs
  irqchip/gic-v4.1: Add VSGI property setup
  irqchip/gic-v4.1: Add VSGI allocation/teardown
  irqchip/gic-v4.1: Move doorbell management to the GICv4 abstraction layer
  irqchip/gic-v4.1: Plumb set_vcpu_affinity SGI callbacks
  irqchip/gic-v4.1: Plumb get/set_irqchip_state SGI callbacks
  irqchip/gic-v4.1: Plumb mask/unmask SGI callbacks
  irqchip/gic-v4.1: Add initial SGI configuration
  irqchip/gic-v4.1: Plumb skeletal VSGI irqchip
  irqchip/stm32: Retrigger both in eoi and unmask callbacks
  irqchip/gic-v3: Move irq_domain_update_bus_token to after checking for NULL domain
  irqchip/xilinx: Do not call irq_set_default_host()
  irqchip/xilinx: Enable generic irq multi handler
  irqchip/xilinx: Fill error code when irq domain registration fails
  irqchip/xilinx: Add support for multiple instances
  ...
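For reference, the setup_irq() removal called out above follows one pattern
throughout the tree (a minimal sketch; cascade_handler, the "cascade" name and
the flags are placeholders here, the actual per-driver conversions in this pull
are in the i8259 and ingenic hunks below):

    /* Before: a static struct irqaction registered via setup_irq() */
    static struct irqaction cascade_action = {
            .handler = cascade_handler,
            .name    = "cascade",
            .flags   = IRQF_NO_THREAD,
    };
    setup_irq(irq, &cascade_action);

    /*
     * After: request_irq() allocates the irqaction dynamically, which is
     * fine now that the memory allocator is available this early in boot.
     */
    if (request_irq(irq, cascade_handler, IRQF_NO_THREAD, "cascade", NULL))
            pr_err("Failed to register cascade interrupt\n");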
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/irqchip/Kconfig                      2
-rw-r--r--  drivers/irqchip/irq-atmel-aic.c              2
-rw-r--r--  drivers/irqchip/irq-atmel-aic5.c             2
-rw-r--r--  drivers/irqchip/irq-bcm2835.c               15
-rw-r--r--  drivers/irqchip/irq-bcm7038-l1.c             2
-rw-r--r--  drivers/irqchip/irq-gic-v3-its.c           532
-rw-r--r--  drivers/irqchip/irq-gic-v3.c                16
-rw-r--r--  drivers/irqchip/irq-gic-v4.c               134
-rw-r--r--  drivers/irqchip/irq-i8259.c                 16
-rw-r--r--  drivers/irqchip/irq-ingenic-tcu.c            1
-rw-r--r--  drivers/irqchip/irq-ingenic.c                9
-rw-r--r--  drivers/irqchip/irq-renesas-intc-irqpin.c    2
-rw-r--r--  drivers/irqchip/irq-sifive-plic.c          119
-rw-r--r--  drivers/irqchip/irq-stm32-exti.c            14
-rw-r--r--  drivers/irqchip/irq-versatile-fpga.c        18
-rw-r--r--  drivers/irqchip/irq-vic.c                    9
-rw-r--r--  drivers/irqchip/irq-xilinx-intc.c          123
-rw-r--r--  drivers/irqchip/qcom-irq-combiner.c          2
-rw-r--r--  drivers/pci/pcie/Kconfig                     1
-rw-r--r--  drivers/pci/pcie/aer_inject.c                6
-rw-r--r--  drivers/pinctrl/stm32/pinctrl-stm32.c       57
21 files changed, 892 insertions, 190 deletions
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 6d397732138d..24fe08702ef7 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -458,7 +458,7 @@ config IMX_IRQSTEER
Support for the i.MX IRQSTEER interrupt multiplexer/remapper.
config IMX_INTMUX
- def_bool y if ARCH_MXC
+ def_bool y if ARCH_MXC || COMPILE_TEST
select IRQ_DOMAIN
help
Support for the i.MX INTMUX interrupt multiplexer.
diff --git a/drivers/irqchip/irq-atmel-aic.c b/drivers/irqchip/irq-atmel-aic.c
index bb1ad451392f..2c999dc310c1 100644
--- a/drivers/irqchip/irq-atmel-aic.c
+++ b/drivers/irqchip/irq-atmel-aic.c
@@ -83,7 +83,7 @@ static int aic_retrigger(struct irq_data *d)
irq_reg_writel(gc, d->mask, AT91_AIC_ISCR);
irq_gc_unlock(gc);
- return 0;
+ return 1;
}
static int aic_set_type(struct irq_data *d, unsigned type)
diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c
index 29333497ba10..fc1b3a9cdafc 100644
--- a/drivers/irqchip/irq-atmel-aic5.c
+++ b/drivers/irqchip/irq-atmel-aic5.c
@@ -128,7 +128,7 @@ static int aic5_retrigger(struct irq_data *d)
irq_reg_writel(bgc, 1, AT91_AIC5_ISCR);
irq_gc_unlock(bgc);
- return 0;
+ return 1;
}
static int aic5_set_type(struct irq_data *d, unsigned type)
diff --git a/drivers/irqchip/irq-bcm2835.c b/drivers/irqchip/irq-bcm2835.c
index 418245d31921..a1e004af23e7 100644
--- a/drivers/irqchip/irq-bcm2835.c
+++ b/drivers/irqchip/irq-bcm2835.c
@@ -61,6 +61,7 @@
| SHORTCUT1_MASK | SHORTCUT2_MASK)
#define REG_FIQ_CONTROL 0x0c
+#define FIQ_CONTROL_ENABLE BIT(7)
#define NR_BANKS 3
#define IRQS_PER_BANK 32
@@ -135,6 +136,7 @@ static int __init armctrl_of_init(struct device_node *node,
{
void __iomem *base;
int irq, b, i;
+ u32 reg;
base = of_iomap(node, 0);
if (!base)
@@ -157,6 +159,19 @@ static int __init armctrl_of_init(struct device_node *node,
handle_level_irq);
irq_set_probe(irq);
}
+
+ reg = readl_relaxed(intc.enable[b]);
+ if (reg) {
+ writel_relaxed(reg, intc.disable[b]);
+ pr_err(FW_BUG "Bootloader left irq enabled: "
+ "bank %d irq %*pbl\n", b, IRQS_PER_BANK, &reg);
+ }
+ }
+
+ reg = readl_relaxed(base + REG_FIQ_CONTROL);
+ if (reg & FIQ_CONTROL_ENABLE) {
+ writel_relaxed(0, base + REG_FIQ_CONTROL);
+ pr_err(FW_BUG "Bootloader left fiq enabled\n");
}
if (is_2836) {
diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c
index cbf01afcd2a6..eb9bce93cd05 100644
--- a/drivers/irqchip/irq-bcm7038-l1.c
+++ b/drivers/irqchip/irq-bcm7038-l1.c
@@ -50,7 +50,7 @@ struct bcm7038_l1_chip {
struct bcm7038_l1_cpu {
void __iomem *map_base;
- u32 mask_cache[0];
+ u32 mask_cache[];
};
/*
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 83b1186ffcad..54d142ccc63a 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -96,6 +96,7 @@ struct its_node {
struct mutex dev_alloc_lock;
struct list_head entry;
void __iomem *base;
+ void __iomem *sgir_base;
phys_addr_t phys_base;
struct its_cmd_block *cmd_base;
struct its_cmd_block *cmd_write;
@@ -188,6 +189,15 @@ static DEFINE_IDA(its_vpeid_ida);
#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
#define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K)
+/*
+ * Skip ITSs that have no vLPIs mapped, unless we're on GICv4.1, as we
+ * always have vSGIs mapped.
+ */
+static bool require_its_list_vmovp(struct its_vm *vm, struct its_node *its)
+{
+ return (gic_rdists->has_rvpeid || vm->vlpi_count[its->list_nr]);
+}
+
static u16 get_its_list(struct its_vm *vm)
{
struct its_node *its;
@@ -197,7 +207,7 @@ static u16 get_its_list(struct its_vm *vm)
if (!is_v4(its))
continue;
- if (vm->vlpi_count[its->list_nr])
+ if (require_its_list_vmovp(vm, its))
__set_bit(its->list_nr, &its_list);
}
@@ -239,15 +249,41 @@ static struct its_vlpi_map *get_vlpi_map(struct irq_data *d)
return NULL;
}
-static int irq_to_cpuid(struct irq_data *d)
+static int vpe_to_cpuid_lock(struct its_vpe *vpe, unsigned long *flags)
+{
+ raw_spin_lock_irqsave(&vpe->vpe_lock, *flags);
+ return vpe->col_idx;
+}
+
+static void vpe_to_cpuid_unlock(struct its_vpe *vpe, unsigned long flags)
+{
+ raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
+}
+
+static int irq_to_cpuid_lock(struct irq_data *d, unsigned long *flags)
{
- struct its_device *its_dev = irq_data_get_irq_chip_data(d);
struct its_vlpi_map *map = get_vlpi_map(d);
+ int cpu;
- if (map)
- return map->vpe->col_idx;
+ if (map) {
+ cpu = vpe_to_cpuid_lock(map->vpe, flags);
+ } else {
+ /* Physical LPIs are already locked via the irq_desc lock */
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ cpu = its_dev->event_map.col_map[its_get_event_id(d)];
+ /* Keep GCC quiet... */
+ *flags = 0;
+ }
+
+ return cpu;
+}
+
+static void irq_to_cpuid_unlock(struct irq_data *d, unsigned long flags)
+{
+ struct its_vlpi_map *map = get_vlpi_map(d);
- return its_dev->event_map.col_map[its_get_event_id(d)];
+ if (map)
+ vpe_to_cpuid_unlock(map->vpe, flags);
}
static struct its_collection *valid_col(struct its_collection *col)
@@ -353,6 +389,15 @@ struct its_cmd_desc {
struct {
struct its_vpe *vpe;
} its_invdb_cmd;
+
+ struct {
+ struct its_vpe *vpe;
+ u8 sgi;
+ u8 priority;
+ bool enable;
+ bool group;
+ bool clear;
+ } its_vsgi_cmd;
};
};
@@ -501,6 +546,31 @@ static void its_encode_db(struct its_cmd_block *cmd, bool db)
its_mask_encode(&cmd->raw_cmd[2], db, 63, 63);
}
+static void its_encode_sgi_intid(struct its_cmd_block *cmd, u8 sgi)
+{
+ its_mask_encode(&cmd->raw_cmd[0], sgi, 35, 32);
+}
+
+static void its_encode_sgi_priority(struct its_cmd_block *cmd, u8 prio)
+{
+ its_mask_encode(&cmd->raw_cmd[0], prio >> 4, 23, 20);
+}
+
+static void its_encode_sgi_group(struct its_cmd_block *cmd, bool grp)
+{
+ its_mask_encode(&cmd->raw_cmd[0], grp, 10, 10);
+}
+
+static void its_encode_sgi_clear(struct its_cmd_block *cmd, bool clr)
+{
+ its_mask_encode(&cmd->raw_cmd[0], clr, 9, 9);
+}
+
+static void its_encode_sgi_enable(struct its_cmd_block *cmd, bool en)
+{
+ its_mask_encode(&cmd->raw_cmd[0], en, 8, 8);
+}
+
static inline void its_fixup_cmd(struct its_cmd_block *cmd)
{
/* Let's fixup BE commands */
@@ -866,6 +936,26 @@ static struct its_vpe *its_build_invdb_cmd(struct its_node *its,
return valid_vpe(its, desc->its_invdb_cmd.vpe);
}
+static struct its_vpe *its_build_vsgi_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
+ struct its_cmd_desc *desc)
+{
+ if (WARN_ON(!is_v4_1(its)))
+ return NULL;
+
+ its_encode_cmd(cmd, GITS_CMD_VSGI);
+ its_encode_vpeid(cmd, desc->its_vsgi_cmd.vpe->vpe_id);
+ its_encode_sgi_intid(cmd, desc->its_vsgi_cmd.sgi);
+ its_encode_sgi_priority(cmd, desc->its_vsgi_cmd.priority);
+ its_encode_sgi_group(cmd, desc->its_vsgi_cmd.group);
+ its_encode_sgi_clear(cmd, desc->its_vsgi_cmd.clear);
+ its_encode_sgi_enable(cmd, desc->its_vsgi_cmd.enable);
+
+ its_fixup_cmd(cmd);
+
+ return valid_vpe(its, desc->its_vsgi_cmd.vpe);
+}
+
static u64 its_cmd_ptr_to_offset(struct its_node *its,
struct its_cmd_block *ptr)
{
@@ -1214,7 +1304,7 @@ static void its_send_vmovp(struct its_vpe *vpe)
if (!is_v4(its))
continue;
- if (!vpe->its_vm->vlpi_count[its->list_nr])
+ if (!require_its_list_vmovp(vpe->its_vm, its))
continue;
desc.its_vmovp_cmd.col = &its->collections[col_id];
@@ -1321,7 +1411,7 @@ static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
static void wait_for_syncr(void __iomem *rdbase)
{
- while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
+ while (readl_relaxed(rdbase + GICR_SYNCR) & 1)
cpu_relax();
}
@@ -1329,7 +1419,9 @@ static void direct_lpi_inv(struct irq_data *d)
{
struct its_vlpi_map *map = get_vlpi_map(d);
void __iomem *rdbase;
+ unsigned long flags;
u64 val;
+ int cpu;
if (map) {
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
@@ -1344,10 +1436,14 @@ static void direct_lpi_inv(struct irq_data *d)
}
/* Target the redistributor this LPI is currently routed to */
- rdbase = per_cpu_ptr(gic_rdists->rdist, irq_to_cpuid(d))->rd_base;
+ cpu = irq_to_cpuid_lock(d, &flags);
+ raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
+ rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
gic_write_lpir(val, rdbase + GICR_INVLPIR);
wait_for_syncr(rdbase);
+ raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
+ irq_to_cpuid_unlock(d, flags);
}
static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
@@ -1499,12 +1595,31 @@ static int its_irq_set_irqchip_state(struct irq_data *d,
return 0;
}
+/*
+ * Two favourable cases:
+ *
+ * (a) Either we have a GICv4.1, and all vPEs have to be mapped at all times
+ * for vSGI delivery
+ *
+ * (b) Or the ITSs do not use a list map, meaning that VMOVP is cheap enough
+ * and we're better off mapping all VPEs always
+ *
+ * If neither (a) nor (b) is true, then we map vPEs on demand.
+ *
+ */
+static bool gic_requires_eager_mapping(void)
+{
+ if (!its_list_map || gic_rdists->has_rvpeid)
+ return true;
+
+ return false;
+}
+
static void its_map_vm(struct its_node *its, struct its_vm *vm)
{
unsigned long flags;
- /* Not using the ITS list? Everything is always mapped. */
- if (!its_list_map)
+ if (gic_requires_eager_mapping())
return;
raw_spin_lock_irqsave(&vmovp_lock, flags);
@@ -1538,7 +1653,7 @@ static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
unsigned long flags;
/* Not using the ITS list? Everything is always mapped. */
- if (!its_list_map)
+ if (gic_requires_eager_mapping())
return;
raw_spin_lock_irqsave(&vmovp_lock, flags);
@@ -2036,18 +2151,17 @@ static void its_write_baser(struct its_node *its, struct its_baser *baser,
}
static int its_setup_baser(struct its_node *its, struct its_baser *baser,
- u64 cache, u64 shr, u32 psz, u32 order,
- bool indirect)
+ u64 cache, u64 shr, u32 order, bool indirect)
{
u64 val = its_read_baser(its, baser);
u64 esz = GITS_BASER_ENTRY_SIZE(val);
u64 type = GITS_BASER_TYPE(val);
u64 baser_phys, tmp;
- u32 alloc_pages;
+ u32 alloc_pages, psz;
struct page *page;
void *base;
-retry_alloc_baser:
+ psz = baser->psz;
alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
if (alloc_pages > GITS_BASER_PAGES_MAX) {
pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n",
@@ -2120,25 +2234,6 @@ retry_baser:
goto retry_baser;
}
- if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) {
- /*
- * Page size didn't stick. Let's try a smaller
- * size and retry. If we reach 4K, then
- * something is horribly wrong...
- */
- free_pages((unsigned long)base, order);
- baser->base = NULL;
-
- switch (psz) {
- case SZ_16K:
- psz = SZ_4K;
- goto retry_alloc_baser;
- case SZ_64K:
- psz = SZ_16K;
- goto retry_alloc_baser;
- }
- }
-
if (val != tmp) {
pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
&its->phys_base, its_base_type_string[type],
@@ -2164,13 +2259,14 @@ retry_baser:
static bool its_parse_indirect_baser(struct its_node *its,
struct its_baser *baser,
- u32 psz, u32 *order, u32 ids)
+ u32 *order, u32 ids)
{
u64 tmp = its_read_baser(its, baser);
u64 type = GITS_BASER_TYPE(tmp);
u64 esz = GITS_BASER_ENTRY_SIZE(tmp);
u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
u32 new_order = *order;
+ u32 psz = baser->psz;
bool indirect = false;
/* No need to enable Indirection if memory requirement < (psz*2)bytes */
@@ -2288,11 +2384,58 @@ static void its_free_tables(struct its_node *its)
}
}
+static int its_probe_baser_psz(struct its_node *its, struct its_baser *baser)
+{
+ u64 psz = SZ_64K;
+
+ while (psz) {
+ u64 val, gpsz;
+
+ val = its_read_baser(its, baser);
+ val &= ~GITS_BASER_PAGE_SIZE_MASK;
+
+ switch (psz) {
+ case SZ_64K:
+ gpsz = GITS_BASER_PAGE_SIZE_64K;
+ break;
+ case SZ_16K:
+ gpsz = GITS_BASER_PAGE_SIZE_16K;
+ break;
+ case SZ_4K:
+ default:
+ gpsz = GITS_BASER_PAGE_SIZE_4K;
+ break;
+ }
+
+ gpsz >>= GITS_BASER_PAGE_SIZE_SHIFT;
+
+ val |= FIELD_PREP(GITS_BASER_PAGE_SIZE_MASK, gpsz);
+ its_write_baser(its, baser, val);
+
+ if (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser->val) == gpsz)
+ break;
+
+ switch (psz) {
+ case SZ_64K:
+ psz = SZ_16K;
+ break;
+ case SZ_16K:
+ psz = SZ_4K;
+ break;
+ case SZ_4K:
+ default:
+ return -1;
+ }
+ }
+
+ baser->psz = psz;
+ return 0;
+}
+
static int its_alloc_tables(struct its_node *its)
{
u64 shr = GITS_BASER_InnerShareable;
u64 cache = GITS_BASER_RaWaWb;
- u32 psz = SZ_64K;
int err, i;
if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375)
@@ -2303,16 +2446,22 @@ static int its_alloc_tables(struct its_node *its)
struct its_baser *baser = its->tables + i;
u64 val = its_read_baser(its, baser);
u64 type = GITS_BASER_TYPE(val);
- u32 order = get_order(psz);
bool indirect = false;
+ u32 order;
- switch (type) {
- case GITS_BASER_TYPE_NONE:
+ if (type == GITS_BASER_TYPE_NONE)
continue;
+ if (its_probe_baser_psz(its, baser)) {
+ its_free_tables(its);
+ return -ENXIO;
+ }
+
+ order = get_order(baser->psz);
+
+ switch (type) {
case GITS_BASER_TYPE_DEVICE:
- indirect = its_parse_indirect_baser(its, baser,
- psz, &order,
+ indirect = its_parse_indirect_baser(its, baser, &order,
device_ids(its));
break;
@@ -2328,20 +2477,18 @@ static int its_alloc_tables(struct its_node *its)
}
}
- indirect = its_parse_indirect_baser(its, baser,
- psz, &order,
+ indirect = its_parse_indirect_baser(its, baser, &order,
ITS_MAX_VPEID_BITS);
break;
}
- err = its_setup_baser(its, baser, cache, shr, psz, order, indirect);
+ err = its_setup_baser(its, baser, cache, shr, order, indirect);
if (err < 0) {
its_free_tables(its);
return err;
}
/* Update settings which will be used for next BASERn */
- psz = baser->psz;
cache = baser->val & GITS_BASER_CACHEABILITY_MASK;
shr = baser->val & GITS_BASER_SHAREABILITY_MASK;
}
@@ -2452,6 +2599,10 @@ static bool allocate_vpe_l2_table(int cpu, u32 id)
if (!gic_rdists->has_rvpeid)
return true;
+ /* Skip non-present CPUs */
+ if (!base)
+ return true;
+
val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER);
esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val) + 1;
@@ -3482,17 +3633,25 @@ static int its_vpe_set_affinity(struct irq_data *d,
{
struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
int from, cpu = cpumask_first(mask_val);
+ unsigned long flags;
/*
* Changing affinity is mega expensive, so let's be as lazy as
* we can and only do it if we really have to. Also, if mapped
* into the proxy device, we need to move the doorbell
* interrupt to its new location.
+ *
+ * Another thing is that changing the affinity of a vPE affects
+ * *other interrupts* such as all the vLPIs that are routed to
+ * this vPE. This means that the irq_desc lock is not enough to
+ * protect us, and that we must ensure nobody samples vpe->col_idx
+ * during the update, hence the lock below which must also be
+ * taken on any vLPI handling path that evaluates vpe->col_idx.
*/
- if (vpe->col_idx == cpu)
+ from = vpe_to_cpuid_lock(vpe, &flags);
+ if (from == cpu)
goto out;
- from = vpe->col_idx;
vpe->col_idx = cpu;
/*
@@ -3508,6 +3667,7 @@ static int its_vpe_set_affinity(struct irq_data *d,
out:
irq_data_update_effective_affinity(d, cpumask_of(cpu));
+ vpe_to_cpuid_unlock(vpe, flags);
return IRQ_SET_MASK_OK_DONE;
}
@@ -3528,7 +3688,7 @@ static void its_vpe_schedule(struct its_vpe *vpe)
val = virt_to_phys(page_address(vpe->vpt_page)) &
GENMASK_ULL(51, 16);
val |= GICR_VPENDBASER_RaWaWb;
- val |= GICR_VPENDBASER_NonShareable;
+ val |= GICR_VPENDBASER_InnerShareable;
/*
* There is no good way of finding out if the pending table is
* empty as we can race against the doorbell interrupt very
@@ -3619,9 +3779,11 @@ static void its_vpe_send_inv(struct irq_data *d)
void __iomem *rdbase;
/* Target the redistributor this VPE is currently known on */
+ raw_spin_lock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
gic_write_lpir(d->parent_data->hwirq, rdbase + GICR_INVLPIR);
wait_for_syncr(rdbase);
+ raw_spin_unlock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
} else {
its_vpe_send_cmd(vpe, its_send_inv);
}
@@ -3675,12 +3837,18 @@ static int its_vpe_set_irqchip_state(struct irq_data *d,
return 0;
}
+static int its_vpe_retrigger(struct irq_data *d)
+{
+ return !its_vpe_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true);
+}
+
static struct irq_chip its_vpe_irq_chip = {
.name = "GICv4-vpe",
.irq_mask = its_vpe_mask_irq,
.irq_unmask = its_vpe_unmask_irq,
.irq_eoi = irq_chip_eoi_parent,
.irq_set_affinity = its_vpe_set_affinity,
+ .irq_retrigger = its_vpe_retrigger,
.irq_set_irqchip_state = its_vpe_set_irqchip_state,
.irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity,
};
@@ -3782,8 +3950,12 @@ static void its_vpe_4_1_invall(struct its_vpe *vpe)
val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id);
/* Target the redistributor this vPE is currently known on */
+ raw_spin_lock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
gic_write_lpir(val, rdbase + GICR_INVALLR);
+
+ wait_for_syncr(rdbase);
+ raw_spin_unlock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
}
static int its_vpe_4_1_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
@@ -3818,6 +3990,221 @@ static struct irq_chip its_vpe_4_1_irq_chip = {
.irq_set_vcpu_affinity = its_vpe_4_1_set_vcpu_affinity,
};
+static void its_configure_sgi(struct irq_data *d, bool clear)
+{
+ struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+ struct its_cmd_desc desc;
+
+ desc.its_vsgi_cmd.vpe = vpe;
+ desc.its_vsgi_cmd.sgi = d->hwirq;
+ desc.its_vsgi_cmd.priority = vpe->sgi_config[d->hwirq].priority;
+ desc.its_vsgi_cmd.enable = vpe->sgi_config[d->hwirq].enabled;
+ desc.its_vsgi_cmd.group = vpe->sgi_config[d->hwirq].group;
+ desc.its_vsgi_cmd.clear = clear;
+
+ /*
+ * GICv4.1 allows us to send VSGI commands to any ITS as long as the
+ * destination VPE is mapped there. Since we map them eagerly at
+ * activation time, we're pretty sure the first GICv4.1 ITS will do.
+ */
+ its_send_single_vcommand(find_4_1_its(), its_build_vsgi_cmd, &desc);
+}
+
+static void its_sgi_mask_irq(struct irq_data *d)
+{
+ struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+
+ vpe->sgi_config[d->hwirq].enabled = false;
+ its_configure_sgi(d, false);
+}
+
+static void its_sgi_unmask_irq(struct irq_data *d)
+{
+ struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+
+ vpe->sgi_config[d->hwirq].enabled = true;
+ its_configure_sgi(d, false);
+}
+
+static int its_sgi_set_affinity(struct irq_data *d,
+ const struct cpumask *mask_val,
+ bool force)
+{
+ /*
+ * There is no notion of affinity for virtual SGIs, at least
+ * not on the host (since they can only be targetting a vPE).
+ * Tell the kernel we've done whatever it asked for.
+ */
+ return IRQ_SET_MASK_OK;
+}
+
+static int its_sgi_set_irqchip_state(struct irq_data *d,
+ enum irqchip_irq_state which,
+ bool state)
+{
+ if (which != IRQCHIP_STATE_PENDING)
+ return -EINVAL;
+
+ if (state) {
+ struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+ struct its_node *its = find_4_1_its();
+ u64 val;
+
+ val = FIELD_PREP(GITS_SGIR_VPEID, vpe->vpe_id);
+ val |= FIELD_PREP(GITS_SGIR_VINTID, d->hwirq);
+ writeq_relaxed(val, its->sgir_base + GITS_SGIR - SZ_128K);
+ } else {
+ its_configure_sgi(d, true);
+ }
+
+ return 0;
+}
+
+static int its_sgi_get_irqchip_state(struct irq_data *d,
+ enum irqchip_irq_state which, bool *val)
+{
+ struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+ void __iomem *base;
+ unsigned long flags;
+ u32 count = 1000000; /* 1s! */
+ u32 status;
+ int cpu;
+
+ if (which != IRQCHIP_STATE_PENDING)
+ return -EINVAL;
+
+ /*
+ * Locking galore! We can race against two different events:
+ *
+ * - Concurent vPE affinity change: we must make sure it cannot
+ * happen, or we'll talk to the wrong redistributor. This is
+ * identical to what happens with vLPIs.
+ *
+ * - Concurrent VSGIPENDR access: As it involves accessing two
+ * MMIO registers, this must be made atomic one way or another.
+ */
+ cpu = vpe_to_cpuid_lock(vpe, &flags);
+ raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
+ base = gic_data_rdist_cpu(cpu)->rd_base + SZ_128K;
+ writel_relaxed(vpe->vpe_id, base + GICR_VSGIR);
+ do {
+ status = readl_relaxed(base + GICR_VSGIPENDR);
+ if (!(status & GICR_VSGIPENDR_BUSY))
+ goto out;
+
+ count--;
+ if (!count) {
+ pr_err_ratelimited("Unable to get SGI status\n");
+ goto out;
+ }
+ cpu_relax();
+ udelay(1);
+ } while (count);
+
+out:
+ raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
+ vpe_to_cpuid_unlock(vpe, flags);
+
+ if (!count)
+ return -ENXIO;
+
+ *val = !!(status & (1 << d->hwirq));
+
+ return 0;
+}
+
+static int its_sgi_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
+{
+ struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+ struct its_cmd_info *info = vcpu_info;
+
+ switch (info->cmd_type) {
+ case PROP_UPDATE_VSGI:
+ vpe->sgi_config[d->hwirq].priority = info->priority;
+ vpe->sgi_config[d->hwirq].group = info->group;
+ its_configure_sgi(d, false);
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static struct irq_chip its_sgi_irq_chip = {
+ .name = "GICv4.1-sgi",
+ .irq_mask = its_sgi_mask_irq,
+ .irq_unmask = its_sgi_unmask_irq,
+ .irq_set_affinity = its_sgi_set_affinity,
+ .irq_set_irqchip_state = its_sgi_set_irqchip_state,
+ .irq_get_irqchip_state = its_sgi_get_irqchip_state,
+ .irq_set_vcpu_affinity = its_sgi_set_vcpu_affinity,
+};
+
+static int its_sgi_irq_domain_alloc(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs,
+ void *args)
+{
+ struct its_vpe *vpe = args;
+ int i;
+
+ /* Yes, we do want 16 SGIs */
+ WARN_ON(nr_irqs != 16);
+
+ for (i = 0; i < 16; i++) {
+ vpe->sgi_config[i].priority = 0;
+ vpe->sgi_config[i].enabled = false;
+ vpe->sgi_config[i].group = false;
+
+ irq_domain_set_hwirq_and_chip(domain, virq + i, i,
+ &its_sgi_irq_chip, vpe);
+ irq_set_status_flags(virq + i, IRQ_DISABLE_UNLAZY);
+ }
+
+ return 0;
+}
+
+static void its_sgi_irq_domain_free(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs)
+{
+ /* Nothing to do */
+}
+
+static int its_sgi_irq_domain_activate(struct irq_domain *domain,
+ struct irq_data *d, bool reserve)
+{
+ /* Write out the initial SGI configuration */
+ its_configure_sgi(d, false);
+ return 0;
+}
+
+static void its_sgi_irq_domain_deactivate(struct irq_domain *domain,
+ struct irq_data *d)
+{
+ struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+
+ /*
+ * The VSGI command is awkward:
+ *
+ * - To change the configuration, CLEAR must be set to false,
+ * leaving the pending bit unchanged.
+ * - To clear the pending bit, CLEAR must be set to true, leaving
+ * the configuration unchanged.
+ *
+ * You just can't do both at once, hence the two commands below.
+ */
+ vpe->sgi_config[d->hwirq].enabled = false;
+ its_configure_sgi(d, false);
+ its_configure_sgi(d, true);
+}
+
+static const struct irq_domain_ops its_sgi_domain_ops = {
+ .alloc = its_sgi_irq_domain_alloc,
+ .free = its_sgi_irq_domain_free,
+ .activate = its_sgi_irq_domain_activate,
+ .deactivate = its_sgi_irq_domain_deactivate,
+};
+
static int its_vpe_id_alloc(void)
{
return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL);
@@ -3851,6 +4238,7 @@ static int its_vpe_init(struct its_vpe *vpe)
return -ENOMEM;
}
+ raw_spin_lock_init(&vpe->vpe_lock);
vpe->vpe_id = vpe_id;
vpe->vpt_page = vpt_page;
if (gic_rdists->has_rvpeid)
@@ -3960,8 +4348,12 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
struct its_node *its;
- /* If we use the list map, we issue VMAPP on demand... */
- if (its_list_map)
+ /*
+ * If we use the list map, we issue VMAPP on demand... Unless
+ * we're on a GICv4.1 and we eagerly map the VPE on all ITSs
+ * so that VSGIs can work.
+ */
+ if (!gic_requires_eager_mapping())
return 0;
/* Map the VPE to the first possible CPU */
@@ -3987,10 +4379,10 @@ static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
struct its_node *its;
/*
- * If we use the list map, we unmap the VPE once no VLPIs are
- * associated with the VM.
+ * If we use the list map on GICv4.0, we unmap the VPE once no
+ * VLPIs are associated with the VM.
*/
- if (its_list_map)
+ if (!gic_requires_eager_mapping())
return;
list_for_each_entry(its, &its_nodes, entry) {
@@ -4404,7 +4796,7 @@ static int __init its_probe_one(struct resource *res,
struct page *page;
int err;
- its_base = ioremap(res->start, resource_size(res));
+ its_base = ioremap(res->start, SZ_64K);
if (!its_base) {
pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start);
return -ENOMEM;
@@ -4455,6 +4847,13 @@ static int __init its_probe_one(struct resource *res,
if (is_v4_1(its)) {
u32 svpet = FIELD_GET(GITS_TYPER_SVPET, typer);
+
+ its->sgir_base = ioremap(res->start + SZ_128K, SZ_64K);
+ if (!its->sgir_base) {
+ err = -ENOMEM;
+ goto out_free_its;
+ }
+
its->mpidr = readl_relaxed(its_base + GITS_MPIDR);
pr_info("ITS@%pa: Using GICv4.1 mode %08x %08x\n",
@@ -4468,7 +4867,7 @@ static int __init its_probe_one(struct resource *res,
get_order(ITS_CMD_QUEUE_SZ));
if (!page) {
err = -ENOMEM;
- goto out_free_its;
+ goto out_unmap_sgir;
}
its->cmd_base = (void *)page_address(page);
its->cmd_write = its->cmd_base;
@@ -4535,6 +4934,9 @@ out_free_tables:
its_free_tables(its);
out_free_cmd:
free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
+out_unmap_sgir:
+ if (its->sgir_base)
+ iounmap(its->sgir_base);
out_free_its:
kfree(its);
out_unmap:
@@ -4818,6 +5220,7 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
struct device_node *of_node;
struct its_node *its;
bool has_v4 = false;
+ bool has_v4_1 = false;
int err;
gic_rdists = rdists;
@@ -4838,12 +5241,25 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
if (err)
return err;
- list_for_each_entry(its, &its_nodes, entry)
+ list_for_each_entry(its, &its_nodes, entry) {
has_v4 |= is_v4(its);
+ has_v4_1 |= is_v4_1(its);
+ }
+
+ /* Don't bother with inconsistent systems */
+ if (WARN_ON(!has_v4_1 && rdists->has_rvpeid))
+ rdists->has_rvpeid = false;
if (has_v4 & rdists->has_vlpis) {
+ const struct irq_domain_ops *sgi_ops;
+
+ if (has_v4_1)
+ sgi_ops = &its_sgi_domain_ops;
+ else
+ sgi_ops = NULL;
+
if (its_init_vpe_domain() ||
- its_init_v4(parent_domain, &its_vpe_domain_ops)) {
+ its_init_v4(parent_domain, &its_vpe_domain_ops, sgi_ops)) {
rdists->has_vlpis = false;
pr_err("ITS: Disabling GICv4 support\n");
}
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 1eec9d4649d5..9dbc81b6f62e 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -724,6 +724,7 @@ static void __init gic_dist_init(void)
unsigned int i;
u64 affinity;
void __iomem *base = gic_data.dist_base;
+ u32 val;
/* Disable the distributor */
writel_relaxed(0, base + GICD_CTLR);
@@ -756,9 +757,14 @@ static void __init gic_dist_init(void)
/* Now do the common stuff, and wait for the distributor to drain */
gic_dist_config(base, GIC_LINE_NR, gic_dist_wait_for_rwp);
+ val = GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1;
+ if (gic_data.rdists.gicd_typer2 & GICD_TYPER2_nASSGIcap) {
+ pr_info("Enabling SGIs without active state\n");
+ val |= GICD_CTLR_nASSGIreq;
+ }
+
/* Enable distributor with ARE, Group1 */
- writel_relaxed(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1,
- base + GICD_CTLR);
+ writel_relaxed(val, base + GICD_CTLR);
/*
* Set all global interrupts to the boot CPU only. ARE must be
@@ -829,6 +835,7 @@ static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr)
typer = gic_read_typer(ptr + GICR_TYPER);
if ((typer >> 32) == aff) {
u64 offset = ptr - region->redist_base;
+ raw_spin_lock_init(&gic_data_rdist()->rd_lock);
gic_data_rdist_rd_base() = ptr;
gic_data_rdist()->phys_base = region->phys_base + offset;
@@ -1609,7 +1616,6 @@ static int __init gic_init_bases(void __iomem *dist_base,
gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops,
&gic_data);
- irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED);
gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
gic_data.rdists.has_rvpeid = true;
gic_data.rdists.has_vlpis = true;
@@ -1620,6 +1626,8 @@ static int __init gic_init_bases(void __iomem *dist_base,
goto out_free;
}
+ irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED);
+
gic_data.has_rss = !!(typer & GICD_TYPER_RSS);
pr_info("Distributor has %sRange Selector support\n",
gic_data.has_rss ? "" : "no ");
@@ -1785,6 +1793,7 @@ static void __init gic_of_setup_kvm_info(struct device_node *node)
gic_v3_kvm_info.vcpu = r;
gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
+ gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
gic_set_kvm_info(&gic_v3_kvm_info);
}
@@ -2100,6 +2109,7 @@ static void __init gic_acpi_setup_kvm_info(void)
}
gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
+ gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
gic_set_kvm_info(&gic_v3_kvm_info);
}
diff --git a/drivers/irqchip/irq-gic-v4.c b/drivers/irqchip/irq-gic-v4.c
index 45969927cc81..0c18714ae13e 100644
--- a/drivers/irqchip/irq-gic-v4.c
+++ b/drivers/irqchip/irq-gic-v4.c
@@ -85,6 +85,53 @@
static struct irq_domain *gic_domain;
static const struct irq_domain_ops *vpe_domain_ops;
+static const struct irq_domain_ops *sgi_domain_ops;
+
+static bool has_v4_1(void)
+{
+ return !!sgi_domain_ops;
+}
+
+static int its_alloc_vcpu_sgis(struct its_vpe *vpe, int idx)
+{
+ char *name;
+ int sgi_base;
+
+ if (!has_v4_1())
+ return 0;
+
+ name = kasprintf(GFP_KERNEL, "GICv4-sgi-%d", task_pid_nr(current));
+ if (!name)
+ goto err;
+
+ vpe->fwnode = irq_domain_alloc_named_id_fwnode(name, idx);
+ if (!vpe->fwnode)
+ goto err;
+
+ kfree(name);
+ name = NULL;
+
+ vpe->sgi_domain = irq_domain_create_linear(vpe->fwnode, 16,
+ sgi_domain_ops, vpe);
+ if (!vpe->sgi_domain)
+ goto err;
+
+ sgi_base = __irq_domain_alloc_irqs(vpe->sgi_domain, -1, 16,
+ NUMA_NO_NODE, vpe,
+ false, NULL);
+ if (sgi_base <= 0)
+ goto err;
+
+ return 0;
+
+err:
+ if (vpe->sgi_domain)
+ irq_domain_remove(vpe->sgi_domain);
+ if (vpe->fwnode)
+ irq_domain_free_fwnode(vpe->fwnode);
+ kfree(name);
+ return -ENOMEM;
+}
int its_alloc_vcpu_irqs(struct its_vm *vm)
{
@@ -112,8 +159,13 @@ int its_alloc_vcpu_irqs(struct its_vm *vm)
if (vpe_base_irq <= 0)
goto err;
- for (i = 0; i < vm->nr_vpes; i++)
+ for (i = 0; i < vm->nr_vpes; i++) {
+ int ret;
vm->vpes[i]->irq = vpe_base_irq + i;
+ ret = its_alloc_vcpu_sgis(vm->vpes[i], i);
+ if (ret)
+ goto err;
+ }
return 0;
@@ -126,8 +178,28 @@ err:
return -ENOMEM;
}
+static void its_free_sgi_irqs(struct its_vm *vm)
+{
+ int i;
+
+ if (!has_v4_1())
+ return;
+
+ for (i = 0; i < vm->nr_vpes; i++) {
+ unsigned int irq = irq_find_mapping(vm->vpes[i]->sgi_domain, 0);
+
+ if (WARN_ON(!irq))
+ continue;
+
+ irq_domain_free_irqs(irq, 16);
+ irq_domain_remove(vm->vpes[i]->sgi_domain);
+ irq_domain_free_fwnode(vm->vpes[i]->fwnode);
+ }
+}
+
void its_free_vcpu_irqs(struct its_vm *vm)
{
+ its_free_sgi_irqs(vm);
irq_domain_free_irqs(vm->vpes[0]->irq, vm->nr_vpes);
irq_domain_remove(vm->domain);
irq_domain_free_fwnode(vm->fwnode);
@@ -138,18 +210,50 @@ static int its_send_vpe_cmd(struct its_vpe *vpe, struct its_cmd_info *info)
return irq_set_vcpu_affinity(vpe->irq, info);
}
-int its_schedule_vpe(struct its_vpe *vpe, bool on)
+int its_make_vpe_non_resident(struct its_vpe *vpe, bool db)
+{
+ struct irq_desc *desc = irq_to_desc(vpe->irq);
+ struct its_cmd_info info = { };
+ int ret;
+
+ WARN_ON(preemptible());
+
+ info.cmd_type = DESCHEDULE_VPE;
+ if (has_v4_1()) {
+ /* GICv4.1 can directly deal with doorbells */
+ info.req_db = db;
+ } else {
+ /* Undo the nested disable_irq() calls... */
+ while (db && irqd_irq_disabled(&desc->irq_data))
+ enable_irq(vpe->irq);
+ }
+
+ ret = its_send_vpe_cmd(vpe, &info);
+ if (!ret)
+ vpe->resident = false;
+
+ return ret;
+}
+
+int its_make_vpe_resident(struct its_vpe *vpe, bool g0en, bool g1en)
{
- struct its_cmd_info info;
+ struct its_cmd_info info = { };
int ret;
WARN_ON(preemptible());
- info.cmd_type = on ? SCHEDULE_VPE : DESCHEDULE_VPE;
+ info.cmd_type = SCHEDULE_VPE;
+ if (has_v4_1()) {
+ info.g0en = g0en;
+ info.g1en = g1en;
+ } else {
+ /* Disabled the doorbell, as we're about to enter the guest */
+ disable_irq_nosync(vpe->irq);
+ }
ret = its_send_vpe_cmd(vpe, &info);
if (!ret)
- vpe->resident = on;
+ vpe->resident = true;
return ret;
}
@@ -216,12 +320,28 @@ int its_prop_update_vlpi(int irq, u8 config, bool inv)
return irq_set_vcpu_affinity(irq, &info);
}
-int its_init_v4(struct irq_domain *domain, const struct irq_domain_ops *ops)
+int its_prop_update_vsgi(int irq, u8 priority, bool group)
+{
+ struct its_cmd_info info = {
+ .cmd_type = PROP_UPDATE_VSGI,
+ {
+ .priority = priority,
+ .group = group,
+ },
+ };
+
+ return irq_set_vcpu_affinity(irq, &info);
+}
+
+int its_init_v4(struct irq_domain *domain,
+ const struct irq_domain_ops *vpe_ops,
+ const struct irq_domain_ops *sgi_ops)
{
if (domain) {
pr_info("ITS: Enabling GICv4 support\n");
gic_domain = domain;
- vpe_domain_ops = ops;
+ vpe_domain_ops = vpe_ops;
+ sgi_domain_ops = sgi_ops;
return 0;
}
diff --git a/drivers/irqchip/irq-i8259.c b/drivers/irqchip/irq-i8259.c
index d000870d9b6b..b6f6aa7b2862 100644
--- a/drivers/irqchip/irq-i8259.c
+++ b/drivers/irqchip/irq-i8259.c
@@ -268,15 +268,6 @@ static void init_8259A(int auto_eoi)
raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}
-/*
- * IRQ2 is cascade interrupt to second interrupt controller
- */
-static struct irqaction irq2 = {
- .handler = no_action,
- .name = "cascade",
- .flags = IRQF_NO_THREAD,
-};
-
static struct resource pic1_io_resource = {
.name = "pic1",
.start = PIC_MASTER_CMD,
@@ -311,6 +302,10 @@ static const struct irq_domain_ops i8259A_ops = {
*/
struct irq_domain * __init __init_i8259_irqs(struct device_node *node)
{
+ /*
+ * PIC_CASCADE_IR is cascade interrupt to second interrupt controller
+ */
+ int irq = I8259A_IRQ_BASE + PIC_CASCADE_IR;
struct irq_domain *domain;
insert_resource(&ioport_resource, &pic1_io_resource);
@@ -323,7 +318,8 @@ struct irq_domain * __init __init_i8259_irqs(struct device_node *node)
if (!domain)
panic("Failed to add i8259 IRQ domain");
- setup_irq(I8259A_IRQ_BASE + PIC_CASCADE_IR, &irq2);
+ if (request_irq(irq, no_action, IRQF_NO_THREAD, "cascade", NULL))
+ pr_err("Failed to register cascade interrupt\n");
register_syscore_ops(&i8259_syscore_ops);
return domain;
}
diff --git a/drivers/irqchip/irq-ingenic-tcu.c b/drivers/irqchip/irq-ingenic-tcu.c
index 6d05cefe9d79..7a7222d4c19c 100644
--- a/drivers/irqchip/irq-ingenic-tcu.c
+++ b/drivers/irqchip/irq-ingenic-tcu.c
@@ -180,3 +180,4 @@ err_free_tcu:
IRQCHIP_DECLARE(jz4740_tcu_irq, "ingenic,jz4740-tcu", ingenic_tcu_irq_init);
IRQCHIP_DECLARE(jz4725b_tcu_irq, "ingenic,jz4725b-tcu", ingenic_tcu_irq_init);
IRQCHIP_DECLARE(jz4770_tcu_irq, "ingenic,jz4770-tcu", ingenic_tcu_irq_init);
+IRQCHIP_DECLARE(x1000_tcu_irq, "ingenic,x1000-tcu", ingenic_tcu_irq_init);
diff --git a/drivers/irqchip/irq-ingenic.c b/drivers/irqchip/irq-ingenic.c
index c5589ee0dfb3..9f3da4260ca6 100644
--- a/drivers/irqchip/irq-ingenic.c
+++ b/drivers/irqchip/irq-ingenic.c
@@ -58,11 +58,6 @@ static irqreturn_t intc_cascade(int irq, void *data)
return IRQ_HANDLED;
}
-static struct irqaction intc_cascade_action = {
- .handler = intc_cascade,
- .name = "SoC intc cascade interrupt",
-};
-
static int __init ingenic_intc_of_init(struct device_node *node,
unsigned num_chips)
{
@@ -130,7 +125,9 @@ static int __init ingenic_intc_of_init(struct device_node *node,
irq_reg_writel(gc, IRQ_MSK(32), JZ_REG_INTC_SET_MASK);
}
- setup_irq(parent_irq, &intc_cascade_action);
+ if (request_irq(parent_irq, intc_cascade, 0,
+ "SoC intc cascade interrupt", NULL))
+ pr_err("Failed to register SoC intc cascade interrupt\n");
return 0;
out_domain_remove:
diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
index 6e5e3172796b..3819185bfd02 100644
--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
@@ -461,7 +461,7 @@ static int intc_irqpin_probe(struct platform_device *pdev)
}
i->iomem = devm_ioremap(dev, io[k]->start,
- resource_size(io[k]));
+ resource_size(io[k]));
if (!i->iomem) {
dev_err(dev, "failed to remap IOMEM\n");
ret = -ENXIO;
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index aa4af886e43a..c34fb3ae0ff8 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -4,6 +4,7 @@
* Copyright (C) 2018 Christoph Hellwig
*/
#define pr_fmt(fmt) "plic: " fmt
+#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
@@ -55,7 +56,14 @@
#define CONTEXT_THRESHOLD 0x00
#define CONTEXT_CLAIM 0x04
-static void __iomem *plic_regs;
+#define PLIC_DISABLE_THRESHOLD 0xf
+#define PLIC_ENABLE_THRESHOLD 0
+
+struct plic_priv {
+ struct cpumask lmask;
+ struct irq_domain *irqdomain;
+ void __iomem *regs;
+};
struct plic_handler {
bool present;
@@ -66,6 +74,7 @@ struct plic_handler {
*/
raw_spinlock_t enable_lock;
void __iomem *enable_base;
+ struct plic_priv *priv;
};
static DEFINE_PER_CPU(struct plic_handler, plic_handlers);
@@ -84,31 +93,40 @@ static inline void plic_toggle(struct plic_handler *handler,
}
static inline void plic_irq_toggle(const struct cpumask *mask,
- int hwirq, int enable)
+ struct irq_data *d, int enable)
{
int cpu;
+ struct plic_priv *priv = irq_get_chip_data(d->irq);
- writel(enable, plic_regs + PRIORITY_BASE + hwirq * PRIORITY_PER_ID);
+ writel(enable, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
for_each_cpu(cpu, mask) {
struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
- if (handler->present)
- plic_toggle(handler, hwirq, enable);
+ if (handler->present &&
+ cpumask_test_cpu(cpu, &handler->priv->lmask))
+ plic_toggle(handler, d->hwirq, enable);
}
}
static void plic_irq_unmask(struct irq_data *d)
{
- unsigned int cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
- cpu_online_mask);
+ struct cpumask amask;
+ unsigned int cpu;
+ struct plic_priv *priv = irq_get_chip_data(d->irq);
+
+ cpumask_and(&amask, &priv->lmask, cpu_online_mask);
+ cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
+ &amask);
if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
return;
- plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
+ plic_irq_toggle(cpumask_of(cpu), d, 1);
}
static void plic_irq_mask(struct irq_data *d)
{
- plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
+ struct plic_priv *priv = irq_get_chip_data(d->irq);
+
+ plic_irq_toggle(&priv->lmask, d, 0);
}
#ifdef CONFIG_SMP
@@ -116,17 +134,21 @@ static int plic_set_affinity(struct irq_data *d,
const struct cpumask *mask_val, bool force)
{
unsigned int cpu;
+ struct cpumask amask;
+ struct plic_priv *priv = irq_get_chip_data(d->irq);
+
+ cpumask_and(&amask, &priv->lmask, mask_val);
if (force)
- cpu = cpumask_first(mask_val);
+ cpu = cpumask_first(&amask);
else
- cpu = cpumask_any_and(mask_val, cpu_online_mask);
+ cpu = cpumask_any_and(&amask, cpu_online_mask);
if (cpu >= nr_cpu_ids)
return -EINVAL;
- plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
- plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
+ plic_irq_toggle(&priv->lmask, d, 0);
+ plic_irq_toggle(cpumask_of(cpu), d, 1);
irq_data_update_effective_affinity(d, cpumask_of(cpu));
@@ -187,8 +209,6 @@ static const struct irq_domain_ops plic_irqdomain_ops = {
.free = irq_domain_free_irqs_top,
};
-static struct irq_domain *plic_irqdomain;
-
/*
* Handling an interrupt is a two-step process: first you claim the interrupt
* by reading the claim register, then you complete the interrupt by writing
@@ -205,7 +225,7 @@ static void plic_handle_irq(struct pt_regs *regs)
csr_clear(CSR_IE, IE_EIE);
while ((hwirq = readl(claim))) {
- int irq = irq_find_mapping(plic_irqdomain, hwirq);
+ int irq = irq_find_mapping(handler->priv->irqdomain, hwirq);
if (unlikely(irq <= 0))
pr_warn_ratelimited("can't find mapping for hwirq %lu\n",
@@ -230,20 +250,48 @@ static int plic_find_hart_id(struct device_node *node)
return -1;
}
+static void plic_set_threshold(struct plic_handler *handler, u32 threshold)
+{
+ /* priority must be > threshold to trigger an interrupt */
+ writel(threshold, handler->hart_base + CONTEXT_THRESHOLD);
+}
+
+static int plic_dying_cpu(unsigned int cpu)
+{
+ struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
+
+ csr_clear(CSR_IE, IE_EIE);
+ plic_set_threshold(handler, PLIC_DISABLE_THRESHOLD);
+
+ return 0;
+}
+
+static int plic_starting_cpu(unsigned int cpu)
+{
+ struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
+
+ csr_set(CSR_IE, IE_EIE);
+ plic_set_threshold(handler, PLIC_ENABLE_THRESHOLD);
+
+ return 0;
+}
+
static int __init plic_init(struct device_node *node,
struct device_node *parent)
{
int error = 0, nr_contexts, nr_handlers = 0, i;
u32 nr_irqs;
+ struct plic_priv *priv;
- if (plic_regs) {
- pr_warn("PLIC already present.\n");
- return -ENXIO;
- }
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
- plic_regs = of_iomap(node, 0);
- if (WARN_ON(!plic_regs))
- return -EIO;
+ priv->regs = of_iomap(node, 0);
+ if (WARN_ON(!priv->regs)) {
+ error = -EIO;
+ goto out_free_priv;
+ }
error = -EINVAL;
of_property_read_u32(node, "riscv,ndev", &nr_irqs);
@@ -257,9 +305,9 @@ static int __init plic_init(struct device_node *node,
goto out_iounmap;
error = -ENOMEM;
- plic_irqdomain = irq_domain_add_linear(node, nr_irqs + 1,
- &plic_irqdomain_ops, NULL);
- if (WARN_ON(!plic_irqdomain))
+ priv->irqdomain = irq_domain_add_linear(node, nr_irqs + 1,
+ &plic_irqdomain_ops, priv);
+ if (WARN_ON(!priv->irqdomain))
goto out_iounmap;
for (i = 0; i < nr_contexts; i++) {
@@ -267,7 +315,6 @@ static int __init plic_init(struct device_node *node,
struct plic_handler *handler;
irq_hw_number_t hwirq;
int cpu, hartid;
- u32 threshold = 0;
if (of_irq_parse_one(node, i, &parent)) {
pr_err("failed to parse parent for context %d.\n", i);
@@ -301,32 +348,36 @@ static int __init plic_init(struct device_node *node,
handler = per_cpu_ptr(&plic_handlers, cpu);
if (handler->present) {
pr_warn("handler already present for context %d.\n", i);
- threshold = 0xffffffff;
+ plic_set_threshold(handler, PLIC_DISABLE_THRESHOLD);
goto done;
}
+ cpumask_set_cpu(cpu, &priv->lmask);
handler->present = true;
handler->hart_base =
- plic_regs + CONTEXT_BASE + i * CONTEXT_PER_HART;
+ priv->regs + CONTEXT_BASE + i * CONTEXT_PER_HART;
raw_spin_lock_init(&handler->enable_lock);
handler->enable_base =
- plic_regs + ENABLE_BASE + i * ENABLE_PER_HART;
-
+ priv->regs + ENABLE_BASE + i * ENABLE_PER_HART;
+ handler->priv = priv;
done:
- /* priority must be > threshold to trigger an interrupt */
- writel(threshold, handler->hart_base + CONTEXT_THRESHOLD);
for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
plic_toggle(handler, hwirq, 0);
nr_handlers++;
}
+ cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
+ "irqchip/sifive/plic:starting",
+ plic_starting_cpu, plic_dying_cpu);
pr_info("mapped %d interrupts with %d handlers for %d contexts.\n",
nr_irqs, nr_handlers, nr_contexts);
set_handle_irq(plic_handle_irq);
return 0;
out_iounmap:
- iounmap(plic_regs);
+ iounmap(priv->regs);
+out_free_priv:
+ kfree(priv);
return error;
}
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index e00f2fa27f00..faa8482c8246 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -604,12 +604,24 @@ static void stm32_exti_h_syscore_deinit(void)
unregister_syscore_ops(&stm32_exti_h_syscore_ops);
}
+static int stm32_exti_h_retrigger(struct irq_data *d)
+{
+ struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
+ const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
+ void __iomem *base = chip_data->host_data->base;
+ u32 mask = BIT(d->hwirq % IRQS_PER_BANK);
+
+ writel_relaxed(mask, base + stm32_bank->swier_ofst);
+
+ return 0;
+}
+
static struct irq_chip stm32_exti_h_chip = {
.name = "stm32-exti-h",
.irq_eoi = stm32_exti_h_eoi,
.irq_mask = stm32_exti_h_mask,
.irq_unmask = stm32_exti_h_unmask,
- .irq_retrigger = irq_chip_retrigger_hierarchy,
+ .irq_retrigger = stm32_exti_h_retrigger,
.irq_set_type = stm32_exti_h_set_type,
.irq_set_wake = stm32_exti_h_set_wake,
.flags = IRQCHIP_MASK_ON_SUSPEND,
diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c
index 928858dada75..f1386733d3bc 100644
--- a/drivers/irqchip/irq-versatile-fpga.c
+++ b/drivers/irqchip/irq-versatile-fpga.c
@@ -6,6 +6,7 @@
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/irqchip/versatile-fpga.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
@@ -68,12 +69,16 @@ static void fpga_irq_unmask(struct irq_data *d)
static void fpga_irq_handle(struct irq_desc *desc)
{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
struct fpga_irq_data *f = irq_desc_get_handler_data(desc);
- u32 status = readl(f->base + IRQ_STATUS);
+ u32 status;
+
+ chained_irq_enter(chip, desc);
+ status = readl(f->base + IRQ_STATUS);
if (status == 0) {
do_bad_IRQ(desc);
- return;
+ goto out;
}
do {
@@ -82,6 +87,9 @@ static void fpga_irq_handle(struct irq_desc *desc)
status &= ~(1 << irq);
generic_handle_irq(irq_find_mapping(f->domain, irq));
} while (status);
+
+out:
+ chained_irq_exit(chip, desc);
}
/*
@@ -204,6 +212,9 @@ int __init fpga_irq_of_init(struct device_node *node,
if (of_property_read_u32(node, "valid-mask", &valid_mask))
valid_mask = 0;
+ writel(clear_mask, base + IRQ_ENABLE_CLEAR);
+ writel(clear_mask, base + FIQ_ENABLE_CLEAR);
+
/* Some chips are cascaded from a parent IRQ */
parent_irq = irq_of_parse_and_map(node, 0);
if (!parent_irq) {
@@ -213,9 +224,6 @@ int __init fpga_irq_of_init(struct device_node *node,
fpga_irq_init(base, node->name, 0, parent_irq, valid_mask, node);
- writel(clear_mask, base + IRQ_ENABLE_CLEAR);
- writel(clear_mask, base + FIQ_ENABLE_CLEAR);
-
/*
* On Versatile AB/PB, some secondary interrupts have a direct
* pass-thru to the primary controller for IRQs 20 and 22-31 which need
diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c
index f3f20a3cff50..3c87d925f74c 100644
--- a/drivers/irqchip/irq-vic.c
+++ b/drivers/irqchip/irq-vic.c
@@ -509,9 +509,7 @@ static int __init vic_of_init(struct device_node *node,
void __iomem *regs;
u32 interrupt_mask = ~0;
u32 wakeup_mask = ~0;
-
- if (WARN(parent, "non-root VICs are not supported"))
- return -EINVAL;
+ int parent_irq;
regs = of_iomap(node, 0);
if (WARN_ON(!regs))
@@ -519,11 +517,14 @@ static int __init vic_of_init(struct device_node *node,
of_property_read_u32(node, "valid-mask", &interrupt_mask);
of_property_read_u32(node, "valid-wakeup-mask", &wakeup_mask);
+ parent_irq = of_irq_get(node, 0);
+ if (parent_irq < 0)
+ parent_irq = 0;
/*
* Passing 0 as first IRQ makes the simple domain allocate descriptors
*/
- __vic_init(regs, 0, 0, interrupt_mask, wakeup_mask, node);
+ __vic_init(regs, parent_irq, 0, interrupt_mask, wakeup_mask, node);
return 0;
}
diff --git a/drivers/irqchip/irq-xilinx-intc.c b/drivers/irqchip/irq-xilinx-intc.c
index e3043ded8973..7f811fe5bf69 100644
--- a/drivers/irqchip/irq-xilinx-intc.c
+++ b/drivers/irqchip/irq-xilinx-intc.c
@@ -38,29 +38,31 @@ struct xintc_irq_chip {
void __iomem *base;
struct irq_domain *root_domain;
u32 intr_mask;
+ u32 nr_irq;
};
-static struct xintc_irq_chip *xintc_irqc;
+static struct xintc_irq_chip *primary_intc;
-static void xintc_write(int reg, u32 data)
+static void xintc_write(struct xintc_irq_chip *irqc, int reg, u32 data)
{
if (static_branch_unlikely(&xintc_is_be))
- iowrite32be(data, xintc_irqc->base + reg);
+ iowrite32be(data, irqc->base + reg);
else
- iowrite32(data, xintc_irqc->base + reg);
+ iowrite32(data, irqc->base + reg);
}
-static unsigned int xintc_read(int reg)
+static u32 xintc_read(struct xintc_irq_chip *irqc, int reg)
{
if (static_branch_unlikely(&xintc_is_be))
- return ioread32be(xintc_irqc->base + reg);
+ return ioread32be(irqc->base + reg);
else
- return ioread32(xintc_irqc->base + reg);
+ return ioread32(irqc->base + reg);
}
static void intc_enable_or_unmask(struct irq_data *d)
{
- unsigned long mask = 1 << d->hwirq;
+ struct xintc_irq_chip *irqc = irq_data_get_irq_chip_data(d);
+ unsigned long mask = BIT(d->hwirq);
pr_debug("irq-xilinx: enable_or_unmask: %ld\n", d->hwirq);
@@ -69,30 +71,35 @@ static void intc_enable_or_unmask(struct irq_data *d)
* acks the irq before calling the interrupt handler
*/
if (irqd_is_level_type(d))
- xintc_write(IAR, mask);
+ xintc_write(irqc, IAR, mask);
- xintc_write(SIE, mask);
+ xintc_write(irqc, SIE, mask);
}
static void intc_disable_or_mask(struct irq_data *d)
{
+ struct xintc_irq_chip *irqc = irq_data_get_irq_chip_data(d);
+
pr_debug("irq-xilinx: disable: %ld\n", d->hwirq);
- xintc_write(CIE, 1 << d->hwirq);
+ xintc_write(irqc, CIE, BIT(d->hwirq));
}
static void intc_ack(struct irq_data *d)
{
+ struct xintc_irq_chip *irqc = irq_data_get_irq_chip_data(d);
+
pr_debug("irq-xilinx: ack: %ld\n", d->hwirq);
- xintc_write(IAR, 1 << d->hwirq);
+ xintc_write(irqc, IAR, BIT(d->hwirq));
}
static void intc_mask_ack(struct irq_data *d)
{
- unsigned long mask = 1 << d->hwirq;
+ struct xintc_irq_chip *irqc = irq_data_get_irq_chip_data(d);
+ unsigned long mask = BIT(d->hwirq);
pr_debug("irq-xilinx: disable_and_ack: %ld\n", d->hwirq);
- xintc_write(CIE, mask);
- xintc_write(IAR, mask);
+ xintc_write(irqc, CIE, mask);
+ xintc_write(irqc, IAR, mask);
}
static struct irq_chip intc_dev = {
@@ -103,13 +110,14 @@ static struct irq_chip intc_dev = {
.irq_mask_ack = intc_mask_ack,
};
-unsigned int xintc_get_irq(void)
+static unsigned int xintc_get_irq_local(struct xintc_irq_chip *irqc)
{
- unsigned int hwirq, irq = -1;
+ unsigned int irq = 0;
+ u32 hwirq;
- hwirq = xintc_read(IVR);
+ hwirq = xintc_read(irqc, IVR);
if (hwirq != -1U)
- irq = irq_find_mapping(xintc_irqc->root_domain, hwirq);
+ irq = irq_find_mapping(irqc->root_domain, hwirq);
pr_debug("irq-xilinx: hwirq=%d, irq=%d\n", hwirq, irq);
@@ -118,15 +126,18 @@ unsigned int xintc_get_irq(void)
static int xintc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
{
- if (xintc_irqc->intr_mask & (1 << hw)) {
+ struct xintc_irq_chip *irqc = d->host_data;
+
+ if (irqc->intr_mask & BIT(hw)) {
irq_set_chip_and_handler_name(irq, &intc_dev,
- handle_edge_irq, "edge");
+ handle_edge_irq, "edge");
irq_clear_status_flags(irq, IRQ_LEVEL);
} else {
irq_set_chip_and_handler_name(irq, &intc_dev,
- handle_level_irq, "level");
+ handle_level_irq, "level");
irq_set_status_flags(irq, IRQ_LEVEL);
}
+ irq_set_chip_data(irq, irqc);
return 0;
}
@@ -138,43 +149,55 @@ static const struct irq_domain_ops xintc_irq_domain_ops = {
static void xil_intc_irq_handler(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct xintc_irq_chip *irqc;
u32 pending;
+ irqc = irq_data_get_irq_handler_data(&desc->irq_data);
chained_irq_enter(chip, desc);
do {
- pending = xintc_get_irq();
- if (pending == -1U)
+ pending = xintc_get_irq_local(irqc);
+ if (pending == 0)
break;
generic_handle_irq(pending);
} while (true);
chained_irq_exit(chip, desc);
}
+static void xil_intc_handle_irq(struct pt_regs *regs)
+{
+ u32 hwirq;
+ struct xintc_irq_chip *irqc = primary_intc;
+
+ do {
+ hwirq = xintc_read(irqc, IVR);
+ if (likely(hwirq != -1U)) {
+ int ret;
+
+ ret = handle_domain_irq(irqc->root_domain, hwirq, regs);
+ WARN_ONCE(ret, "Unhandled HWIRQ %d\n", hwirq);
+ continue;
+ }
+
+ break;
+ } while (1);
+}
+
static int __init xilinx_intc_of_init(struct device_node *intc,
struct device_node *parent)
{
- u32 nr_irq;
- int ret, irq;
struct xintc_irq_chip *irqc;
-
- if (xintc_irqc) {
- pr_err("irq-xilinx: Multiple instances aren't supported\n");
- return -EINVAL;
- }
+ int ret, irq;
irqc = kzalloc(sizeof(*irqc), GFP_KERNEL);
if (!irqc)
return -ENOMEM;
-
- xintc_irqc = irqc;
-
irqc->base = of_iomap(intc, 0);
BUG_ON(!irqc->base);
- ret = of_property_read_u32(intc, "xlnx,num-intr-inputs", &nr_irq);
+ ret = of_property_read_u32(intc, "xlnx,num-intr-inputs", &irqc->nr_irq);
if (ret < 0) {
pr_err("irq-xilinx: unable to read xlnx,num-intr-inputs\n");
- goto err_alloc;
+ goto error;
}
ret = of_property_read_u32(intc, "xlnx,kind-of-intr", &irqc->intr_mask);
@@ -183,34 +206,35 @@ static int __init xilinx_intc_of_init(struct device_node *intc,
irqc->intr_mask = 0;
}
- if (irqc->intr_mask >> nr_irq)
+ if (irqc->intr_mask >> irqc->nr_irq)
pr_warn("irq-xilinx: mismatch in kind-of-intr param\n");
pr_info("irq-xilinx: %pOF: num_irq=%d, edge=0x%x\n",
- intc, nr_irq, irqc->intr_mask);
+ intc, irqc->nr_irq, irqc->intr_mask);
/*
* Disable all external interrupts until they are
* explicity requested.
*/
- xintc_write(IER, 0);
+ xintc_write(irqc, IER, 0);
/* Acknowledge any pending interrupts just in case. */
- xintc_write(IAR, 0xffffffff);
+ xintc_write(irqc, IAR, 0xffffffff);
/* Turn on the Master Enable. */
- xintc_write(MER, MER_HIE | MER_ME);
- if (!(xintc_read(MER) & (MER_HIE | MER_ME))) {
+ xintc_write(irqc, MER, MER_HIE | MER_ME);
+ if (xintc_read(irqc, MER) != (MER_HIE | MER_ME)) {
static_branch_enable(&xintc_is_be);
- xintc_write(MER, MER_HIE | MER_ME);
+ xintc_write(irqc, MER, MER_HIE | MER_ME);
}
- irqc->root_domain = irq_domain_add_linear(intc, nr_irq,
+ irqc->root_domain = irq_domain_add_linear(intc, irqc->nr_irq,
&xintc_irq_domain_ops, irqc);
if (!irqc->root_domain) {
pr_err("irq-xilinx: Unable to create IRQ domain\n");
- goto err_alloc;
+ ret = -EINVAL;
+ goto error;
}
if (parent) {
@@ -222,16 +246,17 @@ static int __init xilinx_intc_of_init(struct device_node *intc,
} else {
pr_err("irq-xilinx: interrupts property not in DT\n");
ret = -EINVAL;
- goto err_alloc;
+ goto error;
}
} else {
- irq_set_default_host(irqc->root_domain);
+ primary_intc = irqc;
+ set_handle_irq(xil_intc_handle_irq);
}
return 0;
-err_alloc:
- xintc_irqc = NULL;
+error:
+ iounmap(irqc->base);
kfree(irqc);
return ret;
diff --git a/drivers/irqchip/qcom-irq-combiner.c b/drivers/irqchip/qcom-irq-combiner.c
index abfe59284ff2..aa54bfcb0433 100644
--- a/drivers/irqchip/qcom-irq-combiner.c
+++ b/drivers/irqchip/qcom-irq-combiner.c
@@ -33,7 +33,7 @@ struct combiner {
int parent_irq;
u32 nirqs;
u32 nregs;
- struct combiner_reg regs[0];
+ struct combiner_reg regs[];
};
static inline int irq_nr(u32 reg, u32 bit)
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
index 6e3c04b46fb1..7876dc4b28f8 100644
--- a/drivers/pci/pcie/Kconfig
+++ b/drivers/pci/pcie/Kconfig
@@ -34,6 +34,7 @@ config PCIEAER
config PCIEAER_INJECT
tristate "PCI Express error injection support"
depends on PCIEAER
+ select GENERIC_IRQ_INJECTION
help
This enables PCI Express Root Port Advanced Error Reporting
(AER) software error injector.
diff --git a/drivers/pci/pcie/aer_inject.c b/drivers/pci/pcie/aer_inject.c
index 6988fe7389b9..21cc3d3387f7 100644
--- a/drivers/pci/pcie/aer_inject.c
+++ b/drivers/pci/pcie/aer_inject.c
@@ -16,7 +16,7 @@
#include <linux/module.h>
#include <linux/init.h>
-#include <linux/irq.h>
+#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include <linux/pci.h>
#include <linux/slab.h>
@@ -468,9 +468,7 @@ static int aer_inject(struct aer_error_inj *einj)
}
pci_info(edev->port, "Injecting errors %08x/%08x into device %s\n",
einj->cor_status, einj->uncor_status, pci_name(dev));
- local_irq_disable();
- generic_handle_irq(edev->irq);
- local_irq_enable();
+ ret = irq_inject_interrupt(edev->irq);
} else {
pci_err(rpdev, "AER device not found\n");
ret = -ENODEV;
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index 2d5e0435af0a..af3b24f26ff2 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -92,6 +92,7 @@ struct stm32_gpio_bank {
u32 bank_nr;
u32 bank_ioport_nr;
u32 pin_backup[STM32_GPIO_PINS_PER_BANK];
+ u8 irq_type[STM32_GPIO_PINS_PER_BANK];
};
struct stm32_pinctrl {
@@ -303,6 +304,50 @@ static const struct gpio_chip stm32_gpio_template = {
.get_direction = stm32_gpio_get_direction,
};
+static void stm32_gpio_irq_trigger(struct irq_data *d)
+{
+ struct stm32_gpio_bank *bank = d->domain->host_data;
+ int level;
+
+ /* If level interrupt type then retrig */
+ level = stm32_gpio_get(&bank->gpio_chip, d->hwirq);
+ if ((level == 0 && bank->irq_type[d->hwirq] == IRQ_TYPE_LEVEL_LOW) ||
+ (level == 1 && bank->irq_type[d->hwirq] == IRQ_TYPE_LEVEL_HIGH))
+ irq_chip_retrigger_hierarchy(d);
+}
+
+static void stm32_gpio_irq_eoi(struct irq_data *d)
+{
+ irq_chip_eoi_parent(d);
+ stm32_gpio_irq_trigger(d);
+};
+
+static int stm32_gpio_set_type(struct irq_data *d, unsigned int type)
+{
+ struct stm32_gpio_bank *bank = d->domain->host_data;
+ u32 parent_type;
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ case IRQ_TYPE_EDGE_FALLING:
+ case IRQ_TYPE_EDGE_BOTH:
+ parent_type = type;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ parent_type = IRQ_TYPE_EDGE_RISING;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ parent_type = IRQ_TYPE_EDGE_FALLING;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ bank->irq_type[d->hwirq] = type;
+
+ return irq_chip_set_type_parent(d, parent_type);
+};
+
static int stm32_gpio_irq_request_resources(struct irq_data *irq_data)
{
struct stm32_gpio_bank *bank = irq_data->domain->host_data;
@@ -330,13 +375,19 @@ static void stm32_gpio_irq_release_resources(struct irq_data *irq_data)
gpiochip_unlock_as_irq(&bank->gpio_chip, irq_data->hwirq);
}
+static void stm32_gpio_irq_unmask(struct irq_data *d)
+{
+ irq_chip_unmask_parent(d);
+ stm32_gpio_irq_trigger(d);
+}
+
static struct irq_chip stm32_gpio_irq_chip = {
.name = "stm32gpio",
- .irq_eoi = irq_chip_eoi_parent,
+ .irq_eoi = stm32_gpio_irq_eoi,
.irq_ack = irq_chip_ack_parent,
.irq_mask = irq_chip_mask_parent,
- .irq_unmask = irq_chip_unmask_parent,
- .irq_set_type = irq_chip_set_type_parent,
+ .irq_unmask = stm32_gpio_irq_unmask,
+ .irq_set_type = stm32_gpio_set_type,
.irq_set_wake = irq_chip_set_wake_parent,
.irq_request_resources = stm32_gpio_irq_request_resources,
.irq_release_resources = stm32_gpio_irq_release_resources,