From d4913eee152d3ffaf9a1e70073bc15e773625685 Mon Sep 17 00:00:00 2001
From: Mark Brown
Date: Mon, 16 Jan 2023 16:04:43 +0000
Subject: arm64/sme: Add basic enumeration for SME2

Add basic feature detection for SME2, detecting that the feature is
present and disabling traps for ZT0.

Signed-off-by: Mark Brown
Link: https://lore.kernel.org/r/20221208-arm64-sme2-v4-8-f2fa0aef982f@kernel.org
Signed-off-by: Catalin Marinas
---
 arch/arm64/tools/cpucaps | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch/arm64/tools/cpucaps')

diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
index a86ee376920a..1222b0ed63a2 100644
--- a/arch/arm64/tools/cpucaps
+++ b/arch/arm64/tools/cpucaps
@@ -50,6 +50,7 @@ MTE
 MTE_ASYMM
 SME
 SME_FA64
+SME2
 SPECTRE_V2
 SPECTRE_V3A
 SPECTRE_V4
--
cgit v1.2.3


From 0e62ccb9598dae492588bef0453a059bb2bbbabe Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Mon, 30 Jan 2023 14:54:25 +0000
Subject: arm64: rename ARM64_HAS_SYSREG_GIC_CPUIF to ARM64_HAS_GIC_CPUIF_SYSREGS

Subsequent patches will add more GIC-related cpucaps. When we do so, it
would be nice to give them a consistent HAS_GIC_* prefix.

In preparation for doing so, this patch renames the existing
ARM64_HAS_SYSREG_GIC_CPUIF cap to ARM64_HAS_GIC_CPUIF_SYSREGS.

The 'CPUIF_SYSREGS' suffix is chosen so that this will be ordered ahead
of other ARM64_HAS_GIC_* definitions in subsequent patches.

The cpucaps file was hand-modified; all other changes were scripted
with:

  find . -type f -name '*.[chS]' -print0 | \
    xargs -0 sed -i 's/ARM64_HAS_SYSREG_GIC_CPUIF/ARM64_HAS_GIC_CPUIF_SYSREGS/'

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland
Reviewed-by: Marc Zyngier
Cc: Mark Brown
Cc: Will Deacon
Link: https://lore.kernel.org/r/20230130145429.903791-2-mark.rutland@arm.com
Signed-off-by: Catalin Marinas
---
 arch/arm64/kernel/cpufeature.c | 2 +-
 arch/arm64/tools/cpucaps       | 2 +-
 drivers/irqchip/irq-gic.c      | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

(limited to 'arch/arm64/tools/cpucaps')

diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index a77315b338e6..ad2a1f5503f3 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -2142,7 +2142,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 	},
 	{
 		.desc = "GIC system register CPU interface",
-		.capability = ARM64_HAS_SYSREG_GIC_CPUIF,
+		.capability = ARM64_HAS_GIC_CPUIF_SYSREGS,
 		.type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
 		.matches = has_useable_gicv3_cpuif,
 		.sys_reg = SYS_ID_AA64PFR0_EL1,
diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
index a86ee376920a..373eb148498e 100644
--- a/arch/arm64/tools/cpucaps
+++ b/arch/arm64/tools/cpucaps
@@ -28,6 +28,7 @@ HAS_GENERIC_AUTH
 HAS_GENERIC_AUTH_ARCH_QARMA3
 HAS_GENERIC_AUTH_ARCH_QARMA5
 HAS_GENERIC_AUTH_IMP_DEF
+HAS_GIC_CPUIF_SYSREGS
 HAS_IRQ_PRIO_MASKING
 HAS_LDAPR
 HAS_LSE_ATOMICS
@@ -38,7 +39,6 @@ HAS_RAS_EXTN
 HAS_RNG
 HAS_SB
 HAS_STAGE2_FWB
-HAS_SYSREG_GIC_CPUIF
 HAS_TIDCP1
 HAS_TLB_RANGE
 HAS_VIRT_HOST_EXTN
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 210bc2f4d555..6ae697a3800d 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -54,7 +54,7 @@
 
 static void gic_check_cpu_features(void)
 {
-	WARN_TAINT_ONCE(this_cpu_has_cap(ARM64_HAS_SYSREG_GIC_CPUIF),
+	WARN_TAINT_ONCE(this_cpu_has_cap(ARM64_HAS_GIC_CPUIF_SYSREGS),
 			TAINT_CPU_OUT_OF_SPEC,
 			"GICv3 system registers enabled, broken firmware!\n");
 }
--
cgit v1.2.3

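Both patches above edit arch/arm64/tools/cpucaps; each name listed in that file is
turned by the build into an ARM64_<NAME> capability constant that kernel code can
test with the usual cpucap helpers (the diffs above show this pattern with
this_cpu_has_cap() and cpus_have_const_cap()). As a hypothetical illustration only
(not part of either patch; the helper function below is invented for the example),
the new SME2 entry would typically be consumed along these lines:

  /* Hypothetical example: consuming the generated ARM64_SME2 cpucap. */
  #include <linux/printk.h>
  #include <asm/cpufeature.h>

  static void report_sme2(void)
  {
  	/* "SME2" in arch/arm64/tools/cpucaps becomes the ARM64_SME2 constant. */
  	if (cpus_have_const_cap(ARM64_SME2))
  		pr_info("SME2 usable on all CPUs\n");
  }
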
From c888b7bd916c4d85e06a6c3e507d5d68b229518f Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Mon, 30 Jan 2023 14:54:26 +0000
Subject: arm64: rename ARM64_HAS_IRQ_PRIO_MASKING to ARM64_HAS_GIC_PRIO_MASKING

Subsequent patches will add more GIC-related cpucaps. When we do so, it
would be nice to give them a consistent HAS_GIC_* prefix.

In preparation for doing so, this patch renames the existing
ARM64_HAS_IRQ_PRIO_MASKING cap to ARM64_HAS_GIC_PRIO_MASKING.

The cpucaps file was hand-modified; all other changes were scripted
with:

  find . -type f -name '*.[chS]' -print0 | \
    xargs -0 sed -i 's/ARM64_HAS_IRQ_PRIO_MASKING/ARM64_HAS_GIC_PRIO_MASKING/'

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland
Reviewed-by: Marc Zyngier
Cc: Mark Brown
Cc: Will Deacon
Link: https://lore.kernel.org/r/20230130145429.903791-3-mark.rutland@arm.com
Signed-off-by: Catalin Marinas
---
 arch/arm64/include/asm/cpufeature.h |  2 +-
 arch/arm64/include/asm/irqflags.h   | 10 +++++-----
 arch/arm64/include/asm/ptrace.h     |  2 +-
 arch/arm64/kernel/cpufeature.c      |  2 +-
 arch/arm64/kernel/entry.S           |  4 ++--
 arch/arm64/tools/cpucaps            |  2 +-
 6 files changed, 11 insertions(+), 11 deletions(-)

(limited to 'arch/arm64/tools/cpucaps')

diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 03d1c9d7af82..c50928398e4b 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -806,7 +806,7 @@ static inline bool system_has_full_ptr_auth(void)
 static __always_inline bool system_uses_irq_prio_masking(void)
 {
 	return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
-	       cpus_have_const_cap(ARM64_HAS_IRQ_PRIO_MASKING);
+	       cpus_have_const_cap(ARM64_HAS_GIC_PRIO_MASKING);
 }
 
 static inline bool system_supports_mte(void)
diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h
index b57b9b1e4344..f51653fb90e4 100644
--- a/arch/arm64/include/asm/irqflags.h
+++ b/arch/arm64/include/asm/irqflags.h
@@ -35,7 +35,7 @@ static inline void arch_local_irq_enable(void)
 	asm volatile(ALTERNATIVE(
 		"msr daifclr, #3 // arch_local_irq_enable",
 		__msr_s(SYS_ICC_PMR_EL1, "%0"),
-		ARM64_HAS_IRQ_PRIO_MASKING)
+		ARM64_HAS_GIC_PRIO_MASKING)
 		:
 		: "r" ((unsigned long) GIC_PRIO_IRQON)
 		: "memory");
@@ -54,7 +54,7 @@ static inline void arch_local_irq_disable(void)
 	asm volatile(ALTERNATIVE(
 		"msr daifset, #3 // arch_local_irq_disable",
 		__msr_s(SYS_ICC_PMR_EL1, "%0"),
-		ARM64_HAS_IRQ_PRIO_MASKING)
+		ARM64_HAS_GIC_PRIO_MASKING)
 		:
 		: "r" ((unsigned long) GIC_PRIO_IRQOFF)
 		: "memory");
@@ -70,7 +70,7 @@ static inline unsigned long arch_local_save_flags(void)
 	asm volatile(ALTERNATIVE(
 		"mrs %0, daif",
 		__mrs_s("%0", SYS_ICC_PMR_EL1),
-		ARM64_HAS_IRQ_PRIO_MASKING)
+		ARM64_HAS_GIC_PRIO_MASKING)
 		: "=&r" (flags)
 		:
 		: "memory");
@@ -85,7 +85,7 @@ static inline int arch_irqs_disabled_flags(unsigned long flags)
 	asm volatile(ALTERNATIVE(
 		"and %w0, %w1, #" __stringify(PSR_I_BIT),
 		"eor %w0, %w1, #" __stringify(GIC_PRIO_IRQON),
-		ARM64_HAS_IRQ_PRIO_MASKING)
+		ARM64_HAS_GIC_PRIO_MASKING)
 		: "=&r" (res)
 		: "r" ((int) flags)
 		: "memory");
@@ -122,7 +122,7 @@ static inline void arch_local_irq_restore(unsigned long flags)
 	asm volatile(ALTERNATIVE(
 		"msr daif, %0",
 		__msr_s(SYS_ICC_PMR_EL1, "%0"),
-		ARM64_HAS_IRQ_PRIO_MASKING)
+		ARM64_HAS_GIC_PRIO_MASKING)
 		:
 		: "r" (flags)
 		: "memory");
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index 41b332c054ab..47ec58031f11 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -194,7 +194,7 @@ struct pt_regs {
 	u32 unused2;
 #endif
 	u64 sdei_ttbr1;
-	/* Only valid when ARM64_HAS_IRQ_PRIO_MASKING is enabled. */
+	/* Only valid when ARM64_HAS_GIC_PRIO_MASKING is enabled. */
 	u64 pmr_save;
 	u64 stackframe[2];
 
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index ad2a1f5503f3..afd547a5309c 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -2534,7 +2534,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		 * Depends on having GICv3
 		 */
 		.desc = "IRQ priority masking",
-		.capability = ARM64_HAS_IRQ_PRIO_MASKING,
+		.capability = ARM64_HAS_GIC_PRIO_MASKING,
 		.type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
 		.matches = can_use_gic_priorities,
 		.sys_reg = SYS_ID_AA64PFR0_EL1,
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 11cb99c4d298..e2d1d3d5de1d 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -312,7 +312,7 @@ alternative_else_nop_endif
 
 #ifdef CONFIG_ARM64_PSEUDO_NMI
 	/* Save pmr */
-alternative_if ARM64_HAS_IRQ_PRIO_MASKING
+alternative_if ARM64_HAS_GIC_PRIO_MASKING
 	mrs_s	x20, SYS_ICC_PMR_EL1
 	str	x20, [sp, #S_PMR_SAVE]
 	mov	x20, #GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET
@@ -337,7 +337,7 @@ alternative_else_nop_endif
 
 #ifdef CONFIG_ARM64_PSEUDO_NMI
 	/* Restore pmr */
-alternative_if ARM64_HAS_IRQ_PRIO_MASKING
+alternative_if ARM64_HAS_GIC_PRIO_MASKING
 	ldr	x20, [sp, #S_PMR_SAVE]
 	msr_s	SYS_ICC_PMR_EL1, x20
 	mrs_s	x21, SYS_ICC_CTLR_EL1
diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
index 373eb148498e..c993d43624b3 100644
--- a/arch/arm64/tools/cpucaps
+++ b/arch/arm64/tools/cpucaps
@@ -29,7 +29,7 @@ HAS_GENERIC_AUTH_ARCH_QARMA3
 HAS_GENERIC_AUTH_ARCH_QARMA5
 HAS_GENERIC_AUTH_IMP_DEF
 HAS_GIC_CPUIF_SYSREGS
-HAS_IRQ_PRIO_MASKING
+HAS_GIC_PRIO_MASKING
 HAS_LDAPR
 HAS_LSE_ATOMICS
 HAS_NO_FPSIMD
--
cgit v1.2.3

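The ALTERNATIVE() sites renamed above all encode the same choice: when the (now
renamed) ARM64_HAS_GIC_PRIO_MASKING cap is set, the DAIF-based sequences are
patched into ICC_PMR_EL1 accesses. As a rough, hypothetical C-level sketch of
what arch_local_irq_enable() reduces to under each configuration (the function
name below is invented for illustration; the real code is the runtime-patched
inline assembly shown in the diff above, and the trailing pmr_sync() barrier is
omitted here):

  /* Hypothetical sketch only; the kernel uses runtime-patched inline asm. */
  static inline void local_irq_enable_sketch(void)
  {
  	if (system_uses_irq_prio_masking())
  		/* pseudo-NMI build + ARM64_HAS_GIC_PRIO_MASKING: unmask via the PMR */
  		write_sysreg_s(GIC_PRIO_IRQON, SYS_ICC_PMR_EL1);
  	else
  		/* default: clear DAIF.I and DAIF.F */
  		asm volatile("msr daifclr, #3" ::: "memory");
  }
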
From 8bf0a8048b155eebc05aa8896f4c378a4c538214 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Mon, 30 Jan 2023 14:54:28 +0000
Subject: arm64: add ARM64_HAS_GIC_PRIO_RELAXED_SYNC cpucap

When Priority Mask Hint Enable (PMHE) == 0b1, the GIC may use the PMR
value to determine whether to signal an IRQ to a PE, and consequently
after a change to the PMR value, a DSB SY may be required to ensure
that interrupts are signalled to a CPU in finite time. When
PMHE == 0b0, interrupts are always signalled to the relevant PE, and
all masking occurs locally, without requiring a DSB SY.

Since commit:

  f226650494c6aa87 ("arm64: Relax ICC_PMR_EL1 accesses when ICC_CTLR_EL1.PMHE is clear")

... we handle this dynamically: in most cases a static key is used to
determine whether to issue a DSB SY, but the entry code must read from
ICC_CTLR_EL1 as static keys aren't accessible from plain assembly.

It would be much nicer to use an alternative instruction sequence for
the DSB, as this would avoid the need to read from ICC_CTLR_EL1 in the
entry code, and for most other code this will result in simpler code
generation with fewer instructions and fewer branches.

This patch adds a new ARM64_HAS_GIC_PRIO_RELAXED_SYNC cpucap which is
only set when ICC_CTLR_EL1.PMHE == 0b0 (and GIC priority masking is in
use). This allows us to replace the existing users of the
`gic_pmr_sync` static key with alternative sequences which default to a
DSB SY and are relaxed to a NOP when PMHE is not in use.

The entry assembly management of the PMR is slightly restructured to
use a branch (rather than multiple NOPs) when priority masking is not
in use. This is more in keeping with other alternatives in the entry
assembly, and permits the use of a separate alternative for the
PMHE-dependent DSB SY (and removal of the conditional branch this
currently requires). For consistency I've adjusted both the save and
restore paths.

According to bloat-o-meter, when building defconfig +
CONFIG_ARM64_PSEUDO_NMI=y this shrinks the kernel text by ~4KiB:

| add/remove: 4/2 grow/shrink: 42/310 up/down: 332/-5032 (-4700)

The resulting vmlinux is ~66KiB smaller, though the resulting Image
size is unchanged due to padding and alignment:

| [mark@lakrids:~/src/linux]% ls -al vmlinux-*
| -rwxr-xr-x 1 mark mark 137508344 Jan 17 14:11 vmlinux-after
| -rwxr-xr-x 1 mark mark 137575440 Jan 17 13:49 vmlinux-before
| [mark@lakrids:~/src/linux]% ls -al Image-*
| -rw-r--r-- 1 mark mark 38777344 Jan 17 14:11 Image-after
| -rw-r--r-- 1 mark mark 38777344 Jan 17 13:49 Image-before

Prior to this patch we did not verify the state of ICC_CTLR_EL1.PMHE
on secondary CPUs. As of this patch this is verified by the cpufeature
code when using GIC priority masking (i.e. when using pseudo-NMIs).

Note that since commit:

  7e3a57fa6ca831fa ("arm64: Document ICC_CTLR_EL3.PMHE setting requirements")

... Documentation/arm64/booting.rst specifies:

| - ICC_CTLR_EL3.PMHE (bit 6) must be set to the same value across
|   all CPUs the kernel is executing on, and must stay constant
|   for the lifetime of the kernel.

... so that should not adversely affect any compliant systems, and as
we'll only check for the absence of PMHE when using pseudo-NMIs, this
will only fire when such a mismatch would adversely affect the system.

Signed-off-by: Mark Rutland
Reviewed-by: Marc Zyngier
Cc: Mark Brown
Cc: Will Deacon
Link: https://lore.kernel.org/r/20230130145429.903791-5-mark.rutland@arm.com
Signed-off-by: Catalin Marinas
---
 arch/arm/include/asm/arch_gicv3.h   |  5 +++++
 arch/arm64/include/asm/arch_gicv3.h |  5 +++++
 arch/arm64/include/asm/barrier.h    | 11 +++++++----
 arch/arm64/kernel/cpufeature.c      | 36 ++++++++++++++++++++++++++++++++++++
 arch/arm64/kernel/entry.S           | 25 ++++++++++++++++---------
 arch/arm64/kernel/image-vars.h      |  2 --
 arch/arm64/tools/cpucaps            |  1 +
 drivers/irqchip/irq-gic-v3.c        | 19 +------------------
 8 files changed, 71 insertions(+), 33 deletions(-)

(limited to 'arch/arm64/tools/cpucaps')

diff --git a/arch/arm/include/asm/arch_gicv3.h b/arch/arm/include/asm/arch_gicv3.h
index f82a819eb0db..311e83038bdb 100644
--- a/arch/arm/include/asm/arch_gicv3.h
+++ b/arch/arm/include/asm/arch_gicv3.h
@@ -252,5 +252,10 @@ static inline void gic_arch_enable_irqs(void)
 	WARN_ON_ONCE(true);
 }
 
+static inline bool gic_has_relaxed_pmr_sync(void)
+{
+	return false;
+}
+
 #endif /* !__ASSEMBLY__ */
 #endif /* !__ASM_ARCH_GICV3_H */
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index 48d4473e8eee..01281a5336cf 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -190,5 +190,10 @@ static inline void gic_arch_enable_irqs(void)
 	asm volatile ("msr daifclr, #3" : : : "memory");
 }
 
+static inline bool gic_has_relaxed_pmr_sync(void)
+{
+	return cpus_have_cap(ARM64_HAS_GIC_PRIO_RELAXED_SYNC);
+}
+
 #endif /* __ASSEMBLY__ */
 #endif /* __ASM_ARCH_GICV3_H */
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 2cfc4245d2e2..3dd8982a9ce3 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -11,6 +11,8 @@
 
 #include <linux/kasan-checks.h>
 
+#include <asm/alternative-macros.h>
+
 #define __nops(n)	".rept	" #n "\nnop\n.endr\n"
 #define nops(n)		asm volatile(__nops(n))
 
@@ -41,10 +43,11 @@
 #ifdef CONFIG_ARM64_PSEUDO_NMI
 #define pmr_sync()						\
 do {								\
-	extern struct static_key_false gic_pmr_sync;		\
-								\
-	if (static_branch_unlikely(&gic_pmr_sync))		\
-		dsb(sy);					\
+	asm volatile(						\
+		ALTERNATIVE_CB("dsb sy",			\
+			       ARM64_HAS_GIC_PRIO_RELAXED_SYNC,	\
+			       alt_cb_patch_nops)		\
+	);							\
 } while(0)
 #else
 #define pmr_sync()	do {} while (0)
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 515975f42d03..bdabfc98226a 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -2056,6 +2056,34 @@ static bool can_use_gic_priorities(const struct arm64_cpu_capabilities *entry,
 
 	return enable_pseudo_nmi;
 }
+
+static bool has_gic_prio_relaxed_sync(const struct arm64_cpu_capabilities *entry,
+				      int scope)
+{
+	/*
+	 * If we're not using priority masking then we won't be poking PMR_EL1,
+	 * and there's no need to relax synchronization of writes to it, and
+	 * ICC_CTLR_EL1 might not be accessible and we must avoid reads from
+	 * that.
+	 *
+	 * ARM64_HAS_GIC_PRIO_MASKING has a lower index, and is a boot CPU
+	 * feature, so will be detected earlier.
+	 */
+	BUILD_BUG_ON(ARM64_HAS_GIC_PRIO_RELAXED_SYNC <= ARM64_HAS_GIC_PRIO_MASKING);
+	if (!cpus_have_cap(ARM64_HAS_GIC_PRIO_MASKING))
+		return false;
+
+	/*
+	 * When Priority Mask Hint Enable (PMHE) == 0b0, PMR is not used as a
+	 * hint for interrupt distribution, a DSB is not necessary when
+	 * unmasking IRQs via PMR, and we can relax the barrier to a NOP.
+	 *
+	 * Linux itself doesn't use 1:N distribution, so has no need to
+	 * set PMHE. The only reason to have it set is if EL3 requires it
+	 * (and we can't change it).
+	 */
+	return (gic_read_ctlr() & ICC_CTLR_EL1_PMHE_MASK) == 0;
+}
 #endif
 
 #ifdef CONFIG_ARM64_BTI
@@ -2546,6 +2574,14 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
 		.matches = can_use_gic_priorities,
 	},
+	{
+		/*
+		 * Depends on ARM64_HAS_GIC_PRIO_MASKING
+		 */
+		.capability = ARM64_HAS_GIC_PRIO_RELAXED_SYNC,
+		.type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
+		.matches = has_gic_prio_relaxed_sync,
+	},
 #endif
 #ifdef CONFIG_ARM64_E0PD
 	{
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index e2d1d3d5de1d..8427cdc0cfcb 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -311,13 +311,16 @@ alternative_else_nop_endif
 	.endif
 
 #ifdef CONFIG_ARM64_PSEUDO_NMI
-	/* Save pmr */
-alternative_if ARM64_HAS_GIC_PRIO_MASKING
+alternative_if_not ARM64_HAS_GIC_PRIO_MASKING
+	b	.Lskip_pmr_save\@
+alternative_else_nop_endif
+
 	mrs_s	x20, SYS_ICC_PMR_EL1
 	str	x20, [sp, #S_PMR_SAVE]
 	mov	x20, #GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET
 	msr_s	SYS_ICC_PMR_EL1, x20
-alternative_else_nop_endif
+
+.Lskip_pmr_save\@:
 #endif
 
 	/*
@@ -336,15 +339,19 @@ alternative_else_nop_endif
 	.endif
 
 #ifdef CONFIG_ARM64_PSEUDO_NMI
-	/* Restore pmr */
-alternative_if ARM64_HAS_GIC_PRIO_MASKING
+alternative_if_not ARM64_HAS_GIC_PRIO_MASKING
+	b	.Lskip_pmr_restore\@
+alternative_else_nop_endif
+
 	ldr	x20, [sp, #S_PMR_SAVE]
 	msr_s	SYS_ICC_PMR_EL1, x20
-	mrs_s	x21, SYS_ICC_CTLR_EL1
-	tbz	x21, #6, .L__skip_pmr_sync\@	// Check for ICC_CTLR_EL1.PMHE
-	dsb	sy				// Ensure priority change is seen by redistributor
-.L__skip_pmr_sync\@:
+
+	/* Ensure priority change is seen by redistributor */
+alternative_if_not ARM64_HAS_GIC_PRIO_RELAXED_SYNC
+	dsb	sy
 alternative_else_nop_endif
+
+.Lskip_pmr_restore\@:
 #endif
 
 	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index d0e9bb5c91fc..97e750a35f70 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -67,9 +67,7 @@ KVM_NVHE_ALIAS(__hyp_stub_vectors);
 KVM_NVHE_ALIAS(vgic_v2_cpuif_trap);
 KVM_NVHE_ALIAS(vgic_v3_cpuif_trap);
 
-/* Static key checked in pmr_sync(). */
 #ifdef CONFIG_ARM64_PSEUDO_NMI
-KVM_NVHE_ALIAS(gic_pmr_sync);
 /* Static key checked in GIC_PRIO_IRQOFF. */
 KVM_NVHE_ALIAS(gic_nonsecure_priorities);
 #endif
diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
index c993d43624b3..10ce8f88f86b 100644
--- a/arch/arm64/tools/cpucaps
+++ b/arch/arm64/tools/cpucaps
@@ -30,6 +30,7 @@ HAS_GENERIC_AUTH_ARCH_QARMA5
 HAS_GENERIC_AUTH_IMP_DEF
 HAS_GIC_CPUIF_SYSREGS
 HAS_GIC_PRIO_MASKING
+HAS_GIC_PRIO_RELAXED_SYNC
 HAS_LDAPR
 HAS_LSE_ATOMICS
 HAS_NO_FPSIMD
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 997104d4338e..3779836737c8 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -89,15 +89,6 @@ static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);
  */
 static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis);
 
-/*
- * Global static key controlling whether an update to PMR allowing more
- * interrupts requires to be propagated to the redistributor (DSB SY).
- * And this needs to be exported for modules to be able to enable
- * interrupts...
- */
-DEFINE_STATIC_KEY_FALSE(gic_pmr_sync);
-EXPORT_SYMBOL(gic_pmr_sync);
-
 DEFINE_STATIC_KEY_FALSE(gic_nonsecure_priorities);
 EXPORT_SYMBOL(gic_nonsecure_priorities);
 
@@ -1768,16 +1759,8 @@ static void gic_enable_nmi_support(void)
 	for (i = 0; i < gic_data.ppi_nr; i++)
 		refcount_set(&ppi_nmi_refs[i], 0);
 
-	/*
-	 * Linux itself doesn't use 1:N distribution, so has no need to
-	 * set PMHE. The only reason to have it set is if EL3 requires it
-	 * (and we can't change it).
-	 */
-	if (gic_read_ctlr() & ICC_CTLR_EL1_PMHE_MASK)
-		static_branch_enable(&gic_pmr_sync);
-
 	pr_info("Pseudo-NMIs enabled using %s ICC_PMR_EL1 synchronisation\n",
-		static_branch_unlikely(&gic_pmr_sync) ? "forced" : "relaxed");
+		gic_has_relaxed_pmr_sync() ? "relaxed" : "forced");
 
 	/*
 	 * How priority values are used by the GIC depends on two things:
--
cgit v1.2.3
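
The net effect of the last patch is that pmr_sync() no longer consults the
gic_pmr_sync static key: it is now an alternative that defaults to DSB SY and is
patched to NOPs once ARM64_HAS_GIC_PRIO_RELAXED_SYNC (i.e. ICC_CTLR_EL1.PMHE == 0)
has been detected. A rough, hypothetical C-level sketch of the resulting behaviour
(the function name is invented for illustration; the real implementation is the
patched inline assembly in barrier.h shown above):

  /* Hypothetical sketch only; the real pmr_sync() is runtime-patched inline asm. */
  static inline void pmr_sync_sketch(void)
  {
  	if (!IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI))
  		return;		/* pmr_sync() compiles to nothing */

  	if (!cpus_have_cap(ARM64_HAS_GIC_PRIO_RELAXED_SYNC))
  		dsb(sy);	/* PMHE == 1: the GIC may consult the PMR, so synchronise */
  	/* PMHE == 0: masking is purely CPU-local, the DSB relaxes to NOPs */
  }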