author    Marc Zyngier <marc.zyngier@arm.com>    2023-03-17 22:50:21 +0300
committer Will Deacon <will@kernel.org>          2023-03-27 16:01:18 +0300
commit    df29ddf4f04b00cf60669686dc7f534c3ed7c433 (patch)
tree      ce097f499cefb922b04ab3c115894b81b8b9916b /drivers/perf/arm_pmuv3.c
parent    7755cec63adeecea3cbbf4032047812c37cf7cc3 (diff)
arm64: perf: Abstract system register accesses away
As we want to enable 32-bit support, we need to decouple the PMUv3 driver from the AArch64 system register names. This patch moves all system register accesses into an architecture-specific include file, allowing the 32-bit counterpart to be slotted in at a later time.

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Co-developed-by: Zaid Al-Bassam <zalbassam@google.com>
Signed-off-by: Zaid Al-Bassam <zalbassam@google.com>
Tested-by: Florian Fainelli <f.fainelli@gmail.com>
Link: https://lore.kernel.org/r/20230317195027.3746949-3-zalbassam@google.com
Signed-off-by: Will Deacon <will@kernel.org>
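[Editor's note: the new <asm/arm_pmuv3.h> header itself is not part of this hunk; the diff only shows its call sites. The following is a minimal sketch of what the AArch64 side of such a header could look like, with the helper bodies inferred from the read_sysreg()/write_sysreg() accesses they replace below.]

/*
 * Sketch (not from this patch): plausible AArch64 side of
 * <asm/arm_pmuv3.h>. Bodies are inferred from the sysreg
 * accesses each helper replaces in the diff below.
 */
#include <asm/sysreg.h>

static inline u32 read_pmcr(void)
{
	return read_sysreg(pmcr_el0);
}

static inline void write_pmcr(u32 val)
{
	write_sysreg(val, pmcr_el0);
}

static inline u64 read_pmccntr(void)
{
	return read_sysreg(pmccntr_el0);
}

static inline void write_pmccntr(u64 val)
{
	write_sysreg(val, pmccntr_el0);
}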
Diffstat (limited to 'drivers/perf/arm_pmuv3.c')
-rw-r--r--  drivers/perf/arm_pmuv3.c | 115 +-
1 file changed, 23 insertions(+), 92 deletions(-)
diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c
index c25203f8b940..f783f068d612 100644
--- a/drivers/perf/arm_pmuv3.c
+++ b/drivers/perf/arm_pmuv3.c
@@ -10,7 +10,6 @@
#include <asm/irq_regs.h>
#include <asm/perf_event.h>
-#include <asm/sysreg.h>
#include <asm/virt.h>
#include <clocksource/arm_arch_timer.h>
@@ -25,6 +24,8 @@
#include <linux/sched_clock.h>
#include <linux/smp.h>
+#include <asm/arm_pmuv3.h>
+
/* ARMv8 Cortex-A53 specific event types. */
#define ARMV8_A53_PERFCTR_PREF_LINEFILL 0xC2
@@ -425,83 +426,16 @@ static inline bool armv8pmu_event_is_chained(struct perf_event *event)
#define ARMV8_IDX_TO_COUNTER(x) \
(((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK)
-/*
- * This code is really good
- */
-
-#define PMEVN_CASE(n, case_macro) \
- case n: case_macro(n); break
-
-#define PMEVN_SWITCH(x, case_macro) \
- do { \
- switch (x) { \
- PMEVN_CASE(0, case_macro); \
- PMEVN_CASE(1, case_macro); \
- PMEVN_CASE(2, case_macro); \
- PMEVN_CASE(3, case_macro); \
- PMEVN_CASE(4, case_macro); \
- PMEVN_CASE(5, case_macro); \
- PMEVN_CASE(6, case_macro); \
- PMEVN_CASE(7, case_macro); \
- PMEVN_CASE(8, case_macro); \
- PMEVN_CASE(9, case_macro); \
- PMEVN_CASE(10, case_macro); \
- PMEVN_CASE(11, case_macro); \
- PMEVN_CASE(12, case_macro); \
- PMEVN_CASE(13, case_macro); \
- PMEVN_CASE(14, case_macro); \
- PMEVN_CASE(15, case_macro); \
- PMEVN_CASE(16, case_macro); \
- PMEVN_CASE(17, case_macro); \
- PMEVN_CASE(18, case_macro); \
- PMEVN_CASE(19, case_macro); \
- PMEVN_CASE(20, case_macro); \
- PMEVN_CASE(21, case_macro); \
- PMEVN_CASE(22, case_macro); \
- PMEVN_CASE(23, case_macro); \
- PMEVN_CASE(24, case_macro); \
- PMEVN_CASE(25, case_macro); \
- PMEVN_CASE(26, case_macro); \
- PMEVN_CASE(27, case_macro); \
- PMEVN_CASE(28, case_macro); \
- PMEVN_CASE(29, case_macro); \
- PMEVN_CASE(30, case_macro); \
- default: WARN(1, "Invalid PMEV* index\n"); \
- } \
- } while (0)
-
-#define RETURN_READ_PMEVCNTRN(n) \
- return read_sysreg(pmevcntr##n##_el0)
-static unsigned long read_pmevcntrn(int n)
-{
- PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
- return 0;
-}
-
-#define WRITE_PMEVCNTRN(n) \
- write_sysreg(val, pmevcntr##n##_el0)
-static void write_pmevcntrn(int n, unsigned long val)
-{
- PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
-}
-
-#define WRITE_PMEVTYPERN(n) \
- write_sysreg(val, pmevtyper##n##_el0)
-static void write_pmevtypern(int n, unsigned long val)
-{
- PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
-}
-
static inline u32 armv8pmu_pmcr_read(void)
{
- return read_sysreg(pmcr_el0);
+ return read_pmcr();
}
static inline void armv8pmu_pmcr_write(u32 val)
{
val &= ARMV8_PMU_PMCR_MASK;
isb();
- write_sysreg(val, pmcr_el0);
+ write_pmcr(val);
}
static inline int armv8pmu_has_overflowed(u32 pmovsr)
@@ -576,7 +510,7 @@ static u64 armv8pmu_read_counter(struct perf_event *event)
u64 value;
if (idx == ARMV8_IDX_CYCLE_COUNTER)
- value = read_sysreg(pmccntr_el0);
+ value = read_pmccntr();
else
value = armv8pmu_read_hw_counter(event);
@@ -611,7 +545,7 @@ static void armv8pmu_write_counter(struct perf_event *event, u64 value)
value = armv8pmu_bias_long_counter(event, value);
if (idx == ARMV8_IDX_CYCLE_COUNTER)
- write_sysreg(value, pmccntr_el0);
+ write_pmccntr(value);
else
armv8pmu_write_hw_counter(event, value);
}
@@ -642,7 +576,7 @@ static inline void armv8pmu_write_event_type(struct perf_event *event)
armv8pmu_write_evtype(idx, chain_evt);
} else {
if (idx == ARMV8_IDX_CYCLE_COUNTER)
- write_sysreg(hwc->config_base, pmccfiltr_el0);
+ write_pmccfiltr(hwc->config_base);
else
armv8pmu_write_evtype(idx, hwc->config_base);
}
@@ -665,7 +599,7 @@ static inline void armv8pmu_enable_counter(u32 mask)
* enable the counter.
* */
isb();
- write_sysreg(mask, pmcntenset_el0);
+ write_pmcntenset(mask);
}
static inline void armv8pmu_enable_event_counter(struct perf_event *event)
@@ -682,7 +616,7 @@ static inline void armv8pmu_enable_event_counter(struct perf_event *event)
static inline void armv8pmu_disable_counter(u32 mask)
{
- write_sysreg(mask, pmcntenclr_el0);
+ write_pmcntenclr(mask);
/*
* Make sure the effects of disabling the counter are visible before we
* start configuring the event.
@@ -704,7 +638,7 @@ static inline void armv8pmu_disable_event_counter(struct perf_event *event)
static inline void armv8pmu_enable_intens(u32 mask)
{
- write_sysreg(mask, pmintenset_el1);
+ write_pmintenset(mask);
}
static inline void armv8pmu_enable_event_irq(struct perf_event *event)
@@ -715,10 +649,10 @@ static inline void armv8pmu_enable_event_irq(struct perf_event *event)
static inline void armv8pmu_disable_intens(u32 mask)
{
- write_sysreg(mask, pmintenclr_el1);
+ write_pmintenclr(mask);
isb();
/* Clear the overflow flag in case an interrupt is pending. */
- write_sysreg(mask, pmovsclr_el0);
+ write_pmovsclr(mask);
isb();
}
@@ -733,18 +667,18 @@ static inline u32 armv8pmu_getreset_flags(void)
u32 value;
/* Read */
- value = read_sysreg(pmovsclr_el0);
+ value = read_pmovsclr();
/* Write to clear flags */
value &= ARMV8_PMU_OVSR_MASK;
- write_sysreg(value, pmovsclr_el0);
+ write_pmovsclr(value);
return value;
}
static void armv8pmu_disable_user_access(void)
{
- write_sysreg(0, pmuserenr_el0);
+ write_pmuserenr(0);
}
static void armv8pmu_enable_user_access(struct arm_pmu *cpu_pmu)
@@ -755,13 +689,13 @@ static void armv8pmu_enable_user_access(struct arm_pmu *cpu_pmu)
/* Clear any unused counters to avoid leaking their contents */
for_each_clear_bit(i, cpuc->used_mask, cpu_pmu->num_events) {
if (i == ARMV8_IDX_CYCLE_COUNTER)
- write_sysreg(0, pmccntr_el0);
+ write_pmccntr(0);
else
armv8pmu_write_evcntr(i, 0);
}
- write_sysreg(0, pmuserenr_el0);
- write_sysreg(ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_CR, pmuserenr_el0);
+ write_pmuserenr(0);
+ write_pmuserenr(ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_CR);
}
static void armv8pmu_enable_event(struct perf_event *event)
@@ -1145,14 +1079,11 @@ static void __armv8pmu_probe_pmu(void *info)
{
struct armv8pmu_probe_info *probe = info;
struct arm_pmu *cpu_pmu = probe->pmu;
- u64 dfr0;
u64 pmceid_raw[2];
u32 pmceid[2];
int pmuver;
- dfr0 = read_sysreg(id_aa64dfr0_el1);
- pmuver = cpuid_feature_extract_unsigned_field(dfr0,
- ID_AA64DFR0_EL1_PMUVer_SHIFT);
+ pmuver = read_pmuver();
if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF ||
pmuver == ID_AA64DFR0_EL1_PMUVer_NI)
return;
@@ -1167,8 +1098,8 @@ static void __armv8pmu_probe_pmu(void *info)
/* Add the CPU cycles counter */
cpu_pmu->num_events += 1;
- pmceid[0] = pmceid_raw[0] = read_sysreg(pmceid0_el0);
- pmceid[1] = pmceid_raw[1] = read_sysreg(pmceid1_el0);
+ pmceid[0] = pmceid_raw[0] = read_pmceid0();
+ pmceid[1] = pmceid_raw[1] = read_pmceid1();
bitmap_from_arr32(cpu_pmu->pmceid_bitmap,
pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);
@@ -1179,9 +1110,9 @@ static void __armv8pmu_probe_pmu(void *info)
bitmap_from_arr32(cpu_pmu->pmceid_ext_bitmap,
pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);
- /* store PMMIR_EL1 register for sysfs */
+ /* store PMMIR register for sysfs */
if (pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P4 && (pmceid_raw[1] & BIT(31)))
- cpu_pmu->reg_pmmir = read_cpuid(PMMIR_EL1);
+ cpu_pmu->reg_pmmir = read_pmmir();
else
cpu_pmu->reg_pmmir = 0;
}
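[Editor's note: the point of the indirection above is that an AArch32 build can later provide the same helper names via cp15 coprocessor accesses instead of AArch64 system register names. Purely as an illustration (this patch does not add the 32-bit side), the PMCR accessors on AArch32 could look like the following; the cp15 encoding shown is the architectural one for PMCR.]

/* Illustrative only: on AArch32, PMCR is accessed via cp15, c9, c12, 0. */
static inline u32 read_pmcr(void)
{
	u32 val;

	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
	return val;
}

static inline void write_pmcr(u32 val)
{
	asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r" (val));
}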