author: Kan Liang <kan.liang@linux.intel.com> 2024-06-26 17:35:34 +0300
committer: Peter Zijlstra <peterz@infradead.org> 2024-07-04 17:00:36 +0300
commit: 722e42e45c2f1c6d1adec7813651dba5139f52f4 (patch)
tree: 6110b0fd8d8877d8d9e9a9928ab5ff7d430e29a8 /arch/x86/events/perf_event.h
parent: a23eb2fc1d818cdac9b31f032842d55483a6a040 (diff)
download: linux-722e42e45c2f1c6d1adec7813651dba5139f52f4.tar.xz
perf/x86: Support counter mask
The current perf code assumes that both GP and fixed counters are contiguous, but that is not guaranteed on newer Intel platforms or in a virtualization environment.

Use a counter mask instead of the number of counters, for both the GP and the fixed counters. For the other architectures and older platforms which don't support a counter mask, use GENMASK_ULL(num_counter - 1, 0) instead; there is no functional change for them.

The interface to KVM is not changed: the number of counters is still passed to KVM. It can be updated separately later.

Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Reviewed-by: Ian Rogers <irogers@google.com>
Link: https://lkml.kernel.org/r/20240626143545.480761-3-kan.liang@linux.intel.com
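As a minimal standalone sketch of the mask convention described above (a simplified GENMASK_ULL and illustrative local variable names, not the kernel's actual setup code):

/*
 * Userspace sketch: build a contiguous counter mask from a counter
 * count, as done for platforms that don't report a counter mask.
 * GENMASK_ULL here is a simplified stand-in for include/linux/bits.h.
 */
#include <stdio.h>
#include <stdint.h>

#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

int main(void)
{
	/* Hypothetical legacy description: 8 contiguous GP, 3 fixed counters. */
	int num_counters = 8, num_counters_fixed = 3;

	uint64_t cntr_mask64       = GENMASK_ULL(num_counters - 1, 0);
	uint64_t fixed_cntr_mask64 = GENMASK_ULL(num_counters_fixed - 1, 0);

	printf("GP mask    = %#llx\n", (unsigned long long)cntr_mask64);       /* 0xff */
	printf("fixed mask = %#llx\n", (unsigned long long)fixed_cntr_mask64); /* 0x7  */
	return 0;
}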
Diffstat (limited to 'arch/x86/events/perf_event.h')
-rw-r--r--  arch/x86/events/perf_event.h  47
1 file changed, 39 insertions(+), 8 deletions(-)
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index a7ba2868018c..745c174fc880 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -685,8 +685,14 @@ struct x86_hybrid_pmu {
union perf_capabilities intel_cap;
u64 intel_ctrl;
u64 pebs_events_mask;
- int num_counters;
- int num_counters_fixed;
+ union {
+ u64 cntr_mask64;
+ unsigned long cntr_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+ };
+ union {
+ u64 fixed_cntr_mask64;
+ unsigned long fixed_cntr_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+ };
struct event_constraint unconstrained;
u64 hw_cache_event_ids
@@ -774,8 +780,14 @@ struct x86_pmu {
int (*rdpmc_index)(int index);
u64 (*event_map)(int);
int max_events;
- int num_counters;
- int num_counters_fixed;
+ union {
+ u64 cntr_mask64;
+ unsigned long cntr_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+ };
+ union {
+ u64 fixed_cntr_mask64;
+ unsigned long fixed_cntr_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+ };
int cntval_bits;
u64 cntval_mask;
union {
@@ -1125,8 +1137,8 @@ static inline int x86_pmu_rdpmc_index(int index)
return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index;
}
-bool check_hw_exists(struct pmu *pmu, int num_counters,
- int num_counters_fixed);
+bool check_hw_exists(struct pmu *pmu, unsigned long *cntr_mask,
+ unsigned long *fixed_cntr_mask);
int x86_add_exclusive(unsigned int what);
@@ -1197,8 +1209,27 @@ void x86_pmu_enable_event(struct perf_event *event);
int x86_pmu_handle_irq(struct pt_regs *regs);
-void x86_pmu_show_pmu_cap(int num_counters, int num_counters_fixed,
- u64 intel_ctrl);
+void x86_pmu_show_pmu_cap(struct pmu *pmu);
+
+static inline int x86_pmu_num_counters(struct pmu *pmu)
+{
+ return hweight64(hybrid(pmu, cntr_mask64));
+}
+
+static inline int x86_pmu_max_num_counters(struct pmu *pmu)
+{
+ return fls64(hybrid(pmu, cntr_mask64));
+}
+
+static inline int x86_pmu_num_counters_fixed(struct pmu *pmu)
+{
+ return hweight64(hybrid(pmu, fixed_cntr_mask64));
+}
+
+static inline int x86_pmu_max_num_counters_fixed(struct pmu *pmu)
+{
+ return fls64(hybrid(pmu, fixed_cntr_mask64));
+}
extern struct event_constraint emptyconstraint;
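The new helpers recover counter counts from a possibly sparse mask: hweight64() gives the number of counters actually present, while fls64() gives the highest counter index plus one (useful as a loop bound). A small userspace sketch of that behaviour, with __builtin_popcountll()/__builtin_clzll() standing in for the kernel's hweight64()/fls64() and a hypothetical sparse mask value:

/*
 * Userspace sketch of how the new x86_pmu_*num_counters*() helpers
 * read a (possibly sparse) counter mask.
 */
#include <stdio.h>
#include <stdint.h>

static int hweight64(uint64_t v) { return __builtin_popcountll(v); }
static int fls64(uint64_t v)     { return v ? 64 - __builtin_clzll(v) : 0; }

int main(void)
{
	/* Hypothetical sparse GP mask: counters 0, 2 and 3 exist, counter 1 does not. */
	uint64_t cntr_mask64 = 0xdULL;

	/* x86_pmu_num_counters(): how many counters are actually available. */
	printf("num counters = %d\n", hweight64(cntr_mask64));  /* 3 */

	/* x86_pmu_max_num_counters(): highest counter index + 1. */
	printf("max counters = %d\n", fls64(cntr_mask64));      /* 4 */
	return 0;
}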