From c87a31093c707eb0b8c48aab89922c1d0bf4bd90 Mon Sep 17 00:00:00 2001
From: Kan Liang
Date: Wed, 4 Jan 2023 12:13:43 -0800
Subject: perf/x86: Support Retire Latency

Retire Latency reports the number of elapsed core clocks between the
retirement of the instruction indicated by the Instruction Pointer field
of the PEBS record and the retirement of the prior instruction. It is
enumerated by IA32_PERF_CAPABILITIES.PEBS_TIMING_INFO[17].

Add the flag PMU_FL_RETIRE_LATENCY to indicate the availability of the
feature.

Retire Latency is not supported by fixed counter 0 on the p-core of MTL.

Signed-off-by: Kan Liang
Signed-off-by: Ingo Molnar
Reviewed-by: Andi Kleen
Acked-by: Peter Zijlstra
Link: https://lore.kernel.org/r/20230104201349.1451191-3-kan.liang@linux.intel.com
---
 arch/x86/events/intel/core.c | 32 +++++++++++++++++++++++++++++++-
 arch/x86/events/intel/ds.c   |  4 ++++
 2 files changed, 35 insertions(+), 1 deletion(-)

(limited to 'arch/x86/events/intel')

diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index d2030be04e4a..a5678ab6d3e3 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -4210,6 +4210,9 @@ static struct event_constraint fixed0_counter0_constraint =
 static struct event_constraint fixed0_counter0_1_constraint =
 			INTEL_ALL_EVENT_CONSTRAINT(0, 0x100000003ULL);
 
+static struct event_constraint counters_1_7_constraint =
+			INTEL_ALL_EVENT_CONSTRAINT(0, 0xfeULL);
+
 static struct event_constraint *
 hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
 			  struct perf_event *event)
@@ -4374,6 +4377,30 @@ cmt_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
 	return c;
 }
 
+static struct event_constraint *
+rwc_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+			  struct perf_event *event)
+{
+	struct event_constraint *c;
+
+	c = spr_get_event_constraints(cpuc, idx, event);
+
+	/* The Retire Latency is not supported by the fixed counter 0. */
+	if (event->attr.precise_ip &&
+	    (event->attr.sample_type & PERF_SAMPLE_WEIGHT_TYPE) &&
+	    constraint_match(&fixed0_constraint, event->hw.config)) {
+		/*
+		 * The Instruction PDIR is only available
+		 * on the fixed counter 0. Error out for this case.
+		 */
+		if (event->attr.precise_ip == 3)
+			return &emptyconstraint;
+		return &counters_1_7_constraint;
+	}
+
+	return c;
+}
+
 static struct event_constraint *
 mtl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
 			  struct perf_event *event)
@@ -4381,7 +4408,7 @@ mtl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
 	struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
 
 	if (pmu->cpu_type == hybrid_big)
-		return spr_get_event_constraints(cpuc, idx, event);
+		return rwc_get_event_constraints(cpuc, idx, event);
 	if (pmu->cpu_type == hybrid_small)
 		return cmt_get_event_constraints(cpuc, idx, event);
 
@@ -6718,6 +6745,9 @@ __init int intel_pmu_init(void)
 	if (is_hybrid())
 		intel_pmu_check_hybrid_pmus((u64)fixed_mask);
 
+	if (x86_pmu.intel_cap.pebs_timing_info)
+		x86_pmu.flags |= PMU_FL_RETIRE_LATENCY;
+
 	intel_aux_output_init();
 
 	return 0;
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index e991c54916d1..6ec326b47e2e 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1753,6 +1753,7 @@ static void adaptive_pebs_save_regs(struct pt_regs *regs,
 
 #define PEBS_LATENCY_MASK			0xffff
 #define PEBS_CACHE_LATENCY_OFFSET		32
+#define PEBS_RETIRE_LATENCY_OFFSET		32
 
 /*
  * With adaptive PEBS the layout depends on what fields are configured.
@@ -1804,6 +1805,9 @@ static void setup_pebs_adaptive_sample_data(struct perf_event *event,
 	set_linear_ip(regs, basic->ip);
 	regs->flags = PERF_EFLAGS_EXACT;
 
+	if ((sample_type & PERF_SAMPLE_WEIGHT_STRUCT) && (x86_pmu.flags & PMU_FL_RETIRE_LATENCY))
+		data->weight.var3_w = format_size >> PEBS_RETIRE_LATENCY_OFFSET & PEBS_LATENCY_MASK;
+
 	/*
 	 * The record for MEMINFO is in front of GP
 	 * But PERF_SAMPLE_TRANSACTION needs gprs->ax.
--
cgit v1.2.3
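
For context, the retire latency reaches user space through the PERF_SAMPLE_WEIGHT_STRUCT
sample payload: the value written to data->weight.var3_w above is returned in the var3_w
field of union perf_sample_weight. Below is a minimal, hypothetical user-space sketch
(not part of this patch): the event choice and sample period are arbitrary, error handling
is reduced, and the mmap ring-buffer parsing needed to actually read the samples is omitted.

/*
 * Hypothetical consumer sketch: request PEBS samples whose weight payload
 * carries the Retire Latency on a kernel with PMU_FL_RETIRE_LATENCY set.
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.sample_period = 100003;
	attr.disabled = 1;
	attr.exclude_kernel = 1;
	/*
	 * PEBS is required for the timing info.  precise_ip == 3 (PDIR)
	 * together with a weight sample is rejected by
	 * rwc_get_event_constraints() above, so ask for precise_ip = 2.
	 */
	attr.precise_ip = 2;
	attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_WEIGHT_STRUCT;

	fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	/*
	 * Each PERF_RECORD_SAMPLE in the mmap ring buffer now carries a
	 * union perf_sample_weight; weight.var3_w is the retire latency
	 * in core clocks, filled in by setup_pebs_adaptive_sample_data().
	 */
	close(fd);
	return 0;
}

On the p-core of MTL such an event is steered away from fixed counter 0 by
counters_1_7_constraint, as described in the commit message above.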