From 0d6d062ca27ec7ef547712d34dcfcfb952bcef53 Mon Sep 17 00:00:00 2001
From: Ravi Bangoria
Date: Thu, 4 May 2023 16:30:00 +0530
Subject: perf/core: Rework forwarding of {task|cpu}-clock events

Currently, PERF_TYPE_SOFTWARE is treated specially since task-clock and
cpu-clock events are interfaced through it but internally get forwarded
to their own pmus. Rework this by overwriting event->attr.type in
perf_swevent_init(), which will cause perf_init_event() to retry with
the updated type, and the event will automatically be forwarded to the
right pmu. With this change, the SW pmu no longer needs to be treated
specially and can be included in the 'pmu_idr' list.

Suggested-by: Peter Zijlstra
Signed-off-by: Ravi Bangoria
Signed-off-by: Peter Zijlstra (Intel)
Link: https://lkml.kernel.org/r/20230504110003.2548-2-ravi.bangoria@amd.com
---
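As a sanity check of the ABI claim above, here is a minimal userspace
sketch (an editorial example, not part of the patch; it uses only
standard uapi constants) that opens task-clock the old way -- the
in-kernel forwarding to the task_clock pmu stays invisible to the
caller:

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
        struct perf_event_attr attr;
        long fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_SOFTWARE;          /* user-visible type, unchanged */
        attr.config = PERF_COUNT_SW_TASK_CLOCK;  /* forwarded in-kernel */
        attr.disabled = 1;

        /* current task, any CPU */
        fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0) {
            perror("perf_event_open");
            return 1;
        }
        printf("task-clock opened via PERF_TYPE_SOFTWARE, fd=%ld\n", fd);
        close(fd);
        return 0;
    }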
 include/linux/perf_event.h | 10 ++++++
 kernel/events/core.c       | 77 ++++++++++++++++++++++++----------------------
 2 files changed, 51 insertions(+), 36 deletions(-)

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index d5628a7b5eaa..bf4f346d6d70 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -295,6 +295,8 @@ struct perf_event_pmu_context;

 struct perf_output_handle;

+#define PMU_NULL_DEV	((void *)(~0UL))
+
 /**
  * struct pmu - generic performance monitoring unit
  */
@@ -827,6 +829,14 @@ struct perf_event {
 	void *security;
 #endif
 	struct list_head		sb_list;
+
+	/*
+	 * Certain events gets forwarded to another pmu internally by over-
+	 * writing kernel copy of event->attr.type without user being aware
+	 * of it. event->orig_type contains original 'type' requested by
+	 * user.
+	 */
+	__u32				orig_type;
 #endif /* CONFIG_PERF_EVENTS */
 };

diff --git a/kernel/events/core.c b/kernel/events/core.c
index 68baa8194d9f..c01bbe93e291 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6647,7 +6647,7 @@ static void perf_sigtrap(struct perf_event *event)
 		return;

 	send_sig_perf((void __user *)event->pending_addr,
-		      event->attr.type, event->attr.sig_data);
+		      event->orig_type, event->attr.sig_data);
 }

 /*
@@ -9951,6 +9951,9 @@ static void sw_perf_event_destroy(struct perf_event *event)
 	swevent_hlist_put();
 }

+static struct pmu perf_cpu_clock; /* fwd declaration */
+static struct pmu perf_task_clock;
+
 static int perf_swevent_init(struct perf_event *event)
 {
 	u64 event_id = event->attr.config;
@@ -9966,7 +9969,10 @@ static int perf_swevent_init(struct perf_event *event)

 	switch (event_id) {
 	case PERF_COUNT_SW_CPU_CLOCK:
+		event->attr.type = perf_cpu_clock.type;
+		return -ENOENT;
 	case PERF_COUNT_SW_TASK_CLOCK:
+		event->attr.type = perf_task_clock.type;
 		return -ENOENT;

 	default:
@@ -11086,7 +11092,7 @@ static void cpu_clock_event_read(struct perf_event *event)

 static int cpu_clock_event_init(struct perf_event *event)
 {
-	if (event->attr.type != PERF_TYPE_SOFTWARE)
+	if (event->attr.type != perf_cpu_clock.type)
 		return -ENOENT;

 	if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
@@ -11107,6 +11113,7 @@ static struct pmu perf_cpu_clock = {
 	.task_ctx_nr	= perf_sw_context,

 	.capabilities	= PERF_PMU_CAP_NO_NMI,
+	.dev		= PMU_NULL_DEV,

 	.event_init	= cpu_clock_event_init,
 	.add		= cpu_clock_event_add,
@@ -11167,7 +11174,7 @@ static void task_clock_event_read(struct perf_event *event)

 static int task_clock_event_init(struct perf_event *event)
 {
-	if (event->attr.type != PERF_TYPE_SOFTWARE)
+	if (event->attr.type != perf_task_clock.type)
 		return -ENOENT;

 	if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
@@ -11188,6 +11195,7 @@ static struct pmu perf_task_clock = {
 	.task_ctx_nr	= perf_sw_context,

 	.capabilities	= PERF_PMU_CAP_NO_NMI,
+	.dev		= PMU_NULL_DEV,

 	.event_init	= task_clock_event_init,
 	.add		= task_clock_event_add,
@@ -11415,31 +11423,31 @@ int perf_pmu_register(struct pmu *pmu, const char *name, int type)
 		goto unlock;

 	pmu->type = -1;
-	if (!name)
-		goto skip_type;
+	if (WARN_ONCE(!name, "Can not register anonymous pmu.\n")) {
+		ret = -EINVAL;
+		goto free_pdc;
+	}
+
 	pmu->name = name;

-	if (type != PERF_TYPE_SOFTWARE) {
-		if (type >= 0)
-			max = type;
+	if (type >= 0)
+		max = type;

-		ret = idr_alloc(&pmu_idr, pmu, max, 0, GFP_KERNEL);
-		if (ret < 0)
-			goto free_pdc;
+	ret = idr_alloc(&pmu_idr, pmu, max, 0, GFP_KERNEL);
+	if (ret < 0)
+		goto free_pdc;

-		WARN_ON(type >= 0 && ret != type);
+	WARN_ON(type >= 0 && ret != type);

-		type = ret;
-	}
+	type = ret;
 	pmu->type = type;

-	if (pmu_bus_running) {
+	if (pmu_bus_running && !pmu->dev) {
 		ret = pmu_dev_alloc(pmu);
 		if (ret)
 			goto free_idr;
 	}

-skip_type:
 	ret = -ENOMEM;
 	pmu->cpu_pmu_context = alloc_percpu(struct perf_cpu_pmu_context);
 	if (!pmu->cpu_pmu_context)
@@ -11481,16 +11489,7 @@ skip_type:
 	if (!pmu->event_idx)
 		pmu->event_idx = perf_event_idx_default;

-	/*
-	 * Ensure the TYPE_SOFTWARE PMUs are at the head of the list,
-	 * since these cannot be in the IDR. This way the linear search
-	 * is fast, provided a valid software event is provided.
-	 */
-	if (type == PERF_TYPE_SOFTWARE || !name)
-		list_add_rcu(&pmu->entry, &pmus);
-	else
-		list_add_tail_rcu(&pmu->entry, &pmus);
-
+	list_add_rcu(&pmu->entry, &pmus);
 	atomic_set(&pmu->exclusive_cnt, 0);
 	ret = 0;
 unlock:
@@ -11499,12 +11498,13 @@ unlock:
 	return ret;

 free_dev:
-	device_del(pmu->dev);
-	put_device(pmu->dev);
+	if (pmu->dev && pmu->dev != PMU_NULL_DEV) {
+		device_del(pmu->dev);
+		put_device(pmu->dev);
+	}

 free_idr:
-	if (pmu->type != PERF_TYPE_SOFTWARE)
-		idr_remove(&pmu_idr, pmu->type);
+	idr_remove(&pmu_idr, pmu->type);

 free_pdc:
 	free_percpu(pmu->pmu_disable_count);
@@ -11525,9 +11525,8 @@ void perf_pmu_unregister(struct pmu *pmu)
 	synchronize_rcu();

 	free_percpu(pmu->pmu_disable_count);
-	if (pmu->type != PERF_TYPE_SOFTWARE)
-		idr_remove(&pmu_idr, pmu->type);
-	if (pmu_bus_running) {
+	idr_remove(&pmu_idr, pmu->type);
+	if (pmu_bus_running && pmu->dev && pmu->dev != PMU_NULL_DEV) {
 		if (pmu->nr_addr_filters)
 			device_remove_file(pmu->dev, &dev_attr_nr_addr_filters);
 		device_del(pmu->dev);
@@ -11601,6 +11600,12 @@ static struct pmu *perf_init_event(struct perf_event *event)

 	idx = srcu_read_lock(&pmus_srcu);

+	/*
+	 * Save original type before calling pmu->event_init() since certain
+	 * pmus overwrites event->attr.type to forward event to another pmu.
+	 */
+	event->orig_type = event->attr.type;
+
 	/* Try parent's PMU first: */
 	if (event->parent && event->parent->pmu) {
 		pmu = event->parent->pmu;
@@ -13640,8 +13645,8 @@ void __init perf_event_init(void)
 	perf_event_init_all_cpus();
 	init_srcu_struct(&pmus_srcu);
 	perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
-	perf_pmu_register(&perf_cpu_clock, NULL, -1);
-	perf_pmu_register(&perf_task_clock, NULL, -1);
+	perf_pmu_register(&perf_cpu_clock, "cpu_clock", -1);
+	perf_pmu_register(&perf_task_clock, "task_clock", -1);
 	perf_tp_register();
 	perf_event_init_cpu(smp_processor_id());
 	register_reboot_notifier(&perf_reboot_notifier);
@@ -13684,7 +13689,7 @@ static int __init perf_event_sysfs_init(void)
 		goto unlock;

 	list_for_each_entry(pmu, &pmus, entry) {
-		if (!pmu->name || pmu->type < 0)
+		if (pmu->dev)
 			continue;

 		ret = pmu_dev_alloc(pmu);
-- 
cgit v1.2.3
From 2fad201fe38ff9a692acedb1990ece2c52a29f95 Mon Sep 17 00:00:00 2001
From: Ravi Bangoria
Date: Thu, 4 May 2023 16:30:01 +0530
Subject: perf/ibs: Fix interface via core pmu events

Although IBS pmus can be invoked via their own interface, indirect IBS
invocation via core pmu events is also supported, for user convenience,
with a fixed set of events: cpu-cycles:p, r076:p (same as cpu-cycles:p)
and r0C1:p (micro-ops).

This indirect IBS invocation has been broken since commit 66d258c5b048
("perf/core: Optimize perf_init_event()"), which added the RAW pmu to
the 'pmu_idr' list; since then, if event_init() fails with the RAW pmu,
an error is returned instead of trying other pmus.

Forward precise events from the core pmu to IBS by overwriting 'type'
and 'config' in the kernel copy of perf_event_attr. Overwriting will
cause perf_init_event() to retry with the updated 'type' and 'config',
which will automatically forward the event to the IBS pmu.

Without patch:
  $ sudo ./perf record -C 0 -e r076:p -- sleep 1
  Error:
  The r076:p event is not supported.

With patch:
  $ sudo ./perf record -C 0 -e r076:p -- sleep 1
  [ perf record: Woken up 1 times to write data ]
  [ perf record: Captured and wrote 0.341 MB perf.data (37 samples) ]

Fixes: 66d258c5b048 ("perf/core: Optimize perf_init_event()")
Reported-by: Stephane Eranian
Signed-off-by: Ravi Bangoria
Signed-off-by: Peter Zijlstra (Intel)
Link: https://lkml.kernel.org/r/20230504110003.2548-3-ravi.bangoria@amd.com
---
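The fix can also be exercised without the perf tool. A minimal open
sketch (an editorial example, not part of the patch) that mirrors the
perf record command above, and which the selftest later in this series
automates; it assumes an AMD system with IBS and sufficient privileges:

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
        struct perf_event_attr attr;
        long fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_RAW;
        attr.config = 0x76;        /* core cycles event */
        attr.precise_ip = 1;       /* requests forwarding to IBS */
        attr.sample_period = 100000;
        attr.disabled = 1;

        /* system-wide on CPU 0, like the perf record example above */
        fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
        if (fd < 0)
            perror("perf_event_open");  /* fails with ENOENT before this fix */
        else
            close(fd);
        return fd < 0;
    }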
 arch/x86/events/amd/core.c        |  2 +-
 arch/x86/events/amd/ibs.c         | 53 +++++++++++++++++++--------------------
 arch/x86/include/asm/perf_event.h |  2 ++
 3 files changed, 29 insertions(+), 28 deletions(-)

diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index bccea57dee81..abadd5f23425 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -374,7 +374,7 @@ static int amd_pmu_hw_config(struct perf_event *event)

 	/* pass precise event sampling to ibs: */
 	if (event->attr.precise_ip && get_ibs_caps())
-		return -ENOENT;
+		return forward_event_to_ibs(event);

 	if (has_branch_stack(event) && !x86_pmu.lbr_nr)
 		return -EOPNOTSUPP;
diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
index 64582954b5f6..371014802191 100644
--- a/arch/x86/events/amd/ibs.c
+++ b/arch/x86/events/amd/ibs.c
@@ -190,7 +190,7 @@ static struct perf_ibs *get_ibs_pmu(int type)
 }

 /*
- * Use IBS for precise event sampling:
+ * core pmu config -> IBS config
  *
  * perf record -a -e cpu-cycles:p ...	# use ibs op counting cycle count
  * perf record -a -e r076:p ...		# same as -e cpu-cycles:p
@@ -199,25 +199,9 @@ static struct perf_ibs *get_ibs_pmu(int type)
  * IbsOpCntCtl (bit 19) of IBS Execution Control Register (IbsOpCtl,
  * MSRC001_1033) is used to select either cycle or micro-ops counting
  * mode.
- *
- * The rip of IBS samples has skid 0. Thus, IBS supports precise
- * levels 1 and 2 and the PERF_EFLAGS_EXACT is set. In rare cases the
- * rip is invalid when IBS was not able to record the rip correctly.
- * We clear PERF_EFLAGS_EXACT and take the rip from pt_regs then.
- *
 */
-static int perf_ibs_precise_event(struct perf_event *event, u64 *config)
+static int core_pmu_ibs_config(struct perf_event *event, u64 *config)
 {
-	switch (event->attr.precise_ip) {
-	case 0:
-		return -ENOENT;
-	case 1:
-	case 2:
-		break;
-	default:
-		return -EOPNOTSUPP;
-	}
-
 	switch (event->attr.type) {
 	case PERF_TYPE_HARDWARE:
 		switch (event->attr.config) {
@@ -243,22 +227,37 @@ static int perf_ibs_precise_event(struct perf_event *event, u64 *config)
 	return -EOPNOTSUPP;
 }

+/*
+ * The rip of IBS samples has skid 0. Thus, IBS supports precise
+ * levels 1 and 2 and the PERF_EFLAGS_EXACT is set. In rare cases the
+ * rip is invalid when IBS was not able to record the rip correctly.
+ * We clear PERF_EFLAGS_EXACT and take the rip from pt_regs then.
+ */
+int forward_event_to_ibs(struct perf_event *event)
+{
+	u64 config = 0;
+
+	if (!event->attr.precise_ip || event->attr.precise_ip > 2)
+		return -EOPNOTSUPP;
+
+	if (!core_pmu_ibs_config(event, &config)) {
+		event->attr.type = perf_ibs_op.pmu.type;
+		event->attr.config = config;
+	}
+	return -ENOENT;
+}
+
 static int perf_ibs_init(struct perf_event *event)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	struct perf_ibs *perf_ibs;
 	u64 max_cnt, config;
-	int ret;

 	perf_ibs = get_ibs_pmu(event->attr.type);
-	if (perf_ibs) {
-		config = event->attr.config;
-	} else {
-		perf_ibs = &perf_ibs_op;
-		ret = perf_ibs_precise_event(event, &config);
-		if (ret)
-			return ret;
-	}
+	if (!perf_ibs)
+		return -ENOENT;
+
+	config = event->attr.config;

 	if (event->pmu != &perf_ibs->pmu)
 		return -ENOENT;
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 8fc15ed5e60b..fc86248215e2 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -475,8 +475,10 @@ struct pebs_xmm {

 #ifdef CONFIG_X86_LOCAL_APIC
 extern u32 get_ibs_caps(void);
+extern int forward_event_to_ibs(struct perf_event *event);
 #else
 static inline u32 get_ibs_caps(void) { return 0; }
+static inline int forward_event_to_ibs(struct perf_event *event) { return -ENOENT; }
 #endif

 #ifdef CONFIG_PERF_EVENTS
-- 
cgit v1.2.3

From 9551fbb64d094cc105964716224adeb7765df8fd Mon Sep 17 00:00:00 2001
From: Ravi Bangoria
Date: Thu, 4 May 2023 16:30:02 +0530
Subject: perf/core: Remove pmu linear searching code

Searching for the right pmu by iterating over all pmus is no longer
required since all pmus now *must* be present in the 'pmu_idr' list.
So, remove the linear searching code.
Signed-off-by: Ravi Bangoria
Signed-off-by: Peter Zijlstra (Intel)
Link: https://lkml.kernel.org/r/20230504110003.2548-4-ravi.bangoria@amd.com
---
 kernel/events/core.c | 37 +++++++++++++------------------------
 1 file changed, 13 insertions(+), 24 deletions(-)

diff --git a/kernel/events/core.c b/kernel/events/core.c
index c01bbe93e291..231b187c1a3f 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -11630,38 +11630,27 @@ static struct pmu *perf_init_event(struct perf_event *event)
 	}

 again:
+	ret = -ENOENT;
 	rcu_read_lock();
 	pmu = idr_find(&pmu_idr, type);
 	rcu_read_unlock();
-	if (pmu) {
-		if (event->attr.type != type && type != PERF_TYPE_RAW &&
-		    !(pmu->capabilities & PERF_PMU_CAP_EXTENDED_HW_TYPE))
-			goto fail;
-
-		ret = perf_try_init_event(pmu, event);
-		if (ret == -ENOENT && event->attr.type != type && !extended_type) {
-			type = event->attr.type;
-			goto again;
-		}
+	if (!pmu)
+		goto fail;

-		if (ret)
-			pmu = ERR_PTR(ret);
+	if (event->attr.type != type && type != PERF_TYPE_RAW &&
+	    !(pmu->capabilities & PERF_PMU_CAP_EXTENDED_HW_TYPE))
+		goto fail;

-		goto unlock;
+	ret = perf_try_init_event(pmu, event);
+	if (ret == -ENOENT && event->attr.type != type && !extended_type) {
+		type = event->attr.type;
+		goto again;
 	}

-	list_for_each_entry_rcu(pmu, &pmus, entry, lockdep_is_held(&pmus_srcu)) {
-		ret = perf_try_init_event(pmu, event);
-		if (!ret)
-			goto unlock;
-
-		if (ret != -ENOENT) {
-			pmu = ERR_PTR(ret);
-			goto unlock;
-		}
-	}
 fail:
-	pmu = ERR_PTR(-ENOENT);
+	if (ret)
+		pmu = ERR_PTR(ret);
+
 unlock:
 	srcu_read_unlock(&pmus_srcu, idx);
-- 
cgit v1.2.3

From 78075d947534013b4575687d19ebcbbb6d3addcd Mon Sep 17 00:00:00 2001
From: Ravi Bangoria
Date: Thu, 4 May 2023 16:30:03 +0530
Subject: perf test: Add selftest to test IBS invocation via core pmu events

The IBS pmu can be invoked via a fixed set of core pmu events with
'precise_ip' set to 1. Add a simple event open test for all these
events.

Without kernel fix:
  $ sudo ./perf test -vv 76
  76: AMD IBS via core pmu                 :
  --- start ---
  test child forked, pid 6553
  Using CPUID AuthenticAMD-25-1-1
  type: 0x0, config: 0x0, fd: 3 - Pass
  type: 0x0, config: 0x1, fd: -1 - Pass
  type: 0x4, config: 0x76, fd: -1 - Fail
  type: 0x4, config: 0xc1, fd: -1 - Fail
  type: 0x4, config: 0x12, fd: -1 - Pass
  test child finished with -1
  ---- end ----
  AMD IBS via core pmu: FAILED!
With kernel fix:
  $ sudo ./perf test -vv 76
  76: AMD IBS via core pmu                 :
  --- start ---
  test child forked, pid 7526
  Using CPUID AuthenticAMD-25-1-1
  type: 0x0, config: 0x0, fd: 3 - Pass
  type: 0x0, config: 0x1, fd: -1 - Pass
  type: 0x4, config: 0x76, fd: 3 - Pass
  type: 0x4, config: 0xc1, fd: 3 - Pass
  type: 0x4, config: 0x12, fd: -1 - Pass
  test child finished with 0
  ---- end ----
  AMD IBS via core pmu: Ok

Signed-off-by: Ravi Bangoria
Signed-off-by: Peter Zijlstra (Intel)
Link: https://lkml.kernel.org/r/20230504110003.2548-5-ravi.bangoria@amd.com
---
 tools/perf/arch/x86/include/arch-tests.h         |  1 +
 tools/perf/arch/x86/tests/Build                  |  1 +
 tools/perf/arch/x86/tests/amd-ibs-via-core-pmu.c | 71 ++++++++++++++++++++++++
 tools/perf/arch/x86/tests/arch-tests.c           |  2 +
 4 files changed, 75 insertions(+)
 create mode 100644 tools/perf/arch/x86/tests/amd-ibs-via-core-pmu.c

diff --git a/tools/perf/arch/x86/include/arch-tests.h b/tools/perf/arch/x86/include/arch-tests.h
index 902e9ea9b99e..93d3b8877baa 100644
--- a/tools/perf/arch/x86/include/arch-tests.h
+++ b/tools/perf/arch/x86/include/arch-tests.h
@@ -11,6 +11,7 @@ int test__intel_pt_pkt_decoder(struct test_suite *test, int subtest);
 int test__intel_pt_hybrid_compat(struct test_suite *test, int subtest);
 int test__bp_modify(struct test_suite *test, int subtest);
 int test__x86_sample_parsing(struct test_suite *test, int subtest);
+int test__amd_ibs_via_core_pmu(struct test_suite *test, int subtest);

 extern struct test_suite *arch_tests[];

diff --git a/tools/perf/arch/x86/tests/Build b/tools/perf/arch/x86/tests/Build
index 6f4e8636c3bf..fd02d8181718 100644
--- a/tools/perf/arch/x86/tests/Build
+++ b/tools/perf/arch/x86/tests/Build
@@ -5,3 +5,4 @@ perf-y += arch-tests.o
 perf-y += sample-parsing.o
 perf-$(CONFIG_AUXTRACE) += insn-x86.o intel-pt-test.o
 perf-$(CONFIG_X86_64) += bp-modify.o
+perf-y += amd-ibs-via-core-pmu.o
diff --git a/tools/perf/arch/x86/tests/amd-ibs-via-core-pmu.c b/tools/perf/arch/x86/tests/amd-ibs-via-core-pmu.c
new file mode 100644
index 000000000000..2902798ca5c1
--- /dev/null
+++ b/tools/perf/arch/x86/tests/amd-ibs-via-core-pmu.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "arch-tests.h"
+#include "linux/perf_event.h"
+#include "tests/tests.h"
+#include "pmu.h"
+#include "pmus.h"
+#include "../perf-sys.h"
+#include "debug.h"
+
+#define NR_SUB_TESTS 5
+
+static struct sub_tests {
+	int type;
+	unsigned long config;
+	bool valid;
+} sub_tests[NR_SUB_TESTS] = {
+	{ PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES, true },
+	{ PERF_TYPE_HARDWARE, PERF_COUNT_HW_INSTRUCTIONS, false },
+	{ PERF_TYPE_RAW, 0x076, true },
+	{ PERF_TYPE_RAW, 0x0C1, true },
+	{ PERF_TYPE_RAW, 0x012, false },
+};
+
+static int event_open(int type, unsigned long config)
+{
+	struct perf_event_attr attr;
+
+	memset(&attr, 0, sizeof(struct perf_event_attr));
+	attr.type = type;
+	attr.size = sizeof(struct perf_event_attr);
+	attr.config = config;
+	attr.disabled = 1;
+	attr.precise_ip = 1;
+	attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
+	attr.sample_period = 100000;
+
+	return sys_perf_event_open(&attr, -1, 0, -1, 0);
+}
+
+int test__amd_ibs_via_core_pmu(struct test_suite *test __maybe_unused,
+			       int subtest __maybe_unused)
+{
+	struct perf_pmu *ibs_pmu;
+	int ret = TEST_OK;
+	int fd, i;
+
+	if (list_empty(&pmus))
+		perf_pmu__scan(NULL);
+
+	ibs_pmu = perf_pmu__find("ibs_op");
+	if (!ibs_pmu)
+		return TEST_SKIP;
+
+	for (i = 0; i < NR_SUB_TESTS; i++) {
+		fd = event_open(sub_tests[i].type, sub_tests[i].config);
+		pr_debug("type: 0x%x, config: 0x%lx, fd: %d - ", sub_tests[i].type,
+			 sub_tests[i].config, fd);
+		if ((sub_tests[i].valid && fd == -1) ||
+		    (!sub_tests[i].valid && fd > 0)) {
+			pr_debug("Fail\n");
+			ret = TEST_FAIL;
+		} else {
+			pr_debug("Pass\n");
+		}
+
+		if (fd > 0)
+			close(fd);
+	}
+
+	return ret;
+}
diff --git a/tools/perf/arch/x86/tests/arch-tests.c b/tools/perf/arch/x86/tests/arch-tests.c
index aae6ea0fe52b..b5c85ab8d92e 100644
--- a/tools/perf/arch/x86/tests/arch-tests.c
+++ b/tools/perf/arch/x86/tests/arch-tests.c
@@ -22,6 +22,7 @@ struct test_suite suite__intel_pt = {
 DEFINE_SUITE("x86 bp modify", bp_modify);
 #endif
 DEFINE_SUITE("x86 Sample parsing", x86_sample_parsing);
+DEFINE_SUITE("AMD IBS via core pmu", amd_ibs_via_core_pmu);

 struct test_suite *arch_tests[] = {
 #ifdef HAVE_DWARF_UNWIND_SUPPORT
@@ -35,5 +36,6 @@ struct test_suite *arch_tests[] = {
 	&suite__bp_modify,
 #endif
 	&suite__x86_sample_parsing,
+	&suite__amd_ibs_via_core_pmu,
 	NULL,
 };
-- 
cgit v1.2.3
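For reference, the test number (76 in the logs above) is not stable
across perf builds; perf test also accepts a substring of the test
description, which avoids the numbering problem:

  $ sudo ./perf test -vv 'AMD IBS via core pmu'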
", sub_tests[i].type, + sub_tests[i].config, fd); + if ((sub_tests[i].valid && fd == -1) || + (!sub_tests[i].valid && fd > 0)) { + pr_debug("Fail\n"); + ret = TEST_FAIL; + } else { + pr_debug("Pass\n"); + } + + if (fd > 0) + close(fd); + } + + return ret; +} diff --git a/tools/perf/arch/x86/tests/arch-tests.c b/tools/perf/arch/x86/tests/arch-tests.c index aae6ea0fe52b..b5c85ab8d92e 100644 --- a/tools/perf/arch/x86/tests/arch-tests.c +++ b/tools/perf/arch/x86/tests/arch-tests.c @@ -22,6 +22,7 @@ struct test_suite suite__intel_pt = { DEFINE_SUITE("x86 bp modify", bp_modify); #endif DEFINE_SUITE("x86 Sample parsing", x86_sample_parsing); +DEFINE_SUITE("AMD IBS via core pmu", amd_ibs_via_core_pmu); struct test_suite *arch_tests[] = { #ifdef HAVE_DWARF_UNWIND_SUPPORT @@ -35,5 +36,6 @@ struct test_suite *arch_tests[] = { &suite__bp_modify, #endif &suite__x86_sample_parsing, + &suite__amd_ibs_via_core_pmu, NULL, }; -- cgit v1.2.3 From 10d95a317ec12ec7dd4587a646c6bd6aa03c7ded Mon Sep 17 00:00:00 2001 From: Dapeng Mi Date: Thu, 4 May 2023 15:21:28 +0800 Subject: perf/x86/intel: Define bit macros for FixCntrCtl MSR Define bit macros for FixCntrCtl MSR and replace the bit hardcoding with these bit macros. This would make code be more human-readable. Perf commands 'perf stat -e "instructions,cycles,ref-cycles"' and 'perf record -e "instructions,cycles,ref-cycles"' pass. Signed-off-by: Dapeng Mi Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20230504072128.3653470-1-dapeng1.mi@linux.intel.com --- arch/x86/events/intel/core.c | 18 +++++++++--------- arch/x86/include/asm/perf_event.h | 10 ++++++++++ 2 files changed, 19 insertions(+), 9 deletions(-) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 070cc4ef2672..0d09245aa8df 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -2451,7 +2451,7 @@ static void intel_pmu_disable_fixed(struct perf_event *event) intel_clear_masks(event, idx); - mask = 0xfULL << ((idx - INTEL_PMC_IDX_FIXED) * 4); + mask = intel_fixed_bits_by_idx(idx - INTEL_PMC_IDX_FIXED, INTEL_FIXED_BITS_MASK); cpuc->fixed_ctrl_val &= ~mask; } @@ -2750,25 +2750,25 @@ static void intel_pmu_enable_fixed(struct perf_event *event) * if requested: */ if (!event->attr.precise_ip) - bits |= 0x8; + bits |= INTEL_FIXED_0_ENABLE_PMI; if (hwc->config & ARCH_PERFMON_EVENTSEL_USR) - bits |= 0x2; + bits |= INTEL_FIXED_0_USER; if (hwc->config & ARCH_PERFMON_EVENTSEL_OS) - bits |= 0x1; + bits |= INTEL_FIXED_0_KERNEL; /* * ANY bit is supported in v3 and up */ if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY) - bits |= 0x4; + bits |= INTEL_FIXED_0_ANYTHREAD; idx -= INTEL_PMC_IDX_FIXED; - bits <<= (idx * 4); - mask = 0xfULL << (idx * 4); + bits = intel_fixed_bits_by_idx(idx, bits); + mask = intel_fixed_bits_by_idx(idx, INTEL_FIXED_BITS_MASK); if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip) { - bits |= ICL_FIXED_0_ADAPTIVE << (idx * 4); - mask |= ICL_FIXED_0_ADAPTIVE << (idx * 4); + bits |= intel_fixed_bits_by_idx(idx, ICL_FIXED_0_ADAPTIVE); + mask |= intel_fixed_bits_by_idx(idx, ICL_FIXED_0_ADAPTIVE); } cpuc->fixed_ctrl_val &= ~mask; diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index fc86248215e2..7d16fcfa7f25 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -32,11 +32,21 @@ #define ARCH_PERFMON_EVENTSEL_INV (1ULL << 23) #define ARCH_PERFMON_EVENTSEL_CMASK 0xFF000000ULL +#define INTEL_FIXED_BITS_MASK 0xFULL +#define 
From 228020b490eda9133c9cb6f59a5ee1278d8c463f Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Mon, 5 Jun 2023 12:14:01 +0200
Subject: perf: Re-instate the linear PMU search

Full revert of commit 9551fbb64d09 ("perf/core: Remove pmu linear
searching code").

Some architectures (notably arm/arm64) still relied on the linear
search in order to find the PMU that consumes
PERF_TYPE_{HARDWARE,HW_CACHE,RAW}. This will need a more thorough
audit and cleanup.

Reported-by: Nathan Chancellor
Reported-by: Krzysztof Kozlowski
Acked-by: Mark Rutland
Signed-off-by: Peter Zijlstra (Intel)
Link: https://lkml.kernel.org/r/20230605101401.GL38236@hirez.programming.kicks-ass.net
---
 kernel/events/core.c | 37 ++++++++++++++++++++++++-------------
 1 file changed, 24 insertions(+), 13 deletions(-)

diff --git a/kernel/events/core.c b/kernel/events/core.c
index 231b187c1a3f..c01bbe93e291 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -11630,27 +11630,38 @@ static struct pmu *perf_init_event(struct perf_event *event)
 	}

 again:
-	ret = -ENOENT;
 	rcu_read_lock();
 	pmu = idr_find(&pmu_idr, type);
 	rcu_read_unlock();
-	if (!pmu)
-		goto fail;
+	if (pmu) {
+		if (event->attr.type != type && type != PERF_TYPE_RAW &&
+		    !(pmu->capabilities & PERF_PMU_CAP_EXTENDED_HW_TYPE))
+			goto fail;

-	if (event->attr.type != type && type != PERF_TYPE_RAW &&
-	    !(pmu->capabilities & PERF_PMU_CAP_EXTENDED_HW_TYPE))
-		goto fail;
+		ret = perf_try_init_event(pmu, event);
+		if (ret == -ENOENT && event->attr.type != type && !extended_type) {
+			type = event->attr.type;
+			goto again;
+		}

-	ret = perf_try_init_event(pmu, event);
-	if (ret == -ENOENT && event->attr.type != type && !extended_type) {
-		type = event->attr.type;
-		goto again;
+		if (ret)
+			pmu = ERR_PTR(ret);
+
+		goto unlock;
 	}

-fail:
-	if (ret)
-		pmu = ERR_PTR(ret);
+	list_for_each_entry_rcu(pmu, &pmus, entry, lockdep_is_held(&pmus_srcu)) {
+		ret = perf_try_init_event(pmu, event);
+		if (!ret)
+			goto unlock;

+		if (ret != -ENOENT) {
+			pmu = ERR_PTR(ret);
+			goto unlock;
+		}
+	}
+fail:
+	pmu = ERR_PTR(-ENOENT);
+
 unlock:
 	srcu_read_unlock(&pmus_srcu, idx);
-- 
cgit v1.2.3