From 0259bf63f71e2accfeca4a4e346ede8edcc86aab Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 6 Feb 2024 21:05:44 -0800 Subject: perf/core: Optimize perf_adjust_freq_unthr_context() It was unnecessarily disabling and enabling PMUs for each event. It should be done at PMU level. Add pmu_ctx->nr_freq counter to check it at each PMU. As PMU context has separate active lists for pinned group and flexible group, factor out a new function to do the job. Another minor optimization is that it can skip PMUs w/ CAP_NO_INTERRUPT even if it needs to unthrottle sampling events. Signed-off-by: Namhyung Kim Signed-off-by: Ingo Molnar Tested-by: Mingwei Zhang Reviewed-by: Ian Rogers Reviewed-by: Kan Liang Link: https://lore.kernel.org/r/20240207050545.2727923-1-namhyung@kernel.org --- kernel/events/core.c | 70 +++++++++++++++++++++++++++++++++------------------- 1 file changed, 44 insertions(+), 26 deletions(-) (limited to 'kernel') diff --git a/kernel/events/core.c b/kernel/events/core.c index 724e6d7e128f..9566cfb27355 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -2302,8 +2302,10 @@ event_sched_out(struct perf_event *event, struct perf_event_context *ctx) if (!is_software_event(event)) cpc->active_oncpu--; - if (event->attr.freq && event->attr.sample_freq) + if (event->attr.freq && event->attr.sample_freq) { ctx->nr_freq--; + epc->nr_freq--; + } if (event->attr.exclusive || !cpc->active_oncpu) cpc->exclusive = 0; @@ -2558,9 +2560,10 @@ event_sched_in(struct perf_event *event, struct perf_event_context *ctx) if (!is_software_event(event)) cpc->active_oncpu++; - if (event->attr.freq && event->attr.sample_freq) + if (event->attr.freq && event->attr.sample_freq) { ctx->nr_freq++; - + epc->nr_freq++; + } if (event->attr.exclusive) cpc->exclusive = 1; @@ -4123,30 +4126,14 @@ static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bo } } -/* - * combine freq adjustment with unthrottling to avoid two passes over the - * events. At the same time, make sure, having freq events does not change - * the rate of unthrottling as that would introduce bias. - */ -static void -perf_adjust_freq_unthr_context(struct perf_event_context *ctx, bool unthrottle) +static void perf_adjust_freq_unthr_events(struct list_head *event_list) { struct perf_event *event; struct hw_perf_event *hwc; u64 now, period = TICK_NSEC; s64 delta; - /* - * only need to iterate over all events iff: - * - context have events in frequency mode (needs freq adjust) - * - there are events to unthrottle on this cpu - */ - if (!(ctx->nr_freq || unthrottle)) - return; - - raw_spin_lock(&ctx->lock); - - list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { + list_for_each_entry(event, event_list, active_list) { if (event->state != PERF_EVENT_STATE_ACTIVE) continue; @@ -4154,8 +4141,6 @@ perf_adjust_freq_unthr_context(struct perf_event_context *ctx, bool unthrottle) if (!event_filter_match(event)) continue; - perf_pmu_disable(event->pmu); - hwc = &event->hw; if (hwc->interrupts == MAX_INTERRUPTS) { @@ -4165,7 +4150,7 @@ perf_adjust_freq_unthr_context(struct perf_event_context *ctx, bool unthrottle) } if (!event->attr.freq || !event->attr.sample_freq) - goto next; + continue; /* * stop the event and update event->count @@ -4187,8 +4172,41 @@ perf_adjust_freq_unthr_context(struct perf_event_context *ctx, bool unthrottle) perf_adjust_period(event, period, delta, false); event->pmu->start(event, delta > 0 ? 
PERF_EF_RELOAD : 0); - next: - perf_pmu_enable(event->pmu); + } +} + +/* + * combine freq adjustment with unthrottling to avoid two passes over the + * events. At the same time, make sure, having freq events does not change + * the rate of unthrottling as that would introduce bias. + */ +static void +perf_adjust_freq_unthr_context(struct perf_event_context *ctx, bool unthrottle) +{ + struct perf_event_pmu_context *pmu_ctx; + + /* + * only need to iterate over all events iff: + * - context have events in frequency mode (needs freq adjust) + * - there are events to unthrottle on this cpu + */ + if (!(ctx->nr_freq || unthrottle)) + return; + + raw_spin_lock(&ctx->lock); + + list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) { + if (!(pmu_ctx->nr_freq || unthrottle)) + continue; + if (!perf_pmu_ctx_is_active(pmu_ctx)) + continue; + if (pmu_ctx->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) + continue; + + perf_pmu_disable(pmu_ctx->pmu); + perf_adjust_freq_unthr_events(&pmu_ctx->pinned_active); + perf_adjust_freq_unthr_events(&pmu_ctx->flexible_active); + perf_pmu_enable(pmu_ctx->pmu); } raw_spin_unlock(&ctx->lock); -- cgit v1.2.3 From f38628b06c36222367e26820879789ae59e49f60 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 6 Feb 2024 21:05:45 -0800 Subject: perf/core: Reduce PMU access to adjust sample freq In perf_adjust_freq_unthr_context(), it first starts the event and then stops it unnecessarily to adjust the sampling frequency if the event is throttled. For a throttled non-frequency event, there is no freq to adjust, so just starting the event is fine. For a frequency event, whether it's throttled or not, it needs to stop before adjusting the frequency. That means it should not start the event if it was throttled. I tried to skip calling the stop callback, but it didn't work well since the event count might not be up to date. It should call the stop callback with PERF_EF_UPDATE anyway. However, not calling start prevents unnecessary MSR accesses (which can be costly) for already-stopped events, as the stop state is saved in the hw config. Signed-off-by: Namhyung Kim Signed-off-by: Ingo Molnar Reviewed-by: Ian Rogers Reviewed-by: Kan Liang Link: https://lore.kernel.org/r/20240207050545.2727923-2-namhyung@kernel.org --- kernel/events/core.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/events/core.c b/kernel/events/core.c index 9566cfb27355..fd94e45a9d86 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -4146,7 +4146,8 @@ static void perf_adjust_freq_unthr_events(struct list_head *event_list) if (hwc->interrupts == MAX_INTERRUPTS) { hwc->interrupts = 0; perf_log_throttle(event, 1); - event->pmu->start(event, 0); + if (!event->attr.freq || !event->attr.sample_freq) + event->pmu->start(event, 0); } if (!event->attr.freq || !event->attr.sample_freq) -- cgit v1.2.3 From 4c03fe11b96bda60610aca77002e83f37b4a2242 Mon Sep 17 00:00:00 2001 From: Kyle Huey Date: Thu, 11 Apr 2024 18:50:13 -0700 Subject: perf/bpf: Reorder bpf_overflow_handler() ahead of __perf_event_overflow() This will allow __perf_event_overflow() to call bpf_overflow_handler().
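[Editor's note: for context on the path the perf/bpf patches below refactor, here is a minimal userspace sketch -- not part of the series -- of how a BPF program ends up attached to a perf event. Loading the program is elided; prog_fd is assumed to be an already-loaded BPF_PROG_TYPE_PERF_EVENT program, and error reporting is trimmed.]

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Open a cycles event in frequency mode and route its overflows to BPF. */
static int attach_bpf_to_perf_event(int prog_fd)
{
	struct perf_event_attr attr = {
		.size        = sizeof(struct perf_event_attr),
		.type        = PERF_TYPE_HARDWARE,
		.config      = PERF_COUNT_HW_CPU_CYCLES,
		.freq        = 1,	/* attr.freq + attr.sample_freq is what */
		.sample_freq = 1000,	/* ctx->nr_freq / pmu_ctx->nr_freq count */
	};
	int evt_fd;

	evt_fd = syscall(SYS_perf_event_open, &attr, 0 /* this pid */,
			 -1 /* any cpu */, -1 /* no group */, 0);
	if (evt_fd < 0)
		return -1;

	/* From here on, each overflow passes through bpf_overflow_handler(). */
	if (ioctl(evt_fd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) {
		close(evt_fd);
		return -1;
	}
	return evt_fd;
}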
Signed-off-by: Kyle Huey Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/20240412015019.7060-2-khuey@kylehuey.com --- kernel/events/core.c | 183 ++++++++++++++++++++++++++------------------------- 1 file changed, 92 insertions(+), 91 deletions(-) (limited to 'kernel') diff --git a/kernel/events/core.c b/kernel/events/core.c index fd94e45a9d86..ca0a90648fe6 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -9563,6 +9563,98 @@ static inline bool sample_is_allowed(struct perf_event *event, struct pt_regs *r return true; } +#ifdef CONFIG_BPF_SYSCALL +static void bpf_overflow_handler(struct perf_event *event, + struct perf_sample_data *data, + struct pt_regs *regs) +{ + struct bpf_perf_event_data_kern ctx = { + .data = data, + .event = event, + }; + struct bpf_prog *prog; + int ret = 0; + + ctx.regs = perf_arch_bpf_user_pt_regs(regs); + if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) + goto out; + rcu_read_lock(); + prog = READ_ONCE(event->prog); + if (prog) { + perf_prepare_sample(data, event, regs); + ret = bpf_prog_run(prog, &ctx); + } + rcu_read_unlock(); +out: + __this_cpu_dec(bpf_prog_active); + if (!ret) + return; + + event->orig_overflow_handler(event, data, regs); +} + +static int perf_event_set_bpf_handler(struct perf_event *event, + struct bpf_prog *prog, + u64 bpf_cookie) +{ + if (event->overflow_handler_context) + /* hw breakpoint or kernel counter */ + return -EINVAL; + + if (event->prog) + return -EEXIST; + + if (prog->type != BPF_PROG_TYPE_PERF_EVENT) + return -EINVAL; + + if (event->attr.precise_ip && + prog->call_get_stack && + (!(event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) || + event->attr.exclude_callchain_kernel || + event->attr.exclude_callchain_user)) { + /* + * On perf_event with precise_ip, calling bpf_get_stack() + * may trigger unwinder warnings and occasional crashes. + * bpf_get_[stack|stackid] works around this issue by using + * callchain attached to perf_sample_data. If the + * perf_event does not full (kernel and user) callchain + * attached to perf_sample_data, do not allow attaching BPF + * program that calls bpf_get_[stack|stackid]. + */ + return -EPROTO; + } + + event->prog = prog; + event->bpf_cookie = bpf_cookie; + event->orig_overflow_handler = READ_ONCE(event->overflow_handler); + WRITE_ONCE(event->overflow_handler, bpf_overflow_handler); + return 0; +} + +static void perf_event_free_bpf_handler(struct perf_event *event) +{ + struct bpf_prog *prog = event->prog; + + if (!prog) + return; + + WRITE_ONCE(event->overflow_handler, event->orig_overflow_handler); + event->prog = NULL; + bpf_prog_put(prog); +} +#else +static int perf_event_set_bpf_handler(struct perf_event *event, + struct bpf_prog *prog, + u64 bpf_cookie) +{ + return -EOPNOTSUPP; +} + +static void perf_event_free_bpf_handler(struct perf_event *event) +{ +} +#endif + /* * Generic event overflow handling, sampling. 
*/ @@ -10441,97 +10533,6 @@ static void perf_event_free_filter(struct perf_event *event) ftrace_profile_free_filter(event); } -#ifdef CONFIG_BPF_SYSCALL -static void bpf_overflow_handler(struct perf_event *event, - struct perf_sample_data *data, - struct pt_regs *regs) -{ - struct bpf_perf_event_data_kern ctx = { - .data = data, - .event = event, - }; - struct bpf_prog *prog; - int ret = 0; - - ctx.regs = perf_arch_bpf_user_pt_regs(regs); - if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) - goto out; - rcu_read_lock(); - prog = READ_ONCE(event->prog); - if (prog) { - perf_prepare_sample(data, event, regs); - ret = bpf_prog_run(prog, &ctx); - } - rcu_read_unlock(); -out: - __this_cpu_dec(bpf_prog_active); - if (!ret) - return; - - event->orig_overflow_handler(event, data, regs); -} - -static int perf_event_set_bpf_handler(struct perf_event *event, - struct bpf_prog *prog, - u64 bpf_cookie) -{ - if (event->overflow_handler_context) - /* hw breakpoint or kernel counter */ - return -EINVAL; - - if (event->prog) - return -EEXIST; - - if (prog->type != BPF_PROG_TYPE_PERF_EVENT) - return -EINVAL; - - if (event->attr.precise_ip && - prog->call_get_stack && - (!(event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) || - event->attr.exclude_callchain_kernel || - event->attr.exclude_callchain_user)) { - /* - * On perf_event with precise_ip, calling bpf_get_stack() - * may trigger unwinder warnings and occasional crashes. - * bpf_get_[stack|stackid] works around this issue by using - * callchain attached to perf_sample_data. If the - * perf_event does not full (kernel and user) callchain - * attached to perf_sample_data, do not allow attaching BPF - * program that calls bpf_get_[stack|stackid]. - */ - return -EPROTO; - } - - event->prog = prog; - event->bpf_cookie = bpf_cookie; - event->orig_overflow_handler = READ_ONCE(event->overflow_handler); - WRITE_ONCE(event->overflow_handler, bpf_overflow_handler); - return 0; -} - -static void perf_event_free_bpf_handler(struct perf_event *event) -{ - struct bpf_prog *prog = event->prog; - - if (!prog) - return; - - WRITE_ONCE(event->overflow_handler, event->orig_overflow_handler); - event->prog = NULL; - bpf_prog_put(prog); -} -#else -static int perf_event_set_bpf_handler(struct perf_event *event, - struct bpf_prog *prog, - u64 bpf_cookie) -{ - return -EOPNOTSUPP; -} -static void perf_event_free_bpf_handler(struct perf_event *event) -{ -} -#endif - /* * returns true if the event is a tracepoint, or a kprobe/upprobe created * with perf_event_open() -- cgit v1.2.3 From 924d934393f98fa6a41d6ea27352faf79c2bbaf6 Mon Sep 17 00:00:00 2001 From: Kyle Huey Date: Thu, 11 Apr 2024 18:50:14 -0700 Subject: perf/bpf: Create bpf_overflow_handler() stub for !CONFIG_BPF_SYSCALL This will allow __perf_event_overflow() (which is independent of CONFIG_BPF_SYSCALL) to call bpf_overflow_handler(). 
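[Editor's note: this stub follows the standard kernel config-stub idiom -- an empty fallback definition lets common code call the function unconditionally instead of wrapping every call site in #ifdef CONFIG_BPF_SYSCALL. A generic sketch of the idiom, with hypothetical names:]

#ifdef CONFIG_FOO
int foo_handle(struct foo *f);		/* real implementation in foo.c */
#else
static inline int foo_handle(struct foo *f)
{
	return 0;			/* benign default; compiles away */
}
#endif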
Signed-off-by: Kyle Huey Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/20240412015019.7060-3-khuey@kylehuey.com --- kernel/events/core.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'kernel') diff --git a/kernel/events/core.c b/kernel/events/core.c index ca0a90648fe6..d3f3f552e193 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -9643,6 +9643,12 @@ static void perf_event_free_bpf_handler(struct perf_event *event) bpf_prog_put(prog); } #else +static void bpf_overflow_handler(struct perf_event *event, + struct perf_sample_data *data, + struct pt_regs *regs) +{ +} + static int perf_event_set_bpf_handler(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie) -- cgit v1.2.3 From f11f10bfa1ca23b32020b2073aa13131a27978fe Mon Sep 17 00:00:00 2001 From: Kyle Huey Date: Thu, 11 Apr 2024 18:50:16 -0700 Subject: perf/bpf: Call BPF handler directly, not through overflow machinery To ultimately allow BPF programs attached to perf events to completely suppress all of the effects of a perf event overflow (rather than just the sample output, as they do today), call bpf_overflow_handler() from __perf_event_overflow() directly rather than modifying struct perf_event's overflow_handler. Return the BPF program's return value from bpf_overflow_handler() so that __perf_event_overflow() knows how to proceed. Remove the now unnecessary orig_overflow_handler from struct perf_event. This patch is solely a refactoring and results in no behavior change. Suggested-by: Namhyung Kim Signed-off-by: Kyle Huey Signed-off-by: Ingo Molnar Acked-by: Song Liu Acked-by: Jiri Olsa Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20240412015019.7060-5-khuey@kylehuey.com --- include/linux/perf_event.h | 6 +----- kernel/events/core.c | 27 +++++++++++---------------- 2 files changed, 12 insertions(+), 21 deletions(-) (limited to 'kernel') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 50e01db083ee..2ce2fbc02ec6 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -809,7 +809,6 @@ struct perf_event { u64 (*clock)(void); perf_overflow_handler_t overflow_handler; void *overflow_handler_context; - perf_overflow_handler_t orig_overflow_handler; struct bpf_prog *prog; u64 bpf_cookie; @@ -1361,10 +1360,7 @@ __is_default_overflow_handler(perf_overflow_handler_t overflow_handler) #ifdef CONFIG_BPF_SYSCALL static inline bool uses_default_overflow_handler(struct perf_event *event) { - if (likely(is_default_overflow_handler(event))) - return true; - - return __is_default_overflow_handler(event->orig_overflow_handler); + return is_default_overflow_handler(event); } #else #define uses_default_overflow_handler(event) \ diff --git a/kernel/events/core.c b/kernel/events/core.c index d3f3f552e193..c6a6936183d5 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -9564,9 +9564,9 @@ static inline bool sample_is_allowed(struct perf_event *event, struct pt_regs *r } #ifdef CONFIG_BPF_SYSCALL -static void bpf_overflow_handler(struct perf_event *event, - struct perf_sample_data *data, - struct pt_regs *regs) +static int bpf_overflow_handler(struct perf_event *event, + struct perf_sample_data *data, + struct pt_regs *regs) { struct bpf_perf_event_data_kern ctx = { .data = data, @@ -9587,10 +9587,8 @@ static void bpf_overflow_handler(struct perf_event *event, rcu_read_unlock(); out: __this_cpu_dec(bpf_prog_active); - if (!ret) - return; - event->orig_overflow_handler(event, data, regs); + return ret; } static int 
perf_event_set_bpf_handler(struct perf_event *event, @@ -9626,8 +9624,6 @@ static int perf_event_set_bpf_handler(struct perf_event *event, event->prog = prog; event->bpf_cookie = bpf_cookie; - event->orig_overflow_handler = READ_ONCE(event->overflow_handler); - WRITE_ONCE(event->overflow_handler, bpf_overflow_handler); return 0; } @@ -9638,15 +9634,15 @@ static void perf_event_free_bpf_handler(struct perf_event *event) if (!prog) return; - WRITE_ONCE(event->overflow_handler, event->orig_overflow_handler); event->prog = NULL; bpf_prog_put(prog); } #else -static void bpf_overflow_handler(struct perf_event *event, - struct perf_sample_data *data, - struct pt_regs *regs) +static int bpf_overflow_handler(struct perf_event *event, + struct perf_sample_data *data, + struct pt_regs *regs) { + return 1; } static int perf_event_set_bpf_handler(struct perf_event *event, @@ -9730,7 +9726,8 @@ static int __perf_event_overflow(struct perf_event *event, irq_work_queue(&event->pending_irq); } - READ_ONCE(event->overflow_handler)(event, data, regs); + if (!(event->prog && !bpf_overflow_handler(event, data, regs))) + READ_ONCE(event->overflow_handler)(event, data, regs); if (*perf_event_fasync(event) && event->pending_kill) { event->pending_wakeup = 1; @@ -11997,13 +11994,11 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu, overflow_handler = parent_event->overflow_handler; context = parent_event->overflow_handler_context; #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_EVENT_TRACING) - if (overflow_handler == bpf_overflow_handler) { + if (parent_event->prog) { struct bpf_prog *prog = parent_event->prog; bpf_prog_inc(prog); event->prog = prog; - event->orig_overflow_handler = - parent_event->orig_overflow_handler; } #endif } -- cgit v1.2.3 From c4fcc7d1f41532e878087c7c43f4cf247604d68b Mon Sep 17 00:00:00 2001 From: Kyle Huey Date: Thu, 11 Apr 2024 18:50:18 -0700 Subject: perf/bpf: Allow a BPF program to suppress all sample side effects Returning zero from a BPF program attached to a perf event already suppresses any data output. Return early from __perf_event_overflow() in this case so it will also suppress event_limit accounting, SIGTRAP generation, and F_ASYNC signalling. 
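[Editor's note: a sketch, not taken from the patch, of what a suppressing program might look like using libbpf conventions. After this change, returning 0 suppresses the sample output and all of the side effects listed above, while returning nonzero lets the overflow proceed as before.]

#include <linux/bpf.h>
#include <linux/bpf_perf_event.h>
#include <bpf/bpf_helpers.h>

SEC("perf_event")
int sample_gate(struct bpf_perf_event_data *ctx)
{
	static __u64 nr;	/* global counter; cross-CPU races are tolerable here */

	/* Keep every other overflow: 0 = suppress everything, 1 = proceed. */
	return ++nr & 1;
}

char LICENSE[] SEC("license") = "GPL";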
Signed-off-by: Kyle Huey Signed-off-by: Ingo Molnar Acked-by: Song Liu Acked-by: Jiri Olsa Acked-by: Namhyung Kim Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20240412015019.7060-7-khuey@kylehuey.com --- kernel/events/core.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/events/core.c b/kernel/events/core.c index c6a6936183d5..2212670cbe9b 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -9677,6 +9677,9 @@ static int __perf_event_overflow(struct perf_event *event, ret = __perf_event_account_interrupt(event, throttle); + if (event->prog && !bpf_overflow_handler(event, data, regs)) + return ret; + /* * XXX event_limit might not quite work as expected on inherited * events @@ -9726,8 +9729,7 @@ static int __perf_event_overflow(struct perf_event *event, irq_work_queue(&event->pending_irq); } - if (!(event->prog && !bpf_overflow_handler(event, data, regs))) - READ_ONCE(event->overflow_handler)(event, data, regs); + READ_ONCE(event->overflow_handler)(event, data, regs); if (*perf_event_fasync(event) && event->pending_kill) { event->pending_wakeup = 1; -- cgit v1.2.3 From 93d3fde7fd19c2e2cde7220e7986f9a75e9c5680 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 12 Apr 2024 11:55:00 +0200 Subject: perf/bpf: Change the !CONFIG_BPF_SYSCALL stubs to static inlines Otherwise the compiler will be unhappy if they go unused, which they do on allnoconfigs. Signed-off-by: Ingo Molnar Cc: Kyle Huey Link: https://lore.kernel.org/r/ZhkE9F4dyfR2dH2D@gmail.com --- kernel/events/core.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'kernel') diff --git a/kernel/events/core.c b/kernel/events/core.c index 2212670cbe9b..6708c1121b9f 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -9638,21 +9638,21 @@ static void perf_event_free_bpf_handler(struct perf_event *event) bpf_prog_put(prog); } #else -static int bpf_overflow_handler(struct perf_event *event, - struct perf_sample_data *data, - struct pt_regs *regs) +static inline int bpf_overflow_handler(struct perf_event *event, + struct perf_sample_data *data, + struct pt_regs *regs) { return 1; } -static int perf_event_set_bpf_handler(struct perf_event *event, - struct bpf_prog *prog, - u64 bpf_cookie) +static inline int perf_event_set_bpf_handler(struct perf_event *event, + struct bpf_prog *prog, + u64 bpf_cookie) { return -EOPNOTSUPP; } -static void perf_event_free_bpf_handler(struct perf_event *event) +static inline void perf_event_free_bpf_handler(struct perf_event *event) { } #endif -- cgit v1.2.3 From 4a013980666857c1eb2df6a2137817caa21d38a6 Mon Sep 17 00:00:00 2001 From: Kyle Huey Date: Sat, 13 Apr 2024 07:16:16 -0700 Subject: perf: Move perf_event_fasync() to perf_event.h This will allow it to be called from perf_output_wakeup(). 
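[Editor's note: the "F_ASYNC signalling" referred to in this series is plain fcntl()-based async notification on the perf event fd. A userspace sketch, assuming evt_fd comes from perf_event_open() and eliding error handling:]

#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static void on_sigio(int sig)
{
	/* Typically just sets a flag; the main loop drains the ring buffer. */
}

static void enable_sigio(int evt_fd)
{
	signal(SIGIO, on_sigio);
	fcntl(evt_fd, F_SETOWN, getpid());	/* deliver the signal to us */
	fcntl(evt_fd, F_SETFL, fcntl(evt_fd, F_GETFL) | O_ASYNC);
}

With the watermark_wakeup patch below, crossing the ring buffer's wakeup watermark raises SIGIO through this machinery as well, rather than only waking poll() waiters.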
Signed-off-by: Kyle Huey Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/20240413141618.4160-2-khuey@kylehuey.com --- include/linux/perf_event.h | 8 ++++++++ kernel/events/core.c | 8 -------- 2 files changed, 8 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index d5ff0c164875..a5304ae8c654 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -1686,6 +1686,14 @@ perf_event_addr_filters(struct perf_event *event) return ifh; } +static inline struct fasync_struct **perf_event_fasync(struct perf_event *event) +{ + /* Only the parent has fasync state */ + if (event->parent) + event = event->parent; + return &event->fasync; +} + extern void perf_event_addr_filters_sync(struct perf_event *event); extern void perf_report_aux_output_id(struct perf_event *event, u64 hw_id); diff --git a/kernel/events/core.c b/kernel/events/core.c index 6708c1121b9f..da9d9a1f4dca 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -6703,14 +6703,6 @@ static const struct file_operations perf_fops = { * to user-space before waking everybody up. */ -static inline struct fasync_struct **perf_event_fasync(struct perf_event *event) -{ - /* only the parent has fasync state */ - if (event->parent) - event = event->parent; - return &event->fasync; -} - void perf_event_wakeup(struct perf_event *event) { ring_buffer_wakeup(event); -- cgit v1.2.3 From fd20bb51ed3913e0d25085eb79e8c0babfb4ee28 Mon Sep 17 00:00:00 2001 From: Kyle Huey Date: Sat, 13 Apr 2024 07:16:18 -0700 Subject: perf/ring_buffer: Trigger IO signals for watermark_wakeup perf_output_wakeup() already marks the perf event fd available for polling. Trigger IO signals with FASYNC too. Signed-off-by: Kyle Huey Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/20240413141618.4160-3-khuey@kylehuey.com --- kernel/events/ring_buffer.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'kernel') diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c index 60ed43d1c29e..4013408ce012 100644 --- a/kernel/events/ring_buffer.c +++ b/kernel/events/ring_buffer.c @@ -22,6 +22,10 @@ static void perf_output_wakeup(struct perf_output_handle *handle) atomic_set(&handle->rb->poll, EPOLLIN); handle->event->pending_wakeup = 1; + + if (*perf_event_fasync(handle->event) && !handle->event->pending_kill) + handle->event->pending_kill = POLL_IN; + irq_work_queue(&handle->event->pending_irq); } -- cgit v1.2.3 From 854dd99b5ddc9d90e31e5f112462a5994dd31810 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sun, 14 Apr 2024 22:33:27 +0200 Subject: perf/bpf: Mark perf_event_set_bpf_handler() and perf_event_free_bpf_handler() as inline too They can be unused with certain Kconfig variations: kernel/events/core.c:9622:13: warning: ‘perf_event_free_bpf_handler’ defined but not used [-Wunused-function] kernel/events/core.c:9586:12: warning: ‘perf_event_set_bpf_handler’ defined but not used [-Wunused-function] Since they are both single-use, mark them inline.
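[Editor's note: a two-line illustration of the warning class being silenced -- GCC warns about an unused static function under -Wunused-function, but silently discards an unused static inline:]

static int unused_plain(void) { return 0; }		/* warning: defined but not used */
static inline int unused_inline(void) { return 0; }	/* no warning; simply dropped */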
Signed-off-by: Ingo Molnar Cc: linux-kernel@vger.kernel.org Cc: Kyle Huey --- kernel/events/core.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/events/core.c b/kernel/events/core.c index da9d9a1f4dca..6b0a66ed2ae3 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -9583,9 +9583,9 @@ out: return ret; } -static int perf_event_set_bpf_handler(struct perf_event *event, - struct bpf_prog *prog, - u64 bpf_cookie) +static inline int perf_event_set_bpf_handler(struct perf_event *event, + struct bpf_prog *prog, + u64 bpf_cookie) { if (event->overflow_handler_context) /* hw breakpoint or kernel counter */ @@ -9619,7 +9619,7 @@ static int perf_event_set_bpf_handler(struct perf_event *event, return 0; } -static void perf_event_free_bpf_handler(struct perf_event *event) +static inline void perf_event_free_bpf_handler(struct perf_event *event) { struct bpf_prog *prog = event->prog; -- cgit v1.2.3