From 55d2eba8e7cd439c11cdb204898c2d384227629b Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 16 Dec 2020 12:21:36 +0100
Subject: jump_label: Fix usage in module __init

When the static_key is part of the module, and the module calls
static_key_inc/enable() from its __init section *AND* has a
static_branch_*() user in that very same __init section, things go
wobbly.

If the static_key lives outside the module, jump_label_add_module()
would append this module's sites to the key and jump_label_update()
would take the static_key_linked() branch and all would be fine.

If all the sites are outside of __init, then everything will be fine
too.

However, when all is aligned just as described above,
jump_label_update() calls __jump_label_update(.init = false) and we'll
not update sites in __init text.

Fixes: 19483677684b ("jump_label: Annotate entries that operate on __init code earlier")
Reported-by: Dexuan Cui
Signed-off-by: Peter Zijlstra (Intel)
Acked-by: Josh Poimboeuf
Tested-by: Jessica Yu
Link: https://lkml.kernel.org/r/20201216135435.GV3092@hirez.programming.kicks-ass.net
---
 kernel/jump_label.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 015ef903ce8c..c6a39d662935 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -793,6 +793,7 @@ int jump_label_text_reserved(void *start, void *end)
 static void jump_label_update(struct static_key *key)
 {
 	struct jump_entry *stop = __stop___jump_table;
+	bool init = system_state < SYSTEM_RUNNING;
 	struct jump_entry *entry;
 #ifdef CONFIG_MODULES
 	struct module *mod;
@@ -804,15 +805,16 @@ static void jump_label_update(struct static_key *key)
 
 	preempt_disable();
 	mod = __module_address((unsigned long)key);
-	if (mod)
+	if (mod) {
 		stop = mod->jump_entries + mod->num_jump_entries;
+		init = mod->state == MODULE_STATE_COMING;
+	}
 	preempt_enable();
 #endif
 	entry = static_key_entries(key);
 	/* if there are no users, entry can be NULL */
 	if (entry)
-		__jump_label_update(key, entry, stop,
-				    system_state < SYSTEM_RUNNING);
+		__jump_label_update(key, entry, stop, init);
 }
 
 #ifdef CONFIG_STATIC_KEYS_SELFTEST
--
cgit v1.2.3


From 91ea62d58bd661827c328a2c6c02a87fa4aae88b Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 18 Dec 2020 16:39:14 +0100
Subject: softirq: Avoid bad tracing / lockdep interaction

Similar to commit:

  1a63dcd8765b ("softirq: Reorder trace_softirqs_on to prevent lockdep splat")

__local_bh_enable_ip() can also call into tracing with inconsistent
state. Unlike that commit we don't need to bother about the tracepoint
because 'cnt-1' never matches preempt_count() (by construction).

Reported-by: Heiko Carstens
Signed-off-by: Peter Zijlstra (Intel)
Tested-by: Heiko Carstens
Link: https://lkml.kernel.org/r/20201218154519.GW3092@hirez.programming.kicks-ass.net
---
 kernel/softirq.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/kernel/softirq.c b/kernel/softirq.c
index 09229ad82209..0f1d3a32d53b 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -185,7 +185,7 @@ void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
 	 * Keep preemption disabled until we are done with
 	 * softirq processing:
 	 */
-	preempt_count_sub(cnt - 1);
+	__preempt_count_sub(cnt - 1);
 
 	if (unlikely(!in_interrupt() && local_softirq_pending())) {
 		/*
--
cgit v1.2.3
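
A minimal module sketch of the situation the jump_label patch above
describes may help; everything here (demo_key, demo_init, the message)
is hypothetical and not taken from the commit:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/jump_label.h>

/* The key lives inside the module, not in vmlinux. */
static DEFINE_STATIC_KEY_FALSE(demo_key);

static int __init demo_init(void)
{
	/* Enabling the key ends up in jump_label_update(). */
	static_branch_enable(&demo_key);

	/*
	 * A branch site in the module's own __init text. Before the
	 * fix, jump_label_update() passed .init = false once
	 * system_state reached SYSTEM_RUNNING, so a site like this was
	 * never patched and the branch stayed in its disabled state.
	 */
	if (static_branch_unlikely(&demo_key))
		pr_info("demo_key observed as enabled\n");

	return 0;
}
module_init(demo_init);

MODULE_LICENSE("GPL");

With the fix, a key that resolves to a module is updated with
init = (mod->state == MODULE_STATE_COMING), which holds for the whole
of demo_init(), so the __init site is patched as expected.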
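
The softirq patch's safety argument ("'cnt-1' never matches
preempt_count()") can be made concrete with a toy userspace model,
simplified from (not identical to) the kernel's definitions: the traced
preempt_count_sub() only calls into tracing when the subtraction is
about to re-enable preemption completely, i.e. when
preempt_count() == val, and inside __local_bh_enable_ip() the count
still holds the full cnt, so val = cnt - 1 can never match:

#include <stdio.h>

#define SOFTIRQ_DISABLE_OFFSET 512	/* 2 * SOFTIRQ_OFFSET in the kernel */

static int count;			/* stand-in for preempt_count() */

/* Raw helper: just the arithmetic, no instrumentation. */
static void __preempt_count_sub(int val)
{
	count -= val;
}

/* Traced variant, modelled on preempt_count_sub()/preempt_latency_stop(). */
static void preempt_count_sub(int val)
{
	if (count == val)		/* about to fully re-enable preemption */
		printf("tracepoint fires\n");
	__preempt_count_sub(val);
}

int main(void)
{
	int cnt = SOFTIRQ_DISABLE_OFFSET;

	count = cnt;			/* state on entry to __local_bh_enable_ip() */
	preempt_count_sub(cnt - 1);	/* 512 != 511: the hook stays silent */
	printf("remaining count: %d\n", count);	/* 1, dropped afterwards */
	return 0;
}

Since the hook could never fire for this call, switching to the raw
__preempt_count_sub() loses no tracepoint; it only stops the traced
helper from being entered while the softirq accounting is mid-update.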