Diffstat (limited to 'kernel')
70 files changed, 2643 insertions, 837 deletions
diff --git a/kernel/Kconfig.kexec b/kernel/Kconfig.kexec index 9bfe68fe9676..7aff28ded2f4 100644 --- a/kernel/Kconfig.kexec +++ b/kernel/Kconfig.kexec @@ -110,7 +110,7 @@ config CRASH_DUMP For more details see Documentation/admin-guide/kdump/kdump.rst For s390, this option also enables zfcpdump. - See also <file:Documentation/s390/zfcpdump.rst> + See also <file:Documentation/arch/s390/zfcpdump.rst> config CRASH_HOTPLUG bool "Update the crash elfcorehdr on system configuration changes" diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c index 85a5b306733b..1b07e6f12a07 100644 --- a/kernel/audit_tree.c +++ b/kernel/audit_tree.c @@ -87,8 +87,8 @@ static struct task_struct *prune_thread; * that makes a difference. Some. */ -static struct fsnotify_group *audit_tree_group; -static struct kmem_cache *audit_tree_mark_cachep __read_mostly; +static struct fsnotify_group *audit_tree_group __ro_after_init; +static struct kmem_cache *audit_tree_mark_cachep __ro_after_init; static struct audit_tree *alloc_tree(const char *s) { diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c index 91e82e34b51e..7a98cd176a12 100644 --- a/kernel/audit_watch.c +++ b/kernel/audit_watch.c @@ -531,7 +531,7 @@ int audit_exe_compare(struct task_struct *tsk, struct audit_fsnotify_mark *mark) if (tsk != current) return 0; - if (WARN_ON_ONCE(!current->mm)) + if (!current->mm) return 0; exe_file = get_mm_exe_file(current->mm); if (!exe_file) diff --git a/kernel/bpf/bpf_iter.c b/kernel/bpf/bpf_iter.c index 833faa04461b..0fae79164187 100644 --- a/kernel/bpf/bpf_iter.c +++ b/kernel/bpf/bpf_iter.c @@ -782,9 +782,7 @@ struct bpf_iter_num_kern { int end; /* final value, exclusive */ } __aligned(8); -__diag_push(); -__diag_ignore_all("-Wmissing-prototypes", - "Global functions as their definitions will be in vmlinux BTF"); +__bpf_kfunc_start_defs(); __bpf_kfunc int bpf_iter_num_new(struct bpf_iter_num *it, int start, int end) { @@ -843,4 +841,4 @@ __bpf_kfunc void bpf_iter_num_destroy(struct bpf_iter_num *it) s->cur = s->end = 0; } -__diag_pop(); +__bpf_kfunc_end_defs(); diff --git a/kernel/bpf/cgroup_iter.c b/kernel/bpf/cgroup_iter.c index 209e5135f9fb..f04a468cf6a7 100644 --- a/kernel/bpf/cgroup_iter.c +++ b/kernel/bpf/cgroup_iter.c @@ -282,7 +282,7 @@ static struct bpf_iter_reg bpf_cgroup_reg_info = { .ctx_arg_info_size = 1, .ctx_arg_info = { { offsetof(struct bpf_iter__cgroup, cgroup), - PTR_TO_BTF_ID_OR_NULL }, + PTR_TO_BTF_ID_OR_NULL | PTR_TRUSTED }, }, .seq_info = &cgroup_iter_seq_info, }; @@ -305,9 +305,7 @@ struct bpf_iter_css_kern { unsigned int flags; } __attribute__((aligned(8))); -__diag_push(); -__diag_ignore_all("-Wmissing-prototypes", - "Global functions as their definitions will be in vmlinux BTF"); +__bpf_kfunc_start_defs(); __bpf_kfunc int bpf_iter_css_new(struct bpf_iter_css *it, struct cgroup_subsys_state *start, unsigned int flags) @@ -358,4 +356,4 @@ __bpf_kfunc void bpf_iter_css_destroy(struct bpf_iter_css *it) { } -__diag_pop();
\ No newline at end of file +__bpf_kfunc_end_defs(); diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 08626b519ce2..cd3afe57ece3 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -64,8 +64,8 @@ #define OFF insn->off #define IMM insn->imm -struct bpf_mem_alloc bpf_global_ma, bpf_global_percpu_ma; -bool bpf_global_ma_set, bpf_global_percpu_ma_set; +struct bpf_mem_alloc bpf_global_ma; +bool bpf_global_ma_set; /* No hurry in this branch * @@ -2934,9 +2934,7 @@ static int __init bpf_global_ma_init(void) ret = bpf_mem_alloc_init(&bpf_global_ma, 0, false); bpf_global_ma_set = !ret; - ret = bpf_mem_alloc_init(&bpf_global_percpu_ma, 0, true); - bpf_global_percpu_ma_set = !ret; - return !bpf_global_ma_set || !bpf_global_percpu_ma_set; + return ret; } late_initcall(bpf_global_ma_init); #endif diff --git a/kernel/bpf/cpumask.c b/kernel/bpf/cpumask.c index 6983af8e093c..e01c741e54e7 100644 --- a/kernel/bpf/cpumask.c +++ b/kernel/bpf/cpumask.c @@ -34,9 +34,7 @@ static bool cpu_valid(u32 cpu) return cpu < nr_cpu_ids; } -__diag_push(); -__diag_ignore_all("-Wmissing-prototypes", - "Global kfuncs as their definitions will be in BTF"); +__bpf_kfunc_start_defs(); /** * bpf_cpumask_create() - Create a mutable BPF cpumask. @@ -407,7 +405,7 @@ __bpf_kfunc u32 bpf_cpumask_any_and_distribute(const struct cpumask *src1, return cpumask_any_and_distribute(src1, src2); } -__diag_pop(); +__bpf_kfunc_end_defs(); BTF_SET8_START(cpumask_kfunc_btf_ids) BTF_ID_FLAGS(func, bpf_cpumask_create, KF_ACQUIRE | KF_RET_NULL) diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index e46ac288a108..56b0c1f678ee 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -1177,13 +1177,6 @@ BPF_CALL_3(bpf_timer_init, struct bpf_timer_kern *, timer, struct bpf_map *, map ret = -EBUSY; goto out; } - if (!atomic64_read(&map->usercnt)) { - /* maps with timers must be either held by user space - * or pinned in bpffs. - */ - ret = -EPERM; - goto out; - } /* allocate hrtimer via map_kmalloc to use memcg accounting */ t = bpf_map_kmalloc_node(map, sizeof(*t), GFP_ATOMIC, map->numa_node); if (!t) { @@ -1196,7 +1189,21 @@ BPF_CALL_3(bpf_timer_init, struct bpf_timer_kern *, timer, struct bpf_map *, map rcu_assign_pointer(t->callback_fn, NULL); hrtimer_init(&t->timer, clockid, HRTIMER_MODE_REL_SOFT); t->timer.function = bpf_timer_cb; - timer->timer = t; + WRITE_ONCE(timer->timer, t); + /* Guarantee the order between timer->timer and map->usercnt. So + * when there are concurrent uref release and bpf timer init, either + * bpf_timer_cancel_and_free() called by uref release reads a no-NULL + * timer or atomic64_read() below returns a zero usercnt. + */ + smp_mb(); + if (!atomic64_read(&map->usercnt)) { + /* maps with timers must be either held by user space + * or pinned in bpffs. + */ + WRITE_ONCE(timer->timer, NULL); + kfree(t); + ret = -EPERM; + } out: __bpf_spin_unlock_irqrestore(&timer->lock); return ret; @@ -1374,7 +1381,7 @@ void bpf_timer_cancel_and_free(void *val) /* The subsequent bpf_timer_start/cancel() helpers won't be able to use * this timer, since it won't be initialized. 
*/ - timer->timer = NULL; + WRITE_ONCE(timer->timer, NULL); out: __bpf_spin_unlock_irqrestore(&timer->lock); if (!t) @@ -1886,9 +1893,7 @@ void bpf_rb_root_free(const struct btf_field *field, void *rb_root, } } -__diag_push(); -__diag_ignore_all("-Wmissing-prototypes", - "Global functions as their definitions will be in vmlinux BTF"); +__bpf_kfunc_start_defs(); __bpf_kfunc void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign) { @@ -2505,7 +2510,7 @@ __bpf_kfunc void bpf_throw(u64 cookie) WARN(1, "A call to BPF exception callback should never return\n"); } -__diag_pop(); +__bpf_kfunc_end_defs(); BTF_SET8_START(generic_btf_ids) #ifdef CONFIG_KEXEC_CORE @@ -2564,15 +2569,17 @@ BTF_ID_FLAGS(func, bpf_iter_num_destroy, KF_ITER_DESTROY) BTF_ID_FLAGS(func, bpf_iter_task_vma_new, KF_ITER_NEW | KF_RCU) BTF_ID_FLAGS(func, bpf_iter_task_vma_next, KF_ITER_NEXT | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_iter_task_vma_destroy, KF_ITER_DESTROY) +#ifdef CONFIG_CGROUPS BTF_ID_FLAGS(func, bpf_iter_css_task_new, KF_ITER_NEW | KF_TRUSTED_ARGS) BTF_ID_FLAGS(func, bpf_iter_css_task_next, KF_ITER_NEXT | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_iter_css_task_destroy, KF_ITER_DESTROY) -BTF_ID_FLAGS(func, bpf_iter_task_new, KF_ITER_NEW | KF_TRUSTED_ARGS | KF_RCU_PROTECTED) -BTF_ID_FLAGS(func, bpf_iter_task_next, KF_ITER_NEXT | KF_RET_NULL) -BTF_ID_FLAGS(func, bpf_iter_task_destroy, KF_ITER_DESTROY) BTF_ID_FLAGS(func, bpf_iter_css_new, KF_ITER_NEW | KF_TRUSTED_ARGS | KF_RCU_PROTECTED) BTF_ID_FLAGS(func, bpf_iter_css_next, KF_ITER_NEXT | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_iter_css_destroy, KF_ITER_DESTROY) +#endif +BTF_ID_FLAGS(func, bpf_iter_task_new, KF_ITER_NEW | KF_TRUSTED_ARGS | KF_RCU_PROTECTED) +BTF_ID_FLAGS(func, bpf_iter_task_next, KF_ITER_NEXT | KF_RET_NULL) +BTF_ID_FLAGS(func, bpf_iter_task_destroy, KF_ITER_DESTROY) BTF_ID_FLAGS(func, bpf_dynptr_adjust) BTF_ID_FLAGS(func, bpf_dynptr_is_null) BTF_ID_FLAGS(func, bpf_dynptr_is_rdonly) diff --git a/kernel/bpf/map_iter.c b/kernel/bpf/map_iter.c index 6fc9dae9edc8..6abd7c5df4b3 100644 --- a/kernel/bpf/map_iter.c +++ b/kernel/bpf/map_iter.c @@ -193,9 +193,7 @@ static int __init bpf_map_iter_init(void) late_initcall(bpf_map_iter_init); -__diag_push(); -__diag_ignore_all("-Wmissing-prototypes", - "Global functions as their definitions will be in vmlinux BTF"); +__bpf_kfunc_start_defs(); __bpf_kfunc s64 bpf_map_sum_elem_count(const struct bpf_map *map) { @@ -213,7 +211,7 @@ __bpf_kfunc s64 bpf_map_sum_elem_count(const struct bpf_map *map) return ret; } -__diag_pop(); +__bpf_kfunc_end_defs(); BTF_SET8_START(bpf_map_iter_kfunc_ids) BTF_ID_FLAGS(func, bpf_map_sum_elem_count, KF_TRUSTED_ARGS) diff --git a/kernel/bpf/task_iter.c b/kernel/bpf/task_iter.c index 654601dd6b49..26082b97894d 100644 --- a/kernel/bpf/task_iter.c +++ b/kernel/bpf/task_iter.c @@ -704,7 +704,7 @@ static struct bpf_iter_reg task_reg_info = { .ctx_arg_info_size = 1, .ctx_arg_info = { { offsetof(struct bpf_iter__task, task), - PTR_TO_BTF_ID_OR_NULL }, + PTR_TO_BTF_ID_OR_NULL | PTR_TRUSTED }, }, .seq_info = &task_seq_info, .fill_link_info = bpf_iter_fill_link_info, @@ -822,9 +822,7 @@ struct bpf_iter_task_vma_kern { struct bpf_iter_task_vma_kern_data *data; } __attribute__((aligned(8))); -__diag_push(); -__diag_ignore_all("-Wmissing-prototypes", - "Global functions as their definitions will be in vmlinux BTF"); +__bpf_kfunc_start_defs(); __bpf_kfunc int bpf_iter_task_vma_new(struct bpf_iter_task_vma *it, struct task_struct *task, u64 addr) @@ -890,7 +888,9 @@ __bpf_kfunc void 
bpf_iter_task_vma_destroy(struct bpf_iter_task_vma *it) } } -__diag_pop(); +__bpf_kfunc_end_defs(); + +#ifdef CONFIG_CGROUPS struct bpf_iter_css_task { __u64 __opaque[1]; @@ -900,9 +900,7 @@ struct bpf_iter_css_task_kern { struct css_task_iter *css_it; } __attribute__((aligned(8))); -__diag_push(); -__diag_ignore_all("-Wmissing-prototypes", - "Global functions as their definitions will be in vmlinux BTF"); +__bpf_kfunc_start_defs(); __bpf_kfunc int bpf_iter_css_task_new(struct bpf_iter_css_task *it, struct cgroup_subsys_state *css, unsigned int flags) @@ -948,7 +946,9 @@ __bpf_kfunc void bpf_iter_css_task_destroy(struct bpf_iter_css_task *it) bpf_mem_free(&bpf_global_ma, kit->css_it); } -__diag_pop(); +__bpf_kfunc_end_defs(); + +#endif /* CONFIG_CGROUPS */ struct bpf_iter_task { __u64 __opaque[3]; @@ -969,9 +969,7 @@ enum { BPF_TASK_ITER_PROC_THREADS }; -__diag_push(); -__diag_ignore_all("-Wmissing-prototypes", - "Global functions as their definitions will be in vmlinux BTF"); +__bpf_kfunc_start_defs(); __bpf_kfunc int bpf_iter_task_new(struct bpf_iter_task *it, struct task_struct *task__nullable, unsigned int flags) @@ -1041,7 +1039,7 @@ __bpf_kfunc void bpf_iter_task_destroy(struct bpf_iter_task *it) { } -__diag_pop(); +__bpf_kfunc_end_defs(); DEFINE_PER_CPU(struct mmap_unlock_irq_work, mmap_unlock_work); diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 857d76694517..af2819d5c8ee 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -26,6 +26,7 @@ #include <linux/poison.h> #include <linux/module.h> #include <linux/cpumask.h> +#include <linux/bpf_mem_alloc.h> #include <net/xdp.h> #include "disasm.h" @@ -41,6 +42,9 @@ static const struct bpf_verifier_ops * const bpf_verifier_ops[] = { #undef BPF_LINK_TYPE }; +struct bpf_mem_alloc bpf_global_percpu_ma; +static bool bpf_global_percpu_ma_set; + /* bpf_check() is a static code analyzer that walks eBPF program * instruction by instruction and updates register/stack state. * All paths of conditional branches are analyzed until 'bpf_exit' insn. 
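Aside: the bpf_timer_init() hunk above moves the map->usercnt check to after the timer->timer assignment and puts an smp_mb() between them, pairing with the uref-release side that drops usercnt to zero and then has bpf_timer_cancel_and_free() read timer->timer. That is the classic store-buffering pattern: with full ordering on both sides, at least one side must observe the other's store, so the timer is always freed by exactly one of them. Below is a minimal userspace sketch of that property only, not kernel code: C11 seq_cst atomics stand in for WRITE_ONCE/READ_ONCE plus smp_mb(), and the timer->lock that serializes the actual free in the patch is deliberately omitted.

/* Store-buffering sketch: with seq_cst (full-barrier) ordering, the outcome
 * where neither thread sees the other's store is forbidden, which is the
 * property the bpf_timer_init() comment relies on.
 */
#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

static atomic_int timer_set;   /* stands in for timer->timer != NULL */
static atomic_int uref_gone;   /* stands in for map->usercnt == 0    */
static int init_saw_release, release_saw_timer;

static void *timer_init_side(void *arg)
{
	(void)arg;
	atomic_store(&timer_set, 1);                 /* WRITE_ONCE(timer->timer, t)        */
	/* the seq_cst store/load pair plays the role of smp_mb() in the patch  */
	init_saw_release = atomic_load(&uref_gone);  /* !atomic64_read(&map->usercnt)      */
	return NULL;
}

static void *uref_release_side(void *arg)
{
	(void)arg;
	atomic_store(&uref_gone, 1);                 /* usercnt drops to zero              */
	release_saw_timer = atomic_load(&timer_set); /* cancel_and_free() reads timer->timer */
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, timer_init_side, NULL);
	pthread_create(&b, NULL, uref_release_side, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	/* 0/0 is impossible here; with relaxed ordering it would be allowed,
	 * which corresponds to the leaked-timer window the patch closes. */
	printf("init saw release: %d, release saw timer: %d\n",
	       init_saw_release, release_saw_timer);
	return 0;
}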
@@ -336,6 +340,7 @@ struct bpf_kfunc_call_arg_meta { struct btf *btf_vmlinux; static DEFINE_MUTEX(bpf_verifier_lock); +static DEFINE_MUTEX(bpf_percpu_ma_lock); static const struct bpf_line_info * find_linfo(const struct bpf_verifier_env *env, u32 insn_off) @@ -542,13 +547,12 @@ static bool is_dynptr_ref_function(enum bpf_func_id func_id) return func_id == BPF_FUNC_dynptr_data; } -static bool is_callback_calling_kfunc(u32 btf_id); +static bool is_sync_callback_calling_kfunc(u32 btf_id); static bool is_bpf_throw_kfunc(struct bpf_insn *insn); -static bool is_callback_calling_function(enum bpf_func_id func_id) +static bool is_sync_callback_calling_function(enum bpf_func_id func_id) { return func_id == BPF_FUNC_for_each_map_elem || - func_id == BPF_FUNC_timer_set_callback || func_id == BPF_FUNC_find_vma || func_id == BPF_FUNC_loop || func_id == BPF_FUNC_user_ringbuf_drain; @@ -559,6 +563,18 @@ static bool is_async_callback_calling_function(enum bpf_func_id func_id) return func_id == BPF_FUNC_timer_set_callback; } +static bool is_callback_calling_function(enum bpf_func_id func_id) +{ + return is_sync_callback_calling_function(func_id) || + is_async_callback_calling_function(func_id); +} + +static bool is_sync_callback_calling_insn(struct bpf_insn *insn) +{ + return (bpf_helper_call(insn) && is_sync_callback_calling_function(insn->imm)) || + (bpf_pseudo_kfunc_call(insn) && is_sync_callback_calling_kfunc(insn->imm)); +} + static bool is_storage_get_function(enum bpf_func_id func_id) { return func_id == BPF_FUNC_sk_storage_get || @@ -1803,6 +1819,7 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state, dst_state->first_insn_idx = src->first_insn_idx; dst_state->last_insn_idx = src->last_insn_idx; dst_state->dfs_depth = src->dfs_depth; + dst_state->callback_unroll_depth = src->callback_unroll_depth; dst_state->used_as_loop_entry = src->used_as_loop_entry; for (i = 0; i <= src->curframe; i++) { dst = dst_state->frame[i]; @@ -3434,13 +3451,11 @@ static void mark_insn_zext(struct bpf_verifier_env *env, reg->subreg_def = DEF_NOT_SUBREG; } -static int check_reg_arg(struct bpf_verifier_env *env, u32 regno, - enum reg_arg_type t) +static int __check_reg_arg(struct bpf_verifier_env *env, struct bpf_reg_state *regs, u32 regno, + enum reg_arg_type t) { - struct bpf_verifier_state *vstate = env->cur_state; - struct bpf_func_state *state = vstate->frame[vstate->curframe]; struct bpf_insn *insn = env->prog->insnsi + env->insn_idx; - struct bpf_reg_state *reg, *regs = state->regs; + struct bpf_reg_state *reg; bool rw64; if (regno >= MAX_BPF_REG) { @@ -3481,6 +3496,15 @@ static int check_reg_arg(struct bpf_verifier_env *env, u32 regno, return 0; } +static int check_reg_arg(struct bpf_verifier_env *env, u32 regno, + enum reg_arg_type t) +{ + struct bpf_verifier_state *vstate = env->cur_state; + struct bpf_func_state *state = vstate->frame[vstate->curframe]; + + return __check_reg_arg(env, state->regs, regno, t); +} + static void mark_jmp_point(struct bpf_verifier_env *env, int idx) { env->insn_aux_data[idx].jmp_point = true; @@ -3516,12 +3540,29 @@ static int push_jmp_history(struct bpf_verifier_env *env, /* Backtrack one insn at a time. If idx is not at the top of recorded * history then previous instruction came from straight line execution. + * Return -ENOENT if we exhausted all instructions within given state. 
+ * + * It's legal to have a bit of a looping with the same starting and ending + * insn index within the same state, e.g.: 3->4->5->3, so just because current + * instruction index is the same as state's first_idx doesn't mean we are + * done. If there is still some jump history left, we should keep going. We + * need to take into account that we might have a jump history between given + * state's parent and itself, due to checkpointing. In this case, we'll have + * history entry recording a jump from last instruction of parent state and + * first instruction of given state. */ static int get_prev_insn_idx(struct bpf_verifier_state *st, int i, u32 *history) { u32 cnt = *history; + if (i == st->first_insn_idx) { + if (cnt == 0) + return -ENOENT; + if (cnt == 1 && st->jmp_history[0].idx == i) + return -ENOENT; + } + if (cnt && st->jmp_history[cnt - 1].idx == i) { i = st->jmp_history[cnt - 1].prev_idx; (*history)--; @@ -3702,6 +3743,8 @@ static void fmt_stack_mask(char *buf, ssize_t buf_sz, u64 stack_mask) } } +static bool calls_callback(struct bpf_verifier_env *env, int insn_idx); + /* For given verifier state backtrack_insn() is called from the last insn to * the first insn. Its purpose is to compute a bitmask of registers and * stack slots that needs precision in the parent verifier state. @@ -3742,7 +3785,12 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx, if (class == BPF_ALU || class == BPF_ALU64) { if (!bt_is_reg_set(bt, dreg)) return 0; - if (opcode == BPF_MOV) { + if (opcode == BPF_END || opcode == BPF_NEG) { + /* sreg is reserved and unused + * dreg still need precision before this insn + */ + return 0; + } else if (opcode == BPF_MOV) { if (BPF_SRC(insn->code) == BPF_X) { /* dreg = sreg or dreg = (s8, s16, s32)sreg * dreg needs precision after this insn @@ -3872,16 +3920,13 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx, return -EFAULT; return 0; } - } else if ((bpf_helper_call(insn) && - is_callback_calling_function(insn->imm) && - !is_async_callback_calling_function(insn->imm)) || - (bpf_pseudo_kfunc_call(insn) && is_callback_calling_kfunc(insn->imm))) { - /* callback-calling helper or kfunc call, which means - * we are exiting from subprog, but unlike the subprog - * call handling above, we shouldn't propagate - * precision of r1-r5 (if any requested), as they are - * not actually arguments passed directly to callback - * subprogs + } else if (is_sync_callback_calling_insn(insn) && idx != subseq_idx - 1) { + /* exit from callback subprog to callback-calling helper or + * kfunc call. Use idx/subseq_idx check to discern it from + * straight line code backtracking. + * Unlike the subprog call handling above, we shouldn't + * propagate precision of r1-r5 (if any requested), as they are + * not actually arguments passed directly to callback subprogs */ if (bt_reg_mask(bt) & ~BPF_REGMASK_ARGS) { verbose(env, "BUG regs %x\n", bt_reg_mask(bt)); @@ -3916,10 +3961,18 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx, } else if (opcode == BPF_EXIT) { bool r0_precise; + /* Backtracking to a nested function call, 'idx' is a part of + * the inner frame 'subseq_idx' is a part of the outer frame. + * In case of a regular function call, instructions giving + * precision to registers R1-R5 should have been found already. + * In case of a callback, it is ok to have R1-R5 marked for + * backtracking, as these registers are set by the function + * invoking callback. 
+ */ + if (subseq_idx >= 0 && calls_callback(env, subseq_idx)) + for (i = BPF_REG_1; i <= BPF_REG_5; i++) + bt_clear_reg(bt, i); if (bt_reg_mask(bt) & BPF_REGMASK_ARGS) { - /* if backtracing was looking for registers R1-R5 - * they should have been found already. - */ verbose(env, "BUG regs %x\n", bt_reg_mask(bt)); WARN_ONCE(1, "verifier backtracking bug"); return -EFAULT; @@ -4396,10 +4449,10 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno) * Nothing to be tracked further in the parent state. */ return 0; - if (i == first_idx) - break; subseq_idx = i; i = get_prev_insn_idx(st, i, &history); + if (i == -ENOENT) + break; if (i >= env->prog->len) { /* This can happen if backtracking reached insn 0 * and there are still reg_mask or stack_mask @@ -4674,7 +4727,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env, insn->imm != 0 && env->bpf_capable) { struct bpf_reg_state fake_reg = {}; - __mark_reg_known(&fake_reg, (u32)insn->imm); + __mark_reg_known(&fake_reg, insn->imm); fake_reg.type = SCALAR_VALUE; save_register_state(state, spi, &fake_reg, size); } else if (reg && is_spillable_regtype(reg->type)) { @@ -5388,7 +5441,9 @@ static bool in_rcu_cs(struct bpf_verifier_env *env) /* Once GCC supports btf_type_tag the following mechanism will be replaced with tag check */ BTF_SET_START(rcu_protected_types) BTF_ID(struct, prog_test_ref_kfunc) +#ifdef CONFIG_CGROUPS BTF_ID(struct, cgroup) +#endif BTF_ID(struct, bpf_cpumask) BTF_ID(struct, task_struct) BTF_SET_END(rcu_protected_types) @@ -9321,7 +9376,7 @@ static void clear_caller_saved_regs(struct bpf_verifier_env *env, /* after the call registers r0 - r5 were scratched */ for (i = 0; i < CALLER_SAVED_REGS; i++) { mark_reg_not_init(env, regs, caller_saved[i]); - check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); + __check_reg_arg(env, regs, caller_saved[i], DST_OP_NO_MARK); } } @@ -9334,11 +9389,10 @@ static int set_callee_state(struct bpf_verifier_env *env, struct bpf_func_state *caller, struct bpf_func_state *callee, int insn_idx); -static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, - int *insn_idx, int subprog, - set_callee_state_fn set_callee_state_cb) +static int setup_func_entry(struct bpf_verifier_env *env, int subprog, int callsite, + set_callee_state_fn set_callee_state_cb, + struct bpf_verifier_state *state) { - struct bpf_verifier_state *state = env->cur_state; struct bpf_func_state *caller, *callee; int err; @@ -9348,54 +9402,72 @@ static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn return -E2BIG; } - caller = state->frame[state->curframe]; if (state->frame[state->curframe + 1]) { verbose(env, "verifier bug. Frame %d already allocated\n", state->curframe + 1); return -EFAULT; } + caller = state->frame[state->curframe]; + callee = kzalloc(sizeof(*callee), GFP_KERNEL); + if (!callee) + return -ENOMEM; + state->frame[state->curframe + 1] = callee; + + /* callee cannot access r0, r6 - r9 for reading and has to write + * into its own stack before reading from it. 
+ * callee can read/write into caller's stack + */ + init_func_state(env, callee, + /* remember the callsite, it will be used by bpf_exit */ + callsite, + state->curframe + 1 /* frameno within this callchain */, + subprog /* subprog number within this prog */); + /* Transfer references to the callee */ + err = copy_reference_state(callee, caller); + err = err ?: set_callee_state_cb(env, caller, callee, callsite); + if (err) + goto err_out; + + /* only increment it after check_reg_arg() finished */ + state->curframe++; + + return 0; + +err_out: + free_func_state(callee); + state->frame[state->curframe + 1] = NULL; + return err; +} + +static int push_callback_call(struct bpf_verifier_env *env, struct bpf_insn *insn, + int insn_idx, int subprog, + set_callee_state_fn set_callee_state_cb) +{ + struct bpf_verifier_state *state = env->cur_state, *callback_state; + struct bpf_func_state *caller, *callee; + int err; + + caller = state->frame[state->curframe]; err = btf_check_subprog_call(env, subprog, caller->regs); if (err == -EFAULT) return err; - if (subprog_is_global(env, subprog)) { - if (err) { - verbose(env, "Caller passes invalid args into func#%d\n", - subprog); - return err; - } else { - if (env->log.level & BPF_LOG_LEVEL) - verbose(env, - "Func#%d is global and valid. Skipping.\n", - subprog); - clear_caller_saved_regs(env, caller->regs); - - /* All global functions return a 64-bit SCALAR_VALUE */ - mark_reg_unknown(env, caller->regs, BPF_REG_0); - caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG; - - /* continue with next insn after call */ - return 0; - } - } /* set_callee_state is used for direct subprog calls, but we are * interested in validating only BPF helpers that can call subprogs as * callbacks */ - if (set_callee_state_cb != set_callee_state) { - env->subprog_info[subprog].is_cb = true; - if (bpf_pseudo_kfunc_call(insn) && - !is_callback_calling_kfunc(insn->imm)) { - verbose(env, "verifier bug: kfunc %s#%d not marked as callback-calling\n", - func_id_name(insn->imm), insn->imm); - return -EFAULT; - } else if (!bpf_pseudo_kfunc_call(insn) && - !is_callback_calling_function(insn->imm)) { /* helper */ - verbose(env, "verifier bug: helper %s#%d not marked as callback-calling\n", - func_id_name(insn->imm), insn->imm); - return -EFAULT; - } + env->subprog_info[subprog].is_cb = true; + if (bpf_pseudo_kfunc_call(insn) && + !is_sync_callback_calling_kfunc(insn->imm)) { + verbose(env, "verifier bug: kfunc %s#%d not marked as callback-calling\n", + func_id_name(insn->imm), insn->imm); + return -EFAULT; + } else if (!bpf_pseudo_kfunc_call(insn) && + !is_callback_calling_function(insn->imm)) { /* helper */ + verbose(env, "verifier bug: helper %s#%d not marked as callback-calling\n", + func_id_name(insn->imm), insn->imm); + return -EFAULT; } if (insn->code == (BPF_JMP | BPF_CALL) && @@ -9406,53 +9478,83 @@ static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn /* there is no real recursion here. 
timer callbacks are async */ env->subprog_info[subprog].is_async_cb = true; async_cb = push_async_cb(env, env->subprog_info[subprog].start, - *insn_idx, subprog); + insn_idx, subprog); if (!async_cb) return -EFAULT; callee = async_cb->frame[0]; callee->async_entry_cnt = caller->async_entry_cnt + 1; /* Convert bpf_timer_set_callback() args into timer callback args */ - err = set_callee_state_cb(env, caller, callee, *insn_idx); + err = set_callee_state_cb(env, caller, callee, insn_idx); if (err) return err; + return 0; + } + + /* for callback functions enqueue entry to callback and + * proceed with next instruction within current frame. + */ + callback_state = push_stack(env, env->subprog_info[subprog].start, insn_idx, false); + if (!callback_state) + return -ENOMEM; + + err = setup_func_entry(env, subprog, insn_idx, set_callee_state_cb, + callback_state); + if (err) + return err; + + callback_state->callback_unroll_depth++; + callback_state->frame[callback_state->curframe - 1]->callback_depth++; + caller->callback_depth = 0; + return 0; +} + +static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, + int *insn_idx) +{ + struct bpf_verifier_state *state = env->cur_state; + struct bpf_func_state *caller; + int err, subprog, target_insn; + + target_insn = *insn_idx + insn->imm + 1; + subprog = find_subprog(env, target_insn); + if (subprog < 0) { + verbose(env, "verifier bug. No program starts at insn %d\n", target_insn); + return -EFAULT; + } + + caller = state->frame[state->curframe]; + err = btf_check_subprog_call(env, subprog, caller->regs); + if (err == -EFAULT) + return err; + if (subprog_is_global(env, subprog)) { + if (err) { + verbose(env, "Caller passes invalid args into func#%d\n", subprog); + return err; + } + + if (env->log.level & BPF_LOG_LEVEL) + verbose(env, "Func#%d is global and valid. Skipping.\n", subprog); clear_caller_saved_regs(env, caller->regs); + + /* All global functions return a 64-bit SCALAR_VALUE */ mark_reg_unknown(env, caller->regs, BPF_REG_0); caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG; + /* continue with next insn after call */ return 0; } - callee = kzalloc(sizeof(*callee), GFP_KERNEL); - if (!callee) - return -ENOMEM; - state->frame[state->curframe + 1] = callee; - - /* callee cannot access r0, r6 - r9 for reading and has to write - * into its own stack before reading from it. - * callee can read/write into caller's stack + /* for regular function entry setup new frame and continue + * from that frame. 
*/ - init_func_state(env, callee, - /* remember the callsite, it will be used by bpf_exit */ - *insn_idx /* callsite */, - state->curframe + 1 /* frameno within this callchain */, - subprog /* subprog number within this prog */); - - /* Transfer references to the callee */ - err = copy_reference_state(callee, caller); - if (err) - goto err_out; - - err = set_callee_state_cb(env, caller, callee, *insn_idx); + err = setup_func_entry(env, subprog, *insn_idx, set_callee_state, state); if (err) - goto err_out; + return err; clear_caller_saved_regs(env, caller->regs); - /* only increment it after check_reg_arg() finished */ - state->curframe++; - /* and go analyze first insn of the callee */ *insn_idx = env->subprog_info[subprog].start - 1; @@ -9460,14 +9562,10 @@ static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn verbose(env, "caller:\n"); print_verifier_state(env, caller, true); verbose(env, "callee:\n"); - print_verifier_state(env, callee, true); + print_verifier_state(env, state->frame[state->curframe], true); } - return 0; -err_out: - free_func_state(callee); - state->frame[state->curframe + 1] = NULL; - return err; + return 0; } int map_set_for_each_callback_args(struct bpf_verifier_env *env, @@ -9511,22 +9609,6 @@ static int set_callee_state(struct bpf_verifier_env *env, return 0; } -static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, - int *insn_idx) -{ - int subprog, target_insn; - - target_insn = *insn_idx + insn->imm + 1; - subprog = find_subprog(env, target_insn); - if (subprog < 0) { - verbose(env, "verifier bug. No program starts at insn %d\n", - target_insn); - return -EFAULT; - } - - return __check_func_call(env, insn, insn_idx, subprog, set_callee_state); -} - static int set_map_elem_callback_state(struct bpf_verifier_env *env, struct bpf_func_state *caller, struct bpf_func_state *callee, @@ -9719,9 +9801,10 @@ static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env) static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx) { - struct bpf_verifier_state *state = env->cur_state; + struct bpf_verifier_state *state = env->cur_state, *prev_st; struct bpf_func_state *caller, *callee; struct bpf_reg_state *r0; + bool in_callback_fn; int err; callee = state->frame[state->curframe]; @@ -9750,6 +9833,11 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx) verbose_invalid_scalar(env, r0, &range, "callback return", "R0"); return -EINVAL; } + if (!calls_callback(env, callee->callsite)) { + verbose(env, "BUG: in callback at %d, callsite %d !calls_callback\n", + *insn_idx, callee->callsite); + return -EFAULT; + } } else { /* return to the caller whatever r0 had in the callee */ caller->regs[BPF_REG_0] = *r0; @@ -9767,7 +9855,16 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx) return err; } - *insn_idx = callee->callsite + 1; + /* for callbacks like bpf_loop or bpf_for_each_map_elem go back to callsite, + * there function call logic would reschedule callback visit. If iteration + * converges is_state_visited() would prune that visit eventually. 
+ */ + in_callback_fn = callee->in_callback_fn; + if (in_callback_fn) + *insn_idx = callee->callsite; + else + *insn_idx = callee->callsite + 1; + if (env->log.level & BPF_LOG_LEVEL) { verbose(env, "returning from callee:\n"); print_verifier_state(env, callee, true); @@ -9778,6 +9875,24 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx) * bpf_throw, this will be done by copy_verifier_state for extra frames. */ free_func_state(callee); state->frame[state->curframe--] = NULL; + + /* for callbacks widen imprecise scalars to make programs like below verify: + * + * struct ctx { int i; } + * void cb(int idx, struct ctx *ctx) { ctx->i++; ... } + * ... + * struct ctx = { .i = 0; } + * bpf_loop(100, cb, &ctx, 0); + * + * This is similar to what is done in process_iter_next_call() for open + * coded iterators. + */ + prev_st = in_callback_fn ? find_prev_entry(env, state, *insn_idx) : NULL; + if (prev_st) { + err = widen_imprecise_scalars(env, prev_st, state); + if (err) + return err; + } return 0; } @@ -10180,24 +10295,37 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn } break; case BPF_FUNC_for_each_map_elem: - err = __check_func_call(env, insn, insn_idx_p, meta.subprogno, - set_map_elem_callback_state); + err = push_callback_call(env, insn, insn_idx, meta.subprogno, + set_map_elem_callback_state); break; case BPF_FUNC_timer_set_callback: - err = __check_func_call(env, insn, insn_idx_p, meta.subprogno, - set_timer_callback_state); + err = push_callback_call(env, insn, insn_idx, meta.subprogno, + set_timer_callback_state); break; case BPF_FUNC_find_vma: - err = __check_func_call(env, insn, insn_idx_p, meta.subprogno, - set_find_vma_callback_state); + err = push_callback_call(env, insn, insn_idx, meta.subprogno, + set_find_vma_callback_state); break; case BPF_FUNC_snprintf: err = check_bpf_snprintf_call(env, regs); break; case BPF_FUNC_loop: update_loop_inline_state(env, meta.subprogno); - err = __check_func_call(env, insn, insn_idx_p, meta.subprogno, - set_loop_callback_state); + /* Verifier relies on R1 value to determine if bpf_loop() iteration + * is finished, thus mark it precise. 
+ */ + err = mark_chain_precision(env, BPF_REG_1); + if (err) + return err; + if (cur_func(env)->callback_depth < regs[BPF_REG_1].umax_value) { + err = push_callback_call(env, insn, insn_idx, meta.subprogno, + set_loop_callback_state); + } else { + cur_func(env)->callback_depth = 0; + if (env->log.level & BPF_LOG_LEVEL2) + verbose(env, "frame%d bpf_loop iteration limit reached\n", + env->cur_state->curframe); + } break; case BPF_FUNC_dynptr_from_mem: if (regs[BPF_REG_1].type != PTR_TO_MAP_VALUE) { @@ -10293,8 +10421,8 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn break; } case BPF_FUNC_user_ringbuf_drain: - err = __check_func_call(env, insn, insn_idx_p, meta.subprogno, - set_user_ringbuf_callback_state); + err = push_callback_call(env, insn, insn_idx, meta.subprogno, + set_user_ringbuf_callback_state); break; } @@ -10835,7 +10963,9 @@ BTF_ID(func, bpf_dynptr_clone) BTF_ID(func, bpf_percpu_obj_new_impl) BTF_ID(func, bpf_percpu_obj_drop_impl) BTF_ID(func, bpf_throw) +#ifdef CONFIG_CGROUPS BTF_ID(func, bpf_iter_css_task_new) +#endif BTF_SET_END(special_kfunc_set) BTF_ID_LIST(special_kfunc_list) @@ -10861,7 +10991,11 @@ BTF_ID(func, bpf_dynptr_clone) BTF_ID(func, bpf_percpu_obj_new_impl) BTF_ID(func, bpf_percpu_obj_drop_impl) BTF_ID(func, bpf_throw) +#ifdef CONFIG_CGROUPS BTF_ID(func, bpf_iter_css_task_new) +#else +BTF_ID_UNUSED +#endif static bool is_kfunc_ret_null(struct bpf_kfunc_call_arg_meta *meta) { @@ -11176,7 +11310,7 @@ static bool is_bpf_graph_api_kfunc(u32 btf_id) btf_id == special_kfunc_list[KF_bpf_refcount_acquire_impl]; } -static bool is_callback_calling_kfunc(u32 btf_id) +static bool is_sync_callback_calling_kfunc(u32 btf_id) { return btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl]; } @@ -11394,6 +11528,12 @@ static int process_kf_arg_ptr_to_rbtree_node(struct bpf_verifier_env *env, &meta->arg_rbtree_root.field); } +/* + * css_task iter allowlist is needed to avoid dead locking on css_set_lock. + * LSM hooks and iters (both sleepable and non-sleepable) are safe. + * Any sleepable progs are also safe since bpf_check_attach_target() enforce + * them can only be attached to some specific hook points. 
+ */ static bool check_css_task_iter_allowlist(struct bpf_verifier_env *env) { enum bpf_prog_type prog_type = resolve_prog_type(env->prog); @@ -11401,10 +11541,12 @@ static bool check_css_task_iter_allowlist(struct bpf_verifier_env *env) switch (prog_type) { case BPF_PROG_TYPE_LSM: return true; - case BPF_TRACE_ITER: - return env->prog->aux->sleepable; + case BPF_PROG_TYPE_TRACING: + if (env->prog->expected_attach_type == BPF_TRACE_ITER) + return true; + fallthrough; default: - return false; + return env->prog->aux->sleepable; } } @@ -11663,7 +11805,7 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_ case KF_ARG_PTR_TO_ITER: if (meta->func_id == special_kfunc_list[KF_bpf_iter_css_task_new]) { if (!check_css_task_iter_allowlist(env)) { - verbose(env, "css_task_iter is only allowed in bpf_lsm and bpf iter-s\n"); + verbose(env, "css_task_iter is only allowed in bpf_lsm, bpf_iter and sleepable progs\n"); return -EINVAL; } } @@ -11920,6 +12062,21 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, return -EACCES; } + /* Check the arguments */ + err = check_kfunc_args(env, &meta, insn_idx); + if (err < 0) + return err; + + if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) { + err = push_callback_call(env, insn, insn_idx, meta.subprogno, + set_rbtree_add_callback_state); + if (err) { + verbose(env, "kfunc %s#%d failed callback verification\n", + func_name, meta.func_id); + return err; + } + } + rcu_lock = is_kfunc_bpf_rcu_read_lock(&meta); rcu_unlock = is_kfunc_bpf_rcu_read_unlock(&meta); @@ -11955,10 +12112,6 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, return -EINVAL; } - /* Check the arguments */ - err = check_kfunc_args(env, &meta, insn_idx); - if (err < 0) - return err; /* In case of release function, we get register number of refcounted * PTR_TO_BTF_ID in bpf_kfunc_arg_meta, do the release now. 
*/ @@ -11992,16 +12145,6 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, } } - if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) { - err = __check_func_call(env, insn, insn_idx_p, meta.subprogno, - set_rbtree_add_callback_state); - if (err) { - verbose(env, "kfunc %s#%d failed callback verification\n", - func_name, meta.func_id); - return err; - } - } - if (meta.func_id == special_kfunc_list[KF_bpf_throw]) { if (!bpf_jit_supports_exceptions()) { verbose(env, "JIT does not support calling kfunc %s#%d\n", @@ -12053,8 +12196,19 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, if (meta.func_id == special_kfunc_list[KF_bpf_obj_new_impl] && !bpf_global_ma_set) return -ENOMEM; - if (meta.func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl] && !bpf_global_percpu_ma_set) - return -ENOMEM; + if (meta.func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) { + if (!bpf_global_percpu_ma_set) { + mutex_lock(&bpf_percpu_ma_lock); + if (!bpf_global_percpu_ma_set) { + err = bpf_mem_alloc_init(&bpf_global_percpu_ma, 0, true); + if (!err) + bpf_global_percpu_ma_set = true; + } + mutex_unlock(&bpf_percpu_ma_lock); + if (err) + return err; + } + } if (((u64)(u32)meta.arg_constant.value) != meta.arg_constant.value) { verbose(env, "local type ID argument must be in range [0, U32_MAX]\n"); @@ -15354,6 +15508,15 @@ static bool is_force_checkpoint(struct bpf_verifier_env *env, int insn_idx) return env->insn_aux_data[insn_idx].force_checkpoint; } +static void mark_calls_callback(struct bpf_verifier_env *env, int idx) +{ + env->insn_aux_data[idx].calls_callback = true; +} + +static bool calls_callback(struct bpf_verifier_env *env, int insn_idx) +{ + return env->insn_aux_data[insn_idx].calls_callback; +} enum { DONE_EXPLORING = 0, @@ -15365,8 +15528,7 @@ enum { * w - next instruction * e - edge */ -static int push_insn(int t, int w, int e, struct bpf_verifier_env *env, - bool loop_ok) +static int push_insn(int t, int w, int e, struct bpf_verifier_env *env) { int *insn_stack = env->cfg.insn_stack; int *insn_state = env->cfg.insn_state; @@ -15398,7 +15560,7 @@ static int push_insn(int t, int w, int e, struct bpf_verifier_env *env, insn_stack[env->cfg.cur_stack++] = w; return KEEP_EXPLORING; } else if ((insn_state[w] & 0xF0) == DISCOVERED) { - if (loop_ok && env->bpf_capable) + if (env->bpf_capable) return DONE_EXPLORING; verbose_linfo(env, t, "%d: ", t); verbose_linfo(env, w, "%d: ", w); @@ -15418,24 +15580,20 @@ static int visit_func_call_insn(int t, struct bpf_insn *insns, struct bpf_verifier_env *env, bool visit_callee) { - int ret; + int ret, insn_sz; - ret = push_insn(t, t + 1, FALLTHROUGH, env, false); + insn_sz = bpf_is_ldimm64(&insns[t]) ? 2 : 1; + ret = push_insn(t, t + insn_sz, FALLTHROUGH, env); if (ret) return ret; - mark_prune_point(env, t + 1); + mark_prune_point(env, t + insn_sz); /* when we exit from subprog, we need to record non-linear history */ - mark_jmp_point(env, t + 1); + mark_jmp_point(env, t + insn_sz); if (visit_callee) { mark_prune_point(env, t); - ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env, - /* It's ok to allow recursion from CFG point of - * view. __check_func_call() will do the actual - * check. 
- */ - bpf_pseudo_func(insns + t)); + ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env); } return ret; } @@ -15448,15 +15606,17 @@ static int visit_func_call_insn(int t, struct bpf_insn *insns, static int visit_insn(int t, struct bpf_verifier_env *env) { struct bpf_insn *insns = env->prog->insnsi, *insn = &insns[t]; - int ret, off; + int ret, off, insn_sz; if (bpf_pseudo_func(insn)) return visit_func_call_insn(t, insns, env, true); /* All non-branch instructions have a single fall-through edge. */ if (BPF_CLASS(insn->code) != BPF_JMP && - BPF_CLASS(insn->code) != BPF_JMP32) - return push_insn(t, t + 1, FALLTHROUGH, env, false); + BPF_CLASS(insn->code) != BPF_JMP32) { + insn_sz = bpf_is_ldimm64(insn) ? 2 : 1; + return push_insn(t, t + insn_sz, FALLTHROUGH, env); + } switch (BPF_OP(insn->code)) { case BPF_EXIT: @@ -15470,6 +15630,21 @@ static int visit_insn(int t, struct bpf_verifier_env *env) * async state will be pushed for further exploration. */ mark_prune_point(env, t); + /* For functions that invoke callbacks it is not known how many times + * callback would be called. Verifier models callback calling functions + * by repeatedly visiting callback bodies and returning to origin call + * instruction. + * In order to stop such iteration verifier needs to identify when a + * state identical some state from a previous iteration is reached. + * Check below forces creation of checkpoint before callback calling + * instruction to allow search for such identical states. + */ + if (is_sync_callback_calling_insn(insn)) { + mark_calls_callback(env, t); + mark_force_checkpoint(env, t); + mark_prune_point(env, t); + mark_jmp_point(env, t); + } if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) { struct bpf_kfunc_call_arg_meta meta; @@ -15502,8 +15677,7 @@ static int visit_insn(int t, struct bpf_verifier_env *env) off = insn->imm; /* unconditional jump with single edge */ - ret = push_insn(t, t + off + 1, FALLTHROUGH, env, - true); + ret = push_insn(t, t + off + 1, FALLTHROUGH, env); if (ret) return ret; @@ -15516,11 +15690,11 @@ static int visit_insn(int t, struct bpf_verifier_env *env) /* conditional jump with two edges */ mark_prune_point(env, t); - ret = push_insn(t, t + 1, FALLTHROUGH, env, true); + ret = push_insn(t, t + 1, FALLTHROUGH, env); if (ret) return ret; - return push_insn(t, t + insn->off + 1, BRANCH, env, true); + return push_insn(t, t + insn->off + 1, BRANCH, env); } } @@ -15586,11 +15760,21 @@ walk_cfg: } for (i = 0; i < insn_cnt; i++) { + struct bpf_insn *insn = &env->prog->insnsi[i]; + if (insn_state[i] != EXPLORED) { verbose(env, "unreachable insn %d\n", i); ret = -EINVAL; goto err_free; } + if (bpf_is_ldimm64(insn)) { + if (insn_state[i + 1] != 0) { + verbose(env, "jump into the middle of ldimm64 insn %d\n", i); + ret = -EINVAL; + goto err_free; + } + i++; /* skip second half of ldimm64 */ + } } ret = 0; /* cfg looks good */ @@ -16930,10 +17114,16 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) } goto skip_inf_loop_check; } + if (calls_callback(env, insn_idx)) { + if (states_equal(env, &sl->state, cur, true)) + goto hit; + goto skip_inf_loop_check; + } /* attempt to detect infinite loop to avoid unnecessary doomed work */ if (states_maybe_looping(&sl->state, cur) && states_equal(env, &sl->state, cur, false) && - !iter_active_depths_differ(&sl->state, cur)) { + !iter_active_depths_differ(&sl->state, cur) && + sl->state.callback_unroll_depth == cur->callback_unroll_depth) { verbose_linfo(env, insn_idx, "; "); verbose(env, "infinite loop detected at insn 
%d\n", insn_idx); verbose(env, "cur state:"); diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index 484adb375b15..4b9ff41ca603 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -1908,6 +1908,7 @@ enum cgroup2_param { Opt_favordynmods, Opt_memory_localevents, Opt_memory_recursiveprot, + Opt_memory_hugetlb_accounting, nr__cgroup2_params }; @@ -1916,6 +1917,7 @@ static const struct fs_parameter_spec cgroup2_fs_parameters[] = { fsparam_flag("favordynmods", Opt_favordynmods), fsparam_flag("memory_localevents", Opt_memory_localevents), fsparam_flag("memory_recursiveprot", Opt_memory_recursiveprot), + fsparam_flag("memory_hugetlb_accounting", Opt_memory_hugetlb_accounting), {} }; @@ -1942,6 +1944,9 @@ static int cgroup2_parse_param(struct fs_context *fc, struct fs_parameter *param case Opt_memory_recursiveprot: ctx->flags |= CGRP_ROOT_MEMORY_RECURSIVE_PROT; return 0; + case Opt_memory_hugetlb_accounting: + ctx->flags |= CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING; + return 0; } return -EINVAL; } @@ -1966,6 +1971,11 @@ static void apply_cgroup_root_flags(unsigned int root_flags) cgrp_dfl_root.flags |= CGRP_ROOT_MEMORY_RECURSIVE_PROT; else cgrp_dfl_root.flags &= ~CGRP_ROOT_MEMORY_RECURSIVE_PROT; + + if (root_flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING) + cgrp_dfl_root.flags |= CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING; + else + cgrp_dfl_root.flags &= ~CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING; } } @@ -1979,6 +1989,8 @@ static int cgroup_show_options(struct seq_file *seq, struct kernfs_root *kf_root seq_puts(seq, ",memory_localevents"); if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT) seq_puts(seq, ",memory_recursiveprot"); + if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING) + seq_puts(seq, ",memory_hugetlb_accounting"); return 0; } @@ -3873,14 +3885,6 @@ static __poll_t cgroup_pressure_poll(struct kernfs_open_file *of, return psi_trigger_poll(&ctx->psi.trigger, of->file, pt); } -static int cgroup_pressure_open(struct kernfs_open_file *of) -{ - if (of->file->f_mode & FMODE_WRITE && !capable(CAP_SYS_RESOURCE)) - return -EPERM; - - return 0; -} - static void cgroup_pressure_release(struct kernfs_open_file *of) { struct cgroup_file_ctx *ctx = of->priv; @@ -5287,7 +5291,6 @@ static struct cftype cgroup_psi_files[] = { { .name = "io.pressure", .file_offset = offsetof(struct cgroup, psi_files[PSI_IO]), - .open = cgroup_pressure_open, .seq_show = cgroup_io_pressure_show, .write = cgroup_io_pressure_write, .poll = cgroup_pressure_poll, @@ -5296,7 +5299,6 @@ static struct cftype cgroup_psi_files[] = { { .name = "memory.pressure", .file_offset = offsetof(struct cgroup, psi_files[PSI_MEM]), - .open = cgroup_pressure_open, .seq_show = cgroup_memory_pressure_show, .write = cgroup_memory_pressure_write, .poll = cgroup_pressure_poll, @@ -5305,7 +5307,6 @@ static struct cftype cgroup_psi_files[] = { { .name = "cpu.pressure", .file_offset = offsetof(struct cgroup, psi_files[PSI_CPU]), - .open = cgroup_pressure_open, .seq_show = cgroup_cpu_pressure_show, .write = cgroup_cpu_pressure_write, .poll = cgroup_pressure_poll, @@ -5315,7 +5316,6 @@ static struct cftype cgroup_psi_files[] = { { .name = "irq.pressure", .file_offset = offsetof(struct cgroup, psi_files[PSI_IRQ]), - .open = cgroup_pressure_open, .seq_show = cgroup_irq_pressure_show, .write = cgroup_irq_pressure_write, .poll = cgroup_pressure_poll, @@ -7068,7 +7068,8 @@ static ssize_t features_show(struct kobject *kobj, struct kobj_attribute *attr, "nsdelegate\n" "favordynmods\n" "memory_localevents\n" - "memory_recursiveprot\n"); 
+ "memory_recursiveprot\n" + "memory_hugetlb_accounting\n"); } static struct kobj_attribute cgroup_features_attr = __ATTR_RO(features); diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c index d80d7a608141..c0adb7254b45 100644 --- a/kernel/cgroup/rstat.c +++ b/kernel/cgroup/rstat.c @@ -156,19 +156,16 @@ static struct cgroup *cgroup_rstat_cpu_pop_updated(struct cgroup *pos, * optimize away the callsite. Therefore, __weak is needed to ensure that the * call is still emitted, by telling the compiler that we don't know what the * function might eventually be. - * - * __diag_* below are needed to dismiss the missing prototype warning. */ -__diag_push(); -__diag_ignore_all("-Wmissing-prototypes", - "kfuncs which will be used in BPF programs"); + +__bpf_hook_start(); __weak noinline void bpf_rstat_flush(struct cgroup *cgrp, struct cgroup *parent, int cpu) { } -__diag_pop(); +__bpf_hook_end(); /* see cgroup_rstat_flush() */ static void cgroup_rstat_flush_locked(struct cgroup *cgrp) diff --git a/kernel/cpu.c b/kernel/cpu.c index 9e4c6780adde..a86972a91991 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -2113,7 +2113,7 @@ static struct cpuhp_step cpuhp_hp_states[] = { [CPUHP_HRTIMERS_PREPARE] = { .name = "hrtimers:prepare", .startup.single = hrtimers_prepare_cpu, - .teardown.single = hrtimers_dead_cpu, + .teardown.single = NULL, }, [CPUHP_SMPCFD_PREPARE] = { .name = "smpcfd:prepare", @@ -2205,6 +2205,12 @@ static struct cpuhp_step cpuhp_hp_states[] = { .startup.single = NULL, .teardown.single = smpcfd_dying_cpu, }, + [CPUHP_AP_HRTIMERS_DYING] = { + .name = "hrtimers:dying", + .startup.single = NULL, + .teardown.single = hrtimers_cpu_dying, + }, + /* Entry state on starting. Interrupts enabled from here on. Transient * state for synchronsization */ [CPUHP_AP_ONLINE] = { diff --git a/kernel/crash_core.c b/kernel/crash_core.c index 2f675ef045d4..efe87d501c8c 100644 --- a/kernel/crash_core.c +++ b/kernel/crash_core.c @@ -5,7 +5,6 @@ */ #include <linux/buildid.h> -#include <linux/crash_core.h> #include <linux/init.h> #include <linux/utsname.h> #include <linux/vmalloc.h> @@ -13,6 +12,9 @@ #include <linux/kexec.h> #include <linux/memory.h> #include <linux/cpuhotplug.h> +#include <linux/memblock.h> +#include <linux/kexec.h> +#include <linux/kmemleak.h> #include <asm/page.h> #include <asm/sections.h> @@ -33,6 +35,22 @@ u32 *vmcoreinfo_note; /* trusted vmcoreinfo, e.g. we can make a copy in the crash memory */ static unsigned char *vmcoreinfo_data_safecopy; +/* Location of the reserved area for the crash kernel */ +struct resource crashk_res = { + .name = "Crash kernel", + .start = 0, + .end = 0, + .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM, + .desc = IORES_DESC_CRASH_KERNEL +}; +struct resource crashk_low_res = { + .name = "Crash kernel", + .start = 0, + .end = 0, + .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM, + .desc = IORES_DESC_CRASH_KERNEL +}; + /* * parsing the "crashkernel" commandline * @@ -248,11 +266,11 @@ static int __init __parse_crashkernel(char *cmdline, unsigned long long system_ram, unsigned long long *crash_size, unsigned long long *crash_base, - const char *name, const char *suffix) { char *first_colon, *first_space; char *ck_cmdline; + char *name = "crashkernel="; BUG_ON(!crash_size || !crash_base); *crash_size = 0; @@ -283,32 +301,53 @@ static int __init __parse_crashkernel(char *cmdline, /* * That function is the entry point for command line parsing and should be * called from the arch-specific code. 
+ * + * If crashkernel=,high|low is supported on architecture, non-NULL values + * should be passed to parameters 'low_size' and 'high'. */ int __init parse_crashkernel(char *cmdline, unsigned long long system_ram, unsigned long long *crash_size, - unsigned long long *crash_base) + unsigned long long *crash_base, + unsigned long long *low_size, + bool *high) { - return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base, - "crashkernel=", NULL); -} + int ret; -int __init parse_crashkernel_high(char *cmdline, - unsigned long long system_ram, - unsigned long long *crash_size, - unsigned long long *crash_base) -{ - return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base, - "crashkernel=", suffix_tbl[SUFFIX_HIGH]); -} + /* crashkernel=X[@offset] */ + ret = __parse_crashkernel(cmdline, system_ram, crash_size, + crash_base, NULL); +#ifdef CONFIG_ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION + /* + * If non-NULL 'high' passed in and no normal crashkernel + * setting detected, try parsing crashkernel=,high|low. + */ + if (high && ret == -ENOENT) { + ret = __parse_crashkernel(cmdline, 0, crash_size, + crash_base, suffix_tbl[SUFFIX_HIGH]); + if (ret || !*crash_size) + return -EINVAL; -int __init parse_crashkernel_low(char *cmdline, - unsigned long long system_ram, - unsigned long long *crash_size, - unsigned long long *crash_base) -{ - return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base, - "crashkernel=", suffix_tbl[SUFFIX_LOW]); + /* + * crashkernel=Y,low can be specified or not, but invalid value + * is not allowed. + */ + ret = __parse_crashkernel(cmdline, 0, low_size, + crash_base, suffix_tbl[SUFFIX_LOW]); + if (ret == -ENOENT) { + *low_size = DEFAULT_CRASH_KERNEL_LOW_SIZE; + ret = 0; + } else if (ret) { + return ret; + } + + *high = true; + } +#endif + if (!*crash_size) + ret = -EINVAL; + + return ret; } /* @@ -321,6 +360,109 @@ static int __init parse_crashkernel_dummy(char *arg) } early_param("crashkernel", parse_crashkernel_dummy); +#ifdef CONFIG_ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION +static int __init reserve_crashkernel_low(unsigned long long low_size) +{ +#ifdef CONFIG_64BIT + unsigned long long low_base; + + low_base = memblock_phys_alloc_range(low_size, CRASH_ALIGN, 0, CRASH_ADDR_LOW_MAX); + if (!low_base) { + pr_err("cannot allocate crashkernel low memory (size:0x%llx).\n", low_size); + return -ENOMEM; + } + + pr_info("crashkernel low memory reserved: 0x%08llx - 0x%08llx (%lld MB)\n", + low_base, low_base + low_size, low_size >> 20); + + crashk_low_res.start = low_base; + crashk_low_res.end = low_base + low_size - 1; + insert_resource(&iomem_resource, &crashk_low_res); +#endif + return 0; +} + +void __init reserve_crashkernel_generic(char *cmdline, + unsigned long long crash_size, + unsigned long long crash_base, + unsigned long long crash_low_size, + bool high) +{ + unsigned long long search_end = CRASH_ADDR_LOW_MAX, search_base = 0; + bool fixed_base = false; + + /* User specifies base address explicitly. */ + if (crash_base) { + fixed_base = true; + search_base = crash_base; + search_end = crash_base + crash_size; + } else if (high) { + search_base = CRASH_ADDR_LOW_MAX; + search_end = CRASH_ADDR_HIGH_MAX; + } + +retry: + crash_base = memblock_phys_alloc_range(crash_size, CRASH_ALIGN, + search_base, search_end); + if (!crash_base) { + /* + * For crashkernel=size[KMG]@offset[KMG], print out failure + * message if can't reserve the specified region. 
+ */ + if (fixed_base) { + pr_warn("crashkernel reservation failed - memory is in use.\n"); + return; + } + + /* + * For crashkernel=size[KMG], if the first attempt was for + * low memory, fall back to high memory, the minimum required + * low memory will be reserved later. + */ + if (!high && search_end == CRASH_ADDR_LOW_MAX) { + search_end = CRASH_ADDR_HIGH_MAX; + search_base = CRASH_ADDR_LOW_MAX; + crash_low_size = DEFAULT_CRASH_KERNEL_LOW_SIZE; + goto retry; + } + + /* + * For crashkernel=size[KMG],high, if the first attempt was + * for high memory, fall back to low memory. + */ + if (high && search_end == CRASH_ADDR_HIGH_MAX) { + search_end = CRASH_ADDR_LOW_MAX; + search_base = 0; + goto retry; + } + pr_warn("cannot allocate crashkernel (size:0x%llx)\n", + crash_size); + return; + } + + if ((crash_base > CRASH_ADDR_LOW_MAX) && + crash_low_size && reserve_crashkernel_low(crash_low_size)) { + memblock_phys_free(crash_base, crash_size); + return; + } + + pr_info("crashkernel reserved: 0x%016llx - 0x%016llx (%lld MB)\n", + crash_base, crash_base + crash_size, crash_size >> 20); + + /* + * The crashkernel memory will be removed from the kernel linear + * map. Inform kmemleak so that it won't try to access it. + */ + kmemleak_ignore_phys(crash_base); + if (crashk_low_res.end) + kmemleak_ignore_phys(crashk_low_res.start); + + crashk_res.start = crash_base; + crashk_res.end = crash_base + crash_size - 1; + insert_resource(&iomem_resource, &crashk_res); +} +#endif + int crash_prepare_elf64_headers(struct crash_mem *mem, int need_kernel_map, void **addr, unsigned long *sz) { diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c index 621037a0aa87..ce1bb2301c06 100644 --- a/kernel/debug/debug_core.c +++ b/kernel/debug/debug_core.c @@ -1006,6 +1006,9 @@ void kgdb_panic(const char *msg) if (panic_timeout) return; + debug_locks_off(); + console_flush_on_panic(CONSOLE_FLUSH_PENDING); + if (dbg_kdb_mode) kdb_printf("PANIC: %s\n", msg); diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c index 438b868cbfa9..6b213c8252d6 100644 --- a/kernel/debug/kdb/kdb_main.c +++ b/kernel/debug/kdb/kdb_main.c @@ -272,11 +272,10 @@ char *kdbgetenv(const char *match) * kdballocenv - This function is used to allocate bytes for * environment entries. * Parameters: - * match A character string representing a numeric value - * Outputs: - * *value the unsigned long representation of the env variable 'match' + * bytes The number of bytes to allocate in the static buffer. * Returns: - * Zero on success, a kdb diagnostic on failure. + * A pointer to the allocated space in the buffer on success. + * NULL if bytes > size available in the envbuffer. * Remarks: * We use a static environment buffer (envbuffer) to hold the values * of dynamically generated environment variables (see kdb_set). 
Buffer diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c index ed3056eb20b8..73c95815789a 100644 --- a/kernel/dma/direct.c +++ b/kernel/dma/direct.c @@ -587,6 +587,46 @@ int dma_direct_supported(struct device *dev, u64 mask) return mask >= phys_to_dma_unencrypted(dev, min_mask); } +/* + * To check whether all ram resource ranges are covered by dma range map + * Returns 0 when further check is needed + * Returns 1 if there is some RAM range can't be covered by dma_range_map + */ +static int check_ram_in_range_map(unsigned long start_pfn, + unsigned long nr_pages, void *data) +{ + unsigned long end_pfn = start_pfn + nr_pages; + const struct bus_dma_region *bdr = NULL; + const struct bus_dma_region *m; + struct device *dev = data; + + while (start_pfn < end_pfn) { + for (m = dev->dma_range_map; PFN_DOWN(m->size); m++) { + unsigned long cpu_start_pfn = PFN_DOWN(m->cpu_start); + + if (start_pfn >= cpu_start_pfn && + start_pfn - cpu_start_pfn < PFN_DOWN(m->size)) { + bdr = m; + break; + } + } + if (!bdr) + return 1; + + start_pfn = PFN_DOWN(bdr->cpu_start) + PFN_DOWN(bdr->size); + } + + return 0; +} + +bool dma_direct_all_ram_mapped(struct device *dev) +{ + if (!dev->dma_range_map) + return true; + return !walk_system_ram_range(0, PFN_DOWN(ULONG_MAX) + 1, dev, + check_ram_in_range_map); +} + size_t dma_direct_max_mapping_size(struct device *dev) { /* If SWIOTLB is active, use its maximum mapping size */ diff --git a/kernel/dma/direct.h b/kernel/dma/direct.h index 97ec892ea0b5..18d346118fe8 100644 --- a/kernel/dma/direct.h +++ b/kernel/dma/direct.h @@ -20,6 +20,7 @@ int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma, bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr); int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction dir, unsigned long attrs); +bool dma_direct_all_ram_mapped(struct device *dev); size_t dma_direct_max_mapping_size(struct device *dev); #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \ diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c index e323ca48f7f2..58db8fd70471 100644 --- a/kernel/dma/mapping.c +++ b/kernel/dma/mapping.c @@ -793,6 +793,28 @@ int dma_set_coherent_mask(struct device *dev, u64 mask) } EXPORT_SYMBOL(dma_set_coherent_mask); +/** + * dma_addressing_limited - return if the device is addressing limited + * @dev: device to check + * + * Return %true if the devices DMA mask is too small to address all memory in + * the system, else %false. Lack of addressing bits is the prime reason for + * bounce buffering, but might not be the only one. 
+ */ +bool dma_addressing_limited(struct device *dev) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + + if (min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) < + dma_get_required_mask(dev)) + return true; + + if (unlikely(ops)) + return false; + return !dma_direct_all_ram_mapped(dev); +} +EXPORT_SYMBOL_GPL(dma_addressing_limited); + size_t dma_max_mapping_size(struct device *dev) { const struct dma_map_ops *ops = get_dma_ops(dev); diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index 26202274784f..33d942615be5 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -283,7 +283,8 @@ static void swiotlb_init_io_tlb_pool(struct io_tlb_pool *mem, phys_addr_t start, } for (i = 0; i < mem->nslabs; i++) { - mem->slots[i].list = IO_TLB_SEGSIZE - io_tlb_offset(i); + mem->slots[i].list = min(IO_TLB_SEGSIZE - io_tlb_offset(i), + mem->nslabs - i); mem->slots[i].orig_addr = INVALID_PHYS_ADDR; mem->slots[i].alloc_size = 0; } @@ -558,29 +559,40 @@ void __init swiotlb_exit(void) * alloc_dma_pages() - allocate pages to be used for DMA * @gfp: GFP flags for the allocation. * @bytes: Size of the buffer. + * @phys_limit: Maximum allowed physical address of the buffer. * * Allocate pages from the buddy allocator. If successful, make the allocated * pages decrypted that they can be used for DMA. * - * Return: Decrypted pages, or %NULL on failure. + * Return: Decrypted pages, %NULL on allocation failure, or ERR_PTR(-EAGAIN) + * if the allocated physical address was above @phys_limit. */ -static struct page *alloc_dma_pages(gfp_t gfp, size_t bytes) +static struct page *alloc_dma_pages(gfp_t gfp, size_t bytes, u64 phys_limit) { unsigned int order = get_order(bytes); struct page *page; + phys_addr_t paddr; void *vaddr; page = alloc_pages(gfp, order); if (!page) return NULL; - vaddr = page_address(page); + paddr = page_to_phys(page); + if (paddr + bytes - 1 > phys_limit) { + __free_pages(page, order); + return ERR_PTR(-EAGAIN); + } + + vaddr = phys_to_virt(paddr); if (set_memory_decrypted((unsigned long)vaddr, PFN_UP(bytes))) goto error; return page; error: - __free_pages(page, order); + /* Intentional leak if pages cannot be encrypted again. */ + if (!set_memory_encrypted((unsigned long)vaddr, PFN_UP(bytes))) + __free_pages(page, order); return NULL; } @@ -618,11 +630,7 @@ static struct page *swiotlb_alloc_tlb(struct device *dev, size_t bytes, else if (phys_limit <= DMA_BIT_MASK(32)) gfp |= __GFP_DMA32; - while ((page = alloc_dma_pages(gfp, bytes)) && - page_to_phys(page) + bytes - 1 > phys_limit) { - /* allocated, but too high */ - __free_pages(page, get_order(bytes)); - + while (IS_ERR(page = alloc_dma_pages(gfp, bytes, phys_limit))) { if (IS_ENABLED(CONFIG_ZONE_DMA32) && phys_limit < DMA_BIT_MASK(64) && !(gfp & (__GFP_DMA32 | __GFP_DMA))) diff --git a/kernel/events/core.c b/kernel/events/core.c index 683dc086ef10..b704d83a28b2 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -4828,6 +4828,11 @@ find_get_pmu_context(struct pmu *pmu, struct perf_event_context *ctx, void *task_ctx_data = NULL; if (!ctx->task) { + /* + * perf_pmu_migrate_context() / __perf_pmu_install_event() + * relies on the fact that find_get_pmu_context() cannot fail + * for CPU contexts. 
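A minimal usage sketch for the dma_addressing_limited() helper added above; the probe-time caller foo_check_dma() and its reaction are hypothetical, only the helper itself is part of this change:

#include <linux/dma-mapping.h>

static void foo_check_dma(struct device *dev)
{
	/* True when the mask, bus limit or dma_range_map cannot cover all RAM. */
	if (dma_addressing_limited(dev))
		dev_info(dev, "DMA cannot reach all memory, expect bounce buffering\n");
}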
+ */ struct perf_cpu_pmu_context *cpc; cpc = per_cpu_ptr(pmu->cpu_pmu_context, event->cpu); @@ -12889,6 +12894,9 @@ static void __perf_pmu_install_event(struct pmu *pmu, int cpu, struct perf_event *event) { struct perf_event_pmu_context *epc; + struct perf_event_context *old_ctx = event->ctx; + + get_ctx(ctx); /* normally find_get_context() */ event->cpu = cpu; epc = find_get_pmu_context(pmu, ctx, event); @@ -12897,6 +12905,11 @@ static void __perf_pmu_install_event(struct pmu *pmu, if (event->state >= PERF_EVENT_STATE_OFF) event->state = PERF_EVENT_STATE_INACTIVE; perf_install_in_context(ctx, event, cpu); + + /* + * Now that event->ctx is updated and visible, put the old ctx. + */ + put_ctx(old_ctx); } static void __perf_pmu_install(struct perf_event_context *ctx, @@ -12935,6 +12948,10 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu) struct perf_event_context *src_ctx, *dst_ctx; LIST_HEAD(events); + /* + * Since per-cpu context is persistent, no need to grab an extra + * reference. + */ src_ctx = &per_cpu_ptr(&perf_cpu_context, src_cpu)->ctx; dst_ctx = &per_cpu_ptr(&perf_cpu_context, dst_cpu)->ctx; diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 3048589e2e85..435aac1d8c27 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -474,8 +474,8 @@ retry: gup_flags |= FOLL_SPLIT_PMD; /* Read the page with vaddr into memory */ old_page = get_user_page_vma_remote(mm, vaddr, gup_flags, &vma); - if (IS_ERR_OR_NULL(old_page)) - return old_page ? PTR_ERR(old_page) : 0; + if (IS_ERR(old_page)) + return PTR_ERR(old_page); ret = verify_opcode(old_page, vaddr, &opcode); if (ret <= 0) diff --git a/kernel/exit.c b/kernel/exit.c index 2b4a232f2f68..ee9f43bed49a 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -135,7 +135,6 @@ static void __unhash_process(struct task_struct *p, bool group_dead) list_del_init(&p->sibling); __this_cpu_dec(process_counts); } - list_del_rcu(&p->thread_group); list_del_rcu(&p->thread_node); } @@ -541,7 +540,6 @@ static void exit_mm(void) exit_mm_release(current, mm); if (!mm) return; - sync_mm_rss(mm); mmap_read_lock(mm); mmgrab_lazy_tlb(mm); BUG_ON(mm != current->active_mm); @@ -831,9 +829,6 @@ void __noreturn do_exit(long code) io_uring_files_cancel(); exit_signals(tsk); /* sets PF_EXITING */ - /* sync mm's RSS info before statistics gathering */ - if (tsk->mm) - sync_mm_rss(tsk->mm); acct_update_integrals(tsk); group_dead = atomic_dec_and_test(&tsk->signal->live); if (group_dead) { diff --git a/kernel/fork.c b/kernel/fork.c index 70e301b63a7b..10917c3e1f03 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -733,7 +733,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm, get_file(file); i_mmap_lock_write(mapping); - if (tmp->vm_flags & VM_SHARED) + if (vma_is_shared_maywrite(tmp)) mapping_allow_writable(mapping); flush_dcache_mmap_lock(mapping); /* insert tmp into the share list, just after mpnt */ @@ -1288,7 +1288,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p, hugetlb_count_init(mm); if (current->mm) { - mm->flags = current->mm->flags & MMF_INIT_MASK; + mm->flags = mmf_init_flags(current->mm->flags); mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK; } else { mm->flags = default_dump_filter; @@ -2411,10 +2411,6 @@ __latent_entropy struct task_struct *copy_process( p->io_uring = NULL; #endif -#if defined(SPLIT_RSS_COUNTING) - memset(&p->rss_stat, 0, sizeof(p->rss_stat)); -#endif - p->default_timer_slack_ns = current->timer_slack_ns; #ifdef CONFIG_PSI @@ -2581,7 
+2577,6 @@ __latent_entropy struct task_struct *copy_process( p->dirty_paused_when = 0; p->pdeath_signal = 0; - INIT_LIST_HEAD(&p->thread_group); p->task_works = NULL; clear_posix_cputimers_work(p); @@ -2709,8 +2704,6 @@ __latent_entropy struct task_struct *copy_process( atomic_inc(&current->signal->live); refcount_inc(&current->signal->sigcnt); task_join_group_stop(p); - list_add_tail_rcu(&p->thread_group, - &p->group_leader->thread_group); list_add_tail_rcu(&p->thread_node, &p->signal->thread_head); } diff --git a/kernel/futex/core.c b/kernel/futex/core.c index 52695c59d041..dad981a865b8 100644 --- a/kernel/futex/core.c +++ b/kernel/futex/core.c @@ -700,7 +700,8 @@ retry: owner = uval & FUTEX_TID_MASK; if (pending_op && !pi && !owner) { - futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY); + futex_wake(uaddr, FLAGS_SIZE_32 | FLAGS_SHARED, 1, + FUTEX_BITSET_MATCH_ANY); return 0; } @@ -752,8 +753,10 @@ retry: * Wake robust non-PI futexes here. The wakeup of * PI futexes happens in exit_pi_state(): */ - if (!pi && (uval & FUTEX_WAITERS)) - futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY); + if (!pi && (uval & FUTEX_WAITERS)) { + futex_wake(uaddr, FLAGS_SIZE_32 | FLAGS_SHARED, 1, + FUTEX_BITSET_MATCH_ANY); + } return 0; } diff --git a/kernel/gcov/fs.c b/kernel/gcov/fs.c index 5c3086cad8f9..01520689b57c 100644 --- a/kernel/gcov/fs.c +++ b/kernel/gcov/fs.c @@ -99,7 +99,7 @@ struct gcov_iterator { struct gcov_info *info; size_t size; loff_t pos; - char buffer[]; + char buffer[] __counted_by(size); }; /** diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index d309ba84e08a..1782f90cd8c6 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -1852,15 +1852,13 @@ out_thread: struct task_struct *t = new->thread; new->thread = NULL; - kthread_stop(t); - put_task_struct(t); + kthread_stop_put(t); } if (new->secondary && new->secondary->thread) { struct task_struct *t = new->secondary->thread; new->secondary->thread = NULL; - kthread_stop(t); - put_task_struct(t); + kthread_stop_put(t); } out_mput: module_put(desc->owner); @@ -1971,12 +1969,9 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id) * the same bit to a newly requested action.
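The gcov hunk above annotates a flexible array with __counted_by(); a generic sketch of how such an annotation is kept consistent (struct example_blob and example_alloc() are illustrative, not from this patch):

#include <linux/overflow.h>
#include <linux/slab.h>

struct example_blob {
	size_t len;
	u8 data[] __counted_by(len);	/* bounds checkers trust @len */
};

static struct example_blob *example_alloc(size_t n)
{
	struct example_blob *blob = kzalloc(struct_size(blob, data, n), GFP_KERNEL);

	if (blob)
		blob->len = n;		/* keep the counter in sync with the allocation */
	return blob;
}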
*/ if (action->thread) { - kthread_stop(action->thread); - put_task_struct(action->thread); - if (action->secondary && action->secondary->thread) { - kthread_stop(action->secondary->thread); - put_task_struct(action->secondary->thread); - } + kthread_stop_put(action->thread); + if (action->secondary && action->secondary->thread) + kthread_stop_put(action->secondary->thread); } /* Last action releases resources */ diff --git a/kernel/kcsan/kcsan_test.c b/kernel/kcsan/kcsan_test.c index 0ddbdab5903d..015586217875 100644 --- a/kernel/kcsan/kcsan_test.c +++ b/kernel/kcsan/kcsan_test.c @@ -699,12 +699,9 @@ static void test_barrier_nothreads(struct kunit *test) KCSAN_EXPECT_RW_BARRIER(spin_unlock(&test_spinlock), true); KCSAN_EXPECT_RW_BARRIER(mutex_lock(&test_mutex), false); KCSAN_EXPECT_RW_BARRIER(mutex_unlock(&test_mutex), true); - -#ifdef clear_bit_unlock_is_negative_byte - KCSAN_EXPECT_READ_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var), true); - KCSAN_EXPECT_WRITE_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var), true); - KCSAN_EXPECT_RW_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var), true); -#endif + KCSAN_EXPECT_READ_BARRIER(xor_unlock_is_negative_byte(1, &test_var), true); + KCSAN_EXPECT_WRITE_BARRIER(xor_unlock_is_negative_byte(1, &test_var), true); + KCSAN_EXPECT_RW_BARRIER(xor_unlock_is_negative_byte(1, &test_var), true); kcsan_nestable_atomic_end(); } diff --git a/kernel/kcsan/selftest.c b/kernel/kcsan/selftest.c index 8679322450f2..84a1200271af 100644 --- a/kernel/kcsan/selftest.c +++ b/kernel/kcsan/selftest.c @@ -227,12 +227,9 @@ static bool __init test_barrier(void) KCSAN_CHECK_RW_BARRIER(arch_spin_unlock(&arch_spinlock)); spin_lock(&test_spinlock); KCSAN_CHECK_RW_BARRIER(spin_unlock(&test_spinlock)); - -#ifdef clear_bit_unlock_is_negative_byte - KCSAN_CHECK_RW_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var)); - KCSAN_CHECK_READ_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var)); - KCSAN_CHECK_WRITE_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var)); -#endif + KCSAN_CHECK_RW_BARRIER(xor_unlock_is_negative_byte(1, &test_var)); + KCSAN_CHECK_READ_BARRIER(xor_unlock_is_negative_byte(1, &test_var)); + KCSAN_CHECK_WRITE_BARRIER(xor_unlock_is_negative_byte(1, &test_var)); kcsan_nestable_atomic_end(); return ret; diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c index 9dc728982d79..be5642a4ec49 100644 --- a/kernel/kexec_core.c +++ b/kernel/kexec_core.c @@ -52,23 +52,6 @@ atomic_t __kexec_lock = ATOMIC_INIT(0); /* Flag to indicate we are going to kexec a new kernel */ bool kexec_in_progress = false; - -/* Location of the reserved area for the crash kernel */ -struct resource crashk_res = { - .name = "Crash kernel", - .start = 0, - .end = 0, - .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM, - .desc = IORES_DESC_CRASH_KERNEL -}; -struct resource crashk_low_res = { - .name = "Crash kernel", - .start = 0, - .end = 0, - .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM, - .desc = IORES_DESC_CRASH_KERNEL -}; - int kexec_should_crash(struct task_struct *p) { /* diff --git a/kernel/kthread.c b/kernel/kthread.c index 1eea53050bab..c5e40830c1f2 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -715,6 +715,24 @@ int kthread_stop(struct task_struct *k) } EXPORT_SYMBOL(kthread_stop); +/** + * kthread_stop_put - stop a thread and put its task struct + * @k: thread created by kthread_create(). + * + * Stops a thread created by kthread_create() and put its task_struct. 
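A short usage sketch for kthread_stop_put() as documented above, mirroring the conversions in kernel/irq/manage.c; the example_* names are hypothetical:

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched/task.h>

static struct task_struct *example_worker;

static int example_fn(void *unused)
{
	while (!kthread_should_stop())
		msleep(10);
	return 0;
}

static int example_start(void)
{
	example_worker = kthread_run(example_fn, NULL, "example-worker");
	if (IS_ERR(example_worker))
		return PTR_ERR(example_worker);

	/* Extra reference so the task_struct stays valid until teardown. */
	get_task_struct(example_worker);
	return 0;
}

static void example_stop(void)
{
	/* One call instead of the old kthread_stop() + put_task_struct() pair. */
	kthread_stop_put(example_worker);
}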
+ * Only use when holding an extra task struct reference obtained by + * calling get_task_struct(). + */ +int kthread_stop_put(struct task_struct *k) +{ + int ret; + + ret = kthread_stop(k); + put_task_struct(k); + return ret; +} +EXPORT_SYMBOL(kthread_stop_put); + int kthreadd(void *unused) { struct task_struct *tsk = current; @@ -1469,7 +1487,6 @@ void kthread_unuse_mm(struct mm_struct *mm) * clearing tsk->mm. */ smp_mb__after_spinlock(); - sync_mm_rss(mm); local_irq_disable(); tsk->mm = NULL; membarrier_update_current_mm(NULL); diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index 61328328c474..ecbc9b6aba3a 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c @@ -243,7 +243,7 @@ static int klp_resolve_symbols(Elf_Shdr *sechdrs, const char *strtab, * symbols are exported and normal relas can be used instead. */ if (!sec_vmlinux && sym_vmlinux) { - pr_err("invalid access to vmlinux symbol '%s' from module-specific livepatch relocation section", + pr_err("invalid access to vmlinux symbol '%s' from module-specific livepatch relocation section\n", sym_name); return -EINVAL; } diff --git a/kernel/module/Kconfig b/kernel/module/Kconfig index 33a2e991f608..0ea1b2970a23 100644 --- a/kernel/module/Kconfig +++ b/kernel/module/Kconfig @@ -236,14 +236,6 @@ choice possible to load a signed module containing the algorithm to check the signature on that module. -config MODULE_SIG_SHA1 - bool "Sign modules with SHA-1" - select CRYPTO_SHA1 - -config MODULE_SIG_SHA224 - bool "Sign modules with SHA-224" - select CRYPTO_SHA256 - config MODULE_SIG_SHA256 bool "Sign modules with SHA-256" select CRYPTO_SHA256 @@ -256,16 +248,29 @@ config MODULE_SIG_SHA512 bool "Sign modules with SHA-512" select CRYPTO_SHA512 +config MODULE_SIG_SHA3_256 + bool "Sign modules with SHA3-256" + select CRYPTO_SHA3 + +config MODULE_SIG_SHA3_384 + bool "Sign modules with SHA3-384" + select CRYPTO_SHA3 + +config MODULE_SIG_SHA3_512 + bool "Sign modules with SHA3-512" + select CRYPTO_SHA3 + endchoice config MODULE_SIG_HASH string depends on MODULE_SIG || IMA_APPRAISE_MODSIG - default "sha1" if MODULE_SIG_SHA1 - default "sha224" if MODULE_SIG_SHA224 default "sha256" if MODULE_SIG_SHA256 default "sha384" if MODULE_SIG_SHA384 default "sha512" if MODULE_SIG_SHA512 + default "sha3-256" if MODULE_SIG_SHA3_256 + default "sha3-384" if MODULE_SIG_SHA3_384 + default "sha3-512" if MODULE_SIG_SHA3_512 choice prompt "Module compression mode" diff --git a/kernel/padata.c b/kernel/padata.c index 222d60195de6..179fb1518070 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -202,7 +202,7 @@ int padata_do_parallel(struct padata_shell *ps, *cb_cpu = cpu; } - err = -EBUSY; + err = -EBUSY; if ((pinst->flags & PADATA_RESET)) goto out; @@ -1102,12 +1102,16 @@ EXPORT_SYMBOL(padata_alloc_shell); */ void padata_free_shell(struct padata_shell *ps) { + struct parallel_data *pd; + if (!ps) return; mutex_lock(&ps->pinst->lock); list_del(&ps->list); - padata_free_pd(rcu_dereference_protected(ps->pd, 1)); + pd = rcu_dereference_protected(ps->pd, 1); + if (refcount_dec_and_test(&pd->refcnt)) + padata_free_pd(pd); mutex_unlock(&ps->pinst->lock); kfree(ps); diff --git a/kernel/panic.c b/kernel/panic.c index ffa037fa777d..2807639aab51 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -192,14 +192,15 @@ atomic_t panic_cpu = ATOMIC_INIT(PANIC_CPU_INVALID); */ void nmi_panic(struct pt_regs *regs, const char *msg) { - int old_cpu, cpu; + int old_cpu, this_cpu; - cpu = raw_smp_processor_id(); - old_cpu = atomic_cmpxchg(&panic_cpu, 
PANIC_CPU_INVALID, cpu); + old_cpu = PANIC_CPU_INVALID; + this_cpu = raw_smp_processor_id(); - if (old_cpu == PANIC_CPU_INVALID) + /* atomic_try_cmpxchg updates old_cpu on failure */ + if (atomic_try_cmpxchg(&panic_cpu, &old_cpu, this_cpu)) panic("%s", msg); - else if (old_cpu != cpu) + else if (old_cpu != this_cpu) nmi_panic_self_stop(regs); } EXPORT_SYMBOL(nmi_panic); @@ -311,15 +312,18 @@ void panic(const char *fmt, ...) * stop themself or will wait until they are stopped by the 1st CPU * with smp_send_stop(). * - * `old_cpu == PANIC_CPU_INVALID' means this is the 1st CPU which - * comes here, so go ahead. + * cmpxchg success means this is the 1st CPU which comes here, + * so go ahead. * `old_cpu == this_cpu' means we came from nmi_panic() which sets * panic_cpu to this CPU. In this case, this is also the 1st CPU. */ + old_cpu = PANIC_CPU_INVALID; this_cpu = raw_smp_processor_id(); - old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu); - if (old_cpu != PANIC_CPU_INVALID && old_cpu != this_cpu) + /* atomic_try_cmpxchg updates old_cpu on failure */ + if (atomic_try_cmpxchg(&panic_cpu, &old_cpu, this_cpu)) { + /* go ahead */ + } else if (old_cpu != this_cpu) panic_smp_self_stop(); console_verbose(); diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c index 619972c78774..3028b2218aa4 100644 --- a/kernel/pid_namespace.c +++ b/kernel/pid_namespace.c @@ -286,12 +286,6 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write, if (write && !checkpoint_restore_ns_capable(pid_ns->user_ns)) return -EPERM; - /* - * Writing directly to ns' last_pid field is OK, since this field - * is volatile in a living namespace anyway and a code writing to - * it should synchronize its usage with external means. - */ - next = idr_get_cursor(&pid_ns->idr) - 1; tmp.data = &next; diff --git a/kernel/printk/Makefile b/kernel/printk/Makefile index f5b388e810b9..39a2b61c7232 100644 --- a/kernel/printk/Makefile +++ b/kernel/printk/Makefile @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only obj-y = printk.o -obj-$(CONFIG_PRINTK) += printk_safe.o +obj-$(CONFIG_PRINTK) += printk_safe.o nbcon.o obj-$(CONFIG_A11Y_BRAILLE_CONSOLE) += braille.o obj-$(CONFIG_PRINTK_INDEX) += index.o diff --git a/kernel/printk/internal.h b/kernel/printk/internal.h index 7d4979d5c3ce..6c2afee5ef62 100644 --- a/kernel/printk/internal.h +++ b/kernel/printk/internal.h @@ -3,6 +3,8 @@ * internal.h - printk internal definitions */ #include <linux/percpu.h> +#include <linux/console.h> +#include "printk_ringbuffer.h" #if defined(CONFIG_PRINTK) && defined(CONFIG_SYSCTL) void __init printk_sysctl_init(void); @@ -12,6 +14,12 @@ int devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write, #define printk_sysctl_init() do { } while (0) #endif +#define con_printk(lvl, con, fmt, ...) \ + printk(lvl pr_fmt("%s%sconsole [%s%d] " fmt), \ + (con->flags & CON_NBCON) ? "" : "legacy ", \ + (con->flags & CON_BOOT) ? 
"boot" : "", \ + con->name, con->index, ##__VA_ARGS__) + #ifdef CONFIG_PRINTK #ifdef CONFIG_PRINTK_CALLER @@ -35,6 +43,8 @@ enum printk_info_flags { LOG_CONT = 8, /* text is a fragment of a continuation line */ }; +extern struct printk_ringbuffer *prb; + __printf(4, 0) int vprintk_store(int facility, int level, const struct dev_printk_info *dev_info, @@ -61,6 +71,13 @@ void defer_console_output(void); u16 printk_parse_prefix(const char *text, int *level, enum printk_info_flags *flags); + +u64 nbcon_seq_read(struct console *con); +void nbcon_seq_force(struct console *con, u64 seq); +bool nbcon_alloc(struct console *con); +void nbcon_init(struct console *con); +void nbcon_free(struct console *con); + #else #define PRINTK_PREFIX_MAX 0 @@ -76,8 +93,16 @@ u16 printk_parse_prefix(const char *text, int *level, #define printk_safe_exit_irqrestore(flags) local_irq_restore(flags) static inline bool printk_percpu_data_ready(void) { return false; } +static inline u64 nbcon_seq_read(struct console *con) { return 0; } +static inline void nbcon_seq_force(struct console *con, u64 seq) { } +static inline bool nbcon_alloc(struct console *con) { return false; } +static inline void nbcon_init(struct console *con) { } +static inline void nbcon_free(struct console *con) { } + #endif /* CONFIG_PRINTK */ +extern struct printk_buffers printk_shared_pbufs; + /** * struct printk_buffers - Buffers to read/format/output printk messages. * @outbuf: After formatting, contains text to output. @@ -105,3 +130,9 @@ struct printk_message { }; bool other_cpu_in_panic(void); +bool printk_get_next_message(struct printk_message *pmsg, u64 seq, + bool is_extended, bool may_supress); + +#ifdef CONFIG_PRINTK +void console_prepend_dropped(struct printk_message *pmsg, unsigned long dropped); +#endif diff --git a/kernel/printk/nbcon.c b/kernel/printk/nbcon.c new file mode 100644 index 000000000000..b96077152f49 --- /dev/null +++ b/kernel/printk/nbcon.c @@ -0,0 +1,1029 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright (C) 2022 Linutronix GmbH, John Ogness +// Copyright (C) 2022 Intel, Thomas Gleixner + +#include <linux/kernel.h> +#include <linux/console.h> +#include <linux/delay.h> +#include <linux/slab.h> +#include "internal.h" +/* + * Printk console printing implementation for consoles which does not depend + * on the legacy style console_lock mechanism. + * + * The state of the console is maintained in the "nbcon_state" atomic + * variable. + * + * The console is locked when: + * + * - The 'prio' field contains the priority of the context that owns the + * console. Only higher priority contexts are allowed to take over the + * lock. A value of 0 (NBCON_PRIO_NONE) means the console is not locked. + * + * - The 'cpu' field denotes on which CPU the console is locked. It is used + * to prevent busy waiting on the same CPU. Also it informs the lock owner + * that it has lost the lock in a more complex scenario when the lock was + * taken over by a higher priority context, released, and taken on another + * CPU with the same priority as the interrupted owner. + * + * The acquire mechanism uses a few more fields: + * + * - The 'req_prio' field is used by the handover approach to make the + * current owner aware that there is a context with a higher priority + * waiting for the friendly handover. + * + * - The 'unsafe' field allows to take over the console in a safe way in the + * middle of emitting a message. The field is set only when accessing some + * shared resources or when the console device is manipulated. 
It can be + * cleared, for example, after emitting one character when the console + * device is in a consistent state. + * + * - The 'unsafe_takeover' field is set when a hostile takeover took the + * console in an unsafe state. The console will stay in the unsafe state + * until re-initialized. + * + * The acquire mechanism uses three approaches: + * + * 1) Direct acquire when the console is not owned or is owned by a lower + * priority context and is in a safe state. + * + * 2) Friendly handover mechanism uses a request/grant handshake. It is used + * when the current owner has lower priority and the console is in an + * unsafe state. + * + * The requesting context: + * + * a) Sets its priority into the 'req_prio' field. + * + * b) Waits (with a timeout) for the owning context to unlock the + * console. + * + * c) Takes the lock and clears the 'req_prio' field. + * + * The owning context: + * + * a) Observes the 'req_prio' field set on exit from the unsafe + * console state. + * + * b) Gives up console ownership by clearing the 'prio' field. + * + * 3) Unsafe hostile takeover allows to take over the lock even when the + * console is an unsafe state. It is used only in panic() by the final + * attempt to flush consoles in a try and hope mode. + * + * Note that separate record buffers are used in panic(). As a result, + * the messages can be read and formatted without any risk even after + * using the hostile takeover in unsafe state. + * + * The release function simply clears the 'prio' field. + * + * All operations on @console::nbcon_state are atomic cmpxchg based to + * handle concurrency. + * + * The acquire/release functions implement only minimal policies: + * + * - Preference for higher priority contexts. + * - Protection of the panic CPU. + * + * All other policy decisions must be made at the call sites: + * + * - What is marked as an unsafe section. + * - Whether to spin-wait if there is already an owner and the console is + * in an unsafe state. + * - Whether to attempt an unsafe hostile takeover. + * + * The design allows to implement the well known: + * + * acquire() + * output_one_printk_record() + * release() + * + * The output of one printk record might be interrupted with a higher priority + * context. The new owner is supposed to reprint the entire interrupted record + * from scratch. + */ + +/** + * nbcon_state_set - Helper function to set the console state + * @con: Console to update + * @new: The new state to write + * + * Only to be used when the console is not yet or no longer visible in the + * system. Otherwise use nbcon_state_try_cmpxchg(). + */ +static inline void nbcon_state_set(struct console *con, struct nbcon_state *new) +{ + atomic_set(&ACCESS_PRIVATE(con, nbcon_state), new->atom); +} + +/** + * nbcon_state_read - Helper function to read the console state + * @con: Console to read + * @state: The state to store the result + */ +static inline void nbcon_state_read(struct console *con, struct nbcon_state *state) +{ + state->atom = atomic_read(&ACCESS_PRIVATE(con, nbcon_state)); +} + +/** + * nbcon_state_try_cmpxchg() - Helper function for atomic_try_cmpxchg() on console state + * @con: Console to update + * @cur: Old/expected state + * @new: New state + * + * Return: True on success. False on fail and @cur is updated. 
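Every state update in this file follows the same snapshot/modify/retry loop around the helper documented above; a condensed sketch of that idiom (example_set_unsafe() is not part of the patch, the field being changed is arbitrary):

static void example_set_unsafe(struct console *con)
{
	struct nbcon_state cur;
	struct nbcon_state new;

	nbcon_state_read(con, &cur);
	do {
		new.atom = cur.atom;	/* start from the latest snapshot */
		new.unsafe = 1;		/* modify just the field of interest */
	} while (!nbcon_state_try_cmpxchg(con, &cur, &new));
	/* On a failed attempt @cur is refreshed, so @new is rebuilt and retried. */
}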
+ */ +static inline bool nbcon_state_try_cmpxchg(struct console *con, struct nbcon_state *cur, + struct nbcon_state *new) +{ + return atomic_try_cmpxchg(&ACCESS_PRIVATE(con, nbcon_state), &cur->atom, new->atom); +} + +#ifdef CONFIG_64BIT + +#define __seq_to_nbcon_seq(seq) (seq) +#define __nbcon_seq_to_seq(seq) (seq) + +#else /* CONFIG_64BIT */ + +#define __seq_to_nbcon_seq(seq) ((u32)seq) + +static inline u64 __nbcon_seq_to_seq(u32 nbcon_seq) +{ + u64 seq; + u64 rb_next_seq; + + /* + * The provided sequence is only the lower 32 bits of the ringbuffer + * sequence. It needs to be expanded to 64bit. Get the next sequence + * number from the ringbuffer and fold it. + * + * Having a 32bit representation in the console is sufficient. + * If a console ever gets more than 2^31 records behind + * the ringbuffer then this is the least of the problems. + * + * Also the access to the ring buffer is always safe. + */ + rb_next_seq = prb_next_seq(prb); + seq = rb_next_seq - ((u32)rb_next_seq - nbcon_seq); + + return seq; +} + +#endif /* CONFIG_64BIT */ + +/** + * nbcon_seq_read - Read the current console sequence + * @con: Console to read the sequence of + * + * Return: Sequence number of the next record to print on @con. + */ +u64 nbcon_seq_read(struct console *con) +{ + unsigned long nbcon_seq = atomic_long_read(&ACCESS_PRIVATE(con, nbcon_seq)); + + return __nbcon_seq_to_seq(nbcon_seq); +} + +/** + * nbcon_seq_force - Force console sequence to a specific value + * @con: Console to work on + * @seq: Sequence number value to set + * + * Only to be used during init (before registration) or in extreme situations + * (such as panic with CONSOLE_REPLAY_ALL). + */ +void nbcon_seq_force(struct console *con, u64 seq) +{ + /* + * If the specified record no longer exists, the oldest available record + * is chosen. This is especially important on 32bit systems because only + * the lower 32 bits of the sequence number are stored. The upper 32 bits + * are derived from the sequence numbers available in the ringbuffer. + */ + u64 valid_seq = max_t(u64, seq, prb_first_valid_seq(prb)); + + atomic_long_set(&ACCESS_PRIVATE(con, nbcon_seq), __seq_to_nbcon_seq(valid_seq)); + + /* Clear con->seq since nbcon consoles use con->nbcon_seq instead. */ + con->seq = 0; +} + +/** + * nbcon_seq_try_update - Try to update the console sequence number + * @ctxt: Pointer to an acquire context that contains + * all information about the acquire mode + * @new_seq: The new sequence number to set + * + * @ctxt->seq is updated to the new value of @con::nbcon_seq (expanded to + * the 64bit value). This could be a different value than @new_seq if + * nbcon_seq_force() was used or the current context no longer owns the + * console. In the later case, it will stop printing anyway. + */ +static void nbcon_seq_try_update(struct nbcon_context *ctxt, u64 new_seq) +{ + unsigned long nbcon_seq = __seq_to_nbcon_seq(ctxt->seq); + struct console *con = ctxt->console; + + if (atomic_long_try_cmpxchg(&ACCESS_PRIVATE(con, nbcon_seq), &nbcon_seq, + __seq_to_nbcon_seq(new_seq))) { + ctxt->seq = new_seq; + } else { + ctxt->seq = nbcon_seq_read(con); + } +} + +/** + * nbcon_context_try_acquire_direct - Try to acquire directly + * @ctxt: The context of the caller + * @cur: The current console state + * + * Acquire the console when it is released. Also acquire the console when + * the current owner has a lower priority and the console is in a safe state. + * + * Return: 0 on success. Otherwise, an error code on failure. 
Also @cur + * is updated to the latest state when failed to modify it. + * + * Errors: + * + * -EPERM: A panic is in progress and this is not the panic CPU. + * Or the current owner or waiter has the same or higher + * priority. No acquire method can be successful in + * this case. + * + * -EBUSY: The current owner has a lower priority but the console + * in an unsafe state. The caller should try using + * the handover acquire method. + */ +static int nbcon_context_try_acquire_direct(struct nbcon_context *ctxt, + struct nbcon_state *cur) +{ + unsigned int cpu = smp_processor_id(); + struct console *con = ctxt->console; + struct nbcon_state new; + + do { + if (other_cpu_in_panic()) + return -EPERM; + + if (ctxt->prio <= cur->prio || ctxt->prio <= cur->req_prio) + return -EPERM; + + if (cur->unsafe) + return -EBUSY; + + /* + * The console should never be safe for a direct acquire + * if an unsafe hostile takeover has ever happened. + */ + WARN_ON_ONCE(cur->unsafe_takeover); + + new.atom = cur->atom; + new.prio = ctxt->prio; + new.req_prio = NBCON_PRIO_NONE; + new.unsafe = cur->unsafe_takeover; + new.cpu = cpu; + + } while (!nbcon_state_try_cmpxchg(con, cur, &new)); + + return 0; +} + +static bool nbcon_waiter_matches(struct nbcon_state *cur, int expected_prio) +{ + /* + * The request context is well defined by the @req_prio because: + * + * - Only a context with a higher priority can take over the request. + * - There are only three priorities. + * - Only one CPU is allowed to request PANIC priority. + * - Lower priorities are ignored during panic() until reboot. + * + * As a result, the following scenario is *not* possible: + * + * 1. Another context with a higher priority directly takes ownership. + * 2. The higher priority context releases the ownership. + * 3. A lower priority context takes the ownership. + * 4. Another context with the same priority as this context + * creates a request and starts waiting. + */ + + return (cur->req_prio == expected_prio); +} + +/** + * nbcon_context_try_acquire_requested - Try to acquire after having + * requested a handover + * @ctxt: The context of the caller + * @cur: The current console state + * + * This is a helper function for nbcon_context_try_acquire_handover(). + * It is called when the console is in an unsafe state. The current + * owner will release the console on exit from the unsafe region. + * + * Return: 0 on success and @cur is updated to the new console state. + * Otherwise an error code on failure. + * + * Errors: + * + * -EPERM: A panic is in progress and this is not the panic CPU + * or this context is no longer the waiter. + * + * -EBUSY: The console is still locked. The caller should + * continue waiting. + * + * Note: The caller must still remove the request when an error has occurred + * except when this context is no longer the waiter. + */ +static int nbcon_context_try_acquire_requested(struct nbcon_context *ctxt, + struct nbcon_state *cur) +{ + unsigned int cpu = smp_processor_id(); + struct console *con = ctxt->console; + struct nbcon_state new; + + /* Note that the caller must still remove the request! */ + if (other_cpu_in_panic()) + return -EPERM; + + /* + * Note that the waiter will also change if there was an unsafe + * hostile takeover. + */ + if (!nbcon_waiter_matches(cur, ctxt->prio)) + return -EPERM; + + /* If still locked, caller should continue waiting. */ + if (cur->prio != NBCON_PRIO_NONE) + return -EBUSY; + + /* + * The previous owner should have never released ownership + * in an unsafe region. 
+ */ + WARN_ON_ONCE(cur->unsafe); + + new.atom = cur->atom; + new.prio = ctxt->prio; + new.req_prio = NBCON_PRIO_NONE; + new.unsafe = cur->unsafe_takeover; + new.cpu = cpu; + + if (!nbcon_state_try_cmpxchg(con, cur, &new)) { + /* + * The acquire could fail only when it has been taken + * over by a higher priority context. + */ + WARN_ON_ONCE(nbcon_waiter_matches(cur, ctxt->prio)); + return -EPERM; + } + + /* Handover success. This context now owns the console. */ + return 0; +} + +/** + * nbcon_context_try_acquire_handover - Try to acquire via handover + * @ctxt: The context of the caller + * @cur: The current console state + * + * The function must be called only when the context has higher priority + * than the current owner and the console is in an unsafe state. + * It is the case when nbcon_context_try_acquire_direct() returns -EBUSY. + * + * The function sets "req_prio" field to make the current owner aware of + * the request. Then it waits until the current owner releases the console, + * or an even higher context takes over the request, or timeout expires. + * + * The current owner checks the "req_prio" field on exit from the unsafe + * region and releases the console. It does not touch the "req_prio" field + * so that the console stays reserved for the waiter. + * + * Return: 0 on success. Otherwise, an error code on failure. Also @cur + * is updated to the latest state when failed to modify it. + * + * Errors: + * + * -EPERM: A panic is in progress and this is not the panic CPU. + * Or a higher priority context has taken over the + * console or the handover request. + * + * -EBUSY: The current owner is on the same CPU so that the hand + * shake could not work. Or the current owner is not + * willing to wait (zero timeout). Or the console does + * not enter the safe state before timeout passed. The + * caller might still use the unsafe hostile takeover + * when allowed. + * + * -EAGAIN: @cur has changed when creating the handover request. + * The caller should retry with direct acquire. + */ +static int nbcon_context_try_acquire_handover(struct nbcon_context *ctxt, + struct nbcon_state *cur) +{ + unsigned int cpu = smp_processor_id(); + struct console *con = ctxt->console; + struct nbcon_state new; + int timeout; + int request_err = -EBUSY; + + /* + * Check that the handover is called when the direct acquire failed + * with -EBUSY. + */ + WARN_ON_ONCE(ctxt->prio <= cur->prio || ctxt->prio <= cur->req_prio); + WARN_ON_ONCE(!cur->unsafe); + + /* Handover is not possible on the same CPU. */ + if (cur->cpu == cpu) + return -EBUSY; + + /* + * Console stays unsafe after an unsafe takeover until re-initialized. + * Waiting is not going to help in this case. + */ + if (cur->unsafe_takeover) + return -EBUSY; + + /* Is the caller willing to wait? */ + if (ctxt->spinwait_max_us == 0) + return -EBUSY; + + /* + * Setup a request for the handover. The caller should try to acquire + * the console directly when the current state has been modified. + */ + new.atom = cur->atom; + new.req_prio = ctxt->prio; + if (!nbcon_state_try_cmpxchg(con, cur, &new)) + return -EAGAIN; + + cur->atom = new.atom; + + /* Wait until there is no owner and then acquire the console. */ + for (timeout = ctxt->spinwait_max_us; timeout >= 0; timeout--) { + /* On successful acquire, this request is cleared. 
*/ + request_err = nbcon_context_try_acquire_requested(ctxt, cur); + if (!request_err) + return 0; + + /* + * If the acquire should be aborted, it must be ensured + * that the request is removed before returning to caller. + */ + if (request_err == -EPERM) + break; + + udelay(1); + + /* Re-read the state because some time has passed. */ + nbcon_state_read(con, cur); + } + + /* Timed out or aborted. Carefully remove handover request. */ + do { + /* + * No need to remove request if there is a new waiter. This + * can only happen if a higher priority context has taken over + * the console or the handover request. + */ + if (!nbcon_waiter_matches(cur, ctxt->prio)) + return -EPERM; + + /* Unset request for handover. */ + new.atom = cur->atom; + new.req_prio = NBCON_PRIO_NONE; + if (nbcon_state_try_cmpxchg(con, cur, &new)) { + /* + * Request successfully unset. Report failure of + * acquiring via handover. + */ + cur->atom = new.atom; + return request_err; + } + + /* + * Unable to remove request. Try to acquire in case + * the owner has released the lock. + */ + } while (nbcon_context_try_acquire_requested(ctxt, cur)); + + /* Lucky timing. The acquire succeeded while removing the request. */ + return 0; +} + +/** + * nbcon_context_try_acquire_hostile - Acquire via unsafe hostile takeover + * @ctxt: The context of the caller + * @cur: The current console state + * + * Acquire the console even in the unsafe state. + * + * It can be permitted by setting the 'allow_unsafe_takeover' field only + * by the final attempt to flush messages in panic(). + * + * Return: 0 on success. -EPERM when not allowed by the context. + */ +static int nbcon_context_try_acquire_hostile(struct nbcon_context *ctxt, + struct nbcon_state *cur) +{ + unsigned int cpu = smp_processor_id(); + struct console *con = ctxt->console; + struct nbcon_state new; + + if (!ctxt->allow_unsafe_takeover) + return -EPERM; + + /* Ensure caller is allowed to perform unsafe hostile takeovers. */ + if (WARN_ON_ONCE(ctxt->prio != NBCON_PRIO_PANIC)) + return -EPERM; + + /* + * Check that try_acquire_direct() and try_acquire_handover() returned + * -EBUSY in the right situation. + */ + WARN_ON_ONCE(ctxt->prio <= cur->prio || ctxt->prio <= cur->req_prio); + WARN_ON_ONCE(cur->unsafe != true); + + do { + new.atom = cur->atom; + new.cpu = cpu; + new.prio = ctxt->prio; + new.unsafe |= cur->unsafe_takeover; + new.unsafe_takeover |= cur->unsafe; + + } while (!nbcon_state_try_cmpxchg(con, cur, &new)); + + return 0; +} + +static struct printk_buffers panic_nbcon_pbufs; + +/** + * nbcon_context_try_acquire - Try to acquire nbcon console + * @ctxt: The context of the caller + * + * Return: True if the console was acquired. False otherwise. + * + * If the caller allowed an unsafe hostile takeover, on success the + * caller should check the current console state to see if it is + * in an unsafe state. Otherwise, on success the caller may assume + * the console is not in an unsafe state. 
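The file-head comment describes the intended acquire()/print/release() round trip; a minimal caller sketch built on the functions above (the caller, its NBCON_PRIO_NORMAL choice and the 2000us spin budget are assumptions, no such user is added by this patch):

static void example_flush_one(struct console *con)
{
	struct nbcon_context ctxt = {
		.console	 = con,
		.prio		 = NBCON_PRIO_NORMAL,
		.spinwait_max_us = 2000,	/* willing to wait for a friendly handover */
	};

	if (!nbcon_context_try_acquire(&ctxt))
		return;		/* owned by an equal or higher priority context */

	/* ... emit pending records here, e.g. via nbcon_emit_next_record() ... */

	nbcon_context_release(&ctxt);
}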
+ */ +__maybe_unused +static bool nbcon_context_try_acquire(struct nbcon_context *ctxt) +{ + unsigned int cpu = smp_processor_id(); + struct console *con = ctxt->console; + struct nbcon_state cur; + int err; + + nbcon_state_read(con, &cur); +try_again: + err = nbcon_context_try_acquire_direct(ctxt, &cur); + if (err != -EBUSY) + goto out; + + err = nbcon_context_try_acquire_handover(ctxt, &cur); + if (err == -EAGAIN) + goto try_again; + if (err != -EBUSY) + goto out; + + err = nbcon_context_try_acquire_hostile(ctxt, &cur); +out: + if (err) + return false; + + /* Acquire succeeded. */ + + /* Assign the appropriate buffer for this context. */ + if (atomic_read(&panic_cpu) == cpu) + ctxt->pbufs = &panic_nbcon_pbufs; + else + ctxt->pbufs = con->pbufs; + + /* Set the record sequence for this context to print. */ + ctxt->seq = nbcon_seq_read(ctxt->console); + + return true; +} + +static bool nbcon_owner_matches(struct nbcon_state *cur, int expected_cpu, + int expected_prio) +{ + /* + * Since consoles can only be acquired by higher priorities, + * owning contexts are uniquely identified by @prio. However, + * since contexts can unexpectedly lose ownership, it is + * possible that later another owner appears with the same + * priority. For this reason @cpu is also needed. + */ + + if (cur->prio != expected_prio) + return false; + + if (cur->cpu != expected_cpu) + return false; + + return true; +} + +/** + * nbcon_context_release - Release the console + * @ctxt: The nbcon context from nbcon_context_try_acquire() + */ +static void nbcon_context_release(struct nbcon_context *ctxt) +{ + unsigned int cpu = smp_processor_id(); + struct console *con = ctxt->console; + struct nbcon_state cur; + struct nbcon_state new; + + nbcon_state_read(con, &cur); + + do { + if (!nbcon_owner_matches(&cur, cpu, ctxt->prio)) + break; + + new.atom = cur.atom; + new.prio = NBCON_PRIO_NONE; + + /* + * If @unsafe_takeover is set, it is kept set so that + * the state remains permanently unsafe. + */ + new.unsafe |= cur.unsafe_takeover; + + } while (!nbcon_state_try_cmpxchg(con, &cur, &new)); + + ctxt->pbufs = NULL; +} + +/** + * nbcon_context_can_proceed - Check whether ownership can proceed + * @ctxt: The nbcon context from nbcon_context_try_acquire() + * @cur: The current console state + * + * Return: True if this context still owns the console. False if + * ownership was handed over or taken. + * + * Must be invoked when entering the unsafe state to make sure that it still + * owns the lock. Also must be invoked when exiting the unsafe context + * to eventually free the lock for a higher priority context which asked + * for the friendly handover. + * + * It can be called inside an unsafe section when the console is just + * temporary in safe state instead of exiting and entering the unsafe + * state. + * + * Also it can be called in the safe context before doing an expensive + * safe operation. It does not make sense to do the operation when + * a higher priority context took the lock. + * + * When this function returns false then the calling context no longer owns + * the console and is no longer allowed to go forward. In this case it must + * back out immediately and carefully. The buffer content is also no longer + * trusted since it no longer belongs to the calling context. + */ +static bool nbcon_context_can_proceed(struct nbcon_context *ctxt, struct nbcon_state *cur) +{ + unsigned int cpu = smp_processor_id(); + + /* Make sure this context still owns the console. 
*/ + if (!nbcon_owner_matches(cur, cpu, ctxt->prio)) + return false; + + /* The console owner can proceed if there is no waiter. */ + if (cur->req_prio == NBCON_PRIO_NONE) + return true; + + /* + * A console owner within an unsafe region is always allowed to + * proceed, even if there are waiters. It can perform a handover + * when exiting the unsafe region. Otherwise the waiter will + * need to perform an unsafe hostile takeover. + */ + if (cur->unsafe) + return true; + + /* Waiters always have higher priorities than owners. */ + WARN_ON_ONCE(cur->req_prio <= cur->prio); + + /* + * Having a safe point for take over and eventually a few + * duplicated characters or a full line is way better than a + * hostile takeover. Post processing can take care of the garbage. + * Release and hand over. + */ + nbcon_context_release(ctxt); + + /* + * It is not clear whether the waiter really took over ownership. The + * outermost callsite must make the final decision whether console + * ownership is needed for it to proceed. If yes, it must reacquire + * ownership (possibly hostile) before carefully proceeding. + * + * The calling context no longer owns the console so go back all the + * way instead of trying to implement reacquire heuristics in tons of + * places. + */ + return false; +} + +/** + * nbcon_can_proceed - Check whether ownership can proceed + * @wctxt: The write context that was handed to the write function + * + * Return: True if this context still owns the console. False if + * ownership was handed over or taken. + * + * It is used in nbcon_enter_unsafe() to make sure that it still owns the + * lock. Also it is used in nbcon_exit_unsafe() to eventually free the lock + * for a higher priority context which asked for the friendly handover. + * + * It can be called inside an unsafe section when the console is just + * temporary in safe state instead of exiting and entering the unsafe state. + * + * Also it can be called in the safe context before doing an expensive safe + * operation. It does not make sense to do the operation when a higher + * priority context took the lock. + * + * When this function returns false then the calling context no longer owns + * the console and is no longer allowed to go forward. In this case it must + * back out immediately and carefully. The buffer content is also no longer + * trusted since it no longer belongs to the calling context. + */ +bool nbcon_can_proceed(struct nbcon_write_context *wctxt) +{ + struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt); + struct console *con = ctxt->console; + struct nbcon_state cur; + + nbcon_state_read(con, &cur); + + return nbcon_context_can_proceed(ctxt, &cur); +} +EXPORT_SYMBOL_GPL(nbcon_can_proceed); + +#define nbcon_context_enter_unsafe(c) __nbcon_context_update_unsafe(c, true) +#define nbcon_context_exit_unsafe(c) __nbcon_context_update_unsafe(c, false) + +/** + * __nbcon_context_update_unsafe - Update the unsafe bit in @con->nbcon_state + * @ctxt: The nbcon context from nbcon_context_try_acquire() + * @unsafe: The new value for the unsafe bit + * + * Return: True if the unsafe state was updated and this context still + * owns the console. Otherwise false if ownership was handed + * over or taken. + * + * This function allows console owners to modify the unsafe status of the + * console. + * + * When this function returns false then the calling context no longer owns + * the console and is no longer allowed to go forward. In this case it must + * back out immediately and carefully. 
The buffer content is also no longer + * trusted since it no longer belongs to the calling context. + * + * Internal helper to avoid duplicated code. + */ +static bool __nbcon_context_update_unsafe(struct nbcon_context *ctxt, bool unsafe) +{ + struct console *con = ctxt->console; + struct nbcon_state cur; + struct nbcon_state new; + + nbcon_state_read(con, &cur); + + do { + /* + * The unsafe bit must not be cleared if an + * unsafe hostile takeover has occurred. + */ + if (!unsafe && cur.unsafe_takeover) + goto out; + + if (!nbcon_context_can_proceed(ctxt, &cur)) + return false; + + new.atom = cur.atom; + new.unsafe = unsafe; + } while (!nbcon_state_try_cmpxchg(con, &cur, &new)); + + cur.atom = new.atom; +out: + return nbcon_context_can_proceed(ctxt, &cur); +} + +/** + * nbcon_enter_unsafe - Enter an unsafe region in the driver + * @wctxt: The write context that was handed to the write function + * + * Return: True if this context still owns the console. False if + * ownership was handed over or taken. + * + * When this function returns false then the calling context no longer owns + * the console and is no longer allowed to go forward. In this case it must + * back out immediately and carefully. The buffer content is also no longer + * trusted since it no longer belongs to the calling context. + */ +bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt) +{ + struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt); + + return nbcon_context_enter_unsafe(ctxt); +} +EXPORT_SYMBOL_GPL(nbcon_enter_unsafe); + +/** + * nbcon_exit_unsafe - Exit an unsafe region in the driver + * @wctxt: The write context that was handed to the write function + * + * Return: True if this context still owns the console. False if + * ownership was handed over or taken. + * + * When this function returns false then the calling context no longer owns + * the console and is no longer allowed to go forward. In this case it must + * back out immediately and carefully. The buffer content is also no longer + * trusted since it no longer belongs to the calling context. + */ +bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt) +{ + struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt); + + return nbcon_context_exit_unsafe(ctxt); +} +EXPORT_SYMBOL_GPL(nbcon_exit_unsafe); + +/** + * nbcon_emit_next_record - Emit a record in the acquired context + * @wctxt: The write context that will be handed to the write function + * + * Return: True if this context still owns the console. False if + * ownership was handed over or taken. + * + * When this function returns false then the calling context no longer owns + * the console and is no longer allowed to go forward. In this case it must + * back out immediately and carefully. The buffer content is also no longer + * trusted since it no longer belongs to the calling context. If the caller + * wants to do more it must reacquire the console first. + * + * When true is returned, @wctxt->ctxt.backlog indicates whether there are + * still records pending in the ringbuffer, + */ +__maybe_unused +static bool nbcon_emit_next_record(struct nbcon_write_context *wctxt) +{ + struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt); + struct console *con = ctxt->console; + bool is_extended = console_srcu_read_flags(con) & CON_EXTENDED; + struct printk_message pmsg = { + .pbufs = ctxt->pbufs, + }; + unsigned long con_dropped; + struct nbcon_state cur; + unsigned long dropped; + bool done; + + /* + * The printk buffers are filled within an unsafe section. 
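nbcon_enter_unsafe()/nbcon_exit_unsafe() above are the guards a console driver would wrap around hardware access; a sketch of a hypothetical ->write_atomic() callback using the bool convention visible in nbcon_emit_next_record() below (foo_write_atomic and foo_hw_emit are made-up names):

static bool foo_write_atomic(struct console *con, struct nbcon_write_context *wctxt)
{
	/* Touching the device is an unsafe region: take ownership or back out. */
	if (!nbcon_enter_unsafe(wctxt))
		return false;

	/* Hypothetical hardware output of the formatted record. */
	foo_hw_emit(con, wctxt->outbuf, wctxt->len);

	/* May hand the console over; false means ownership was lost and the
	 * record will be reprinted by the new owner. */
	return nbcon_exit_unsafe(wctxt);
}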
This + * prevents NBCON_PRIO_NORMAL and NBCON_PRIO_EMERGENCY from + * clobbering each other. + */ + + if (!nbcon_context_enter_unsafe(ctxt)) + return false; + + ctxt->backlog = printk_get_next_message(&pmsg, ctxt->seq, is_extended, true); + if (!ctxt->backlog) + return nbcon_context_exit_unsafe(ctxt); + + /* + * @con->dropped is not protected in case of an unsafe hostile + * takeover. In that situation the update can be racy so + * annotate it accordingly. + */ + con_dropped = data_race(READ_ONCE(con->dropped)); + + dropped = con_dropped + pmsg.dropped; + if (dropped && !is_extended) + console_prepend_dropped(&pmsg, dropped); + + if (!nbcon_context_exit_unsafe(ctxt)) + return false; + + /* For skipped records just update seq/dropped in @con. */ + if (pmsg.outbuf_len == 0) + goto update_con; + + /* Initialize the write context for driver callbacks. */ + wctxt->outbuf = &pmsg.pbufs->outbuf[0]; + wctxt->len = pmsg.outbuf_len; + nbcon_state_read(con, &cur); + wctxt->unsafe_takeover = cur.unsafe_takeover; + + if (con->write_atomic) { + done = con->write_atomic(con, wctxt); + } else { + nbcon_context_release(ctxt); + WARN_ON_ONCE(1); + done = false; + } + + /* If not done, the emit was aborted. */ + if (!done) + return false; + + /* + * Since any dropped message was successfully output, reset the + * dropped count for the console. + */ + dropped = 0; +update_con: + /* + * The dropped count and the sequence number are updated within an + * unsafe section. This limits update races to the panic context and + * allows the panic context to win. + */ + + if (!nbcon_context_enter_unsafe(ctxt)) + return false; + + if (dropped != con_dropped) { + /* Counterpart to the READ_ONCE() above. */ + WRITE_ONCE(con->dropped, dropped); + } + + nbcon_seq_try_update(ctxt, pmsg.seq + 1); + + return nbcon_context_exit_unsafe(ctxt); +} + +/** + * nbcon_alloc - Allocate buffers needed by the nbcon console + * @con: Console to allocate buffers for + * + * Return: True on success. False otherwise and the console cannot + * be used. + * + * This is not part of nbcon_init() because buffer allocation must + * be performed earlier in the console registration process. + */ +bool nbcon_alloc(struct console *con) +{ + if (con->flags & CON_BOOT) { + /* + * Boot console printing is synchronized with legacy console + * printing, so boot consoles can share the same global printk + * buffers. + */ + con->pbufs = &printk_shared_pbufs; + } else { + con->pbufs = kmalloc(sizeof(*con->pbufs), GFP_KERNEL); + if (!con->pbufs) { + con_printk(KERN_ERR, con, "failed to allocate printing buffer\n"); + return false; + } + } + + return true; +} + +/** + * nbcon_init - Initialize the nbcon console specific data + * @con: Console to initialize + * + * nbcon_alloc() *must* be called and succeed before this function + * is called. + * + * This function expects that the legacy @con->seq has been set. + */ +void nbcon_init(struct console *con) +{ + struct nbcon_state state = { }; + + /* nbcon_alloc() must have been called and successful! */ + BUG_ON(!con->pbufs); + + nbcon_seq_force(con, con->seq); + nbcon_state_set(con, &state); +} + +/** + * nbcon_free - Free and cleanup the nbcon console specific data + * @con: Console to free/cleanup nbcon data + */ +void nbcon_free(struct console *con) +{ + struct nbcon_state state = { }; + + nbcon_state_set(con, &state); + + /* Boot consoles share global printk buffers. 
*/ + if (!(con->flags & CON_BOOT)) + kfree(con->pbufs); + + con->pbufs = NULL; +} diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index 0b3af1529778..f2444b581e16 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -102,12 +102,6 @@ DEFINE_STATIC_SRCU(console_srcu); */ int __read_mostly suppress_printk; -/* - * During panic, heavy printk by other CPUs can delay the - * panic and risk deadlock on console resources. - */ -static int __read_mostly suppress_panic_printk; - #ifdef CONFIG_LOCKDEP static struct lockdep_map console_lock_dep_map = { .name = "console_lock" @@ -445,6 +439,12 @@ static int console_msg_format = MSG_FORMAT_DEFAULT; static DEFINE_MUTEX(syslog_lock); #ifdef CONFIG_PRINTK +/* + * During panic, heavy printk by other CPUs can delay the + * panic and risk deadlock on console resources. + */ +static int __read_mostly suppress_panic_printk; + DECLARE_WAIT_QUEUE_HEAD(log_wait); /* All 3 protected by @syslog_lock. */ /* the next printk record to read by syslog(READ) or /proc/kmsg */ @@ -494,7 +494,7 @@ _DEFINE_PRINTKRB(printk_rb_static, CONFIG_LOG_BUF_SHIFT - PRB_AVGBITS, static struct printk_ringbuffer printk_rb_dynamic; -static struct printk_ringbuffer *prb = &printk_rb_static; +struct printk_ringbuffer *prb = &printk_rb_static; /* * We cannot access per-CPU data (e.g. per-CPU flush irq_work) before @@ -698,9 +698,6 @@ out: return len; } -static bool printk_get_next_message(struct printk_message *pmsg, u64 seq, - bool is_extended, bool may_supress); - /* /dev/kmsg - userspace message inject/listen interface */ struct devkmsg_user { atomic64_t seq; @@ -1669,7 +1666,6 @@ static int syslog_print_all(char __user *buf, int size, bool clear) prb_rec_init_rd(&r, &info, text, PRINTK_MESSAGE_MAX); - len = 0; prb_for_each_record(seq, prb, seq, &r) { int textlen; @@ -2349,22 +2345,6 @@ static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progre static u64 syslog_seq; -static size_t record_print_text(const struct printk_record *r, - bool syslog, bool time) -{ - return 0; -} -static ssize_t info_print_ext_header(char *buf, size_t size, - struct printk_info *info) -{ - return 0; -} -static ssize_t msg_print_ext_body(char *buf, size_t size, - char *text, size_t text_len, - struct dev_printk_info *dev_info) { return 0; } -static void console_lock_spinning_enable(void) { } -static int console_lock_spinning_disable_and_check(int cookie) { return 0; } -static bool suppress_message_printing(int level) { return false; } static bool pr_flush(int timeout_ms, bool reset_on_progress) { return true; } static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress) { return true; } @@ -2404,13 +2384,21 @@ static void set_user_specified(struct console_cmdline *c, bool user_specified) console_set_on_cmdline = 1; } -static int __add_preferred_console(char *name, int idx, char *options, +static int __add_preferred_console(const char *name, const short idx, char *options, char *brl_options, bool user_specified) { struct console_cmdline *c; int i; /* + * We use a signed short index for struct console for device drivers to + * indicate a not yet assigned index or port. However, a negative index + * value is not valid for preferred console. + */ + if (idx < 0) + return -EINVAL; + + /* * See if this tty is not yet registered, and * if we have a slot free. */ @@ -2513,7 +2501,7 @@ __setup("console=", console_setup); * commonly to provide a default console (ie from PROM variables) when * the user has not supplied one. 
*/ -int add_preferred_console(char *name, int idx, char *options) +int add_preferred_console(const char *name, const short idx, char *options) { return __add_preferred_console(name, idx, options, NULL, false); } @@ -2718,6 +2706,8 @@ static void __console_unlock(void) up_console_sem(); } +#ifdef CONFIG_PRINTK + /* * Prepend the message in @pmsg->pbufs->outbuf with a "dropped message". This * is achieved by shifting the existing message over and inserting the dropped @@ -2732,8 +2722,7 @@ static void __console_unlock(void) * * If @pmsg->pbufs->outbuf is modified, @pmsg->outbuf_len is updated. */ -#ifdef CONFIG_PRINTK -static void console_prepend_dropped(struct printk_message *pmsg, unsigned long dropped) +void console_prepend_dropped(struct printk_message *pmsg, unsigned long dropped) { struct printk_buffers *pbufs = pmsg->pbufs; const size_t scratchbuf_sz = sizeof(pbufs->scratchbuf); @@ -2764,9 +2753,6 @@ static void console_prepend_dropped(struct printk_message *pmsg, unsigned long d memcpy(outbuf, scratchbuf, len); pmsg->outbuf_len += len; } -#else -#define console_prepend_dropped(pmsg, dropped) -#endif /* CONFIG_PRINTK */ /* * Read and format the specified record (or a later record if the specified @@ -2787,8 +2773,8 @@ static void console_prepend_dropped(struct printk_message *pmsg, unsigned long d * of @pmsg are valid. (See the documentation of struct printk_message * for information about the @pmsg fields.) */ -static bool printk_get_next_message(struct printk_message *pmsg, u64 seq, - bool is_extended, bool may_suppress) +bool printk_get_next_message(struct printk_message *pmsg, u64 seq, + bool is_extended, bool may_suppress) { static int panic_console_dropped; @@ -2847,6 +2833,13 @@ out: } /* + * Used as the printk buffers for non-panic, serialized console printing. + * This is for legacy (!CON_NBCON) as well as all boot (CON_BOOT) consoles. + * Its usage requires the console_lock held. + */ +struct printk_buffers printk_shared_pbufs; + +/* * Print one record for the given console. The record printed is whatever * record is the next available record for the given console. * @@ -2863,12 +2856,10 @@ out: */ static bool console_emit_next_record(struct console *con, bool *handover, int cookie) { - static struct printk_buffers pbufs; - bool is_extended = console_srcu_read_flags(con) & CON_EXTENDED; - char *outbuf = &pbufs.outbuf[0]; + char *outbuf = &printk_shared_pbufs.outbuf[0]; struct printk_message pmsg = { - .pbufs = &pbufs, + .pbufs = &printk_shared_pbufs, }; unsigned long flags; @@ -2919,6 +2910,16 @@ skip: return true; } +#else + +static bool console_emit_next_record(struct console *con, bool *handover, int cookie) +{ + *handover = false; + return false; +} + +#endif /* CONFIG_PRINTK */ + /* * Print out all remaining records to all consoles. * @@ -3163,6 +3164,7 @@ void console_flush_on_panic(enum con_flush_mode mode) if (mode == CONSOLE_REPLAY_ALL) { struct console *c; + short flags; int cookie; u64 seq; @@ -3170,11 +3172,17 @@ void console_flush_on_panic(enum con_flush_mode mode) cookie = console_srcu_read_lock(); for_each_console_srcu(c) { - /* - * This is an unsynchronized assignment, but the - * kernel is in "hope and pray" mode anyway. - */ - c->seq = seq; + flags = console_srcu_read_flags(c); + + if (flags & CON_NBCON) { + nbcon_seq_force(c, seq); + } else { + /* + * This is an unsynchronized assignment. On + * panic legacy consoles are only best effort. 
+ */ + c->seq = seq; + } } console_srcu_read_unlock(cookie); } @@ -3326,11 +3334,6 @@ static void try_enable_default_console(struct console *newcon) newcon->flags |= CON_CONSDEV; } -#define con_printk(lvl, con, fmt, ...) \ - printk(lvl pr_fmt("%sconsole [%s%d] " fmt), \ - (con->flags & CON_BOOT) ? "boot" : "", \ - con->name, con->index, ##__VA_ARGS__) - static void console_init_seq(struct console *newcon, bool bootcon_registered) { struct console *con; @@ -3444,6 +3447,15 @@ void register_console(struct console *newcon) goto unlock; } + if (newcon->flags & CON_NBCON) { + /* + * Ensure the nbcon console buffers can be allocated + * before modifying any global data. + */ + if (!nbcon_alloc(newcon)) + goto unlock; + } + /* * See if we want to enable this console driver by default. * @@ -3471,8 +3483,11 @@ void register_console(struct console *newcon) err = try_enable_preferred_console(newcon, false); /* printk() messages are not printed to the Braille console. */ - if (err || newcon->flags & CON_BRL) + if (err || newcon->flags & CON_BRL) { + if (newcon->flags & CON_NBCON) + nbcon_free(newcon); goto unlock; + } /* * If we have a bootconsole, and are switching to a real console, @@ -3488,6 +3503,9 @@ void register_console(struct console *newcon) newcon->dropped = 0; console_init_seq(newcon, bootcon_registered); + if (newcon->flags & CON_NBCON) + nbcon_init(newcon); + /* * Put this console in the list - keep the * preferred driver at the head of the list. @@ -3579,6 +3597,9 @@ static int unregister_console_locked(struct console *console) */ synchronize_srcu(&console_srcu); + if (console->flags & CON_NBCON) + nbcon_free(console); + console_sysfs_notify(); if (console->exit) @@ -3728,10 +3749,12 @@ late_initcall(printk_late_init); /* If @con is specified, only wait for that console. Otherwise wait for all. */ static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress) { - int remaining = timeout_ms; + unsigned long timeout_jiffies = msecs_to_jiffies(timeout_ms); + unsigned long remaining_jiffies = timeout_jiffies; struct console *c; u64 last_diff = 0; u64 printk_seq; + short flags; int cookie; u64 diff; u64 seq; @@ -3745,6 +3768,9 @@ static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progre console_unlock(); for (;;) { + unsigned long begin_jiffies; + unsigned long slept_jiffies; + diff = 0; /* @@ -3759,6 +3785,9 @@ static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progre for_each_console_srcu(c) { if (con && con != c) continue; + + flags = console_srcu_read_flags(c); + /* * If consoles are not usable, it cannot be expected * that they make forward progress, so only increment @@ -3766,31 +3795,33 @@ static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progre */ if (!console_is_usable(c)) continue; - printk_seq = c->seq; + + if (flags & CON_NBCON) { + printk_seq = nbcon_seq_read(c); + } else { + printk_seq = c->seq; + } + if (printk_seq < seq) diff += seq - printk_seq; } console_srcu_read_unlock(cookie); if (diff != last_diff && reset_on_progress) - remaining = timeout_ms; + remaining_jiffies = timeout_jiffies; console_unlock(); /* Note: @diff is 0 if there are no usable consoles. */ - if (diff == 0 || remaining == 0) + if (diff == 0 || remaining_jiffies == 0) break; - if (remaining < 0) { - /* no timeout limit */ - msleep(100); - } else if (remaining < 100) { - msleep(remaining); - remaining = 0; - } else { - msleep(100); - remaining -= 100; - } + /* msleep(1) might sleep much longer. Check time by jiffies. 
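The comment that closes above explains why the flush timeout is now tracked in jiffies: msleep(1) can oversleep by a large factor, so the loop measures what actually elapsed rather than assuming one millisecond per iteration. A minimal sketch of the pattern, with flush_done() standing in for the real progress check:

        unsigned long remaining = msecs_to_jiffies(timeout_ms);

        while (!flush_done() && remaining) {
                unsigned long begin = jiffies;

                msleep(1);      /* may sleep far longer than one millisecond */
                remaining -= min(jiffies - begin, remaining);
        }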
*/ + begin_jiffies = jiffies; + msleep(1); + slept_jiffies = jiffies - begin_jiffies; + + remaining_jiffies -= min(slept_jiffies, remaining_jiffies); last_diff = diff; } @@ -4194,7 +4225,6 @@ bool kmsg_dump_get_buffer(struct kmsg_dump_iter *iter, bool syslog, prb_rec_init_rd(&r, &info, buf, size); - len = 0; prb_for_each_record(seq, prb, seq, &r) { if (r.info->seq >= iter->next_seq) break; diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 443057bee87c..d8b5e13a2229 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -59,7 +59,7 @@ int ptrace_access_vm(struct task_struct *tsk, unsigned long addr, return 0; } - ret = __access_remote_vm(mm, addr, buf, len, gup_flags); + ret = access_remote_vm(mm, addr, buf, len, gup_flags); mmput(mm); return ret; diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h index 0d866eaa4cc8..b531c33e9545 100644 --- a/kernel/rcu/rcu.h +++ b/kernel/rcu/rcu.h @@ -500,6 +500,7 @@ static inline void rcu_expedite_gp(void) { } static inline void rcu_unexpedite_gp(void) { } static inline void rcu_async_hurry(void) { } static inline void rcu_async_relax(void) { } +static inline bool rcu_cpu_online(int cpu) { return true; } #else /* #ifdef CONFIG_TINY_RCU */ bool rcu_gp_is_normal(void); /* Internal RCU use. */ bool rcu_gp_is_expedited(void); /* Internal RCU use. */ @@ -509,6 +510,7 @@ void rcu_unexpedite_gp(void); void rcu_async_hurry(void); void rcu_async_relax(void); void rcupdate_announce_bootup_oddness(void); +bool rcu_cpu_online(int cpu); #ifdef CONFIG_TASKS_RCU_GENERIC void show_rcu_tasks_gp_kthreads(void); #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */ diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index 1fa631168594..f54d5782eca0 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -895,10 +895,36 @@ static void rcu_tasks_pregp_step(struct list_head *hop) synchronize_rcu(); } +/* Check for quiescent states since the pregp's synchronize_rcu() */ +static bool rcu_tasks_is_holdout(struct task_struct *t) +{ + int cpu; + + /* Has the task been seen voluntarily sleeping? */ + if (!READ_ONCE(t->on_rq)) + return false; + + /* + * Idle tasks (or idle injection) within the idle loop are RCU-tasks + * quiescent states. But CPU boot code performed by the idle task + * isn't a quiescent state. + */ + if (is_idle_task(t)) + return false; + + cpu = task_cpu(t); + + /* Idle tasks on offline CPUs are RCU-tasks quiescent states. */ + if (t == idle_task(cpu) && !rcu_cpu_online(cpu)) + return false; + + return true; +} + /* Per-task initial processing. */ static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop) { - if (t != current && READ_ONCE(t->on_rq) && !is_idle_task(t)) { + if (t != current && rcu_tasks_is_holdout(t)) { get_task_struct(t); t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw); WRITE_ONCE(t->rcu_tasks_holdout, true); @@ -947,7 +973,7 @@ static void check_holdout_task(struct task_struct *t, if (!READ_ONCE(t->rcu_tasks_holdout) || t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) || - !READ_ONCE(t->on_rq) || + !rcu_tasks_is_holdout(t) || (IS_ENABLED(CONFIG_NO_HZ_FULL) && !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) { WRITE_ONCE(t->rcu_tasks_holdout, false); @@ -1525,7 +1551,7 @@ static int trc_inspect_reader(struct task_struct *t, void *bhp_in) } else { // The task is not running, so C-language access is safe. 
nesting = t->trc_reader_nesting; - WARN_ON_ONCE(ofl && task_curr(t) && !is_idle_task(t)); + WARN_ON_ONCE(ofl && task_curr(t) && (t != idle_task(task_cpu(t)))); if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && ofl) n_heavy_reader_ofl_updates++; } diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 700524726079..3ac3c846105f 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -755,14 +755,19 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp) } /* - * Return true if the specified CPU has passed through a quiescent - * state by virtue of being in or having passed through an dynticks - * idle state since the last call to dyntick_save_progress_counter() - * for this same CPU, or by virtue of having been offline. + * Returns positive if the specified CPU has passed through a quiescent state + * by virtue of being in or having passed through an dynticks idle state since + * the last call to dyntick_save_progress_counter() for this same CPU, or by + * virtue of having been offline. + * + * Returns negative if the specified CPU needs a force resched. + * + * Returns zero otherwise. */ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) { unsigned long jtsq; + int ret = 0; struct rcu_node *rnp = rdp->mynode; /* @@ -848,8 +853,8 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) || rcu_state.cbovld)) { WRITE_ONCE(rdp->rcu_urgent_qs, true); - resched_cpu(rdp->cpu); WRITE_ONCE(rdp->last_fqs_resched, jiffies); + ret = -1; } /* @@ -862,8 +867,8 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) if (time_after(jiffies, rcu_state.jiffies_resched)) { if (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq)) { - resched_cpu(rdp->cpu); WRITE_ONCE(rdp->last_fqs_resched, jiffies); + ret = -1; } if (IS_ENABLED(CONFIG_IRQ_WORK) && !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq && @@ -892,7 +897,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) } } - return 0; + return ret; } /* Trace-event wrapper function for trace_rcu_future_grace_period. */ @@ -2271,15 +2276,15 @@ static void force_qs_rnp(int (*f)(struct rcu_data *rdp)) { int cpu; unsigned long flags; - unsigned long mask; - struct rcu_data *rdp; struct rcu_node *rnp; rcu_state.cbovld = rcu_state.cbovldnext; rcu_state.cbovldnext = false; rcu_for_each_leaf_node(rnp) { + unsigned long mask = 0; + unsigned long rsmask = 0; + cond_resched_tasks_rcu_qs(); - mask = 0; raw_spin_lock_irqsave_rcu_node(rnp, flags); rcu_state.cbovldnext |= !!rnp->cbovldmask; if (rnp->qsmask == 0) { @@ -2297,11 +2302,17 @@ static void force_qs_rnp(int (*f)(struct rcu_data *rdp)) continue; } for_each_leaf_node_cpu_mask(rnp, cpu, rnp->qsmask) { + struct rcu_data *rdp; + int ret; + rdp = per_cpu_ptr(&rcu_data, cpu); - if (f(rdp)) { + ret = f(rdp); + if (ret > 0) { mask |= rdp->grpmask; rcu_disable_urgency_upon_qs(rdp); } + if (ret < 0) + rsmask |= rdp->grpmask; } if (mask != 0) { /* Idle/offline CPUs, report (releases rnp->lock). */ @@ -2310,6 +2321,9 @@ static void force_qs_rnp(int (*f)(struct rcu_data *rdp)) /* Nothing to do here, so just drop the lock. */ raw_spin_unlock_irqrestore_rcu_node(rnp, flags); } + + for_each_leaf_node_cpu_mask(rnp, cpu, rsmask) + resched_cpu(cpu); } } @@ -3471,13 +3485,6 @@ kfree_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) return freed == 0 ? 
SHRINK_STOP : freed; } -static struct shrinker kfree_rcu_shrinker = { - .count_objects = kfree_rcu_shrink_count, - .scan_objects = kfree_rcu_shrink_scan, - .batch = 0, - .seeks = DEFAULT_SEEKS, -}; - void __init kfree_rcu_scheduler_running(void) { int cpu; @@ -4202,6 +4209,13 @@ static bool rcu_rdp_cpu_online(struct rcu_data *rdp) return !!(rdp->grpmask & rcu_rnp_online_cpus(rdp->mynode)); } +bool rcu_cpu_online(int cpu) +{ + struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); + + return rcu_rdp_cpu_online(rdp); +} + #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) /* @@ -5029,6 +5043,7 @@ static void __init kfree_rcu_batch_init(void) { int cpu; int i, j; + struct shrinker *kfree_rcu_shrinker; /* Clamp it to [0:100] seconds interval. */ if (rcu_delay_page_cache_fill_msec < 0 || @@ -5060,8 +5075,17 @@ static void __init kfree_rcu_batch_init(void) INIT_DELAYED_WORK(&krcp->page_cache_work, fill_page_cache_func); krcp->initialized = true; } - if (register_shrinker(&kfree_rcu_shrinker, "rcu-kfree")) - pr_err("Failed to register kfree_rcu() shrinker!\n"); + + kfree_rcu_shrinker = shrinker_alloc(0, "rcu-kfree"); + if (!kfree_rcu_shrinker) { + pr_err("Failed to allocate kfree_rcu() shrinker!\n"); + return; + } + + kfree_rcu_shrinker->count_objects = kfree_rcu_shrink_count; + kfree_rcu_shrinker->scan_objects = kfree_rcu_shrink_scan; + + shrinker_register(kfree_rcu_shrinker); } void __init rcu_init(void) diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h index 5598212d1f27..4efbf7333d4e 100644 --- a/kernel/rcu/tree_nocb.h +++ b/kernel/rcu/tree_nocb.h @@ -1396,13 +1396,6 @@ lazy_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) return count ? count : SHRINK_STOP; } - -static struct shrinker lazy_rcu_shrinker = { - .count_objects = lazy_rcu_shrink_count, - .scan_objects = lazy_rcu_shrink_scan, - .batch = 0, - .seeks = DEFAULT_SEEKS, -}; #endif // #ifdef CONFIG_RCU_LAZY void __init rcu_init_nohz(void) @@ -1410,6 +1403,7 @@ void __init rcu_init_nohz(void) int cpu; struct rcu_data *rdp; const struct cpumask *cpumask = NULL; + struct shrinker * __maybe_unused lazy_rcu_shrinker; #if defined(CONFIG_NO_HZ_FULL) if (tick_nohz_full_running && !cpumask_empty(tick_nohz_full_mask)) @@ -1436,8 +1430,15 @@ void __init rcu_init_nohz(void) return; #ifdef CONFIG_RCU_LAZY - if (register_shrinker(&lazy_rcu_shrinker, "rcu-lazy")) - pr_err("Failed to register lazy_rcu shrinker!\n"); + lazy_rcu_shrinker = shrinker_alloc(0, "rcu-lazy"); + if (!lazy_rcu_shrinker) { + pr_err("Failed to allocate lazy_rcu shrinker!\n"); + } else { + lazy_rcu_shrinker->count_objects = lazy_rcu_shrink_count; + lazy_rcu_shrinker->scan_objects = lazy_rcu_shrink_scan; + + shrinker_register(lazy_rcu_shrinker); + } #endif // #ifdef CONFIG_RCU_LAZY if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) { diff --git a/kernel/reboot.c b/kernel/reboot.c index 3bba88c7ffc6..395a0ea3c7a8 100644 --- a/kernel/reboot.c +++ b/kernel/reboot.c @@ -55,6 +55,7 @@ struct sys_off_handler { enum sys_off_mode mode; bool blocking; void *list; + struct device *dev; }; /* @@ -74,6 +75,7 @@ void __weak (*pm_power_off)(void); void emergency_restart(void) { kmsg_dump(KMSG_DUMP_EMERG); + system_state = SYSTEM_RESTART; machine_emergency_restart(); } EXPORT_SYMBOL_GPL(emergency_restart); @@ -323,6 +325,7 @@ static int sys_off_notify(struct notifier_block *nb, data.cb_data = handler->cb_data; data.mode = mode; data.cmd = cmd; + data.dev = handler->dev; return handler->sys_off_cb(&data); } @@ -510,6 +513,7 @@ int 
devm_register_sys_off_handler(struct device *dev, handler = register_sys_off_handler(mode, priority, callback, cb_data); if (IS_ERR(handler)) return PTR_ERR(handler); + handler->dev = dev; return devm_add_action_or_reset(dev, devm_unregister_sys_off_handler, handler); diff --git a/kernel/resource.c b/kernel/resource.c index b1763b2fd7ef..866ef3663a0b 100644 --- a/kernel/resource.c +++ b/kernel/resource.c @@ -56,33 +56,17 @@ struct resource_constraint { static DEFINE_RWLOCK(resource_lock); -static struct resource *next_resource(struct resource *p) +static struct resource *next_resource(struct resource *p, bool skip_children) { - if (p->child) + if (!skip_children && p->child) return p->child; while (!p->sibling && p->parent) p = p->parent; return p->sibling; } -static struct resource *next_resource_skip_children(struct resource *p) -{ - while (!p->sibling && p->parent) - p = p->parent; - return p->sibling; -} - #define for_each_resource(_root, _p, _skip_children) \ - for ((_p) = (_root)->child; (_p); \ - (_p) = (_skip_children) ? next_resource_skip_children(_p) : \ - next_resource(_p)) - -static void *r_next(struct seq_file *m, void *v, loff_t *pos) -{ - struct resource *p = v; - (*pos)++; - return (void *)next_resource(p); -} + for ((_p) = (_root)->child; (_p); (_p) = next_resource(_p, _skip_children)) #ifdef CONFIG_PROC_FS @@ -91,14 +75,28 @@ enum { MAX_IORES_LEVEL = 5 }; static void *r_start(struct seq_file *m, loff_t *pos) __acquires(resource_lock) { - struct resource *p = pde_data(file_inode(m->file)); - loff_t l = 0; + struct resource *root = pde_data(file_inode(m->file)); + struct resource *p; + loff_t l = *pos; + read_lock(&resource_lock); - for (p = p->child; p && l < *pos; p = r_next(m, p, &l)) - ; + for_each_resource(root, p, false) { + if (l-- == 0) + break; + } + return p; } +static void *r_next(struct seq_file *m, void *v, loff_t *pos) +{ + struct resource *p = v; + + (*pos)++; + + return (void *)next_resource(p, false); +} + static void r_stop(struct seq_file *m, void *v) __releases(resource_lock) { @@ -336,7 +334,7 @@ static int find_next_iomem_res(resource_size_t start, resource_size_t end, read_lock(&resource_lock); - for (p = iomem_resource.child; p; p = next_resource(p)) { + for_each_resource(&iomem_resource, p, false) { /* If we passed the resource we are looking for, stop */ if (p->start > end) { p = NULL; @@ -1641,13 +1639,12 @@ __setup("reserve=", reserve_setup); */ int iomem_map_sanity_check(resource_size_t addr, unsigned long size) { - struct resource *p = &iomem_resource; resource_size_t end = addr + size - 1; + struct resource *p; int err = 0; - loff_t l; read_lock(&resource_lock); - for (p = p->child; p ; p = r_next(NULL, p, &l)) { + for_each_resource(&iomem_resource, p, false) { /* * We can probably skip the resources without * IORESOURCE_IO attribute? 
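With next_resource() taking a skip_children flag, every walk in kernel/resource.c funnels through the single for_each_resource() helper shown above. A small sketch of a full-depth walk as it would look inside resource.c (resource_lock is file-local, so this only makes sense there):

        struct resource *p;

        read_lock(&resource_lock);
        for_each_resource(&iomem_resource, p, false)    /* false: descend into children */
                pr_debug("%pR (%s)\n", p, p->name ?: "<unnamed>");
        read_unlock(&resource_lock);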
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 3d7e2d702699..a708d225c28e 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -9869,7 +9869,7 @@ struct task_group root_task_group; LIST_HEAD(task_groups); /* Cacheline aligned slab cache for task_group */ -static struct kmem_cache *task_group_cache __read_mostly; +static struct kmem_cache *task_group_cache __ro_after_init; #endif void __init sched_init(void) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 8767988242ee..d7a3c63a2171 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -1759,12 +1759,12 @@ static bool pgdat_free_space_enough(struct pglist_data *pgdat) * The smaller the hint page fault latency, the higher the possibility * for the page to be hot. */ -static int numa_hint_fault_latency(struct page *page) +static int numa_hint_fault_latency(struct folio *folio) { int last_time, time; time = jiffies_to_msecs(jiffies); - last_time = xchg_page_access_time(page, time); + last_time = folio_xchg_access_time(folio, time); return (time - last_time) & PAGE_ACCESS_TIME_MASK; } @@ -1821,7 +1821,7 @@ static void numa_promotion_adjust_threshold(struct pglist_data *pgdat, } } -bool should_numa_migrate_memory(struct task_struct *p, struct page * page, +bool should_numa_migrate_memory(struct task_struct *p, struct folio *folio, int src_nid, int dst_cpu) { struct numa_group *ng = deref_curr_numa_group(p); @@ -1851,16 +1851,16 @@ bool should_numa_migrate_memory(struct task_struct *p, struct page * page, numa_promotion_adjust_threshold(pgdat, rate_limit, def_th); th = pgdat->nbp_threshold ? : def_th; - latency = numa_hint_fault_latency(page); + latency = numa_hint_fault_latency(folio); if (latency >= th) return false; return !numa_promotion_rate_limit(pgdat, rate_limit, - thp_nr_pages(page)); + folio_nr_pages(folio)); } this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid); - last_cpupid = page_cpupid_xchg_last(page, this_cpupid); + last_cpupid = folio_xchg_last_cpupid(folio, this_cpupid); if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) && !node_is_toptier(src_nid) && !cpupid_valid(last_cpupid)) @@ -3666,41 +3666,140 @@ static inline void dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } #endif +static void reweight_eevdf(struct cfs_rq *cfs_rq, struct sched_entity *se, + unsigned long weight) +{ + unsigned long old_weight = se->load.weight; + u64 avruntime = avg_vruntime(cfs_rq); + s64 vlag, vslice; + + /* + * VRUNTIME + * ======== + * + * COROLLARY #1: The virtual runtime of the entity needs to be + * adjusted if re-weight at !0-lag point. + * + * Proof: For contradiction assume this is not true, so we can + * re-weight without changing vruntime at !0-lag point. 
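A side note on numa_hint_fault_latency() in the hunk above: the stored access time is a truncated millisecond counter, so the subtraction is masked to stay wrap-safe. A tiny worked example with made-up widths (the real PAGE_ACCESS_TIME_MASK depends on the page-flag layout):

        unsigned int last = 0xFFFE;     /* value stored just before the counter wraps */
        unsigned int now  = 0x0003;     /* current truncated time */
        unsigned int mask = 0xFFFF;     /* illustrative 16-bit window */

        unsigned int latency = (now - last) & mask;     /* 5, not a huge bogus value */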
+ * + * Weight VRuntime Avg-VRuntime + * before w v V + * after w' v' V' + * + * Since lag needs to be preserved through re-weight: + * + * lag = (V - v)*w = (V'- v')*w', where v = v' + * ==> V' = (V - v)*w/w' + v (1) + * + * Let W be the total weight of the entities before reweight, + * since V' is the new weighted average of entities: + * + * V' = (WV + w'v - wv) / (W + w' - w) (2) + * + * by using (1) & (2) we obtain: + * + * (WV + w'v - wv) / (W + w' - w) = (V - v)*w/w' + v + * ==> (WV-Wv+Wv+w'v-wv)/(W+w'-w) = (V - v)*w/w' + v + * ==> (WV - Wv)/(W + w' - w) + v = (V - v)*w/w' + v + * ==> (V - v)*W/(W + w' - w) = (V - v)*w/w' (3) + * + * Since we are doing at !0-lag point which means V != v, we + * can simplify (3): + * + * ==> W / (W + w' - w) = w / w' + * ==> Ww' = Ww + ww' - ww + * ==> W * (w' - w) = w * (w' - w) + * ==> W = w (re-weight indicates w' != w) + * + * So the cfs_rq contains only one entity, hence vruntime of + * the entity @v should always equal to the cfs_rq's weighted + * average vruntime @V, which means we will always re-weight + * at 0-lag point, thus breach assumption. Proof completed. + * + * + * COROLLARY #2: Re-weight does NOT affect weighted average + * vruntime of all the entities. + * + * Proof: According to corollary #1, Eq. (1) should be: + * + * (V - v)*w = (V' - v')*w' + * ==> v' = V' - (V - v)*w/w' (4) + * + * According to the weighted average formula, we have: + * + * V' = (WV - wv + w'v') / (W - w + w') + * = (WV - wv + w'(V' - (V - v)w/w')) / (W - w + w') + * = (WV - wv + w'V' - Vw + wv) / (W - w + w') + * = (WV + w'V' - Vw) / (W - w + w') + * + * ==> V'*(W - w + w') = WV + w'V' - Vw + * ==> V' * (W - w) = (W - w) * V (5) + * + * If the entity is the only one in the cfs_rq, then reweight + * always occurs at 0-lag point, so V won't change. Or else + * there are other entities, hence W != w, then Eq. (5) turns + * into V' = V. So V won't change in either case, proof done. + * + * + * So according to corollary #1 & #2, the effect of re-weight + * on vruntime should be: + * + * v' = V' - (V - v) * w / w' (4) + * = V - (V - v) * w / w' + * = V - vl * w / w' + * = V - vl' + */ + if (avruntime != se->vruntime) { + vlag = (s64)(avruntime - se->vruntime); + vlag = div_s64(vlag * old_weight, weight); + se->vruntime = avruntime - vlag; + } + + /* + * DEADLINE + * ======== + * + * When the weight changes, the virtual time slope changes and + * we should adjust the relative virtual deadline accordingly. + * + * d' = v' + (d - v)*w/w' + * = V' - (V - v)*w/w' + (d - v)*w/w' + * = V - (V - v)*w/w' + (d - v)*w/w' + * = V + (d - V)*w/w' + */ + vslice = (s64)(se->deadline - avruntime); + vslice = div_s64(vslice * old_weight, weight); + se->deadline = avruntime + vslice; +} + static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, unsigned long weight) { - unsigned long old_weight = se->load.weight; + bool curr = cfs_rq->curr == se; if (se->on_rq) { /* commit outstanding execution time */ - if (cfs_rq->curr == se) + if (curr) update_curr(cfs_rq); else - avg_vruntime_sub(cfs_rq, se); + __dequeue_entity(cfs_rq, se); update_load_sub(&cfs_rq->load, se->load.weight); } dequeue_load_avg(cfs_rq, se); - update_load_set(&se->load, weight); - if (!se->on_rq) { /* * Because we keep se->vlag = V - v_i, while: lag_i = w_i*(V - v_i), * we need to scale se->vlag when w_i changes. 
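To make the derivation above concrete, here is a small numeric check of v' = V - (V - v)*w/w' and d' = V + (d - V)*w/w', with made-up values (V = 1000, v = 880, d = 1180, weight going from 1024 to 2048):

        s64 vlag   = 1000 - 880;                        /* V - v = 120 */
        vlag       = div_s64(vlag * 1024, 2048);        /* 60: 1024*120 == 2048*60, lag preserved */
        /* v' = 1000 - 60 = 940 */

        s64 vslice = 1180 - 1000;                       /* d - V = 180 */
        vslice     = div_s64(vslice * 1024, 2048);      /* 90 */
        /* d' = 1000 + 90 = 1090: the relative deadline halves with double the weight */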
*/ - se->vlag = div_s64(se->vlag * old_weight, weight); + se->vlag = div_s64(se->vlag * se->load.weight, weight); } else { - s64 deadline = se->deadline - se->vruntime; - /* - * When the weight changes, the virtual time slope changes and - * we should adjust the relative virtual deadline accordingly. - */ - deadline = div_s64(deadline * old_weight, weight); - se->deadline = se->vruntime + deadline; - if (se != cfs_rq->curr) - min_deadline_cb_propagate(&se->run_node, NULL); + reweight_eevdf(cfs_rq, se, weight); } + update_load_set(&se->load, weight); + #ifdef CONFIG_SMP do { u32 divider = get_pelt_divider(&se->avg); @@ -3712,8 +3811,17 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, enqueue_load_avg(cfs_rq, se); if (se->on_rq) { update_load_add(&cfs_rq->load, se->load.weight); - if (cfs_rq->curr != se) - avg_vruntime_add(cfs_rq, se); + if (!curr) { + /* + * The entity's vruntime has been adjusted, so let's check + * whether the rq-wide min_vruntime needs updated too. Since + * the calculations above require stable min_vruntime rather + * than up-to-date one, we do the update at the end of the + * reweight process. + */ + __enqueue_entity(cfs_rq, se); + update_min_vruntime(cfs_rq); + } } } @@ -3857,14 +3965,11 @@ static void update_cfs_group(struct sched_entity *se) #ifndef CONFIG_SMP shares = READ_ONCE(gcfs_rq->tg->shares); - - if (likely(se->load.weight == shares)) - return; #else - shares = calc_group_shares(gcfs_rq); + shares = calc_group_shares(gcfs_rq); #endif - - reweight_entity(cfs_rq_of(se), se, shares); + if (unlikely(se->load.weight != shares)) + reweight_entity(cfs_rq_of(se), se, shares); } #else /* CONFIG_FAIR_GROUP_SCHED */ @@ -11079,12 +11184,16 @@ static int should_we_balance(struct lb_env *env) continue; } - /* Are we the first idle CPU? */ + /* + * Are we the first idle core in a non-SMT domain or higher, + * or the first idle CPU in a SMT domain? + */ return cpu == env->dst_cpu; } - if (idle_smt == env->dst_cpu) - return true; + /* Are we the first idle CPU with busy siblings? */ + if (idle_smt != -1) + return idle_smt == env->dst_cpu; /* Are we the first CPU of this group ? */ return group_balance_cpu(sg) == env->dst_cpu; diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c index 802d98cf2de3..51e38f5f4701 100644 --- a/kernel/sched/wait.c +++ b/kernel/sched/wait.c @@ -58,13 +58,6 @@ void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry EXPORT_SYMBOL(remove_wait_queue); /* - * Scan threshold to break wait queue walk. - * This allows a waker to take a break from holding the - * wait queue lock during the wait queue walk. - */ -#define WAITQUEUE_WALK_BREAK_CNT 64 - -/* * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve * number) then we wake that number of exclusive tasks, and potentially all @@ -78,21 +71,13 @@ EXPORT_SYMBOL(remove_wait_queue); * zero in this (rare) case, and we handle it by continuing to scan the queue. 
*/ static int __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode, - int nr_exclusive, int wake_flags, void *key, - wait_queue_entry_t *bookmark) + int nr_exclusive, int wake_flags, void *key) { wait_queue_entry_t *curr, *next; - int cnt = 0; lockdep_assert_held(&wq_head->lock); - if (bookmark && (bookmark->flags & WQ_FLAG_BOOKMARK)) { - curr = list_next_entry(bookmark, entry); - - list_del(&bookmark->entry); - bookmark->flags = 0; - } else - curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry); + curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry); if (&curr->entry == &wq_head->head) return nr_exclusive; @@ -101,21 +86,11 @@ static int __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode, unsigned flags = curr->flags; int ret; - if (flags & WQ_FLAG_BOOKMARK) - continue; - ret = curr->func(curr, mode, wake_flags, key); if (ret < 0) break; if (ret && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive) break; - - if (bookmark && (++cnt > WAITQUEUE_WALK_BREAK_CNT) && - (&next->entry != &wq_head->head)) { - bookmark->flags = WQ_FLAG_BOOKMARK; - list_add_tail(&bookmark->entry, &next->entry); - break; - } } return nr_exclusive; @@ -125,20 +100,12 @@ static int __wake_up_common_lock(struct wait_queue_head *wq_head, unsigned int m int nr_exclusive, int wake_flags, void *key) { unsigned long flags; - wait_queue_entry_t bookmark; - int remaining = nr_exclusive; + int remaining; - bookmark.flags = 0; - bookmark.private = NULL; - bookmark.func = NULL; - INIT_LIST_HEAD(&bookmark.entry); - - do { - spin_lock_irqsave(&wq_head->lock, flags); - remaining = __wake_up_common(wq_head, mode, remaining, - wake_flags, key, &bookmark); - spin_unlock_irqrestore(&wq_head->lock, flags); - } while (bookmark.flags & WQ_FLAG_BOOKMARK); + spin_lock_irqsave(&wq_head->lock, flags); + remaining = __wake_up_common(wq_head, mode, nr_exclusive, wake_flags, + key); + spin_unlock_irqrestore(&wq_head->lock, flags); return nr_exclusive - remaining; } @@ -171,23 +138,16 @@ void __wake_up_on_current_cpu(struct wait_queue_head *wq_head, unsigned int mode */ void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr) { - __wake_up_common(wq_head, mode, nr, 0, NULL, NULL); + __wake_up_common(wq_head, mode, nr, 0, NULL); } EXPORT_SYMBOL_GPL(__wake_up_locked); void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key) { - __wake_up_common(wq_head, mode, 1, 0, key, NULL); + __wake_up_common(wq_head, mode, 1, 0, key); } EXPORT_SYMBOL_GPL(__wake_up_locked_key); -void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head, - unsigned int mode, void *key, wait_queue_entry_t *bookmark) -{ - __wake_up_common(wq_head, mode, 1, 0, key, bookmark); -} -EXPORT_SYMBOL_GPL(__wake_up_locked_key_bookmark); - /** * __wake_up_sync_key - wake up threads blocked on a waitqueue. 
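With the bookmark machinery removed, __wake_up_common() above is once more a plain list walk that stops after nr_exclusive exclusive waiters have been woken. A minimal usage sketch of that semantic (my_wq and data_available() are illustrative):

        static DECLARE_WAIT_QUEUE_HEAD(my_wq);

        /* Waiter: exclusive entries are queued at the tail of the list. */
        wait_event_interruptible_exclusive(my_wq, data_available());

        /* Waker: wakes all non-exclusive waiters plus exactly one exclusive one. */
        wake_up(&my_wq);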
* @wq_head: the waitqueue @@ -233,7 +193,7 @@ EXPORT_SYMBOL_GPL(__wake_up_sync_key); void __wake_up_locked_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key) { - __wake_up_common(wq_head, mode, 1, WF_SYNC, key, NULL); + __wake_up_common(wq_head, mode, 1, WF_SYNC, key); } EXPORT_SYMBOL_GPL(__wake_up_locked_sync_key); diff --git a/kernel/signal.c b/kernel/signal.c index 83fcbaf0e82d..47a7602dfe8d 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -415,7 +415,7 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags, int override_rlimit, const unsigned int sigqueue_flags) { struct sigqueue *q = NULL; - struct ucounts *ucounts = NULL; + struct ucounts *ucounts; long sigpending; /* @@ -1058,12 +1058,11 @@ static void complete_signal(int sig, struct task_struct *p, enum pid_type type) signal->flags = SIGNAL_GROUP_EXIT; signal->group_exit_code = sig; signal->group_stop_count = 0; - t = p; - do { + __for_each_thread(signal, t) { task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK); sigaddset(&t->pending.signal, SIGKILL); signal_wake_up(t, 1); - } while_each_thread(p, t); + } return; } } @@ -1471,16 +1470,21 @@ int group_send_sig_info(int sig, struct kernel_siginfo *info, int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp) { struct task_struct *p = NULL; - int retval, success; + int ret = -ESRCH; - success = 0; - retval = -ESRCH; do_each_pid_task(pgrp, PIDTYPE_PGID, p) { int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID); - success |= !err; - retval = err; + /* + * If group_send_sig_info() succeeds at least once ret + * becomes 0 and after that the code below has no effect. + * Otherwise we return the last err or -ESRCH if this + * process group is empty. + */ + if (ret) + ret = err; } while_each_pid_task(pgrp, PIDTYPE_PGID, p); - return success ? 
0 : retval; + + return ret; } int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid) diff --git a/kernel/smpboot.c b/kernel/smpboot.c index f47d8f375946..1992b62e980b 100644 --- a/kernel/smpboot.c +++ b/kernel/smpboot.c @@ -272,8 +272,7 @@ static void smpboot_destroy_threads(struct smp_hotplug_thread *ht) struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu); if (tsk) { - kthread_stop(tsk); - put_task_struct(tsk); + kthread_stop_put(tsk); *per_cpu_ptr(ht->store, cpu) = NULL; } } diff --git a/kernel/sys.c b/kernel/sys.c index 2410e3999ebe..e219fcfa112d 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -1786,6 +1786,7 @@ void getrusage(struct task_struct *p, int who, struct rusage *r) unsigned long flags; u64 tgutime, tgstime, utime, stime; unsigned long maxrss = 0; + struct signal_struct *sig = p->signal; memset((char *)r, 0, sizeof (*r)); utime = stime = 0; @@ -1793,7 +1794,7 @@ void getrusage(struct task_struct *p, int who, struct rusage *r) if (who == RUSAGE_THREAD) { task_cputime_adjusted(current, &utime, &stime); accumulate_thread_rusage(p, r); - maxrss = p->signal->maxrss; + maxrss = sig->maxrss; goto out; } @@ -1803,15 +1804,15 @@ void getrusage(struct task_struct *p, int who, struct rusage *r) switch (who) { case RUSAGE_BOTH: case RUSAGE_CHILDREN: - utime = p->signal->cutime; - stime = p->signal->cstime; - r->ru_nvcsw = p->signal->cnvcsw; - r->ru_nivcsw = p->signal->cnivcsw; - r->ru_minflt = p->signal->cmin_flt; - r->ru_majflt = p->signal->cmaj_flt; - r->ru_inblock = p->signal->cinblock; - r->ru_oublock = p->signal->coublock; - maxrss = p->signal->cmaxrss; + utime = sig->cutime; + stime = sig->cstime; + r->ru_nvcsw = sig->cnvcsw; + r->ru_nivcsw = sig->cnivcsw; + r->ru_minflt = sig->cmin_flt; + r->ru_majflt = sig->cmaj_flt; + r->ru_inblock = sig->cinblock; + r->ru_oublock = sig->coublock; + maxrss = sig->cmaxrss; if (who == RUSAGE_CHILDREN) break; @@ -1821,18 +1822,16 @@ void getrusage(struct task_struct *p, int who, struct rusage *r) thread_group_cputime_adjusted(p, &tgutime, &tgstime); utime += tgutime; stime += tgstime; - r->ru_nvcsw += p->signal->nvcsw; - r->ru_nivcsw += p->signal->nivcsw; - r->ru_minflt += p->signal->min_flt; - r->ru_majflt += p->signal->maj_flt; - r->ru_inblock += p->signal->inblock; - r->ru_oublock += p->signal->oublock; - if (maxrss < p->signal->maxrss) - maxrss = p->signal->maxrss; - t = p; - do { + r->ru_nvcsw += sig->nvcsw; + r->ru_nivcsw += sig->nivcsw; + r->ru_minflt += sig->min_flt; + r->ru_majflt += sig->maj_flt; + r->ru_inblock += sig->inblock; + r->ru_oublock += sig->oublock; + if (maxrss < sig->maxrss) + maxrss = sig->maxrss; + __for_each_thread(sig, t) accumulate_thread_rusage(t, r); - } while_each_thread(p, t); break; default: @@ -2368,19 +2367,45 @@ static int prctl_set_vma(unsigned long opt, unsigned long start, } #endif /* CONFIG_ANON_VMA_NAME */ +static inline unsigned long get_current_mdwe(void) +{ + unsigned long ret = 0; + + if (test_bit(MMF_HAS_MDWE, ¤t->mm->flags)) + ret |= PR_MDWE_REFUSE_EXEC_GAIN; + if (test_bit(MMF_HAS_MDWE_NO_INHERIT, ¤t->mm->flags)) + ret |= PR_MDWE_NO_INHERIT; + + return ret; +} + static inline int prctl_set_mdwe(unsigned long bits, unsigned long arg3, unsigned long arg4, unsigned long arg5) { + unsigned long current_bits; + if (arg3 || arg4 || arg5) return -EINVAL; - if (bits & ~(PR_MDWE_REFUSE_EXEC_GAIN)) + if (bits & ~(PR_MDWE_REFUSE_EXEC_GAIN | PR_MDWE_NO_INHERIT)) return -EINVAL; + /* NO_INHERIT only makes sense with REFUSE_EXEC_GAIN */ + if (bits & PR_MDWE_NO_INHERIT && !(bits & 
PR_MDWE_REFUSE_EXEC_GAIN)) + return -EINVAL; + + /* PARISC cannot allow mdwe as it needs writable stacks */ + if (IS_ENABLED(CONFIG_PARISC)) + return -EINVAL; + + current_bits = get_current_mdwe(); + if (current_bits && current_bits != bits) + return -EPERM; /* Cannot unset the flags */ + + if (bits & PR_MDWE_NO_INHERIT) + set_bit(MMF_HAS_MDWE_NO_INHERIT, ¤t->mm->flags); if (bits & PR_MDWE_REFUSE_EXEC_GAIN) set_bit(MMF_HAS_MDWE, ¤t->mm->flags); - else if (test_bit(MMF_HAS_MDWE, ¤t->mm->flags)) - return -EPERM; /* Cannot unset the flag */ return 0; } @@ -2390,9 +2415,7 @@ static inline int prctl_get_mdwe(unsigned long arg2, unsigned long arg3, { if (arg2 || arg3 || arg4 || arg5) return -EINVAL; - - return test_bit(MMF_HAS_MDWE, ¤t->mm->flags) ? - PR_MDWE_REFUSE_EXEC_GAIN : 0; + return get_current_mdwe(); } static int prctl_get_auxv(void __user *addr, unsigned long len) diff --git a/kernel/taskstats.c b/kernel/taskstats.c index 8ce3fa0c19e2..4354ea231fab 100644 --- a/kernel/taskstats.c +++ b/kernel/taskstats.c @@ -233,9 +233,8 @@ static int fill_stats_for_tgid(pid_t tgid, struct taskstats *stats) else memset(stats, 0, sizeof(*stats)); - tsk = first; start_time = ktime_get_ns(); - do { + for_each_thread(first, tsk) { if (tsk->exit_state) continue; /* @@ -258,7 +257,7 @@ static int fill_stats_for_tgid(pid_t tgid, struct taskstats *stats) stats->nvcsw += tsk->nvcsw; stats->nivcsw += tsk->nivcsw; - } while_each_thread(first, tsk); + } unlock_task_sighand(first, &flags); rc = 0; diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c index 238262e4aba7..760793998cdd 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c @@ -2219,29 +2219,22 @@ static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base, } } -int hrtimers_dead_cpu(unsigned int scpu) +int hrtimers_cpu_dying(unsigned int dying_cpu) { struct hrtimer_cpu_base *old_base, *new_base; - int i; + int i, ncpu = cpumask_first(cpu_active_mask); - BUG_ON(cpu_online(scpu)); - tick_cancel_sched_timer(scpu); + tick_cancel_sched_timer(dying_cpu); + + old_base = this_cpu_ptr(&hrtimer_bases); + new_base = &per_cpu(hrtimer_bases, ncpu); - /* - * this BH disable ensures that raise_softirq_irqoff() does - * not wakeup ksoftirqd (and acquire the pi-lock) while - * holding the cpu_base lock - */ - local_bh_disable(); - local_irq_disable(); - old_base = &per_cpu(hrtimer_bases, scpu); - new_base = this_cpu_ptr(&hrtimer_bases); /* * The caller is globally serialized and nobody else * takes two locks at once, deadlock is not possible. */ - raw_spin_lock(&new_base->lock); - raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING); + raw_spin_lock(&old_base->lock); + raw_spin_lock_nested(&new_base->lock, SINGLE_DEPTH_NESTING); for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { migrate_hrtimer_list(&old_base->clock_base[i], @@ -2252,15 +2245,13 @@ int hrtimers_dead_cpu(unsigned int scpu) * The migration might have changed the first expiring softirq * timer on this CPU. Update it. 
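Stepping back to the prctl_set_mdwe() hunk a little further up: from user space the new PR_MDWE_NO_INHERIT bit is only accepted together with PR_MDWE_REFUSE_EXEC_GAIN, and bits can never be cleared again. A minimal sketch, assuming a uapi/libc that already defines the PR_SET_MDWE constants and a kernel carrying this series:

        #include <sys/prctl.h>
        #include <linux/prctl.h>
        #include <stdio.h>

        /* Refuse future W^X-violating mappings in this process, but do not
         * pass the restriction on across execve().
         */
        if (prctl(PR_SET_MDWE,
                  PR_MDWE_REFUSE_EXEC_GAIN | PR_MDWE_NO_INHERIT, 0, 0, 0))
                perror("PR_SET_MDWE");
        /* PR_MDWE_NO_INHERIT alone -> -EINVAL; clearing bits later -> -EPERM. */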
*/ - hrtimer_update_softirq_timer(new_base, false); + __hrtimer_get_next_event(new_base, HRTIMER_ACTIVE_SOFT); + /* Tell the other CPU to retrigger the next event */ + smp_call_function_single(ncpu, retrigger_next_event, NULL, 0); - raw_spin_unlock(&old_base->lock); raw_spin_unlock(&new_base->lock); + raw_spin_unlock(&old_base->lock); - /* Check, if we got expired work to do */ - __hrtimer_peek_ahead_timers(); - local_irq_enable(); - local_bh_enable(); return 0; } diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index df697c74d519..84e8a0f6e4e0 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -1252,9 +1252,7 @@ static const struct bpf_func_proto bpf_get_func_arg_cnt_proto = { }; #ifdef CONFIG_KEYS -__diag_push(); -__diag_ignore_all("-Wmissing-prototypes", - "kfuncs which will be used in BPF programs"); +__bpf_kfunc_start_defs(); /** * bpf_lookup_user_key - lookup a key by its serial @@ -1404,7 +1402,7 @@ __bpf_kfunc int bpf_verify_pkcs7_signature(struct bpf_dynptr_kern *data_ptr, } #endif /* CONFIG_SYSTEM_DATA_VERIFICATION */ -__diag_pop(); +__bpf_kfunc_end_defs(); BTF_SET8_START(key_sig_kfunc_set) BTF_ID_FLAGS(func, bpf_lookup_user_key, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE) diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 515cafdb18d9..43cc47d7faaf 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -2056,7 +2056,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer) retries = 10; success = false; while (retries--) { - struct list_head *head_page, *prev_page, *r; + struct list_head *head_page, *prev_page; struct list_head *last_page, *first_page; struct list_head *head_page_with_bit; struct buffer_page *hpage = rb_set_head_page(cpu_buffer); @@ -2075,9 +2075,9 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer) last_page->next = head_page_with_bit; first_page->prev = prev_page; - r = cmpxchg(&prev_page->next, head_page_with_bit, first_page); - - if (r == head_page_with_bit) { + /* caution: head_page_with_bit gets updated on cmpxchg failure */ + if (try_cmpxchg(&prev_page->next, + &head_page_with_bit, first_page)) { /* * yay, we replaced the page pointer to our new list, * now, we just have to update to head page's prev diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index abaaf516fcae..9aebf904ff97 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -54,12 +54,6 @@ #include "trace.h" #include "trace_output.h" -/* - * On boot up, the ring buffer is set to the minimum size, so that - * we do not waste memory on systems that are not using tracing. - */ -bool ring_buffer_expanded; - #ifdef CONFIG_FTRACE_STARTUP_TEST /* * We need to change this state when a selftest is running. 
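The ring-buffer hunk above swaps cmpxchg() for try_cmpxchg(); the "caution" comment matters because the expected value is written back on failure. A generic sketch of the idiom (not the ring-buffer code itself):

        int expected = READ_ONCE(*slot);

        do {
                /* On failure, try_cmpxchg() updates 'expected' with the value
                 * currently in *slot, so no explicit re-read is needed.
                 */
        } while (!try_cmpxchg(slot, &expected, expected + 1));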
@@ -202,7 +196,7 @@ static int __init set_cmdline_ftrace(char *str) strscpy(bootup_tracer_buf, str, MAX_TRACER_SIZE); default_bootup_tracer = bootup_tracer_buf; /* We are using ftrace early, expand it */ - ring_buffer_expanded = true; + trace_set_ring_buffer_expanded(NULL); return 1; } __setup("ftrace=", set_cmdline_ftrace); @@ -247,7 +241,7 @@ static int __init boot_alloc_snapshot(char *str) } else { allocate_snapshot = true; /* We also need the main ring buffer expanded */ - ring_buffer_expanded = true; + trace_set_ring_buffer_expanded(NULL); } return 1; } @@ -490,6 +484,13 @@ static struct trace_array global_trace = { .trace_flags = TRACE_DEFAULT_FLAGS, }; +void trace_set_ring_buffer_expanded(struct trace_array *tr) +{ + if (!tr) + tr = &global_trace; + tr->ring_buffer_expanded = true; +} + LIST_HEAD(ftrace_trace_arrays); int trace_array_get(struct trace_array *this_tr) @@ -1730,15 +1731,15 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt) { int len; - if (trace_seq_used(s) <= s->seq.readpos) + if (trace_seq_used(s) <= s->readpos) return -EBUSY; - len = trace_seq_used(s) - s->seq.readpos; + len = trace_seq_used(s) - s->readpos; if (cnt > len) cnt = len; - memcpy(buf, s->buffer + s->seq.readpos, cnt); + memcpy(buf, s->buffer + s->readpos, cnt); - s->seq.readpos += cnt; + s->readpos += cnt; return cnt; } @@ -2012,7 +2013,7 @@ static int run_tracer_selftest(struct tracer *type) #ifdef CONFIG_TRACER_MAX_TRACE if (type->use_max_tr) { /* If we expanded the buffers, make sure the max is expanded too */ - if (ring_buffer_expanded) + if (tr->ring_buffer_expanded) ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size, RING_BUFFER_ALL_CPUS); tr->allocated_snapshot = true; @@ -2038,7 +2039,7 @@ static int run_tracer_selftest(struct tracer *type) tr->allocated_snapshot = false; /* Shrink the max buffer again */ - if (ring_buffer_expanded) + if (tr->ring_buffer_expanded) ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS); } @@ -3403,7 +3404,7 @@ void trace_printk_init_buffers(void) pr_warn("**********************************************************\n"); /* Expand the buffers to set size */ - tracing_update_buffers(); + tracing_update_buffers(&global_trace); buffers_allocated = 1; @@ -3827,15 +3828,6 @@ static bool trace_safe_str(struct trace_iterator *iter, const char *str, return false; } -static const char *show_buffer(struct trace_seq *s) -{ - struct seq_buf *seq = &s->seq; - - seq_buf_terminate(seq); - - return seq->buffer; -} - static DEFINE_STATIC_KEY_FALSE(trace_no_verify); static int test_can_verify_check(const char *fmt, ...) 
@@ -3975,7 +3967,7 @@ void trace_check_vprintf(struct trace_iterator *iter, const char *fmt, */ if (WARN_ONCE(!trace_safe_str(iter, str, star, len), "fmt: '%s' current_buffer: '%s'", - fmt, show_buffer(&iter->seq))) { + fmt, seq_buf_str(&iter->seq.seq))) { int ret; /* Try to safely read the string */ @@ -4986,6 +4978,20 @@ int tracing_open_file_tr(struct inode *inode, struct file *filp) if (ret) return ret; + mutex_lock(&event_mutex); + + /* Fail if the file is marked for removal */ + if (file->flags & EVENT_FILE_FL_FREED) { + trace_array_put(file->tr); + ret = -ENODEV; + } else { + event_file_get(file); + } + + mutex_unlock(&event_mutex); + if (ret) + return ret; + filp->private_data = inode->i_private; return 0; @@ -4996,6 +5002,7 @@ int tracing_release_file_tr(struct inode *inode, struct file *filp) struct trace_event_file *file = inode->i_private; trace_array_put(file->tr); + event_file_put(file); return 0; } @@ -6374,7 +6381,7 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr, * we use the size that was given, and we can forget about * expanding it later. */ - ring_buffer_expanded = true; + trace_set_ring_buffer_expanded(tr); /* May be called before buffers are initialized */ if (!tr->array_buffer.buffer) @@ -6452,6 +6459,7 @@ out: /** * tracing_update_buffers - used by tracing facility to expand ring buffers + * @tr: The tracing instance * * To save on memory when the tracing is never used on a system with it * configured in. The ring buffers are set to a minimum size. But once @@ -6460,13 +6468,13 @@ out: * * This function is to be called when a tracer is about to be used. */ -int tracing_update_buffers(void) +int tracing_update_buffers(struct trace_array *tr) { int ret = 0; mutex_lock(&trace_types_lock); - if (!ring_buffer_expanded) - ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size, + if (!tr->ring_buffer_expanded) + ret = __tracing_resize_ring_buffer(tr, trace_buf_size, RING_BUFFER_ALL_CPUS); mutex_unlock(&trace_types_lock); @@ -6520,7 +6528,7 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf) mutex_lock(&trace_types_lock); - if (!ring_buffer_expanded) { + if (!tr->ring_buffer_expanded) { ret = __tracing_resize_ring_buffer(tr, trace_buf_size, RING_BUFFER_ALL_CPUS); if (ret < 0) @@ -7006,7 +7014,7 @@ waitagain: /* Now copy what we have to the user */ sret = trace_seq_to_user(&iter->seq, ubuf, cnt); - if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq)) + if (iter->seq.readpos >= trace_seq_used(&iter->seq)) trace_seq_init(&iter->seq); /* @@ -7192,7 +7200,7 @@ tracing_entries_read(struct file *filp, char __user *ubuf, } if (buf_size_same) { - if (!ring_buffer_expanded) + if (!tr->ring_buffer_expanded) r = sprintf(buf, "%lu (expanded: %lu)\n", size >> 10, trace_buf_size >> 10); @@ -7249,10 +7257,10 @@ tracing_total_entries_read(struct file *filp, char __user *ubuf, mutex_lock(&trace_types_lock); for_each_tracing_cpu(cpu) { size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10; - if (!ring_buffer_expanded) + if (!tr->ring_buffer_expanded) expanded_size += trace_buf_size >> 10; } - if (ring_buffer_expanded) + if (tr->ring_buffer_expanded) r = sprintf(buf, "%lu\n", size); else r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size); @@ -7646,7 +7654,7 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt, unsigned long val; int ret; - ret = tracing_update_buffers(); + ret = tracing_update_buffers(tr); if (ret < 0) return ret; @@ -9550,6 +9558,9 @@ static struct trace_array 
*trace_array_create(const char *name) if (allocate_trace_buffers(tr, trace_buf_size) < 0) goto out_free_tr; + /* The ring buffer is defaultly expanded */ + trace_set_ring_buffer_expanded(tr); + if (ftrace_allocate_ftrace_ops(tr) < 0) goto out_free_tr; @@ -9759,7 +9770,6 @@ static __init void create_trace_instances(struct dentry *d_tracer) static void init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer) { - struct trace_event_file *file; int cpu; trace_create_file("available_tracers", TRACE_MODE_READ, d_tracer, @@ -9792,11 +9802,7 @@ init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer) trace_create_file("trace_marker", 0220, d_tracer, tr, &tracing_mark_fops); - file = __find_event_file(tr, "ftrace", "print"); - if (file && file->ef) - eventfs_add_file("trigger", TRACE_MODE_WRITE, file->ef, - file, &event_trigger_fops); - tr->trace_marker_file = file; + tr->trace_marker_file = __find_event_file(tr, "ftrace", "print"); trace_create_file("trace_marker_raw", 0220, d_tracer, tr, &tracing_mark_raw_fops); @@ -10444,7 +10450,7 @@ __init static int tracer_alloc_buffers(void) trace_printk_init_buffers(); /* To save memory, keep the ring buffer size to its minimum */ - if (ring_buffer_expanded) + if (global_trace.ring_buffer_expanded) ring_buf_size = trace_buf_size; else ring_buf_size = 1; diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 77debe53f07c..b7f4ea25a194 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -381,7 +381,7 @@ struct trace_array { struct dentry *dir; struct dentry *options; struct dentry *percpu_dir; - struct dentry *event_dir; + struct eventfs_inode *event_dir; struct trace_options *topts; struct list_head systems; struct list_head events; @@ -410,6 +410,11 @@ struct trace_array { struct cond_snapshot *cond_snapshot; #endif struct trace_func_repeats __percpu *last_func_repeats; + /* + * On boot up, the ring buffer is set to the minimum size, so that + * we do not waste memory on systems that are not using tracing. 
+ */ + bool ring_buffer_expanded; }; enum { @@ -761,7 +766,7 @@ extern int DYN_FTRACE_TEST_NAME(void); #define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2 extern int DYN_FTRACE_TEST_NAME2(void); -extern bool ring_buffer_expanded; +extern void trace_set_ring_buffer_expanded(struct trace_array *tr); extern bool tracing_selftest_disabled; #ifdef CONFIG_FTRACE_STARTUP_TEST @@ -1305,7 +1310,7 @@ static inline void trace_branch_disable(void) #endif /* CONFIG_BRANCH_TRACER */ /* set ring buffers to default size if not already done so */ -int tracing_update_buffers(void); +int tracing_update_buffers(struct trace_array *tr); union trace_synth_field { u8 as_u8; @@ -1344,7 +1349,7 @@ struct trace_subsystem_dir { struct list_head list; struct event_subsystem *subsystem; struct trace_array *tr; - struct eventfs_file *ef; + struct eventfs_inode *ei; int ref_count; int nr_events; }; @@ -1664,6 +1669,9 @@ extern void event_trigger_unregister(struct event_command *cmd_ops, char *glob, struct event_trigger_data *trigger_data); +extern void event_file_get(struct trace_event_file *file); +extern void event_file_put(struct trace_event_file *file); + /** * struct event_trigger_ops - callbacks for trace event triggers * diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index f49d6ddb6342..f29e815ca5b2 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -984,19 +984,41 @@ static void remove_subsystem(struct trace_subsystem_dir *dir) return; if (!--dir->nr_events) { - eventfs_remove(dir->ef); + eventfs_remove_dir(dir->ei); list_del(&dir->list); __put_system_dir(dir); } } +void event_file_get(struct trace_event_file *file) +{ + atomic_inc(&file->ref); +} + +void event_file_put(struct trace_event_file *file) +{ + if (WARN_ON_ONCE(!atomic_read(&file->ref))) { + if (file->flags & EVENT_FILE_FL_FREED) + kmem_cache_free(file_cachep, file); + return; + } + + if (atomic_dec_and_test(&file->ref)) { + /* Count should only go to zero when it is freed */ + if (WARN_ON_ONCE(!(file->flags & EVENT_FILE_FL_FREED))) + return; + kmem_cache_free(file_cachep, file); + } +} + static void remove_event_file_dir(struct trace_event_file *file) { - eventfs_remove(file->ef); + eventfs_remove_dir(file->ei); list_del(&file->list); remove_subsystem(file->system); free_event_filter(file->filter); - kmem_cache_free(file_cachep, file); + file->flags |= EVENT_FILE_FL_FREED; + event_file_put(file); } /* @@ -1166,7 +1188,7 @@ ftrace_event_write(struct file *file, const char __user *ubuf, if (!cnt) return 0; - ret = tracing_update_buffers(); + ret = tracing_update_buffers(tr); if (ret < 0) return ret; @@ -1369,7 +1391,7 @@ event_enable_read(struct file *filp, char __user *ubuf, size_t cnt, flags = file->flags; mutex_unlock(&event_mutex); - if (!file) + if (!file || flags & EVENT_FILE_FL_FREED) return -ENODEV; if (flags & EVENT_FILE_FL_ENABLED && @@ -1397,18 +1419,20 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt, if (ret) return ret; - ret = tracing_update_buffers(); - if (ret < 0) - return ret; - switch (val) { case 0: case 1: ret = -ENODEV; mutex_lock(&event_mutex); file = event_file_data(filp); - if (likely(file)) + if (likely(file && !(file->flags & EVENT_FILE_FL_FREED))) { + ret = tracing_update_buffers(file->tr); + if (ret < 0) { + mutex_unlock(&event_mutex); + return ret; + } ret = ftrace_event_enable_disable(file, val); + } mutex_unlock(&event_mutex); break; @@ -1482,7 +1506,7 @@ system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt, 
if (ret) return ret; - ret = tracing_update_buffers(); + ret = tracing_update_buffers(dir->tr); if (ret < 0) return ret; @@ -1681,7 +1705,7 @@ event_filter_read(struct file *filp, char __user *ubuf, size_t cnt, mutex_lock(&event_mutex); file = event_file_data(filp); - if (file) + if (file && !(file->flags & EVENT_FILE_FL_FREED)) print_event_filter(file, s); mutex_unlock(&event_mutex); @@ -1956,7 +1980,7 @@ event_pid_write(struct file *filp, const char __user *ubuf, if (!cnt) return 0; - ret = tracing_update_buffers(); + ret = tracing_update_buffers(tr); if (ret < 0) return ret; @@ -2280,14 +2304,40 @@ create_new_subsystem(const char *name) return NULL; } -static struct eventfs_file * +static int system_callback(const char *name, umode_t *mode, void **data, + const struct file_operations **fops) +{ + if (strcmp(name, "filter") == 0) + *fops = &ftrace_subsystem_filter_fops; + + else if (strcmp(name, "enable") == 0) + *fops = &ftrace_system_enable_fops; + + else + return 0; + + *mode = TRACE_MODE_WRITE; + return 1; +} + +static struct eventfs_inode * event_subsystem_dir(struct trace_array *tr, const char *name, - struct trace_event_file *file, struct dentry *parent) + struct trace_event_file *file, struct eventfs_inode *parent) { struct event_subsystem *system, *iter; struct trace_subsystem_dir *dir; - struct eventfs_file *ef; - int res; + struct eventfs_inode *ei; + int nr_entries; + static struct eventfs_entry system_entries[] = { + { + .name = "filter", + .callback = system_callback, + }, + { + .name = "enable", + .callback = system_callback, + } + }; /* First see if we did not already create this dir */ list_for_each_entry(dir, &tr->systems, list) { @@ -2295,7 +2345,7 @@ event_subsystem_dir(struct trace_array *tr, const char *name, if (strcmp(system->name, name) == 0) { dir->nr_events++; file->system = dir; - return dir->ef; + return dir->ei; } } @@ -2319,39 +2369,29 @@ event_subsystem_dir(struct trace_array *tr, const char *name, } else __get_system(system); - ef = eventfs_add_subsystem_dir(name, parent); - if (IS_ERR(ef)) { + /* ftrace only has directories no files */ + if (strcmp(name, "ftrace") == 0) + nr_entries = 0; + else + nr_entries = ARRAY_SIZE(system_entries); + + ei = eventfs_create_dir(name, parent, system_entries, nr_entries, dir); + if (IS_ERR(ei)) { pr_warn("Failed to create system directory %s\n", name); __put_system(system); goto out_free; } - dir->ef = ef; + dir->ei = ei; dir->tr = tr; dir->ref_count = 1; dir->nr_events = 1; dir->subsystem = system; file->system = dir; - /* the ftrace system is special, do not create enable or filter files */ - if (strcmp(name, "ftrace") != 0) { - - res = eventfs_add_file("filter", TRACE_MODE_WRITE, - dir->ef, dir, - &ftrace_subsystem_filter_fops); - if (res) { - kfree(system->filter); - system->filter = NULL; - pr_warn("Could not create tracefs '%s/filter' entry\n", name); - } - - eventfs_add_file("enable", TRACE_MODE_WRITE, dir->ef, dir, - &ftrace_system_enable_fops); - } - list_add(&dir->list, &tr->systems); - return dir->ef; + return dir->ei; out_free: kfree(dir); @@ -2400,15 +2440,134 @@ event_define_fields(struct trace_event_call *call) return ret; } +static int event_callback(const char *name, umode_t *mode, void **data, + const struct file_operations **fops) +{ + struct trace_event_file *file = *data; + struct trace_event_call *call = file->event_call; + + if (strcmp(name, "format") == 0) { + *mode = TRACE_MODE_READ; + *fops = &ftrace_event_format_fops; + *data = call; + return 1; + } + + /* + * Only event directories that can 
be enabled should have + * triggers or filters, with the exception of the "print" + * event that can have a "trigger" file. + */ + if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)) { + if (call->class->reg && strcmp(name, "enable") == 0) { + *mode = TRACE_MODE_WRITE; + *fops = &ftrace_enable_fops; + return 1; + } + + if (strcmp(name, "filter") == 0) { + *mode = TRACE_MODE_WRITE; + *fops = &ftrace_event_filter_fops; + return 1; + } + } + + if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) || + strcmp(trace_event_name(call), "print") == 0) { + if (strcmp(name, "trigger") == 0) { + *mode = TRACE_MODE_WRITE; + *fops = &event_trigger_fops; + return 1; + } + } + +#ifdef CONFIG_PERF_EVENTS + if (call->event.type && call->class->reg && + strcmp(name, "id") == 0) { + *mode = TRACE_MODE_READ; + *data = (void *)(long)call->event.type; + *fops = &ftrace_event_id_fops; + return 1; + } +#endif + +#ifdef CONFIG_HIST_TRIGGERS + if (strcmp(name, "hist") == 0) { + *mode = TRACE_MODE_READ; + *fops = &event_hist_fops; + return 1; + } +#endif +#ifdef CONFIG_HIST_TRIGGERS_DEBUG + if (strcmp(name, "hist_debug") == 0) { + *mode = TRACE_MODE_READ; + *fops = &event_hist_debug_fops; + return 1; + } +#endif +#ifdef CONFIG_TRACE_EVENT_INJECT + if (call->event.type && call->class->reg && + strcmp(name, "inject") == 0) { + *mode = 0200; + *fops = &event_inject_fops; + return 1; + } +#endif + return 0; +} + static int -event_create_dir(struct dentry *parent, struct trace_event_file *file) +event_create_dir(struct eventfs_inode *parent, struct trace_event_file *file) { struct trace_event_call *call = file->event_call; - struct eventfs_file *ef_subsystem = NULL; struct trace_array *tr = file->tr; - struct eventfs_file *ef; + struct eventfs_inode *e_events; + struct eventfs_inode *ei; const char *name; + int nr_entries; int ret; + static struct eventfs_entry event_entries[] = { + { + .name = "enable", + .callback = event_callback, + }, + { + .name = "filter", + .callback = event_callback, + }, + { + .name = "trigger", + .callback = event_callback, + }, + { + .name = "format", + .callback = event_callback, + }, +#ifdef CONFIG_PERF_EVENTS + { + .name = "id", + .callback = event_callback, + }, +#endif +#ifdef CONFIG_HIST_TRIGGERS + { + .name = "hist", + .callback = event_callback, + }, +#endif +#ifdef CONFIG_HIST_TRIGGERS_DEBUG + { + .name = "hist_debug", + .callback = event_callback, + }, +#endif +#ifdef CONFIG_TRACE_EVENT_INJECT + { + .name = "inject", + .callback = event_callback, + }, +#endif + }; /* * If the trace point header did not define TRACE_SYSTEM @@ -2418,29 +2577,20 @@ event_create_dir(struct dentry *parent, struct trace_event_file *file) if (WARN_ON_ONCE(strcmp(call->class->system, TRACE_SYSTEM) == 0)) return -ENODEV; - ef_subsystem = event_subsystem_dir(tr, call->class->system, file, parent); - if (!ef_subsystem) + e_events = event_subsystem_dir(tr, call->class->system, file, parent); + if (!e_events) return -ENOMEM; + nr_entries = ARRAY_SIZE(event_entries); + name = trace_event_name(call); - ef = eventfs_add_dir(name, ef_subsystem); - if (IS_ERR(ef)) { + ei = eventfs_create_dir(name, e_events, event_entries, nr_entries, file); + if (IS_ERR(ei)) { pr_warn("Could not create tracefs '%s' directory\n", name); return -1; } - file->ef = ef; - - if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)) - eventfs_add_file("enable", TRACE_MODE_WRITE, file->ef, file, - &ftrace_enable_fops); - -#ifdef CONFIG_PERF_EVENTS - if (call->event.type && call->class->reg) - eventfs_add_file("id", TRACE_MODE_READ, 
file->ef, - (void *)(long)call->event.type, - &ftrace_event_id_fops); -#endif + file->ei = ei; ret = event_define_fields(call); if (ret < 0) { @@ -2448,35 +2598,6 @@ event_create_dir(struct dentry *parent, struct trace_event_file *file) return ret; } - /* - * Only event directories that can be enabled should have - * triggers or filters. - */ - if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)) { - eventfs_add_file("filter", TRACE_MODE_WRITE, file->ef, - file, &ftrace_event_filter_fops); - - eventfs_add_file("trigger", TRACE_MODE_WRITE, file->ef, - file, &event_trigger_fops); - } - -#ifdef CONFIG_HIST_TRIGGERS - eventfs_add_file("hist", TRACE_MODE_READ, file->ef, file, - &event_hist_fops); -#endif -#ifdef CONFIG_HIST_TRIGGERS_DEBUG - eventfs_add_file("hist_debug", TRACE_MODE_READ, file->ef, file, - &event_hist_debug_fops); -#endif - eventfs_add_file("format", TRACE_MODE_READ, file->ef, call, - &ftrace_event_format_fops); - -#ifdef CONFIG_TRACE_EVENT_INJECT - if (call->event.type && call->class->reg) - eventfs_add_file("inject", 0200, file->ef, file, - &event_inject_fops); -#endif - return 0; } @@ -2803,6 +2924,7 @@ trace_create_new_event(struct trace_event_call *call, atomic_set(&file->tm_ref, 0); INIT_LIST_HEAD(&file->triggers); list_add(&file->list, &tr->events); + event_file_get(file); return file; } @@ -2824,7 +2946,7 @@ static __init int setup_trace_triggers(char *str) int i; strscpy(bootup_trigger_buf, str, COMMAND_LINE_SIZE); - ring_buffer_expanded = true; + trace_set_ring_buffer_expanded(NULL); disable_tracing_selftest("running event triggers"); buf = bootup_trigger_buf; @@ -3614,37 +3736,72 @@ static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata; static __init int setup_trace_event(char *str) { strscpy(bootup_event_buf, str, COMMAND_LINE_SIZE); - ring_buffer_expanded = true; + trace_set_ring_buffer_expanded(NULL); disable_tracing_selftest("running event tracing"); return 1; } __setup("trace_event=", setup_trace_event); +static int events_callback(const char *name, umode_t *mode, void **data, + const struct file_operations **fops) +{ + if (strcmp(name, "enable") == 0) { + *mode = TRACE_MODE_WRITE; + *fops = &ftrace_tr_enable_fops; + return 1; + } + + if (strcmp(name, "header_page") == 0) + *data = ring_buffer_print_page_header; + + else if (strcmp(name, "header_event") == 0) + *data = ring_buffer_print_entry_header; + + else + return 0; + + *mode = TRACE_MODE_READ; + *fops = &ftrace_show_header_fops; + return 1; +} + /* Expects to have event_mutex held when called */ static int create_event_toplevel_files(struct dentry *parent, struct trace_array *tr) { - struct dentry *d_events; + struct eventfs_inode *e_events; struct dentry *entry; - int error = 0; + int nr_entries; + static struct eventfs_entry events_entries[] = { + { + .name = "enable", + .callback = events_callback, + }, + { + .name = "header_page", + .callback = events_callback, + }, + { + .name = "header_event", + .callback = events_callback, + }, + }; entry = trace_create_file("set_event", TRACE_MODE_WRITE, parent, tr, &ftrace_set_event_fops); if (!entry) return -ENOMEM; - d_events = eventfs_create_events_dir("events", parent); - if (IS_ERR(d_events)) { + nr_entries = ARRAY_SIZE(events_entries); + + e_events = eventfs_create_events_dir("events", parent, events_entries, + nr_entries, tr); + if (IS_ERR(e_events)) { pr_warn("Could not create tracefs 'events' directory\n"); return -ENOMEM; } - error = eventfs_add_events_file("enable", TRACE_MODE_WRITE, d_events, - tr, &ftrace_tr_enable_fops); - if (error) - return 
-ENOMEM; - /* There are not as crucial, just warn if they are not created */ trace_create_file("set_event_pid", TRACE_MODE_WRITE, parent, @@ -3654,16 +3811,7 @@ create_event_toplevel_files(struct dentry *parent, struct trace_array *tr) TRACE_MODE_WRITE, parent, tr, &ftrace_set_event_notrace_pid_fops); - /* ring buffer internal formats */ - eventfs_add_events_file("header_page", TRACE_MODE_READ, d_events, - ring_buffer_print_page_header, - &ftrace_show_header_fops); - - eventfs_add_events_file("header_event", TRACE_MODE_READ, d_events, - ring_buffer_print_entry_header, - &ftrace_show_header_fops); - - tr->event_dir = d_events; + tr->event_dir = e_events; return 0; } diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 33264e510d16..0c611b281a5b 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c @@ -2349,6 +2349,9 @@ int apply_event_filter(struct trace_event_file *file, char *filter_string) struct event_filter *filter = NULL; int err; + if (file->flags & EVENT_FILE_FL_FREED) + return -ENODEV; + if (!strcmp(strstrip(filter_string), "0")) { filter_disable(file); filter = event_filter(file); diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index d06938ae0717..1abc07fba1b9 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -774,23 +774,16 @@ static void last_cmd_set(struct trace_event_file *file, char *str) { const char *system = NULL, *name = NULL; struct trace_event_call *call; - int len; if (!str) return; - /* sizeof() contains the nul byte */ - len = sizeof(HIST_PREFIX) + strlen(str); kfree(last_cmd); - last_cmd = kzalloc(len, GFP_KERNEL); + + last_cmd = kasprintf(GFP_KERNEL, HIST_PREFIX "%s", str); if (!last_cmd) return; - strcpy(last_cmd, HIST_PREFIX); - /* Again, sizeof() contains the nul byte */ - len -= sizeof(HIST_PREFIX); - strncat(last_cmd, str, len); - if (file) { call = file->event_call; system = call->class->system; diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c index 14cb275a0bab..846e02c0fb59 100644 --- a/kernel/trace/trace_events_synth.c +++ b/kernel/trace/trace_events_synth.c @@ -452,7 +452,7 @@ static unsigned int trace_string(struct synth_trace_event *entry, #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE if ((unsigned long)str_val < TASK_SIZE) - ret = strncpy_from_user_nofault(str_field, str_val, STR_VAR_LEN_MAX); + ret = strncpy_from_user_nofault(str_field, (const void __user *)str_val, STR_VAR_LEN_MAX); else #endif ret = strncpy_from_kernel_nofault(str_field, str_val, STR_VAR_LEN_MAX); diff --git a/kernel/trace/trace_events_user.c b/kernel/trace/trace_events_user.c index b87f41187c6a..9365ce407426 100644 --- a/kernel/trace/trace_events_user.c +++ b/kernel/trace/trace_events_user.c @@ -50,18 +50,6 @@ #define EVENT_STATUS_OTHER BIT(7) /* - * User register flags are not allowed yet, keep them here until we are - * ready to expose them out to the user ABI. - */ -enum user_reg_flag { - /* Event will not delete upon last reference closing */ - USER_EVENT_REG_PERSIST = 1U << 0, - - /* This value or above is currently non-ABI */ - USER_EVENT_REG_MAX = 1U << 1, -}; - -/* * Stores the system name, tables, and locks for a group of events. This * allows isolation for events by various means. 
*/ @@ -220,6 +208,17 @@ static u32 user_event_key(char *name) return jhash(name, strlen(name), 0); } +static bool user_event_capable(u16 reg_flags) +{ + /* Persistent events require CAP_PERFMON / CAP_SYS_ADMIN */ + if (reg_flags & USER_EVENT_REG_PERSIST) { + if (!perfmon_capable()) + return false; + } + + return true; +} + static struct user_event *user_event_get(struct user_event *user) { refcount_inc(&user->refcnt); @@ -1811,6 +1810,9 @@ static int user_event_free(struct dyn_event *ev) if (!user_event_last_ref(user)) return -EBUSY; + if (!user_event_capable(user->reg_flags)) + return -EPERM; + return destroy_user_event(user); } @@ -1926,10 +1928,13 @@ static int user_event_parse(struct user_event_group *group, char *name, int argc = 0; char **argv; - /* User register flags are not ready yet */ - if (reg_flags != 0 || flags != NULL) + /* Currently don't support any text based flags */ + if (flags != NULL) return -EINVAL; + if (!user_event_capable(reg_flags)) + return -EPERM; + /* Prevent dyn_event from racing */ mutex_lock(&event_mutex); user = find_user_event(group, name, &key); @@ -2062,6 +2067,9 @@ static int delete_user_event(struct user_event_group *group, char *name) if (!user_event_last_ref(user)) return -EBUSY; + if (!user_event_capable(user->reg_flags)) + return -EPERM; + return destroy_user_event(user); } diff --git a/kernel/trace/trace_fprobe.c b/kernel/trace/trace_fprobe.c index 8bfe23af9c73..7d2ddbcfa377 100644 --- a/kernel/trace/trace_fprobe.c +++ b/kernel/trace/trace_fprobe.c @@ -927,11 +927,12 @@ static int parse_symbol_and_return(int argc, const char *argv[], for (i = 2; i < argc; i++) { tmp = strstr(argv[i], "$retval"); if (tmp && !isalnum(tmp[7]) && tmp[7] != '_') { + if (is_tracepoint) { + trace_probe_log_set_index(i); + trace_probe_log_err(tmp - argv[i], RETVAL_ON_PROBE); + return -EINVAL; + } *is_return = true; - /* - * NOTE: Don't check is_tracepoint here, because it will - * be checked when the argument is parsed. - */ break; } } diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index a3442db35670..52f8b537dd0a 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1020,9 +1020,9 @@ EXPORT_SYMBOL_GPL(kprobe_event_cmd_init); /** * __kprobe_event_gen_cmd_start - Generate a kprobe event command from arg list * @cmd: A pointer to the dynevent_cmd struct representing the new event + * @kretprobe: Is this a return probe? * @name: The name of the kprobe event * @loc: The location of the kprobe event - * @kretprobe: Is this a return probe? 
* @...: Variable number of arg (pairs), one pair for each field * * NOTE: Users normally won't want to call this function directly, but diff --git a/kernel/trace/trace_seq.c b/kernel/trace/trace_seq.c index bac06ee3b98b..7be97229ddf8 100644 --- a/kernel/trace/trace_seq.c +++ b/kernel/trace/trace_seq.c @@ -370,8 +370,12 @@ EXPORT_SYMBOL_GPL(trace_seq_path); */ int trace_seq_to_user(struct trace_seq *s, char __user *ubuf, int cnt) { + int ret; __trace_seq_init(s); - return seq_buf_to_user(&s->seq, ubuf, cnt); + ret = seq_buf_to_user(&s->seq, ubuf, s->readpos, cnt); + if (ret > 0) + s->readpos += ret; + return ret; } EXPORT_SYMBOL_GPL(trace_seq_to_user); diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c index d52a894ecf57..eabe8bcc7042 100644 --- a/kernel/user_namespace.c +++ b/kernel/user_namespace.c @@ -22,7 +22,7 @@ #include <linux/bsearch.h> #include <linux/sort.h> -static struct kmem_cache *user_ns_cachep __read_mostly; +static struct kmem_cache *user_ns_cachep __ro_after_init; static DEFINE_MUTEX(userns_state_mutex); static bool new_idmap_permitted(const struct file *file, diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 0f682da96e1c..6e578f576a6f 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -418,21 +418,21 @@ static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS]; * process context while holding a pool lock. Bounce to a dedicated kthread * worker to avoid A-A deadlocks. */ -static struct kthread_worker *pwq_release_worker; +static struct kthread_worker *pwq_release_worker __ro_after_init; -struct workqueue_struct *system_wq __read_mostly; +struct workqueue_struct *system_wq __ro_after_init; EXPORT_SYMBOL(system_wq); -struct workqueue_struct *system_highpri_wq __read_mostly; +struct workqueue_struct *system_highpri_wq __ro_after_init; EXPORT_SYMBOL_GPL(system_highpri_wq); -struct workqueue_struct *system_long_wq __read_mostly; +struct workqueue_struct *system_long_wq __ro_after_init; EXPORT_SYMBOL_GPL(system_long_wq); -struct workqueue_struct *system_unbound_wq __read_mostly; +struct workqueue_struct *system_unbound_wq __ro_after_init; EXPORT_SYMBOL_GPL(system_unbound_wq); -struct workqueue_struct *system_freezable_wq __read_mostly; +struct workqueue_struct *system_freezable_wq __ro_after_init; EXPORT_SYMBOL_GPL(system_freezable_wq); -struct workqueue_struct *system_power_efficient_wq __read_mostly; +struct workqueue_struct *system_power_efficient_wq __ro_after_init; EXPORT_SYMBOL_GPL(system_power_efficient_wq); -struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly; +struct workqueue_struct *system_freezable_power_efficient_wq __ro_after_init; EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq); static int worker_thread(void *__worker);
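
The trace_events.c hunks above all follow one pattern: instead of materializing every per-event control file up front with eventfs_add_file(), each directory is created once via eventfs_create_dir() together with a table of struct eventfs_entry, and a callback resolves a name to its mode, file_operations and private data only when the file is actually looked up. The apparent benefit is that the many per-event files no longer need to be instantiated eagerly at registration time. The sketch below is a minimal illustration of that shape for a hypothetical consumer, not code from the patch: the my_* identifiers, the two file_operations stubs and the header choices are assumptions; only the eventfs_entry/callback signatures, eventfs_create_dir(), and the TRACE_MODE_* constants are taken from the hunks in this diff.

/*
 * Minimal sketch of the callback-driven eventfs pattern used above.
 * my_* names and the fops stubs are invented for illustration; the
 * header locations are a guess.
 */
#include <linux/kernel.h>   /* ARRAY_SIZE() */
#include <linux/string.h>   /* strcmp() */
#include <linux/tracefs.h>  /* struct eventfs_entry, eventfs_create_dir() (assumed) */

static const struct file_operations my_stats_fops;    /* hypothetical reader */
static const struct file_operations my_control_fops;  /* hypothetical writer */

/* Resolve a name to mode/fops/data only when the file is looked up. */
static int my_dir_callback(const char *name, umode_t *mode, void **data,
                           const struct file_operations **fops)
{
        if (strcmp(name, "stats") == 0) {
                *mode = TRACE_MODE_READ;        /* read-only control file */
                *fops = &my_stats_fops;
                /* *data could also be repointed here, as event_callback()
                 * above does for "format" and "id". */
                return 1;                       /* 1: create this entry */
        }

        if (strcmp(name, "control") == 0) {
                *mode = TRACE_MODE_WRITE;       /* read-write control file */
                *fops = &my_control_fops;
                return 1;
        }

        return 0;                               /* 0: no such file here */
}

static struct eventfs_entry my_dir_entries[] = {
        { .name = "stats",   .callback = my_dir_callback },
        { .name = "control", .callback = my_dir_callback },
};

/* Create the directory once; its files are instantiated lazily. */
static struct eventfs_inode *my_create_dir(struct eventfs_inode *parent,
                                           void *private)
{
        return eventfs_create_dir("my_dir", parent, my_dir_entries,
                                  ARRAY_SIZE(my_dir_entries), private);
}

Returning 1 from the callback tells eventfs to create the node with the mode/fops/data written through the out parameters; returning 0, as system_callback() and event_callback() above do for names they do not recognize, suppresses the entry, which is how the conversion can drop the old conditional eventfs_add_file() calls.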