From 0b0757244754ea1d0721195c824770f5576e119e Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Thu, 9 Feb 2023 00:12:11 +0100 Subject: selftests/bpf: Fix out-of-srctree build Building BPF selftests out of srctree fails with: make: *** No rule to make target '/linux-build//ima_setup.sh', needed by 'ima_setup.sh'. Stop. The culprit is the rule that defines convenient shorthands like "make test_progs", which builds $(OUTPUT)/test_progs. These shorthands make sense only for binaries that are built though; scripts that live in the source tree do not end up in $(OUTPUT). Therefore drop $(TEST_PROGS) and $(TEST_PROGS_EXTENDED) from the rule. The issue has existed for a while, but it became a problem only after commit d68ae4982cb7 ("selftests/bpf: Install all required files to run selftests"), which added dependencies on these scripts. Fixes: 03dcb78460c2 ("selftests/bpf: Add simple per-test targets to Makefile") Signed-off-by: Ilya Leoshkevich Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20230208231211.283606-1-iii@linux.ibm.com --- tools/testing/selftests/bpf/Makefile | 2 -- 1 file changed, 2 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index c4b5c44cdee2..f7771592a920 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -152,8 +152,6 @@ endif # NOTE: Semicolon at the end is critical to override lib.mk's default static # rule for binaries. $(notdir $(TEST_GEN_PROGS) \ - $(TEST_PROGS) \ - $(TEST_PROGS_EXTENDED) \ $(TEST_GEN_PROGS_EXTENDED) \ $(TEST_CUSTOM_PROGS)): %: $(OUTPUT)/% ; -- cgit v1.2.3 From 6a3cd3318ff65622415e34e8ee39d76331e7c869 Mon Sep 17 00:00:00 2001 From: Dave Marchevsky Date: Sun, 12 Feb 2023 01:27:07 -0800 Subject: bpf: Migrate release_on_unlock logic to non-owning ref semantics This patch introduces non-owning reference semantics to the verifier, specifically linked_list API kfunc handling. release_on_unlock logic for refs is refactored - with small functional changes - to implement these semantics, and bpf_list_push_{front,back} are migrated to use them. When a list node is pushed to a list, the program still has a pointer to the node: n = bpf_obj_new(typeof(*n)); bpf_spin_lock(&l); bpf_list_push_back(&l, n); /* n still points to the just-added node */ bpf_spin_unlock(&l); What the verifier considers n to be after the push, and thus what can be done with n, are changed by this patch. Common properties both before/after this patch: * After push, n is only a valid reference to the node until end of critical section * After push, n cannot be pushed to any list * After push, the program can read the node's fields using n Before: * After push, n retains the ref_obj_id which it received on bpf_obj_new, but the associated bpf_reference_state's release_on_unlock field is set to true * release_on_unlock field and associated logic is used to implement "n is only a valid ref until end of critical section" * After push, n cannot be written to; the node must be removed from the list before writing to its fields * After push, n is marked PTR_UNTRUSTED After: * After push, n's ref is released and ref_obj_id set to 0. NON_OWN_REF type flag is added to reg's type, indicating that it's a non-owning reference. * NON_OWN_REF flag and logic is used to implement "n is only a valid ref until end of critical section" * n can be written to (except for special fields e.g. bpf_list_node, timer, ...)
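As a concrete sketch of the new semantics (a hypothetical struct foo with a data field and a bpf_list_node, plus global glock/ghead declarations as in the linked_list selftests), the following pattern is rejected before this patch but accepted after it:

    struct foo {
            int data;
            struct bpf_list_node node;
    };

    struct foo *f = bpf_obj_new(typeof(*f));

    if (!f)
            return 0;
    bpf_spin_lock(&glock);
    bpf_list_push_back(&ghead, &f->node);
    /* f is now a non-owning ref; reading and writing its normal
     * fields is allowed until the critical section ends
     */
    f->data = 42;
    bpf_spin_unlock(&glock);
    /* any dereference of f here is still rejected: non-owning refs
     * are invalidated at bpf_spin_unlock
     */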
Summary of specific implementation changes to achieve the above: * release_on_unlock field, ref_set_release_on_unlock helper, and logic to "release on unlock" based on that field are removed * The anonymous active_lock struct used by bpf_verifier_state is pulled out into a named struct bpf_active_lock. * NON_OWN_REF type flag is introduced along with verifier logic changes to handle non-owning refs * Helpers are added to use NON_OWN_REF flag to implement non-owning ref semantics as described above * invalidate_non_owning_refs - helper to clobber all non-owning refs matching a particular bpf_active_lock identity. Replaces release_on_unlock logic in process_spin_lock. * ref_set_non_owning - set NON_OWN_REF type flag after doing some sanity checking * ref_convert_owning_non_owning - convert owning reference w/ specified ref_obj_id to non-owning references. Set NON_OWN_REF flag for each reg with that ref_obj_id and 0-out its ref_obj_id * Update linked_list selftests to account for minor semantic differences introduced by this patch * Writes to a release_on_unlock node ref are not allowed, while writes to non-owning reference pointees are. As a result the linked_list "write after push" failure tests are no longer scenarios that should fail. * The test##missing_lock##op and test##incorrect_lock##op macro-generated failure tests need to have a valid node argument in order to have the same error output as before. Otherwise verification will fail early and the expected error output won't be seen. Signed-off-by: Dave Marchevsky Link: https://lore.kernel.org/r/20230212092715.1422619-2-davemarchevsky@fb.com Signed-off-by: Alexei Starovoitov --- include/linux/bpf.h | 6 + include/linux/bpf_verifier.h | 38 +++-- kernel/bpf/verifier.c | 168 +++++++++++++++------ .../testing/selftests/bpf/prog_tests/linked_list.c | 2 - tools/testing/selftests/bpf/progs/linked_list.c | 2 +- .../testing/selftests/bpf/progs/linked_list_fail.c | 100 +++++++----- 6 files changed, 206 insertions(+), 110 deletions(-) (limited to 'tools') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 4385418118f6..8b5d0b4c4ada 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -181,6 +181,7 @@ enum btf_field_type { BPF_KPTR = BPF_KPTR_UNREF | BPF_KPTR_REF, BPF_LIST_HEAD = (1 << 4), BPF_LIST_NODE = (1 << 5), + BPF_GRAPH_NODE_OR_ROOT = BPF_LIST_NODE | BPF_LIST_HEAD, }; struct btf_field_kptr { @@ -576,6 +577,11 @@ enum bpf_type_flag { /* MEM is tagged with rcu and memory access needs rcu_read_lock protection. */ MEM_RCU = BIT(13 + BPF_BASE_TYPE_BITS), + /* Used to tag PTR_TO_BTF_ID | MEM_ALLOC references which are non-owning. + * Currently only valid for linked-list and rbtree nodes. + */ + NON_OWN_REF = BIT(14 + BPF_BASE_TYPE_BITS), + __BPF_TYPE_FLAG_MAX, __BPF_TYPE_LAST_FLAG = __BPF_TYPE_FLAG_MAX - 1, }; diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index aa83de1fe755..cf1bb1cf4a7b 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -43,6 +43,22 @@ enum bpf_reg_liveness { REG_LIVE_DONE = 0x8, /* liveness won't be updating this register anymore */ }; +/* For every reg representing a map value or allocated object pointer, + * we consider the tuple of (ptr, id) for them to be unique in verifier + * context and consider them to not alias each other for the purposes of + * tracking lock state. + */ +struct bpf_active_lock { + /* This can either be reg->map_ptr or reg->btf. If ptr is NULL, + * there's no active lock held, and other fields have no + * meaning.
If non-NULL, it indicates that a lock is held and + * id member has the reg->id of the register which can be >= 0. + */ + void *ptr; + /* This will be reg->id */ + u32 id; +}; + struct bpf_reg_state { /* Ordering of fields matters. See states_equal() */ enum bpf_reg_type type; @@ -226,11 +242,6 @@ struct bpf_reference_state { * exiting a callback function. */ int callback_ref; - /* Mark the reference state to release the registers sharing the same id - * on bpf_spin_unlock (for nodes that we will lose ownership to but are - * safe to access inside the critical section). - */ - bool release_on_unlock; }; /* state of the program: @@ -331,21 +342,8 @@ struct bpf_verifier_state { u32 branches; u32 insn_idx; u32 curframe; - /* For every reg representing a map value or allocated object pointer, - * we consider the tuple of (ptr, id) for them to be unique in verifier - * context and conside them to not alias each other for the purposes of - * tracking lock state. - */ - struct { - /* This can either be reg->map_ptr or reg->btf. If ptr is NULL, - * there's no active lock held, and other fields have no - * meaning. If non-NULL, it indicates that a lock is held and - * id member has the reg->id of the register which can be >= 0. - */ - void *ptr; - /* This will be reg->id */ - u32 id; - } active_lock; + + struct bpf_active_lock active_lock; bool speculative; bool active_rcu_lock; diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 388245e8826e..f176bc15c879 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -190,6 +190,9 @@ struct bpf_verifier_stack_elem { static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx); static int release_reference(struct bpf_verifier_env *env, int ref_obj_id); +static void invalidate_non_owning_refs(struct bpf_verifier_env *env); +static int ref_set_non_owning(struct bpf_verifier_env *env, + struct bpf_reg_state *reg); static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux) { @@ -457,6 +460,11 @@ static bool type_is_ptr_alloc_obj(u32 type) return base_type(type) == PTR_TO_BTF_ID && type_flag(type) & MEM_ALLOC; } +static bool type_is_non_owning_ref(u32 type) +{ + return type_is_ptr_alloc_obj(type) && type_flag(type) & NON_OWN_REF; +} + static struct btf_record *reg_btf_record(const struct bpf_reg_state *reg) { struct btf_record *rec = NULL; @@ -1073,6 +1081,8 @@ static void print_verifier_state(struct bpf_verifier_env *env, verbose_a("id=%d", reg->id); if (reg->ref_obj_id) verbose_a("ref_obj_id=%d", reg->ref_obj_id); + if (type_is_non_owning_ref(reg->type)) + verbose_a("%s", "non_own_ref"); if (t != SCALAR_VALUE) verbose_a("off=%d", reg->off); if (type_is_pkt_pointer(t)) @@ -5052,7 +5062,8 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env, return -EACCES; } - if (type_is_alloc(reg->type) && !reg->ref_obj_id) { + if (type_is_alloc(reg->type) && !type_is_non_owning_ref(reg->type) && + !reg->ref_obj_id) { verbose(env, "verifier internal error: ref_obj_id for allocated object must be non-zero\n"); return -EFAULT; } @@ -6042,9 +6053,7 @@ static int process_spin_lock(struct bpf_verifier_env *env, int regno, cur->active_lock.ptr = btf; cur->active_lock.id = reg->id; } else { - struct bpf_func_state *fstate = cur_func(env); void *ptr; - int i; if (map) ptr = map; @@ -6060,25 +6069,11 @@ static int process_spin_lock(struct bpf_verifier_env *env, int regno, verbose(env, "bpf_spin_unlock of different lock\n"); return -EINVAL; } - cur->active_lock.ptr = NULL; - cur->active_lock.id = 0; - for (i = 
fstate->acquired_refs - 1; i >= 0; i--) { - int err; + invalidate_non_owning_refs(env); - /* Complain on error because this reference state cannot - * be freed before this point, as bpf_spin_lock critical - * section does not allow functions that release the - * allocated object immediately. - */ - if (!fstate->refs[i].release_on_unlock) - continue; - err = release_reference(env, fstate->refs[i].id); - if (err) { - verbose(env, "failed to release release_on_unlock reference"); - return err; - } - } + cur->active_lock.ptr = NULL; + cur->active_lock.id = 0; } return 0; } @@ -6546,6 +6541,23 @@ found: return 0; } +static struct btf_field * +reg_find_field_offset(const struct bpf_reg_state *reg, s32 off, u32 fields) +{ + struct btf_field *field; + struct btf_record *rec; + + rec = reg_btf_record(reg); + if (!rec) + return NULL; + + field = btf_record_find(rec, off, fields); + if (!field) + return NULL; + + return field; +} + int check_func_arg_reg_off(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, int regno, enum bpf_arg_type arg_type) @@ -6567,6 +6579,18 @@ int check_func_arg_reg_off(struct bpf_verifier_env *env, */ if (arg_type_is_dynptr(arg_type) && type == PTR_TO_STACK) return 0; + + if ((type_is_ptr_alloc_obj(type) || type_is_non_owning_ref(type)) && reg->off) { + if (reg_find_field_offset(reg, reg->off, BPF_GRAPH_NODE_OR_ROOT)) + return __check_ptr_off_reg(env, reg, regno, true); + + verbose(env, "R%d must have zero offset when passed to release func\n", + regno); + verbose(env, "No graph node or root found at R%d type:%s off:%d\n", regno, + kernel_type_name(reg->btf, reg->btf_id), reg->off); + return -EINVAL; + } + /* Doing check_ptr_off_reg check for the offset will catch this * because fixed_off_ok is false, but checking here allows us * to give the user a better error message. @@ -6601,6 +6625,7 @@ int check_func_arg_reg_off(struct bpf_verifier_env *env, case PTR_TO_BTF_ID | PTR_TRUSTED: case PTR_TO_BTF_ID | MEM_RCU: case PTR_TO_BTF_ID | MEM_ALLOC | PTR_TRUSTED: + case PTR_TO_BTF_ID | MEM_ALLOC | NON_OWN_REF: /* When referenced PTR_TO_BTF_ID is passed to release function, * its fixed offset must be 0. In the other cases, fixed offset * can be non-zero. This was already checked above. 
So pass @@ -7363,6 +7388,17 @@ static int release_reference(struct bpf_verifier_env *env, return 0; } +static void invalidate_non_owning_refs(struct bpf_verifier_env *env) +{ + struct bpf_func_state *unused; + struct bpf_reg_state *reg; + + bpf_for_each_reg_in_vstate(env->cur_state, unused, reg, ({ + if (type_is_non_owning_ref(reg->type)) + __mark_reg_unknown(env, reg); + })); +} + static void clear_caller_saved_regs(struct bpf_verifier_env *env, struct bpf_reg_state *regs) { @@ -8915,38 +8951,54 @@ static int process_kf_arg_ptr_to_kptr(struct bpf_verifier_env *env, return 0; } -static int ref_set_release_on_unlock(struct bpf_verifier_env *env, u32 ref_obj_id) +static int ref_set_non_owning(struct bpf_verifier_env *env, struct bpf_reg_state *reg) { - struct bpf_func_state *state = cur_func(env); + struct bpf_verifier_state *state = env->cur_state; + + if (!state->active_lock.ptr) { + verbose(env, "verifier internal error: ref_set_non_owning w/o active lock\n"); + return -EFAULT; + } + + if (type_flag(reg->type) & NON_OWN_REF) { + verbose(env, "verifier internal error: NON_OWN_REF already set\n"); + return -EFAULT; + } + + reg->type |= NON_OWN_REF; + return 0; +} + +static int ref_convert_owning_non_owning(struct bpf_verifier_env *env, u32 ref_obj_id) +{ + struct bpf_func_state *state, *unused; struct bpf_reg_state *reg; int i; - /* bpf_spin_lock only allows calling list_push and list_pop, no BPF - * subprogs, no global functions. This means that the references would - * not be released inside the critical section but they may be added to - * the reference state, and the acquired_refs are never copied out for a - * different frame as BPF to BPF calls don't work in bpf_spin_lock - * critical sections. - */ + state = cur_func(env); + if (!ref_obj_id) { - verbose(env, "verifier internal error: ref_obj_id is zero for release_on_unlock\n"); + verbose(env, "verifier internal error: ref_obj_id is zero for " + "owning -> non-owning conversion\n"); return -EFAULT; } + for (i = 0; i < state->acquired_refs; i++) { - if (state->refs[i].id == ref_obj_id) { - if (state->refs[i].release_on_unlock) { - verbose(env, "verifier internal error: expected false release_on_unlock"); - return -EFAULT; + if (state->refs[i].id != ref_obj_id) + continue; + + /* Clear ref_obj_id here so release_reference doesn't clobber + * the whole reg + */ + bpf_for_each_reg_in_vstate(env->cur_state, unused, reg, ({ + if (reg->ref_obj_id == ref_obj_id) { + reg->ref_obj_id = 0; + ref_set_non_owning(env, reg); } - state->refs[i].release_on_unlock = true; - /* Now mark everyone sharing same ref_obj_id as untrusted */ - bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({ - if (reg->ref_obj_id == ref_obj_id) - reg->type |= PTR_UNTRUSTED; - })); - return 0; - } + })); + return 0; } + verbose(env, "verifier internal error: ref state missing for ref_obj_id\n"); return -EFAULT; } @@ -9081,7 +9133,6 @@ static int process_kf_arg_ptr_to_list_node(struct bpf_verifier_env *env, { const struct btf_type *et, *t; struct btf_field *field; - struct btf_record *rec; u32 list_node_off; if (meta->btf != btf_vmlinux || @@ -9098,9 +9149,8 @@ static int process_kf_arg_ptr_to_list_node(struct bpf_verifier_env *env, return -EINVAL; } - rec = reg_btf_record(reg); list_node_off = reg->off + reg->var_off.value; - field = btf_record_find(rec, list_node_off, BPF_LIST_NODE); + field = reg_find_field_offset(reg, list_node_off, BPF_LIST_NODE); if (!field || field->offset != list_node_off) { verbose(env, "bpf_list_node not found at offset=%u\n", list_node_off); 
return -EINVAL; @@ -9126,8 +9176,8 @@ static int process_kf_arg_ptr_to_list_node(struct bpf_verifier_env *env, btf_name_by_offset(field->graph_root.btf, et->name_off)); return -EINVAL; } - /* Set arg#1 for expiration after unlock */ - return ref_set_release_on_unlock(env, reg->ref_obj_id); + + return 0; } static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_arg_meta *meta) @@ -9406,11 +9456,11 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, int *insn_idx_p) { const struct btf_type *t, *func, *func_proto, *ptr_type; + u32 i, nargs, func_id, ptr_type_id, release_ref_obj_id; struct bpf_reg_state *regs = cur_regs(env); const char *func_name, *ptr_type_name; bool sleepable, rcu_lock, rcu_unlock; struct bpf_kfunc_call_arg_meta meta; - u32 i, nargs, func_id, ptr_type_id; int err, insn_idx = *insn_idx_p; const struct btf_param *args; const struct btf_type *ret_t; @@ -9505,6 +9555,24 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, } } + if (meta.func_id == special_kfunc_list[KF_bpf_list_push_front] || + meta.func_id == special_kfunc_list[KF_bpf_list_push_back]) { + release_ref_obj_id = regs[BPF_REG_2].ref_obj_id; + err = ref_convert_owning_non_owning(env, release_ref_obj_id); + if (err) { + verbose(env, "kfunc %s#%d conversion of owning ref to non-owning failed\n", + func_name, func_id); + return err; + } + + err = release_reference(env, release_ref_obj_id); + if (err) { + verbose(env, "kfunc %s#%d reference has not been acquired before\n", + func_name, func_id); + return err; + } + } + for (i = 0; i < CALLER_SAVED_REGS; i++) mark_reg_not_init(env, regs, caller_saved[i]); @@ -11825,8 +11893,10 @@ static void mark_ptr_or_null_reg(struct bpf_func_state *state, */ if (WARN_ON_ONCE(reg->smin_value || reg->smax_value || !tnum_equals_const(reg->var_off, 0))) return; - if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC | PTR_MAYBE_NULL) && WARN_ON_ONCE(reg->off)) + if (!(type_is_ptr_alloc_obj(reg->type) || type_is_non_owning_ref(reg->type)) && + WARN_ON_ONCE(reg->off)) return; + if (is_null) { reg->type = SCALAR_VALUE; /* We don't need id and ref_obj_id from this point diff --git a/tools/testing/selftests/bpf/prog_tests/linked_list.c b/tools/testing/selftests/bpf/prog_tests/linked_list.c index 9a7d4c47af63..2592b8aa5e41 100644 --- a/tools/testing/selftests/bpf/prog_tests/linked_list.c +++ b/tools/testing/selftests/bpf/prog_tests/linked_list.c @@ -78,8 +78,6 @@ static struct { { "direct_write_head", "direct access to bpf_list_head is disallowed" }, { "direct_read_node", "direct access to bpf_list_node is disallowed" }, { "direct_write_node", "direct access to bpf_list_node is disallowed" }, - { "write_after_push_front", "only read is supported" }, - { "write_after_push_back", "only read is supported" }, { "use_after_unlock_push_front", "invalid mem access 'scalar'" }, { "use_after_unlock_push_back", "invalid mem access 'scalar'" }, { "double_push_front", "arg#1 expected pointer to allocated object" }, diff --git a/tools/testing/selftests/bpf/progs/linked_list.c b/tools/testing/selftests/bpf/progs/linked_list.c index 4ad88da5cda2..4fa4a9b01bde 100644 --- a/tools/testing/selftests/bpf/progs/linked_list.c +++ b/tools/testing/selftests/bpf/progs/linked_list.c @@ -260,7 +260,7 @@ int test_list_push_pop_multiple(struct bpf_spin_lock *lock, struct bpf_list_head { int ret; - ret = list_push_pop_multiple(lock ,head, false); + ret = list_push_pop_multiple(lock, head, false); if (ret) return ret; return 
list_push_pop_multiple(lock, head, true); diff --git a/tools/testing/selftests/bpf/progs/linked_list_fail.c b/tools/testing/selftests/bpf/progs/linked_list_fail.c index 1d9017240e19..69cdc07cba13 100644 --- a/tools/testing/selftests/bpf/progs/linked_list_fail.c +++ b/tools/testing/selftests/bpf/progs/linked_list_fail.c @@ -54,28 +54,44 @@ return 0; \ } -CHECK(kptr, push_front, &f->head); -CHECK(kptr, push_back, &f->head); CHECK(kptr, pop_front, &f->head); CHECK(kptr, pop_back, &f->head); -CHECK(global, push_front, &ghead); -CHECK(global, push_back, &ghead); CHECK(global, pop_front, &ghead); CHECK(global, pop_back, &ghead); -CHECK(map, push_front, &v->head); -CHECK(map, push_back, &v->head); CHECK(map, pop_front, &v->head); CHECK(map, pop_back, &v->head); -CHECK(inner_map, push_front, &iv->head); -CHECK(inner_map, push_back, &iv->head); CHECK(inner_map, pop_front, &iv->head); CHECK(inner_map, pop_back, &iv->head); #undef CHECK +#define CHECK(test, op, hexpr, nexpr) \ + SEC("?tc") \ + int test##_missing_lock_##op(void *ctx) \ + { \ + INIT; \ + void (*p)(void *, void *) = (void *)&bpf_list_##op; \ + p(hexpr, nexpr); \ + return 0; \ + } + +CHECK(kptr, push_front, &f->head, b); +CHECK(kptr, push_back, &f->head, b); + +CHECK(global, push_front, &ghead, f); +CHECK(global, push_back, &ghead, f); + +CHECK(map, push_front, &v->head, f); +CHECK(map, push_back, &v->head, f); + +CHECK(inner_map, push_front, &iv->head, f); +CHECK(inner_map, push_back, &iv->head, f); + +#undef CHECK + #define CHECK(test, op, lexpr, hexpr) \ SEC("?tc") \ int test##_incorrect_lock_##op(void *ctx) \ @@ -108,11 +124,47 @@ CHECK(inner_map, pop_back, &iv->head); CHECK(inner_map_global, op, &iv->lock, &ghead); \ CHECK(inner_map_map, op, &iv->lock, &v->head); -CHECK_OP(push_front); -CHECK_OP(push_back); CHECK_OP(pop_front); CHECK_OP(pop_back); +#undef CHECK +#undef CHECK_OP + +#define CHECK(test, op, lexpr, hexpr, nexpr) \ + SEC("?tc") \ + int test##_incorrect_lock_##op(void *ctx) \ + { \ + INIT; \ + void (*p)(void *, void*) = (void *)&bpf_list_##op; \ + bpf_spin_lock(lexpr); \ + p(hexpr, nexpr); \ + return 0; \ + } + +#define CHECK_OP(op) \ + CHECK(kptr_kptr, op, &f1->lock, &f2->head, b); \ + CHECK(kptr_global, op, &f1->lock, &ghead, f); \ + CHECK(kptr_map, op, &f1->lock, &v->head, f); \ + CHECK(kptr_inner_map, op, &f1->lock, &iv->head, f); \ + \ + CHECK(global_global, op, &glock2, &ghead, f); \ + CHECK(global_kptr, op, &glock, &f1->head, b); \ + CHECK(global_map, op, &glock, &v->head, f); \ + CHECK(global_inner_map, op, &glock, &iv->head, f); \ + \ + CHECK(map_map, op, &v->lock, &v2->head, f); \ + CHECK(map_kptr, op, &v->lock, &f2->head, b); \ + CHECK(map_global, op, &v->lock, &ghead, f); \ + CHECK(map_inner_map, op, &v->lock, &iv->head, f); \ + \ + CHECK(inner_map_inner_map, op, &iv->lock, &iv2->head, f); \ + CHECK(inner_map_kptr, op, &iv->lock, &f2->head, b); \ + CHECK(inner_map_global, op, &iv->lock, &ghead, f); \ + CHECK(inner_map_map, op, &iv->lock, &v->head, f); + +CHECK_OP(push_front); +CHECK_OP(push_back); + #undef CHECK #undef CHECK_OP #undef INIT @@ -303,34 +355,6 @@ int direct_write_node(void *ctx) return 0; } -static __always_inline -int write_after_op(void (*push_op)(void *head, void *node)) -{ - struct foo *f; - - f = bpf_obj_new(typeof(*f)); - if (!f) - return 0; - bpf_spin_lock(&glock); - push_op(&ghead, &f->node); - f->data = 42; - bpf_spin_unlock(&glock); - - return 0; -} - -SEC("?tc") -int write_after_push_front(void *ctx) -{ - return write_after_op((void *)bpf_list_push_front); -} - -SEC("?tc") -int 
write_after_push_back(void *ctx) -{ - return write_after_op((void *)bpf_list_push_back); -} - static __always_inline int use_after_unlock(void (*op)(void *head, void *node)) { -- cgit v1.2.3 From 9c395c1b99bd23f74bc628fa000480c49593d17f Mon Sep 17 00:00:00 2001 From: Dave Marchevsky Date: Mon, 13 Feb 2023 16:40:10 -0800 Subject: bpf: Add basic bpf_rb_{root,node} support This patch adds special BPF_RB_{ROOT,NODE} btf_field_types similar to BPF_LIST_{HEAD,NODE}, adds the necessary plumbing to detect the new types, and adds a bpf_rb_root_free function for freeing bpf_rb_root in map_values. structs bpf_rb_root and bpf_rb_node are opaque types meant to obscure structs rb_root_cached and rb_node, respectively. btf_struct_access will prevent BPF programs from touching these special fields automatically now that they're recognized. btf_check_and_fixup_fields now groups list_head and rb_root together as "graph root" fields and {list,rb}_node as "graph node", and does the same ownership cycle checking as before. Note that this function does _not_ prevent ownership type mixups (e.g. rb_root owning list_node) - that's handled by btf_parse_graph_root. After this patch, a bpf program can have a struct bpf_rb_root in a map_value, but not add anything to nor do anything useful with it. Signed-off-by: Dave Marchevsky Link: https://lore.kernel.org/r/20230214004017.2534011-2-davemarchevsky@fb.com Signed-off-by: Alexei Starovoitov --- include/linux/bpf.h | 20 ++- include/uapi/linux/bpf.h | 11 ++ kernel/bpf/btf.c | 162 ++++++++++++++------- kernel/bpf/helpers.c | 40 +++++ kernel/bpf/syscall.c | 28 ++-- kernel/bpf/verifier.c | 5 +- tools/include/uapi/linux/bpf.h | 11 ++ .../testing/selftests/bpf/prog_tests/linked_list.c | 12 +- 8 files changed, 216 insertions(+), 73 deletions(-) (limited to 'tools') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 8b5d0b4c4ada..be34f7deb6c3 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -181,7 +181,10 @@ enum btf_field_type { BPF_KPTR = BPF_KPTR_UNREF | BPF_KPTR_REF, BPF_LIST_HEAD = (1 << 4), BPF_LIST_NODE = (1 << 5), - BPF_GRAPH_NODE_OR_ROOT = BPF_LIST_NODE | BPF_LIST_HEAD, + BPF_RB_ROOT = (1 << 6), + BPF_RB_NODE = (1 << 7), + BPF_GRAPH_NODE_OR_ROOT = BPF_LIST_NODE | BPF_LIST_HEAD | + BPF_RB_NODE | BPF_RB_ROOT, }; struct btf_field_kptr { @@ -285,6 +288,10 @@ static inline const char *btf_field_type_name(enum btf_field_type type) return "bpf_list_head"; case BPF_LIST_NODE: return "bpf_list_node"; + case BPF_RB_ROOT: + return "bpf_rb_root"; + case BPF_RB_NODE: + return "bpf_rb_node"; default: WARN_ON_ONCE(1); return "unknown"; @@ -305,6 +312,10 @@ static inline u32 btf_field_type_size(enum btf_field_type type) return sizeof(struct bpf_list_head); case BPF_LIST_NODE: return sizeof(struct bpf_list_node); + case BPF_RB_ROOT: + return sizeof(struct bpf_rb_root); + case BPF_RB_NODE: + return sizeof(struct bpf_rb_node); default: WARN_ON_ONCE(1); return 0; @@ -325,6 +336,10 @@ static inline u32 btf_field_type_align(enum btf_field_type type) return __alignof__(struct bpf_list_head); case BPF_LIST_NODE: return __alignof__(struct bpf_list_node); + case BPF_RB_ROOT: + return __alignof__(struct bpf_rb_root); + case BPF_RB_NODE: + return __alignof__(struct bpf_rb_node); default: WARN_ON_ONCE(1); return 0; @@ -435,6 +450,9 @@ void copy_map_value_locked(struct bpf_map *map, void *dst, void *src, void bpf_timer_cancel_and_free(void *timer); void bpf_list_head_free(const struct btf_field *field, void *list_head, struct bpf_spin_lock *spin_lock); +void bpf_rb_root_free(const 
struct btf_field *field, void *rb_root, + struct bpf_spin_lock *spin_lock); + int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size); diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 17afd2b35ee5..1503f61336b6 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -6917,6 +6917,17 @@ struct bpf_list_node { __u64 :64; } __attribute__((aligned(8))); +struct bpf_rb_root { + __u64 :64; + __u64 :64; +} __attribute__((aligned(8))); + +struct bpf_rb_node { + __u64 :64; + __u64 :64; + __u64 :64; +} __attribute__((aligned(8))); + struct bpf_sysctl { __u32 write; /* Sysctl is being read (= 0) or written (= 1). * Allows 1,2,4-byte read, but no write. diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 740bdb045b14..b9d1f5c4e316 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -3324,12 +3324,14 @@ static const char *btf_find_decl_tag_value(const struct btf *btf, return NULL; } -static int btf_find_list_head(const struct btf *btf, const struct btf_type *pt, - const struct btf_type *t, int comp_idx, - u32 off, int sz, struct btf_field_info *info) +static int +btf_find_graph_root(const struct btf *btf, const struct btf_type *pt, + const struct btf_type *t, int comp_idx, u32 off, + int sz, struct btf_field_info *info, + enum btf_field_type head_type) { + const char *node_field_name; const char *value_type; - const char *list_node; s32 id; if (!__btf_type_is_struct(t)) @@ -3339,26 +3341,32 @@ static int btf_find_list_head(const struct btf *btf, const struct btf_type *pt, value_type = btf_find_decl_tag_value(btf, pt, comp_idx, "contains:"); if (!value_type) return -EINVAL; - list_node = strstr(value_type, ":"); - if (!list_node) + node_field_name = strstr(value_type, ":"); + if (!node_field_name) return -EINVAL; - value_type = kstrndup(value_type, list_node - value_type, GFP_KERNEL | __GFP_NOWARN); + value_type = kstrndup(value_type, node_field_name - value_type, GFP_KERNEL | __GFP_NOWARN); if (!value_type) return -ENOMEM; id = btf_find_by_name_kind(btf, value_type, BTF_KIND_STRUCT); kfree(value_type); if (id < 0) return id; - list_node++; - if (str_is_empty(list_node)) + node_field_name++; + if (str_is_empty(node_field_name)) return -EINVAL; - info->type = BPF_LIST_HEAD; + info->type = head_type; info->off = off; info->graph_root.value_btf_id = id; - info->graph_root.node_name = list_node; + info->graph_root.node_name = node_field_name; return BTF_FIELD_FOUND; } +#define field_mask_test_name(field_type, field_type_str) \ + if (field_mask & field_type && !strcmp(name, field_type_str)) { \ + type = field_type; \ + goto end; \ + } + static int btf_get_field_type(const char *name, u32 field_mask, u32 *seen_mask, int *align, int *sz) { @@ -3382,18 +3390,11 @@ static int btf_get_field_type(const char *name, u32 field_mask, u32 *seen_mask, goto end; } } - if (field_mask & BPF_LIST_HEAD) { - if (!strcmp(name, "bpf_list_head")) { - type = BPF_LIST_HEAD; - goto end; - } - } - if (field_mask & BPF_LIST_NODE) { - if (!strcmp(name, "bpf_list_node")) { - type = BPF_LIST_NODE; - goto end; - } - } + field_mask_test_name(BPF_LIST_HEAD, "bpf_list_head"); + field_mask_test_name(BPF_LIST_NODE, "bpf_list_node"); + field_mask_test_name(BPF_RB_ROOT, "bpf_rb_root"); + field_mask_test_name(BPF_RB_NODE, "bpf_rb_node"); + /* Only return BPF_KPTR when all other types with matchable names fail */ if (field_mask & BPF_KPTR) { type = BPF_KPTR_REF; @@ -3406,6 +3407,8 @@ end: return type; } +#undef field_mask_test_name + static int btf_find_struct_field(const struct btf *btf, 
const struct btf_type *t, u32 field_mask, struct btf_field_info *info, int info_cnt) @@ -3438,6 +3441,7 @@ static int btf_find_struct_field(const struct btf *btf, case BPF_SPIN_LOCK: case BPF_TIMER: case BPF_LIST_NODE: + case BPF_RB_NODE: ret = btf_find_struct(btf, member_type, off, sz, field_type, idx < info_cnt ? &info[idx] : &tmp); if (ret < 0) @@ -3451,8 +3455,11 @@ static int btf_find_struct_field(const struct btf *btf, return ret; break; case BPF_LIST_HEAD: - ret = btf_find_list_head(btf, t, member_type, i, off, sz, - idx < info_cnt ? &info[idx] : &tmp); + case BPF_RB_ROOT: + ret = btf_find_graph_root(btf, t, member_type, + i, off, sz, + idx < info_cnt ? &info[idx] : &tmp, + field_type); if (ret < 0) return ret; break; @@ -3499,6 +3506,7 @@ static int btf_find_datasec_var(const struct btf *btf, const struct btf_type *t, case BPF_SPIN_LOCK: case BPF_TIMER: case BPF_LIST_NODE: + case BPF_RB_NODE: ret = btf_find_struct(btf, var_type, off, sz, field_type, idx < info_cnt ? &info[idx] : &tmp); if (ret < 0) @@ -3512,8 +3520,11 @@ static int btf_find_datasec_var(const struct btf *btf, const struct btf_type *t, return ret; break; case BPF_LIST_HEAD: - ret = btf_find_list_head(btf, var, var_type, -1, off, sz, - idx < info_cnt ? &info[idx] : &tmp); + case BPF_RB_ROOT: + ret = btf_find_graph_root(btf, var, var_type, + -1, off, sz, + idx < info_cnt ? &info[idx] : &tmp, + field_type); if (ret < 0) return ret; break; @@ -3615,8 +3626,11 @@ end_btf: return ret; } -static int btf_parse_list_head(const struct btf *btf, struct btf_field *field, - struct btf_field_info *info) +static int btf_parse_graph_root(const struct btf *btf, + struct btf_field *field, + struct btf_field_info *info, + const char *node_type_name, + size_t node_type_align) { const struct btf_type *t, *n = NULL; const struct btf_member *member; @@ -3638,13 +3652,13 @@ static int btf_parse_list_head(const struct btf *btf, struct btf_field *field, n = btf_type_by_id(btf, member->type); if (!__btf_type_is_struct(n)) return -EINVAL; - if (strcmp("bpf_list_node", __btf_name_by_offset(btf, n->name_off))) + if (strcmp(node_type_name, __btf_name_by_offset(btf, n->name_off))) return -EINVAL; offset = __btf_member_bit_offset(n, member); if (offset % 8) return -EINVAL; offset /= 8; - if (offset % __alignof__(struct bpf_list_node)) + if (offset % node_type_align) return -EINVAL; field->graph_root.btf = (struct btf *)btf; @@ -3656,6 +3670,20 @@ static int btf_parse_list_head(const struct btf *btf, struct btf_field *field, return 0; } +static int btf_parse_list_head(const struct btf *btf, struct btf_field *field, + struct btf_field_info *info) +{ + return btf_parse_graph_root(btf, field, info, "bpf_list_node", + __alignof__(struct bpf_list_node)); +} + +static int btf_parse_rb_root(const struct btf *btf, struct btf_field *field, + struct btf_field_info *info) +{ + return btf_parse_graph_root(btf, field, info, "bpf_rb_node", + __alignof__(struct bpf_rb_node)); +} + struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type *t, u32 field_mask, u32 value_size) { @@ -3718,7 +3746,13 @@ struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type if (ret < 0) goto end; break; + case BPF_RB_ROOT: + ret = btf_parse_rb_root(btf, &rec->fields[i], &info_arr[i]); + if (ret < 0) + goto end; + break; case BPF_LIST_NODE: + case BPF_RB_NODE: break; default: ret = -EFAULT; @@ -3727,8 +3761,9 @@ struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type rec->cnt++; } - /* bpf_list_head requires 
bpf_spin_lock */ - if (btf_record_has_field(rec, BPF_LIST_HEAD) && rec->spin_lock_off < 0) { + /* bpf_{list_head, rb_root} require bpf_spin_lock */ + if ((btf_record_has_field(rec, BPF_LIST_HEAD) || + btf_record_has_field(rec, BPF_RB_ROOT)) && rec->spin_lock_off < 0) { ret = -EINVAL; goto end; } @@ -3739,22 +3774,28 @@ end: return ERR_PTR(ret); } +#define GRAPH_ROOT_MASK (BPF_LIST_HEAD | BPF_RB_ROOT) +#define GRAPH_NODE_MASK (BPF_LIST_NODE | BPF_RB_NODE) + int btf_check_and_fixup_fields(const struct btf *btf, struct btf_record *rec) { int i; - /* There are two owning types, kptr_ref and bpf_list_head. The former - * only supports storing kernel types, which can never store references - * to program allocated local types, atleast not yet. Hence we only need - * to ensure that bpf_list_head ownership does not form cycles. + /* There are three types that signify ownership of some other type: + * kptr_ref, bpf_list_head, bpf_rb_root. + * kptr_ref only supports storing kernel types, which can't store + * references to program allocated local types. + * + * Hence we only need to ensure that bpf_{list_head,rb_root} ownership + * does not form cycles. */ - if (IS_ERR_OR_NULL(rec) || !(rec->field_mask & BPF_LIST_HEAD)) + if (IS_ERR_OR_NULL(rec) || !(rec->field_mask & GRAPH_ROOT_MASK)) return 0; for (i = 0; i < rec->cnt; i++) { struct btf_struct_meta *meta; u32 btf_id; - if (!(rec->fields[i].type & BPF_LIST_HEAD)) + if (!(rec->fields[i].type & GRAPH_ROOT_MASK)) continue; btf_id = rec->fields[i].graph_root.value_btf_id; meta = btf_find_struct_meta(btf, btf_id); @@ -3762,39 +3803,47 @@ int btf_check_and_fixup_fields(const struct btf *btf, struct btf_record *rec) return -EFAULT; rec->fields[i].graph_root.value_rec = meta->record; - if (!(rec->field_mask & BPF_LIST_NODE)) + /* We need to set value_rec for all root types, but no need + * to check ownership cycle for a type unless it's also a + * node type. + */ + if (!(rec->field_mask & GRAPH_NODE_MASK)) continue; /* We need to ensure ownership acyclicity among all types. The * proper way to do it would be to topologically sort all BTF * IDs based on the ownership edges, since there can be multiple - * bpf_list_head in a type. Instead, we use the following - * reasoning: + * bpf_{list_head,rb_node} in a type. Instead, we use the + * following reasoning: * * - A type can only be owned by another type in user BTF if it - * has a bpf_list_node. + * has a bpf_{list,rb}_node. Let's call these node types. * - A type can only _own_ another type in user BTF if it has a - * bpf_list_head. + * bpf_{list_head,rb_root}. Let's call these root types. * - * We ensure that if a type has both bpf_list_head and - * bpf_list_node, its element types cannot be owning types. + * We ensure that if a type is both a root and node, its + * element types cannot be root types. * * To ensure acyclicity: * - * When A only has bpf_list_head, ownership chain can be: + * When A is a root type but not a node, its ownership + * chain can be: * A -> B -> C * Where: - * - B has both bpf_list_head and bpf_list_node. - * - C only has bpf_list_node. + * - A is a root, e.g. has bpf_rb_root. + * - B is both a root and node, e.g. has bpf_rb_node and + * bpf_list_head. + * - C is only a node, e.g. has bpf_list_node * - * When A has both bpf_list_head and bpf_list_node, some other - * type already owns it in the BTF domain, hence it can not own - * another owning type through any of the bpf_list_head edges. 
+ * When A is both a root and node, some other type already + * owns it in the BTF domain, hence it can not own + * another root type through any of the ownership edges. * A -> B * Where: - * - B only has bpf_list_node. + * - A is both a root and node. + * - B is only a node. */ - if (meta->record->field_mask & BPF_LIST_HEAD) + if (meta->record->field_mask & GRAPH_ROOT_MASK) return -ELOOP; } return 0; @@ -5256,6 +5305,8 @@ static const char *alloc_obj_fields[] = { "bpf_spin_lock", "bpf_list_head", "bpf_list_node", + "bpf_rb_root", + "bpf_rb_node", }; static struct btf_struct_metas * @@ -5329,7 +5380,8 @@ btf_parse_struct_metas(struct bpf_verifier_log *log, struct btf *btf) type = &tab->types[tab->cnt]; type->btf_id = i; - record = btf_parse_fields(btf, t, BPF_SPIN_LOCK | BPF_LIST_HEAD | BPF_LIST_NODE, t->size); + record = btf_parse_fields(btf, t, BPF_SPIN_LOCK | BPF_LIST_HEAD | BPF_LIST_NODE | + BPF_RB_ROOT | BPF_RB_NODE, t->size); /* The record cannot be unset, treat it as an error if so */ if (IS_ERR_OR_NULL(record)) { ret = PTR_ERR_OR_ZERO(record) ?: -EFAULT; diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 2dae44581922..192184b5156e 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -1772,6 +1772,46 @@ unlock: } } +/* Like rbtree_postorder_for_each_entry_safe, but 'pos' and 'n' are + * 'rb_node *', so field name of rb_node within containing struct is not + * needed. + * + * Since bpf_rb_tree's node type has a corresponding struct btf_field with + * graph_root.node_offset, it's not necessary to know field name + * or type of node struct + */ +#define bpf_rbtree_postorder_for_each_entry_safe(pos, n, root) \ + for (pos = rb_first_postorder(root); \ + pos && ({ n = rb_next_postorder(pos); 1; }); \ + pos = n) + +void bpf_rb_root_free(const struct btf_field *field, void *rb_root, + struct bpf_spin_lock *spin_lock) +{ + struct rb_root_cached orig_root, *root = rb_root; + struct rb_node *pos, *n; + void *obj; + + BUILD_BUG_ON(sizeof(struct rb_root_cached) > sizeof(struct bpf_rb_root)); + BUILD_BUG_ON(__alignof__(struct rb_root_cached) > __alignof__(struct bpf_rb_root)); + + __bpf_spin_lock_irqsave(spin_lock); + orig_root = *root; + *root = RB_ROOT_CACHED; + __bpf_spin_unlock_irqrestore(spin_lock); + + bpf_rbtree_postorder_for_each_entry_safe(pos, n, &orig_root.rb_root) { + obj = pos; + obj -= field->graph_root.node_offset; + + bpf_obj_free_fields(field->graph_root.value_rec, obj); + + migrate_disable(); + bpf_mem_free(&bpf_global_ma, obj); + migrate_enable(); + } +} + __diag_push(); __diag_ignore_all("-Wmissing-prototypes", "Global functions as their definitions will be in vmlinux BTF"); diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index cda8d00f3762..e3fcdc9836a6 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -537,9 +537,6 @@ void btf_record_free(struct btf_record *rec) return; for (i = 0; i < rec->cnt; i++) { switch (rec->fields[i].type) { - case BPF_SPIN_LOCK: - case BPF_TIMER: - break; case BPF_KPTR_UNREF: case BPF_KPTR_REF: if (rec->fields[i].kptr.module) @@ -548,7 +545,11 @@ void btf_record_free(struct btf_record *rec) break; case BPF_LIST_HEAD: case BPF_LIST_NODE: - /* Nothing to release for bpf_list_head */ + case BPF_RB_ROOT: + case BPF_RB_NODE: + case BPF_SPIN_LOCK: + case BPF_TIMER: + /* Nothing to release */ break; default: WARN_ON_ONCE(1); @@ -581,9 +582,6 @@ struct btf_record *btf_record_dup(const struct btf_record *rec) new_rec->cnt = 0; for (i = 0; i < rec->cnt; i++) { switch (fields[i].type) { - case BPF_SPIN_LOCK: - 
case BPF_TIMER: - break; case BPF_KPTR_UNREF: case BPF_KPTR_REF: btf_get(fields[i].kptr.btf); @@ -594,7 +592,11 @@ struct btf_record *btf_record_dup(const struct btf_record *rec) break; case BPF_LIST_HEAD: case BPF_LIST_NODE: - /* Nothing to acquire for bpf_list_head */ + case BPF_RB_ROOT: + case BPF_RB_NODE: + case BPF_SPIN_LOCK: + case BPF_TIMER: + /* Nothing to acquire */ break; default: ret = -EFAULT; @@ -674,7 +676,13 @@ void bpf_obj_free_fields(const struct btf_record *rec, void *obj) continue; bpf_list_head_free(field, field_ptr, obj + rec->spin_lock_off); break; + case BPF_RB_ROOT: + if (WARN_ON_ONCE(rec->spin_lock_off < 0)) + continue; + bpf_rb_root_free(field, field_ptr, obj + rec->spin_lock_off); + break; case BPF_LIST_NODE: + case BPF_RB_NODE: break; default: WARN_ON_ONCE(1); @@ -1010,7 +1018,8 @@ static int map_check_btf(struct bpf_map *map, const struct btf *btf, return -EINVAL; map->record = btf_parse_fields(btf, value_type, - BPF_SPIN_LOCK | BPF_TIMER | BPF_KPTR | BPF_LIST_HEAD, + BPF_SPIN_LOCK | BPF_TIMER | BPF_KPTR | BPF_LIST_HEAD | + BPF_RB_ROOT, map->value_size); if (!IS_ERR_OR_NULL(map->record)) { int i; @@ -1058,6 +1067,7 @@ static int map_check_btf(struct bpf_map *map, const struct btf *btf, } break; case BPF_LIST_HEAD: + case BPF_RB_ROOT: if (map->map_type != BPF_MAP_TYPE_HASH && map->map_type != BPF_MAP_TYPE_LRU_HASH && map->map_type != BPF_MAP_TYPE_ARRAY) { diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index f176bc15c879..4fd098851f43 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -14703,9 +14703,10 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env, { enum bpf_prog_type prog_type = resolve_prog_type(prog); - if (btf_record_has_field(map->record, BPF_LIST_HEAD)) { + if (btf_record_has_field(map->record, BPF_LIST_HEAD) || + btf_record_has_field(map->record, BPF_RB_ROOT)) { if (is_tracing_prog_type(prog_type)) { - verbose(env, "tracing progs cannot use bpf_list_head yet\n"); + verbose(env, "tracing progs cannot use bpf_{list_head,rb_root} yet\n"); return -EINVAL; } } diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 17afd2b35ee5..1503f61336b6 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -6917,6 +6917,17 @@ struct bpf_list_node { __u64 :64; } __attribute__((aligned(8))); +struct bpf_rb_root { + __u64 :64; + __u64 :64; +} __attribute__((aligned(8))); + +struct bpf_rb_node { + __u64 :64; + __u64 :64; + __u64 :64; +} __attribute__((aligned(8))); + struct bpf_sysctl { __u32 write; /* Sysctl is being read (= 0) or written (= 1). * Allows 1,2,4-byte read, but no write. 
diff --git a/tools/testing/selftests/bpf/prog_tests/linked_list.c b/tools/testing/selftests/bpf/prog_tests/linked_list.c index 2592b8aa5e41..c456b34a823a 100644 --- a/tools/testing/selftests/bpf/prog_tests/linked_list.c +++ b/tools/testing/selftests/bpf/prog_tests/linked_list.c @@ -58,12 +58,12 @@ static struct { TEST(inner_map, pop_front) TEST(inner_map, pop_back) #undef TEST - { "map_compat_kprobe", "tracing progs cannot use bpf_list_head yet" }, - { "map_compat_kretprobe", "tracing progs cannot use bpf_list_head yet" }, - { "map_compat_tp", "tracing progs cannot use bpf_list_head yet" }, - { "map_compat_perf", "tracing progs cannot use bpf_list_head yet" }, - { "map_compat_raw_tp", "tracing progs cannot use bpf_list_head yet" }, - { "map_compat_raw_tp_w", "tracing progs cannot use bpf_list_head yet" }, + { "map_compat_kprobe", "tracing progs cannot use bpf_{list_head,rb_root} yet" }, + { "map_compat_kretprobe", "tracing progs cannot use bpf_{list_head,rb_root} yet" }, + { "map_compat_tp", "tracing progs cannot use bpf_{list_head,rb_root} yet" }, + { "map_compat_perf", "tracing progs cannot use bpf_{list_head,rb_root} yet" }, + { "map_compat_raw_tp", "tracing progs cannot use bpf_{list_head,rb_root} yet" }, + { "map_compat_raw_tp_w", "tracing progs cannot use bpf_{list_head,rb_root} yet" }, { "obj_type_id_oor", "local type ID argument must be in range [0, U32_MAX]" }, { "obj_new_no_composite", "bpf_obj_new type ID argument must be of a struct" }, { "obj_new_no_struct", "bpf_obj_new type ID argument must be of a struct" }, -- cgit v1.2.3 From a40d3632436b1677a94c16e77be8da798ee9e12b Mon Sep 17 00:00:00 2001 From: Dave Marchevsky Date: Mon, 13 Feb 2023 16:40:14 -0800 Subject: bpf: Special verifier handling for bpf_rbtree_{remove, first} Newly-added bpf_rbtree_{remove,first} kfuncs have some special properties that require handling in the verifier: * both bpf_rbtree_remove and bpf_rbtree_first return the type containing the bpf_rb_node field, with the offset set to that field's offset, instead of a struct bpf_rb_node * * mark_reg_graph_node helper added in the previous patch generalizes this logic, use it * bpf_rbtree_remove's node input is a node that's been inserted in the tree - a non-owning reference. * bpf_rbtree_remove must invalidate non-owning references in order to avoid aliasing issues. Use the previously-added invalidate_non_owning_refs helper to mark this function as a non-owning ref invalidation point. * Unlike other functions, which convert one of their input arg regs to non-owning reference, bpf_rbtree_first takes no arguments and just returns a non-owning reference (possibly null) * For now verifier logic for this is special-cased instead of adding a new kfunc flag. This patch, along with the previous one, completes special verifier handling for all rbtree API functions added in this series. With functional verifier handling of rbtree_remove, under the current non-owning reference scheme, a node type with both bpf_{list,rb}_node fields could cause the verifier to accept programs which remove such nodes from collections they haven't been added to. In order to prevent this, this patch adds a check to btf_parse_fields which rejects structs with both bpf_{list,rb}_node fields. This is a temporary measure that can be removed after the "collection identity" followup. See comment added in btf_parse_fields. A linked_list BTF test exercising the new check is added in this patch as well. 
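To make the above concrete, a minimal usage sketch under these rules (borrowing the node_data type and the glock/groot declarations from the rbtree selftests later in this series): obtain a non-owning ref inside the critical section, convert it to an owning ref with bpf_rbtree_remove (which also invalidates all other non-owning refs), then release it after unlock:

    struct bpf_rb_node *res;
    struct node_data *n;

    bpf_spin_lock(&glock);
    res = bpf_rbtree_first(&groot);   /* non-owning ref, possibly NULL */
    if (!res) {
            bpf_spin_unlock(&glock);
            return 0;
    }
    n = container_of(res, struct node_data, node);
    res = bpf_rbtree_remove(&groot, &n->node); /* owning ref, or NULL */
    bpf_spin_unlock(&glock);

    if (res) {
            n = container_of(res, struct node_data, node);
            bpf_obj_drop(n);          /* owning ref must be released */
    }
    return 0;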
Signed-off-by: Dave Marchevsky Link: https://lore.kernel.org/r/20230214004017.2534011-6-davemarchevsky@fb.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/btf.c | 24 ++++++++++++ kernel/bpf/verifier.c | 43 ++++++++++++++++------ .../testing/selftests/bpf/prog_tests/linked_list.c | 37 +++++++++++++++++++ 3 files changed, 92 insertions(+), 12 deletions(-) (limited to 'tools') diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index b9d1f5c4e316..6582735ef1fc 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -3768,6 +3768,30 @@ struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type goto end; } + /* need collection identity for non-owning refs before allowing this + * + * Consider a node type w/ both list and rb_node fields: + * struct node { + * struct bpf_list_node l; + * struct bpf_rb_node r; + * } + * + * Used like so: + * struct node *n = bpf_obj_new(....); + * bpf_list_push_front(&list_head, &n->l); + * bpf_rbtree_remove(&rb_root, &n->r); + * + * It should not be possible to rbtree_remove the node since it hasn't + * been added to a tree. But push_front converts n to a non-owning + * reference, and rbtree_remove accepts the non-owning reference to + * a type w/ bpf_rb_node field. + */ + if (btf_record_has_field(rec, BPF_LIST_NODE) && + btf_record_has_field(rec, BPF_RB_NODE)) { + ret = -EINVAL; + goto end; + } + return rec; end: btf_record_free(rec); diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 88c8edf67007..21e08c111702 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -9682,14 +9682,26 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_ return ret; break; case KF_ARG_PTR_TO_RB_NODE: - if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) { - verbose(env, "arg#%d expected pointer to allocated object\n", i); - return -EINVAL; - } - if (!reg->ref_obj_id) { - verbose(env, "allocated object must be referenced\n"); - return -EINVAL; + if (meta->func_id == special_kfunc_list[KF_bpf_rbtree_remove]) { + if (!type_is_non_owning_ref(reg->type) || reg->ref_obj_id) { + verbose(env, "rbtree_remove node input must be non-owning ref\n"); + return -EINVAL; + } + if (in_rbtree_lock_required_cb(env)) { + verbose(env, "rbtree_remove not allowed in rbtree cb\n"); + return -EINVAL; + } + } else { + if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) { + verbose(env, "arg#%d expected pointer to allocated object\n", i); + return -EINVAL; + } + if (!reg->ref_obj_id) { + verbose(env, "allocated object must be referenced\n"); + return -EINVAL; + } } + ret = process_kf_arg_ptr_to_rbtree_node(env, reg, regno, meta); if (ret < 0) return ret; @@ -9940,11 +9952,12 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, meta.func_id == special_kfunc_list[KF_bpf_list_pop_back]) { struct btf_field *field = meta.arg_list_head.field; - mark_reg_known_zero(env, regs, BPF_REG_0); - regs[BPF_REG_0].type = PTR_TO_BTF_ID | MEM_ALLOC; - regs[BPF_REG_0].btf = field->graph_root.btf; - regs[BPF_REG_0].btf_id = field->graph_root.value_btf_id; - regs[BPF_REG_0].off = field->graph_root.node_offset; + mark_reg_graph_node(regs, BPF_REG_0, &field->graph_root); + } else if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_remove] || + meta.func_id == special_kfunc_list[KF_bpf_rbtree_first]) { + struct btf_field *field = meta.arg_rbtree_root.field; + + mark_reg_graph_node(regs, BPF_REG_0, &field->graph_root); } else if (meta.func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx]) { mark_reg_known_zero(env, regs, BPF_REG_0); 
regs[BPF_REG_0].type = PTR_TO_BTF_ID | PTR_TRUSTED; @@ -10010,7 +10023,13 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, if (is_kfunc_ret_null(&meta)) regs[BPF_REG_0].id = id; regs[BPF_REG_0].ref_obj_id = id; + } else if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_first]) { + ref_set_non_owning(env, ®s[BPF_REG_0]); } + + if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_remove]) + invalidate_non_owning_refs(env); + if (reg_may_point_to_spin_lock(®s[BPF_REG_0]) && !regs[BPF_REG_0].id) regs[BPF_REG_0].id = ++env->id_gen; } /* else { add_kfunc_call() ensures it is btf_type_is_void(t) } */ diff --git a/tools/testing/selftests/bpf/prog_tests/linked_list.c b/tools/testing/selftests/bpf/prog_tests/linked_list.c index c456b34a823a..0ed8132ce1c3 100644 --- a/tools/testing/selftests/bpf/prog_tests/linked_list.c +++ b/tools/testing/selftests/bpf/prog_tests/linked_list.c @@ -715,6 +715,43 @@ static void test_btf(void) btf__free(btf); break; } + + while (test__start_subtest("btf: list_node and rb_node in same struct")) { + btf = init_btf(); + if (!ASSERT_OK_PTR(btf, "init_btf")) + break; + + id = btf__add_struct(btf, "bpf_rb_node", 24); + if (!ASSERT_EQ(id, 5, "btf__add_struct bpf_rb_node")) + break; + id = btf__add_struct(btf, "bar", 40); + if (!ASSERT_EQ(id, 6, "btf__add_struct bar")) + break; + err = btf__add_field(btf, "a", LIST_NODE, 0, 0); + if (!ASSERT_OK(err, "btf__add_field bar::a")) + break; + err = btf__add_field(btf, "c", 5, 128, 0); + if (!ASSERT_OK(err, "btf__add_field bar::c")) + break; + + id = btf__add_struct(btf, "foo", 20); + if (!ASSERT_EQ(id, 7, "btf__add_struct foo")) + break; + err = btf__add_field(btf, "a", LIST_HEAD, 0, 0); + if (!ASSERT_OK(err, "btf__add_field foo::a")) + break; + err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0); + if (!ASSERT_OK(err, "btf__add_field foo::b")) + break; + id = btf__add_decl_tag(btf, "contains:bar:a", 7, 0); + if (!ASSERT_EQ(id, 8, "btf__add_decl_tag contains:bar:a")) + break; + + err = btf__load_into_kernel(btf); + ASSERT_EQ(err, -EINVAL, "check btf"); + btf__free(btf); + break; + } } void test_linked_list(void) -- cgit v1.2.3 From c834df847ee60eeb678171eb0f1e59f611c62a99 Mon Sep 17 00:00:00 2001 From: Dave Marchevsky Date: Mon, 13 Feb 2023 16:40:15 -0800 Subject: bpf: Add bpf_rbtree_{add,remove,first} decls to bpf_experimental.h These kfuncs will be used by selftests in following patches Signed-off-by: Dave Marchevsky Link: https://lore.kernel.org/r/20230214004017.2534011-7-davemarchevsky@fb.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/bpf_experimental.h | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h index 424f7bbbfe9b..dbd2c729781a 100644 --- a/tools/testing/selftests/bpf/bpf_experimental.h +++ b/tools/testing/selftests/bpf/bpf_experimental.h @@ -65,4 +65,28 @@ extern struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head) __ks */ extern struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head) __ksym; +/* Description + * Remove 'node' from rbtree with root 'root' + * Returns + * Pointer to the removed node, or NULL if 'root' didn't contain 'node' + */ +extern struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root, + struct bpf_rb_node *node) __ksym; + +/* Description + * Add 'node' to rbtree with root 'root' using comparator 'less' + * Returns + * Nothing + */ +extern void bpf_rbtree_add(struct 
bpf_rb_root *root, struct bpf_rb_node *node, + bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b)) __ksym; + +/* Description + * Return the first (leftmost) node in input tree + * Returns + * Pointer to the node, which is _not_ removed from the tree. If the tree + * contains no nodes, returns NULL. + */ +extern struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root) __ksym; + #endif -- cgit v1.2.3 From 215249f6adc0359e3546829e7ee622b5e309b0ad Mon Sep 17 00:00:00 2001 From: Dave Marchevsky Date: Mon, 13 Feb 2023 16:40:16 -0800 Subject: selftests/bpf: Add rbtree selftests This patch adds selftests exercising the logic changed/added in the previous patches in the series. A variety of successful and unsuccessful rbtree usages are validated: Success: * Add some nodes, let map_value bpf_rbtree_root destructor clean them up * Add some nodes, remove one using the non-owning ref leftover by successful rbtree_add() call * Add some nodes, remove one using the non-owning ref returned by rbtree_first() call Failure: * BTF where bpf_rb_root owns bpf_list_node should fail to load * BTF where node of type X is added to tree containing nodes of type Y should fail to load * No calling rbtree api functions in 'less' callback for rbtree_add * No releasing lock in 'less' callback for rbtree_add * No removing a node which hasn't been added to any tree * No adding a node which has already been added to a tree * No escaping of non-owning references past their lock's critical section * No escaping of non-owning references past other invalidation points (rbtree_remove) These tests mostly focus on rbtree-specific additions, but some of the failure cases revalidate scenarios common to both linked_list and rbtree which are covered in the former's tests. Better to be a bit redundant in case linked_list and rbtree semantics deviate over time. Signed-off-by: Dave Marchevsky Link: https://lore.kernel.org/r/20230214004017.2534011-8-davemarchevsky@fb.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/rbtree.c | 117 ++++++++ tools/testing/selftests/bpf/progs/rbtree.c | 176 +++++++++++ .../bpf/progs/rbtree_btf_fail__add_wrong_type.c | 52 ++++ .../bpf/progs/rbtree_btf_fail__wrong_node_type.c | 49 ++++ tools/testing/selftests/bpf/progs/rbtree_fail.c | 322 +++++++++++++++++++++ 5 files changed, 716 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/rbtree.c create mode 100644 tools/testing/selftests/bpf/progs/rbtree.c create mode 100644 tools/testing/selftests/bpf/progs/rbtree_btf_fail__add_wrong_type.c create mode 100644 tools/testing/selftests/bpf/progs/rbtree_btf_fail__wrong_node_type.c create mode 100644 tools/testing/selftests/bpf/progs/rbtree_fail.c (limited to 'tools') diff --git a/tools/testing/selftests/bpf/prog_tests/rbtree.c b/tools/testing/selftests/bpf/prog_tests/rbtree.c new file mode 100644 index 000000000000..156fa95c42f6 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/rbtree.c @@ -0,0 +1,117 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. 
*/ + +#include +#include + +#include "rbtree.skel.h" +#include "rbtree_fail.skel.h" +#include "rbtree_btf_fail__wrong_node_type.skel.h" +#include "rbtree_btf_fail__add_wrong_type.skel.h" + +static void test_rbtree_add_nodes(void) +{ + LIBBPF_OPTS(bpf_test_run_opts, opts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 1, + ); + struct rbtree *skel; + int ret; + + skel = rbtree__open_and_load(); + if (!ASSERT_OK_PTR(skel, "rbtree__open_and_load")) + return; + + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_add_nodes), &opts); + ASSERT_OK(ret, "rbtree_add_nodes run"); + ASSERT_OK(opts.retval, "rbtree_add_nodes retval"); + ASSERT_EQ(skel->data->less_callback_ran, 1, "rbtree_add_nodes less_callback_ran"); + + rbtree__destroy(skel); +} + +static void test_rbtree_add_and_remove(void) +{ + LIBBPF_OPTS(bpf_test_run_opts, opts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 1, + ); + struct rbtree *skel; + int ret; + + skel = rbtree__open_and_load(); + if (!ASSERT_OK_PTR(skel, "rbtree__open_and_load")) + return; + + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_add_and_remove), &opts); + ASSERT_OK(ret, "rbtree_add_and_remove"); + ASSERT_OK(opts.retval, "rbtree_add_and_remove retval"); + ASSERT_EQ(skel->data->removed_key, 5, "rbtree_add_and_remove first removed key"); + + rbtree__destroy(skel); +} + +static void test_rbtree_first_and_remove(void) +{ + LIBBPF_OPTS(bpf_test_run_opts, opts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 1, + ); + struct rbtree *skel; + int ret; + + skel = rbtree__open_and_load(); + if (!ASSERT_OK_PTR(skel, "rbtree__open_and_load")) + return; + + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_first_and_remove), &opts); + ASSERT_OK(ret, "rbtree_first_and_remove"); + ASSERT_OK(opts.retval, "rbtree_first_and_remove retval"); + ASSERT_EQ(skel->data->first_data[0], 2, "rbtree_first_and_remove first rbtree_first()"); + ASSERT_EQ(skel->data->removed_key, 1, "rbtree_first_and_remove first removed key"); + ASSERT_EQ(skel->data->first_data[1], 4, "rbtree_first_and_remove second rbtree_first()"); + + rbtree__destroy(skel); +} + +void test_rbtree_success(void) +{ + if (test__start_subtest("rbtree_add_nodes")) + test_rbtree_add_nodes(); + if (test__start_subtest("rbtree_add_and_remove")) + test_rbtree_add_and_remove(); + if (test__start_subtest("rbtree_first_and_remove")) + test_rbtree_first_and_remove(); +} + +#define BTF_FAIL_TEST(suffix) \ +void test_rbtree_btf_fail__##suffix(void) \ +{ \ + struct rbtree_btf_fail__##suffix *skel; \ + \ + skel = rbtree_btf_fail__##suffix##__open_and_load(); \ + if (!ASSERT_ERR_PTR(skel, \ + "rbtree_btf_fail__" #suffix "__open_and_load unexpected success")) \ + rbtree_btf_fail__##suffix##__destroy(skel); \ +} + +#define RUN_BTF_FAIL_TEST(suffix) \ + if (test__start_subtest("rbtree_btf_fail__" #suffix)) \ + test_rbtree_btf_fail__##suffix(); + +BTF_FAIL_TEST(wrong_node_type); +BTF_FAIL_TEST(add_wrong_type); + +void test_rbtree_btf_fail(void) +{ + RUN_BTF_FAIL_TEST(wrong_node_type); + RUN_BTF_FAIL_TEST(add_wrong_type); +} + +void test_rbtree_fail(void) +{ + RUN_TESTS(rbtree_fail); +} diff --git a/tools/testing/selftests/bpf/progs/rbtree.c b/tools/testing/selftests/bpf/progs/rbtree.c new file mode 100644 index 000000000000..e5db1a4287e5 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/rbtree.c @@ -0,0 +1,176 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. 
*/ + +#include +#include +#include +#include +#include "bpf_experimental.h" + +struct node_data { + long key; + long data; + struct bpf_rb_node node; +}; + +long less_callback_ran = -1; +long removed_key = -1; +long first_data[2] = {-1, -1}; + +#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8))) +private(A) struct bpf_spin_lock glock; +private(A) struct bpf_rb_root groot __contains(node_data, node); + +static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b) +{ + struct node_data *node_a; + struct node_data *node_b; + + node_a = container_of(a, struct node_data, node); + node_b = container_of(b, struct node_data, node); + less_callback_ran = 1; + + return node_a->key < node_b->key; +} + +static long __add_three(struct bpf_rb_root *root, struct bpf_spin_lock *lock) +{ + struct node_data *n, *m; + + n = bpf_obj_new(typeof(*n)); + if (!n) + return 1; + n->key = 5; + + m = bpf_obj_new(typeof(*m)); + if (!m) { + bpf_obj_drop(n); + return 2; + } + m->key = 1; + + bpf_spin_lock(&glock); + bpf_rbtree_add(&groot, &n->node, less); + bpf_rbtree_add(&groot, &m->node, less); + bpf_spin_unlock(&glock); + + n = bpf_obj_new(typeof(*n)); + if (!n) + return 3; + n->key = 3; + + bpf_spin_lock(&glock); + bpf_rbtree_add(&groot, &n->node, less); + bpf_spin_unlock(&glock); + return 0; +} + +SEC("tc") +long rbtree_add_nodes(void *ctx) +{ + return __add_three(&groot, &glock); +} + +SEC("tc") +long rbtree_add_and_remove(void *ctx) +{ + struct bpf_rb_node *res = NULL; + struct node_data *n, *m; + + n = bpf_obj_new(typeof(*n)); + if (!n) + goto err_out; + n->key = 5; + + m = bpf_obj_new(typeof(*m)); + if (!m) + goto err_out; + m->key = 3; + + bpf_spin_lock(&glock); + bpf_rbtree_add(&groot, &n->node, less); + bpf_rbtree_add(&groot, &m->node, less); + res = bpf_rbtree_remove(&groot, &n->node); + bpf_spin_unlock(&glock); + + n = container_of(res, struct node_data, node); + removed_key = n->key; + + bpf_obj_drop(n); + + return 0; +err_out: + if (n) + bpf_obj_drop(n); + if (m) + bpf_obj_drop(m); + return 1; +} + +SEC("tc") +long rbtree_first_and_remove(void *ctx) +{ + struct bpf_rb_node *res = NULL; + struct node_data *n, *m, *o; + + n = bpf_obj_new(typeof(*n)); + if (!n) + return 1; + n->key = 3; + n->data = 4; + + m = bpf_obj_new(typeof(*m)); + if (!m) + goto err_out; + m->key = 5; + m->data = 6; + + o = bpf_obj_new(typeof(*o)); + if (!o) + goto err_out; + o->key = 1; + o->data = 2; + + bpf_spin_lock(&glock); + bpf_rbtree_add(&groot, &n->node, less); + bpf_rbtree_add(&groot, &m->node, less); + bpf_rbtree_add(&groot, &o->node, less); + + res = bpf_rbtree_first(&groot); + if (!res) { + bpf_spin_unlock(&glock); + return 2; + } + + o = container_of(res, struct node_data, node); + first_data[0] = o->data; + + res = bpf_rbtree_remove(&groot, &o->node); + bpf_spin_unlock(&glock); + + o = container_of(res, struct node_data, node); + removed_key = o->key; + + bpf_obj_drop(o); + + bpf_spin_lock(&glock); + res = bpf_rbtree_first(&groot); + if (!res) { + bpf_spin_unlock(&glock); + return 3; + } + + o = container_of(res, struct node_data, node); + first_data[1] = o->data; + bpf_spin_unlock(&glock); + + return 0; +err_out: + if (n) + bpf_obj_drop(n); + if (m) + bpf_obj_drop(m); + return 1; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/rbtree_btf_fail__add_wrong_type.c b/tools/testing/selftests/bpf/progs/rbtree_btf_fail__add_wrong_type.c new file mode 100644 index 000000000000..60079b202c07 --- /dev/null +++ 
b/tools/testing/selftests/bpf/progs/rbtree_btf_fail__add_wrong_type.c @@ -0,0 +1,52 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */ + +#include +#include +#include +#include +#include "bpf_experimental.h" + +struct node_data { + int key; + int data; + struct bpf_rb_node node; +}; + +struct node_data2 { + int key; + struct bpf_rb_node node; + int data; +}; + +static bool less2(struct bpf_rb_node *a, const struct bpf_rb_node *b) +{ + struct node_data2 *node_a; + struct node_data2 *node_b; + + node_a = container_of(a, struct node_data2, node); + node_b = container_of(b, struct node_data2, node); + + return node_a->key < node_b->key; +} + +#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8))) +private(A) struct bpf_spin_lock glock; +private(A) struct bpf_rb_root groot __contains(node_data, node); + +SEC("tc") +long rbtree_api_add__add_wrong_type(void *ctx) +{ + struct node_data2 *n; + + n = bpf_obj_new(typeof(*n)); + if (!n) + return 1; + + bpf_spin_lock(&glock); + bpf_rbtree_add(&groot, &n->node, less2); + bpf_spin_unlock(&glock); + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/rbtree_btf_fail__wrong_node_type.c b/tools/testing/selftests/bpf/progs/rbtree_btf_fail__wrong_node_type.c new file mode 100644 index 000000000000..340f97da1084 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/rbtree_btf_fail__wrong_node_type.c @@ -0,0 +1,49 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */ + +#include +#include +#include +#include +#include "bpf_experimental.h" + +/* BTF load should fail as bpf_rb_root __contains this type and points to + * 'node', but 'node' is not a bpf_rb_node + */ +struct node_data { + int key; + int data; + struct bpf_list_node node; +}; + +static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b) +{ + struct node_data *node_a; + struct node_data *node_b; + + node_a = container_of(a, struct node_data, node); + node_b = container_of(b, struct node_data, node); + + return node_a->key < node_b->key; +} + +#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8))) +private(A) struct bpf_spin_lock glock; +private(A) struct bpf_rb_root groot __contains(node_data, node); + +SEC("tc") +long rbtree_api_add__wrong_node_type(void *ctx) +{ + struct node_data *n; + + n = bpf_obj_new(typeof(*n)); + if (!n) + return 1; + + bpf_spin_lock(&glock); + bpf_rbtree_first(&groot); + bpf_spin_unlock(&glock); + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/rbtree_fail.c b/tools/testing/selftests/bpf/progs/rbtree_fail.c new file mode 100644 index 000000000000..bf3cba115897 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/rbtree_fail.c @@ -0,0 +1,322 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include "bpf_experimental.h" +#include "bpf_misc.h" + +struct node_data { + long key; + long data; + struct bpf_rb_node node; +}; + +#define private(name) SEC(".data." 
#name) __hidden __attribute__((aligned(8))) +private(A) struct bpf_spin_lock glock; +private(A) struct bpf_rb_root groot __contains(node_data, node); +private(A) struct bpf_rb_root groot2 __contains(node_data, node); + +static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b) +{ + struct node_data *node_a; + struct node_data *node_b; + + node_a = container_of(a, struct node_data, node); + node_b = container_of(b, struct node_data, node); + + return node_a->key < node_b->key; +} + +SEC("?tc") +__failure __msg("bpf_spin_lock at off=16 must be held for bpf_rb_root") +long rbtree_api_nolock_add(void *ctx) +{ + struct node_data *n; + + n = bpf_obj_new(typeof(*n)); + if (!n) + return 1; + + bpf_rbtree_add(&groot, &n->node, less); + return 0; +} + +SEC("?tc") +__failure __msg("bpf_spin_lock at off=16 must be held for bpf_rb_root") +long rbtree_api_nolock_remove(void *ctx) +{ + struct node_data *n; + + n = bpf_obj_new(typeof(*n)); + if (!n) + return 1; + + bpf_spin_lock(&glock); + bpf_rbtree_add(&groot, &n->node, less); + bpf_spin_unlock(&glock); + + bpf_rbtree_remove(&groot, &n->node); + return 0; +} + +SEC("?tc") +__failure __msg("bpf_spin_lock at off=16 must be held for bpf_rb_root") +long rbtree_api_nolock_first(void *ctx) +{ + bpf_rbtree_first(&groot); + return 0; +} + +SEC("?tc") +__failure __msg("rbtree_remove node input must be non-owning ref") +long rbtree_api_remove_unadded_node(void *ctx) +{ + struct node_data *n, *m; + struct bpf_rb_node *res; + + n = bpf_obj_new(typeof(*n)); + if (!n) + return 1; + + m = bpf_obj_new(typeof(*m)); + if (!m) { + bpf_obj_drop(n); + return 1; + } + + bpf_spin_lock(&glock); + bpf_rbtree_add(&groot, &n->node, less); + + /* This remove should pass verifier */ + res = bpf_rbtree_remove(&groot, &n->node); + n = container_of(res, struct node_data, node); + + /* This remove shouldn't, m isn't in an rbtree */ + res = bpf_rbtree_remove(&groot, &m->node); + m = container_of(res, struct node_data, node); + bpf_spin_unlock(&glock); + + if (n) + bpf_obj_drop(n); + if (m) + bpf_obj_drop(m); + return 0; +} + +SEC("?tc") +__failure __msg("Unreleased reference id=2 alloc_insn=11") +long rbtree_api_remove_no_drop(void *ctx) +{ + struct bpf_rb_node *res; + struct node_data *n; + + bpf_spin_lock(&glock); + res = bpf_rbtree_first(&groot); + if (!res) + goto unlock_err; + + res = bpf_rbtree_remove(&groot, res); + + n = container_of(res, struct node_data, node); + bpf_spin_unlock(&glock); + + /* bpf_obj_drop(n) is missing here */ + return 0; + +unlock_err: + bpf_spin_unlock(&glock); + return 1; +} + +SEC("?tc") +__failure __msg("arg#1 expected pointer to allocated object") +long rbtree_api_add_to_multiple_trees(void *ctx) +{ + struct node_data *n; + + n = bpf_obj_new(typeof(*n)); + if (!n) + return 1; + + bpf_spin_lock(&glock); + bpf_rbtree_add(&groot, &n->node, less); + + /* This add should fail since n already in groot's tree */ + bpf_rbtree_add(&groot2, &n->node, less); + bpf_spin_unlock(&glock); + return 0; +} + +SEC("?tc") +__failure __msg("rbtree_remove node input must be non-owning ref") +long rbtree_api_add_release_unlock_escape(void *ctx) +{ + struct node_data *n; + + n = bpf_obj_new(typeof(*n)); + if (!n) + return 1; + + bpf_spin_lock(&glock); + bpf_rbtree_add(&groot, &n->node, less); + bpf_spin_unlock(&glock); + + bpf_spin_lock(&glock); + /* After add() in previous critical section, n should be + * release_on_unlock and released after previous spin_unlock, + * so should not be possible to use it here + */ + bpf_rbtree_remove(&groot, &n->node); + 
bpf_spin_unlock(&glock); + return 0; +} + +SEC("?tc") +__failure __msg("rbtree_remove node input must be non-owning ref") +long rbtree_api_release_aliasing(void *ctx) +{ + struct node_data *n, *m, *o; + struct bpf_rb_node *res; + + n = bpf_obj_new(typeof(*n)); + if (!n) + return 1; + + bpf_spin_lock(&glock); + bpf_rbtree_add(&groot, &n->node, less); + bpf_spin_unlock(&glock); + + bpf_spin_lock(&glock); + + /* m and o point to the same node, + * but verifier doesn't know this + */ + res = bpf_rbtree_first(&groot); + if (!res) + return 1; + o = container_of(res, struct node_data, node); + + res = bpf_rbtree_first(&groot); + if (!res) + return 1; + m = container_of(res, struct node_data, node); + + bpf_rbtree_remove(&groot, &m->node); + /* This second remove shouldn't be possible. Retval of previous + * remove returns owning reference to m, which is the same + * node o's non-owning ref is pointing at + * + * In order to preserve property + * * owning ref must not be in rbtree + * * non-owning ref must be in rbtree + * + * o's ref must be invalidated after previous remove. Otherwise + * we'd have non-owning ref to node that isn't in rbtree, and + * verifier wouldn't be able to use type system to prevent remove + * of ref that already isn't in any tree. Would have to do runtime + * checks in that case. + */ + bpf_rbtree_remove(&groot, &o->node); + + bpf_spin_unlock(&glock); + return 0; +} + +SEC("?tc") +__failure __msg("rbtree_remove node input must be non-owning ref") +long rbtree_api_first_release_unlock_escape(void *ctx) +{ + struct bpf_rb_node *res; + struct node_data *n; + + bpf_spin_lock(&glock); + res = bpf_rbtree_first(&groot); + if (res) + n = container_of(res, struct node_data, node); + bpf_spin_unlock(&glock); + + bpf_spin_lock(&glock); + /* After first() in previous critical section, n should be + * release_on_unlock and released after previous spin_unlock, + * so should not be possible to use it here + */ + bpf_rbtree_remove(&groot, &n->node); + bpf_spin_unlock(&glock); + return 0; +} + +static bool less__bad_fn_call_add(struct bpf_rb_node *a, const struct bpf_rb_node *b) +{ + struct node_data *node_a; + struct node_data *node_b; + + node_a = container_of(a, struct node_data, node); + node_b = container_of(b, struct node_data, node); + bpf_rbtree_add(&groot, &node_a->node, less); + + return node_a->key < node_b->key; +} + +static bool less__bad_fn_call_remove(struct bpf_rb_node *a, const struct bpf_rb_node *b) +{ + struct node_data *node_a; + struct node_data *node_b; + + node_a = container_of(a, struct node_data, node); + node_b = container_of(b, struct node_data, node); + bpf_rbtree_remove(&groot, &node_a->node); + + return node_a->key < node_b->key; +} + +static bool less__bad_fn_call_first_unlock_after(struct bpf_rb_node *a, const struct bpf_rb_node *b) +{ + struct node_data *node_a; + struct node_data *node_b; + + node_a = container_of(a, struct node_data, node); + node_b = container_of(b, struct node_data, node); + bpf_rbtree_first(&groot); + bpf_spin_unlock(&glock); + + return node_a->key < node_b->key; +} + +static __always_inline +long add_with_cb(bool (cb)(struct bpf_rb_node *a, const struct bpf_rb_node *b)) +{ + struct node_data *n; + + n = bpf_obj_new(typeof(*n)); + if (!n) + return 1; + + bpf_spin_lock(&glock); + bpf_rbtree_add(&groot, &n->node, cb); + bpf_spin_unlock(&glock); + return 0; +} + +SEC("?tc") +__failure __msg("arg#1 expected pointer to allocated object") +long rbtree_api_add_bad_cb_bad_fn_call_add(void *ctx) +{ + return add_with_cb(less__bad_fn_call_add); 
+} + +SEC("?tc") +__failure __msg("rbtree_remove not allowed in rbtree cb") +long rbtree_api_add_bad_cb_bad_fn_call_remove(void *ctx) +{ + return add_with_cb(less__bad_fn_call_remove); +} + +SEC("?tc") +__failure __msg("can't spin_{lock,unlock} in rbtree cb") +long rbtree_api_add_bad_cb_bad_fn_call_first_unlock_after(void *ctx) +{ + return add_with_cb(less__bad_fn_call_first_unlock_after); +} + +char _license[] SEC("license") = "GPL"; -- cgit v1.2.3 From 8032cad1030279066ce4a1f82b76d0fe7eb578e2 Mon Sep 17 00:00:00 2001 From: Joanne Koong Date: Mon, 13 Feb 2023 21:13:31 -0800 Subject: selftests/bpf: Clean up user_ringbuf, cgrp_kfunc, kfunc_dynptr_param tests Clean up user_ringbuf, cgrp_kfunc, and kfunc_dynptr_param tests to use the generic verification tester for checking verifier rejections. The generic verification tester uses btf_decl_tag-based annotations for verifying that the tests fail with the expected log messages. Signed-off-by: Joanne Koong Acked-by: David Vernet Reviewed-by: Roberto Sassu Link: https://lore.kernel.org/r/20230214051332.4007131-1-joannelkoong@gmail.com Signed-off-by: Alexei Starovoitov --- .../testing/selftests/bpf/prog_tests/cgrp_kfunc.c | 69 +-------------------- .../selftests/bpf/prog_tests/kfunc_dynptr_param.c | 72 +++++----------------- .../selftests/bpf/prog_tests/user_ringbuf.c | 62 +------------------ .../selftests/bpf/progs/cgrp_kfunc_failure.c | 17 ++++- .../selftests/bpf/progs/test_kfunc_dynptr_param.c | 4 ++ .../selftests/bpf/progs/user_ringbuf_fail.c | 31 +++++++--- 6 files changed, 58 insertions(+), 197 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/prog_tests/cgrp_kfunc.c b/tools/testing/selftests/bpf/prog_tests/cgrp_kfunc.c index f3bb0e16e088..b3f7985c8504 100644 --- a/tools/testing/selftests/bpf/prog_tests/cgrp_kfunc.c +++ b/tools/testing/selftests/bpf/prog_tests/cgrp_kfunc.c @@ -8,9 +8,6 @@ #include "cgrp_kfunc_failure.skel.h" #include "cgrp_kfunc_success.skel.h" -static size_t log_buf_sz = 1 << 20; /* 1 MB */ -static char obj_log_buf[1048576]; - static struct cgrp_kfunc_success *open_load_cgrp_kfunc_skel(void) { struct cgrp_kfunc_success *skel; @@ -89,65 +86,6 @@ static const char * const success_tests[] = { "test_cgrp_get_ancestors", }; -static struct { - const char *prog_name; - const char *expected_err_msg; -} failure_tests[] = { - {"cgrp_kfunc_acquire_untrusted", "Possibly NULL pointer passed to trusted arg0"}, - {"cgrp_kfunc_acquire_fp", "arg#0 pointer type STRUCT cgroup must point"}, - {"cgrp_kfunc_acquire_unsafe_kretprobe", "reg type unsupported for arg#0 function"}, - {"cgrp_kfunc_acquire_trusted_walked", "R1 must be referenced or trusted"}, - {"cgrp_kfunc_acquire_null", "Possibly NULL pointer passed to trusted arg0"}, - {"cgrp_kfunc_acquire_unreleased", "Unreleased reference"}, - {"cgrp_kfunc_get_non_kptr_param", "arg#0 expected pointer to map value"}, - {"cgrp_kfunc_get_non_kptr_acquired", "arg#0 expected pointer to map value"}, - {"cgrp_kfunc_get_null", "arg#0 expected pointer to map value"}, - {"cgrp_kfunc_xchg_unreleased", "Unreleased reference"}, - {"cgrp_kfunc_get_unreleased", "Unreleased reference"}, - {"cgrp_kfunc_release_untrusted", "arg#0 is untrusted_ptr_or_null_ expected ptr_ or socket"}, - {"cgrp_kfunc_release_fp", "arg#0 pointer type STRUCT cgroup must point"}, - {"cgrp_kfunc_release_null", "arg#0 is ptr_or_null_ expected ptr_ or socket"}, - {"cgrp_kfunc_release_unacquired", "release kernel function bpf_cgroup_release expects"}, -}; - -static void verify_fail(const char *prog_name, const char 
*expected_err_msg) -{ - LIBBPF_OPTS(bpf_object_open_opts, opts); - struct cgrp_kfunc_failure *skel; - int err, i; - - opts.kernel_log_buf = obj_log_buf; - opts.kernel_log_size = log_buf_sz; - opts.kernel_log_level = 1; - - skel = cgrp_kfunc_failure__open_opts(&opts); - if (!ASSERT_OK_PTR(skel, "cgrp_kfunc_failure__open_opts")) - goto cleanup; - - for (i = 0; i < ARRAY_SIZE(failure_tests); i++) { - struct bpf_program *prog; - const char *curr_name = failure_tests[i].prog_name; - - prog = bpf_object__find_program_by_name(skel->obj, curr_name); - if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name")) - goto cleanup; - - bpf_program__set_autoload(prog, !strcmp(curr_name, prog_name)); - } - - err = cgrp_kfunc_failure__load(skel); - if (!ASSERT_ERR(err, "unexpected load success")) - goto cleanup; - - if (!ASSERT_OK_PTR(strstr(obj_log_buf, expected_err_msg), "expected_err_msg")) { - fprintf(stderr, "Expected err_msg: %s\n", expected_err_msg); - fprintf(stderr, "Verifier output: %s\n", obj_log_buf); - } - -cleanup: - cgrp_kfunc_failure__destroy(skel); -} - void test_cgrp_kfunc(void) { int i, err; @@ -163,12 +101,7 @@ void test_cgrp_kfunc(void) run_success_test(success_tests[i]); } - for (i = 0; i < ARRAY_SIZE(failure_tests); i++) { - if (!test__start_subtest(failure_tests[i].prog_name)) - continue; - - verify_fail(failure_tests[i].prog_name, failure_tests[i].expected_err_msg); - } + RUN_TESTS(cgrp_kfunc_failure); cleanup: cleanup_cgroup_environment(); diff --git a/tools/testing/selftests/bpf/prog_tests/kfunc_dynptr_param.c b/tools/testing/selftests/bpf/prog_tests/kfunc_dynptr_param.c index 72800b1e8395..8cd298b78e44 100644 --- a/tools/testing/selftests/bpf/prog_tests/kfunc_dynptr_param.c +++ b/tools/testing/selftests/bpf/prog_tests/kfunc_dynptr_param.c @@ -10,17 +10,11 @@ #include #include "test_kfunc_dynptr_param.skel.h" -static size_t log_buf_sz = 1048576; /* 1 MB */ -static char obj_log_buf[1048576]; - static struct { const char *prog_name; - const char *expected_verifier_err_msg; int expected_runtime_err; } kfunc_dynptr_tests[] = { - {"not_valid_dynptr", "cannot pass in dynptr at an offset=-8", 0}, - {"not_ptr_to_stack", "arg#0 expected pointer to stack or dynptr_ptr", 0}, - {"dynptr_data_null", NULL, -EBADMSG}, + {"dynptr_data_null", -EBADMSG}, }; static bool kfunc_not_supported; @@ -38,29 +32,15 @@ static int libbpf_print_cb(enum libbpf_print_level level, const char *fmt, return 0; } -static void verify_fail(const char *prog_name, const char *expected_err_msg) +static bool has_pkcs7_kfunc_support(void) { struct test_kfunc_dynptr_param *skel; - LIBBPF_OPTS(bpf_object_open_opts, opts); libbpf_print_fn_t old_print_cb; - struct bpf_program *prog; int err; - opts.kernel_log_buf = obj_log_buf; - opts.kernel_log_size = log_buf_sz; - opts.kernel_log_level = 1; - - skel = test_kfunc_dynptr_param__open_opts(&opts); - if (!ASSERT_OK_PTR(skel, "test_kfunc_dynptr_param__open_opts")) - goto cleanup; - - prog = bpf_object__find_program_by_name(skel->obj, prog_name); - if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name")) - goto cleanup; - - bpf_program__set_autoload(prog, true); - - bpf_map__set_max_entries(skel->maps.ringbuf, getpagesize()); + skel = test_kfunc_dynptr_param__open(); + if (!ASSERT_OK_PTR(skel, "test_kfunc_dynptr_param__open")) + return false; kfunc_not_supported = false; @@ -72,26 +52,18 @@ static void verify_fail(const char *prog_name, const char *expected_err_msg) fprintf(stderr, "%s:SKIP:bpf_verify_pkcs7_signature() kfunc not supported\n", __func__); - test__skip(); - 
goto cleanup; - } - - if (!ASSERT_ERR(err, "unexpected load success")) - goto cleanup; - - if (!ASSERT_OK_PTR(strstr(obj_log_buf, expected_err_msg), "expected_err_msg")) { - fprintf(stderr, "Expected err_msg: %s\n", expected_err_msg); - fprintf(stderr, "Verifier output: %s\n", obj_log_buf); + test_kfunc_dynptr_param__destroy(skel); + return false; } -cleanup: test_kfunc_dynptr_param__destroy(skel); + + return true; } static void verify_success(const char *prog_name, int expected_runtime_err) { struct test_kfunc_dynptr_param *skel; - libbpf_print_fn_t old_print_cb; struct bpf_program *prog; struct bpf_link *link; __u32 next_id; @@ -103,21 +75,7 @@ static void verify_success(const char *prog_name, int expected_runtime_err) skel->bss->pid = getpid(); - bpf_map__set_max_entries(skel->maps.ringbuf, getpagesize()); - - kfunc_not_supported = false; - - old_print_cb = libbpf_set_print(libbpf_print_cb); err = test_kfunc_dynptr_param__load(skel); - libbpf_set_print(old_print_cb); - - if (err < 0 && kfunc_not_supported) { - fprintf(stderr, - "%s:SKIP:bpf_verify_pkcs7_signature() kfunc not supported\n", - __func__); - test__skip(); - goto cleanup; - } if (!ASSERT_OK(err, "test_kfunc_dynptr_param__load")) goto cleanup; @@ -147,15 +105,15 @@ void test_kfunc_dynptr_param(void) { int i; + if (!has_pkcs7_kfunc_support()) + return; + for (i = 0; i < ARRAY_SIZE(kfunc_dynptr_tests); i++) { if (!test__start_subtest(kfunc_dynptr_tests[i].prog_name)) continue; - if (kfunc_dynptr_tests[i].expected_verifier_err_msg) - verify_fail(kfunc_dynptr_tests[i].prog_name, - kfunc_dynptr_tests[i].expected_verifier_err_msg); - else - verify_success(kfunc_dynptr_tests[i].prog_name, - kfunc_dynptr_tests[i].expected_runtime_err); + verify_success(kfunc_dynptr_tests[i].prog_name, + kfunc_dynptr_tests[i].expected_runtime_err); } + RUN_TESTS(test_kfunc_dynptr_param); } diff --git a/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c b/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c index dae68de285b9..3a13e102c149 100644 --- a/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c +++ b/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c @@ -19,8 +19,6 @@ #include "../progs/test_user_ringbuf.h" -static size_t log_buf_sz = 1 << 20; /* 1 MB */ -static char obj_log_buf[1048576]; static const long c_sample_size = sizeof(struct sample) + BPF_RINGBUF_HDR_SZ; static const long c_ringbuf_size = 1 << 12; /* 1 small page */ static const long c_max_entries = c_ringbuf_size / c_sample_size; @@ -663,23 +661,6 @@ cleanup: user_ringbuf_success__destroy(skel); } -static struct { - const char *prog_name; - const char *expected_err_msg; -} failure_tests[] = { - /* failure cases */ - {"user_ringbuf_callback_bad_access1", "negative offset dynptr_ptr ptr"}, - {"user_ringbuf_callback_bad_access2", "dereference of modified dynptr_ptr ptr"}, - {"user_ringbuf_callback_write_forbidden", "invalid mem access 'dynptr_ptr'"}, - {"user_ringbuf_callback_null_context_write", "invalid mem access 'scalar'"}, - {"user_ringbuf_callback_null_context_read", "invalid mem access 'scalar'"}, - {"user_ringbuf_callback_discard_dynptr", "cannot release unowned const bpf_dynptr"}, - {"user_ringbuf_callback_submit_dynptr", "cannot release unowned const bpf_dynptr"}, - {"user_ringbuf_callback_invalid_return", "At callback return the register R0 has value"}, - {"user_ringbuf_callback_reinit_dynptr_mem", "Dynptr has to be an uninitialized dynptr"}, - {"user_ringbuf_callback_reinit_dynptr_ringbuf", "Dynptr has to be an uninitialized dynptr"}, -}; - #define 
SUCCESS_TEST(_func) { _func, #_func } static struct { @@ -700,42 +681,6 @@ static struct { SUCCESS_TEST(test_user_ringbuf_blocking_reserve), }; -static void verify_fail(const char *prog_name, const char *expected_err_msg) -{ - LIBBPF_OPTS(bpf_object_open_opts, opts); - struct bpf_program *prog; - struct user_ringbuf_fail *skel; - int err; - - opts.kernel_log_buf = obj_log_buf; - opts.kernel_log_size = log_buf_sz; - opts.kernel_log_level = 1; - - skel = user_ringbuf_fail__open_opts(&opts); - if (!ASSERT_OK_PTR(skel, "dynptr_fail__open_opts")) - goto cleanup; - - prog = bpf_object__find_program_by_name(skel->obj, prog_name); - if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name")) - goto cleanup; - - bpf_program__set_autoload(prog, true); - - bpf_map__set_max_entries(skel->maps.user_ringbuf, getpagesize()); - - err = user_ringbuf_fail__load(skel); - if (!ASSERT_ERR(err, "unexpected load success")) - goto cleanup; - - if (!ASSERT_OK_PTR(strstr(obj_log_buf, expected_err_msg), "expected_err_msg")) { - fprintf(stderr, "Expected err_msg: %s\n", expected_err_msg); - fprintf(stderr, "Verifier output: %s\n", obj_log_buf); - } - -cleanup: - user_ringbuf_fail__destroy(skel); -} - void test_user_ringbuf(void) { int i; @@ -747,10 +692,5 @@ void test_user_ringbuf(void) success_tests[i].test_callback(); } - for (i = 0; i < ARRAY_SIZE(failure_tests); i++) { - if (!test__start_subtest(failure_tests[i].prog_name)) - continue; - - verify_fail(failure_tests[i].prog_name, failure_tests[i].expected_err_msg); - } + RUN_TESTS(user_ringbuf_fail); } diff --git a/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c b/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c index a1369b5ebcf8..4ad7fe24966d 100644 --- a/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c +++ b/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c @@ -5,6 +5,7 @@ #include #include +#include "bpf_misc.h" #include "cgrp_kfunc_common.h" char _license[] SEC("license") = "GPL"; @@ -28,6 +29,7 @@ static struct __cgrps_kfunc_map_value *insert_lookup_cgrp(struct cgroup *cgrp) } SEC("tp_btf/cgroup_mkdir") +__failure __msg("Possibly NULL pointer passed to trusted arg0") int BPF_PROG(cgrp_kfunc_acquire_untrusted, struct cgroup *cgrp, const char *path) { struct cgroup *acquired; @@ -45,6 +47,7 @@ int BPF_PROG(cgrp_kfunc_acquire_untrusted, struct cgroup *cgrp, const char *path } SEC("tp_btf/cgroup_mkdir") +__failure __msg("arg#0 pointer type STRUCT cgroup must point") int BPF_PROG(cgrp_kfunc_acquire_fp, struct cgroup *cgrp, const char *path) { struct cgroup *acquired, *stack_cgrp = (struct cgroup *)&path; @@ -57,6 +60,7 @@ int BPF_PROG(cgrp_kfunc_acquire_fp, struct cgroup *cgrp, const char *path) } SEC("kretprobe/cgroup_destroy_locked") +__failure __msg("reg type unsupported for arg#0 function") int BPF_PROG(cgrp_kfunc_acquire_unsafe_kretprobe, struct cgroup *cgrp) { struct cgroup *acquired; @@ -69,6 +73,7 @@ int BPF_PROG(cgrp_kfunc_acquire_unsafe_kretprobe, struct cgroup *cgrp) } SEC("tp_btf/cgroup_mkdir") +__failure __msg("cgrp_kfunc_acquire_trusted_walked") int BPF_PROG(cgrp_kfunc_acquire_trusted_walked, struct cgroup *cgrp, const char *path) { struct cgroup *acquired; @@ -80,8 +85,8 @@ int BPF_PROG(cgrp_kfunc_acquire_trusted_walked, struct cgroup *cgrp, const char return 0; } - SEC("tp_btf/cgroup_mkdir") +__failure __msg("Possibly NULL pointer passed to trusted arg0") int BPF_PROG(cgrp_kfunc_acquire_null, struct cgroup *cgrp, const char *path) { struct cgroup *acquired; @@ -96,6 +101,7 @@ int BPF_PROG(cgrp_kfunc_acquire_null, struct 
cgroup *cgrp, const char *path) } SEC("tp_btf/cgroup_mkdir") +__failure __msg("Unreleased reference") int BPF_PROG(cgrp_kfunc_acquire_unreleased, struct cgroup *cgrp, const char *path) { struct cgroup *acquired; @@ -108,6 +114,7 @@ int BPF_PROG(cgrp_kfunc_acquire_unreleased, struct cgroup *cgrp, const char *pat } SEC("tp_btf/cgroup_mkdir") +__failure __msg("arg#0 expected pointer to map value") int BPF_PROG(cgrp_kfunc_get_non_kptr_param, struct cgroup *cgrp, const char *path) { struct cgroup *kptr; @@ -123,6 +130,7 @@ int BPF_PROG(cgrp_kfunc_get_non_kptr_param, struct cgroup *cgrp, const char *pat } SEC("tp_btf/cgroup_mkdir") +__failure __msg("arg#0 expected pointer to map value") int BPF_PROG(cgrp_kfunc_get_non_kptr_acquired, struct cgroup *cgrp, const char *path) { struct cgroup *kptr, *acquired; @@ -141,6 +149,7 @@ int BPF_PROG(cgrp_kfunc_get_non_kptr_acquired, struct cgroup *cgrp, const char * } SEC("tp_btf/cgroup_mkdir") +__failure __msg("arg#0 expected pointer to map value") int BPF_PROG(cgrp_kfunc_get_null, struct cgroup *cgrp, const char *path) { struct cgroup *kptr; @@ -156,6 +165,7 @@ int BPF_PROG(cgrp_kfunc_get_null, struct cgroup *cgrp, const char *path) } SEC("tp_btf/cgroup_mkdir") +__failure __msg("Unreleased reference") int BPF_PROG(cgrp_kfunc_xchg_unreleased, struct cgroup *cgrp, const char *path) { struct cgroup *kptr; @@ -175,6 +185,7 @@ int BPF_PROG(cgrp_kfunc_xchg_unreleased, struct cgroup *cgrp, const char *path) } SEC("tp_btf/cgroup_mkdir") +__failure __msg("Unreleased reference") int BPF_PROG(cgrp_kfunc_get_unreleased, struct cgroup *cgrp, const char *path) { struct cgroup *kptr; @@ -194,6 +205,7 @@ int BPF_PROG(cgrp_kfunc_get_unreleased, struct cgroup *cgrp, const char *path) } SEC("tp_btf/cgroup_mkdir") +__failure __msg("arg#0 is untrusted_ptr_or_null_ expected ptr_ or socket") int BPF_PROG(cgrp_kfunc_release_untrusted, struct cgroup *cgrp, const char *path) { struct __cgrps_kfunc_map_value *v; @@ -209,6 +221,7 @@ int BPF_PROG(cgrp_kfunc_release_untrusted, struct cgroup *cgrp, const char *path } SEC("tp_btf/cgroup_mkdir") +__failure __msg("arg#0 pointer type STRUCT cgroup must point") int BPF_PROG(cgrp_kfunc_release_fp, struct cgroup *cgrp, const char *path) { struct cgroup *acquired = (struct cgroup *)&path; @@ -220,6 +233,7 @@ int BPF_PROG(cgrp_kfunc_release_fp, struct cgroup *cgrp, const char *path) } SEC("tp_btf/cgroup_mkdir") +__failure __msg("arg#0 is ptr_or_null_ expected ptr_ or socket") int BPF_PROG(cgrp_kfunc_release_null, struct cgroup *cgrp, const char *path) { struct __cgrps_kfunc_map_value local, *v; @@ -251,6 +265,7 @@ int BPF_PROG(cgrp_kfunc_release_null, struct cgroup *cgrp, const char *path) } SEC("tp_btf/cgroup_mkdir") +__failure __msg("release kernel function bpf_cgroup_release expects") int BPF_PROG(cgrp_kfunc_release_unacquired, struct cgroup *cgrp, const char *path) { /* Cannot release trusted cgroup pointer which was not acquired. 
*/ diff --git a/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c b/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c index f4a8250329b2..2fbef3cc7ad8 100644 --- a/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c +++ b/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c @@ -10,6 +10,7 @@ #include #include #include +#include "bpf_misc.h" extern struct bpf_key *bpf_lookup_system_key(__u64 id) __ksym; extern void bpf_key_put(struct bpf_key *key) __ksym; @@ -19,6 +20,7 @@ extern int bpf_verify_pkcs7_signature(struct bpf_dynptr *data_ptr, struct { __uint(type, BPF_MAP_TYPE_RINGBUF); + __uint(max_entries, 4096); } ringbuf SEC(".maps"); struct { @@ -33,6 +35,7 @@ int err, pid; char _license[] SEC("license") = "GPL"; SEC("?lsm.s/bpf") +__failure __msg("cannot pass in dynptr at an offset=-8") int BPF_PROG(not_valid_dynptr, int cmd, union bpf_attr *attr, unsigned int size) { unsigned long val; @@ -42,6 +45,7 @@ int BPF_PROG(not_valid_dynptr, int cmd, union bpf_attr *attr, unsigned int size) } SEC("?lsm.s/bpf") +__failure __msg("arg#0 expected pointer to stack or dynptr_ptr") int BPF_PROG(not_ptr_to_stack, int cmd, union bpf_attr *attr, unsigned int size) { unsigned long val; diff --git a/tools/testing/selftests/bpf/progs/user_ringbuf_fail.c b/tools/testing/selftests/bpf/progs/user_ringbuf_fail.c index f3201dc69a60..03ee946c6bf7 100644 --- a/tools/testing/selftests/bpf/progs/user_ringbuf_fail.c +++ b/tools/testing/selftests/bpf/progs/user_ringbuf_fail.c @@ -16,6 +16,7 @@ struct sample { struct { __uint(type, BPF_MAP_TYPE_USER_RINGBUF); + __uint(max_entries, 4096); } user_ringbuf SEC(".maps"); struct { @@ -39,7 +40,8 @@ bad_access1(struct bpf_dynptr *dynptr, void *context) /* A callback that accesses a dynptr in a bpf_user_ringbuf_drain callback should * not be able to read before the pointer. */ -SEC("?raw_tp/") +SEC("?raw_tp") +__failure __msg("negative offset dynptr_ptr ptr") int user_ringbuf_callback_bad_access1(void *ctx) { bpf_user_ringbuf_drain(&user_ringbuf, bad_access1, NULL, 0); @@ -61,7 +63,8 @@ bad_access2(struct bpf_dynptr *dynptr, void *context) /* A callback that accesses a dynptr in a bpf_user_ringbuf_drain callback should * not be able to read past the end of the pointer. */ -SEC("?raw_tp/") +SEC("?raw_tp") +__failure __msg("dereference of modified dynptr_ptr ptr") int user_ringbuf_callback_bad_access2(void *ctx) { bpf_user_ringbuf_drain(&user_ringbuf, bad_access2, NULL, 0); @@ -80,7 +83,8 @@ write_forbidden(struct bpf_dynptr *dynptr, void *context) /* A callback that accesses a dynptr in a bpf_user_ringbuf_drain callback should * not be able to write to that pointer. */ -SEC("?raw_tp/") +SEC("?raw_tp") +__failure __msg("invalid mem access 'dynptr_ptr'") int user_ringbuf_callback_write_forbidden(void *ctx) { bpf_user_ringbuf_drain(&user_ringbuf, write_forbidden, NULL, 0); @@ -99,7 +103,8 @@ null_context_write(struct bpf_dynptr *dynptr, void *context) /* A callback that accesses a dynptr in a bpf_user_ringbuf_drain callback should * not be able to write to that pointer. */ -SEC("?raw_tp/") +SEC("?raw_tp") +__failure __msg("invalid mem access 'scalar'") int user_ringbuf_callback_null_context_write(void *ctx) { bpf_user_ringbuf_drain(&user_ringbuf, null_context_write, NULL, 0); @@ -120,7 +125,8 @@ null_context_read(struct bpf_dynptr *dynptr, void *context) /* A callback that accesses a dynptr in a bpf_user_ringbuf_drain callback should * not be able to write to that pointer. 
*/ -SEC("?raw_tp/") +SEC("?raw_tp") +__failure __msg("invalid mem access 'scalar'") int user_ringbuf_callback_null_context_read(void *ctx) { bpf_user_ringbuf_drain(&user_ringbuf, null_context_read, NULL, 0); @@ -139,7 +145,8 @@ try_discard_dynptr(struct bpf_dynptr *dynptr, void *context) /* A callback that accesses a dynptr in a bpf_user_ringbuf_drain callback should * not be able to read past the end of the pointer. */ -SEC("?raw_tp/") +SEC("?raw_tp") +__failure __msg("cannot release unowned const bpf_dynptr") int user_ringbuf_callback_discard_dynptr(void *ctx) { bpf_user_ringbuf_drain(&user_ringbuf, try_discard_dynptr, NULL, 0); @@ -158,7 +165,8 @@ try_submit_dynptr(struct bpf_dynptr *dynptr, void *context) /* A callback that accesses a dynptr in a bpf_user_ringbuf_drain callback should * not be able to read past the end of the pointer. */ -SEC("?raw_tp/") +SEC("?raw_tp") +__failure __msg("cannot release unowned const bpf_dynptr") int user_ringbuf_callback_submit_dynptr(void *ctx) { bpf_user_ringbuf_drain(&user_ringbuf, try_submit_dynptr, NULL, 0); @@ -175,7 +183,8 @@ invalid_drain_callback_return(struct bpf_dynptr *dynptr, void *context) /* A callback that accesses a dynptr in a bpf_user_ringbuf_drain callback should * not be able to write to that pointer. */ -SEC("?raw_tp/") +SEC("?raw_tp") +__failure __msg("At callback return the register R0 has value") int user_ringbuf_callback_invalid_return(void *ctx) { bpf_user_ringbuf_drain(&user_ringbuf, invalid_drain_callback_return, NULL, 0); @@ -197,14 +206,16 @@ try_reinit_dynptr_ringbuf(struct bpf_dynptr *dynptr, void *context) return 0; } -SEC("?raw_tp/") +SEC("?raw_tp") +__failure __msg("Dynptr has to be an uninitialized dynptr") int user_ringbuf_callback_reinit_dynptr_mem(void *ctx) { bpf_user_ringbuf_drain(&user_ringbuf, try_reinit_dynptr_mem, NULL, 0); return 0; } -SEC("?raw_tp/") +SEC("?raw_tp") +__failure __msg("Dynptr has to be an uninitialized dynptr") int user_ringbuf_callback_reinit_dynptr_ringbuf(void *ctx) { bpf_user_ringbuf_drain(&user_ringbuf, try_reinit_dynptr_ringbuf, NULL, 0); -- cgit v1.2.3 From 50a7cedb150a628b54aa7f8ce1e922a72c773273 Mon Sep 17 00:00:00 2001 From: Joanne Koong Date: Mon, 13 Feb 2023 21:13:32 -0800 Subject: selftests/bpf: Clean up dynptr prog_tests Clean up prog_tests/dynptr.c by removing the unneeded "expected_err_msg" in the dynptr_tests struct, which is a remnant from converting the fail tests cases to use the generic verification tester. 
Signed-off-by: Joanne Koong Link: https://lore.kernel.org/r/20230214051332.4007131-2-joannelkoong@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/dynptr.c | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/prog_tests/dynptr.c b/tools/testing/selftests/bpf/prog_tests/dynptr.c index 7faaf6d9e0d4..b99264ec0d9c 100644 --- a/tools/testing/selftests/bpf/prog_tests/dynptr.c +++ b/tools/testing/selftests/bpf/prog_tests/dynptr.c @@ -5,14 +5,10 @@ #include "dynptr_fail.skel.h" #include "dynptr_success.skel.h" -static struct { - const char *prog_name; - const char *expected_err_msg; -} dynptr_tests[] = { - /* success cases */ - {"test_read_write", NULL}, - {"test_data_slice", NULL}, - {"test_ringbuf", NULL}, +static const char * const success_tests[] = { + "test_read_write", + "test_data_slice", + "test_ringbuf", }; static void verify_success(const char *prog_name) @@ -53,11 +49,11 @@ void test_dynptr(void) { int i; - for (i = 0; i < ARRAY_SIZE(dynptr_tests); i++) { - if (!test__start_subtest(dynptr_tests[i].prog_name)) + for (i = 0; i < ARRAY_SIZE(success_tests); i++) { + if (!test__start_subtest(success_tests[i])) continue; - verify_success(dynptr_tests[i].prog_name); + verify_success(success_tests[i]); } RUN_TESTS(dynptr_fail); -- cgit v1.2.3 From 524581d1216411a807d34181cb880d991fcb4b96 Mon Sep 17 00:00:00 2001 From: Tiezhu Yang Date: Wed, 15 Feb 2023 19:01:07 +0800 Subject: selftests/bpf: Fix build error for LoongArch There exists a build error when running make -C tools/testing/selftests/bpf/ on LoongArch: BINARY test_verifier In file included from test_verifier.c:27: tools/include/uapi/linux/bpf_perf_event.h:14:28: error: field 'regs' has incomplete type 14 | bpf_user_pt_regs_t regs; | ^~~~ make: *** [Makefile:577: tools/testing/selftests/bpf/test_verifier] Error 1 make: Leaving directory 'tools/testing/selftests/bpf' Add missing uapi header for LoongArch to use the following definition: typedef struct user_pt_regs bpf_user_pt_regs_t; Signed-off-by: Tiezhu Yang Link: https://lore.kernel.org/r/1676458867-22052-1-git-send-email-yangtiezhu@loongson.cn Signed-off-by: Alexei Starovoitov --- tools/include/uapi/asm/bpf_perf_event.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'tools') diff --git a/tools/include/uapi/asm/bpf_perf_event.h b/tools/include/uapi/asm/bpf_perf_event.h index d7dfeab0d71a..ff52668abf8c 100644 --- a/tools/include/uapi/asm/bpf_perf_event.h +++ b/tools/include/uapi/asm/bpf_perf_event.h @@ -6,6 +6,8 @@ #include "../../arch/s390/include/uapi/asm/bpf_perf_event.h" #elif defined(__riscv) #include "../../arch/riscv/include/uapi/asm/bpf_perf_event.h" +#elif defined(__loongarch__) +#include "../../arch/loongarch/include/uapi/asm/bpf_perf_event.h" #else #include <uapi/asm-generic/bpf_perf_event.h> #endif -- cgit v1.2.3 From 5e53e5c7edc6d69b8cb48b3b370cfe531e4b4132 Mon Sep 17 00:00:00 2001 From: Björn Töpel Date: Tue, 14 Feb 2023 17:12:53 +0100 Subject: selftests/bpf: Cross-compile bpftool MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When the BPF selftests are cross-compiled, only a host version of bpftool is built. This version of bpftool is used on the host-side to generate various intermediates, e.g., skeletons. 
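A typical host-side invocation for one such intermediate looks like the following (the object file name here is illustrative):

  bpftool gen skeleton rbtree.bpf.o > rbtree.skel.h
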
The test runners are also using bpftool, so the Makefile will symlink bpftool from the selftest/bpf root, where the test runners will look for the tool: | $(Q)ln -sf $(if $2,..,.)/tools/build/bpftool/bootstrap/bpftool \ | $(OUTPUT)/$(if $2,$2/)bpftool There are two problems for cross-compilation builds: 1. There is no native (cross-compilation target) build of bpftool 2. The bootstrap/bpftool is never cross-compiled (by design) Make sure that a native/cross-compiled version of bpftool is built, and if CROSS_COMPILE is set, symlink the native/non-bootstrap version. Acked-by: Quentin Monnet Signed-off-by: Björn Töpel Link: https://lore.kernel.org/r/20230214161253.183458-1-bjorn@kernel.org Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/Makefile | 30 ++++++++++++++++++++++++++---- 1 file changed, 26 insertions(+), 4 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index f7771592a920..521933bc15fe 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -156,8 +156,9 @@ $(notdir $(TEST_GEN_PROGS) \ $(TEST_CUSTOM_PROGS)): %: $(OUTPUT)/% ; # sort removes libbpf duplicates when not cross-building -MAKE_DIRS := $(sort $(BUILD_DIR)/libbpf $(HOST_BUILD_DIR)/libbpf \ - $(HOST_BUILD_DIR)/bpftool $(HOST_BUILD_DIR)/resolve_btfids \ +MAKE_DIRS := $(sort $(BUILD_DIR)/libbpf $(HOST_BUILD_DIR)/libbpf \ + $(BUILD_DIR)/bpftool $(HOST_BUILD_DIR)/bpftool \ + $(HOST_BUILD_DIR)/resolve_btfids \ $(RUNQSLOWER_OUTPUT) $(INCLUDE_DIR)) $(MAKE_DIRS): $(call msg,MKDIR,,$@) @@ -207,6 +208,14 @@ $(OUTPUT)/bpf_testmod.ko: $(VMLINUX_BTF) $(wildcard bpf_testmod/Makefile bpf_tes $(Q)cp bpf_testmod/bpf_testmod.ko $@ DEFAULT_BPFTOOL := $(HOST_SCRATCH_DIR)/sbin/bpftool +ifneq ($(CROSS_COMPILE),) +CROSS_BPFTOOL := $(SCRATCH_DIR)/sbin/bpftool +TRUNNER_BPFTOOL := $(CROSS_BPFTOOL) +USE_BOOTSTRAP := "" +else +TRUNNER_BPFTOOL := $(DEFAULT_BPFTOOL) +USE_BOOTSTRAP := "bootstrap/" +endif $(OUTPUT)/runqslower: $(BPFOBJ) | $(DEFAULT_BPFTOOL) $(RUNQSLOWER_OUTPUT) $(Q)$(MAKE) $(submake_extras) -C $(TOOLSDIR)/bpf/runqslower \ @@ -218,7 +227,7 @@ $(OUTPUT)/runqslower: $(BPFOBJ) | $(DEFAULT_BPFTOOL) $(RUNQSLOWER_OUTPUT) EXTRA_LDFLAGS='$(SAN_LDFLAGS)' && \ cp $(RUNQSLOWER_OUTPUT)runqslower $@ -TEST_GEN_PROGS_EXTENDED += $(DEFAULT_BPFTOOL) +TEST_GEN_PROGS_EXTENDED += $(TRUNNER_BPFTOOL) $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED): $(BPFOBJ) @@ -256,6 +265,18 @@ $(DEFAULT_BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) \ LIBBPF_DESTDIR=$(HOST_SCRATCH_DIR)/ \ prefix= DESTDIR=$(HOST_SCRATCH_DIR)/ install-bin +ifneq ($(CROSS_COMPILE),) +$(CROSS_BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) \ + $(BPFOBJ) | $(BUILD_DIR)/bpftool + $(Q)$(MAKE) $(submake_extras) -C $(BPFTOOLDIR) \ + ARCH=$(ARCH) CROSS_COMPILE=$(CROSS_COMPILE) \ + EXTRA_CFLAGS='-g -O0' \ + OUTPUT=$(BUILD_DIR)/bpftool/ \ + LIBBPF_OUTPUT=$(BUILD_DIR)/libbpf/ \ + LIBBPF_DESTDIR=$(SCRATCH_DIR)/ \ + prefix= DESTDIR=$(SCRATCH_DIR)/ install-bin +endif + all: docs docs: @@ -521,11 +542,12 @@ endif $(OUTPUT)/$(TRUNNER_BINARY): $(TRUNNER_TEST_OBJS) \ $(TRUNNER_EXTRA_OBJS) $$(BPFOBJ) \ $(RESOLVE_BTFIDS) \ + $(TRUNNER_BPFTOOL) \ | $(TRUNNER_BINARY)-extras $$(call msg,BINARY,,$$@) $(Q)$$(CC) $$(CFLAGS) $$(filter %.a %.o,$$^) $$(LDLIBS) -o $$@ $(Q)$(RESOLVE_BTFIDS) --btf $(TRUNNER_OUTPUT)/btf_data.bpf.o $$@ - $(Q)ln -sf $(if $2,..,.)/tools/build/bpftool/bootstrap/bpftool \ + $(Q)ln -sf $(if $2,..,.)/tools/build/bpftool/$(USE_BOOTSTRAP)bpftool \ 
$(OUTPUT)/$(if $2,$2/)bpftool endef -- cgit v1.2.3 From 62d101d5f422cde39b269f7eb4cbbe2f1e26f9d4 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Tue, 14 Feb 2023 15:50:51 -0800 Subject: selftests/bpf: Fix map_kptr test. The compiler is optimizing out the majority of unref_ptr read/writes, so the test wasn't testing much. For example, one could delete '__kptr' tag from 'struct prog_test_ref_kfunc __kptr *unref_ptr;' and the test would still "pass". Convert it to volatile stores. Confirmed by comparing bpf asm before/after. Fixes: 2cbc469a6fc3 ("selftests/bpf: Add C tests for kptr") Signed-off-by: Alexei Starovoitov Acked-by: Stanislav Fomichev Acked-by: Kumar Kartikeya Dwivedi Link: https://lore.kernel.org/r/20230214235051.22938-1-alexei.starovoitov@gmail.com Signed-off-by: Martin KaFai Lau --- tools/testing/selftests/bpf/progs/map_kptr.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/progs/map_kptr.c b/tools/testing/selftests/bpf/progs/map_kptr.c index eb8217803493..228ec45365a8 100644 --- a/tools/testing/selftests/bpf/progs/map_kptr.c +++ b/tools/testing/selftests/bpf/progs/map_kptr.c @@ -62,21 +62,23 @@ extern struct prog_test_ref_kfunc * bpf_kfunc_call_test_kptr_get(struct prog_test_ref_kfunc **p, int a, int b) __ksym; extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym; +#define WRITE_ONCE(x, val) ((*(volatile typeof(x) *) &(x)) = (val)) + static void test_kptr_unref(struct map_value *v) { struct prog_test_ref_kfunc *p; p = v->unref_ptr; /* store untrusted_ptr_or_null_ */ - v->unref_ptr = p; + WRITE_ONCE(v->unref_ptr, p); if (!p) return; if (p->a + p->b > 100) return; /* store untrusted_ptr_ */ - v->unref_ptr = p; + WRITE_ONCE(v->unref_ptr, p); /* store NULL */ - v->unref_ptr = NULL; + WRITE_ONCE(v->unref_ptr, NULL); } static void test_kptr_ref(struct map_value *v) @@ -85,7 +87,7 @@ static void test_kptr_ref(struct map_value *v) p = v->ref_ptr; /* store ptr_or_null_ */ - v->unref_ptr = p; + WRITE_ONCE(v->unref_ptr, p); if (!p) return; if (p->a + p->b > 100) @@ -99,7 +101,7 @@ static void test_kptr_ref(struct map_value *v) return; } /* store ptr_ */ - v->unref_ptr = p; + WRITE_ONCE(v->unref_ptr, p); bpf_kfunc_call_test_release(p); p = bpf_kfunc_call_test_acquire(&(unsigned long){0}); -- cgit v1.2.3 From ecdf985d7615356b78241fdb159c091830ed0380 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Wed, 15 Feb 2023 01:20:27 +0200 Subject: bpf: track immediate values written to stack by BPF_ST instruction For aligned stack writes using the BPF_ST instruction, track stored values in the same way BPF_STX is handled, e.g. make sure that the following commands produce similar verifier knowledge: fp[-8] = 42; r1 = 42; fp[-8] = r1; This covers two cases: - non-null values written to stack are stored as spills of fake registers; - null values written to stack are stored as STACK_ZERO marks. Previously both cases above used STACK_MISC marks instead. Some verifier test cases relied on the old logic to obtain STACK_MISC marks for some stack values. These test cases are updated in the same commit to avoid failures during bisect. 
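In the selftest macro notation that appears below, the change means these two sequences now leave the verifier with the same knowledge that fp[-8] holds the known scalar 42 (a sketch for illustration, not a test from this series):

  /* BPF_ST: immediate store to stack, value now tracked */
  BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 42),

  /* BPF_STX: register spill, tracked both before and after this patch */
  BPF_MOV64_IMM(BPF_REG_1, 42),
  BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
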
Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230214232030.1502829-2-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/verifier.c | 18 +++- .../bpf/verifier/bounds_mix_sign_unsign.c | 110 ++++++++++++--------- 2 files changed, 80 insertions(+), 48 deletions(-) (limited to 'tools') diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 21e08c111702..c28afae60874 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -3473,6 +3473,11 @@ static void save_register_state(struct bpf_func_state *state, scrub_spilled_slot(&state->stack[spi].slot_type[i - 1]); } +static bool is_bpf_st_mem(struct bpf_insn *insn) +{ + return BPF_CLASS(insn->code) == BPF_ST && BPF_MODE(insn->code) == BPF_MEM; +} + /* check_stack_{read,write}_fixed_off functions track spill/fill of registers, * stack boundary and alignment are checked in check_mem_access() */ @@ -3484,8 +3489,9 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env, { struct bpf_func_state *cur; /* state of the current function */ int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err; - u32 dst_reg = env->prog->insnsi[insn_idx].dst_reg; + struct bpf_insn *insn = &env->prog->insnsi[insn_idx]; struct bpf_reg_state *reg = NULL; + u32 dst_reg = insn->dst_reg; err = grow_stack_state(state, round_up(slot + 1, BPF_REG_SIZE)); if (err) @@ -3538,6 +3544,13 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env, return err; } save_register_state(state, spi, reg, size); + } else if (!reg && !(off % BPF_REG_SIZE) && is_bpf_st_mem(insn) && + insn->imm != 0 && env->bpf_capable) { + struct bpf_reg_state fake_reg = {}; + + __mark_reg_known(&fake_reg, (u32)insn->imm); + fake_reg.type = SCALAR_VALUE; + save_register_state(state, spi, &fake_reg, size); } else if (reg && is_spillable_regtype(reg->type)) { /* register containing pointer is being spilled into stack */ if (size != BPF_REG_SIZE) { @@ -3572,7 +3585,8 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env, state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; /* when we zero initialize stack slots mark them as such */ - if (reg && register_is_null(reg)) { + if ((reg && register_is_null(reg)) || + (!reg && is_bpf_st_mem(insn) && insn->imm == 0)) { /* backtracking doesn't work for STACK_ZERO yet. 
*/ err = mark_chain_precision(env, value_regno); if (err) diff --git a/tools/testing/selftests/bpf/verifier/bounds_mix_sign_unsign.c b/tools/testing/selftests/bpf/verifier/bounds_mix_sign_unsign.c index c2aa6f26738b..bf82b923c5fe 100644 --- a/tools/testing/selftests/bpf/verifier/bounds_mix_sign_unsign.c +++ b/tools/testing/selftests/bpf/verifier/bounds_mix_sign_unsign.c @@ -1,13 +1,14 @@ { "bounds checks mixing signed and unsigned, positive bounds", .insns = { + BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), BPF_LD_MAP_FD(BPF_REG_1, 0), BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), BPF_MOV64_IMM(BPF_REG_2, 2), BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 3), @@ -17,20 +18,21 @@ BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, - .fixup_map_hash_8b = { 3 }, + .fixup_map_hash_8b = { 5 }, .errstr = "unbounded min value", .result = REJECT, }, { "bounds checks mixing signed and unsigned", .insns = { + BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), BPF_LD_MAP_FD(BPF_REG_1, 0), BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), BPF_MOV64_IMM(BPF_REG_2, -1), BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3), @@ -40,20 +42,21 @@ BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, - .fixup_map_hash_8b = { 3 }, + .fixup_map_hash_8b = { 5 }, .errstr = "unbounded min value", .result = REJECT, }, { "bounds checks mixing signed and unsigned, variant 2", .insns = { + BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), BPF_LD_MAP_FD(BPF_REG_1, 0), BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), BPF_MOV64_IMM(BPF_REG_2, -1), BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5), @@ -65,20 +68,21 @@ BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, - .fixup_map_hash_8b = { 3 }, + .fixup_map_hash_8b = { 5 }, .errstr = "unbounded min value", .result = REJECT, }, { "bounds checks mixing signed and unsigned, variant 3", .insns = { + BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), BPF_LD_MAP_FD(BPF_REG_1, 0), BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), BPF_MOV64_IMM(BPF_REG_2, -1), BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 4), @@ -89,20 +93,21 @@ BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, - .fixup_map_hash_8b = { 3 }, + .fixup_map_hash_8b = { 
5 }, .errstr = "unbounded min value", .result = REJECT, }, { "bounds checks mixing signed and unsigned, variant 4", .insns = { + BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), BPF_LD_MAP_FD(BPF_REG_1, 0), BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), BPF_MOV64_IMM(BPF_REG_2, 1), BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), @@ -112,19 +117,20 @@ BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, - .fixup_map_hash_8b = { 3 }, + .fixup_map_hash_8b = { 5 }, .result = ACCEPT, }, { "bounds checks mixing signed and unsigned, variant 5", .insns = { + BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), BPF_LD_MAP_FD(BPF_REG_1, 0), BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), BPF_MOV64_IMM(BPF_REG_2, -1), BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5), @@ -135,17 +141,20 @@ BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, - .fixup_map_hash_8b = { 3 }, + .fixup_map_hash_8b = { 5 }, .errstr = "unbounded min value", .result = REJECT, }, { "bounds checks mixing signed and unsigned, variant 6", .insns = { + BPF_MOV64_REG(BPF_REG_9, BPF_REG_1), + BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_9), BPF_MOV64_IMM(BPF_REG_2, 0), BPF_MOV64_REG(BPF_REG_3, BPF_REG_10), BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -512), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -16), BPF_MOV64_IMM(BPF_REG_6, -1), BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_6, 5), @@ -163,13 +172,14 @@ { "bounds checks mixing signed and unsigned, variant 7", .insns = { + BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), BPF_LD_MAP_FD(BPF_REG_1, 0), BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024), BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3), @@ -179,19 +189,20 @@ BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, - .fixup_map_hash_8b = { 3 }, + .fixup_map_hash_8b = { 5 }, .result = ACCEPT, }, { "bounds checks mixing signed and unsigned, variant 8", .insns = { + BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), BPF_LD_MAP_FD(BPF_REG_1, 0), BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), BPF_MOV64_IMM(BPF_REG_2, -1), 
BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2), @@ -203,20 +214,21 @@ BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, - .fixup_map_hash_8b = { 3 }, + .fixup_map_hash_8b = { 5 }, .errstr = "unbounded min value", .result = REJECT, }, { "bounds checks mixing signed and unsigned, variant 9", .insns = { + BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), BPF_LD_MAP_FD(BPF_REG_1, 0), BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), BPF_LD_IMM64(BPF_REG_2, -9223372036854775808ULL), BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2), @@ -228,19 +240,20 @@ BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, - .fixup_map_hash_8b = { 3 }, + .fixup_map_hash_8b = { 5 }, .result = ACCEPT, }, { "bounds checks mixing signed and unsigned, variant 10", .insns = { + BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), BPF_LD_MAP_FD(BPF_REG_1, 0), BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), BPF_MOV64_IMM(BPF_REG_2, 0), BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2), @@ -252,20 +265,21 @@ BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, - .fixup_map_hash_8b = { 3 }, + .fixup_map_hash_8b = { 5 }, .errstr = "unbounded min value", .result = REJECT, }, { "bounds checks mixing signed and unsigned, variant 11", .insns = { + BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), BPF_LD_MAP_FD(BPF_REG_1, 0), BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), BPF_MOV64_IMM(BPF_REG_2, -1), BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2), @@ -278,20 +292,21 @@ BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, - .fixup_map_hash_8b = { 3 }, + .fixup_map_hash_8b = { 5 }, .errstr = "unbounded min value", .result = REJECT, }, { "bounds checks mixing signed and unsigned, variant 12", .insns = { + BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), BPF_LD_MAP_FD(BPF_REG_1, 0), BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), BPF_MOV64_IMM(BPF_REG_2, -6), BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2), @@ -303,20 +318,21 @@ BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, - .fixup_map_hash_8b = { 3 }, + .fixup_map_hash_8b = { 5 }, .errstr = "unbounded min value", .result = REJECT, }, { "bounds checks mixing signed and unsigned, variant 13", .insns = { + 
BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), BPF_LD_MAP_FD(BPF_REG_1, 0), BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), BPF_MOV64_IMM(BPF_REG_2, 2), BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2), @@ -331,7 +347,7 @@ BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, - .fixup_map_hash_8b = { 3 }, + .fixup_map_hash_8b = { 5 }, .errstr = "unbounded min value", .result = REJECT, }, @@ -340,13 +356,14 @@ .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1, offsetof(struct __sk_buff, mark)), + BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), BPF_LD_MAP_FD(BPF_REG_1, 0), BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), BPF_MOV64_IMM(BPF_REG_2, -1), BPF_MOV64_IMM(BPF_REG_8, 2), @@ -360,20 +377,21 @@ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, -3), BPF_JMP_IMM(BPF_JA, 0, 0, -7), }, - .fixup_map_hash_8b = { 4 }, + .fixup_map_hash_8b = { 6 }, .errstr = "unbounded min value", .result = REJECT, }, { "bounds checks mixing signed and unsigned, variant 15", .insns = { + BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), BPF_LD_MAP_FD(BPF_REG_1, 0), BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), BPF_MOV64_IMM(BPF_REG_2, -6), BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2), @@ -387,7 +405,7 @@ BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, - .fixup_map_hash_8b = { 3 }, + .fixup_map_hash_8b = { 5 }, .errstr = "unbounded min value", .result = REJECT, }, -- cgit v1.2.3 From 1a24af65bb5fed673a9377e794ee3cf416fec64d Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Wed, 15 Feb 2023 01:20:28 +0200 Subject: selftests/bpf: check if verifier tracks constants spilled by BPF_ST_MEM Check that verifier tracks the value of 'imm' spilled to stack by BPF_ST_MEM instruction. Cover the following cases: - write of non-zero constant to stack; - write of a zero constant to stack. 
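In the BPF assembly notation used elsewhere in this series, the first test case below amounts to the following (an illustrative sketch, not part of the patch itself):

    *(u64*)(r10 - 8) = 42 ; BPF_ST_MEM spills the constant 42 to the stack
    r0 = *(u64*)(r10 - 8) ; if the spill is tracked, the verifier knows r0 == 42
    r0 += -42             ; so r0 is provably zero here
    exit                  ; SK_LOOKUP programs must return a value in [0, 1]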
Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230214232030.1502829-3-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/verifier/bpf_st_mem.c | 37 +++++++++++++++++++++++ 1 file changed, 37 insertions(+) create mode 100644 tools/testing/selftests/bpf/verifier/bpf_st_mem.c (limited to 'tools') diff --git a/tools/testing/selftests/bpf/verifier/bpf_st_mem.c b/tools/testing/selftests/bpf/verifier/bpf_st_mem.c new file mode 100644 index 000000000000..932903f9e585 --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/bpf_st_mem.c @@ -0,0 +1,37 @@ +{ + "BPF_ST_MEM stack imm non-zero", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 42), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -42), + /* if value is tracked correctly R0 is zero */ + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + /* Use prog type that requires return value in range [0, 1] */ + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, + .runs = -1, +}, +{ + "BPF_ST_MEM stack imm zero", + .insns = { + /* mark stack 0000 0000 */ + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + /* read and sum a few bytes */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_10, -8), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_10, -4), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_10, -1), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + /* if value is tracked correctly R0 is zero */ + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + /* Use prog type that requires return value in range [0, 1] */ + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, + .runs = -1, +}, -- cgit v1.2.3 From 2a33c5a25ef4fb574e6744fe7636956b124ad78f Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Wed, 15 Feb 2023 01:20:30 +0200 Subject: selftests/bpf: check if BPF_ST with variable offset preserves STACK_ZERO A test case to verify that a variable offset BPF_ST instruction preserves STACK_ZERO marks when writing zeros, e.g. in the following situation: *(u64*)(r10 - 8) = 0 ; STACK_ZERO marks for fp[-8] r0 = random(-7, -1) ; some random number in range of [-7, -1] r0 += r10 ; r0 is now variable offset pointer to stack *(u8*)(r0) = 0 ; BPF_ST writing zero, STACK_ZERO mark for ; fp[-8] should be preserved. Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230214232030.1502829-5-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/verifier/bpf_st_mem.c | 30 +++++++++++++++++++++ 1 file changed, 30 insertions(+) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/verifier/bpf_st_mem.c b/tools/testing/selftests/bpf/verifier/bpf_st_mem.c index 932903f9e585..3af2501082b2 100644 --- a/tools/testing/selftests/bpf/verifier/bpf_st_mem.c +++ b/tools/testing/selftests/bpf/verifier/bpf_st_mem.c @@ -35,3 +35,33 @@ .expected_attach_type = BPF_SK_LOOKUP, .runs = -1, }, +{ + "BPF_ST_MEM stack imm zero, variable offset", + .insns = { + /* set fp[-16], fp[-24] to zeros */ + BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0), + BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0), + /* r0 = random value in range [-32, -15] */ + BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32), + BPF_JMP_IMM(BPF_JLE, BPF_REG_0, 16, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 32), + /* fp[r0] = 0, make a variable offset write of zero, + * this should preserve zero marks on stack.
+ */ + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_10), + BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0), + /* r0 = fp[-20], if variable offset write was tracked correctly + * r0 would be a known zero. + */ + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_10, -20), + /* Would fail return code verification if r0 range is not tracked correctly. */ + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + /* Use prog type that requires return value in range [0, 1] */ + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, + .runs = -1, +}, -- cgit v1.2.3 From f88da2d46cc9a19b0c233285339659cae36c5d9a Mon Sep 17 00:00:00 2001 From: Hou Tao Date: Wed, 15 Feb 2023 16:21:32 +0800 Subject: selftests/bpf: Add test case for element reuse in htab map The reinitialization of spin-lock in map value after immediate reuse may corrupt lookup with BPF_F_LOCK flag and result in hard lock-up, so add one test case to demonstrate the problem. Signed-off-by: Hou Tao Link: https://lore.kernel.org/r/20230215082132.3856544-3-houtao@huaweicloud.com Signed-off-by: Alexei Starovoitov --- .../testing/selftests/bpf/prog_tests/htab_reuse.c | 101 +++++++++++++++++++++ tools/testing/selftests/bpf/progs/htab_reuse.c | 19 ++++ 2 files changed, 120 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/htab_reuse.c create mode 100644 tools/testing/selftests/bpf/progs/htab_reuse.c (limited to 'tools') diff --git a/tools/testing/selftests/bpf/prog_tests/htab_reuse.c b/tools/testing/selftests/bpf/prog_tests/htab_reuse.c new file mode 100644 index 000000000000..a742dd994d60 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/htab_reuse.c @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2023. Huawei Technologies Co., Ltd */ +#define _GNU_SOURCE +#include +#include +#include +#include "htab_reuse.skel.h" + +struct htab_op_ctx { + int fd; + int loop; + bool stop; +}; + +struct htab_val { + unsigned int lock; + unsigned int data; +}; + +static void *htab_lookup_fn(void *arg) +{ + struct htab_op_ctx *ctx = arg; + int i = 0; + + while (i++ < ctx->loop && !ctx->stop) { + struct htab_val value; + unsigned int key; + + /* Use BPF_F_LOCK to use spin-lock in map value. 
*/ + key = 7; + bpf_map_lookup_elem_flags(ctx->fd, &key, &value, BPF_F_LOCK); + } + + return NULL; +} + +static void *htab_update_fn(void *arg) +{ + struct htab_op_ctx *ctx = arg; + int i = 0; + + while (i++ < ctx->loop && !ctx->stop) { + struct htab_val value; + unsigned int key; + + key = 7; + value.lock = 0; + value.data = key; + bpf_map_update_elem(ctx->fd, &key, &value, BPF_F_LOCK); + bpf_map_delete_elem(ctx->fd, &key); + + key = 24; + value.lock = 0; + value.data = key; + bpf_map_update_elem(ctx->fd, &key, &value, BPF_F_LOCK); + bpf_map_delete_elem(ctx->fd, &key); + } + + return NULL; +} + +void test_htab_reuse(void) +{ + unsigned int i, wr_nr = 1, rd_nr = 4; + pthread_t tids[wr_nr + rd_nr]; + struct htab_reuse *skel; + struct htab_op_ctx ctx; + int err; + + skel = htab_reuse__open_and_load(); + if (!ASSERT_OK_PTR(skel, "htab_reuse__open_and_load")) + return; + + ctx.fd = bpf_map__fd(skel->maps.htab); + ctx.loop = 500; + ctx.stop = false; + + memset(tids, 0, sizeof(tids)); + for (i = 0; i < wr_nr; i++) { + err = pthread_create(&tids[i], NULL, htab_update_fn, &ctx); + if (!ASSERT_OK(err, "pthread_create")) { + ctx.stop = true; + goto reap; + } + } + for (i = 0; i < rd_nr; i++) { + err = pthread_create(&tids[i + wr_nr], NULL, htab_lookup_fn, &ctx); + if (!ASSERT_OK(err, "pthread_create")) { + ctx.stop = true; + goto reap; + } + } + +reap: + for (i = 0; i < wr_nr + rd_nr; i++) { + if (!tids[i]) + continue; + pthread_join(tids[i], NULL); + } + htab_reuse__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/progs/htab_reuse.c b/tools/testing/selftests/bpf/progs/htab_reuse.c new file mode 100644 index 000000000000..7f7368cb3095 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/htab_reuse.c @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2023. Huawei Technologies Co., Ltd */ +#include +#include + +char _license[] SEC("license") = "GPL"; + +struct htab_val { + struct bpf_spin_lock lock; + unsigned int data; +}; + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 64); + __type(key, unsigned int); + __type(value, struct htab_val); + __uint(map_flags, BPF_F_NO_PREALLOC); +} htab SEC(".maps"); -- cgit v1.2.3 From 4db98ab445c58bd26c303ef7a10ccd8f049acc22 Mon Sep 17 00:00:00 2001 From: Anton Protopopov Date: Mon, 13 Feb 2023 09:15:13 +0000 Subject: selftest/bpf/benchs: Fix a typo in bpf_hashmap_full_update To call the bpf_hashmap_full_update benchmark, one should say: bench bpf-hashmap-ful-update The patch adds a missing 'l' to the benchmark name. 
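With the fix applied, the invocation becomes, e.g. (illustrative usage): ./bench bpf-hashmap-full-update. The run_bench_bpf_hashmap_full_update.sh helper in the diff below is updated to match.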
Signed-off-by: Anton Protopopov Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20230213091519.1202813-2-aspsk@isovalent.com --- tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_full_update.c | 2 +- tools/testing/selftests/bpf/benchs/run_bench_bpf_hashmap_full_update.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_full_update.c b/tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_full_update.c index cec51e0ff4b8..44706acf632a 100644 --- a/tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_full_update.c +++ b/tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_full_update.c @@ -85,7 +85,7 @@ void hashmap_report_final(struct bench_res res[], int res_cnt) } const struct bench bench_bpf_hashmap_full_update = { - .name = "bpf-hashmap-ful-update", + .name = "bpf-hashmap-full-update", .validate = validate, .setup = setup, .producer_thread = producer, diff --git a/tools/testing/selftests/bpf/benchs/run_bench_bpf_hashmap_full_update.sh b/tools/testing/selftests/bpf/benchs/run_bench_bpf_hashmap_full_update.sh index 1e2de838f9fa..cd2efd3fdef3 100755 --- a/tools/testing/selftests/bpf/benchs/run_bench_bpf_hashmap_full_update.sh +++ b/tools/testing/selftests/bpf/benchs/run_bench_bpf_hashmap_full_update.sh @@ -6,6 +6,6 @@ source ./benchs/run_common.sh set -eufo pipefail nr_threads=`expr $(cat /proc/cpuinfo | grep "processor"| wc -l) - 1` -summary=$($RUN_BENCH -p $nr_threads bpf-hashmap-ful-update) +summary=$($RUN_BENCH -p $nr_threads bpf-hashmap-full-update) printf "$summary" printf "\n" -- cgit v1.2.3 From 2f1c59637fb17dbb2a725c3bd48e4d9d3809df89 Mon Sep 17 00:00:00 2001 From: Anton Protopopov Date: Mon, 13 Feb 2023 09:15:14 +0000 Subject: selftest/bpf/benchs: Make a function static in bpf_hashmap_full_update The hashmap_report_final callback function defined in the benchs/bench_bpf_hashmap_full_update.c file should be static. Signed-off-by: Anton Protopopov Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20230213091519.1202813-3-aspsk@isovalent.com --- tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_full_update.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_full_update.c b/tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_full_update.c index 44706acf632a..67f76415a362 100644 --- a/tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_full_update.c +++ b/tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_full_update.c @@ -68,7 +68,7 @@ static void setup(void) bpf_map_update_elem(map_fd, &i, &i, BPF_ANY); } -void hashmap_report_final(struct bench_res res[], int res_cnt) +static void hashmap_report_final(struct bench_res res[], int res_cnt) { unsigned int nr_cpus = bpf_num_possible_cpus(); int i; -- cgit v1.2.3 From 22ff7aeaa9e3d0533df613da3500db1ecf452253 Mon Sep 17 00:00:00 2001 From: Anton Protopopov Date: Mon, 13 Feb 2023 09:15:15 +0000 Subject: selftest/bpf/benchs: Enhance argp parsing To parse command line the bench utility uses the argp_parse() function. This function takes as an argument a parent 'struct argp' structure which defines common command line options and an array of children 'struct argp' structures which defines additional command line options for particular benchmarks. 
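Schematically, the pre-patch flow looks like this (a simplified sketch assembled from the context lines of the diff below, not a verbatim excerpt):

	static const struct argp_child bench_parsers[] = {
		{ &bench_ringbufs_argp, 0, "Ring buffers benchmark", 0 },
		/* ... one child argp per benchmark with custom options ... */
		{},
	};

	static void parse_cmdline_args(int argc, char **argv)
	{
		static const struct argp argp = {
			.options = opts,		/* common options */
			.parser = parse_arg,
			.doc = argp_program_doc,
			.children = bench_parsers,	/* always all benchmarks */
		};

		if (argp_parse(&argp, argc, argv, 0, NULL, NULL))
			exit(1);
	}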
This implementation doesn't allow benchmarks to share option names, e.g., if two benchmarks want to use, say, the --option option, then only one of them will succeed (the first one encountered in the array). It would be convenient if the same option names could be used in different benchmarks (with the same semantics, e.g., --nr_loops=N). Fix this by calling the argp_parse() function twice. The first call is the same as it was before, with all children argps, and helps to find the benchmark name and to print a combined help message if anything is wrong. Given the name, we can call argp_parse() a second time, but now the children array points only to the selected benchmark, thus always calling the correct parsers. (If there is no benchmark-specific list of arguments, then only one call to argp_parse is done.) Signed-off-by: Anton Protopopov Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20230213091519.1202813-4-aspsk@isovalent.com --- tools/testing/selftests/bpf/bench.c | 44 +++++++++++++++++----- tools/testing/selftests/bpf/bench.h | 1 + .../selftests/bpf/benchs/bench_bloom_filter_map.c | 5 +++ .../testing/selftests/bpf/benchs/bench_bpf_loop.c | 1 + .../selftests/bpf/benchs/bench_local_storage.c | 3 ++ .../benchs/bench_local_storage_rcu_tasks_trace.c | 1 + .../testing/selftests/bpf/benchs/bench_ringbufs.c | 4 ++ tools/testing/selftests/bpf/benchs/bench_strncmp.c | 2 + 8 files changed, 51 insertions(+), 10 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/bench.c b/tools/testing/selftests/bpf/bench.c index c1f20a147462..12c3b3ab84aa 100644 --- a/tools/testing/selftests/bpf/bench.c +++ b/tools/testing/selftests/bpf/bench.c @@ -287,10 +287,11 @@ static const struct argp_child bench_parsers[] = { {}, }; +/* Make pos_args global, so that we can run argp_parse twice, if necessary */ +static int pos_args; + static error_t parse_arg(int key, char *arg, struct argp_state *state) { - static int pos_args; - switch (key) { case 'v': env.verbose = true; @@ -359,7 +360,7 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state) return 0; } -static void parse_cmdline_args(int argc, char **argv) +static void parse_cmdline_args_init(int argc, char **argv) { static const struct argp argp = { .options = opts, @@ -369,9 +370,25 @@ static void parse_cmdline_args(int argc, char **argv) }; if (argp_parse(&argp, argc, argv, 0, NULL, NULL)) exit(1); - if (!env.list && !env.bench_name) { - argp_help(&argp, stderr, ARGP_HELP_DOC, "bench"); - exit(1); +} + +static void parse_cmdline_args_final(int argc, char **argv) +{ + struct argp_child bench_parsers[2] = {}; + const struct argp argp = { + .options = opts, + .parser = parse_arg, + .doc = argp_program_doc, + .children = bench_parsers, + }; + + /* Parse arguments the second time with the correct set of parsers */ + if (bench->argp) { + bench_parsers[0].argp = bench->argp; + bench_parsers[0].header = bench->name; + pos_args = 0; + if (argp_parse(&argp, argc, argv, 0, NULL, NULL)) + exit(1); + } } @@ -531,15 +548,14 @@ static const struct bench *benchs[] = { &bench_local_storage_tasks_trace, }; -static void setup_benchmark() +static void find_benchmark(void) { - int i, err; + int i; if (!env.bench_name) { fprintf(stderr, "benchmark name is not specified\n"); exit(1); } - for (i = 0; i < ARRAY_SIZE(benchs); i++) { if (strcmp(benchs[i]->name, env.bench_name) == 0) { bench = benchs[i]; @@ -550,6 +566,11 @@ static void setup_benchmark() fprintf(stderr, "benchmark '%s' not found\n", env.bench_name); exit(1); } +} + +static
void setup_benchmark(void) +{ + int i, err; printf("Setting up benchmark '%s'...\n", bench->name); @@ -621,7 +642,7 @@ static void collect_measurements(long delta_ns) { int main(int argc, char **argv) { - parse_cmdline_args(argc, argv); + parse_cmdline_args_init(argc, argv); if (env.list) { int i; @@ -633,6 +654,9 @@ int main(int argc, char **argv) return 0; } + find_benchmark(); + parse_cmdline_args_final(argc, argv); + setup_benchmark(); setup_timer(); diff --git a/tools/testing/selftests/bpf/bench.h b/tools/testing/selftests/bpf/bench.h index d748255877e2..3c8afa0131a3 100644 --- a/tools/testing/selftests/bpf/bench.h +++ b/tools/testing/selftests/bpf/bench.h @@ -47,6 +47,7 @@ struct bench_res { struct bench { const char *name; + const struct argp *argp; void (*validate)(void); void (*setup)(void); void *(*producer_thread)(void *ctx); diff --git a/tools/testing/selftests/bpf/benchs/bench_bloom_filter_map.c b/tools/testing/selftests/bpf/benchs/bench_bloom_filter_map.c index 5bcb8a8cdeb2..7c8ccc108313 100644 --- a/tools/testing/selftests/bpf/benchs/bench_bloom_filter_map.c +++ b/tools/testing/selftests/bpf/benchs/bench_bloom_filter_map.c @@ -428,6 +428,7 @@ static void *consumer(void *input) const struct bench bench_bloom_lookup = { .name = "bloom-lookup", + .argp = &bench_bloom_map_argp, .validate = validate, .setup = bloom_lookup_setup, .producer_thread = producer, @@ -439,6 +440,7 @@ const struct bench bench_bloom_lookup = { const struct bench bench_bloom_update = { .name = "bloom-update", + .argp = &bench_bloom_map_argp, .validate = validate, .setup = bloom_update_setup, .producer_thread = producer, @@ -450,6 +452,7 @@ const struct bench bench_bloom_update = { const struct bench bench_bloom_false_positive = { .name = "bloom-false-positive", + .argp = &bench_bloom_map_argp, .validate = validate, .setup = false_positive_setup, .producer_thread = producer, @@ -461,6 +464,7 @@ const struct bench bench_bloom_false_positive = { const struct bench bench_hashmap_without_bloom = { .name = "hashmap-without-bloom", + .argp = &bench_bloom_map_argp, .validate = validate, .setup = hashmap_no_bloom_setup, .producer_thread = producer, @@ -472,6 +476,7 @@ const struct bench bench_hashmap_without_bloom = { const struct bench bench_hashmap_with_bloom = { .name = "hashmap-with-bloom", + .argp = &bench_bloom_map_argp, .validate = validate, .setup = hashmap_with_bloom_setup, .producer_thread = producer, diff --git a/tools/testing/selftests/bpf/benchs/bench_bpf_loop.c b/tools/testing/selftests/bpf/benchs/bench_bpf_loop.c index d0a6572bfab6..d8a0394e10b1 100644 --- a/tools/testing/selftests/bpf/benchs/bench_bpf_loop.c +++ b/tools/testing/selftests/bpf/benchs/bench_bpf_loop.c @@ -95,6 +95,7 @@ static void setup(void) const struct bench bench_bpf_loop = { .name = "bpf-loop", + .argp = &bench_bpf_loop_argp, .validate = validate, .setup = setup, .producer_thread = producer, diff --git a/tools/testing/selftests/bpf/benchs/bench_local_storage.c b/tools/testing/selftests/bpf/benchs/bench_local_storage.c index 5a378c84e81f..d4b2817306d4 100644 --- a/tools/testing/selftests/bpf/benchs/bench_local_storage.c +++ b/tools/testing/selftests/bpf/benchs/bench_local_storage.c @@ -255,6 +255,7 @@ static void *producer(void *input) */ const struct bench bench_local_storage_cache_seq_get = { .name = "local-storage-cache-seq-get", + .argp = &bench_local_storage_argp, .validate = validate, .setup = local_storage_cache_get_setup, .producer_thread = producer, @@ -266,6 +267,7 @@ const struct bench bench_local_storage_cache_seq_get = 
{ const struct bench bench_local_storage_cache_interleaved_get = { .name = "local-storage-cache-int-get", + .argp = &bench_local_storage_argp, .validate = validate, .setup = local_storage_cache_get_interleaved_setup, .producer_thread = producer, @@ -277,6 +279,7 @@ const struct bench bench_local_storage_cache_interleaved_get = { const struct bench bench_local_storage_cache_hashmap_control = { .name = "local-storage-cache-hashmap-control", + .argp = &bench_local_storage_argp, .validate = validate, .setup = hashmap_setup, .producer_thread = producer, diff --git a/tools/testing/selftests/bpf/benchs/bench_local_storage_rcu_tasks_trace.c b/tools/testing/selftests/bpf/benchs/bench_local_storage_rcu_tasks_trace.c index 43f109d93130..4f9401ecf09c 100644 --- a/tools/testing/selftests/bpf/benchs/bench_local_storage_rcu_tasks_trace.c +++ b/tools/testing/selftests/bpf/benchs/bench_local_storage_rcu_tasks_trace.c @@ -271,6 +271,7 @@ static void report_final(struct bench_res res[], int res_cnt) */ const struct bench bench_local_storage_tasks_trace = { .name = "local-storage-tasks-trace", + .argp = &bench_local_storage_rcu_tasks_trace_argp, .validate = validate, .setup = local_storage_tasks_trace_setup, .producer_thread = producer, diff --git a/tools/testing/selftests/bpf/benchs/bench_ringbufs.c b/tools/testing/selftests/bpf/benchs/bench_ringbufs.c index c2554f9695ff..fc91fdac4faa 100644 --- a/tools/testing/selftests/bpf/benchs/bench_ringbufs.c +++ b/tools/testing/selftests/bpf/benchs/bench_ringbufs.c @@ -518,6 +518,7 @@ static void *perfbuf_custom_consumer(void *input) const struct bench bench_rb_libbpf = { .name = "rb-libbpf", + .argp = &bench_ringbufs_argp, .validate = bufs_validate, .setup = ringbuf_libbpf_setup, .producer_thread = bufs_sample_producer, @@ -529,6 +530,7 @@ const struct bench bench_rb_libbpf = { const struct bench bench_rb_custom = { .name = "rb-custom", + .argp = &bench_ringbufs_argp, .validate = bufs_validate, .setup = ringbuf_custom_setup, .producer_thread = bufs_sample_producer, @@ -540,6 +542,7 @@ const struct bench bench_rb_custom = { const struct bench bench_pb_libbpf = { .name = "pb-libbpf", + .argp = &bench_ringbufs_argp, .validate = bufs_validate, .setup = perfbuf_libbpf_setup, .producer_thread = bufs_sample_producer, @@ -551,6 +554,7 @@ const struct bench bench_pb_libbpf = { const struct bench bench_pb_custom = { .name = "pb-custom", + .argp = &bench_ringbufs_argp, .validate = bufs_validate, .setup = perfbuf_libbpf_setup, .producer_thread = bufs_sample_producer, diff --git a/tools/testing/selftests/bpf/benchs/bench_strncmp.c b/tools/testing/selftests/bpf/benchs/bench_strncmp.c index 494b591c0289..d3fad2ba6916 100644 --- a/tools/testing/selftests/bpf/benchs/bench_strncmp.c +++ b/tools/testing/selftests/bpf/benchs/bench_strncmp.c @@ -140,6 +140,7 @@ static void strncmp_measure(struct bench_res *res) const struct bench bench_strncmp_no_helper = { .name = "strncmp-no-helper", + .argp = &bench_strncmp_argp, .validate = strncmp_validate, .setup = strncmp_no_helper_setup, .producer_thread = strncmp_producer, @@ -151,6 +152,7 @@ const struct bench bench_strncmp_no_helper = { const struct bench bench_strncmp_helper = { .name = "strncmp-helper", + .argp = &bench_strncmp_argp, .validate = strncmp_validate, .setup = strncmp_helper_setup, .producer_thread = strncmp_producer, -- cgit v1.2.3 From 9644546260eac49348b2c0694b01bdf72c627194 Mon Sep 17 00:00:00 2001 From: Anton Protopopov Date: Mon, 13 Feb 2023 09:15:16 +0000 Subject: selftest/bpf/benchs: Remove an unused header The 
benchs/bench_bpf_hashmap_full_update.c doesn't set a custom argp, so it shouldn't include the <argp.h> header. Signed-off-by: Anton Protopopov Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20230213091519.1202813-5-aspsk@isovalent.com --- tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_full_update.c | 1 - 1 file changed, 1 deletion(-) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_full_update.c b/tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_full_update.c index 67f76415a362..75abe8137b6c 100644 --- a/tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_full_update.c +++ b/tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_full_update.c @@ -1,7 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2022 Bytedance */ -#include <argp.h> #include "bench.h" #include "bpf_hashmap_full_update_bench.skel.h" #include "bpf_util.h" -- cgit v1.2.3 From 90c22503cd8910c54a8cd4bfe5bb6873d9ba8e0b Mon Sep 17 00:00:00 2001 From: Anton Protopopov Date: Mon, 13 Feb 2023 09:15:17 +0000 Subject: selftest/bpf/benchs: Make quiet option common The "local-storage-tasks-trace" benchmark has a `--quiet` option. Move it to the list of common options, so that the main code and other benchmarks can use the (new) env.quiet variable. Patch the run_bench_local_storage_rcu_tasks_trace.sh helper script accordingly. Signed-off-by: Anton Protopopov Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20230213091519.1202813-6-aspsk@isovalent.com --- tools/testing/selftests/bpf/bench.c | 5 +++++ tools/testing/selftests/bpf/bench.h | 1 + .../bpf/benchs/bench_local_storage_rcu_tasks_trace.c | 15 +-------------- .../bpf/benchs/run_bench_local_storage_rcu_tasks_trace.sh | 2 +- 4 files changed, 8 insertions(+), 15 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/bench.c b/tools/testing/selftests/bpf/bench.c index 12c3b3ab84aa..23c24c346130 100644 --- a/tools/testing/selftests/bpf/bench.c +++ b/tools/testing/selftests/bpf/bench.c @@ -16,6 +16,7 @@ struct env env = { .warmup_sec = 1, .duration_sec = 5, .affinity = false, + .quiet = false, .consumer_cnt = 1, .producer_cnt = 1, }; @@ -262,6 +263,7 @@ static const struct argp_option opts[] = { { "consumers", 'c', "NUM", 0, "Number of consumer threads"}, { "verbose", 'v', NULL, 0, "Verbose debug output"}, { "affinity", 'a', NULL, 0, "Set consumer/producer thread affinity"}, + { "quiet", 'q', NULL, 0, "Be more quiet"}, { "prod-affinity", ARG_PROD_AFFINITY_SET, "CPUSET", 0, "Set of CPUs for producer threads; implies --affinity"}, { "cons-affinity", ARG_CONS_AFFINITY_SET, "CPUSET", 0, @@ -330,6 +332,9 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state) case 'a': env.affinity = true; break; + case 'q': + env.quiet = true; + break; case ARG_PROD_AFFINITY_SET: env.affinity = true; if (parse_num_list(arg, &env.prod_cpus.cpus, diff --git a/tools/testing/selftests/bpf/bench.h b/tools/testing/selftests/bpf/bench.h index 3c8afa0131a3..402729c6a3ac 100644 --- a/tools/testing/selftests/bpf/bench.h +++ b/tools/testing/selftests/bpf/bench.h @@ -24,6 +24,7 @@ struct env { bool verbose; bool list; bool affinity; + bool quiet; int consumer_cnt; int producer_cnt; struct cpu_set prod_cpus; diff --git a/tools/testing/selftests/bpf/benchs/bench_local_storage_rcu_tasks_trace.c b/tools/testing/selftests/bpf/benchs/bench_local_storage_rcu_tasks_trace.c index 4f9401ecf09c..d5eb5587f2aa 100644 --- a/tools/testing/selftests/bpf/benchs/bench_local_storage_rcu_tasks_trace.c +++ b/tools/testing/selftests/bpf/benchs/bench_local_storage_rcu_tasks_trace.c @@ -12,17 +12,14 @@ static struct { __u32 nr_procs; __u32 kthread_pid; - bool quiet; } args = { .nr_procs = 1000, .kthread_pid = 0, - .quiet = false, }; enum { ARG_NR_PROCS = 7000, ARG_KTHREAD_PID = 7001, - ARG_QUIET = 7002, }; static const struct argp_option opts[] = { @@ -30,8 +27,6 @@ static const struct argp_option opts[] = { "Set number of user processes to spin up"}, { "kthread_pid", ARG_KTHREAD_PID, "PID", 0, "Pid of rcu_tasks_trace kthread for ticks tracking"}, - { "quiet", ARG_QUIET, "{0,1}", 0, - "If true, don't report progress"}, {}, }; @@ -56,14 +51,6 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state) } args.kthread_pid = ret; break; - case ARG_QUIET: - ret = strtol(arg, NULL, 10); - if (ret < 0 || ret > 1) { - fprintf(stderr, "invalid quiet %ld\n", ret); - argp_usage(state); - } - args.quiet = ret; - break; break; default: return ARGP_ERR_UNKNOWN; @@ -230,7 +217,7 @@ static void report_progress(int iter, struct bench_res *res, long delta_ns) exit(1); } - if (args.quiet) + if (env.quiet) return; printf("Iter %d\t avg tasks_trace grace period latency\t%lf ns\n", diff --git a/tools/testing/selftests/bpf/benchs/run_bench_local_storage_rcu_tasks_trace.sh b/tools/testing/selftests/bpf/benchs/run_bench_local_storage_rcu_tasks_trace.sh index 5dac1f02892c..3e8a969f2096 100755 --- a/tools/testing/selftests/bpf/benchs/run_bench_local_storage_rcu_tasks_trace.sh +++ b/tools/testing/selftests/bpf/benchs/run_bench_local_storage_rcu_tasks_trace.sh @@ -8,4 +8,4 @@ if [ -z $kthread_pid ]; then exit 1 fi -./bench --nr_procs 15000 --kthread_pid $kthread_pid -d 600 --quiet 1 local-storage-tasks-trace +./bench --nr_procs 15000 --kthread_pid $kthread_pid -d 600 --quiet local-storage-tasks-trace -- cgit v1.2.3 From a237dda05e9101404a634ac53ee65c8f8c8fce58 Mon Sep 17 00:00:00 2001 From: Anton Protopopov Date: Mon, 13 Feb 2023 09:15:18 +0000 Subject: selftest/bpf/benchs: Print less if the quiet option is set The bench utility will print "Setting up benchmark '%s'..." and "Benchmark '%s' started." on startup to stdout. Suppress this output if the --quiet option is given. This makes it simpler to parse the benchmark output with a script.
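For instance (an illustrative invocation, not from the patch), a wrapper script can now capture only the measurement lines with something like summary=$(./bench --quiet -d 10 bpf-hashmap-full-update), without having to filter out the two status lines first.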
Signed-off-by: Anton Protopopov Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20230213091519.1202813-7-aspsk@isovalent.com --- tools/testing/selftests/bpf/bench.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/bench.c b/tools/testing/selftests/bpf/bench.c index 23c24c346130..767ca679ee67 100644 --- a/tools/testing/selftests/bpf/bench.c +++ b/tools/testing/selftests/bpf/bench.c @@ -577,7 +577,8 @@ static void setup_benchmark(void) { int i, err; - printf("Setting up benchmark '%s'...\n", bench->name); + if (!env.quiet) + printf("Setting up benchmark '%s'...\n", bench->name); state.producers = calloc(env.producer_cnt, sizeof(*state.producers)); state.consumers = calloc(env.consumer_cnt, sizeof(*state.consumers)); @@ -623,7 +624,8 @@ static void setup_benchmark(void) next_cpu(&env.prod_cpus)); } - printf("Benchmark '%s' started.\n", bench->name); + if (!env.quiet) + printf("Benchmark '%s' started.\n", bench->name); } static pthread_mutex_t bench_done_mtx = PTHREAD_MUTEX_INITIALIZER; -- cgit v1.2.3 From f371f2dc53d107af25171f29c852a3908ee0afb6 Mon Sep 17 00:00:00 2001 From: Anton Protopopov Date: Mon, 13 Feb 2023 09:15:19 +0000 Subject: selftest/bpf/benchs: Add benchmark for hashmap lookups MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a new benchmark which measures hashmap lookup operations speed. A user can control the following parameters of the benchmark: * key_size (max 1024): the key size to use * max_entries: the hashmap max entries * nr_entries: the number of entries to insert/lookup * nr_loops: the number of loops for the benchmark * map_flags The hashmap flags passed to BPF_MAP_CREATE The BPF program performing the benchmarks calls two nested bpf_loop: bpf_loop(nr_loops/nr_entries) bpf_loop(nr_entries) bpf_map_lookup() So the nr_loops determines the number of actual map lookups. All lookups are successful. 
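For example, with --nr_loops=1000000 and --nr_entries=4096 (the first row of the sample output below), the outer loop runs 1000000/4096 = 244 times (integer division, matching the nr_loops / nr_entries computation in the setup code), so 244 * 4096 = 999424 map lookups are actually timed, close to but not exactly the requested nr_loops.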
Example (the output is generated on a AMD Ryzen 9 3950X machine): for nr_entries in `seq 4096 4096 65536`; do echo -n "$((nr_entries*100/65536))% full: "; sudo ./bench -d2 -a bpf-hashmap-lookup --key_size=4 --nr_entries=$nr_entries --max_entries=65536 --nr_loops=1000000 --map_flags=0x40 | grep cpu; done 6% full: cpu01: lookup 50.739M ± 0.018M events/sec (approximated from 32 samples of ~19ms) 12% full: cpu01: lookup 47.751M ± 0.015M events/sec (approximated from 32 samples of ~20ms) 18% full: cpu01: lookup 45.153M ± 0.013M events/sec (approximated from 32 samples of ~22ms) 25% full: cpu01: lookup 43.826M ± 0.014M events/sec (approximated from 32 samples of ~22ms) 31% full: cpu01: lookup 41.971M ± 0.012M events/sec (approximated from 32 samples of ~23ms) 37% full: cpu01: lookup 41.034M ± 0.015M events/sec (approximated from 32 samples of ~24ms) 43% full: cpu01: lookup 39.946M ± 0.012M events/sec (approximated from 32 samples of ~25ms) 50% full: cpu01: lookup 38.256M ± 0.014M events/sec (approximated from 32 samples of ~26ms) 56% full: cpu01: lookup 36.580M ± 0.018M events/sec (approximated from 32 samples of ~27ms) 62% full: cpu01: lookup 36.252M ± 0.012M events/sec (approximated from 32 samples of ~27ms) 68% full: cpu01: lookup 35.200M ± 0.012M events/sec (approximated from 32 samples of ~28ms) 75% full: cpu01: lookup 34.061M ± 0.009M events/sec (approximated from 32 samples of ~29ms) 81% full: cpu01: lookup 34.374M ± 0.010M events/sec (approximated from 32 samples of ~29ms) 87% full: cpu01: lookup 33.244M ± 0.011M events/sec (approximated from 32 samples of ~30ms) 93% full: cpu01: lookup 32.182M ± 0.013M events/sec (approximated from 32 samples of ~31ms) 100% full: cpu01: lookup 31.497M ± 0.016M events/sec (approximated from 32 samples of ~31ms) Signed-off-by: Anton Protopopov Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20230213091519.1202813-8-aspsk@isovalent.com --- tools/testing/selftests/bpf/Makefile | 5 +- tools/testing/selftests/bpf/bench.c | 4 + .../bpf/benchs/bench_bpf_hashmap_lookup.c | 283 +++++++++++++++++++++ .../selftests/bpf/progs/bpf_hashmap_lookup.c | 63 +++++ 4 files changed, 354 insertions(+), 1 deletion(-) create mode 100644 tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_lookup.c create mode 100644 tools/testing/selftests/bpf/progs/bpf_hashmap_lookup.c (limited to 'tools') diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 521933bc15fe..b677dcd0b77a 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -638,6 +638,7 @@ $(OUTPUT)/bench_strncmp.o: $(OUTPUT)/strncmp_bench.skel.h $(OUTPUT)/bench_bpf_hashmap_full_update.o: $(OUTPUT)/bpf_hashmap_full_update_bench.skel.h $(OUTPUT)/bench_local_storage.o: $(OUTPUT)/local_storage_bench.skel.h $(OUTPUT)/bench_local_storage_rcu_tasks_trace.o: $(OUTPUT)/local_storage_rcu_tasks_trace_bench.skel.h +$(OUTPUT)/bench_bpf_hashmap_lookup.o: $(OUTPUT)/bpf_hashmap_lookup.skel.h $(OUTPUT)/bench.o: bench.h testing_helpers.h $(BPFOBJ) $(OUTPUT)/bench: LDLIBS += -lm $(OUTPUT)/bench: $(OUTPUT)/bench.o \ @@ -652,7 +653,9 @@ $(OUTPUT)/bench: $(OUTPUT)/bench.o \ $(OUTPUT)/bench_strncmp.o \ $(OUTPUT)/bench_bpf_hashmap_full_update.o \ $(OUTPUT)/bench_local_storage.o \ - $(OUTPUT)/bench_local_storage_rcu_tasks_trace.o + $(OUTPUT)/bench_local_storage_rcu_tasks_trace.o \ + $(OUTPUT)/bench_bpf_hashmap_lookup.o \ + # $(call msg,BINARY,,$@) $(Q)$(CC) $(CFLAGS) $(LDFLAGS) $(filter %.a %.o,$^) $(LDLIBS) -o $@ diff --git 
a/tools/testing/selftests/bpf/bench.c b/tools/testing/selftests/bpf/bench.c index 767ca679ee67..0b2a53bb8460 100644 --- a/tools/testing/selftests/bpf/bench.c +++ b/tools/testing/selftests/bpf/bench.c @@ -277,6 +277,7 @@ extern struct argp bench_bpf_loop_argp; extern struct argp bench_local_storage_argp; extern struct argp bench_local_storage_rcu_tasks_trace_argp; extern struct argp bench_strncmp_argp; +extern struct argp bench_hashmap_lookup_argp; static const struct argp_child bench_parsers[] = { { &bench_ringbufs_argp, 0, "Ring buffers benchmark", 0 }, @@ -286,6 +287,7 @@ static const struct argp_child bench_parsers[] = { { &bench_strncmp_argp, 0, "bpf_strncmp helper benchmark", 0 }, { &bench_local_storage_rcu_tasks_trace_argp, 0, "local_storage RCU Tasks Trace slowdown benchmark", 0 }, + { &bench_hashmap_lookup_argp, 0, "Hashmap lookup benchmark", 0 }, {}, }; @@ -512,6 +514,7 @@ extern const struct bench bench_local_storage_cache_seq_get; extern const struct bench bench_local_storage_cache_interleaved_get; extern const struct bench bench_local_storage_cache_hashmap_control; extern const struct bench bench_local_storage_tasks_trace; +extern const struct bench bench_bpf_hashmap_lookup; static const struct bench *benchs[] = { &bench_count_global, @@ -551,6 +554,7 @@ static const struct bench *benchs[] = { &bench_local_storage_cache_interleaved_get, &bench_local_storage_cache_hashmap_control, &bench_local_storage_tasks_trace, + &bench_bpf_hashmap_lookup, }; static void find_benchmark(void) diff --git a/tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_lookup.c b/tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_lookup.c new file mode 100644 index 000000000000..8dbb02f75cff --- /dev/null +++ b/tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_lookup.c @@ -0,0 +1,283 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Isovalent */ + +#include +#include +#include "bench.h" +#include "bpf_hashmap_lookup.skel.h" +#include "bpf_util.h" + +/* BPF triggering benchmarks */ +static struct ctx { + struct bpf_hashmap_lookup *skel; +} ctx; + +/* only available to kernel, so define it here */ +#define BPF_MAX_LOOPS (1<<23) + +#define MAX_KEY_SIZE 1024 /* the size of the key map */ + +static struct { + __u32 key_size; + __u32 map_flags; + __u32 max_entries; + __u32 nr_entries; + __u32 nr_loops; +} args = { + .key_size = 4, + .map_flags = 0, + .max_entries = 1000, + .nr_entries = 500, + .nr_loops = 1000000, +}; + +enum { + ARG_KEY_SIZE = 8001, + ARG_MAP_FLAGS, + ARG_MAX_ENTRIES, + ARG_NR_ENTRIES, + ARG_NR_LOOPS, +}; + +static const struct argp_option opts[] = { + { "key_size", ARG_KEY_SIZE, "KEY_SIZE", 0, + "The hashmap key size (max 1024)"}, + { "map_flags", ARG_MAP_FLAGS, "MAP_FLAGS", 0, + "The hashmap flags passed to BPF_MAP_CREATE"}, + { "max_entries", ARG_MAX_ENTRIES, "MAX_ENTRIES", 0, + "The hashmap max entries"}, + { "nr_entries", ARG_NR_ENTRIES, "NR_ENTRIES", 0, + "The number of entries to insert/lookup"}, + { "nr_loops", ARG_NR_LOOPS, "NR_LOOPS", 0, + "The number of loops for the benchmark"}, + {}, +}; + +static error_t parse_arg(int key, char *arg, struct argp_state *state) +{ + long ret; + + switch (key) { + case ARG_KEY_SIZE: + ret = strtol(arg, NULL, 10); + if (ret < 1 || ret > MAX_KEY_SIZE) { + fprintf(stderr, "invalid key_size"); + argp_usage(state); + } + args.key_size = ret; + break; + case ARG_MAP_FLAGS: + ret = strtol(arg, NULL, 0); + if (ret < 0 || ret > UINT_MAX) { + fprintf(stderr, "invalid map_flags"); + argp_usage(state); + } + args.map_flags = ret; + break; + 
case ARG_MAX_ENTRIES: + ret = strtol(arg, NULL, 10); + if (ret < 1 || ret > UINT_MAX) { + fprintf(stderr, "invalid max_entries"); + argp_usage(state); + } + args.max_entries = ret; + break; + case ARG_NR_ENTRIES: + ret = strtol(arg, NULL, 10); + if (ret < 1 || ret > UINT_MAX) { + fprintf(stderr, "invalid nr_entries"); + argp_usage(state); + } + args.nr_entries = ret; + break; + case ARG_NR_LOOPS: + ret = strtol(arg, NULL, 10); + if (ret < 1 || ret > BPF_MAX_LOOPS) { + fprintf(stderr, "invalid nr_loops: %ld (min=1 max=%u)\n", + ret, BPF_MAX_LOOPS); + argp_usage(state); + } + args.nr_loops = ret; + break; + default: + return ARGP_ERR_UNKNOWN; + } + + return 0; +} + +const struct argp bench_hashmap_lookup_argp = { + .options = opts, + .parser = parse_arg, +}; + +static void validate(void) +{ + if (env.consumer_cnt != 1) { + fprintf(stderr, "benchmark doesn't support multi-consumer!\n"); + exit(1); + } + + if (args.nr_entries > args.max_entries) { + fprintf(stderr, "args.nr_entries is too big! (max %u, got %u)\n", + args.max_entries, args.nr_entries); + exit(1); + } +} + +static void *producer(void *input) +{ + while (true) { + /* trigger the bpf program */ + syscall(__NR_getpgid); + } + return NULL; +} + +static void *consumer(void *input) +{ + return NULL; +} + +static void measure(struct bench_res *res) +{ +} + +static inline void patch_key(u32 i, u32 *key) +{ +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + *key = i + 1; +#else + *key = __builtin_bswap32(i + 1); +#endif + /* the rest of key is random */ +} + +static void setup(void) +{ + struct bpf_link *link; + int map_fd; + int ret; + int i; + + setup_libbpf(); + + ctx.skel = bpf_hashmap_lookup__open(); + if (!ctx.skel) { + fprintf(stderr, "failed to open skeleton\n"); + exit(1); + } + + bpf_map__set_max_entries(ctx.skel->maps.hash_map_bench, args.max_entries); + bpf_map__set_key_size(ctx.skel->maps.hash_map_bench, args.key_size); + bpf_map__set_value_size(ctx.skel->maps.hash_map_bench, 8); + bpf_map__set_map_flags(ctx.skel->maps.hash_map_bench, args.map_flags); + + ctx.skel->bss->nr_entries = args.nr_entries; + ctx.skel->bss->nr_loops = args.nr_loops / args.nr_entries; + + if (args.key_size > 4) { + for (i = 1; i < args.key_size/4; i++) + ctx.skel->bss->key[i] = 2654435761 * i; + } + + ret = bpf_hashmap_lookup__load(ctx.skel); + if (ret) { + bpf_hashmap_lookup__destroy(ctx.skel); + fprintf(stderr, "failed to load map: %s", strerror(-ret)); + exit(1); + } + + /* fill in the hash_map */ + map_fd = bpf_map__fd(ctx.skel->maps.hash_map_bench); + for (u64 i = 0; i < args.nr_entries; i++) { + patch_key(i, ctx.skel->bss->key); + bpf_map_update_elem(map_fd, ctx.skel->bss->key, &i, BPF_ANY); + } + + link = bpf_program__attach(ctx.skel->progs.benchmark); + if (!link) { + fprintf(stderr, "failed to attach program!\n"); + exit(1); + } +} + +static inline double events_from_time(u64 time) +{ + if (time) + return args.nr_loops * 1000000000llu / time / 1000000.0L; + + return 0; +} + +static int compute_events(u64 *times, double *events_mean, double *events_stddev, u64 *mean_time) +{ + int i, n = 0; + + *events_mean = 0; + *events_stddev = 0; + *mean_time = 0; + + for (i = 0; i < 32; i++) { + if (!times[i]) + break; + *mean_time += times[i]; + *events_mean += events_from_time(times[i]); + n += 1; + } + if (!n) + return 0; + + *mean_time /= n; + *events_mean /= n; + + if (n > 1) { + for (i = 0; i < n; i++) { + double events_i = *events_mean - events_from_time(times[i]); + *events_stddev += events_i * events_i / (n - 1); + } + *events_stddev = 
sqrt(*events_stddev); + } + + return n; +} + +static void hashmap_report_final(struct bench_res res[], int res_cnt) +{ + unsigned int nr_cpus = bpf_num_possible_cpus(); + double events_mean, events_stddev; + u64 mean_time; + int i, n; + + for (i = 0; i < nr_cpus; i++) { + n = compute_events(ctx.skel->bss->percpu_times[i], &events_mean, + &events_stddev, &mean_time); + if (n == 0) + continue; + + if (env.quiet) { + /* we expect only one cpu to be present */ + if (env.affinity) + printf("%.3lf\n", events_mean); + else + printf("cpu%02d %.3lf\n", i, events_mean); + } else { + printf("cpu%02d: lookup %.3lfM ± %.3lfM events/sec" + " (approximated from %d samples of ~%lums)\n", + i, events_mean, 2*events_stddev, + n, mean_time / 1000000); + } + } +} + +const struct bench bench_bpf_hashmap_lookup = { + .name = "bpf-hashmap-lookup", + .argp = &bench_hashmap_lookup_argp, + .validate = validate, + .setup = setup, + .producer_thread = producer, + .consumer_thread = consumer, + .measure = measure, + .report_progress = NULL, + .report_final = hashmap_report_final, +}; diff --git a/tools/testing/selftests/bpf/progs/bpf_hashmap_lookup.c b/tools/testing/selftests/bpf/progs/bpf_hashmap_lookup.c new file mode 100644 index 000000000000..1eb74ddca414 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_hashmap_lookup.c @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Isovalent */ + +#include "vmlinux.h" + +#include +#include "bpf_misc.h" + +char _license[] SEC("license") = "GPL"; + +struct { + __uint(type, BPF_MAP_TYPE_HASH); +} hash_map_bench SEC(".maps"); + +/* The number of slots to store times */ +#define NR_SLOTS 32 +#define NR_CPUS 256 +#define CPU_MASK (NR_CPUS-1) + +/* Configured by userspace */ +u64 nr_entries; +u64 nr_loops; +u32 __attribute__((__aligned__(8))) key[NR_CPUS]; + +/* Filled by us */ +u64 __attribute__((__aligned__(256))) percpu_times_index[NR_CPUS]; +u64 __attribute__((__aligned__(256))) percpu_times[NR_CPUS][NR_SLOTS]; + +static inline void patch_key(u32 i) +{ +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + key[0] = i + 1; +#else + key[0] = __builtin_bswap32(i + 1); +#endif + /* the rest of key is random and is configured by userspace */ +} + +static int lookup_callback(__u32 index, u32 *unused) +{ + patch_key(index); + return bpf_map_lookup_elem(&hash_map_bench, key) ? 0 : 1; +} + +static int loop_lookup_callback(__u32 index, u32 *unused) +{ + return bpf_loop(nr_entries, lookup_callback, NULL, 0) ? 0 : 1; +} + +SEC("fentry/" SYS_PREFIX "sys_getpgid") +int benchmark(void *ctx) +{ + u32 cpu = bpf_get_smp_processor_id(); + u32 times_index; + u64 start_time; + + times_index = percpu_times_index[cpu & CPU_MASK] % NR_SLOTS; + start_time = bpf_ktime_get_ns(); + bpf_loop(nr_loops, loop_lookup_callback, NULL, 0); + percpu_times[cpu & CPU_MASK][times_index] = bpf_ktime_get_ns() - start_time; + percpu_times_index[cpu & CPU_MASK] += 1; + return 0; +} -- cgit v1.2.3 From 6c20822fada1b8adb77fa450d03a0d449686a4a9 Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Wed, 15 Feb 2023 19:54:40 +0100 Subject: bpf, test_run: fix &xdp_frame misplacement for LIVE_FRAMES MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit &xdp_buff and &xdp_frame are bound in a way that xdp_buff->data_hard_start == xdp_frame It's always the case and e.g. xdp_convert_buff_to_frame() relies on this. 
IOW, the following: for (u32 i = 0; i < 0xdead; i++) { xdpf = xdp_convert_buff_to_frame(&xdp); xdp_convert_frame_to_buff(xdpf, &xdp); } shouldn't ever modify @xdpf's contents or the pointer itself. However, "live packet" code wrongly treats &xdp_frame as part of its context placed *before* the data_hard_start. With such flow, data_hard_start is sizeof(*xdpf) off to the right and no longer points to the XDP frame. Instead of replacing `sizeof(ctx)` with `offsetof(ctx, xdpf)` in several places and praying that there are no more miscalcs left somewhere in the code, unionize ::frm with ::data in a flex array, so that both starts pointing to the actual data_hard_start and the XDP frame actually starts being a part of it, i.e. a part of the headroom, not the context. A nice side effect is that the maximum frame size for this mode gets increased by 40 bytes, as xdp_buff::frame_sz includes everything from data_hard_start (-> includes xdpf already) to the end of XDP/skb shared info. Also update %MAX_PKT_SIZE accordingly in the selftests code. Leave it hardcoded for 64 bit && 4k pages, it can be made more flexible later on. Minor: align `&head->data` with how `head->frm` is assigned for consistency. Minor #2: rename 'frm' to 'frame' in &xdp_page_head while at it for clarity. (was found while testing XDP traffic generator on ice, which calls xdp_convert_frame_to_buff() for each XDP frame) Fixes: b530e9e1063e ("bpf: Add "live packet" mode for XDP in BPF_PROG_RUN") Acked-by: Toke Høiland-Jørgensen Signed-off-by: Alexander Lobakin Link: https://lore.kernel.org/r/20230215185440.4126672-1-aleksander.lobakin@intel.com Signed-off-by: Martin KaFai Lau --- net/bpf/test_run.c | 29 +++++++++++++++++----- .../selftests/bpf/prog_tests/xdp_do_redirect.c | 7 +++--- 2 files changed, 27 insertions(+), 9 deletions(-) (limited to 'tools') diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index b766a84c8536..1ab396a2b87f 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -97,8 +97,11 @@ reset: struct xdp_page_head { struct xdp_buff orig_ctx; struct xdp_buff ctx; - struct xdp_frame frm; - u8 data[]; + union { + /* ::data_hard_start starts here */ + DECLARE_FLEX_ARRAY(struct xdp_frame, frame); + DECLARE_FLEX_ARRAY(u8, data); + }; }; struct xdp_test_data { @@ -116,6 +119,20 @@ struct xdp_test_data { #define TEST_XDP_FRAME_SIZE (PAGE_SIZE - sizeof(struct xdp_page_head)) #define TEST_XDP_MAX_BATCH 256 +#if BITS_PER_LONG == 64 && PAGE_SIZE == SZ_4K +/* tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c:%MAX_PKT_SIZE + * must be updated accordingly when any of these changes, otherwise BPF + * selftests will fail. 
+ */ +#ifdef __s390x__ +#define TEST_MAX_PKT_SIZE 3216 +#else +#define TEST_MAX_PKT_SIZE 3408 +#endif +static_assert(SKB_WITH_OVERHEAD(TEST_XDP_FRAME_SIZE - XDP_PACKET_HEADROOM) == + TEST_MAX_PKT_SIZE); +#endif + static void xdp_test_run_init_page(struct page *page, void *arg) { struct xdp_page_head *head = phys_to_virt(page_to_phys(page)); @@ -132,8 +149,8 @@ static void xdp_test_run_init_page(struct page *page, void *arg) headroom -= meta_len; new_ctx = &head->ctx; - frm = &head->frm; - data = &head->data; + frm = head->frame; + data = head->data; memcpy(data + headroom, orig_ctx->data_meta, frm_len); xdp_init_buff(new_ctx, TEST_XDP_FRAME_SIZE, &xdp->rxq); @@ -223,7 +240,7 @@ static void reset_ctx(struct xdp_page_head *head) head->ctx.data = head->orig_ctx.data; head->ctx.data_meta = head->orig_ctx.data_meta; head->ctx.data_end = head->orig_ctx.data_end; - xdp_update_frame_from_buff(&head->ctx, &head->frm); + xdp_update_frame_from_buff(&head->ctx, head->frame); } static int xdp_recv_frames(struct xdp_frame **frames, int nframes, @@ -285,7 +302,7 @@ static int xdp_test_run_batch(struct xdp_test_data *xdp, struct bpf_prog *prog, head = phys_to_virt(page_to_phys(page)); reset_ctx(head); ctx = &head->ctx; - frm = &head->frm; + frm = head->frame; xdp->frame_cnt++; act = bpf_prog_run_xdp(prog, ctx); diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c index 2666c84dbd01..7271a18ab3e2 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c @@ -65,12 +65,13 @@ static int attach_tc_prog(struct bpf_tc_hook *hook, int fd) } /* The maximum permissible size is: PAGE_SIZE - sizeof(struct xdp_page_head) - - * sizeof(struct skb_shared_info) - XDP_PACKET_HEADROOM = 3368 bytes + * SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) - XDP_PACKET_HEADROOM = + * 3408 bytes for 64-byte cacheline and 3216 for 256-byte one. */ #if defined(__s390x__) -#define MAX_PKT_SIZE 3176 +#define MAX_PKT_SIZE 3216 #else -#define MAX_PKT_SIZE 3368 +#define MAX_PKT_SIZE 3408 #endif static void test_max_pkt_size(int fd) { -- cgit v1.2.3 From 55a9ed0e16baf4d025c160d46bc1e3fac0d4cdc4 Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Wed, 15 Feb 2023 00:12:14 +0100 Subject: libbpf: Introduce bpf_{btf,link,map,prog}_get_info_by_fd() These are type-safe wrappers around bpf_obj_get_info_by_fd(). They found one problem in selftests, and are also useful for adding Memory Sanitizer annotations. 
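As a usage sketch (an illustration of the new API, not code from the patch), a caller that previously wrote:

	struct bpf_map_info info = {};
	__u32 len = sizeof(info);
	int err = bpf_obj_get_info_by_fd(map_fd, &info, &len);

can now write:

	int err = bpf_map_get_info_by_fd(map_fd, &info, &len);

so accidentally passing, say, a struct bpf_prog_info * for a map fd turns from a silent runtime mismatch into a compile-time pointer-type warning.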
Signed-off-by: Ilya Leoshkevich Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20230214231221.249277-2-iii@linux.ibm.com --- tools/lib/bpf/bpf.c | 20 ++++++++++++++++++++ tools/lib/bpf/bpf.h | 9 +++++++++ tools/lib/bpf/libbpf.map | 5 +++++ 3 files changed, 34 insertions(+) (limited to 'tools') diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index 9aff98f42a3d..e750b6f5fcc3 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -1044,6 +1044,26 @@ int bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len) return libbpf_err_errno(err); } +int bpf_prog_get_info_by_fd(int prog_fd, struct bpf_prog_info *info, __u32 *info_len) +{ + return bpf_obj_get_info_by_fd(prog_fd, info, info_len); +} + +int bpf_map_get_info_by_fd(int map_fd, struct bpf_map_info *info, __u32 *info_len) +{ + return bpf_obj_get_info_by_fd(map_fd, info, info_len); +} + +int bpf_btf_get_info_by_fd(int btf_fd, struct bpf_btf_info *info, __u32 *info_len) +{ + return bpf_obj_get_info_by_fd(btf_fd, info, info_len); +} + +int bpf_link_get_info_by_fd(int link_fd, struct bpf_link_info *info, __u32 *info_len) +{ + return bpf_obj_get_info_by_fd(link_fd, info, info_len); +} + int bpf_raw_tracepoint_open(const char *name, int prog_fd) { const size_t attr_sz = offsetofend(union bpf_attr, raw_tracepoint); diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h index 7468978d3c27..9ed9bceb4111 100644 --- a/tools/lib/bpf/bpf.h +++ b/tools/lib/bpf/bpf.h @@ -386,6 +386,15 @@ LIBBPF_API int bpf_link_get_fd_by_id(__u32 id); LIBBPF_API int bpf_link_get_fd_by_id_opts(__u32 id, const struct bpf_get_fd_by_id_opts *opts); LIBBPF_API int bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len); +/* Type-safe variants of bpf_obj_get_info_by_fd(). The callers still needs to + * pass info_len, which should normally be + * sizeof(struct bpf_{prog,map,btf,link}_info), in order to be compatible with + * different libbpf and kernel versions. + */ +LIBBPF_API int bpf_prog_get_info_by_fd(int prog_fd, struct bpf_prog_info *info, __u32 *info_len); +LIBBPF_API int bpf_map_get_info_by_fd(int map_fd, struct bpf_map_info *info, __u32 *info_len); +LIBBPF_API int bpf_btf_get_info_by_fd(int btf_fd, struct bpf_btf_info *info, __u32 *info_len); +LIBBPF_API int bpf_link_get_info_by_fd(int link_fd, struct bpf_link_info *info, __u32 *info_len); struct bpf_prog_query_opts { size_t sz; /* size of this struct for forward/backward compatibility */ diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index 11c36a3c1a9f..50dde1f6521e 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -384,4 +384,9 @@ LIBBPF_1.1.0 { } LIBBPF_1.0.0; LIBBPF_1.2.0 { + global: + bpf_btf_get_info_by_fd; + bpf_link_get_info_by_fd; + bpf_map_get_info_by_fd; + bpf_prog_get_info_by_fd; } LIBBPF_1.1.0; -- cgit v1.2.3 From 629dfc660cae86a6a48d19f5295226d03caae673 Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Wed, 15 Feb 2023 00:12:15 +0100 Subject: libbpf: Use bpf_{btf,link,map,prog}_get_info_by_fd() Use the new type-safe wrappers around bpf_obj_get_info_by_fd(). 
Signed-off-by: Ilya Leoshkevich Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20230214231221.249277-3-iii@linux.ibm.com --- tools/lib/bpf/btf.c | 8 ++++---- tools/lib/bpf/libbpf.c | 14 +++++++------- tools/lib/bpf/netlink.c | 2 +- tools/lib/bpf/ringbuf.c | 4 ++-- 4 files changed, 14 insertions(+), 14 deletions(-) (limited to 'tools') diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index 64841117fbb2..9181d36118d2 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -1350,9 +1350,9 @@ struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf) void *ptr; int err; - /* we won't know btf_size until we call bpf_obj_get_info_by_fd(). so + /* we won't know btf_size until we call bpf_btf_get_info_by_fd(). so * let's start with a sane default - 4KiB here - and resize it only if - * bpf_obj_get_info_by_fd() needs a bigger buffer. + * bpf_btf_get_info_by_fd() needs a bigger buffer. */ last_size = 4096; ptr = malloc(last_size); @@ -1362,7 +1362,7 @@ struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf) memset(&btf_info, 0, sizeof(btf_info)); btf_info.btf = ptr_to_u64(ptr); btf_info.btf_size = last_size; - err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len); + err = bpf_btf_get_info_by_fd(btf_fd, &btf_info, &len); if (!err && btf_info.btf_size > last_size) { void *temp_ptr; @@ -1380,7 +1380,7 @@ struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf) btf_info.btf = ptr_to_u64(ptr); btf_info.btf_size = last_size; - err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len); + err = bpf_btf_get_info_by_fd(btf_fd, &btf_info, &len); } if (err || btf_info.btf_size > last_size) { diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 35a698eb825d..05c4db355f28 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -4345,7 +4345,7 @@ int bpf_map__reuse_fd(struct bpf_map *map, int fd) char *new_name; memset(&info, 0, len); - err = bpf_obj_get_info_by_fd(fd, &info, &len); + err = bpf_map_get_info_by_fd(fd, &info, &len); if (err && errno == EINVAL) err = bpf_get_map_info_from_fdinfo(fd, &info); if (err) @@ -4729,7 +4729,7 @@ static int probe_module_btf(void) * kernel's module BTF support coincides with support for * name/name_len fields in struct bpf_btf_info. 
*/ - err = bpf_obj_get_info_by_fd(fd, &info, &len); + err = bpf_btf_get_info_by_fd(fd, &info, &len); close(fd); return !err; } @@ -4892,7 +4892,7 @@ static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd) int err; memset(&map_info, 0, map_info_len); - err = bpf_obj_get_info_by_fd(map_fd, &map_info, &map_info_len); + err = bpf_map_get_info_by_fd(map_fd, &map_info, &map_info_len); if (err && errno == EINVAL) err = bpf_get_map_info_from_fdinfo(map_fd, &map_info); if (err) { @@ -5437,7 +5437,7 @@ static int load_module_btfs(struct bpf_object *obj) info.name = ptr_to_u64(name); info.name_len = sizeof(name); - err = bpf_obj_get_info_by_fd(fd, &info, &len); + err = bpf_btf_get_info_by_fd(fd, &info, &len); if (err) { err = -errno; pr_warn("failed to get BTF object #%d info: %d\n", id, err); @@ -9030,9 +9030,9 @@ static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd) int err; memset(&info, 0, info_len); - err = bpf_obj_get_info_by_fd(attach_prog_fd, &info, &info_len); + err = bpf_prog_get_info_by_fd(attach_prog_fd, &info, &info_len); if (err) { - pr_warn("failed bpf_obj_get_info_by_fd for FD %d: %d\n", + pr_warn("failed bpf_prog_get_info_by_fd for FD %d: %d\n", attach_prog_fd, err); return err; } @@ -11741,7 +11741,7 @@ static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt, /* best-effort sanity checks */ memset(&map, 0, sizeof(map)); map_info_len = sizeof(map); - err = bpf_obj_get_info_by_fd(map_fd, &map, &map_info_len); + err = bpf_map_get_info_by_fd(map_fd, &map, &map_info_len); if (err) { err = -errno; /* if BPF_OBJ_GET_INFO_BY_FD is supported, will return diff --git a/tools/lib/bpf/netlink.c b/tools/lib/bpf/netlink.c index cb082a04ffa8..1653e7a8b0a1 100644 --- a/tools/lib/bpf/netlink.c +++ b/tools/lib/bpf/netlink.c @@ -689,7 +689,7 @@ static int tc_add_fd_and_name(struct libbpf_nla_req *req, int fd) int len, ret; memset(&info, 0, info_len); - ret = bpf_obj_get_info_by_fd(fd, &info, &info_len); + ret = bpf_prog_get_info_by_fd(fd, &info, &info_len); if (ret < 0) return ret; diff --git a/tools/lib/bpf/ringbuf.c b/tools/lib/bpf/ringbuf.c index 47855af25f3b..02199364db13 100644 --- a/tools/lib/bpf/ringbuf.c +++ b/tools/lib/bpf/ringbuf.c @@ -83,7 +83,7 @@ int ring_buffer__add(struct ring_buffer *rb, int map_fd, memset(&info, 0, sizeof(info)); - err = bpf_obj_get_info_by_fd(map_fd, &info, &len); + err = bpf_map_get_info_by_fd(map_fd, &info, &len); if (err) { err = -errno; pr_warn("ringbuf: failed to get map info for fd=%d: %d\n", @@ -359,7 +359,7 @@ static int user_ringbuf_map(struct user_ring_buffer *rb, int map_fd) memset(&info, 0, sizeof(info)); - err = bpf_obj_get_info_by_fd(map_fd, &info, &len); + err = bpf_map_get_info_by_fd(map_fd, &info, &len); if (err) { err = -errno; pr_warn("user ringbuf: failed to get map info for fd=%d: %d\n", map_fd, err); -- cgit v1.2.3 From 38f0408ef756e738387f7d8f62b8d58ca5938da4 Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Wed, 15 Feb 2023 00:12:16 +0100 Subject: bpftool: Use bpf_{btf,link,map,prog}_get_info_by_fd() Use the new type-safe wrappers around bpf_obj_get_info_by_fd(). Split the bpf_obj_get_info_by_fd() call in build_btf_type_table() in two, since knowing the type helps with the Memory Sanitizer. Improve map_parse_fd_and_info() type safety by using struct bpf_map_info * instead of void * for info. 
Signed-off-by: Ilya Leoshkevich Signed-off-by: Andrii Nakryiko Reviewed-by: Quentin Monnet Link: https://lore.kernel.org/bpf/20230214231221.249277-4-iii@linux.ibm.com --- tools/bpf/bpftool/btf.c | 13 ++++++++----- tools/bpf/bpftool/btf_dumper.c | 4 ++-- tools/bpf/bpftool/cgroup.c | 4 ++-- tools/bpf/bpftool/common.c | 13 +++++++------ tools/bpf/bpftool/link.c | 4 ++-- tools/bpf/bpftool/main.h | 3 ++- tools/bpf/bpftool/map.c | 8 ++++---- tools/bpf/bpftool/prog.c | 22 +++++++++++----------- tools/bpf/bpftool/struct_ops.c | 6 +++--- 9 files changed, 41 insertions(+), 36 deletions(-) (limited to 'tools') diff --git a/tools/bpf/bpftool/btf.c b/tools/bpf/bpftool/btf.c index 352290ba7b29..91fcb75babe3 100644 --- a/tools/bpf/bpftool/btf.c +++ b/tools/bpf/bpftool/btf.c @@ -537,7 +537,7 @@ static bool btf_is_kernel_module(__u32 btf_id) len = sizeof(btf_info); btf_info.name = ptr_to_u64(btf_name); btf_info.name_len = sizeof(btf_name); - err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len); + err = bpf_btf_get_info_by_fd(btf_fd, &btf_info, &len); close(btf_fd); if (err) { p_err("can't get BTF (ID %u) object info: %s", btf_id, strerror(errno)); @@ -606,7 +606,7 @@ static int do_dump(int argc, char **argv) if (fd < 0) return -1; - err = bpf_obj_get_info_by_fd(fd, &info, &len); + err = bpf_prog_get_info_by_fd(fd, &info, &len); if (err) { p_err("can't get prog info: %s", strerror(errno)); goto done; @@ -789,7 +789,10 @@ build_btf_type_table(struct hashmap *tab, enum bpf_obj_type type, } memset(info, 0, *len); - err = bpf_obj_get_info_by_fd(fd, info, len); + if (type == BPF_OBJ_PROG) + err = bpf_prog_get_info_by_fd(fd, info, len); + else + err = bpf_map_get_info_by_fd(fd, info, len); close(fd); if (err) { p_err("can't get %s info: %s", names[type], @@ -931,7 +934,7 @@ show_btf(int fd, struct hashmap *btf_prog_table, int err; memset(&info, 0, sizeof(info)); - err = bpf_obj_get_info_by_fd(fd, &info, &len); + err = bpf_btf_get_info_by_fd(fd, &info, &len); if (err) { p_err("can't get BTF object info: %s", strerror(errno)); return -1; @@ -943,7 +946,7 @@ show_btf(int fd, struct hashmap *btf_prog_table, info.name = ptr_to_u64(name); len = sizeof(info); - err = bpf_obj_get_info_by_fd(fd, &info, &len); + err = bpf_btf_get_info_by_fd(fd, &info, &len); if (err) { p_err("can't get BTF object info: %s", strerror(errno)); return -1; diff --git a/tools/bpf/bpftool/btf_dumper.c b/tools/bpf/bpftool/btf_dumper.c index eda71fdfe95a..e7f6ec3a8f35 100644 --- a/tools/bpf/bpftool/btf_dumper.c +++ b/tools/bpf/bpftool/btf_dumper.c @@ -57,7 +57,7 @@ static int dump_prog_id_as_func_ptr(const struct btf_dumper *d, if (prog_fd < 0) goto print; - err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len); + err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len); if (err) goto print; @@ -70,7 +70,7 @@ static int dump_prog_id_as_func_ptr(const struct btf_dumper *d, info.func_info_rec_size = finfo_rec_size; info.func_info = ptr_to_u64(&finfo); - err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len); + err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len); if (err) goto print; diff --git a/tools/bpf/bpftool/cgroup.c b/tools/bpf/bpftool/cgroup.c index b46a998d8f8d..ac846b0805b4 100644 --- a/tools/bpf/bpftool/cgroup.c +++ b/tools/bpf/bpftool/cgroup.c @@ -82,7 +82,7 @@ static void guess_vmlinux_btf_id(__u32 attach_btf_obj_id) if (fd < 0) return; - err = bpf_obj_get_info_by_fd(fd, &btf_info, &btf_len); + err = bpf_btf_get_info_by_fd(fd, &btf_info, &btf_len); if (err) goto out; @@ -108,7 +108,7 @@ static int show_bpf_prog(int id, enum 
bpf_attach_type attach_type, if (prog_fd < 0) return -1; - if (bpf_obj_get_info_by_fd(prog_fd, &info, &info_len)) { + if (bpf_prog_get_info_by_fd(prog_fd, &info, &info_len)) { close(prog_fd); return -1; } diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c index 620032042576..5a73ccf14332 100644 --- a/tools/bpf/bpftool/common.c +++ b/tools/bpf/bpftool/common.c @@ -353,7 +353,7 @@ void get_prog_full_name(const struct bpf_prog_info *prog_info, int prog_fd, info.func_info_rec_size = sizeof(finfo); info.func_info = ptr_to_u64(&finfo); - if (bpf_obj_get_info_by_fd(prog_fd, &info, &info_len)) + if (bpf_prog_get_info_by_fd(prog_fd, &info, &info_len)) goto copy_name; prog_btf = btf__load_from_kernel_by_id(info.btf_id); @@ -488,7 +488,7 @@ static int do_build_table_cb(const char *fpath, const struct stat *sb, goto out_close; memset(&pinned_info, 0, sizeof(pinned_info)); - if (bpf_obj_get_info_by_fd(fd, &pinned_info, &len)) + if (bpf_prog_get_info_by_fd(fd, &pinned_info, &len)) goto out_close; path = strdup(fpath); @@ -756,7 +756,7 @@ static int prog_fd_by_nametag(void *nametag, int **fds, bool tag) goto err_close_fds; } - err = bpf_obj_get_info_by_fd(fd, &info, &len); + err = bpf_prog_get_info_by_fd(fd, &info, &len); if (err) { p_err("can't get prog info (%u): %s", id, strerror(errno)); @@ -916,7 +916,7 @@ static int map_fd_by_name(char *name, int **fds) goto err_close_fds; } - err = bpf_obj_get_info_by_fd(fd, &info, &len); + err = bpf_map_get_info_by_fd(fd, &info, &len); if (err) { p_err("can't get map info (%u): %s", id, strerror(errno)); @@ -1026,7 +1026,8 @@ exit_free: return fd; } -int map_parse_fd_and_info(int *argc, char ***argv, void *info, __u32 *info_len) +int map_parse_fd_and_info(int *argc, char ***argv, struct bpf_map_info *info, + __u32 *info_len) { int err; int fd; @@ -1035,7 +1036,7 @@ int map_parse_fd_and_info(int *argc, char ***argv, void *info, __u32 *info_len) if (fd < 0) return -1; - err = bpf_obj_get_info_by_fd(fd, info, info_len); + err = bpf_map_get_info_by_fd(fd, info, info_len); if (err) { p_err("can't get map info: %s", strerror(errno)); close(fd); diff --git a/tools/bpf/bpftool/link.c b/tools/bpf/bpftool/link.c index 6f4cfe01cad4..f985b79cca27 100644 --- a/tools/bpf/bpftool/link.c +++ b/tools/bpf/bpftool/link.c @@ -145,7 +145,7 @@ static int get_prog_info(int prog_id, struct bpf_prog_info *info) return prog_fd; memset(info, 0, sizeof(*info)); - err = bpf_obj_get_info_by_fd(prog_fd, info, &len); + err = bpf_prog_get_info_by_fd(prog_fd, info, &len); if (err) p_err("can't get prog info: %s", strerror(errno)); close(prog_fd); @@ -327,7 +327,7 @@ static int do_show_link(int fd) memset(&info, 0, sizeof(info)); again: - err = bpf_obj_get_info_by_fd(fd, &info, &len); + err = bpf_link_get_info_by_fd(fd, &info, &len); if (err) { p_err("can't get link info: %s", strerror(errno)); diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h index a84224b6a604..0ef373cef4c7 100644 --- a/tools/bpf/bpftool/main.h +++ b/tools/bpf/bpftool/main.h @@ -168,7 +168,8 @@ int prog_parse_fd(int *argc, char ***argv); int prog_parse_fds(int *argc, char ***argv, int **fds); int map_parse_fd(int *argc, char ***argv); int map_parse_fds(int *argc, char ***argv, int **fds); -int map_parse_fd_and_info(int *argc, char ***argv, void *info, __u32 *info_len); +int map_parse_fd_and_info(int *argc, char ***argv, struct bpf_map_info *info, + __u32 *info_len); struct bpf_prog_linfo; #if defined(HAVE_LLVM_SUPPORT) || defined(HAVE_LIBBFD_SUPPORT) diff --git a/tools/bpf/bpftool/map.c 
b/tools/bpf/bpftool/map.c index 88911d3aa2d9..aaeb8939e137 100644 --- a/tools/bpf/bpftool/map.c +++ b/tools/bpf/bpftool/map.c @@ -638,7 +638,7 @@ static int do_show_subset(int argc, char **argv) if (json_output && nb_fds > 1) jsonw_start_array(json_wtr); /* root array */ for (i = 0; i < nb_fds; i++) { - err = bpf_obj_get_info_by_fd(fds[i], &info, &len); + err = bpf_map_get_info_by_fd(fds[i], &info, &len); if (err) { p_err("can't get map info: %s", strerror(errno)); @@ -708,7 +708,7 @@ static int do_show(int argc, char **argv) break; } - err = bpf_obj_get_info_by_fd(fd, &info, &len); + err = bpf_map_get_info_by_fd(fd, &info, &len); if (err) { p_err("can't get map info: %s", strerror(errno)); close(fd); @@ -764,7 +764,7 @@ static int maps_have_btf(int *fds, int nb_fds) int err, i; for (i = 0; i < nb_fds; i++) { - err = bpf_obj_get_info_by_fd(fds[i], &info, &len); + err = bpf_map_get_info_by_fd(fds[i], &info, &len); if (err) { p_err("can't get map info: %s", strerror(errno)); return -1; @@ -925,7 +925,7 @@ static int do_dump(int argc, char **argv) if (wtr && nb_fds > 1) jsonw_start_array(wtr); /* root array */ for (i = 0; i < nb_fds; i++) { - if (bpf_obj_get_info_by_fd(fds[i], &info, &len)) { + if (bpf_map_get_info_by_fd(fds[i], &info, &len)) { p_err("can't get map info: %s", strerror(errno)); break; } diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c index e87738dbffc1..afbe3ec342c8 100644 --- a/tools/bpf/bpftool/prog.c +++ b/tools/bpf/bpftool/prog.c @@ -198,7 +198,7 @@ static void show_prog_maps(int fd, __u32 num_maps) info.nr_map_ids = num_maps; info.map_ids = ptr_to_u64(map_ids); - err = bpf_obj_get_info_by_fd(fd, &info, &len); + err = bpf_prog_get_info_by_fd(fd, &info, &len); if (err || !info.nr_map_ids) return; @@ -231,7 +231,7 @@ static void *find_metadata(int prog_fd, struct bpf_map_info *map_info) memset(&prog_info, 0, sizeof(prog_info)); prog_info_len = sizeof(prog_info); - ret = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &prog_info_len); + ret = bpf_prog_get_info_by_fd(prog_fd, &prog_info, &prog_info_len); if (ret) return NULL; @@ -248,7 +248,7 @@ static void *find_metadata(int prog_fd, struct bpf_map_info *map_info) prog_info.map_ids = ptr_to_u64(map_ids); prog_info_len = sizeof(prog_info); - ret = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &prog_info_len); + ret = bpf_prog_get_info_by_fd(prog_fd, &prog_info, &prog_info_len); if (ret) goto free_map_ids; @@ -259,7 +259,7 @@ static void *find_metadata(int prog_fd, struct bpf_map_info *map_info) memset(map_info, 0, sizeof(*map_info)); map_info_len = sizeof(*map_info); - ret = bpf_obj_get_info_by_fd(map_fd, map_info, &map_info_len); + ret = bpf_map_get_info_by_fd(map_fd, map_info, &map_info_len); if (ret < 0) { close(map_fd); goto free_map_ids; @@ -580,7 +580,7 @@ static int show_prog(int fd) __u32 len = sizeof(info); int err; - err = bpf_obj_get_info_by_fd(fd, &info, &len); + err = bpf_prog_get_info_by_fd(fd, &info, &len); if (err) { p_err("can't get prog info: %s", strerror(errno)); return -1; @@ -949,7 +949,7 @@ static int do_dump(int argc, char **argv) for (i = 0; i < nb_fds; i++) { memset(&info, 0, sizeof(info)); - err = bpf_obj_get_info_by_fd(fds[i], &info, &info_len); + err = bpf_prog_get_info_by_fd(fds[i], &info, &info_len); if (err) { p_err("can't get prog info: %s", strerror(errno)); break; @@ -961,7 +961,7 @@ static int do_dump(int argc, char **argv) break; } - err = bpf_obj_get_info_by_fd(fds[i], &info, &info_len); + err = bpf_prog_get_info_by_fd(fds[i], &info, &info_len); if (err) { p_err("can't get prog 
info: %s", strerror(errno)); break; @@ -2170,9 +2170,9 @@ static char *profile_target_name(int tgt_fd) char *name = NULL; int err; - err = bpf_obj_get_info_by_fd(tgt_fd, &info, &info_len); + err = bpf_prog_get_info_by_fd(tgt_fd, &info, &info_len); if (err) { - p_err("failed to bpf_obj_get_info_by_fd for prog FD %d", tgt_fd); + p_err("failed to get info for prog FD %d", tgt_fd); goto out; } @@ -2183,7 +2183,7 @@ static char *profile_target_name(int tgt_fd) func_info_rec_size = info.func_info_rec_size; if (info.nr_func_info == 0) { - p_err("bpf_obj_get_info_by_fd for prog FD %d found 0 func_info", tgt_fd); + p_err("found 0 func_info for prog FD %d", tgt_fd); goto out; } @@ -2192,7 +2192,7 @@ static char *profile_target_name(int tgt_fd) info.func_info_rec_size = func_info_rec_size; info.func_info = ptr_to_u64(&func_info); - err = bpf_obj_get_info_by_fd(tgt_fd, &info, &info_len); + err = bpf_prog_get_info_by_fd(tgt_fd, &info, &info_len); if (err) { p_err("failed to get func_info for prog FD %d", tgt_fd); goto out; diff --git a/tools/bpf/bpftool/struct_ops.c b/tools/bpf/bpftool/struct_ops.c index 903b80ff4e9a..b389f4830e11 100644 --- a/tools/bpf/bpftool/struct_ops.c +++ b/tools/bpf/bpftool/struct_ops.c @@ -151,7 +151,7 @@ static int get_next_struct_ops_map(const char *name, int *res_fd, return -1; } - err = bpf_obj_get_info_by_fd(fd, info, &info_len); + err = bpf_map_get_info_by_fd(fd, info, &info_len); if (err) { p_err("can't get map info: %s", strerror(errno)); close(fd); @@ -262,7 +262,7 @@ static struct res do_one_id(const char *id_str, work_func func, void *data, goto done; } - if (bpf_obj_get_info_by_fd(fd, info, &info_len)) { + if (bpf_map_get_info_by_fd(fd, info, &info_len)) { p_err("can't get map info: %s", strerror(errno)); res.nr_errs++; goto done; @@ -522,7 +522,7 @@ static int do_register(int argc, char **argv) bpf_link__disconnect(link); bpf_link__destroy(link); - if (!bpf_obj_get_info_by_fd(bpf_map__fd(map), &info, + if (!bpf_map_get_info_by_fd(bpf_map__fd(map), &info, &info_len)) p_info("Registered %s %s id %u", get_kern_struct_ops_name(&info), -- cgit v1.2.3 From c5a237a4db21ca7a28518c994def39d7bd62a0d1 Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Wed, 15 Feb 2023 00:12:18 +0100 Subject: selftests/bpf: Use bpf_{btf,link,map,prog}_get_info_by_fd() Use the new type-safe wrappers around bpf_obj_get_info_by_fd(). Fix a prog/map mixup in prog_holds_map(). 
Signed-off-by: Ilya Leoshkevich Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20230214231221.249277-6-iii@linux.ibm.com --- .../selftests/bpf/map_tests/map_in_map_batch_ops.c | 2 +- tools/testing/selftests/bpf/prog_tests/bpf_iter.c | 8 ++++---- .../testing/selftests/bpf/prog_tests/bpf_obj_id.c | 20 +++++++++--------- tools/testing/selftests/bpf/prog_tests/btf.c | 24 +++++++++++----------- .../selftests/bpf/prog_tests/btf_map_in_map.c | 2 +- tools/testing/selftests/bpf/prog_tests/check_mtu.c | 2 +- .../selftests/bpf/prog_tests/enable_stats.c | 2 +- .../selftests/bpf/prog_tests/fexit_bpf2bpf.c | 14 ++++++------- .../bpf/prog_tests/flow_dissector_reattach.c | 10 ++++----- .../bpf/prog_tests/libbpf_get_fd_by_id_opts.c | 4 ++-- .../testing/selftests/bpf/prog_tests/lsm_cgroup.c | 3 ++- tools/testing/selftests/bpf/prog_tests/metadata.c | 8 ++++---- tools/testing/selftests/bpf/prog_tests/mmap.c | 2 +- tools/testing/selftests/bpf/prog_tests/perf_link.c | 2 +- tools/testing/selftests/bpf/prog_tests/pinning.c | 2 +- .../selftests/bpf/prog_tests/prog_run_opts.c | 2 +- tools/testing/selftests/bpf/prog_tests/recursion.c | 4 ++-- .../selftests/bpf/prog_tests/sockmap_basic.c | 6 +++--- .../selftests/bpf/prog_tests/task_local_storage.c | 8 ++++---- tools/testing/selftests/bpf/prog_tests/tc_bpf.c | 4 ++-- .../selftests/bpf/prog_tests/tp_attach_query.c | 5 +++-- .../selftests/bpf/prog_tests/unpriv_bpf_disabled.c | 8 ++++---- .../testing/selftests/bpf/prog_tests/verif_stats.c | 5 +++-- .../testing/selftests/bpf/prog_tests/xdp_attach.c | 4 ++-- .../selftests/bpf/prog_tests/xdp_cpumap_attach.c | 8 ++++---- .../selftests/bpf/prog_tests/xdp_devmap_attach.c | 8 ++++---- tools/testing/selftests/bpf/prog_tests/xdp_info.c | 2 +- tools/testing/selftests/bpf/prog_tests/xdp_link.c | 10 +++++---- tools/testing/selftests/bpf/test_maps.c | 2 +- .../selftests/bpf/test_skb_cgroup_id_user.c | 2 +- .../selftests/bpf/test_tcp_check_syncookie_user.c | 2 +- tools/testing/selftests/bpf/test_verifier.c | 8 ++++---- tools/testing/selftests/bpf/testing_helpers.c | 2 +- tools/testing/selftests/bpf/xdp_synproxy.c | 15 ++++++++------ 34 files changed, 109 insertions(+), 101 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/map_tests/map_in_map_batch_ops.c b/tools/testing/selftests/bpf/map_tests/map_in_map_batch_ops.c index f472d28ad11a..16f1671e4bde 100644 --- a/tools/testing/selftests/bpf/map_tests/map_in_map_batch_ops.c +++ b/tools/testing/selftests/bpf/map_tests/map_in_map_batch_ops.c @@ -18,7 +18,7 @@ static __u32 get_map_id_from_fd(int map_fd) uint32_t info_len = sizeof(map_info); int ret; - ret = bpf_obj_get_info_by_fd(map_fd, &map_info, &info_len); + ret = bpf_map_get_info_by_fd(map_fd, &map_info, &info_len); CHECK(ret < 0, "Finding map info failed", "error:%s\n", strerror(errno)); diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c index 3af6450763e9..1f02168103dd 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c @@ -195,8 +195,8 @@ static void check_bpf_link_info(const struct bpf_program *prog) return; info_len = sizeof(info); - err = bpf_obj_get_info_by_fd(bpf_link__fd(link), &info, &info_len); - ASSERT_OK(err, "bpf_obj_get_info_by_fd"); + err = bpf_link_get_info_by_fd(bpf_link__fd(link), &info, &info_len); + ASSERT_OK(err, "bpf_link_get_info_by_fd"); ASSERT_EQ(info.iter.task.tid, getpid(), "check_task_tid"); bpf_link__destroy(link); @@ -684,13 +684,13 
@@ static void test_overflow(bool test_e2big_overflow, bool ret1) /* setup filtering map_id in bpf program */ map_info_len = sizeof(map_info); - err = bpf_obj_get_info_by_fd(map1_fd, &map_info, &map_info_len); + err = bpf_map_get_info_by_fd(map1_fd, &map_info, &map_info_len); if (CHECK(err, "get_map_info", "get map info failed: %s\n", strerror(errno))) goto free_map2; skel->bss->map1_id = map_info.id; - err = bpf_obj_get_info_by_fd(map2_fd, &map_info, &map_info_len); + err = bpf_map_get_info_by_fd(map2_fd, &map_info, &map_info_len); if (CHECK(err, "get_map_info", "get map info failed: %s\n", strerror(errno))) goto free_map2; diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c b/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c index e1c1e521cca2..675b90b15280 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c @@ -44,7 +44,7 @@ void serial_test_bpf_obj_id(void) CHECK(err >= 0 || errno != ENOENT, "get-fd-by-notexist-link-id", "err %d errno %d\n", err, errno); - /* Check bpf_obj_get_info_by_fd() */ + /* Check bpf_map_get_info_by_fd() */ bzero(zeros, sizeof(zeros)); for (i = 0; i < nr_iters; i++) { now = time(NULL); @@ -79,7 +79,7 @@ void serial_test_bpf_obj_id(void) /* Check getting map info */ info_len = sizeof(struct bpf_map_info) * 2; bzero(&map_infos[i], info_len); - err = bpf_obj_get_info_by_fd(map_fds[i], &map_infos[i], + err = bpf_map_get_info_by_fd(map_fds[i], &map_infos[i], &info_len); if (CHECK(err || map_infos[i].type != BPF_MAP_TYPE_ARRAY || @@ -118,8 +118,8 @@ void serial_test_bpf_obj_id(void) err = clock_gettime(CLOCK_BOOTTIME, &boot_time_ts); if (CHECK_FAIL(err)) goto done; - err = bpf_obj_get_info_by_fd(prog_fds[i], &prog_infos[i], - &info_len); + err = bpf_prog_get_info_by_fd(prog_fds[i], &prog_infos[i], + &info_len); load_time = (real_time_ts.tv_sec - boot_time_ts.tv_sec) + (prog_infos[i].load_time / nsec_per_sec); if (CHECK(err || @@ -161,8 +161,8 @@ void serial_test_bpf_obj_id(void) bzero(&link_infos[i], info_len); link_infos[i].raw_tracepoint.tp_name = ptr_to_u64(&tp_name); link_infos[i].raw_tracepoint.tp_name_len = sizeof(tp_name); - err = bpf_obj_get_info_by_fd(bpf_link__fd(links[i]), - &link_infos[i], &info_len); + err = bpf_link_get_info_by_fd(bpf_link__fd(links[i]), + &link_infos[i], &info_len); if (CHECK(err || link_infos[i].type != BPF_LINK_TYPE_RAW_TRACEPOINT || link_infos[i].prog_id != prog_infos[i].id || @@ -217,7 +217,7 @@ void serial_test_bpf_obj_id(void) * prog_info.map_ids = NULL */ prog_info.nr_map_ids = 1; - err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len); + err = bpf_prog_get_info_by_fd(prog_fd, &prog_info, &info_len); if (CHECK(!err || errno != EFAULT, "get-prog-fd-bad-nr-map-ids", "err %d errno %d(%d)", err, errno, EFAULT)) @@ -228,7 +228,7 @@ void serial_test_bpf_obj_id(void) saved_map_id = *(int *)((long)prog_infos[i].map_ids); prog_info.map_ids = prog_infos[i].map_ids; prog_info.nr_map_ids = 2; - err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len); + err = bpf_prog_get_info_by_fd(prog_fd, &prog_info, &info_len); prog_infos[i].jited_prog_insns = 0; prog_infos[i].xlated_prog_insns = 0; CHECK(err || info_len != sizeof(struct bpf_prog_info) || @@ -277,7 +277,7 @@ void serial_test_bpf_obj_id(void) if (CHECK_FAIL(err)) goto done; - err = bpf_obj_get_info_by_fd(map_fd, &map_info, &info_len); + err = bpf_map_get_info_by_fd(map_fd, &map_info, &info_len); CHECK(err || info_len != sizeof(struct bpf_map_info) || memcmp(&map_info, &map_infos[i], info_len) 
|| array_value != array_magic_value, @@ -322,7 +322,7 @@ void serial_test_bpf_obj_id(void) nr_id_found++; - err = bpf_obj_get_info_by_fd(link_fd, &link_info, &info_len); + err = bpf_link_get_info_by_fd(link_fd, &link_info, &info_len); cmp_res = memcmp(&link_info, &link_infos[i], offsetof(struct bpf_link_info, raw_tracepoint)); CHECK(err || info_len != sizeof(link_info) || cmp_res, diff --git a/tools/testing/selftests/bpf/prog_tests/btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c index de1b5b9eb93a..cbb600be943d 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf.c +++ b/tools/testing/selftests/bpf/prog_tests/btf.c @@ -4422,7 +4422,7 @@ static int test_big_btf_info(unsigned int test_num) info->btf = ptr_to_u64(user_btf); info->btf_size = raw_btf_size; - err = bpf_obj_get_info_by_fd(btf_fd, info, &info_len); + err = bpf_btf_get_info_by_fd(btf_fd, info, &info_len); if (CHECK(!err, "!err")) { err = -1; goto done; @@ -4435,7 +4435,7 @@ static int test_big_btf_info(unsigned int test_num) * to userspace. */ info_garbage.garbage = 0; - err = bpf_obj_get_info_by_fd(btf_fd, info, &info_len); + err = bpf_btf_get_info_by_fd(btf_fd, info, &info_len); if (CHECK(err || info_len != sizeof(*info), "err:%d errno:%d info_len:%u sizeof(*info):%zu", err, errno, info_len, sizeof(*info))) { @@ -4499,7 +4499,7 @@ static int test_btf_id(unsigned int test_num) /* Test BPF_OBJ_GET_INFO_BY_ID on btf_id */ info_len = sizeof(info[0]); - err = bpf_obj_get_info_by_fd(btf_fd[0], &info[0], &info_len); + err = bpf_btf_get_info_by_fd(btf_fd[0], &info[0], &info_len); if (CHECK(err, "errno:%d", errno)) { err = -1; goto done; @@ -4512,7 +4512,7 @@ static int test_btf_id(unsigned int test_num) } ret = 0; - err = bpf_obj_get_info_by_fd(btf_fd[1], &info[1], &info_len); + err = bpf_btf_get_info_by_fd(btf_fd[1], &info[1], &info_len); if (CHECK(err || info[0].id != info[1].id || info[0].btf_size != info[1].btf_size || (ret = memcmp(user_btf[0], user_btf[1], info[0].btf_size)), @@ -4535,7 +4535,7 @@ static int test_btf_id(unsigned int test_num) } info_len = sizeof(map_info); - err = bpf_obj_get_info_by_fd(map_fd, &map_info, &info_len); + err = bpf_map_get_info_by_fd(map_fd, &map_info, &info_len); if (CHECK(err || map_info.btf_id != info[0].id || map_info.btf_key_type_id != 1 || map_info.btf_value_type_id != 2, "err:%d errno:%d info.id:%u btf_id:%u btf_key_type_id:%u btf_value_type_id:%u", @@ -4638,7 +4638,7 @@ static void do_test_get_info(unsigned int test_num) info.btf_size = user_btf_size; ret = 0; - err = bpf_obj_get_info_by_fd(btf_fd, &info, &info_len); + err = bpf_btf_get_info_by_fd(btf_fd, &info, &info_len); if (CHECK(err || !info.id || info_len != sizeof(info) || info.btf_size != raw_btf_size || (ret = memcmp(raw_btf, user_btf, expected_nbytes)), @@ -4755,7 +4755,7 @@ static void do_test_file(unsigned int test_num) /* get necessary program info */ info_len = sizeof(struct bpf_prog_info); - err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len); + err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len); if (CHECK(err < 0, "invalid get info (1st) errno:%d", errno)) { fprintf(stderr, "%s\n", btf_log_buf); @@ -4787,7 +4787,7 @@ static void do_test_file(unsigned int test_num) info.func_info_rec_size = rec_size; info.func_info = ptr_to_u64(func_info); - err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len); + err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len); if (CHECK(err < 0, "invalid get info (2nd) errno:%d", errno)) { fprintf(stderr, "%s\n", btf_log_buf); @@ -6405,7 +6405,7 @@ static int test_get_finfo(const 
struct prog_info_raw_test *test, /* get necessary lens */ info_len = sizeof(struct bpf_prog_info); - err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len); + err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len); if (CHECK(err < 0, "invalid get info (1st) errno:%d", errno)) { fprintf(stderr, "%s\n", btf_log_buf); return -1; @@ -6435,7 +6435,7 @@ static int test_get_finfo(const struct prog_info_raw_test *test, info.nr_func_info = nr_func_info; info.func_info_rec_size = rec_size; info.func_info = ptr_to_u64(func_info); - err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len); + err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len); if (CHECK(err < 0, "invalid get info (2nd) errno:%d", errno)) { fprintf(stderr, "%s\n", btf_log_buf); err = -1; @@ -6499,7 +6499,7 @@ static int test_get_linfo(const struct prog_info_raw_test *test, nr_jited_func_lens = nr_jited_ksyms; info_len = sizeof(struct bpf_prog_info); - err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len); + err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len); if (CHECK(err < 0, "err:%d errno:%d", err, errno)) { err = -1; goto done; @@ -6573,7 +6573,7 @@ static int test_get_linfo(const struct prog_info_raw_test *test, info.jited_func_lens = ptr_to_u64(jited_func_lens); } - err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len); + err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len); /* * Only recheck the info.*line_info* fields. diff --git a/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c b/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c index eb90a6b8850d..a8b53b8736f0 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c @@ -14,7 +14,7 @@ static __u32 bpf_map_id(struct bpf_map *map) int err; memset(&info, 0, info_len); - err = bpf_obj_get_info_by_fd(bpf_map__fd(map), &info, &info_len); + err = bpf_map_get_info_by_fd(bpf_map__fd(map), &info, &info_len); if (err) return 0; return info.id; diff --git a/tools/testing/selftests/bpf/prog_tests/check_mtu.c b/tools/testing/selftests/bpf/prog_tests/check_mtu.c index 12f4395f18b3..5338d2ea0460 100644 --- a/tools/testing/selftests/bpf/prog_tests/check_mtu.c +++ b/tools/testing/selftests/bpf/prog_tests/check_mtu.c @@ -59,7 +59,7 @@ static void test_check_mtu_xdp_attach(void) memset(&link_info, 0, sizeof(link_info)); fd = bpf_link__fd(link); - err = bpf_obj_get_info_by_fd(fd, &link_info, &link_info_len); + err = bpf_link_get_info_by_fd(fd, &link_info, &link_info_len); if (CHECK(err, "link_info", "failed: %d\n", err)) goto out; diff --git a/tools/testing/selftests/bpf/prog_tests/enable_stats.c b/tools/testing/selftests/bpf/prog_tests/enable_stats.c index 2cb2085917e7..75f85d0fe74a 100644 --- a/tools/testing/selftests/bpf/prog_tests/enable_stats.c +++ b/tools/testing/selftests/bpf/prog_tests/enable_stats.c @@ -28,7 +28,7 @@ void test_enable_stats(void) prog_fd = bpf_program__fd(skel->progs.test_enable_stats); memset(&info, 0, info_len); - err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len); + err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len); if (CHECK(err, "get_prog_info", "failed to get bpf_prog_info for fd %d\n", prog_fd)) goto cleanup; diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c index 20f5fa0fcec9..8ec73fdfcdab 100644 --- a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c +++ b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c @@ -79,7 +79,7 @@ static void 
test_fexit_bpf2bpf_common(const char *obj_file, return; info_len = sizeof(prog_info); - err = bpf_obj_get_info_by_fd(tgt_fd, &prog_info, &info_len); + err = bpf_prog_get_info_by_fd(tgt_fd, &prog_info, &info_len); if (!ASSERT_OK(err, "tgt_fd_get_info")) goto close_prog; @@ -136,8 +136,8 @@ static void test_fexit_bpf2bpf_common(const char *obj_file, info_len = sizeof(link_info); memset(&link_info, 0, sizeof(link_info)); - err = bpf_obj_get_info_by_fd(bpf_link__fd(link[i]), - &link_info, &info_len); + err = bpf_link_get_info_by_fd(bpf_link__fd(link[i]), + &link_info, &info_len); ASSERT_OK(err, "link_fd_get_info"); ASSERT_EQ(link_info.tracing.attach_type, bpf_program__expected_attach_type(prog[i]), @@ -417,7 +417,7 @@ static int find_prog_btf_id(const char *name, __u32 attach_prog_fd) struct btf *btf; int ret; - ret = bpf_obj_get_info_by_fd(attach_prog_fd, &info, &info_len); + ret = bpf_prog_get_info_by_fd(attach_prog_fd, &info, &info_len); if (ret) return ret; @@ -483,12 +483,12 @@ static void test_fentry_to_cgroup_bpf(void) if (!ASSERT_GE(fentry_fd, 0, "load_fentry")) goto cleanup; - /* Make sure bpf_obj_get_info_by_fd works correctly when attaching + /* Make sure bpf_prog_get_info_by_fd works correctly when attaching * to another BPF program. */ - ASSERT_OK(bpf_obj_get_info_by_fd(fentry_fd, &info, &info_len), - "bpf_obj_get_info_by_fd"); + ASSERT_OK(bpf_prog_get_info_by_fd(fentry_fd, &info, &info_len), + "bpf_prog_get_info_by_fd"); ASSERT_EQ(info.btf_id, 0, "info.btf_id"); ASSERT_EQ(info.attach_btf_id, btf_id, "info.attach_btf_id"); diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c index 7c79462d2702..9333f7346d15 100644 --- a/tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c +++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c @@ -60,9 +60,9 @@ static __u32 query_prog_id(int prog) __u32 info_len = sizeof(info); int err; - err = bpf_obj_get_info_by_fd(prog, &info, &info_len); + err = bpf_prog_get_info_by_fd(prog, &info, &info_len); if (CHECK_FAIL(err || info_len != sizeof(info))) { - perror("bpf_obj_get_info_by_fd"); + perror("bpf_prog_get_info_by_fd"); return 0; } @@ -497,7 +497,7 @@ static void test_link_get_info(int netns, int prog1, int prog2) } info_len = sizeof(info); - err = bpf_obj_get_info_by_fd(link, &info, &info_len); + err = bpf_link_get_info_by_fd(link, &info, &info_len); if (CHECK_FAIL(err)) { perror("bpf_obj_get_info"); goto out_unlink; @@ -521,7 +521,7 @@ static void test_link_get_info(int netns, int prog1, int prog2) link_id = info.id; info_len = sizeof(info); - err = bpf_obj_get_info_by_fd(link, &info, &info_len); + err = bpf_link_get_info_by_fd(link, &info, &info_len); if (CHECK_FAIL(err)) { perror("bpf_obj_get_info"); goto out_unlink; @@ -546,7 +546,7 @@ static void test_link_get_info(int netns, int prog1, int prog2) netns = -1; info_len = sizeof(info); - err = bpf_obj_get_info_by_fd(link, &info, &info_len); + err = bpf_link_get_info_by_fd(link, &info, &info_len); if (CHECK_FAIL(err)) { perror("bpf_obj_get_info"); goto out_unlink; diff --git a/tools/testing/selftests/bpf/prog_tests/libbpf_get_fd_by_id_opts.c b/tools/testing/selftests/bpf/prog_tests/libbpf_get_fd_by_id_opts.c index 25e5dfa9c315..a3f238f51d05 100644 --- a/tools/testing/selftests/bpf/prog_tests/libbpf_get_fd_by_id_opts.c +++ b/tools/testing/selftests/bpf/prog_tests/libbpf_get_fd_by_id_opts.c @@ -29,9 +29,9 @@ void test_libbpf_get_fd_by_id_opts(void) if (!ASSERT_OK(ret, 
"test_libbpf_get_fd_by_id_opts__attach")) goto close_prog; - ret = bpf_obj_get_info_by_fd(bpf_map__fd(skel->maps.data_input), + ret = bpf_map_get_info_by_fd(bpf_map__fd(skel->maps.data_input), &info_m, &len); - if (!ASSERT_OK(ret, "bpf_obj_get_info_by_fd")) + if (!ASSERT_OK(ret, "bpf_map_get_info_by_fd")) goto close_prog; fd = bpf_map_get_fd_by_id(info_m.id); diff --git a/tools/testing/selftests/bpf/prog_tests/lsm_cgroup.c b/tools/testing/selftests/bpf/prog_tests/lsm_cgroup.c index f117bfef68a1..130a3b21e467 100644 --- a/tools/testing/selftests/bpf/prog_tests/lsm_cgroup.c +++ b/tools/testing/selftests/bpf/prog_tests/lsm_cgroup.c @@ -47,7 +47,8 @@ static __u32 query_prog_cnt(int cgroup_fd, const char *attach_func) fd = bpf_prog_get_fd_by_id(p.prog_ids[i]); ASSERT_GE(fd, 0, "prog_get_fd_by_id"); - ASSERT_OK(bpf_obj_get_info_by_fd(fd, &info, &info_len), "prog_info_by_fd"); + ASSERT_OK(bpf_prog_get_info_by_fd(fd, &info, &info_len), + "prog_info_by_fd"); close(fd); if (info.attach_btf_id == diff --git a/tools/testing/selftests/bpf/prog_tests/metadata.c b/tools/testing/selftests/bpf/prog_tests/metadata.c index 2c53eade88e3..8b67dfc10f5c 100644 --- a/tools/testing/selftests/bpf/prog_tests/metadata.c +++ b/tools/testing/selftests/bpf/prog_tests/metadata.c @@ -16,7 +16,7 @@ static int duration; static int prog_holds_map(int prog_fd, int map_fd) { struct bpf_prog_info prog_info = {}; - struct bpf_prog_info map_info = {}; + struct bpf_map_info map_info = {}; __u32 prog_info_len; __u32 map_info_len; __u32 *map_ids; @@ -25,12 +25,12 @@ static int prog_holds_map(int prog_fd, int map_fd) int i; map_info_len = sizeof(map_info); - ret = bpf_obj_get_info_by_fd(map_fd, &map_info, &map_info_len); + ret = bpf_map_get_info_by_fd(map_fd, &map_info, &map_info_len); if (ret) return -errno; prog_info_len = sizeof(prog_info); - ret = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &prog_info_len); + ret = bpf_prog_get_info_by_fd(prog_fd, &prog_info, &prog_info_len); if (ret) return -errno; @@ -44,7 +44,7 @@ static int prog_holds_map(int prog_fd, int map_fd) prog_info.map_ids = ptr_to_u64(map_ids); prog_info_len = sizeof(prog_info); - ret = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &prog_info_len); + ret = bpf_prog_get_info_by_fd(prog_fd, &prog_info, &prog_info_len); if (ret) { ret = -errno; goto free_map_ids; diff --git a/tools/testing/selftests/bpf/prog_tests/mmap.c b/tools/testing/selftests/bpf/prog_tests/mmap.c index 37b002ca1167..a271d5a0f7ab 100644 --- a/tools/testing/selftests/bpf/prog_tests/mmap.c +++ b/tools/testing/selftests/bpf/prog_tests/mmap.c @@ -64,7 +64,7 @@ void test_mmap(void) /* get map's ID */ memset(&map_info, 0, map_info_sz); - err = bpf_obj_get_info_by_fd(data_map_fd, &map_info, &map_info_sz); + err = bpf_map_get_info_by_fd(data_map_fd, &map_info, &map_info_sz); if (CHECK(err, "map_get_info", "failed %d\n", errno)) goto cleanup; data_map_id = map_info.id; diff --git a/tools/testing/selftests/bpf/prog_tests/perf_link.c b/tools/testing/selftests/bpf/prog_tests/perf_link.c index 224eba6fef2e..3a25f1c743a1 100644 --- a/tools/testing/selftests/bpf/prog_tests/perf_link.c +++ b/tools/testing/selftests/bpf/prog_tests/perf_link.c @@ -54,7 +54,7 @@ void serial_test_perf_link(void) goto cleanup; memset(&info, 0, sizeof(info)); - err = bpf_obj_get_info_by_fd(link_fd, &info, &info_len); + err = bpf_link_get_info_by_fd(link_fd, &info, &info_len); if (!ASSERT_OK(err, "link_get_info")) goto cleanup; diff --git a/tools/testing/selftests/bpf/prog_tests/pinning.c b/tools/testing/selftests/bpf/prog_tests/pinning.c 
index d95cee5867b7..c799a3c5ad1f 100644 --- a/tools/testing/selftests/bpf/prog_tests/pinning.c +++ b/tools/testing/selftests/bpf/prog_tests/pinning.c @@ -18,7 +18,7 @@ __u32 get_map_id(struct bpf_object *obj, const char *name) if (CHECK(!map, "find map", "NULL map")) return 0; - err = bpf_obj_get_info_by_fd(bpf_map__fd(map), + err = bpf_map_get_info_by_fd(bpf_map__fd(map), &map_info, &map_info_len); CHECK(err, "get map info", "err %d errno %d", err, errno); return map_info.id; diff --git a/tools/testing/selftests/bpf/prog_tests/prog_run_opts.c b/tools/testing/selftests/bpf/prog_tests/prog_run_opts.c index 1ccd2bdf8fa8..01f1d1b6715a 100644 --- a/tools/testing/selftests/bpf/prog_tests/prog_run_opts.c +++ b/tools/testing/selftests/bpf/prog_tests/prog_run_opts.c @@ -12,7 +12,7 @@ static void check_run_cnt(int prog_fd, __u64 run_cnt) __u32 info_len = sizeof(info); int err; - err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len); + err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len); if (CHECK(err, "get_prog_info", "failed to get bpf_prog_info for fd %d\n", prog_fd)) return; diff --git a/tools/testing/selftests/bpf/prog_tests/recursion.c b/tools/testing/selftests/bpf/prog_tests/recursion.c index f3af2627b599..23552d3e3365 100644 --- a/tools/testing/selftests/bpf/prog_tests/recursion.c +++ b/tools/testing/selftests/bpf/prog_tests/recursion.c @@ -31,8 +31,8 @@ void test_recursion(void) bpf_map_delete_elem(bpf_map__fd(skel->maps.hash2), &key); ASSERT_EQ(skel->bss->pass2, 2, "pass2 == 2"); - err = bpf_obj_get_info_by_fd(bpf_program__fd(skel->progs.on_delete), - &prog_info, &prog_info_len); + err = bpf_prog_get_info_by_fd(bpf_program__fd(skel->progs.on_delete), + &prog_info, &prog_info_len); if (!ASSERT_OK(err, "get_prog_info")) goto out; ASSERT_EQ(prog_info.recursion_misses, 2, "recursion_misses"); diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c index 0aa088900699..0ce25a967481 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c @@ -299,9 +299,9 @@ static __u32 query_prog_id(int prog_fd) __u32 info_len = sizeof(info); int err; - err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len); - if (!ASSERT_OK(err, "bpf_obj_get_info_by_fd") || - !ASSERT_EQ(info_len, sizeof(info), "bpf_obj_get_info_by_fd")) + err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len); + if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd") || + !ASSERT_EQ(info_len, sizeof(info), "bpf_prog_get_info_by_fd")) return 0; return info.id; diff --git a/tools/testing/selftests/bpf/prog_tests/task_local_storage.c b/tools/testing/selftests/bpf/prog_tests/task_local_storage.c index a176bd75a748..ea8537c54413 100644 --- a/tools/testing/selftests/bpf/prog_tests/task_local_storage.c +++ b/tools/testing/selftests/bpf/prog_tests/task_local_storage.c @@ -119,19 +119,19 @@ static void test_recursion(void) prog_fd = bpf_program__fd(skel->progs.on_lookup); memset(&info, 0, sizeof(info)); - err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len); + err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len); ASSERT_OK(err, "get prog info"); ASSERT_GT(info.recursion_misses, 0, "on_lookup prog recursion"); prog_fd = bpf_program__fd(skel->progs.on_update); memset(&info, 0, sizeof(info)); - err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len); + err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len); ASSERT_OK(err, "get prog info"); ASSERT_EQ(info.recursion_misses, 0, "on_update prog recursion"); 
prog_fd = bpf_program__fd(skel->progs.on_enter); memset(&info, 0, sizeof(info)); - err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len); + err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len); ASSERT_OK(err, "get prog info"); ASSERT_EQ(info.recursion_misses, 0, "on_enter prog recursion"); @@ -221,7 +221,7 @@ static void test_nodeadlock(void) info_len = sizeof(info); prog_fd = bpf_program__fd(skel->progs.socket_post_create); - err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len); + err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len); ASSERT_OK(err, "get prog info"); ASSERT_EQ(info.recursion_misses, 0, "prog recursion"); diff --git a/tools/testing/selftests/bpf/prog_tests/tc_bpf.c b/tools/testing/selftests/bpf/prog_tests/tc_bpf.c index 4a505a5adf4d..e873766276d1 100644 --- a/tools/testing/selftests/bpf/prog_tests/tc_bpf.c +++ b/tools/testing/selftests/bpf/prog_tests/tc_bpf.c @@ -29,8 +29,8 @@ static int test_tc_bpf_basic(const struct bpf_tc_hook *hook, int fd) __u32 info_len = sizeof(info); int ret; - ret = bpf_obj_get_info_by_fd(fd, &info, &info_len); - if (!ASSERT_OK(ret, "bpf_obj_get_info_by_fd")) + ret = bpf_prog_get_info_by_fd(fd, &info, &info_len); + if (!ASSERT_OK(ret, "bpf_prog_get_info_by_fd")) return ret; ret = bpf_tc_attach(hook, &opts); diff --git a/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c b/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c index a479080533db..770fcc3bb1ba 100644 --- a/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c +++ b/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c @@ -45,8 +45,9 @@ void serial_test_tp_attach_query(void) prog_info.xlated_prog_len = 0; prog_info.nr_map_ids = 0; info_len = sizeof(prog_info); - err = bpf_obj_get_info_by_fd(prog_fd[i], &prog_info, &info_len); - if (CHECK(err, "bpf_obj_get_info_by_fd", "err %d errno %d\n", + err = bpf_prog_get_info_by_fd(prog_fd[i], &prog_info, + &info_len); + if (CHECK(err, "bpf_prog_get_info_by_fd", "err %d errno %d\n", err, errno)) goto cleanup1; saved_prog_ids[i] = prog_info.id; diff --git a/tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c b/tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c index 1ed3cc2092db..8383a99f610f 100644 --- a/tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c +++ b/tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c @@ -179,7 +179,7 @@ static void test_unpriv_bpf_disabled_negative(struct test_unpriv_bpf_disabled *s ASSERT_EQ(bpf_prog_get_next_id(prog_id, &next), -EPERM, "prog_get_next_id_fails"); ASSERT_EQ(bpf_prog_get_next_id(0, &next), -EPERM, "prog_get_next_id_fails"); - if (ASSERT_OK(bpf_obj_get_info_by_fd(map_fds[0], &map_info, &map_info_len), + if (ASSERT_OK(bpf_map_get_info_by_fd(map_fds[0], &map_info, &map_info_len), "obj_get_info_by_fd")) { ASSERT_EQ(bpf_map_get_fd_by_id(map_info.id), -EPERM, "map_get_fd_by_id_fails"); ASSERT_EQ(bpf_map_get_next_id(map_info.id, &next), -EPERM, @@ -187,8 +187,8 @@ static void test_unpriv_bpf_disabled_negative(struct test_unpriv_bpf_disabled *s } ASSERT_EQ(bpf_map_get_next_id(0, &next), -EPERM, "map_get_next_id_fails"); - if (ASSERT_OK(bpf_obj_get_info_by_fd(bpf_link__fd(skel->links.sys_nanosleep_enter), - &link_info, &link_info_len), + if (ASSERT_OK(bpf_link_get_info_by_fd(bpf_link__fd(skel->links.sys_nanosleep_enter), + &link_info, &link_info_len), "obj_get_info_by_fd")) { ASSERT_EQ(bpf_link_get_fd_by_id(link_info.id), -EPERM, "link_get_fd_by_id_fails"); ASSERT_EQ(bpf_link_get_next_id(link_info.id, &next), -EPERM, @@ -269,7 +269,7 @@ void 
test_unpriv_bpf_disabled(void) } prog_fd = bpf_program__fd(skel->progs.sys_nanosleep_enter); - ASSERT_OK(bpf_obj_get_info_by_fd(prog_fd, &prog_info, &prog_info_len), + ASSERT_OK(bpf_prog_get_info_by_fd(prog_fd, &prog_info, &prog_info_len), "obj_get_info_by_fd"); prog_id = prog_info.id; ASSERT_GT(prog_id, 0, "valid_prog_id"); diff --git a/tools/testing/selftests/bpf/prog_tests/verif_stats.c b/tools/testing/selftests/bpf/prog_tests/verif_stats.c index a47e7c0e1ffd..af4b95f57ac1 100644 --- a/tools/testing/selftests/bpf/prog_tests/verif_stats.c +++ b/tools/testing/selftests/bpf/prog_tests/verif_stats.c @@ -16,8 +16,9 @@ void test_verif_stats(void) if (!ASSERT_OK_PTR(skel, "trace_vprintk__open_and_load")) goto cleanup; - err = bpf_obj_get_info_by_fd(skel->progs.sys_enter.prog_fd, &info, &len); - if (!ASSERT_OK(err, "bpf_obj_get_info_by_fd")) + err = bpf_prog_get_info_by_fd(skel->progs.sys_enter.prog_fd, + &info, &len); + if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd")) goto cleanup; if (!ASSERT_GT(info.verified_insns, 0, "verified_insns")) diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_attach.c b/tools/testing/selftests/bpf/prog_tests/xdp_attach.c index 062fbc8c8e5e..d4cd9f873c14 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_attach.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_attach.c @@ -18,7 +18,7 @@ void serial_test_xdp_attach(void) err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj1, &fd1); if (CHECK_FAIL(err)) return; - err = bpf_obj_get_info_by_fd(fd1, &info, &len); + err = bpf_prog_get_info_by_fd(fd1, &info, &len); if (CHECK_FAIL(err)) goto out_1; id1 = info.id; @@ -28,7 +28,7 @@ void serial_test_xdp_attach(void) goto out_1; memset(&info, 0, sizeof(info)); - err = bpf_obj_get_info_by_fd(fd2, &info, &len); + err = bpf_prog_get_info_by_fd(fd2, &info, &len); if (CHECK_FAIL(err)) goto out_2; id2 = info.id; diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c b/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c index f775a1613833..481626a875d1 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c @@ -33,8 +33,8 @@ static void test_xdp_with_cpumap_helpers(void) prog_fd = bpf_program__fd(skel->progs.xdp_dummy_cm); map_fd = bpf_map__fd(skel->maps.cpu_map); - err = bpf_obj_get_info_by_fd(prog_fd, &info, &len); - if (!ASSERT_OK(err, "bpf_obj_get_info_by_fd")) + err = bpf_prog_get_info_by_fd(prog_fd, &info, &len); + if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd")) goto out_close; val.bpf_prog.fd = prog_fd; @@ -85,8 +85,8 @@ static void test_xdp_with_cpumap_frags_helpers(void) frags_prog_fd = bpf_program__fd(skel->progs.xdp_dummy_cm_frags); map_fd = bpf_map__fd(skel->maps.cpu_map); - err = bpf_obj_get_info_by_fd(frags_prog_fd, &info, &len); - if (!ASSERT_OK(err, "bpf_obj_get_info_by_fd")) + err = bpf_prog_get_info_by_fd(frags_prog_fd, &info, &len); + if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd")) goto out_close; val.bpf_prog.fd = frags_prog_fd; diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c b/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c index ead40016c324..ce6812558287 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c @@ -35,8 +35,8 @@ static void test_xdp_with_devmap_helpers(void) dm_fd = bpf_program__fd(skel->progs.xdp_dummy_dm); map_fd = bpf_map__fd(skel->maps.dm_ports); - err = bpf_obj_get_info_by_fd(dm_fd, &info, &len); - if 
(!ASSERT_OK(err, "bpf_obj_get_info_by_fd")) + err = bpf_prog_get_info_by_fd(dm_fd, &info, &len); + if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd")) goto out_close; val.bpf_prog.fd = dm_fd; @@ -98,8 +98,8 @@ static void test_xdp_with_devmap_frags_helpers(void) dm_fd_frags = bpf_program__fd(skel->progs.xdp_dummy_dm_frags); map_fd = bpf_map__fd(skel->maps.dm_ports); - err = bpf_obj_get_info_by_fd(dm_fd_frags, &info, &len); - if (!ASSERT_OK(err, "bpf_obj_get_info_by_fd")) + err = bpf_prog_get_info_by_fd(dm_fd_frags, &info, &len); + if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd")) goto out_close; val.bpf_prog.fd = dm_fd_frags; diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_info.c b/tools/testing/selftests/bpf/prog_tests/xdp_info.c index 286c21ecdc65..1dbddcab87a8 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_info.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_info.c @@ -34,7 +34,7 @@ void serial_test_xdp_info(void) if (CHECK_FAIL(err)) return; - err = bpf_obj_get_info_by_fd(prog_fd, &info, &len); + err = bpf_prog_get_info_by_fd(prog_fd, &info, &len); if (CHECK(err, "get_prog_info", "errno=%d\n", errno)) goto out_close; diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_link.c b/tools/testing/selftests/bpf/prog_tests/xdp_link.c index 3e9d5c5521f0..e7e9f3c22edf 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_link.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_link.c @@ -29,13 +29,13 @@ void serial_test_xdp_link(void) prog_fd2 = bpf_program__fd(skel2->progs.xdp_handler); memset(&prog_info, 0, sizeof(prog_info)); - err = bpf_obj_get_info_by_fd(prog_fd1, &prog_info, &prog_info_len); + err = bpf_prog_get_info_by_fd(prog_fd1, &prog_info, &prog_info_len); if (!ASSERT_OK(err, "fd_info1")) goto cleanup; id1 = prog_info.id; memset(&prog_info, 0, sizeof(prog_info)); - err = bpf_obj_get_info_by_fd(prog_fd2, &prog_info, &prog_info_len); + err = bpf_prog_get_info_by_fd(prog_fd2, &prog_info, &prog_info_len); if (!ASSERT_OK(err, "fd_info2")) goto cleanup; id2 = prog_info.id; @@ -119,7 +119,8 @@ void serial_test_xdp_link(void) goto cleanup; memset(&link_info, 0, sizeof(link_info)); - err = bpf_obj_get_info_by_fd(bpf_link__fd(link), &link_info, &link_info_len); + err = bpf_link_get_info_by_fd(bpf_link__fd(link), + &link_info, &link_info_len); if (!ASSERT_OK(err, "link_info")) goto cleanup; @@ -137,7 +138,8 @@ void serial_test_xdp_link(void) goto cleanup; memset(&link_info, 0, sizeof(link_info)); - err = bpf_obj_get_info_by_fd(bpf_link__fd(link), &link_info, &link_info_len); + err = bpf_link_get_info_by_fd(bpf_link__fd(link), + &link_info, &link_info_len); ASSERT_OK(err, "link_info"); ASSERT_EQ(link_info.prog_id, id1, "link_prog_id"); diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c index b73152822aa2..7fc00e423e4d 100644 --- a/tools/testing/selftests/bpf/test_maps.c +++ b/tools/testing/selftests/bpf/test_maps.c @@ -1275,7 +1275,7 @@ static void test_map_in_map(void) goto out_map_in_map; } - err = bpf_obj_get_info_by_fd(fd, &info, &len); + err = bpf_map_get_info_by_fd(fd, &info, &len); if (err) { printf("Failed to get map info by fd %d: %d", fd, errno); diff --git a/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c b/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c index 3256de30f563..ed518d075d1d 100644 --- a/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c +++ b/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c @@ -93,7 +93,7 @@ int get_map_fd_by_prog_id(int prog_id) info.nr_map_ids = 1; info.map_ids = (__u64) 
(unsigned long) map_ids; - if (bpf_obj_get_info_by_fd(prog_fd, &info, &info_len)) { + if (bpf_prog_get_info_by_fd(prog_fd, &info, &info_len)) { log_err("Failed to get info by prog fd %d", prog_fd); goto err; } diff --git a/tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c b/tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c index 5c8ef062f760..32df93747095 100644 --- a/tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c +++ b/tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c @@ -96,7 +96,7 @@ static int get_map_fd_by_prog_id(int prog_id, bool *xdp) info.nr_map_ids = 1; info.map_ids = (__u64)(unsigned long)map_ids; - if (bpf_obj_get_info_by_fd(prog_fd, &info, &info_len)) { + if (bpf_prog_get_info_by_fd(prog_fd, &info, &info_len)) { log_err("Failed to get info by prog fd %d", prog_fd); goto err; } diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 887c49dc5abd..8b9949bb833d 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -1239,8 +1239,8 @@ static int get_xlated_program(int fd_prog, struct bpf_insn **buf, int *cnt) __u32 xlated_prog_len; __u32 buf_element_size = sizeof(struct bpf_insn); - if (bpf_obj_get_info_by_fd(fd_prog, &info, &info_len)) { - perror("bpf_obj_get_info_by_fd failed"); + if (bpf_prog_get_info_by_fd(fd_prog, &info, &info_len)) { + perror("bpf_prog_get_info_by_fd failed"); return -1; } @@ -1261,8 +1261,8 @@ static int get_xlated_program(int fd_prog, struct bpf_insn **buf, int *cnt) bzero(&info, sizeof(info)); info.xlated_prog_len = xlated_prog_len; info.xlated_prog_insns = (__u64)(unsigned long)*buf; - if (bpf_obj_get_info_by_fd(fd_prog, &info, &info_len)) { - perror("second bpf_obj_get_info_by_fd failed"); + if (bpf_prog_get_info_by_fd(fd_prog, &info, &info_len)) { + perror("second bpf_prog_get_info_by_fd failed"); goto out_free_buf; } diff --git a/tools/testing/selftests/bpf/testing_helpers.c b/tools/testing/selftests/bpf/testing_helpers.c index 9695318e8132..6c44153755e6 100644 --- a/tools/testing/selftests/bpf/testing_helpers.c +++ b/tools/testing/selftests/bpf/testing_helpers.c @@ -164,7 +164,7 @@ __u32 link_info_prog_id(const struct bpf_link *link, struct bpf_link_info *info) int err; memset(info, 0, sizeof(*info)); - err = bpf_obj_get_info_by_fd(bpf_link__fd(link), info, &info_len); + err = bpf_link_get_info_by_fd(bpf_link__fd(link), info, &info_len); if (err) { printf("failed to get link info: %d\n", -errno); return 0; diff --git a/tools/testing/selftests/bpf/xdp_synproxy.c b/tools/testing/selftests/bpf/xdp_synproxy.c index 6dbe0b745198..ce68c342b56f 100644 --- a/tools/testing/selftests/bpf/xdp_synproxy.c +++ b/tools/testing/selftests/bpf/xdp_synproxy.c @@ -217,9 +217,10 @@ static int syncookie_attach(const char *argv0, unsigned int ifindex, bool tc) prog_fd = bpf_program__fd(prog); - err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len); + err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len); if (err < 0) { - fprintf(stderr, "Error: bpf_obj_get_info_by_fd: %s\n", strerror(-err)); + fprintf(stderr, "Error: bpf_prog_get_info_by_fd: %s\n", + strerror(-err)); goto out; } attached_tc = tc; @@ -292,9 +293,10 @@ static int syncookie_open_bpf_maps(__u32 prog_id, int *values_map_fd, int *ports }; info_len = sizeof(prog_info); - err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len); + err = bpf_prog_get_info_by_fd(prog_fd, &prog_info, &info_len); if (err != 0) { - fprintf(stderr, "Error: bpf_obj_get_info_by_fd: 
%s\n", strerror(-err)); + fprintf(stderr, "Error: bpf_prog_get_info_by_fd: %s\n", + strerror(-err)); goto out; } @@ -317,9 +319,10 @@ static int syncookie_open_bpf_maps(__u32 prog_id, int *values_map_fd, int *ports map_fd = err; info_len = sizeof(map_info); - err = bpf_obj_get_info_by_fd(map_fd, &map_info, &info_len); + err = bpf_map_get_info_by_fd(map_fd, &map_info, &info_len); if (err != 0) { - fprintf(stderr, "Error: bpf_obj_get_info_by_fd: %s\n", strerror(-err)); + fprintf(stderr, "Error: bpf_map_get_info_by_fd: %s\n", + strerror(-err)); close(map_fd); goto err_close_map_fds; } -- cgit v1.2.3 From df71a42cc37a44cdc7682f57aecf14ff44391eed Mon Sep 17 00:00:00 2001 From: Taichi Nishimura Date: Thu, 16 Feb 2023 17:55:37 +0900 Subject: Fix typos in selftest/bpf files Run spell checker on files in selftest/bpf and fixed typos. Signed-off-by: Taichi Nishimura Signed-off-by: Andrii Nakryiko Reviewed-by: Randy Dunlap Link: https://lore.kernel.org/bpf/20230216085537.519062-1-awkrail01@gmail.com --- tools/testing/selftests/bpf/prog_tests/migrate_reuseport.c | 2 +- tools/testing/selftests/bpf/prog_tests/trampoline_count.c | 2 +- tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c | 2 +- tools/testing/selftests/bpf/progs/dynptr_fail.c | 2 +- tools/testing/selftests/bpf/progs/strobemeta.h | 2 +- tools/testing/selftests/bpf/progs/test_cls_redirect.c | 6 +++--- tools/testing/selftests/bpf/progs/test_subprogs.c | 2 +- tools/testing/selftests/bpf/progs/test_xdp_vlan.c | 2 +- tools/testing/selftests/bpf/test_cpp.cpp | 2 +- tools/testing/selftests/bpf/veristat.c | 4 ++-- 10 files changed, 13 insertions(+), 13 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/prog_tests/migrate_reuseport.c b/tools/testing/selftests/bpf/prog_tests/migrate_reuseport.c index eb2feaac81fe..653b0a20fab9 100644 --- a/tools/testing/selftests/bpf/prog_tests/migrate_reuseport.c +++ b/tools/testing/selftests/bpf/prog_tests/migrate_reuseport.c @@ -488,7 +488,7 @@ static void run_test(struct migrate_reuseport_test_case *test_case, goto close_servers; } - /* Tie requests to the first four listners */ + /* Tie requests to the first four listeners */ err = start_clients(test_case); if (!ASSERT_OK(err, "start_clients")) goto close_clients; diff --git a/tools/testing/selftests/bpf/prog_tests/trampoline_count.c b/tools/testing/selftests/bpf/prog_tests/trampoline_count.c index 8fd4c0d78089..e91d0d1769f1 100644 --- a/tools/testing/selftests/bpf/prog_tests/trampoline_count.c +++ b/tools/testing/selftests/bpf/prog_tests/trampoline_count.c @@ -79,7 +79,7 @@ void serial_test_trampoline_count(void) if (!ASSERT_EQ(link, NULL, "ptr_is_null")) goto cleanup; - /* and finaly execute the probe */ + /* and finally execute the probe */ prog_fd = bpf_program__fd(prog); if (!ASSERT_GE(prog_fd, 0, "bpf_program__fd")) goto cleanup; diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c index 26fffb02ed10..ad21ee8c7e23 100644 --- a/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c +++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c @@ -84,7 +84,7 @@ typedef void (*printf_fn_t)(const char *, ...); * typedef int (*fn_t)(int); * typedef char * const * (*fn_ptr2_t)(s_t, fn_t); * - * - `fn_complext_t`: pointer to a function returning struct and accepting + * - `fn_complex_t`: pointer to a function returning struct and accepting * union and struct. All structs and enum are anonymous and defined inline. 
* * - `signal_t: pointer to a function accepting a pointer to a function as an diff --git a/tools/testing/selftests/bpf/progs/dynptr_fail.c b/tools/testing/selftests/bpf/progs/dynptr_fail.c index 5950ad6ec2e6..aa5b69354b91 100644 --- a/tools/testing/selftests/bpf/progs/dynptr_fail.c +++ b/tools/testing/selftests/bpf/progs/dynptr_fail.c @@ -630,7 +630,7 @@ static int release_twice_callback_fn(__u32 index, void *data) } /* Test that releasing a dynptr twice, where one of the releases happens - * within a calback function, fails + * within a callback function, fails */ SEC("?raw_tp") __failure __msg("arg 1 is an unacquired reference") diff --git a/tools/testing/selftests/bpf/progs/strobemeta.h b/tools/testing/selftests/bpf/progs/strobemeta.h index 753718595c26..e562be6356f3 100644 --- a/tools/testing/selftests/bpf/progs/strobemeta.h +++ b/tools/testing/selftests/bpf/progs/strobemeta.h @@ -135,7 +135,7 @@ struct strobe_value_loc { * tpidr_el0 for aarch64). * TLS_IMM_EXEC: absolute address of GOT entry containing offset * from thread pointer; - * TLS_GENERAL_DYN: absolute addres of double GOT entry + * TLS_GENERAL_DYN: absolute address of double GOT entry * containing tls_index_t struct; */ int64_t offset; diff --git a/tools/testing/selftests/bpf/progs/test_cls_redirect.c b/tools/testing/selftests/bpf/progs/test_cls_redirect.c index 2833ad722cb7..66b304982245 100644 --- a/tools/testing/selftests/bpf/progs/test_cls_redirect.c +++ b/tools/testing/selftests/bpf/progs/test_cls_redirect.c @@ -600,7 +600,7 @@ static INLINING ret_t get_next_hop(buf_t *pkt, encap_headers_t *encap, return TC_ACT_SHOT; } - /* Skip the remainig next hops (may be zero). */ + /* Skip the remaining next hops (may be zero). */ return skip_next_hops(pkt, encap->unigue.hop_count - encap->unigue.next_hop - 1); } @@ -610,8 +610,8 @@ static INLINING ret_t get_next_hop(buf_t *pkt, encap_headers_t *encap, * * fill_tuple(&t, foo, sizeof(struct iphdr), 123, 321) * - * clang will substitue a costant for sizeof, which allows the verifier - * to track it's value. Based on this, it can figure out the constant + * clang will substitute a constant for sizeof, which allows the verifier + * to track its value. Based on this, it can figure out the constant * return value, and calling code works while still being "generic" to * IPv4 and IPv6. 
*/ diff --git a/tools/testing/selftests/bpf/progs/test_subprogs.c b/tools/testing/selftests/bpf/progs/test_subprogs.c index f8e9256cf18d..a8d602d7c88a 100644 --- a/tools/testing/selftests/bpf/progs/test_subprogs.c +++ b/tools/testing/selftests/bpf/progs/test_subprogs.c @@ -47,7 +47,7 @@ static __noinline int sub5(int v) return sub1(v) - 1; /* compensates sub1()'s + 1 */ } -/* unfortunately verifier rejects `struct task_struct *t` as an unkown pointer +/* unfortunately verifier rejects `struct task_struct *t` as an unknown pointer * type, so we need to accept pointer as integer and then cast it inside the * function */ diff --git a/tools/testing/selftests/bpf/progs/test_xdp_vlan.c b/tools/testing/selftests/bpf/progs/test_xdp_vlan.c index cdf3c48d6cbb..4ddcb6dfe500 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp_vlan.c +++ b/tools/testing/selftests/bpf/progs/test_xdp_vlan.c @@ -98,7 +98,7 @@ bool parse_eth_frame(struct ethhdr *eth, void *data_end, struct parse_pkt *pkt) return true; } -/* Hint, VLANs are choosen to hit network-byte-order issues */ +/* Hint, VLANs are chosen to hit network-byte-order issues */ #define TESTVLAN 4011 /* 0xFAB */ // #define TO_VLAN 4000 /* 0xFA0 (hint 0xOA0 = 160) */ diff --git a/tools/testing/selftests/bpf/test_cpp.cpp b/tools/testing/selftests/bpf/test_cpp.cpp index 0bd9990e83fa..f4936834f76f 100644 --- a/tools/testing/selftests/bpf/test_cpp.cpp +++ b/tools/testing/selftests/bpf/test_cpp.cpp @@ -91,7 +91,7 @@ static void try_skeleton_template() skel.detach(); - /* destructor will destory underlying skeleton */ + /* destructor will destroy underlying skeleton */ } int main(int argc, char *argv[]) diff --git a/tools/testing/selftests/bpf/veristat.c b/tools/testing/selftests/bpf/veristat.c index f961b49b8ef4..83231456d3c5 100644 --- a/tools/testing/selftests/bpf/veristat.c +++ b/tools/testing/selftests/bpf/veristat.c @@ -144,7 +144,7 @@ static struct env { struct verif_stats *prog_stats; int prog_stat_cnt; - /* baseline_stats is allocated and used only in comparsion mode */ + /* baseline_stats is allocated and used only in comparison mode */ struct verif_stats *baseline_stats; int baseline_stat_cnt; @@ -882,7 +882,7 @@ static int process_obj(const char *filename) * that BPF object file is incomplete and has to be statically * linked into a final BPF object file; instead of bailing * out, report it into stderr, mark it as skipped, and - * proceeed + * proceed */ fprintf(stderr, "Failed to open '%s': %d\n", filename, -errno); env.files_skipped++; -- cgit v1.2.3 From 95ebb376176c52382293e05e63f142114a5e40ef Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 15 Feb 2023 20:59:53 -0800 Subject: selftests/bpf: Convert test_global_funcs test to test_loader framework Convert 17 test_global_funcs subtests into the test_loader framework for easier maintenance and a more declarative way to define expected failures/successes.
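To make the converted shape concrete, here is a minimal sketch of the declarative pattern (the file, program, and message names below are invented for illustration and are not taken from this series):

    /* progs/example.c -- hypothetical BPF side */
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>
    #include "bpf_misc.h"

    char _license[] SEC("license") = "GPL";

    SEC("tc")
    __success /* this program is expected to load */
    int example_ok(struct __sk_buff *skb)
    {
    	return 0;
    }

    /* an expected-to-fail case would instead be annotated with
     * __failure __msg("some verifier error substring")
     */

    /* prog_tests/example.c -- hypothetical userspace side */
    #include <test_progs.h>
    #include "example.skel.h"

    void test_example(void)
    {
    	RUN_TESTS(example);
    }

RUN_TESTS() opens the skeleton, attempts to load every program in it, and checks the outcome (and, for __msg(), the verifier log) against the annotations, so no hand-rolled libbpf print-callback interception is needed.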
Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Acked-by: Stanislav Fomichev Link: https://lore.kernel.org/bpf/20230216045954.3002473-3-andrii@kernel.org --- .../selftests/bpf/prog_tests/test_global_funcs.c | 131 ++++++--------------- .../selftests/bpf/progs/test_global_func1.c | 6 +- .../selftests/bpf/progs/test_global_func10.c | 4 +- .../selftests/bpf/progs/test_global_func11.c | 4 +- .../selftests/bpf/progs/test_global_func12.c | 4 +- .../selftests/bpf/progs/test_global_func13.c | 4 +- .../selftests/bpf/progs/test_global_func14.c | 4 +- .../selftests/bpf/progs/test_global_func15.c | 4 +- .../selftests/bpf/progs/test_global_func16.c | 4 +- .../selftests/bpf/progs/test_global_func17.c | 4 +- .../selftests/bpf/progs/test_global_func2.c | 43 ++++++- .../selftests/bpf/progs/test_global_func3.c | 10 +- .../selftests/bpf/progs/test_global_func4.c | 55 ++++++++- .../selftests/bpf/progs/test_global_func5.c | 4 +- .../selftests/bpf/progs/test_global_func6.c | 4 +- .../selftests/bpf/progs/test_global_func7.c | 4 +- .../selftests/bpf/progs/test_global_func8.c | 4 +- .../selftests/bpf/progs/test_global_func9.c | 4 +- 18 files changed, 174 insertions(+), 123 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c b/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c index 7295cc60f724..2ff4d5c7abfc 100644 --- a/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c +++ b/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c @@ -1,104 +1,41 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2020 Facebook */ #include - -const char *err_str; -bool found; - -static int libbpf_debug_print(enum libbpf_print_level level, - const char *format, va_list args) -{ - char *log_buf; - - if (level != LIBBPF_WARN || - strcmp(format, "libbpf: \n%s\n")) { - vprintf(format, args); - return 0; - } - - log_buf = va_arg(args, char *); - if (!log_buf) - goto out; - if (err_str && strstr(log_buf, err_str) == 0) - found = true; -out: - printf(format, log_buf); - return 0; -} - -extern int extra_prog_load_log_flags; - -static int check_load(const char *file) -{ - struct bpf_object *obj = NULL; - struct bpf_program *prog; - int err; - - found = false; - - obj = bpf_object__open_file(file, NULL); - err = libbpf_get_error(obj); - if (err) - return err; - - prog = bpf_object__next_program(obj, NULL); - if (!prog) { - err = -ENOENT; - goto err_out; - } - - bpf_program__set_flags(prog, BPF_F_TEST_RND_HI32); - bpf_program__set_log_level(prog, extra_prog_load_log_flags); - - err = bpf_object__load(obj); - -err_out: - bpf_object__close(obj); - return err; -} - -struct test_def { - const char *file; - const char *err_str; -}; +#include "test_global_func1.skel.h" +#include "test_global_func2.skel.h" +#include "test_global_func3.skel.h" +#include "test_global_func4.skel.h" +#include "test_global_func5.skel.h" +#include "test_global_func6.skel.h" +#include "test_global_func7.skel.h" +#include "test_global_func8.skel.h" +#include "test_global_func9.skel.h" +#include "test_global_func10.skel.h" +#include "test_global_func11.skel.h" +#include "test_global_func12.skel.h" +#include "test_global_func13.skel.h" +#include "test_global_func14.skel.h" +#include "test_global_func15.skel.h" +#include "test_global_func16.skel.h" +#include "test_global_func17.skel.h" void test_test_global_funcs(void) { - struct test_def tests[] = { - { "test_global_func1.bpf.o", "combined stack size of 4 calls is 544" }, - { "test_global_func2.bpf.o" }, - { "test_global_func3.bpf.o", 
"the call stack of 8 frames" }, - { "test_global_func4.bpf.o" }, - { "test_global_func5.bpf.o", "expected pointer to ctx, but got PTR" }, - { "test_global_func6.bpf.o", "modified ctx ptr R2" }, - { "test_global_func7.bpf.o", "foo() doesn't return scalar" }, - { "test_global_func8.bpf.o" }, - { "test_global_func9.bpf.o" }, - { "test_global_func10.bpf.o", "invalid indirect read from stack" }, - { "test_global_func11.bpf.o", "Caller passes invalid args into func#1" }, - { "test_global_func12.bpf.o", "invalid mem access 'mem_or_null'" }, - { "test_global_func13.bpf.o", "Caller passes invalid args into func#1" }, - { "test_global_func14.bpf.o", "reference type('FWD S') size cannot be determined" }, - { "test_global_func15.bpf.o", "At program exit the register R0 has value" }, - { "test_global_func16.bpf.o", "invalid indirect read from stack" }, - { "test_global_func17.bpf.o", "Caller passes invalid args into func#1" }, - }; - libbpf_print_fn_t old_print_fn = NULL; - int err, i, duration = 0; - - old_print_fn = libbpf_set_print(libbpf_debug_print); - - for (i = 0; i < ARRAY_SIZE(tests); i++) { - const struct test_def *test = &tests[i]; - - if (!test__start_subtest(test->file)) - continue; - - err_str = test->err_str; - err = check_load(test->file); - CHECK_FAIL(!!err ^ !!err_str); - if (err_str) - CHECK(found, "", "expected string '%s'", err_str); - } - libbpf_set_print(old_print_fn); + RUN_TESTS(test_global_func1); + RUN_TESTS(test_global_func2); + RUN_TESTS(test_global_func3); + RUN_TESTS(test_global_func4); + RUN_TESTS(test_global_func5); + RUN_TESTS(test_global_func6); + RUN_TESTS(test_global_func7); + RUN_TESTS(test_global_func8); + RUN_TESTS(test_global_func9); + RUN_TESTS(test_global_func10); + RUN_TESTS(test_global_func11); + RUN_TESTS(test_global_func12); + RUN_TESTS(test_global_func13); + RUN_TESTS(test_global_func14); + RUN_TESTS(test_global_func15); + RUN_TESTS(test_global_func16); + RUN_TESTS(test_global_func17); } diff --git a/tools/testing/selftests/bpf/progs/test_global_func1.c b/tools/testing/selftests/bpf/progs/test_global_func1.c index 7b42dad187b8..23970a20b324 100644 --- a/tools/testing/selftests/bpf/progs/test_global_func1.c +++ b/tools/testing/selftests/bpf/progs/test_global_func1.c @@ -3,10 +3,9 @@ #include #include #include +#include "bpf_misc.h" -#ifndef MAX_STACK #define MAX_STACK (512 - 3 * 32 + 8) -#endif static __attribute__ ((noinline)) int f0(int var, struct __sk_buff *skb) @@ -39,7 +38,8 @@ int f3(int val, struct __sk_buff *skb, int var) } SEC("tc") -int test_cls(struct __sk_buff *skb) +__failure __msg("combined stack size of 4 calls is 544") +int global_func1(struct __sk_buff *skb) { return f0(1, skb) + f1(skb) + f2(2, skb) + f3(3, skb, 4); } diff --git a/tools/testing/selftests/bpf/progs/test_global_func10.c b/tools/testing/selftests/bpf/progs/test_global_func10.c index 97b7031d0e22..98327bdbbfd2 100644 --- a/tools/testing/selftests/bpf/progs/test_global_func10.c +++ b/tools/testing/selftests/bpf/progs/test_global_func10.c @@ -2,6 +2,7 @@ #include #include #include +#include "bpf_misc.h" struct Small { int x; @@ -21,7 +22,8 @@ __noinline int foo(const struct Big *big) } SEC("cgroup_skb/ingress") -int test_cls(struct __sk_buff *skb) +__failure __msg("invalid indirect read from stack") +int global_func10(struct __sk_buff *skb) { const struct Small small = {.x = skb->len }; diff --git a/tools/testing/selftests/bpf/progs/test_global_func11.c b/tools/testing/selftests/bpf/progs/test_global_func11.c index ef5277d982d9..283e036dc401 100644 --- 
a/tools/testing/selftests/bpf/progs/test_global_func11.c +++ b/tools/testing/selftests/bpf/progs/test_global_func11.c @@ -2,6 +2,7 @@ #include #include #include +#include "bpf_misc.h" struct S { int x; @@ -13,7 +14,8 @@ __noinline int foo(const struct S *s) } SEC("cgroup_skb/ingress") -int test_cls(struct __sk_buff *skb) +__failure __msg("Caller passes invalid args into func#1") +int global_func11(struct __sk_buff *skb) { return foo((const void *)skb); } diff --git a/tools/testing/selftests/bpf/progs/test_global_func12.c b/tools/testing/selftests/bpf/progs/test_global_func12.c index 62343527cc59..7f159d83c6f6 100644 --- a/tools/testing/selftests/bpf/progs/test_global_func12.c +++ b/tools/testing/selftests/bpf/progs/test_global_func12.c @@ -2,6 +2,7 @@ #include #include #include +#include "bpf_misc.h" struct S { int x; @@ -13,7 +14,8 @@ __noinline int foo(const struct S *s) } SEC("cgroup_skb/ingress") -int test_cls(struct __sk_buff *skb) +__failure __msg("invalid mem access 'mem_or_null'") +int global_func12(struct __sk_buff *skb) { const struct S s = {.x = skb->len }; diff --git a/tools/testing/selftests/bpf/progs/test_global_func13.c b/tools/testing/selftests/bpf/progs/test_global_func13.c index ff8897c1ac22..02ea80da75b5 100644 --- a/tools/testing/selftests/bpf/progs/test_global_func13.c +++ b/tools/testing/selftests/bpf/progs/test_global_func13.c @@ -2,6 +2,7 @@ #include #include #include +#include "bpf_misc.h" struct S { int x; @@ -16,7 +17,8 @@ __noinline int foo(const struct S *s) } SEC("cgroup_skb/ingress") -int test_cls(struct __sk_buff *skb) +__failure __msg("Caller passes invalid args into func#1") +int global_func13(struct __sk_buff *skb) { const struct S *s = (const struct S *)(0xbedabeda); diff --git a/tools/testing/selftests/bpf/progs/test_global_func14.c b/tools/testing/selftests/bpf/progs/test_global_func14.c index 698c77199ebf..33b7d5efd7b2 100644 --- a/tools/testing/selftests/bpf/progs/test_global_func14.c +++ b/tools/testing/selftests/bpf/progs/test_global_func14.c @@ -2,6 +2,7 @@ #include #include #include +#include "bpf_misc.h" struct S; @@ -14,7 +15,8 @@ __noinline int foo(const struct S *s) } SEC("cgroup_skb/ingress") -int test_cls(struct __sk_buff *skb) +__failure __msg("reference type('FWD S') size cannot be determined") +int global_func14(struct __sk_buff *skb) { return foo(NULL); diff --git a/tools/testing/selftests/bpf/progs/test_global_func15.c b/tools/testing/selftests/bpf/progs/test_global_func15.c index c19c435988d5..b512d6a6c75e 100644 --- a/tools/testing/selftests/bpf/progs/test_global_func15.c +++ b/tools/testing/selftests/bpf/progs/test_global_func15.c @@ -2,6 +2,7 @@ #include #include #include +#include "bpf_misc.h" __noinline int foo(unsigned int *v) { @@ -12,7 +13,8 @@ __noinline int foo(unsigned int *v) } SEC("cgroup_skb/ingress") -int test_cls(struct __sk_buff *skb) +__failure __msg("At program exit the register R0 has value") +int global_func15(struct __sk_buff *skb) { unsigned int v = 1; diff --git a/tools/testing/selftests/bpf/progs/test_global_func16.c b/tools/testing/selftests/bpf/progs/test_global_func16.c index 0312d1e8d8c0..e7206304632e 100644 --- a/tools/testing/selftests/bpf/progs/test_global_func16.c +++ b/tools/testing/selftests/bpf/progs/test_global_func16.c @@ -2,6 +2,7 @@ #include #include #include +#include "bpf_misc.h" __noinline int foo(int (*arr)[10]) { @@ -12,7 +13,8 @@ __noinline int foo(int (*arr)[10]) } SEC("cgroup_skb/ingress") -int test_cls(struct __sk_buff *skb) +__failure __msg("invalid indirect read from stack") +int 
global_func16(struct __sk_buff *skb) { int array[10]; diff --git a/tools/testing/selftests/bpf/progs/test_global_func17.c b/tools/testing/selftests/bpf/progs/test_global_func17.c index 2b8b9b8ba018..a32e11c7d933 100644 --- a/tools/testing/selftests/bpf/progs/test_global_func17.c +++ b/tools/testing/selftests/bpf/progs/test_global_func17.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only #include #include +#include "bpf_misc.h" __noinline int foo(int *p) { @@ -10,7 +11,8 @@ __noinline int foo(int *p) const volatile int i; SEC("tc") -int test_cls(struct __sk_buff *skb) +__failure __msg("Caller passes invalid args into func#1") +int global_func17(struct __sk_buff *skb) { return foo((int *)&i); } diff --git a/tools/testing/selftests/bpf/progs/test_global_func2.c b/tools/testing/selftests/bpf/progs/test_global_func2.c index 2c18d82923a2..3dce97fb52a4 100644 --- a/tools/testing/selftests/bpf/progs/test_global_func2.c +++ b/tools/testing/selftests/bpf/progs/test_global_func2.c @@ -1,4 +1,45 @@ // SPDX-License-Identifier: GPL-2.0-only /* Copyright (c) 2020 Facebook */ +#include +#include +#include +#include "bpf_misc.h" + #define MAX_STACK (512 - 3 * 32) -#include "test_global_func1.c" + +static __attribute__ ((noinline)) +int f0(int var, struct __sk_buff *skb) +{ + return skb->len; +} + +__attribute__ ((noinline)) +int f1(struct __sk_buff *skb) +{ + volatile char buf[MAX_STACK] = {}; + + return f0(0, skb) + skb->len; +} + +int f3(int, struct __sk_buff *skb, int); + +__attribute__ ((noinline)) +int f2(int val, struct __sk_buff *skb) +{ + return f1(skb) + f3(val, skb, 1); +} + +__attribute__ ((noinline)) +int f3(int val, struct __sk_buff *skb, int var) +{ + volatile char buf[MAX_STACK] = {}; + + return skb->ifindex * val * var; +} + +SEC("tc") +__success +int global_func2(struct __sk_buff *skb) +{ + return f0(1, skb) + f1(skb) + f2(2, skb) + f3(3, skb, 4); +} diff --git a/tools/testing/selftests/bpf/progs/test_global_func3.c b/tools/testing/selftests/bpf/progs/test_global_func3.c index 01bf8275dfd6..142b682d3c2f 100644 --- a/tools/testing/selftests/bpf/progs/test_global_func3.c +++ b/tools/testing/selftests/bpf/progs/test_global_func3.c @@ -3,6 +3,7 @@ #include #include #include +#include "bpf_misc.h" __attribute__ ((noinline)) int f1(struct __sk_buff *skb) @@ -46,20 +47,15 @@ int f7(struct __sk_buff *skb) return f6(skb); } -#ifndef NO_FN8 __attribute__ ((noinline)) int f8(struct __sk_buff *skb) { return f7(skb); } -#endif SEC("tc") -int test_cls(struct __sk_buff *skb) +__failure __msg("the call stack of 8 frames") +int global_func3(struct __sk_buff *skb) { -#ifndef NO_FN8 return f8(skb); -#else - return f7(skb); -#endif } diff --git a/tools/testing/selftests/bpf/progs/test_global_func4.c b/tools/testing/selftests/bpf/progs/test_global_func4.c index 610f75edf276..1733d87ad3f3 100644 --- a/tools/testing/selftests/bpf/progs/test_global_func4.c +++ b/tools/testing/selftests/bpf/progs/test_global_func4.c @@ -1,4 +1,55 @@ // SPDX-License-Identifier: GPL-2.0-only /* Copyright (c) 2020 Facebook */ -#define NO_FN8 -#include "test_global_func3.c" +#include +#include +#include +#include "bpf_misc.h" + +__attribute__ ((noinline)) +int f1(struct __sk_buff *skb) +{ + return skb->len; +} + +__attribute__ ((noinline)) +int f2(int val, struct __sk_buff *skb) +{ + return f1(skb) + val; +} + +__attribute__ ((noinline)) +int f3(int val, struct __sk_buff *skb, int var) +{ + return f2(var, skb) + val; +} + +__attribute__ ((noinline)) +int f4(struct __sk_buff *skb) +{ + return f3(1, skb, 2); +} + +__attribute__ 
((noinline)) +int f5(struct __sk_buff *skb) +{ + return f4(skb); +} + +__attribute__ ((noinline)) +int f6(struct __sk_buff *skb) +{ + return f5(skb); +} + +__attribute__ ((noinline)) +int f7(struct __sk_buff *skb) +{ + return f6(skb); +} + +SEC("tc") +__success +int global_func4(struct __sk_buff *skb) +{ + return f7(skb); +} diff --git a/tools/testing/selftests/bpf/progs/test_global_func5.c b/tools/testing/selftests/bpf/progs/test_global_func5.c index 9248d03e0d06..cc55aedaf82d 100644 --- a/tools/testing/selftests/bpf/progs/test_global_func5.c +++ b/tools/testing/selftests/bpf/progs/test_global_func5.c @@ -3,6 +3,7 @@ #include #include #include +#include "bpf_misc.h" __attribute__ ((noinline)) int f1(struct __sk_buff *skb) @@ -25,7 +26,8 @@ int f3(int val, struct __sk_buff *skb) } SEC("tc") -int test_cls(struct __sk_buff *skb) +__failure __msg("expected pointer to ctx, but got PTR") +int global_func5(struct __sk_buff *skb) { return f1(skb) + f2(2, skb) + f3(3, skb); } diff --git a/tools/testing/selftests/bpf/progs/test_global_func6.c b/tools/testing/selftests/bpf/progs/test_global_func6.c index af8c78bdfb25..46c38c8f2cf0 100644 --- a/tools/testing/selftests/bpf/progs/test_global_func6.c +++ b/tools/testing/selftests/bpf/progs/test_global_func6.c @@ -3,6 +3,7 @@ #include #include #include +#include "bpf_misc.h" __attribute__ ((noinline)) int f1(struct __sk_buff *skb) @@ -25,7 +26,8 @@ int f3(int val, struct __sk_buff *skb) } SEC("tc") -int test_cls(struct __sk_buff *skb) +__failure __msg("modified ctx ptr R2") +int global_func6(struct __sk_buff *skb) { return f1(skb) + f2(2, skb) + f3(3, skb); } diff --git a/tools/testing/selftests/bpf/progs/test_global_func7.c b/tools/testing/selftests/bpf/progs/test_global_func7.c index 6cb8e2f5254c..f182febfde3c 100644 --- a/tools/testing/selftests/bpf/progs/test_global_func7.c +++ b/tools/testing/selftests/bpf/progs/test_global_func7.c @@ -3,6 +3,7 @@ #include #include #include +#include "bpf_misc.h" __attribute__ ((noinline)) void foo(struct __sk_buff *skb) @@ -11,7 +12,8 @@ void foo(struct __sk_buff *skb) } SEC("tc") -int test_cls(struct __sk_buff *skb) +__failure __msg("foo() doesn't return scalar") +int global_func7(struct __sk_buff *skb) { foo(skb); return 0; diff --git a/tools/testing/selftests/bpf/progs/test_global_func8.c b/tools/testing/selftests/bpf/progs/test_global_func8.c index d55a6544b1ab..9b9c57fa2dd3 100644 --- a/tools/testing/selftests/bpf/progs/test_global_func8.c +++ b/tools/testing/selftests/bpf/progs/test_global_func8.c @@ -3,6 +3,7 @@ #include #include #include +#include "bpf_misc.h" __noinline int foo(struct __sk_buff *skb) { @@ -10,7 +11,8 @@ __noinline int foo(struct __sk_buff *skb) } SEC("cgroup_skb/ingress") -int test_cls(struct __sk_buff *skb) +__success +int global_func8(struct __sk_buff *skb) { if (!foo(skb)) return 0; diff --git a/tools/testing/selftests/bpf/progs/test_global_func9.c b/tools/testing/selftests/bpf/progs/test_global_func9.c index bd233ddede98..1f2cb0159b8d 100644 --- a/tools/testing/selftests/bpf/progs/test_global_func9.c +++ b/tools/testing/selftests/bpf/progs/test_global_func9.c @@ -2,6 +2,7 @@ #include #include #include +#include "bpf_misc.h" struct S { int x; @@ -74,7 +75,8 @@ __noinline int quuz(int **p) } SEC("cgroup_skb/ingress") -int test_cls(struct __sk_buff *skb) +__success +int global_func9(struct __sk_buff *skb) { int result = 0; -- cgit v1.2.3 From e2b5cfc978f871996d1f8667515c0e06b33e620e Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 15 Feb 2023 20:59:54 -0800 Subject: 
selftests/bpf: Add global subprog context passing tests Add tests validating that it's possible to pass context arguments into global subprogs for various types of programs, including the particularly tricky KPROBE programs (which cover kprobes, uprobes, USDTs, a vast and important class of programs). Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Acked-by: Stanislav Fomichev Link: https://lore.kernel.org/bpf/20230216045954.3002473-4-andrii@kernel.org --- .../selftests/bpf/prog_tests/test_global_funcs.c | 2 + .../bpf/progs/test_global_func_ctx_args.c | 104 +++++++++++++++++++++ 2 files changed, 106 insertions(+) create mode 100644 tools/testing/selftests/bpf/progs/test_global_func_ctx_args.c (limited to 'tools') diff --git a/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c b/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c index 2ff4d5c7abfc..e0879df38639 100644 --- a/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c +++ b/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c @@ -18,6 +18,7 @@ #include "test_global_func15.skel.h" #include "test_global_func16.skel.h" #include "test_global_func17.skel.h" +#include "test_global_func_ctx_args.skel.h" void test_test_global_funcs(void) { @@ -38,4 +39,5 @@ RUN_TESTS(test_global_func15); RUN_TESTS(test_global_func16); RUN_TESTS(test_global_func17); + RUN_TESTS(test_global_func_ctx_args); } diff --git a/tools/testing/selftests/bpf/progs/test_global_func_ctx_args.c b/tools/testing/selftests/bpf/progs/test_global_func_ctx_args.c new file mode 100644 index 000000000000..7faa8eef0598 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_global_func_ctx_args.c @@ -0,0 +1,104 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates.
*/ +#include "vmlinux.h" +#include +#include +#include +#include "bpf_misc.h" + +char _license[] SEC("license") = "GPL"; + +static long stack[256]; + +/* + * KPROBE contexts + */ + +__weak int kprobe_typedef_ctx_subprog(bpf_user_pt_regs_t *ctx) +{ + return bpf_get_stack(ctx, &stack, sizeof(stack), 0); +} + +SEC("?kprobe") +__success +int kprobe_typedef_ctx(void *ctx) +{ + return kprobe_typedef_ctx_subprog(ctx); +} + +#define pt_regs_struct_t typeof(*(__PT_REGS_CAST((struct pt_regs *)NULL))) + +__weak int kprobe_struct_ctx_subprog(pt_regs_struct_t *ctx) +{ + return bpf_get_stack((void *)ctx, &stack, sizeof(stack), 0); +} + +SEC("?kprobe") +__success +int kprobe_resolved_ctx(void *ctx) +{ + return kprobe_struct_ctx_subprog(ctx); +} + +/* this is current hack to make this work on old kernels */ +struct bpf_user_pt_regs_t {}; + +__weak int kprobe_workaround_ctx_subprog(struct bpf_user_pt_regs_t *ctx) +{ + return bpf_get_stack(ctx, &stack, sizeof(stack), 0); +} + +SEC("?kprobe") +__success +int kprobe_workaround_ctx(void *ctx) +{ + return kprobe_workaround_ctx_subprog(ctx); +} + +/* + * RAW_TRACEPOINT contexts + */ + +__weak int raw_tp_ctx_subprog(struct bpf_raw_tracepoint_args *ctx) +{ + return bpf_get_stack(ctx, &stack, sizeof(stack), 0); +} + +SEC("?raw_tp") +__success +int raw_tp_ctx(void *ctx) +{ + return raw_tp_ctx_subprog(ctx); +} + +/* + * RAW_TRACEPOINT_WRITABLE contexts + */ + +__weak int raw_tp_writable_ctx_subprog(struct bpf_raw_tracepoint_args *ctx) +{ + return bpf_get_stack(ctx, &stack, sizeof(stack), 0); +} + +SEC("?raw_tp") +__success +int raw_tp_writable_ctx(void *ctx) +{ + return raw_tp_writable_ctx_subprog(ctx); +} + +/* + * PERF_EVENT contexts + */ + +__weak int perf_event_ctx_subprog(struct bpf_perf_event_data *ctx) +{ + return bpf_get_stack(ctx, &stack, sizeof(stack), 0); +} + +SEC("?perf_event") +__success +int perf_event_ctx(void *ctx) +{ + return perf_event_ctx_subprog(ctx); +} -- cgit v1.2.3 From 181127fb76e62d06ab17a75fd610129688612343 Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Fri, 17 Feb 2023 12:13:09 -0800 Subject: Revert "bpf, test_run: fix &xdp_frame misplacement for LIVE_FRAMES" This reverts commit 6c20822fada1b8adb77fa450d03a0d449686a4a9. build bot failed on arch with different cache line size: https://lore.kernel.org/bpf/50c35055-afa9-d01e-9a05-ea5351280e4f@intel.com/ Signed-off-by: Martin KaFai Lau --- net/bpf/test_run.c | 29 +++++----------------- .../selftests/bpf/prog_tests/xdp_do_redirect.c | 7 +++--- 2 files changed, 9 insertions(+), 27 deletions(-) (limited to 'tools') diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index 982e81bba6cf..6f3d654b3339 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -97,11 +97,8 @@ reset: struct xdp_page_head { struct xdp_buff orig_ctx; struct xdp_buff ctx; - union { - /* ::data_hard_start starts here */ - DECLARE_FLEX_ARRAY(struct xdp_frame, frame); - DECLARE_FLEX_ARRAY(u8, data); - }; + struct xdp_frame frm; + u8 data[]; }; struct xdp_test_data { @@ -119,20 +116,6 @@ struct xdp_test_data { #define TEST_XDP_FRAME_SIZE (PAGE_SIZE - sizeof(struct xdp_page_head)) #define TEST_XDP_MAX_BATCH 256 -#if BITS_PER_LONG == 64 && PAGE_SIZE == SZ_4K -/* tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c:%MAX_PKT_SIZE - * must be updated accordingly when any of these changes, otherwise BPF - * selftests will fail. 
- */ -#ifdef __s390x__ -#define TEST_MAX_PKT_SIZE 3216 -#else -#define TEST_MAX_PKT_SIZE 3408 -#endif -static_assert(SKB_WITH_OVERHEAD(TEST_XDP_FRAME_SIZE - XDP_PACKET_HEADROOM) == - TEST_MAX_PKT_SIZE); -#endif - static void xdp_test_run_init_page(struct page *page, void *arg) { struct xdp_page_head *head = phys_to_virt(page_to_phys(page)); @@ -149,8 +132,8 @@ static void xdp_test_run_init_page(struct page *page, void *arg) headroom -= meta_len; new_ctx = &head->ctx; - frm = head->frame; - data = head->data; + frm = &head->frm; + data = &head->data; memcpy(data + headroom, orig_ctx->data_meta, frm_len); xdp_init_buff(new_ctx, TEST_XDP_FRAME_SIZE, &xdp->rxq); @@ -240,7 +223,7 @@ static void reset_ctx(struct xdp_page_head *head) head->ctx.data = head->orig_ctx.data; head->ctx.data_meta = head->orig_ctx.data_meta; head->ctx.data_end = head->orig_ctx.data_end; - xdp_update_frame_from_buff(&head->ctx, head->frame); + xdp_update_frame_from_buff(&head->ctx, &head->frm); } static int xdp_recv_frames(struct xdp_frame **frames, int nframes, @@ -302,7 +285,7 @@ static int xdp_test_run_batch(struct xdp_test_data *xdp, struct bpf_prog *prog, head = phys_to_virt(page_to_phys(page)); reset_ctx(head); ctx = &head->ctx; - frm = head->frame; + frm = &head->frm; xdp->frame_cnt++; act = bpf_prog_run_xdp(prog, ctx); diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c index 7271a18ab3e2..2666c84dbd01 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c @@ -65,13 +65,12 @@ static int attach_tc_prog(struct bpf_tc_hook *hook, int fd) } /* The maximum permissible size is: PAGE_SIZE - sizeof(struct xdp_page_head) - - * SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) - XDP_PACKET_HEADROOM = - * 3408 bytes for 64-byte cacheline and 3216 for 256-byte one. + * sizeof(struct skb_shared_info) - XDP_PACKET_HEADROOM = 3368 bytes */ #if defined(__s390x__) -#define MAX_PKT_SIZE 3216 +#define MAX_PKT_SIZE 3176 #else -#define MAX_PKT_SIZE 3408 +#define MAX_PKT_SIZE 3368 #endif static void test_max_pkt_size(int fd) { -- cgit v1.2.3 From 31de4105f00d64570139bc5494a201b0bd57349f Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Fri, 17 Feb 2023 12:55:14 -0800 Subject: bpf: Add BPF_FIB_LOOKUP_SKIP_NEIGH for bpf_fib_lookup bpf_fib_lookup() also looks up the neigh table; this was done before bpf_redirect_neigh() was added. In a use case that does not manage the neigh table and only needs bpf_fib_lookup() to look up a fib to decide whether or not to redirect, the bpf prog can rely solely on bpf_redirect_neigh() to look up the neigh, which also keeps the neigh entries fresh and connected. This patch adds a bpf_fib_lookup flag, SKIP_NEIGH, to avoid the double neigh lookup when the bpf prog always calls bpf_redirect_neigh() to do the neigh lookup. The params->smac output is skipped as well when SKIP_NEIGH is set, because bpf_redirect_neigh() will figure out the smac too.
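To make the intended calling pattern concrete, a rough sketch of a tc program using the new flag might look as follows (the program is illustrative only, the packet-parsing details are elided, and the actual selftest arrives in the next commit):

    #include <linux/bpf.h>
    #include <linux/pkt_cls.h>
    #include <bpf/bpf_helpers.h>

    SEC("tc")
    int fwd(struct __sk_buff *skb)
    {
    	struct bpf_fib_lookup params = {};
    	struct bpf_redir_neigh nh = {};
    	int ret;

    	/* family, ifindex and the src/dst addresses would be filled
    	 * in from the packet here (elided in this sketch)
    	 */
    	ret = bpf_fib_lookup(skb, &params, sizeof(params),
    			     BPF_FIB_LOOKUP_SKIP_NEIGH);
    	if (ret != BPF_FIB_LKUP_RET_SUCCESS)
    		return TC_ACT_OK;

    	/* no dmac/smac rewrite here; hand the nexthop over and let
    	 * bpf_redirect_neigh() do the neigh lookup itself
    	 */
    	nh.nh_family = params.family;
    	__builtin_memcpy(&nh.ipv6_nh, &params.ipv6_dst,
    			 sizeof(nh.ipv6_nh));
    	return bpf_redirect_neigh(params.ifindex, &nh, sizeof(nh), 0);
    }

    char _license[] SEC("license") = "GPL";

Copying params.ipv6_dst into the bpf_redir_neigh nexthop covers both address families in this sketch, since ipv4_nh and ipv6_nh share a union and, as the net/core/filter.c change in this patch shows, bpf_fib_lookup() rewrites ipv{4,6}_dst to the gateway address when the route has one.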
Signed-off-by: Martin KaFai Lau Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20230217205515.3583372-1-martin.lau@linux.dev --- include/uapi/linux/bpf.h | 6 ++++++ net/core/filter.c | 39 ++++++++++++++++++++++++++------------- tools/include/uapi/linux/bpf.h | 6 ++++++ 3 files changed, 38 insertions(+), 13 deletions(-) (limited to 'tools') diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 1503f61336b6..62ce1f5d1b1d 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -3134,6 +3134,11 @@ union bpf_attr { * **BPF_FIB_LOOKUP_OUTPUT** * Perform lookup from an egress perspective (default is * ingress). + * **BPF_FIB_LOOKUP_SKIP_NEIGH** + * Skip the neighbour table lookup. *params*->dmac + * and *params*->smac will not be set as output. A common + * use case is to call **bpf_redirect_neigh**\ () after + * doing **bpf_fib_lookup**\ (). * * *ctx* is either **struct xdp_md** for XDP programs or * **struct sk_buff** tc cls_act programs. @@ -6750,6 +6755,7 @@ struct bpf_raw_tracepoint_args { enum { BPF_FIB_LOOKUP_DIRECT = (1U << 0), BPF_FIB_LOOKUP_OUTPUT = (1U << 1), + BPF_FIB_LOOKUP_SKIP_NEIGH = (1U << 2), }; enum { diff --git a/net/core/filter.c b/net/core/filter.c index 8daaaf76ab15..1d6f165923bf 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -5722,12 +5722,8 @@ static const struct bpf_func_proto bpf_skb_get_xfrm_state_proto = { #endif #if IS_ENABLED(CONFIG_INET) || IS_ENABLED(CONFIG_IPV6) -static int bpf_fib_set_fwd_params(struct bpf_fib_lookup *params, - const struct neighbour *neigh, - const struct net_device *dev, u32 mtu) +static int bpf_fib_set_fwd_params(struct bpf_fib_lookup *params, u32 mtu) { - memcpy(params->dmac, neigh->ha, ETH_ALEN); - memcpy(params->smac, dev->dev_addr, ETH_ALEN); params->h_vlan_TCI = 0; params->h_vlan_proto = 0; if (mtu) @@ -5838,21 +5834,29 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params, if (likely(nhc->nhc_gw_family != AF_INET6)) { if (nhc->nhc_gw_family) params->ipv4_dst = nhc->nhc_gw.ipv4; - - neigh = __ipv4_neigh_lookup_noref(dev, - (__force u32)params->ipv4_dst); } else { struct in6_addr *dst = (struct in6_addr *)params->ipv6_dst; params->family = AF_INET6; *dst = nhc->nhc_gw.ipv6; - neigh = __ipv6_neigh_lookup_noref_stub(dev, dst); } + if (flags & BPF_FIB_LOOKUP_SKIP_NEIGH) + goto set_fwd_params; + + if (likely(nhc->nhc_gw_family != AF_INET6)) + neigh = __ipv4_neigh_lookup_noref(dev, + (__force u32)params->ipv4_dst); + else + neigh = __ipv6_neigh_lookup_noref_stub(dev, params->ipv6_dst); + if (!neigh || !(neigh->nud_state & NUD_VALID)) return BPF_FIB_LKUP_RET_NO_NEIGH; + memcpy(params->dmac, neigh->ha, ETH_ALEN); + memcpy(params->smac, dev->dev_addr, ETH_ALEN); - return bpf_fib_set_fwd_params(params, neigh, dev, mtu); +set_fwd_params: + return bpf_fib_set_fwd_params(params, mtu); } #endif @@ -5960,24 +5964,33 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params, params->rt_metric = res.f6i->fib6_metric; params->ifindex = dev->ifindex; + if (flags & BPF_FIB_LOOKUP_SKIP_NEIGH) + goto set_fwd_params; + /* xdp and cls_bpf programs are run in RCU-bh so rcu_read_lock_bh is * not needed here. 
*/ neigh = __ipv6_neigh_lookup_noref_stub(dev, dst); if (!neigh || !(neigh->nud_state & NUD_VALID)) return BPF_FIB_LKUP_RET_NO_NEIGH; + memcpy(params->dmac, neigh->ha, ETH_ALEN); + memcpy(params->smac, dev->dev_addr, ETH_ALEN); - return bpf_fib_set_fwd_params(params, neigh, dev, mtu); +set_fwd_params: + return bpf_fib_set_fwd_params(params, mtu); } #endif +#define BPF_FIB_LOOKUP_MASK (BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT | \ + BPF_FIB_LOOKUP_SKIP_NEIGH) + BPF_CALL_4(bpf_xdp_fib_lookup, struct xdp_buff *, ctx, struct bpf_fib_lookup *, params, int, plen, u32, flags) { if (plen < sizeof(*params)) return -EINVAL; - if (flags & ~(BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT)) + if (flags & ~BPF_FIB_LOOKUP_MASK) return -EINVAL; switch (params->family) { @@ -6015,7 +6028,7 @@ BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb, if (plen < sizeof(*params)) return -EINVAL; - if (flags & ~(BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT)) + if (flags & ~BPF_FIB_LOOKUP_MASK) return -EINVAL; if (params->tot_len) diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 1503f61336b6..62ce1f5d1b1d 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -3134,6 +3134,11 @@ union bpf_attr { * **BPF_FIB_LOOKUP_OUTPUT** * Perform lookup from an egress perspective (default is * ingress). + * **BPF_FIB_LOOKUP_SKIP_NEIGH** + * Skip the neighbour table lookup. *params*->dmac + * and *params*->smac will not be set as output. A common + * use case is to call **bpf_redirect_neigh**\ () after + * doing **bpf_fib_lookup**\ (). * * *ctx* is either **struct xdp_md** for XDP programs or * **struct sk_buff** tc cls_act programs. @@ -6750,6 +6755,7 @@ struct bpf_raw_tracepoint_args { enum { BPF_FIB_LOOKUP_DIRECT = (1U << 0), BPF_FIB_LOOKUP_OUTPUT = (1U << 1), + BPF_FIB_LOOKUP_SKIP_NEIGH = (1U << 2), }; enum { -- cgit v1.2.3 From 168de0233586fb06c5c5c56304aa9a928a09b0ba Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Fri, 17 Feb 2023 12:55:15 -0800 Subject: selftests/bpf: Add bpf_fib_lookup test This patch tests the bpf_fib_lookup helper when looking up a neigh in the NUD_FAILED and NUD_STALE states. It also adds a test for the new BPF_FIB_LOOKUP_SKIP_NEIGH flag. Signed-off-by: Martin KaFai Lau Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20230217205515.3583372-2-martin.lau@linux.dev --- .../testing/selftests/bpf/prog_tests/fib_lookup.c | 187 +++++++++++++++++++++ tools/testing/selftests/bpf/progs/fib_lookup.c | 22 +++ 2 files changed, 209 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/fib_lookup.c create mode 100644 tools/testing/selftests/bpf/progs/fib_lookup.c (limited to 'tools') diff --git a/tools/testing/selftests/bpf/prog_tests/fib_lookup.c b/tools/testing/selftests/bpf/prog_tests/fib_lookup.c new file mode 100644 index 000000000000..61ccddccf485 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/fib_lookup.c @@ -0,0 +1,187 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */ + +#include +#include + +#include "test_progs.h" +#include "network_helpers.h" +#include "fib_lookup.skel.h" + +#define SYS(fmt, ...)
\ + ({ \ + char cmd[1024]; \ + snprintf(cmd, sizeof(cmd), fmt, ##__VA_ARGS__); \ + if (!ASSERT_OK(system(cmd), cmd)) \ + goto fail; \ + }) + +#define NS_TEST "fib_lookup_ns" +#define IPV6_IFACE_ADDR "face::face" +#define IPV6_NUD_FAILED_ADDR "face::1" +#define IPV6_NUD_STALE_ADDR "face::2" +#define IPV4_IFACE_ADDR "10.0.0.254" +#define IPV4_NUD_FAILED_ADDR "10.0.0.1" +#define IPV4_NUD_STALE_ADDR "10.0.0.2" +#define DMAC "11:11:11:11:11:11" +#define DMAC_INIT { 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, } + +struct fib_lookup_test { + const char *desc; + const char *daddr; + int expected_ret; + int lookup_flags; + __u8 dmac[6]; +}; + +static const struct fib_lookup_test tests[] = { + { .desc = "IPv6 failed neigh", + .daddr = IPV6_NUD_FAILED_ADDR, .expected_ret = BPF_FIB_LKUP_RET_NO_NEIGH, }, + { .desc = "IPv6 stale neigh", + .daddr = IPV6_NUD_STALE_ADDR, .expected_ret = BPF_FIB_LKUP_RET_SUCCESS, + .dmac = DMAC_INIT, }, + { .desc = "IPv6 skip neigh", + .daddr = IPV6_NUD_FAILED_ADDR, .expected_ret = BPF_FIB_LKUP_RET_SUCCESS, + .lookup_flags = BPF_FIB_LOOKUP_SKIP_NEIGH, }, + { .desc = "IPv4 failed neigh", + .daddr = IPV4_NUD_FAILED_ADDR, .expected_ret = BPF_FIB_LKUP_RET_NO_NEIGH, }, + { .desc = "IPv4 stale neigh", + .daddr = IPV4_NUD_STALE_ADDR, .expected_ret = BPF_FIB_LKUP_RET_SUCCESS, + .dmac = DMAC_INIT, }, + { .desc = "IPv4 skip neigh", + .daddr = IPV4_NUD_FAILED_ADDR, .expected_ret = BPF_FIB_LKUP_RET_SUCCESS, + .lookup_flags = BPF_FIB_LOOKUP_SKIP_NEIGH, }, +}; + +static int ifindex; + +static int setup_netns(void) +{ + int err; + + SYS("ip link add veth1 type veth peer name veth2"); + SYS("ip link set dev veth1 up"); + + SYS("ip addr add %s/64 dev veth1 nodad", IPV6_IFACE_ADDR); + SYS("ip neigh add %s dev veth1 nud failed", IPV6_NUD_FAILED_ADDR); + SYS("ip neigh add %s dev veth1 lladdr %s nud stale", IPV6_NUD_STALE_ADDR, DMAC); + + SYS("ip addr add %s/24 dev veth1 nodad", IPV4_IFACE_ADDR); + SYS("ip neigh add %s dev veth1 nud failed", IPV4_NUD_FAILED_ADDR); + SYS("ip neigh add %s dev veth1 lladdr %s nud stale", IPV4_NUD_STALE_ADDR, DMAC); + + err = write_sysctl("/proc/sys/net/ipv4/conf/veth1/forwarding", "1"); + if (!ASSERT_OK(err, "write_sysctl(net.ipv4.conf.veth1.forwarding)")) + goto fail; + + err = write_sysctl("/proc/sys/net/ipv6/conf/veth1/forwarding", "1"); + if (!ASSERT_OK(err, "write_sysctl(net.ipv6.conf.veth1.forwarding)")) + goto fail; + + return 0; +fail: + return -1; +} + +static int set_lookup_params(struct bpf_fib_lookup *params, const char *daddr) +{ + int ret; + + memset(params, 0, sizeof(*params)); + + params->l4_protocol = IPPROTO_TCP; + params->ifindex = ifindex; + + if (inet_pton(AF_INET6, daddr, params->ipv6_dst) == 1) { + params->family = AF_INET6; + ret = inet_pton(AF_INET6, IPV6_IFACE_ADDR, params->ipv6_src); + if (!ASSERT_EQ(ret, 1, "inet_pton(IPV6_IFACE_ADDR)")) + return -1; + return 0; + } + + ret = inet_pton(AF_INET, daddr, &params->ipv4_dst); + if (!ASSERT_EQ(ret, 1, "convert IP[46] address")) + return -1; + params->family = AF_INET; + ret = inet_pton(AF_INET, IPV4_IFACE_ADDR, &params->ipv4_src); + if (!ASSERT_EQ(ret, 1, "inet_pton(IPV4_IFACE_ADDR)")) + return -1; + + return 0; +} + +static void mac_str(char *b, const __u8 *mac) +{ + sprintf(b, "%02X:%02X:%02X:%02X:%02X:%02X", + mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); +} + +void test_fib_lookup(void) +{ + struct bpf_fib_lookup *fib_params; + struct nstoken *nstoken = NULL; + struct __sk_buff skb = { }; + struct fib_lookup *skel; + int prog_fd, err, ret, i; + + /* The test does not use the skb->data, so + * use
pkt_v6 for both v6 and v4 test. + */ + LIBBPF_OPTS(bpf_test_run_opts, run_opts, + .data_in = &pkt_v6, + .data_size_in = sizeof(pkt_v6), + .ctx_in = &skb, + .ctx_size_in = sizeof(skb), + ); + + skel = fib_lookup__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel open_and_load")) + return; + prog_fd = bpf_program__fd(skel->progs.fib_lookup); + + SYS("ip netns add %s", NS_TEST); + + nstoken = open_netns(NS_TEST); + if (!ASSERT_OK_PTR(nstoken, "open_netns")) + goto fail; + + if (setup_netns()) + goto fail; + + ifindex = if_nametoindex("veth1"); + skb.ifindex = ifindex; + fib_params = &skel->bss->fib_params; + + for (i = 0; i < ARRAY_SIZE(tests); i++) { + printf("Testing %s\n", tests[i].desc); + + if (set_lookup_params(fib_params, tests[i].daddr)) + continue; + skel->bss->fib_lookup_ret = -1; + skel->bss->lookup_flags = BPF_FIB_LOOKUP_OUTPUT | + tests[i].lookup_flags; + + err = bpf_prog_test_run_opts(prog_fd, &run_opts); + if (!ASSERT_OK(err, "bpf_prog_test_run_opts")) + continue; + + ASSERT_EQ(tests[i].expected_ret, skel->bss->fib_lookup_ret, + "fib_lookup_ret"); + + ret = memcmp(tests[i].dmac, fib_params->dmac, sizeof(tests[i].dmac)); + if (!ASSERT_EQ(ret, 0, "dmac not match")) { + char expected[18], actual[18]; + + mac_str(expected, tests[i].dmac); + mac_str(actual, fib_params->dmac); + printf("dmac expected %s actual %s\n", expected, actual); + } + } + +fail: + if (nstoken) + close_netns(nstoken); + system("ip netns del " NS_TEST " &> /dev/null"); + fib_lookup__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/progs/fib_lookup.c b/tools/testing/selftests/bpf/progs/fib_lookup.c new file mode 100644 index 000000000000..c4514dd58c62 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/fib_lookup.c @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */ + +#include +#include +#include +#include "bpf_tracing_net.h" + +struct bpf_fib_lookup fib_params = {}; +int fib_lookup_ret = 0; +int lookup_flags = 0; + +SEC("tc") +int fib_lookup(struct __sk_buff *skb) +{ + fib_lookup_ret = bpf_fib_lookup(skb, &fib_params, sizeof(fib_params), + lookup_flags); + + return TC_ACT_SHOT; +} + +char _license[] SEC("license") = "GPL"; -- cgit v1.2.3