From 60f270753960291895cdd07d360c4e09c56c4596 Mon Sep 17 00:00:00 2001 From: Dave Marchevsky Date: Mon, 1 Nov 2021 15:43:54 -0700 Subject: bpftool: Migrate -1 err checks of libbpf fn calls Per [0], callers of libbpf functions with LIBBPF_STRICT_DIRECT_ERRS set should handle negative error codes of various values (e.g. -EINVAL). Migrate two callsites which were explicitly checking for -1 only to handle the new scheme. [0]: https://github.com/libbpf/libbpf/wiki/Libbpf-1.0-migration-guide#direct-error-code-returning-libbpf_strict_direct_errs Signed-off-by: Dave Marchevsky Signed-off-by: Andrii Nakryiko Acked-by: Quentin Monnet Link: https://lore.kernel.org/bpf/20211101224357.2651181-2-davemarchevsky@fb.com --- tools/bpf/bpftool/btf_dumper.c | 2 +- tools/bpf/bpftool/struct_ops.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/bpf/bpftool/btf_dumper.c b/tools/bpf/bpftool/btf_dumper.c index 9c25286a5c73..6934e8634b94 100644 --- a/tools/bpf/bpftool/btf_dumper.c +++ b/tools/bpf/bpftool/btf_dumper.c @@ -52,7 +52,7 @@ static int dump_prog_id_as_func_ptr(const struct btf_dumper *d, /* Get the bpf_prog's name. Obtain from func_info. */ prog_fd = bpf_prog_get_fd_by_id(prog_id); - if (prog_fd == -1) + if (prog_fd < 0) goto print; prog_info = bpf_program__get_prog_info_linear(prog_fd, diff --git a/tools/bpf/bpftool/struct_ops.c b/tools/bpf/bpftool/struct_ops.c index ab2d2290569a..20f803dce2e4 100644 --- a/tools/bpf/bpftool/struct_ops.c +++ b/tools/bpf/bpftool/struct_ops.c @@ -252,7 +252,7 @@ static struct res do_one_id(const char *id_str, work_func func, void *data, } fd = bpf_map_get_fd_by_id(id); - if (fd == -1) { + if (fd < 0) { p_err("can't get map by id (%lu): %s", id, strerror(errno)); res.nr_errs++; return res; -- cgit v1.2.3 From c59765cfd193382b00454b1a4424cb78d4c065e2 Mon Sep 17 00:00:00 2001 From: Dave Marchevsky Date: Mon, 1 Nov 2021 15:43:55 -0700 Subject: bpftool: Use bpf_obj_get_info_by_fd directly To prepare for impending deprecation of libbpf's bpf_program__get_prog_info_linear, migrate uses of this function to use bpf_obj_get_info_by_fd. Since the profile_target_name and dump_prog_id_as_func_ptr helpers were only looking at the first func_info, avoid grabbing the rest to save a malloc. For do_dump, add a more full-featured helper, but avoid free/realloc of buffer when possible for multi-prog dumps. 
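For reference, a condensed sketch of the two-call bpf_obj_get_info_by_fd() pattern the migrated helpers now follow: the first call reports how many func_info records exist and the kernel's record size, the second call fills exactly one record. The helper name and error handling here are illustrative, not bpftool code.

#include <string.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>

#define ptr_to_u64(ptr) ((__u64)(unsigned long)(ptr))

/* Fetch only the first func_info record of a loaded program. */
static int get_first_func_info(int prog_fd, struct bpf_func_info *finfo)
{
	struct bpf_prog_info info = {};
	__u32 info_len = sizeof(info);
	__u32 rec_size;

	/* call #1: learn nr_func_info and the kernel's record size */
	if (bpf_obj_get_info_by_fd(prog_fd, &info, &info_len) || !info.nr_func_info)
		return -1;

	rec_size = info.func_info_rec_size;
	memset(&info, 0, sizeof(info));
	info.nr_func_info = 1;
	info.func_info_rec_size = rec_size;
	info.func_info = ptr_to_u64(finfo);

	/* call #2: fetch just that one record */
	return bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
}
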
Signed-off-by: Dave Marchevsky Signed-off-by: Andrii Nakryiko Acked-by: Quentin Monnet Link: https://lore.kernel.org/bpf/20211101224357.2651181-3-davemarchevsky@fb.com --- tools/bpf/bpftool/btf_dumper.c | 40 ++++++----- tools/bpf/bpftool/prog.c | 159 ++++++++++++++++++++++++++++++++--------- 2 files changed, 149 insertions(+), 50 deletions(-) diff --git a/tools/bpf/bpftool/btf_dumper.c b/tools/bpf/bpftool/btf_dumper.c index 6934e8634b94..f5dddf8ef404 100644 --- a/tools/bpf/bpftool/btf_dumper.c +++ b/tools/bpf/bpftool/btf_dumper.c @@ -32,14 +32,16 @@ static int dump_prog_id_as_func_ptr(const struct btf_dumper *d, const struct btf_type *func_proto, __u32 prog_id) { - struct bpf_prog_info_linear *prog_info = NULL; const struct btf_type *func_type; + int prog_fd = -1, func_sig_len; + struct bpf_prog_info info = {}; + __u32 info_len = sizeof(info); const char *prog_name = NULL; - struct bpf_func_info *finfo; struct btf *prog_btf = NULL; - struct bpf_prog_info *info; - int prog_fd, func_sig_len; + struct bpf_func_info finfo; + __u32 finfo_rec_size; char prog_str[1024]; + int err; /* Get the ptr's func_proto */ func_sig_len = btf_dump_func(d->btf, prog_str, func_proto, NULL, 0, @@ -55,22 +57,27 @@ static int dump_prog_id_as_func_ptr(const struct btf_dumper *d, if (prog_fd < 0) goto print; - prog_info = bpf_program__get_prog_info_linear(prog_fd, - 1UL << BPF_PROG_INFO_FUNC_INFO); - close(prog_fd); - if (IS_ERR(prog_info)) { - prog_info = NULL; + err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len); + if (err) goto print; - } - info = &prog_info->info; - if (!info->btf_id || !info->nr_func_info) + if (!info.btf_id || !info.nr_func_info) + goto print; + + finfo_rec_size = info.func_info_rec_size; + memset(&info, 0, sizeof(info)); + info.nr_func_info = 1; + info.func_info_rec_size = finfo_rec_size; + info.func_info = ptr_to_u64(&finfo); + + err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len); + if (err) goto print; - prog_btf = btf__load_from_kernel_by_id(info->btf_id); + + prog_btf = btf__load_from_kernel_by_id(info.btf_id); if (libbpf_get_error(prog_btf)) goto print; - finfo = u64_to_ptr(info->func_info); - func_type = btf__type_by_id(prog_btf, finfo->type_id); + func_type = btf__type_by_id(prog_btf, finfo.type_id); if (!func_type || !btf_is_func(func_type)) goto print; @@ -92,7 +99,8 @@ print: prog_str[sizeof(prog_str) - 1] = '\0'; jsonw_string(d->jw, prog_str); btf__free(prog_btf); - free(prog_info); + if (prog_fd >= 0) + close(prog_fd); return 0; } diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c index 515d22952602..dea7a49ec26e 100644 --- a/tools/bpf/bpftool/prog.c +++ b/tools/bpf/bpftool/prog.c @@ -100,6 +100,76 @@ static enum bpf_attach_type parse_attach_type(const char *str) return __MAX_BPF_ATTACH_TYPE; } +static int prep_prog_info(struct bpf_prog_info *const info, enum dump_mode mode, + void **info_data, size_t *const info_data_sz) +{ + struct bpf_prog_info holder = {}; + size_t needed = 0; + void *ptr; + + if (mode == DUMP_JITED) { + holder.jited_prog_len = info->jited_prog_len; + needed += info->jited_prog_len; + } else { + holder.xlated_prog_len = info->xlated_prog_len; + needed += info->xlated_prog_len; + } + + holder.nr_jited_ksyms = info->nr_jited_ksyms; + needed += info->nr_jited_ksyms * sizeof(__u64); + + holder.nr_jited_func_lens = info->nr_jited_func_lens; + needed += info->nr_jited_func_lens * sizeof(__u32); + + holder.nr_func_info = info->nr_func_info; + holder.func_info_rec_size = info->func_info_rec_size; + needed += info->nr_func_info * 
info->func_info_rec_size; + + holder.nr_line_info = info->nr_line_info; + holder.line_info_rec_size = info->line_info_rec_size; + needed += info->nr_line_info * info->line_info_rec_size; + + holder.nr_jited_line_info = info->nr_jited_line_info; + holder.jited_line_info_rec_size = info->jited_line_info_rec_size; + needed += info->nr_jited_line_info * info->jited_line_info_rec_size; + + if (needed > *info_data_sz) { + ptr = realloc(*info_data, needed); + if (!ptr) + return -1; + + *info_data = ptr; + *info_data_sz = needed; + } + ptr = *info_data; + + if (mode == DUMP_JITED) { + holder.jited_prog_insns = ptr_to_u64(ptr); + ptr += holder.jited_prog_len; + } else { + holder.xlated_prog_insns = ptr_to_u64(ptr); + ptr += holder.xlated_prog_len; + } + + holder.jited_ksyms = ptr_to_u64(ptr); + ptr += holder.nr_jited_ksyms * sizeof(__u64); + + holder.jited_func_lens = ptr_to_u64(ptr); + ptr += holder.nr_jited_func_lens * sizeof(__u32); + + holder.func_info = ptr_to_u64(ptr); + ptr += holder.nr_func_info * holder.func_info_rec_size; + + holder.line_info = ptr_to_u64(ptr); + ptr += holder.nr_line_info * holder.line_info_rec_size; + + holder.jited_line_info = ptr_to_u64(ptr); + ptr += holder.nr_jited_line_info * holder.jited_line_info_rec_size; + + *info = holder; + return 0; +} + static void print_boot_time(__u64 nsecs, char *buf, unsigned int size) { struct timespec real_time_ts, boot_time_ts; @@ -803,16 +873,18 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode, static int do_dump(int argc, char **argv) { - struct bpf_prog_info_linear *info_linear; + struct bpf_prog_info info; + __u32 info_len = sizeof(info); + size_t info_data_sz = 0; + void *info_data = NULL; char *filepath = NULL; bool opcodes = false; bool visual = false; enum dump_mode mode; bool linum = false; - int *fds = NULL; int nb_fds, i = 0; + int *fds = NULL; int err = -1; - __u64 arrays; if (is_prefix(*argv, "jited")) { if (disasm_init()) @@ -872,43 +944,44 @@ static int do_dump(int argc, char **argv) goto exit_close; } - if (mode == DUMP_JITED) - arrays = 1UL << BPF_PROG_INFO_JITED_INSNS; - else - arrays = 1UL << BPF_PROG_INFO_XLATED_INSNS; - - arrays |= 1UL << BPF_PROG_INFO_JITED_KSYMS; - arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS; - arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO; - arrays |= 1UL << BPF_PROG_INFO_LINE_INFO; - arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO; - if (json_output && nb_fds > 1) jsonw_start_array(json_wtr); /* root array */ for (i = 0; i < nb_fds; i++) { - info_linear = bpf_program__get_prog_info_linear(fds[i], arrays); - if (IS_ERR_OR_NULL(info_linear)) { + memset(&info, 0, sizeof(info)); + + err = bpf_obj_get_info_by_fd(fds[i], &info, &info_len); + if (err) { + p_err("can't get prog info: %s", strerror(errno)); + break; + } + + err = prep_prog_info(&info, mode, &info_data, &info_data_sz); + if (err) { + p_err("can't grow prog info_data"); + break; + } + + err = bpf_obj_get_info_by_fd(fds[i], &info, &info_len); + if (err) { p_err("can't get prog info: %s", strerror(errno)); break; } if (json_output && nb_fds > 1) { jsonw_start_object(json_wtr); /* prog object */ - print_prog_header_json(&info_linear->info); + print_prog_header_json(&info); jsonw_name(json_wtr, "insns"); } else if (nb_fds > 1) { - print_prog_header_plain(&info_linear->info); + print_prog_header_plain(&info); } - err = prog_dump(&info_linear->info, mode, filepath, opcodes, - visual, linum); + err = prog_dump(&info, mode, filepath, opcodes, visual, linum); if (json_output && nb_fds > 1) jsonw_end_object(json_wtr); /* prog object */ 
else if (i != nb_fds - 1 && nb_fds > 1) printf("\n"); - free(info_linear); if (err) break; close(fds[i]); @@ -920,6 +993,7 @@ exit_close: for (; i < nb_fds; i++) close(fds[i]); exit_free: + free(info_data); free(fds); return err; } @@ -2016,41 +2090,58 @@ static void profile_print_readings(void) static char *profile_target_name(int tgt_fd) { - struct bpf_prog_info_linear *info_linear; - struct bpf_func_info *func_info; + struct bpf_func_info func_info; + struct bpf_prog_info info = {}; + __u32 info_len = sizeof(info); const struct btf_type *t; + __u32 func_info_rec_size; struct btf *btf = NULL; char *name = NULL; + int err; - info_linear = bpf_program__get_prog_info_linear( - tgt_fd, 1UL << BPF_PROG_INFO_FUNC_INFO); - if (IS_ERR_OR_NULL(info_linear)) { - p_err("failed to get info_linear for prog FD %d", tgt_fd); - return NULL; + err = bpf_obj_get_info_by_fd(tgt_fd, &info, &info_len); + if (err) { + p_err("failed to bpf_obj_get_info_by_fd for prog FD %d", tgt_fd); + goto out; } - if (info_linear->info.btf_id == 0) { + if (info.btf_id == 0) { p_err("prog FD %d doesn't have valid btf", tgt_fd); goto out; } - btf = btf__load_from_kernel_by_id(info_linear->info.btf_id); + func_info_rec_size = info.func_info_rec_size; + if (info.nr_func_info == 0) { + p_err("bpf_obj_get_info_by_fd for prog FD %d found 0 func_info", tgt_fd); + goto out; + } + + memset(&info, 0, sizeof(info)); + info.nr_func_info = 1; + info.func_info_rec_size = func_info_rec_size; + info.func_info = ptr_to_u64(&func_info); + + err = bpf_obj_get_info_by_fd(tgt_fd, &info, &info_len); + if (err) { + p_err("failed to get func_info for prog FD %d", tgt_fd); + goto out; + } + + btf = btf__load_from_kernel_by_id(info.btf_id); if (libbpf_get_error(btf)) { p_err("failed to load btf for prog FD %d", tgt_fd); goto out; } - func_info = u64_to_ptr(info_linear->info.func_info); - t = btf__type_by_id(btf, func_info[0].type_id); + t = btf__type_by_id(btf, func_info.type_id); if (!t) { p_err("btf %d doesn't have type %d", - info_linear->info.btf_id, func_info[0].type_id); + info.btf_id, func_info.type_id); goto out; } name = strdup(btf__name_by_offset(btf, t->name_off)); out: btf__free(btf); - free(info_linear); return name; } -- cgit v1.2.3 From 199e06fe832ddca80c2167661acab0e9dec657c4 Mon Sep 17 00:00:00 2001 From: Dave Marchevsky Date: Mon, 1 Nov 2021 15:43:56 -0700 Subject: perf: Pull in bpf_program__get_prog_info_linear To prepare for impending deprecation of libbpf's bpf_program__get_prog_info_linear, pull in the function and associated helpers into the perf codebase and migrate existing uses to the perf copy. Since libbpf's deprecated definitions will still be visible to perf, it is necessary to rename perf's definitions. 
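For reference, a minimal usage sketch of the perf copy under its new names; it condenses the pattern the bpf_counter.c and bpf-event.c call sites below use, and the printf and error handling are illustrative only.

#include <stdio.h>
#include <stdlib.h>
#include <linux/err.h>
#include "bpf-utils.h"

static void show_func_info_cnt(int prog_fd)
{
	struct perf_bpil *bpil;

	/* choose which bpf_prog_info arrays to fetch via PERF_BPIL_* bits */
	bpil = get_bpf_prog_info_linear(prog_fd, 1UL << PERF_BPIL_FUNC_INFO);
	if (IS_ERR_OR_NULL(bpil))
		return;

	printf("prog has %u func_info records\n", bpil->info.nr_func_info);
	free(bpil);	/* everything lives in one contiguous allocation */
}
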
Signed-off-by: Dave Marchevsky Signed-off-by: Andrii Nakryiko Acked-by: Song Liu Acked-by: Quentin Monnet Link: https://lore.kernel.org/bpf/20211101224357.2651181-4-davemarchevsky@fb.com --- tools/perf/Documentation/perf.data-file-format.txt | 2 +- tools/perf/util/Build | 1 + tools/perf/util/annotate.c | 3 +- tools/perf/util/bpf-event.c | 41 ++-- tools/perf/util/bpf-event.h | 2 +- tools/perf/util/bpf-utils.c | 261 +++++++++++++++++++++ tools/perf/util/bpf-utils.h | 76 ++++++ tools/perf/util/bpf_counter.c | 6 +- tools/perf/util/dso.c | 1 + tools/perf/util/env.c | 1 + tools/perf/util/header.c | 13 +- 11 files changed, 374 insertions(+), 33 deletions(-) create mode 100644 tools/perf/util/bpf-utils.c create mode 100644 tools/perf/util/bpf-utils.h diff --git a/tools/perf/Documentation/perf.data-file-format.txt b/tools/perf/Documentation/perf.data-file-format.txt index e6ff8c898ada..f56d0e0fbff6 100644 --- a/tools/perf/Documentation/perf.data-file-format.txt +++ b/tools/perf/Documentation/perf.data-file-format.txt @@ -346,7 +346,7 @@ to special needs. HEADER_BPF_PROG_INFO = 25, -struct bpf_prog_info_linear, which contains detailed information about +struct perf_bpil, which contains detailed information about a BPF program, including type, id, tag, jited/xlated instructions, etc. HEADER_BPF_BTF = 26, diff --git a/tools/perf/util/Build b/tools/perf/util/Build index f2914d5bed6e..ee42da1d3639 100644 --- a/tools/perf/util/Build +++ b/tools/perf/util/Build @@ -201,6 +201,7 @@ endif perf-y += perf-hooks.o perf-$(CONFIG_LIBBPF) += bpf-event.o +perf-$(CONFIG_LIBBPF) += bpf-utils.o perf-$(CONFIG_CXX) += c++/ diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index 0bae061b2d6d..f0e5a236b7e3 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -28,6 +28,7 @@ #include "evsel.h" #include "evlist.h" #include "bpf-event.h" +#include "bpf-utils.h" #include "block-range.h" #include "string2.h" #include "util/event.h" @@ -1700,12 +1701,12 @@ static int symbol__disassemble_bpf(struct symbol *sym, { struct annotation *notes = symbol__annotation(sym); struct annotation_options *opts = args->options; - struct bpf_prog_info_linear *info_linear; struct bpf_prog_linfo *prog_linfo = NULL; struct bpf_prog_info_node *info_node; int len = sym->end - sym->start; disassembler_ftype disassemble; struct map *map = args->ms.map; + struct perf_bpil *info_linear; struct disassemble_info info; struct dso *dso = map->dso; int pc = 0, count, sub_id; diff --git a/tools/perf/util/bpf-event.c b/tools/perf/util/bpf-event.c index 388847bab6d9..a27badb0a53a 100644 --- a/tools/perf/util/bpf-event.c +++ b/tools/perf/util/bpf-event.c @@ -10,6 +10,7 @@ #include #include #include "bpf-event.h" +#include "bpf-utils.h" #include "debug.h" #include "dso.h" #include "symbol.h" @@ -32,8 +33,6 @@ struct btf * __weak btf__load_from_kernel_by_id(__u32 id) return err ? 
ERR_PTR(err) : btf; } -#define ptr_to_u64(ptr) ((__u64)(unsigned long)(ptr)) - static int snprintf_hex(char *buf, size_t size, unsigned char *data, size_t len) { int ret = 0; @@ -48,9 +47,9 @@ static int machine__process_bpf_event_load(struct machine *machine, union perf_event *event, struct perf_sample *sample __maybe_unused) { - struct bpf_prog_info_linear *info_linear; struct bpf_prog_info_node *info_node; struct perf_env *env = machine->env; + struct perf_bpil *info_linear; int id = event->bpf.id; unsigned int i; @@ -175,9 +174,9 @@ static int perf_event__synthesize_one_bpf_prog(struct perf_session *session, { struct perf_record_ksymbol *ksymbol_event = &event->ksymbol; struct perf_record_bpf_event *bpf_event = &event->bpf; - struct bpf_prog_info_linear *info_linear; struct perf_tool *tool = session->tool; struct bpf_prog_info_node *info_node; + struct perf_bpil *info_linear; struct bpf_prog_info *info; struct btf *btf = NULL; struct perf_env *env; @@ -191,15 +190,15 @@ static int perf_event__synthesize_one_bpf_prog(struct perf_session *session, */ env = session->data ? &session->header.env : &perf_env; - arrays = 1UL << BPF_PROG_INFO_JITED_KSYMS; - arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS; - arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO; - arrays |= 1UL << BPF_PROG_INFO_PROG_TAGS; - arrays |= 1UL << BPF_PROG_INFO_JITED_INSNS; - arrays |= 1UL << BPF_PROG_INFO_LINE_INFO; - arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO; + arrays = 1UL << PERF_BPIL_JITED_KSYMS; + arrays |= 1UL << PERF_BPIL_JITED_FUNC_LENS; + arrays |= 1UL << PERF_BPIL_FUNC_INFO; + arrays |= 1UL << PERF_BPIL_PROG_TAGS; + arrays |= 1UL << PERF_BPIL_JITED_INSNS; + arrays |= 1UL << PERF_BPIL_LINE_INFO; + arrays |= 1UL << PERF_BPIL_JITED_LINE_INFO; - info_linear = bpf_program__get_prog_info_linear(fd, arrays); + info_linear = get_bpf_prog_info_linear(fd, arrays); if (IS_ERR_OR_NULL(info_linear)) { info_linear = NULL; pr_debug("%s: failed to get BPF program info. aborting\n", __func__); @@ -452,8 +451,8 @@ int perf_event__synthesize_bpf_events(struct perf_session *session, static void perf_env__add_bpf_info(struct perf_env *env, u32 id) { - struct bpf_prog_info_linear *info_linear; struct bpf_prog_info_node *info_node; + struct perf_bpil *info_linear; struct btf *btf = NULL; u64 arrays; u32 btf_id; @@ -463,15 +462,15 @@ static void perf_env__add_bpf_info(struct perf_env *env, u32 id) if (fd < 0) return; - arrays = 1UL << BPF_PROG_INFO_JITED_KSYMS; - arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS; - arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO; - arrays |= 1UL << BPF_PROG_INFO_PROG_TAGS; - arrays |= 1UL << BPF_PROG_INFO_JITED_INSNS; - arrays |= 1UL << BPF_PROG_INFO_LINE_INFO; - arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO; + arrays = 1UL << PERF_BPIL_JITED_KSYMS; + arrays |= 1UL << PERF_BPIL_JITED_FUNC_LENS; + arrays |= 1UL << PERF_BPIL_FUNC_INFO; + arrays |= 1UL << PERF_BPIL_PROG_TAGS; + arrays |= 1UL << PERF_BPIL_JITED_INSNS; + arrays |= 1UL << PERF_BPIL_LINE_INFO; + arrays |= 1UL << PERF_BPIL_JITED_LINE_INFO; - info_linear = bpf_program__get_prog_info_linear(fd, arrays); + info_linear = get_bpf_prog_info_linear(fd, arrays); if (IS_ERR_OR_NULL(info_linear)) { pr_debug("%s: failed to get BPF program info. 
aborting\n", __func__); goto out; diff --git a/tools/perf/util/bpf-event.h b/tools/perf/util/bpf-event.h index 68f315c3df5b..144a8a24cc69 100644 --- a/tools/perf/util/bpf-event.h +++ b/tools/perf/util/bpf-event.h @@ -19,7 +19,7 @@ struct evlist; struct target; struct bpf_prog_info_node { - struct bpf_prog_info_linear *info_linear; + struct perf_bpil *info_linear; struct rb_node rb_node; }; diff --git a/tools/perf/util/bpf-utils.c b/tools/perf/util/bpf-utils.c new file mode 100644 index 000000000000..e271e05e51bc --- /dev/null +++ b/tools/perf/util/bpf-utils.c @@ -0,0 +1,261 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) + +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif + +#include +#include +#include +#include +#include +#include "bpf-utils.h" +#include "debug.h" + +struct bpil_array_desc { + int array_offset; /* e.g. offset of jited_prog_insns */ + int count_offset; /* e.g. offset of jited_prog_len */ + int size_offset; /* > 0: offset of rec size, + * < 0: fix size of -size_offset + */ +}; + +static struct bpil_array_desc bpil_array_desc[] = { + [PERF_BPIL_JITED_INSNS] = { + offsetof(struct bpf_prog_info, jited_prog_insns), + offsetof(struct bpf_prog_info, jited_prog_len), + -1, + }, + [PERF_BPIL_XLATED_INSNS] = { + offsetof(struct bpf_prog_info, xlated_prog_insns), + offsetof(struct bpf_prog_info, xlated_prog_len), + -1, + }, + [PERF_BPIL_MAP_IDS] = { + offsetof(struct bpf_prog_info, map_ids), + offsetof(struct bpf_prog_info, nr_map_ids), + -(int)sizeof(__u32), + }, + [PERF_BPIL_JITED_KSYMS] = { + offsetof(struct bpf_prog_info, jited_ksyms), + offsetof(struct bpf_prog_info, nr_jited_ksyms), + -(int)sizeof(__u64), + }, + [PERF_BPIL_JITED_FUNC_LENS] = { + offsetof(struct bpf_prog_info, jited_func_lens), + offsetof(struct bpf_prog_info, nr_jited_func_lens), + -(int)sizeof(__u32), + }, + [PERF_BPIL_FUNC_INFO] = { + offsetof(struct bpf_prog_info, func_info), + offsetof(struct bpf_prog_info, nr_func_info), + offsetof(struct bpf_prog_info, func_info_rec_size), + }, + [PERF_BPIL_LINE_INFO] = { + offsetof(struct bpf_prog_info, line_info), + offsetof(struct bpf_prog_info, nr_line_info), + offsetof(struct bpf_prog_info, line_info_rec_size), + }, + [PERF_BPIL_JITED_LINE_INFO] = { + offsetof(struct bpf_prog_info, jited_line_info), + offsetof(struct bpf_prog_info, nr_jited_line_info), + offsetof(struct bpf_prog_info, jited_line_info_rec_size), + }, + [PERF_BPIL_PROG_TAGS] = { + offsetof(struct bpf_prog_info, prog_tags), + offsetof(struct bpf_prog_info, nr_prog_tags), + -(int)sizeof(__u8) * BPF_TAG_SIZE, + }, + +}; + +static __u32 bpf_prog_info_read_offset_u32(struct bpf_prog_info *info, + int offset) +{ + __u32 *array = (__u32 *)info; + + if (offset >= 0) + return array[offset / sizeof(__u32)]; + return -(int)offset; +} + +static __u64 bpf_prog_info_read_offset_u64(struct bpf_prog_info *info, + int offset) +{ + __u64 *array = (__u64 *)info; + + if (offset >= 0) + return array[offset / sizeof(__u64)]; + return -(int)offset; +} + +static void bpf_prog_info_set_offset_u32(struct bpf_prog_info *info, int offset, + __u32 val) +{ + __u32 *array = (__u32 *)info; + + if (offset >= 0) + array[offset / sizeof(__u32)] = val; +} + +static void bpf_prog_info_set_offset_u64(struct bpf_prog_info *info, int offset, + __u64 val) +{ + __u64 *array = (__u64 *)info; + + if (offset >= 0) + array[offset / sizeof(__u64)] = val; +} + +struct perf_bpil * +get_bpf_prog_info_linear(int fd, __u64 arrays) +{ + struct bpf_prog_info info = {}; + struct perf_bpil *info_linear; + __u32 info_len = sizeof(info); + __u32 
data_len = 0; + int i, err; + void *ptr; + + if (arrays >> PERF_BPIL_LAST_ARRAY) + return ERR_PTR(-EINVAL); + + /* step 1: get array dimensions */ + err = bpf_obj_get_info_by_fd(fd, &info, &info_len); + if (err) { + pr_debug("can't get prog info: %s", strerror(errno)); + return ERR_PTR(-EFAULT); + } + + /* step 2: calculate total size of all arrays */ + for (i = PERF_BPIL_FIRST_ARRAY; i < PERF_BPIL_LAST_ARRAY; ++i) { + bool include_array = (arrays & (1UL << i)) > 0; + struct bpil_array_desc *desc; + __u32 count, size; + + desc = bpil_array_desc + i; + + /* kernel is too old to support this field */ + if (info_len < desc->array_offset + sizeof(__u32) || + info_len < desc->count_offset + sizeof(__u32) || + (desc->size_offset > 0 && info_len < (__u32)desc->size_offset)) + include_array = false; + + if (!include_array) { + arrays &= ~(1UL << i); /* clear the bit */ + continue; + } + + count = bpf_prog_info_read_offset_u32(&info, desc->count_offset); + size = bpf_prog_info_read_offset_u32(&info, desc->size_offset); + + data_len += count * size; + } + + /* step 3: allocate continuous memory */ + data_len = roundup(data_len, sizeof(__u64)); + info_linear = malloc(sizeof(struct perf_bpil) + data_len); + if (!info_linear) + return ERR_PTR(-ENOMEM); + + /* step 4: fill data to info_linear->info */ + info_linear->arrays = arrays; + memset(&info_linear->info, 0, sizeof(info)); + ptr = info_linear->data; + + for (i = PERF_BPIL_FIRST_ARRAY; i < PERF_BPIL_LAST_ARRAY; ++i) { + struct bpil_array_desc *desc; + __u32 count, size; + + if ((arrays & (1UL << i)) == 0) + continue; + + desc = bpil_array_desc + i; + count = bpf_prog_info_read_offset_u32(&info, desc->count_offset); + size = bpf_prog_info_read_offset_u32(&info, desc->size_offset); + bpf_prog_info_set_offset_u32(&info_linear->info, + desc->count_offset, count); + bpf_prog_info_set_offset_u32(&info_linear->info, + desc->size_offset, size); + bpf_prog_info_set_offset_u64(&info_linear->info, + desc->array_offset, + ptr_to_u64(ptr)); + ptr += count * size; + } + + /* step 5: call syscall again to get required arrays */ + err = bpf_obj_get_info_by_fd(fd, &info_linear->info, &info_len); + if (err) { + pr_debug("can't get prog info: %s", strerror(errno)); + free(info_linear); + return ERR_PTR(-EFAULT); + } + + /* step 6: verify the data */ + for (i = PERF_BPIL_FIRST_ARRAY; i < PERF_BPIL_LAST_ARRAY; ++i) { + struct bpil_array_desc *desc; + __u32 v1, v2; + + if ((arrays & (1UL << i)) == 0) + continue; + + desc = bpil_array_desc + i; + v1 = bpf_prog_info_read_offset_u32(&info, desc->count_offset); + v2 = bpf_prog_info_read_offset_u32(&info_linear->info, + desc->count_offset); + if (v1 != v2) + pr_warning("%s: mismatch in element count\n", __func__); + + v1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset); + v2 = bpf_prog_info_read_offset_u32(&info_linear->info, + desc->size_offset); + if (v1 != v2) + pr_warning("%s: mismatch in rec size\n", __func__); + } + + /* step 7: update info_len and data_len */ + info_linear->info_len = sizeof(struct bpf_prog_info); + info_linear->data_len = data_len; + + return info_linear; +} + +void bpil_addr_to_offs(struct perf_bpil *info_linear) +{ + int i; + + for (i = PERF_BPIL_FIRST_ARRAY; i < PERF_BPIL_LAST_ARRAY; ++i) { + struct bpil_array_desc *desc; + __u64 addr, offs; + + if ((info_linear->arrays & (1UL << i)) == 0) + continue; + + desc = bpil_array_desc + i; + addr = bpf_prog_info_read_offset_u64(&info_linear->info, + desc->array_offset); + offs = addr - ptr_to_u64(info_linear->data); + 
bpf_prog_info_set_offset_u64(&info_linear->info, + desc->array_offset, offs); + } +} + +void bpil_offs_to_addr(struct perf_bpil *info_linear) +{ + int i; + + for (i = PERF_BPIL_FIRST_ARRAY; i < PERF_BPIL_LAST_ARRAY; ++i) { + struct bpil_array_desc *desc; + __u64 addr, offs; + + if ((info_linear->arrays & (1UL << i)) == 0) + continue; + + desc = bpil_array_desc + i; + offs = bpf_prog_info_read_offset_u64(&info_linear->info, + desc->array_offset); + addr = offs + ptr_to_u64(info_linear->data); + bpf_prog_info_set_offset_u64(&info_linear->info, + desc->array_offset, addr); + } +} diff --git a/tools/perf/util/bpf-utils.h b/tools/perf/util/bpf-utils.h new file mode 100644 index 000000000000..86a5055cdfad --- /dev/null +++ b/tools/perf/util/bpf-utils.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ + +#ifndef __PERF_BPF_UTILS_H +#define __PERF_BPF_UTILS_H + +#define ptr_to_u64(ptr) ((__u64)(unsigned long)(ptr)) + +#ifdef HAVE_LIBBPF_SUPPORT + +#include + +/* + * Get bpf_prog_info in continuous memory + * + * struct bpf_prog_info has multiple arrays. The user has option to choose + * arrays to fetch from kernel. The following APIs provide an uniform way to + * fetch these data. All arrays in bpf_prog_info are stored in a single + * continuous memory region. This makes it easy to store the info in a + * file. + * + * Before writing perf_bpil to files, it is necessary to + * translate pointers in bpf_prog_info to offsets. Helper functions + * bpil_addr_to_offs() and bpil_offs_to_addr() + * are introduced to switch between pointers and offsets. + * + * Examples: + * # To fetch map_ids and prog_tags: + * __u64 arrays = (1UL << PERF_BPIL_MAP_IDS) | + * (1UL << PERF_BPIL_PROG_TAGS); + * struct perf_bpil *info_linear = + * get_bpf_prog_info_linear(fd, arrays); + * + * # To save data in file + * bpil_addr_to_offs(info_linear); + * write(f, info_linear, sizeof(*info_linear) + info_linear->data_len); + * + * # To read data from file + * read(f, info_linear, ); + * bpil_offs_to_addr(info_linear); + */ +enum perf_bpil_array_types { + PERF_BPIL_FIRST_ARRAY = 0, + PERF_BPIL_JITED_INSNS = 0, + PERF_BPIL_XLATED_INSNS, + PERF_BPIL_MAP_IDS, + PERF_BPIL_JITED_KSYMS, + PERF_BPIL_JITED_FUNC_LENS, + PERF_BPIL_FUNC_INFO, + PERF_BPIL_LINE_INFO, + PERF_BPIL_JITED_LINE_INFO, + PERF_BPIL_PROG_TAGS, + PERF_BPIL_LAST_ARRAY, +}; + +struct perf_bpil { + /* size of struct bpf_prog_info, when the tool is compiled */ + __u32 info_len; + /* total bytes allocated for data, round up to 8 bytes */ + __u32 data_len; + /* which arrays are included in data */ + __u64 arrays; + struct bpf_prog_info info; + __u8 data[]; +}; + +struct perf_bpil * +get_bpf_prog_info_linear(int fd, __u64 arrays); + +void +bpil_addr_to_offs(struct perf_bpil *info_linear); + +void +bpil_offs_to_addr(struct perf_bpil *info_linear); + +#endif /* HAVE_LIBBPF_SUPPORT */ +#endif /* __PERF_BPF_UTILS_H */ diff --git a/tools/perf/util/bpf_counter.c b/tools/perf/util/bpf_counter.c index ba0f20853651..2b04df8c5f87 100644 --- a/tools/perf/util/bpf_counter.c +++ b/tools/perf/util/bpf_counter.c @@ -13,6 +13,7 @@ #include #include "bpf_counter.h" +#include "bpf-utils.h" #include "counts.h" #include "debug.h" #include "evsel.h" @@ -61,14 +62,13 @@ static int bpf_program_profiler__destroy(struct evsel *evsel) static char *bpf_target_prog_name(int tgt_fd) { - struct bpf_prog_info_linear *info_linear; struct bpf_func_info *func_info; + struct perf_bpil *info_linear; const struct btf_type *t; struct btf *btf = NULL; char *name = NULL; - info_linear = 
bpf_program__get_prog_info_linear( - tgt_fd, 1UL << BPF_PROG_INFO_FUNC_INFO); + info_linear = get_bpf_prog_info_linear(tgt_fd, 1UL << PERF_BPIL_FUNC_INFO); if (IS_ERR_OR_NULL(info_linear)) { pr_debug("failed to get info_linear for prog FD %d\n", tgt_fd); return NULL; diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c index 9ed9a5676d35..9cc8a1772b4b 100644 --- a/tools/perf/util/dso.c +++ b/tools/perf/util/dso.c @@ -14,6 +14,7 @@ #ifdef HAVE_LIBBPF_SUPPORT #include #include "bpf-event.h" +#include "bpf-utils.h" #endif #include "compress.h" #include "env.h" diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c index cf773f0dec38..17f1dd0680b4 100644 --- a/tools/perf/util/env.c +++ b/tools/perf/util/env.c @@ -16,6 +16,7 @@ struct perf_env perf_env; #ifdef HAVE_LIBBPF_SUPPORT #include "bpf-event.h" +#include "bpf-utils.h" #include void perf_env__insert_bpf_prog_info(struct perf_env *env, diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 1c7414f66655..56511db8fa03 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -48,6 +48,7 @@ #include "util/util.h" // perf_exe() #include "cputopo.h" #include "bpf-event.h" +#include "bpf-utils.h" #include "clockid.h" #include "pmu-hybrid.h" @@ -1006,17 +1007,17 @@ static int write_bpf_prog_info(struct feat_fd *ff, node = rb_entry(next, struct bpf_prog_info_node, rb_node); next = rb_next(&node->rb_node); - len = sizeof(struct bpf_prog_info_linear) + + len = sizeof(struct perf_bpil) + node->info_linear->data_len; /* before writing to file, translate address to offset */ - bpf_program__bpil_addr_to_offs(node->info_linear); + bpil_addr_to_offs(node->info_linear); ret = do_write(ff, node->info_linear, len); /* * translate back to address even when do_write() fails, * so that this function never changes the data. */ - bpf_program__bpil_offs_to_addr(node->info_linear); + bpil_offs_to_addr(node->info_linear); if (ret < 0) goto out; } @@ -3018,9 +3019,9 @@ static int process_dir_format(struct feat_fd *ff, #ifdef HAVE_LIBBPF_SUPPORT static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused) { - struct bpf_prog_info_linear *info_linear; struct bpf_prog_info_node *info_node; struct perf_env *env = &ff->ph->env; + struct perf_bpil *info_linear; u32 count, i; int err = -1; @@ -3049,7 +3050,7 @@ static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused) goto out; } - info_linear = malloc(sizeof(struct bpf_prog_info_linear) + + info_linear = malloc(sizeof(struct perf_bpil) + data_len); if (!info_linear) goto out; @@ -3071,7 +3072,7 @@ static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused) goto out; /* after reading from file, translate offset to address */ - bpf_program__bpil_offs_to_addr(info_linear); + bpil_offs_to_addr(info_linear); info_node->info_linear = info_linear; perf_env__insert_bpf_prog_info(env, info_node); } -- cgit v1.2.3 From f5aafbc2af51931668799a9c5080c8e35cbb571f Mon Sep 17 00:00:00 2001 From: Dave Marchevsky Date: Mon, 1 Nov 2021 15:43:57 -0700 Subject: libbpf: Deprecate bpf_program__get_prog_info_linear As part of the road to libbpf 1.0, and discussed in libbpf issue tracker [0], bpf_program__get_prog_info_linear and its associated structs and helper functions should be deprecated. The functionality is too specific to the needs of 'perf', and there's little/no out-of-tree usage to preclude introduction of a more general helper in the future. 
[0] Closes: https://github.com/libbpf/libbpf/issues/313 Signed-off-by: Dave Marchevsky Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20211101224357.2651181-5-davemarchevsky@fb.com --- tools/lib/bpf/libbpf.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index 9de0f299706b..797f5f8a0e20 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -918,12 +918,15 @@ struct bpf_prog_info_linear { __u8 data[]; }; +LIBBPF_DEPRECATED_SINCE(0, 6, "use a custom linear prog_info wrapper") LIBBPF_API struct bpf_prog_info_linear * bpf_program__get_prog_info_linear(int fd, __u64 arrays); +LIBBPF_DEPRECATED_SINCE(0, 6, "use a custom linear prog_info wrapper") LIBBPF_API void bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear); +LIBBPF_DEPRECATED_SINCE(0, 6, "use a custom linear prog_info wrapper") LIBBPF_API void bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear); -- cgit v1.2.3 From 833907876be55205d0ec153dcd819c014404ee16 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 3 Nov 2021 10:32:09 -0700 Subject: libbpf: Detect corrupted ELF symbols section Prevent divide-by-zero if ELF is corrupted and has zero sh_entsize. Reported by oss-fuzz project. Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/20211103173213.1376990-2-andrii@kernel.org --- tools/lib/bpf/libbpf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index a1bea1953df6..71f5a009010a 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -3555,7 +3555,7 @@ static int bpf_object__collect_externs(struct bpf_object *obj) scn = elf_sec_by_idx(obj, obj->efile.symbols_shndx); sh = elf_sec_hdr(obj, scn); - if (!sh) + if (!sh || sh->sh_entsize != sizeof(Elf64_Sym)) return -LIBBPF_ERRNO__FORMAT; dummy_var_btf_id = add_dummy_ksym_var(obj->btf); -- cgit v1.2.3 From 88918dc12dc357a06d8d722a684617b1c87a4654 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 3 Nov 2021 10:32:10 -0700 Subject: libbpf: Improve sanity checking during BTF fix up If BTF is corrupted DATASEC's variable type ID might be incorrect. Prevent this easy to detect situation with extra NULL check. Reported by oss-fuzz project. Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/20211103173213.1376990-3-andrii@kernel.org --- tools/lib/bpf/libbpf.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 71f5a009010a..f836a1936597 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -2752,13 +2752,12 @@ static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf, for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) { t_var = btf__type_by_id(btf, vsi->type); - var = btf_var(t_var); - - if (!btf_is_var(t_var)) { + if (!t_var || !btf_is_var(t_var)) { pr_debug("Non-VAR type seen in section %s\n", name); return -EINVAL; } + var = btf_var(t_var); if (var->linkage == BTF_VAR_STATIC) continue; -- cgit v1.2.3 From 62554d52e71797eefa3fc15b54008038837bb2d4 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 3 Nov 2021 10:32:11 -0700 Subject: libbpf: Validate that .BTF and .BTF.ext sections contain data .BTF and .BTF.ext ELF sections should have SHT_PROGBITS type and contain data. If they are not, ELF is invalid or corrupted, so bail out. 
Otherwise this can lead to data->d_buf being NULL and SIGSEGV later on. Reported by oss-fuzz project. Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/20211103173213.1376990-4-andrii@kernel.org --- tools/lib/bpf/libbpf.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index f836a1936597..0dc6465271ce 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -3270,8 +3270,12 @@ static int bpf_object__elf_collect(struct bpf_object *obj) } else if (strcmp(name, MAPS_ELF_SEC) == 0) { obj->efile.btf_maps_shndx = idx; } else if (strcmp(name, BTF_ELF_SEC) == 0) { + if (sh->sh_type != SHT_PROGBITS) + return -LIBBPF_ERRNO__FORMAT; btf_data = data; } else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) { + if (sh->sh_type != SHT_PROGBITS) + return -LIBBPF_ERRNO__FORMAT; btf_ext_data = data; } else if (sh->sh_type == SHT_SYMTAB) { /* already processed during the first pass above */ -- cgit v1.2.3 From 0d6988e16a12ebd41d3e268992211b0ceba44ed7 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 3 Nov 2021 10:32:12 -0700 Subject: libbpf: Fix section counting logic e_shnum does include section #0 and as such is exactly the number of ELF sections that we need to allocate memory for to use section indices as array indices. Fix the off-by-one error. This is purely accounting fix, previously we were overallocating one too many array items. But no correctness errors otherwise. Fixes: 25bbbd7a444b ("libbpf: Remove assumptions about uniqueness of .rodata/.data/.bss maps") Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/20211103173213.1376990-5-andrii@kernel.org --- tools/lib/bpf/libbpf.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 0dc6465271ce..ecfea6c20042 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -3190,11 +3190,11 @@ static int bpf_object__elf_collect(struct bpf_object *obj) Elf_Scn *scn; Elf64_Shdr *sh; - /* ELF section indices are 1-based, so allocate +1 element to keep - * indexing simple. Also include 0th invalid section into sec_cnt for - * simpler and more traditional iteration logic. + /* ELF section indices are 0-based, but sec #0 is special "invalid" + * section. e_shnum does include sec #0, so e_shnum is the necessary + * size of an array to keep all the sections. */ - obj->efile.sec_cnt = 1 + obj->efile.ehdr->e_shnum; + obj->efile.sec_cnt = obj->efile.ehdr->e_shnum; obj->efile.secs = calloc(obj->efile.sec_cnt, sizeof(*obj->efile.secs)); if (!obj->efile.secs) return -ENOMEM; -- cgit v1.2.3 From b7332d2820d394dd2ac127df1567b4da597355a1 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 3 Nov 2021 10:32:13 -0700 Subject: libbpf: Improve ELF relo sanitization Add few sanity checks for relocations to prevent div-by-zero and out-of-bounds array accesses in libbpf. 
Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/20211103173213.1376990-6-andrii@kernel.org --- tools/lib/bpf/libbpf.c | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index ecfea6c20042..86a44735230e 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -3306,6 +3306,10 @@ static int bpf_object__elf_collect(struct bpf_object *obj) } else if (sh->sh_type == SHT_REL) { int targ_sec_idx = sh->sh_info; /* points to other section */ + if (sh->sh_entsize != sizeof(Elf64_Rel) || + targ_sec_idx >= obj->efile.sec_cnt) + return -LIBBPF_ERRNO__FORMAT; + /* Only do relo for section with exec instructions */ if (!section_have_execinstr(obj, targ_sec_idx) && strcmp(name, ".rel" STRUCT_OPS_SEC) && @@ -4025,7 +4029,7 @@ static int bpf_object__collect_prog_relos(struct bpf_object *obj, Elf64_Shdr *shdr, Elf_Data *data) { const char *relo_sec_name, *sec_name; - size_t sec_idx = shdr->sh_info; + size_t sec_idx = shdr->sh_info, sym_idx; struct bpf_program *prog; struct reloc_desc *relos; int err, i, nrels; @@ -4036,6 +4040,9 @@ bpf_object__collect_prog_relos(struct bpf_object *obj, Elf64_Shdr *shdr, Elf_Dat Elf64_Sym *sym; Elf64_Rel *rel; + if (sec_idx >= obj->efile.sec_cnt) + return -EINVAL; + scn = elf_sec_by_idx(obj, sec_idx); scn_data = elf_sec_data(obj, scn); @@ -4055,16 +4062,23 @@ bpf_object__collect_prog_relos(struct bpf_object *obj, Elf64_Shdr *shdr, Elf_Dat return -LIBBPF_ERRNO__FORMAT; } - sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info)); + sym_idx = ELF64_R_SYM(rel->r_info); + sym = elf_sym_by_idx(obj, sym_idx); if (!sym) { - pr_warn("sec '%s': symbol 0x%zx not found for relo #%d\n", - relo_sec_name, (size_t)ELF64_R_SYM(rel->r_info), i); + pr_warn("sec '%s': symbol #%zu not found for relo #%d\n", + relo_sec_name, sym_idx, i); + return -LIBBPF_ERRNO__FORMAT; + } + + if (sym->st_shndx >= obj->efile.sec_cnt) { + pr_warn("sec '%s': corrupted symbol #%zu pointing to invalid section #%zu for relo #%d\n", + relo_sec_name, sym_idx, (size_t)sym->st_shndx, i); return -LIBBPF_ERRNO__FORMAT; } if (rel->r_offset % BPF_INSN_SZ || rel->r_offset >= scn_data->d_size) { pr_warn("sec '%s': invalid offset 0x%zx for relo #%d\n", - relo_sec_name, (size_t)ELF64_R_SYM(rel->r_info), i); + relo_sec_name, (size_t)rel->r_offset, i); return -LIBBPF_ERRNO__FORMAT; } -- cgit v1.2.3 From be2f2d1680dfb36793ea8d3110edd4a1db496352 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 2 Nov 2021 22:14:49 -0700 Subject: libbpf: Deprecate bpf_program__load() API Mark bpf_program__load() as deprecated ([0]) since v0.6. Also rename few internal program loading bpf_object helper functions to have more consistent naming. 
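As the deprecation hint suggests, loading now goes through the object-level API; a minimal sketch of the replacement path follows (the object path is the caller's, and the error handling is illustrative):

#include <bpf/libbpf.h>

static struct bpf_object *open_and_load(const char *path)
{
	struct bpf_object *obj;

	obj = bpf_object__open_file(path, NULL);
	if (libbpf_get_error(obj))
		return NULL;

	if (bpf_object__load(obj)) {	/* loads every program in the object */
		bpf_object__close(obj);
		return NULL;
	}
	return obj;
}
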
[0] Closes: https://github.com/libbpf/libbpf/issues/301 Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20211103051449.1884903-1-andrii@kernel.org --- tools/lib/bpf/libbpf.c | 36 ++++++++++++++++++++++-------------- tools/lib/bpf/libbpf.h | 4 ++-- 2 files changed, 24 insertions(+), 16 deletions(-) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 86a44735230e..7fcea11ecaa9 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -6428,12 +6428,12 @@ static int libbpf_preload_prog(struct bpf_program *prog, return 0; } -static int -load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt, - char *license, __u32 kern_version, int *pfd) +static int bpf_object_load_prog_instance(struct bpf_object *obj, struct bpf_program *prog, + struct bpf_insn *insns, int insns_cnt, + const char *license, __u32 kern_version, + int *prog_fd) { struct bpf_prog_load_params load_attr = {}; - struct bpf_object *obj = prog->obj; char *cp, errmsg[STRERR_BUFSIZE]; size_t log_buf_size = 0; char *log_buf = NULL; @@ -6494,7 +6494,7 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt, if (obj->gen_loader) { bpf_gen__prog_load(obj->gen_loader, &load_attr, prog - obj->programs); - *pfd = -1; + *prog_fd = -1; return 0; } retry_load: @@ -6532,7 +6532,7 @@ retry_load: } } - *pfd = ret; + *prog_fd = ret; ret = 0; goto out; } @@ -6608,11 +6608,12 @@ static int bpf_program__record_externs(struct bpf_program *prog) return 0; } -int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver) +static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog, + const char *license, __u32 kern_ver) { int err = 0, fd, i; - if (prog->obj->loaded) { + if (obj->loaded) { pr_warn("prog '%s': can't load after object was loaded\n", prog->name); return libbpf_err(-EINVAL); } @@ -6638,10 +6639,11 @@ int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver) pr_warn("prog '%s': inconsistent nr(%d) != 1\n", prog->name, prog->instances.nr); } - if (prog->obj->gen_loader) + if (obj->gen_loader) bpf_program__record_externs(prog); - err = load_program(prog, prog->insns, prog->insns_cnt, - license, kern_ver, &fd); + err = bpf_object_load_prog_instance(obj, prog, + prog->insns, prog->insns_cnt, + license, kern_ver, &fd); if (!err) prog->instances.fds[0] = fd; goto out; @@ -6669,8 +6671,9 @@ int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver) continue; } - err = load_program(prog, result.new_insn_ptr, - result.new_insn_cnt, license, kern_ver, &fd); + err = bpf_object_load_prog_instance(obj, prog, + result.new_insn_ptr, result.new_insn_cnt, + license, kern_ver, &fd); if (err) { pr_warn("Loading the %dth instance of program '%s' failed\n", i, prog->name); @@ -6687,6 +6690,11 @@ out: return libbpf_err(err); } +int bpf_program__load(struct bpf_program *prog, const char *license, __u32 kern_ver) +{ + return bpf_object_load_prog(prog->obj, prog, license, kern_ver); +} + static int bpf_object__load_progs(struct bpf_object *obj, int log_level) { @@ -6710,7 +6718,7 @@ bpf_object__load_progs(struct bpf_object *obj, int log_level) continue; } prog->log_level |= log_level; - err = bpf_program__load(prog, obj->license, obj->kern_version); + err = bpf_object_load_prog(obj, prog, obj->license, obj->kern_version); if (err) return err; } diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index 797f5f8a0e20..a364c379b998 100644 --- a/tools/lib/bpf/libbpf.h 
+++ b/tools/lib/bpf/libbpf.h @@ -262,8 +262,8 @@ LIBBPF_API const struct bpf_insn *bpf_program__insns(const struct bpf_program *p */ LIBBPF_API size_t bpf_program__insn_cnt(const struct bpf_program *prog); -LIBBPF_API int bpf_program__load(struct bpf_program *prog, char *license, - __u32 kern_version); +LIBBPF_DEPRECATED_SINCE(0, 6, "use bpf_object__load() instead") +LIBBPF_API int bpf_program__load(struct bpf_program *prog, const char *license, __u32 kern_version); LIBBPF_API int bpf_program__fd(const struct bpf_program *prog); LIBBPF_DEPRECATED_SINCE(0, 7, "multi-instance bpf_program support is deprecated") LIBBPF_API int bpf_program__pin_instance(struct bpf_program *prog, -- cgit v1.2.3 From b8b5cb55f5d3f03cc1479a3768d68173a10359ad Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Fri, 5 Nov 2021 12:10:55 -0700 Subject: libbpf: Fix non-C89 loop variable declaration in gen_loader.c Fix the `int i` declaration inside the for statement. This is non-C89 compliant. See [0] for user report breaking BCC build. [0] https://github.com/libbpf/libbpf/issues/403 Fixes: 18f4fccbf314 ("libbpf: Update gen_loader to emit BTF_KIND_FUNC relocations") Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Acked-by: Kumar Kartikeya Dwivedi Link: https://lore.kernel.org/bpf/20211105191055.3324874-1-andrii@kernel.org --- tools/lib/bpf/gen_loader.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/lib/bpf/gen_loader.c b/tools/lib/bpf/gen_loader.c index 502dea53a742..2e10776b6d85 100644 --- a/tools/lib/bpf/gen_loader.c +++ b/tools/lib/bpf/gen_loader.c @@ -584,8 +584,9 @@ void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, bool is_weak, static struct ksym_desc *get_ksym_desc(struct bpf_gen *gen, struct ksym_relo_desc *relo) { struct ksym_desc *kdesc; + int i; - for (int i = 0; i < gen->nr_ksyms; i++) { + for (i = 0; i < gen->nr_ksyms; i++) { if (!strcmp(gen->ksyms[i].name, relo->name)) { gen->ksyms[i].ref++; return &gen->ksyms[i]; -- cgit v1.2.3 From be80e9cdbca8ac66d09e0e24e0bd41d992362a0b Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 3 Nov 2021 15:08:34 -0700 Subject: libbpf: Rename DECLARE_LIBBPF_OPTS into LIBBPF_OPTS It's confusing that libbpf-provided helper macro doesn't start with LIBBPF. Also "declare" vs "define" is confusing terminology, I can never remember and always have to look up previous examples. Bypass both issues by renaming DECLARE_LIBBPF_OPTS into a short and clean LIBBPF_OPTS. To avoid breaking existing code, provide: #define DECLARE_LIBBPF_OPTS LIBBPF_OPTS in libbpf_legacy.h. We can decide later if we ever want to remove it or we'll keep it forever because it doesn't add any maintainability burden. 
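Usage is unchanged apart from the shorter name; a minimal sketch follows (the opts struct and the field set here are just one example):

#include <bpf/libbpf.h>

static struct bpf_object *open_with_name(const char *path)
{
	/* LIBBPF_OPTS (formerly DECLARE_LIBBPF_OPTS) declares the opts
	 * variable, zero-initializes it including the size field, and
	 * then applies the designated initializers that follow.
	 */
	LIBBPF_OPTS(bpf_object_open_opts, opts,
		.object_name = "my_obj",	/* illustrative value */
	);

	return bpf_object__open_file(path, &opts);
}
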
Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Acked-by: Dave Marchevsky Link: https://lore.kernel.org/bpf/20211103220845.2676888-2-andrii@kernel.org --- tools/lib/bpf/bpf.h | 1 + tools/lib/bpf/libbpf_common.h | 2 +- tools/lib/bpf/libbpf_legacy.h | 1 + 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h index 6fffb3cdf39b..f35146c1d9a9 100644 --- a/tools/lib/bpf/bpf.h +++ b/tools/lib/bpf/bpf.h @@ -29,6 +29,7 @@ #include #include "libbpf_common.h" +#include "libbpf_legacy.h" #ifdef __cplusplus extern "C" { diff --git a/tools/lib/bpf/libbpf_common.h b/tools/lib/bpf/libbpf_common.h index aaa1efbf6f51..0967112b933a 100644 --- a/tools/lib/bpf/libbpf_common.h +++ b/tools/lib/bpf/libbpf_common.h @@ -54,7 +54,7 @@ * including any extra padding, it with memset() and then assigns initial * values provided by users in struct initializer-syntax as varargs. */ -#define DECLARE_LIBBPF_OPTS(TYPE, NAME, ...) \ +#define LIBBPF_OPTS(TYPE, NAME, ...) \ struct TYPE NAME = ({ \ memset(&NAME, 0, sizeof(struct TYPE)); \ (struct TYPE) { \ diff --git a/tools/lib/bpf/libbpf_legacy.h b/tools/lib/bpf/libbpf_legacy.h index 5ba5c9beccfa..bb03c568af7b 100644 --- a/tools/lib/bpf/libbpf_legacy.h +++ b/tools/lib/bpf/libbpf_legacy.h @@ -69,6 +69,7 @@ enum libbpf_strict_mode { LIBBPF_API int libbpf_set_strict_mode(enum libbpf_strict_mode mode); +#define DECLARE_LIBBPF_OPTS LIBBPF_OPTS #ifdef __cplusplus } /* extern "C" */ -- cgit v1.2.3 From 45493cbaf59e3c9482e0e6a2646b362fff45db8b Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 3 Nov 2021 15:08:35 -0700 Subject: libbpf: Pass number of prog load attempts explicitly Allow to control number of BPF_PROG_LOAD attempts from outside the sys_bpf_prog_load() helper. 
Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Acked-by: Dave Marchevsky Link: https://lore.kernel.org/bpf/20211103220845.2676888-3-andrii@kernel.org --- tools/lib/bpf/bpf.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index c09cbb868c9f..8e6a23c42560 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -74,14 +74,15 @@ static inline int sys_bpf_fd(enum bpf_cmd cmd, union bpf_attr *attr, return ensure_good_fd(fd); } -static inline int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size) +#define PROG_LOAD_ATTEMPTS 5 + +static inline int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size, int attempts) { - int retries = 5; int fd; do { fd = sys_bpf_fd(BPF_PROG_LOAD, attr, size); - } while (fd < 0 && errno == EAGAIN && retries-- > 0); + } while (fd < 0 && errno == EAGAIN && --attempts > 0); return fd; } @@ -304,7 +305,7 @@ int libbpf__bpf_prog_load(const struct bpf_prog_load_params *load_attr) memcpy(attr.prog_name, load_attr->name, min(strlen(load_attr->name), (size_t)BPF_OBJ_NAME_LEN - 1)); - fd = sys_bpf_prog_load(&attr, sizeof(attr)); + fd = sys_bpf_prog_load(&attr, sizeof(attr), PROG_LOAD_ATTEMPTS); if (fd >= 0) return fd; @@ -345,7 +346,7 @@ int libbpf__bpf_prog_load(const struct bpf_prog_load_params *load_attr) break; } - fd = sys_bpf_prog_load(&attr, sizeof(attr)); + fd = sys_bpf_prog_load(&attr, sizeof(attr), PROG_LOAD_ATTEMPTS); if (fd >= 0) goto done; } @@ -359,7 +360,7 @@ int libbpf__bpf_prog_load(const struct bpf_prog_load_params *load_attr) attr.log_level = 1; load_attr->log_buf[0] = 0; - fd = sys_bpf_prog_load(&attr, sizeof(attr)); + fd = sys_bpf_prog_load(&attr, sizeof(attr), PROG_LOAD_ATTEMPTS); done: /* free() doesn't affect errno, so we don't need to restore it */ free(finfo); @@ -449,7 +450,7 @@ int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns, attr.kern_version = kern_version; attr.prog_flags = prog_flags; - fd = sys_bpf_prog_load(&attr, sizeof(attr)); + fd = sys_bpf_prog_load(&attr, sizeof(attr), PROG_LOAD_ATTEMPTS); return libbpf_err_errno(fd); } -- cgit v1.2.3 From d10ef2b825cffd0807dd733fdfd6a5bea32270d7 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 3 Nov 2021 15:08:36 -0700 Subject: libbpf: Unify low-level BPF_PROG_LOAD APIs into bpf_prog_load() Add a new unified OPTS-based low-level API for program loading, bpf_prog_load() ([0]). bpf_prog_load() accepts few "mandatory" parameters as input arguments (program type, name, license, instructions) and all the other optional (as in not required to specify for all types of BPF programs) fields into struct bpf_prog_load_opts. This makes all the other non-extensible APIs variant for BPF_PROG_LOAD obsolete and they are slated for deprecation in libbpf v0.7: - bpf_load_program(); - bpf_load_program_xattr(); - bpf_verify_program(). Implementation-wise, internal helper libbpf__bpf_prog_load is refactored to become a public bpf_prog_load() API. struct bpf_prog_load_params used internally is replaced by public struct bpf_prog_load_opts. Unfortunately, while conceptually all this is pretty straightforward, the biggest complication comes from the already existing bpf_prog_load() *high-level* API, which has nothing to do with BPF_PROG_LOAD command. We try really hard to have a new API named bpf_prog_load(), though, because it maps naturally to BPF_PROG_LOAD command. 
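Concretely, the new call shape looks like this (a sketch only; the program type, name, and opts values are placeholders):

#include <bpf/bpf.h>

static int load_one(const struct bpf_insn *insns, size_t insn_cnt,
		    char *log_buf, __u32 log_sz)
{
	/* mandatory parameters are positional, everything optional
	 * goes through bpf_prog_load_opts
	 */
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.log_buf = log_buf,
		.log_size = log_sz,
		.log_level = 1,
	);

	return bpf_prog_load(BPF_PROG_TYPE_XDP, "my_prog", "GPL",
			     insns, insn_cnt, &opts);
}
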
For that, we rename old bpf_prog_load() into bpf_prog_load_deprecated() and mark it as COMPAT_VERSION() for shared library users compiled against old version of libbpf. Statically linked users and shared lib users compiled against new version of libbpf headers will get "rerouted" to bpf_prog_deprecated() through a macro helper that decides whether to use new or old bpf_prog_load() based on number of input arguments (see ___libbpf_overload in libbpf_common.h). To test that existing bpf_prog_load()-using code compiles and works as expected, I've compiled and ran selftests as is. I had to remove (locally) selftest/bpf/Makefile -Dbpf_prog_load=bpf_prog_test_load hack because it was conflicting with the macro-based overload approach. I don't expect anyone else to do something like this in practice, though. This is testing-specific way to replace bpf_prog_load() calls with special testing variant of it, which adds extra prog_flags value. After testing I kept this selftests hack, but ensured that we use a new bpf_prog_load_deprecated name for this. This patch also marks bpf_prog_load() and bpf_prog_load_xattr() as deprecated. bpf_object interface has to be used for working with struct bpf_program. Libbpf doesn't support loading just a bpf_program. The silver lining is that when we get to libbpf 1.0 all these complication will be gone and we'll have one clean bpf_prog_load() low-level API with no backwards compatibility hackery surrounding it. [0] Closes: https://github.com/libbpf/libbpf/issues/284 Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20211103220845.2676888-4-andrii@kernel.org --- tools/lib/bpf/bpf.c | 149 +++++++++++++++++++++-------------- tools/lib/bpf/bpf.h | 73 ++++++++++++++++- tools/lib/bpf/bpf_gen_internal.h | 8 +- tools/lib/bpf/gen_loader.c | 30 +++---- tools/lib/bpf/libbpf.c | 51 ++++++------ tools/lib/bpf/libbpf.h | 5 +- tools/lib/bpf/libbpf.map | 2 + tools/lib/bpf/libbpf_common.h | 12 +++ tools/lib/bpf/libbpf_internal.h | 31 -------- tools/testing/selftests/bpf/Makefile | 2 +- 10 files changed, 223 insertions(+), 140 deletions(-) diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index 8e6a23c42560..8f2a701cb079 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -28,6 +28,7 @@ #include #include #include +#include #include "bpf.h" #include "libbpf.h" #include "libbpf_internal.h" @@ -254,58 +255,91 @@ alloc_zero_tailing_info(const void *orecord, __u32 cnt, return info; } -int libbpf__bpf_prog_load(const struct bpf_prog_load_params *load_attr) +DEFAULT_VERSION(bpf_prog_load_v0_6_0, bpf_prog_load, LIBBPF_0.6.0) +int bpf_prog_load_v0_6_0(enum bpf_prog_type prog_type, + const char *prog_name, const char *license, + const struct bpf_insn *insns, size_t insn_cnt, + const struct bpf_prog_load_opts *opts) { void *finfo = NULL, *linfo = NULL; + const char *func_info, *line_info; + __u32 log_size, log_level, attach_prog_fd, attach_btf_obj_fd; + __u32 func_info_rec_size, line_info_rec_size; + int fd, attempts; union bpf_attr attr; - int fd; + char *log_buf; - if (!load_attr->log_buf != !load_attr->log_buf_sz) + if (!OPTS_VALID(opts, bpf_prog_load_opts)) return libbpf_err(-EINVAL); - if (load_attr->log_level > (4 | 2 | 1) || (load_attr->log_level && !load_attr->log_buf)) + attempts = OPTS_GET(opts, attempts, 0); + if (attempts < 0) return libbpf_err(-EINVAL); + if (attempts == 0) + attempts = PROG_LOAD_ATTEMPTS; memset(&attr, 0, sizeof(attr)); - attr.prog_type = load_attr->prog_type; - attr.expected_attach_type = 
load_attr->expected_attach_type; - if (load_attr->attach_prog_fd) - attr.attach_prog_fd = load_attr->attach_prog_fd; - else - attr.attach_btf_obj_fd = load_attr->attach_btf_obj_fd; - attr.attach_btf_id = load_attr->attach_btf_id; + attr.prog_type = prog_type; + attr.expected_attach_type = OPTS_GET(opts, expected_attach_type, 0); - attr.prog_ifindex = load_attr->prog_ifindex; - attr.kern_version = load_attr->kern_version; + attr.prog_btf_fd = OPTS_GET(opts, prog_btf_fd, 0); + attr.prog_flags = OPTS_GET(opts, prog_flags, 0); + attr.prog_ifindex = OPTS_GET(opts, prog_ifindex, 0); + attr.kern_version = OPTS_GET(opts, kern_version, 0); - attr.insn_cnt = (__u32)load_attr->insn_cnt; - attr.insns = ptr_to_u64(load_attr->insns); - attr.license = ptr_to_u64(load_attr->license); + if (prog_name) + strncat(attr.prog_name, prog_name, sizeof(attr.prog_name) - 1); + attr.license = ptr_to_u64(license); - attr.log_level = load_attr->log_level; - if (attr.log_level) { - attr.log_buf = ptr_to_u64(load_attr->log_buf); - attr.log_size = load_attr->log_buf_sz; - } + if (insn_cnt > UINT_MAX) + return libbpf_err(-E2BIG); - attr.prog_btf_fd = load_attr->prog_btf_fd; - attr.prog_flags = load_attr->prog_flags; + attr.insns = ptr_to_u64(insns); + attr.insn_cnt = (__u32)insn_cnt; - attr.func_info_rec_size = load_attr->func_info_rec_size; - attr.func_info_cnt = load_attr->func_info_cnt; - attr.func_info = ptr_to_u64(load_attr->func_info); + attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0); + attach_btf_obj_fd = OPTS_GET(opts, attach_btf_obj_fd, 0); - attr.line_info_rec_size = load_attr->line_info_rec_size; - attr.line_info_cnt = load_attr->line_info_cnt; - attr.line_info = ptr_to_u64(load_attr->line_info); - attr.fd_array = ptr_to_u64(load_attr->fd_array); + if (attach_prog_fd && attach_btf_obj_fd) + return libbpf_err(-EINVAL); - if (load_attr->name) - memcpy(attr.prog_name, load_attr->name, - min(strlen(load_attr->name), (size_t)BPF_OBJ_NAME_LEN - 1)); + attr.attach_btf_id = OPTS_GET(opts, attach_btf_id, 0); + if (attach_prog_fd) + attr.attach_prog_fd = attach_prog_fd; + else + attr.attach_btf_obj_fd = attach_btf_obj_fd; - fd = sys_bpf_prog_load(&attr, sizeof(attr), PROG_LOAD_ATTEMPTS); + log_buf = OPTS_GET(opts, log_buf, NULL); + log_size = OPTS_GET(opts, log_size, 0); + log_level = OPTS_GET(opts, log_level, 0); + + if (!!log_buf != !!log_size) + return libbpf_err(-EINVAL); + if (log_level > (4 | 2 | 1)) + return libbpf_err(-EINVAL); + if (log_level && !log_buf) + return libbpf_err(-EINVAL); + + attr.log_level = log_level; + attr.log_buf = ptr_to_u64(log_buf); + attr.log_size = log_size; + + func_info_rec_size = OPTS_GET(opts, func_info_rec_size, 0); + func_info = OPTS_GET(opts, func_info, NULL); + attr.func_info_rec_size = func_info_rec_size; + attr.func_info = ptr_to_u64(func_info); + attr.func_info_cnt = OPTS_GET(opts, func_info_cnt, 0); + + line_info_rec_size = OPTS_GET(opts, line_info_rec_size, 0); + line_info = OPTS_GET(opts, line_info, NULL); + attr.line_info_rec_size = line_info_rec_size; + attr.line_info = ptr_to_u64(line_info); + attr.line_info_cnt = OPTS_GET(opts, line_info_cnt, 0); + + attr.fd_array = ptr_to_u64(OPTS_GET(opts, fd_array, NULL)); + + fd = sys_bpf_prog_load(&attr, sizeof(attr), attempts); if (fd >= 0) return fd; @@ -315,11 +349,11 @@ int libbpf__bpf_prog_load(const struct bpf_prog_load_params *load_attr) */ while (errno == E2BIG && (!finfo || !linfo)) { if (!finfo && attr.func_info_cnt && - attr.func_info_rec_size < load_attr->func_info_rec_size) { + attr.func_info_rec_size < 
func_info_rec_size) { /* try with corrected func info records */ - finfo = alloc_zero_tailing_info(load_attr->func_info, - load_attr->func_info_cnt, - load_attr->func_info_rec_size, + finfo = alloc_zero_tailing_info(func_info, + attr.func_info_cnt, + func_info_rec_size, attr.func_info_rec_size); if (!finfo) { errno = E2BIG; @@ -327,13 +361,12 @@ int libbpf__bpf_prog_load(const struct bpf_prog_load_params *load_attr) } attr.func_info = ptr_to_u64(finfo); - attr.func_info_rec_size = load_attr->func_info_rec_size; + attr.func_info_rec_size = func_info_rec_size; } else if (!linfo && attr.line_info_cnt && - attr.line_info_rec_size < - load_attr->line_info_rec_size) { - linfo = alloc_zero_tailing_info(load_attr->line_info, - load_attr->line_info_cnt, - load_attr->line_info_rec_size, + attr.line_info_rec_size < line_info_rec_size) { + linfo = alloc_zero_tailing_info(line_info, + attr.line_info_cnt, + line_info_rec_size, attr.line_info_rec_size); if (!linfo) { errno = E2BIG; @@ -341,26 +374,26 @@ int libbpf__bpf_prog_load(const struct bpf_prog_load_params *load_attr) } attr.line_info = ptr_to_u64(linfo); - attr.line_info_rec_size = load_attr->line_info_rec_size; + attr.line_info_rec_size = line_info_rec_size; } else { break; } - fd = sys_bpf_prog_load(&attr, sizeof(attr), PROG_LOAD_ATTEMPTS); + fd = sys_bpf_prog_load(&attr, sizeof(attr), attempts); if (fd >= 0) goto done; } - if (load_attr->log_level || !load_attr->log_buf) + if (log_level || !log_buf) goto done; /* Try again with log */ - attr.log_buf = ptr_to_u64(load_attr->log_buf); - attr.log_size = load_attr->log_buf_sz; + log_buf[0] = 0; + attr.log_buf = ptr_to_u64(log_buf); + attr.log_size = log_size; attr.log_level = 1; - load_attr->log_buf[0] = 0; - fd = sys_bpf_prog_load(&attr, sizeof(attr), PROG_LOAD_ATTEMPTS); + fd = sys_bpf_prog_load(&attr, sizeof(attr), attempts); done: /* free() doesn't affect errno, so we don't need to restore it */ free(finfo); @@ -371,14 +404,13 @@ done: int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr, char *log_buf, size_t log_buf_sz) { - struct bpf_prog_load_params p = {}; + LIBBPF_OPTS(bpf_prog_load_opts, p); if (!load_attr || !log_buf != !log_buf_sz) return libbpf_err(-EINVAL); - p.prog_type = load_attr->prog_type; p.expected_attach_type = load_attr->expected_attach_type; - switch (p.prog_type) { + switch (load_attr->prog_type) { case BPF_PROG_TYPE_STRUCT_OPS: case BPF_PROG_TYPE_LSM: p.attach_btf_id = load_attr->attach_btf_id; @@ -392,12 +424,9 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr, p.prog_ifindex = load_attr->prog_ifindex; p.kern_version = load_attr->kern_version; } - p.insn_cnt = load_attr->insns_cnt; - p.insns = load_attr->insns; - p.license = load_attr->license; p.log_level = load_attr->log_level; p.log_buf = log_buf; - p.log_buf_sz = log_buf_sz; + p.log_size = log_buf_sz; p.prog_btf_fd = load_attr->prog_btf_fd; p.func_info_rec_size = load_attr->func_info_rec_size; p.func_info_cnt = load_attr->func_info_cnt; @@ -405,10 +434,10 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr, p.line_info_rec_size = load_attr->line_info_rec_size; p.line_info_cnt = load_attr->line_info_cnt; p.line_info = load_attr->line_info; - p.name = load_attr->name; p.prog_flags = load_attr->prog_flags; - return libbpf__bpf_prog_load(&p); + return bpf_prog_load(load_attr->prog_type, load_attr->name, load_attr->license, + load_attr->insns, load_attr->insns_cnt, &p); } int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns, diff 
--git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h index f35146c1d9a9..079cc81ac51e 100644 --- a/tools/lib/bpf/bpf.h +++ b/tools/lib/bpf/bpf.h @@ -72,6 +72,71 @@ LIBBPF_API int bpf_create_map_in_map(enum bpf_map_type map_type, int inner_map_fd, int max_entries, __u32 map_flags); +struct bpf_prog_load_opts { + size_t sz; /* size of this struct for forward/backward compatibility */ + + /* libbpf can retry BPF_PROG_LOAD command if bpf() syscall returns + * -EAGAIN. This field determines how many attempts libbpf has to + * make. If not specified, libbpf will use default value of 5. + */ + int attempts; + + enum bpf_attach_type expected_attach_type; + __u32 prog_btf_fd; + __u32 prog_flags; + __u32 prog_ifindex; + __u32 kern_version; + + __u32 attach_btf_id; + __u32 attach_prog_fd; + __u32 attach_btf_obj_fd; + + const int *fd_array; + + /* .BTF.ext func info data */ + const void *func_info; + __u32 func_info_cnt; + __u32 func_info_rec_size; + + /* .BTF.ext line info data */ + const void *line_info; + __u32 line_info_cnt; + __u32 line_info_rec_size; + + /* verifier log options */ + __u32 log_level; + __u32 log_size; + char *log_buf; +}; +#define bpf_prog_load_opts__last_field log_buf + +LIBBPF_API int bpf_prog_load(enum bpf_prog_type prog_type, + const char *prog_name, const char *license, + const struct bpf_insn *insns, size_t insn_cnt, + const struct bpf_prog_load_opts *opts); +/* this "specialization" should go away in libbpf 1.0 */ +LIBBPF_API int bpf_prog_load_v0_6_0(enum bpf_prog_type prog_type, + const char *prog_name, const char *license, + const struct bpf_insn *insns, size_t insn_cnt, + const struct bpf_prog_load_opts *opts); + +/* This is an elaborate way to not conflict with deprecated bpf_prog_load() + * API, defined in libbpf.h. Once we hit libbpf 1.0, all this will be gone. + * With this approach, if someone is calling bpf_prog_load() with + * 4 arguments, they will use the deprecated API, which keeps backwards + * compatibility (both source code and binary). If bpf_prog_load() is called + * with 6 arguments, though, it gets redirected to __bpf_prog_load. + * So looking forward to libbpf 1.0 when this hack will be gone and + * __bpf_prog_load() will be called just bpf_prog_load(). + */ +#ifndef bpf_prog_load +#define bpf_prog_load(...) 
___libbpf_overload(___bpf_prog_load, __VA_ARGS__) +#define ___bpf_prog_load4(file, type, pobj, prog_fd) \ + bpf_prog_load_deprecated(file, type, pobj, prog_fd) +#define ___bpf_prog_load6(prog_type, prog_name, license, insns, insn_cnt, opts) \ + bpf_prog_load(prog_type, prog_name, license, insns, insn_cnt, opts) +#endif /* bpf_prog_load */ + struct bpf_load_program_attr { enum bpf_prog_type prog_type; enum bpf_attach_type expected_attach_type; @@ -103,13 +168,15 @@ struct bpf_load_program_attr { /* Recommend log buffer size */ #define BPF_LOG_BUF_SIZE (UINT32_MAX >> 8) /* verifier maximum in kernels <= 5.1 */ -LIBBPF_API int -bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr, - char *log_buf, size_t log_buf_sz); +LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_prog_load() instead") +LIBBPF_API int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr, + char *log_buf, size_t log_buf_sz); +LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_prog_load() instead") LIBBPF_API int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns, size_t insns_cnt, const char *license, __u32 kern_version, char *log_buf, size_t log_buf_sz); +LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_prog_load() instead") LIBBPF_API int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns, size_t insns_cnt, __u32 prog_flags, diff --git a/tools/lib/bpf/bpf_gen_internal.h b/tools/lib/bpf/bpf_gen_internal.h index d26e5472fe50..75ca9fb857b2 100644 --- a/tools/lib/bpf/bpf_gen_internal.h +++ b/tools/lib/bpf/bpf_gen_internal.h @@ -3,6 +3,8 @@ #ifndef __BPF_GEN_INTERNAL_H #define __BPF_GEN_INTERNAL_H +#include "bpf.h" + struct ksym_relo_desc { const char *name; int kind; @@ -50,8 +52,10 @@ int bpf_gen__finish(struct bpf_gen *gen); void bpf_gen__free(struct bpf_gen *gen); void bpf_gen__load_btf(struct bpf_gen *gen, const void *raw_data, __u32 raw_size); void bpf_gen__map_create(struct bpf_gen *gen, struct bpf_create_map_params *map_attr, int map_idx); -struct bpf_prog_load_params; -void bpf_gen__prog_load(struct bpf_gen *gen, struct bpf_prog_load_params *load_attr, int prog_idx); +void bpf_gen__prog_load(struct bpf_gen *gen, + enum bpf_prog_type prog_type, const char *prog_name, + const char *license, struct bpf_insn *insns, size_t insn_cnt, + struct bpf_prog_load_opts *load_attr, int prog_idx); void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *value, __u32 value_size); void bpf_gen__map_freeze(struct bpf_gen *gen, int map_idx); void bpf_gen__record_attach_target(struct bpf_gen *gen, const char *name, enum bpf_attach_type type); diff --git a/tools/lib/bpf/gen_loader.c b/tools/lib/bpf/gen_loader.c index 2e10776b6d85..7b73f97b1fa1 100644 --- a/tools/lib/bpf/gen_loader.c +++ b/tools/lib/bpf/gen_loader.c @@ -901,27 +901,27 @@ static void cleanup_relos(struct bpf_gen *gen, int insns) } void bpf_gen__prog_load(struct bpf_gen *gen, - struct bpf_prog_load_params *load_attr, int prog_idx) + enum bpf_prog_type prog_type, const char *prog_name, + const char *license, struct bpf_insn *insns, size_t insn_cnt, + struct bpf_prog_load_opts *load_attr, int prog_idx) { int attr_size = offsetofend(union bpf_attr, fd_array); - int prog_load_attr, license, insns, func_info, line_info; + int prog_load_attr, license_off, insns_off, func_info, line_info; union bpf_attr attr; memset(&attr, 0, attr_size); - pr_debug("gen: prog_load: type %d insns_cnt %zd\n", - load_attr->prog_type, load_attr->insn_cnt); + pr_debug("gen: prog_load: type %d insns_cnt %zd\n", prog_type, insn_cnt); /* add license 
string to blob of bytes */ - license = add_data(gen, load_attr->license, strlen(load_attr->license) + 1); + license_off = add_data(gen, license, strlen(license) + 1); /* add insns to blob of bytes */ - insns = add_data(gen, load_attr->insns, - load_attr->insn_cnt * sizeof(struct bpf_insn)); + insns_off = add_data(gen, insns, insn_cnt * sizeof(struct bpf_insn)); - attr.prog_type = load_attr->prog_type; + attr.prog_type = prog_type; attr.expected_attach_type = load_attr->expected_attach_type; attr.attach_btf_id = load_attr->attach_btf_id; attr.prog_ifindex = load_attr->prog_ifindex; attr.kern_version = 0; - attr.insn_cnt = (__u32)load_attr->insn_cnt; + attr.insn_cnt = (__u32)insn_cnt; attr.prog_flags = load_attr->prog_flags; attr.func_info_rec_size = load_attr->func_info_rec_size; @@ -934,15 +934,15 @@ void bpf_gen__prog_load(struct bpf_gen *gen, line_info = add_data(gen, load_attr->line_info, attr.line_info_cnt * attr.line_info_rec_size); - memcpy(attr.prog_name, load_attr->name, - min((unsigned)strlen(load_attr->name), BPF_OBJ_NAME_LEN - 1)); + memcpy(attr.prog_name, prog_name, + min((unsigned)strlen(prog_name), BPF_OBJ_NAME_LEN - 1)); prog_load_attr = add_data(gen, &attr, attr_size); /* populate union bpf_attr with a pointer to license */ - emit_rel_store(gen, attr_field(prog_load_attr, license), license); + emit_rel_store(gen, attr_field(prog_load_attr, license), license_off); /* populate union bpf_attr with a pointer to instructions */ - emit_rel_store(gen, attr_field(prog_load_attr, insns), insns); + emit_rel_store(gen, attr_field(prog_load_attr, insns), insns_off); /* populate union bpf_attr with a pointer to func_info */ emit_rel_store(gen, attr_field(prog_load_attr, func_info), func_info); @@ -974,12 +974,12 @@ void bpf_gen__prog_load(struct bpf_gen *gen, emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, offsetof(union bpf_attr, attach_btf_obj_fd))); } - emit_relos(gen, insns); + emit_relos(gen, insns_off); /* emit PROG_LOAD command */ emit_sys_bpf(gen, BPF_PROG_LOAD, prog_load_attr, attr_size); debug_ret(gen, "prog_load %s insn_cnt %d", attr.prog_name, attr.insn_cnt); /* successful or not, close btf module FDs used in extern ksyms and attach_btf_obj_fd */ - cleanup_relos(gen, insns); + cleanup_relos(gen, insns_off); if (gen->attach_kind) emit_sys_close_blob(gen, attr_field(prog_load_attr, attach_btf_obj_fd)); diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 7fcea11ecaa9..7a82b81b8859 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -221,7 +221,7 @@ struct reloc_desc { struct bpf_sec_def; typedef int (*init_fn_t)(struct bpf_program *prog, long cookie); -typedef int (*preload_fn_t)(struct bpf_program *prog, struct bpf_prog_load_params *attr, long cookie); +typedef int (*preload_fn_t)(struct bpf_program *prog, struct bpf_prog_load_opts *opts, long cookie); typedef struct bpf_link *(*attach_fn_t)(const struct bpf_program *prog, long cookie); /* stored as sec_def->cookie for all libbpf-supported SEC()s */ @@ -6391,16 +6391,16 @@ static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attac /* this is called as prog->sec_def->preload_fn for libbpf-supported sec_defs */ static int libbpf_preload_prog(struct bpf_program *prog, - struct bpf_prog_load_params *attr, long cookie) + struct bpf_prog_load_opts *opts, long cookie) { enum sec_def_flags def = cookie; /* old kernels might not support specifying expected_attach_type */ if ((def & SEC_EXP_ATTACH_OPT) && !kernel_supports(prog->obj, FEAT_EXP_ATTACH_TYPE)) - 
attr->expected_attach_type = 0; + opts->expected_attach_type = 0; if (def & SEC_SLEEPABLE) - attr->prog_flags |= BPF_F_SLEEPABLE; + opts->prog_flags |= BPF_F_SLEEPABLE; if ((prog->type == BPF_PROG_TYPE_TRACING || prog->type == BPF_PROG_TYPE_LSM || @@ -6419,11 +6419,11 @@ static int libbpf_preload_prog(struct bpf_program *prog, /* but by now libbpf common logic is not utilizing * prog->atach_btf_obj_fd/prog->attach_btf_id anymore because - * this callback is called after attrs were populated by - * libbpf, so this callback has to update attr explicitly here + * this callback is called after opts were populated by + * libbpf, so this callback has to update opts explicitly here */ - attr->attach_btf_obj_fd = btf_obj_fd; - attr->attach_btf_id = btf_type_id; + opts->attach_btf_obj_fd = btf_obj_fd; + opts->attach_btf_id = btf_type_id; } return 0; } @@ -6433,7 +6433,8 @@ static int bpf_object_load_prog_instance(struct bpf_object *obj, struct bpf_prog const char *license, __u32 kern_version, int *prog_fd) { - struct bpf_prog_load_params load_attr = {}; + LIBBPF_OPTS(bpf_prog_load_opts, load_attr); + const char *prog_name = NULL; char *cp, errmsg[STRERR_BUFSIZE]; size_t log_buf_size = 0; char *log_buf = NULL; @@ -6452,13 +6453,9 @@ static int bpf_object_load_prog_instance(struct bpf_object *obj, struct bpf_prog if (!insns || !insns_cnt) return -EINVAL; - load_attr.prog_type = prog->type; load_attr.expected_attach_type = prog->expected_attach_type; if (kernel_supports(obj, FEAT_PROG_NAME)) - load_attr.name = prog->name; - load_attr.insns = insns; - load_attr.insn_cnt = insns_cnt; - load_attr.license = license; + prog_name = prog->name; load_attr.attach_btf_id = prog->attach_btf_id; load_attr.attach_prog_fd = prog->attach_prog_fd; load_attr.attach_btf_obj_fd = prog->attach_btf_obj_fd; @@ -6492,7 +6489,8 @@ static int bpf_object_load_prog_instance(struct bpf_object *obj, struct bpf_prog } if (obj->gen_loader) { - bpf_gen__prog_load(obj->gen_loader, &load_attr, + bpf_gen__prog_load(obj->gen_loader, prog->type, prog->name, + license, insns, insns_cnt, &load_attr, prog - obj->programs); *prog_fd = -1; return 0; @@ -6507,8 +6505,8 @@ retry_load: } load_attr.log_buf = log_buf; - load_attr.log_buf_sz = log_buf_size; - ret = libbpf__bpf_prog_load(&load_attr); + load_attr.log_size = log_buf_size; + ret = bpf_prog_load(prog->type, prog_name, license, insns, insns_cnt, &load_attr); if (ret >= 0) { if (log_buf && load_attr.log_level) @@ -6554,19 +6552,19 @@ retry_load: pr_warn("-- BEGIN DUMP LOG ---\n"); pr_warn("\n%s\n", log_buf); pr_warn("-- END LOG --\n"); - } else if (load_attr.insn_cnt >= BPF_MAXINSNS) { - pr_warn("Program too large (%zu insns), at most %d insns\n", - load_attr.insn_cnt, BPF_MAXINSNS); + } else if (insns_cnt >= BPF_MAXINSNS) { + pr_warn("Program too large (%d insns), at most %d insns\n", + insns_cnt, BPF_MAXINSNS); ret = -LIBBPF_ERRNO__PROG2BIG; - } else if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) { + } else if (prog->type != BPF_PROG_TYPE_KPROBE) { /* Wrong program type? 
*/ int fd; - load_attr.prog_type = BPF_PROG_TYPE_KPROBE; load_attr.expected_attach_type = 0; load_attr.log_buf = NULL; - load_attr.log_buf_sz = 0; - fd = libbpf__bpf_prog_load(&load_attr); + load_attr.log_size = 0; + fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, prog_name, license, + insns, insns_cnt, &load_attr); if (fd >= 0) { close(fd); ret = -LIBBPF_ERRNO__PROGTYPE; @@ -9170,8 +9168,9 @@ long libbpf_get_error(const void *ptr) return -errno; } -int bpf_prog_load(const char *file, enum bpf_prog_type type, - struct bpf_object **pobj, int *prog_fd) +COMPAT_VERSION(bpf_prog_load_deprecated, bpf_prog_load, LIBBPF_0.0.1) +int bpf_prog_load_deprecated(const char *file, enum bpf_prog_type type, + struct bpf_object **pobj, int *prog_fd) { struct bpf_prog_load_attr attr; diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index a364c379b998..bbc828667b22 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -676,8 +676,9 @@ struct bpf_prog_load_attr { LIBBPF_API int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr, struct bpf_object **pobj, int *prog_fd); -LIBBPF_API int bpf_prog_load(const char *file, enum bpf_prog_type type, - struct bpf_object **pobj, int *prog_fd); +LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_object__open() and bpf_object__load() instead") +LIBBPF_API int bpf_prog_load_deprecated(const char *file, enum bpf_prog_type type, + struct bpf_object **pobj, int *prog_fd); /* XDP related API */ struct xdp_link_info { diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index 43580eb47740..b895861a13c0 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -395,6 +395,8 @@ LIBBPF_0.6.0 { bpf_object__next_program; bpf_object__prev_map; bpf_object__prev_program; + bpf_prog_load_deprecated; + bpf_prog_load; bpf_program__insn_cnt; bpf_program__insns; btf__add_btf; diff --git a/tools/lib/bpf/libbpf_common.h b/tools/lib/bpf/libbpf_common.h index 0967112b933a..b21cefc9c3b6 100644 --- a/tools/lib/bpf/libbpf_common.h +++ b/tools/lib/bpf/libbpf_common.h @@ -41,6 +41,18 @@ #define __LIBBPF_MARK_DEPRECATED_0_7(X) #endif +/* This set of internal macros allows to do "function overloading" based on + * number of arguments provided by used in backwards-compatible way during the + * transition to libbpf 1.0 + * It's ugly but necessary evil that will be cleaned up when we get to 1.0. + * See bpf_prog_load() overload for example. + */ +#define ___libbpf_cat(A, B) A ## B +#define ___libbpf_select(NAME, NUM) ___libbpf_cat(NAME, NUM) +#define ___libbpf_nth(_1, _2, _3, _4, _5, _6, N, ...) N +#define ___libbpf_cnt(...) ___libbpf_nth(__VA_ARGS__, 6, 5, 4, 3, 2, 1) +#define ___libbpf_overload(NAME, ...) 
___libbpf_select(NAME, ___libbpf_cnt(__VA_ARGS__))(__VA_ARGS__) + /* Helper macro to declare and initialize libbpf options struct * * This dance with uninitialized declaration, followed by memset to zero, diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h index aeb79e3a8ff9..c1e34794b829 100644 --- a/tools/lib/bpf/libbpf_internal.h +++ b/tools/lib/bpf/libbpf_internal.h @@ -276,37 +276,6 @@ int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz); int libbpf__load_raw_btf(const char *raw_types, size_t types_len, const char *str_sec, size_t str_len); -struct bpf_prog_load_params { - enum bpf_prog_type prog_type; - enum bpf_attach_type expected_attach_type; - const char *name; - const struct bpf_insn *insns; - size_t insn_cnt; - const char *license; - __u32 kern_version; - __u32 attach_prog_fd; - __u32 attach_btf_obj_fd; - __u32 attach_btf_id; - __u32 prog_ifindex; - __u32 prog_btf_fd; - __u32 prog_flags; - - __u32 func_info_rec_size; - const void *func_info; - __u32 func_info_cnt; - - __u32 line_info_rec_size; - const void *line_info; - __u32 line_info_cnt; - - __u32 log_level; - char *log_buf; - size_t log_buf_sz; - int *fd_array; -}; - -int libbpf__bpf_prog_load(const struct bpf_prog_load_params *load_attr); - struct bpf_create_map_params { const char *name; enum bpf_map_type map_type; diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 54b0a41a3775..c4497a4af3fe 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -24,7 +24,7 @@ SAN_CFLAGS ?= CFLAGS += -g -O0 -rdynamic -Wall $(GENFLAGS) $(SAN_CFLAGS) \ -I$(CURDIR) -I$(INCLUDE_DIR) -I$(GENDIR) -I$(LIBDIR) \ -I$(TOOLSINCDIR) -I$(APIDIR) -I$(OUTPUT) \ - -Dbpf_prog_load=bpf_prog_test_load \ + -Dbpf_prog_load_deprecated=bpf_prog_test_load \ -Dbpf_load_program=bpf_test_load_program LDLIBS += -lcap -lelf -lz -lrt -lpthread -- cgit v1.2.3 From e32660ac6fd6bd3c9d249644330d968c6ef61b07 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 3 Nov 2021 15:08:37 -0700 Subject: libbpf: Remove internal use of deprecated bpf_prog_load() variants Remove all the internal uses of bpf_load_program_xattr(), which is slated for deprecation in v0.7. 
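For illustration, a minimal sketch (not part of the patch; the dummy program, buffer size, and function name are made up) of what such a conversion looks like for a caller: prog_type, name, license, and instructions become direct arguments of the opts-based bpf_prog_load() added in the previous patch, and everything else moves into struct bpf_prog_load_opts:

  #include <bpf/bpf.h>	/* bpf_prog_load(), LIBBPF_OPTS() */

  /* BPF_MOV64_IMM()/BPF_EXIT_INSN() are the insn-building macros used
   * throughout the selftests, not part of the public libbpf headers.
   */
  static int load_dummy_filter(void)
  {
  	struct bpf_insn insns[] = {
  		BPF_MOV64_IMM(BPF_REG_0, 0),
  		BPF_EXIT_INSN(),
  	};
  	char log_buf[4096];
  	LIBBPF_OPTS(bpf_prog_load_opts, opts,
  		.log_buf = log_buf,
  		.log_size = sizeof(log_buf),
  		.log_level = 1,
  	);

  	/* replaces a bpf_load_program_xattr(&attr, log_buf, sizeof(log_buf))
  	 * call that had .prog_type/.insns/.insns_cnt/.license set in attr
  	 */
  	return bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL",
  			     insns, sizeof(insns) / sizeof(insns[0]), &opts);
  }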
Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20211103220845.2676888-5-andrii@kernel.org --- tools/lib/bpf/bpf.c | 8 ++- tools/lib/bpf/libbpf.c | 119 ++++++++++++++---------------------------- tools/lib/bpf/libbpf_probes.c | 20 ++++--- tools/lib/bpf/xsk.c | 34 ++++-------- 4 files changed, 64 insertions(+), 117 deletions(-) diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index 8f2a701cb079..4ff45749f8cf 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -401,8 +401,12 @@ done: return libbpf_err_errno(fd); } +__attribute__((alias("bpf_load_program_xattr2"))) int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr, - char *log_buf, size_t log_buf_sz) + char *log_buf, size_t log_buf_sz); + +static int bpf_load_program_xattr2(const struct bpf_load_program_attr *load_attr, + char *log_buf, size_t log_buf_sz) { LIBBPF_OPTS(bpf_prog_load_opts, p); @@ -456,7 +460,7 @@ int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns, load_attr.license = license; load_attr.kern_version = kern_version; - return bpf_load_program_xattr(&load_attr, log_buf, log_buf_sz); + return bpf_load_program_xattr2(&load_attr, log_buf, log_buf_sz); } int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns, diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 7a82b81b8859..5751cade0f66 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -4282,30 +4282,20 @@ int bpf_map__resize(struct bpf_map *map, __u32 max_entries) static int bpf_object__probe_loading(struct bpf_object *obj) { - struct bpf_load_program_attr attr; char *cp, errmsg[STRERR_BUFSIZE]; struct bpf_insn insns[] = { BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }; - int ret; + int ret, insn_cnt = ARRAY_SIZE(insns); if (obj->gen_loader) return 0; /* make sure basic loading works */ - - memset(&attr, 0, sizeof(attr)); - attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER; - attr.insns = insns; - attr.insns_cnt = ARRAY_SIZE(insns); - attr.license = "GPL"; - - ret = bpf_load_program_xattr(&attr, NULL, 0); - if (ret < 0) { - attr.prog_type = BPF_PROG_TYPE_TRACEPOINT; - ret = bpf_load_program_xattr(&attr, NULL, 0); - } + ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, NULL); + if (ret < 0) + ret = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, NULL); if (ret < 0) { ret = errno; cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg)); @@ -4329,28 +4319,19 @@ static int probe_fd(int fd) static int probe_kern_prog_name(void) { - struct bpf_load_program_attr attr; struct bpf_insn insns[] = { BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }; - int ret; + int ret, insn_cnt = ARRAY_SIZE(insns); /* make sure loading with name works */ - - memset(&attr, 0, sizeof(attr)); - attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER; - attr.insns = insns; - attr.insns_cnt = ARRAY_SIZE(insns); - attr.license = "GPL"; - attr.name = "test"; - ret = bpf_load_program_xattr(&attr, NULL, 0); + ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "test", "GPL", insns, insn_cnt, NULL); return probe_fd(ret); } static int probe_kern_global_data(void) { - struct bpf_load_program_attr prg_attr; struct bpf_create_map_attr map_attr; char *cp, errmsg[STRERR_BUFSIZE]; struct bpf_insn insns[] = { @@ -4359,7 +4340,7 @@ static int probe_kern_global_data(void) BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }; - int ret, map; + int ret, map, insn_cnt = ARRAY_SIZE(insns); memset(&map_attr, 0, sizeof(map_attr)); map_attr.map_type 
= BPF_MAP_TYPE_ARRAY; @@ -4378,13 +4359,7 @@ static int probe_kern_global_data(void) insns[0].imm = map; - memset(&prg_attr, 0, sizeof(prg_attr)); - prg_attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER; - prg_attr.insns = insns; - prg_attr.insns_cnt = ARRAY_SIZE(insns); - prg_attr.license = "GPL"; - - ret = bpf_load_program_xattr(&prg_attr, NULL, 0); + ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, NULL); close(map); return probe_fd(ret); } @@ -4500,30 +4475,24 @@ static int probe_kern_array_mmap(void) static int probe_kern_exp_attach_type(void) { - struct bpf_load_program_attr attr; + LIBBPF_OPTS(bpf_prog_load_opts, opts, .expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE); struct bpf_insn insns[] = { BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }; + int fd, insn_cnt = ARRAY_SIZE(insns); - memset(&attr, 0, sizeof(attr)); /* use any valid combination of program type and (optional) * non-zero expected attach type (i.e., not a BPF_CGROUP_INET_INGRESS) * to see if kernel supports expected_attach_type field for * BPF_PROG_LOAD command */ - attr.prog_type = BPF_PROG_TYPE_CGROUP_SOCK; - attr.expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE; - attr.insns = insns; - attr.insns_cnt = ARRAY_SIZE(insns); - attr.license = "GPL"; - - return probe_fd(bpf_load_program_xattr(&attr, NULL, 0)); + fd = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SOCK, NULL, "GPL", insns, insn_cnt, &opts); + return probe_fd(fd); } static int probe_kern_probe_read_kernel(void) { - struct bpf_load_program_attr attr; struct bpf_insn insns[] = { BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), /* r1 = r10 (fp) */ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), /* r1 += -8 */ @@ -4532,26 +4501,21 @@ static int probe_kern_probe_read_kernel(void) BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_probe_read_kernel), BPF_EXIT_INSN(), }; + int fd, insn_cnt = ARRAY_SIZE(insns); - memset(&attr, 0, sizeof(attr)); - attr.prog_type = BPF_PROG_TYPE_KPROBE; - attr.insns = insns; - attr.insns_cnt = ARRAY_SIZE(insns); - attr.license = "GPL"; - - return probe_fd(bpf_load_program_xattr(&attr, NULL, 0)); + fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, NULL, "GPL", insns, insn_cnt, NULL); + return probe_fd(fd); } static int probe_prog_bind_map(void) { - struct bpf_load_program_attr prg_attr; struct bpf_create_map_attr map_attr; char *cp, errmsg[STRERR_BUFSIZE]; struct bpf_insn insns[] = { BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }; - int ret, map, prog; + int ret, map, prog, insn_cnt = ARRAY_SIZE(insns); memset(&map_attr, 0, sizeof(map_attr)); map_attr.map_type = BPF_MAP_TYPE_ARRAY; @@ -4568,13 +4532,7 @@ static int probe_prog_bind_map(void) return ret; } - memset(&prg_attr, 0, sizeof(prg_attr)); - prg_attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER; - prg_attr.insns = insns; - prg_attr.insns_cnt = ARRAY_SIZE(insns); - prg_attr.license = "GPL"; - - prog = bpf_load_program_xattr(&prg_attr, NULL, 0); + prog = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, NULL); if (prog < 0) { close(map); return 0; @@ -4619,19 +4577,14 @@ static int probe_module_btf(void) static int probe_perf_link(void) { - struct bpf_load_program_attr attr; struct bpf_insn insns[] = { BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }; int prog_fd, link_fd, err; - memset(&attr, 0, sizeof(attr)); - attr.prog_type = BPF_PROG_TYPE_TRACEPOINT; - attr.insns = insns; - attr.insns_cnt = ARRAY_SIZE(insns); - attr.license = "GPL"; - prog_fd = bpf_load_program_xattr(&attr, NULL, 0); + prog_fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", + insns, 
ARRAY_SIZE(insns), NULL); if (prog_fd < 0) return -errno; @@ -9168,22 +9121,12 @@ long libbpf_get_error(const void *ptr) return -errno; } -COMPAT_VERSION(bpf_prog_load_deprecated, bpf_prog_load, LIBBPF_0.0.1) -int bpf_prog_load_deprecated(const char *file, enum bpf_prog_type type, - struct bpf_object **pobj, int *prog_fd) -{ - struct bpf_prog_load_attr attr; - - memset(&attr, 0, sizeof(struct bpf_prog_load_attr)); - attr.file = file; - attr.prog_type = type; - attr.expected_attach_type = 0; - - return bpf_prog_load_xattr(&attr, pobj, prog_fd); -} - +__attribute__((alias("bpf_prog_load_xattr2"))) int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr, - struct bpf_object **pobj, int *prog_fd) + struct bpf_object **pobj, int *prog_fd); + +static int bpf_prog_load_xattr2(const struct bpf_prog_load_attr *attr, + struct bpf_object **pobj, int *prog_fd) { struct bpf_object_open_attr open_attr = {}; struct bpf_program *prog, *first_prog = NULL; @@ -9254,6 +9197,20 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr, return 0; } +COMPAT_VERSION(bpf_prog_load_deprecated, bpf_prog_load, LIBBPF_0.0.1) +int bpf_prog_load_deprecated(const char *file, enum bpf_prog_type type, + struct bpf_object **pobj, int *prog_fd) +{ + struct bpf_prog_load_attr attr; + + memset(&attr, 0, sizeof(struct bpf_prog_load_attr)); + attr.file = file; + attr.prog_type = type; + attr.expected_attach_type = 0; + + return bpf_prog_load_xattr2(&attr, pobj, prog_fd); +} + struct bpf_link { int (*detach)(struct bpf_link *link); void (*dealloc)(struct bpf_link *link); diff --git a/tools/lib/bpf/libbpf_probes.c b/tools/lib/bpf/libbpf_probes.c index 68f2dbf364aa..02c401e314c7 100644 --- a/tools/lib/bpf/libbpf_probes.c +++ b/tools/lib/bpf/libbpf_probes.c @@ -68,21 +68,21 @@ static void probe_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns, size_t insns_cnt, char *buf, size_t buf_len, __u32 ifindex) { - struct bpf_load_program_attr xattr = {}; + LIBBPF_OPTS(bpf_prog_load_opts, opts); int fd; switch (prog_type) { case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: - xattr.expected_attach_type = BPF_CGROUP_INET4_CONNECT; + opts.expected_attach_type = BPF_CGROUP_INET4_CONNECT; break; case BPF_PROG_TYPE_CGROUP_SOCKOPT: - xattr.expected_attach_type = BPF_CGROUP_GETSOCKOPT; + opts.expected_attach_type = BPF_CGROUP_GETSOCKOPT; break; case BPF_PROG_TYPE_SK_LOOKUP: - xattr.expected_attach_type = BPF_SK_LOOKUP; + opts.expected_attach_type = BPF_SK_LOOKUP; break; case BPF_PROG_TYPE_KPROBE: - xattr.kern_version = get_kernel_version(); + opts.kern_version = get_kernel_version(); break; case BPF_PROG_TYPE_UNSPEC: case BPF_PROG_TYPE_SOCKET_FILTER: @@ -115,13 +115,11 @@ probe_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns, break; } - xattr.prog_type = prog_type; - xattr.insns = insns; - xattr.insns_cnt = insns_cnt; - xattr.license = "GPL"; - xattr.prog_ifindex = ifindex; + opts.prog_ifindex = ifindex; + opts.log_buf = buf; + opts.log_size = buf_len; - fd = bpf_load_program_xattr(&xattr, buf, buf_len); + fd = bpf_prog_load(prog_type, NULL, "GPL", insns, insns_cnt, NULL); if (fd >= 0) close(fd); } diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c index 81f8fbc85e70..fdb22f5405c9 100644 --- a/tools/lib/bpf/xsk.c +++ b/tools/lib/bpf/xsk.c @@ -364,7 +364,6 @@ int xsk_umem__create_v0_0_2(struct xsk_umem **umem_ptr, void *umem_area, static enum xsk_prog get_xsk_prog(void) { enum xsk_prog detected = XSK_PROG_FALLBACK; - struct bpf_load_program_attr prog_attr; struct bpf_create_map_attr map_attr; __u32 size_out, 
retval, duration; char data_in = 0, data_out; @@ -375,7 +374,7 @@ static enum xsk_prog get_xsk_prog(void) BPF_EMIT_CALL(BPF_FUNC_redirect_map), BPF_EXIT_INSN(), }; - int prog_fd, map_fd, ret; + int prog_fd, map_fd, ret, insn_cnt = ARRAY_SIZE(insns); memset(&map_attr, 0, sizeof(map_attr)); map_attr.map_type = BPF_MAP_TYPE_XSKMAP; @@ -389,13 +388,7 @@ static enum xsk_prog get_xsk_prog(void) insns[0].imm = map_fd; - memset(&prog_attr, 0, sizeof(prog_attr)); - prog_attr.prog_type = BPF_PROG_TYPE_XDP; - prog_attr.insns = insns; - prog_attr.insns_cnt = ARRAY_SIZE(insns); - prog_attr.license = "GPL"; - - prog_fd = bpf_load_program_xattr(&prog_attr, NULL, 0); + prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, NULL, "GPL", insns, insn_cnt, NULL); if (prog_fd < 0) { close(map_fd); return detected; @@ -495,10 +488,13 @@ static int xsk_load_xdp_prog(struct xsk_socket *xsk) }; struct bpf_insn *progs[] = {prog, prog_redirect_flags}; enum xsk_prog option = get_xsk_prog(); + LIBBPF_OPTS(bpf_prog_load_opts, opts, + .log_buf = log_buf, + .log_size = log_buf_size, + ); - prog_fd = bpf_load_program(BPF_PROG_TYPE_XDP, progs[option], insns_cnt[option], - "LGPL-2.1 or BSD-2-Clause", 0, log_buf, - log_buf_size); + prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, NULL, "LGPL-2.1 or BSD-2-Clause", + progs[option], insns_cnt[option], &opts); if (prog_fd < 0) { pr_warn("BPF log buffer:\n%s", log_buf); return prog_fd; @@ -725,14 +721,12 @@ static int xsk_link_lookup(int ifindex, __u32 *prog_id, int *link_fd) static bool xsk_probe_bpf_link(void) { - DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts, - .flags = XDP_FLAGS_SKB_MODE); - struct bpf_load_program_attr prog_attr; + LIBBPF_OPTS(bpf_link_create_opts, opts, .flags = XDP_FLAGS_SKB_MODE); struct bpf_insn insns[2] = { BPF_MOV64_IMM(BPF_REG_0, XDP_PASS), BPF_EXIT_INSN() }; - int prog_fd, link_fd = -1; + int prog_fd, link_fd = -1, insn_cnt = ARRAY_SIZE(insns); int ifindex_lo = 1; bool ret = false; int err; @@ -744,13 +738,7 @@ static bool xsk_probe_bpf_link(void) if (link_fd >= 0) return true; - memset(&prog_attr, 0, sizeof(prog_attr)); - prog_attr.prog_type = BPF_PROG_TYPE_XDP; - prog_attr.insns = insns; - prog_attr.insns_cnt = ARRAY_SIZE(insns); - prog_attr.license = "GPL"; - - prog_fd = bpf_load_program_xattr(&prog_attr, NULL, 0); + prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, NULL, "GPL", insns, insn_cnt, NULL); if (prog_fd < 0) return ret; -- cgit v1.2.3 From bcc40fc0021d4b7c016f8bcf62bd4e21251fdee8 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 3 Nov 2021 15:08:38 -0700 Subject: libbpf: Stop using to-be-deprecated APIs Remove all the internal uses of libbpf APIs that are slated to be deprecated in v0.7. 
Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20211103220845.2676888-6-andrii@kernel.org --- tools/lib/bpf/libbpf.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 5751cade0f66..dfd15cc60ea7 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -7709,7 +7709,7 @@ int bpf_object__pin_maps(struct bpf_object *obj, const char *path) return 0; err_unpin_maps: - while ((map = bpf_map__prev(map, obj))) { + while ((map = bpf_object__prev_map(obj, map))) { if (!map->pin_path) continue; @@ -7789,7 +7789,7 @@ int bpf_object__pin_programs(struct bpf_object *obj, const char *path) return 0; err_unpin_programs: - while ((prog = bpf_program__prev(prog, obj))) { + while ((prog = bpf_object__prev_program(obj, prog))) { char buf[PATH_MAX]; int len; @@ -8130,9 +8130,11 @@ int bpf_program__set_autoload(struct bpf_program *prog, bool autoload) return 0; } +static int bpf_program_nth_fd(const struct bpf_program *prog, int n); + int bpf_program__fd(const struct bpf_program *prog) { - return bpf_program__nth_fd(prog, 0); + return bpf_program_nth_fd(prog, 0); } size_t bpf_program__size(const struct bpf_program *prog) @@ -8178,7 +8180,10 @@ int bpf_program__set_prep(struct bpf_program *prog, int nr_instances, return 0; } -int bpf_program__nth_fd(const struct bpf_program *prog, int n) +__attribute__((alias("bpf_program_nth_fd"))) +int bpf_program__nth_fd(const struct bpf_program *prog, int n); + +static int bpf_program_nth_fd(const struct bpf_program *prog, int n) { int fd; -- cgit v1.2.3 From a3c7c7e8050fc299b42fa3d89bac253a8dfa5c0c Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 3 Nov 2021 15:08:39 -0700 Subject: bpftool: Stop using deprecated bpf_load_program() Switch to bpf_prog_load() instead. Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20211103220845.2676888-7-andrii@kernel.org --- tools/bpf/bpftool/feature.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/bpf/bpftool/feature.c b/tools/bpf/bpftool/feature.c index ade44577688e..5397077d0d9e 100644 --- a/tools/bpf/bpftool/feature.c +++ b/tools/bpf/bpftool/feature.c @@ -467,7 +467,7 @@ static bool probe_bpf_syscall(const char *define_prefix) { bool res; - bpf_load_program(BPF_PROG_TYPE_UNSPEC, NULL, 0, NULL, 0, NULL, 0); + bpf_prog_load(BPF_PROG_TYPE_UNSPEC, NULL, NULL, NULL, 0, NULL); res = (errno != ENOSYS); print_bool_feature("have_bpf_syscall", -- cgit v1.2.3 From 5c5edcdebfcf3a95257b0d8ef27a60af0e0ea03a Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 3 Nov 2021 15:08:40 -0700 Subject: libbpf: Remove deprecation attribute from struct bpf_prog_prep_result This deprecation annotation has no effect because, for a struct, the deprecation attribute has to be declared after the struct definition. But instead of moving it to the end of the struct definition, remove it. When deprecation goes into effect in libbpf v0.7, this deprecation attribute would cause libbpf's own source code compilation to trigger deprecation warnings, which is unavoidable because libbpf still has to support that API. So keep the deprecation of APIs, but don't mark structs used in the API as deprecated. 
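To make the placement rule concrete, a small sketch (illustrative only, not from the patch; the struct names are invented) of where a deprecation attribute does and does not take effect on a type. LIBBPF_DEPRECATED_SINCE() ultimately boils down to such an __attribute__((deprecated(...))) annotation:

  /* Ignored: for a struct, an attribute written before the definition has
   * no effect (compilers at most emit a -Wattributes warning).
   */
  __attribute__((deprecated("gone in v1.0")))
  struct demo_result_a {
  	int n;
  };

  /* Effective: the attribute has to follow the struct definition. */
  struct demo_result_b {
  	int n;
  } __attribute__((deprecated("gone in v1.0")));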
Fixes: e21d585cb3db ("libbpf: Deprecate multi-instance bpf_program APIs") Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Acked-by: Dave Marchevsky Link: https://lore.kernel.org/bpf/20211103220845.2676888-8-andrii@kernel.org --- tools/lib/bpf/libbpf.h | 1 - 1 file changed, 1 deletion(-) diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index bbc828667b22..039058763173 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -431,7 +431,6 @@ bpf_program__attach_iter(const struct bpf_program *prog, * one instance. In this case bpf_program__fd(prog) is equal to * bpf_program__nth_fd(prog, 0). */ -LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_program__insns() for getting bpf_program instructions") struct bpf_prog_prep_result { /* * If not NULL, load new instruction array. -- cgit v1.2.3 From 3d1d62397f4a12dedee09727b26cd5a4b254ebb7 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 3 Nov 2021 15:08:41 -0700 Subject: selftests/bpf: Fix non-strict SEC() program sections Fix few more SEC() definitions that were previously missed. Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Acked-by: Dave Marchevsky Link: https://lore.kernel.org/bpf/20211103220845.2676888-9-andrii@kernel.org --- tools/testing/selftests/bpf/progs/test_l4lb.c | 2 +- tools/testing/selftests/bpf/progs/test_l4lb_noinline.c | 2 +- tools/testing/selftests/bpf/progs/test_map_lock.c | 2 +- tools/testing/selftests/bpf/progs/test_queue_stack_map.h | 2 +- tools/testing/selftests/bpf/progs/test_skb_ctx.c | 2 +- tools/testing/selftests/bpf/progs/test_spin_lock.c | 2 +- tools/testing/selftests/bpf/progs/test_tcp_estats.c | 2 +- 7 files changed, 7 insertions(+), 7 deletions(-) diff --git a/tools/testing/selftests/bpf/progs/test_l4lb.c b/tools/testing/selftests/bpf/progs/test_l4lb.c index 04fee08863cb..c26057ec46dc 100644 --- a/tools/testing/selftests/bpf/progs/test_l4lb.c +++ b/tools/testing/selftests/bpf/progs/test_l4lb.c @@ -448,7 +448,7 @@ static __always_inline int process_packet(void *data, __u64 off, void *data_end, return bpf_redirect(ifindex, 0); } -SEC("l4lb-demo") +SEC("tc") int balancer_ingress(struct __sk_buff *ctx) { void *data_end = (void *)(long)ctx->data_end; diff --git a/tools/testing/selftests/bpf/progs/test_l4lb_noinline.c b/tools/testing/selftests/bpf/progs/test_l4lb_noinline.c index b9e2753f4f91..19e4d2071c60 100644 --- a/tools/testing/selftests/bpf/progs/test_l4lb_noinline.c +++ b/tools/testing/selftests/bpf/progs/test_l4lb_noinline.c @@ -447,7 +447,7 @@ static __noinline int process_packet(void *data, __u64 off, void *data_end, return bpf_redirect(ifindex, 0); } -SEC("l4lb-demo") +SEC("tc") int balancer_ingress(struct __sk_buff *ctx) { void *data_end = (void *)(long)ctx->data_end; diff --git a/tools/testing/selftests/bpf/progs/test_map_lock.c b/tools/testing/selftests/bpf/progs/test_map_lock.c index b5c07ae7b68f..acf073db9e8b 100644 --- a/tools/testing/selftests/bpf/progs/test_map_lock.c +++ b/tools/testing/selftests/bpf/progs/test_map_lock.c @@ -30,7 +30,7 @@ struct { __type(value, struct array_elem); } array_map SEC(".maps"); -SEC("map_lock_demo") +SEC("cgroup/skb") int bpf_map_lock_test(struct __sk_buff *skb) { struct hmap_elem zero = {}, *val; diff --git a/tools/testing/selftests/bpf/progs/test_queue_stack_map.h b/tools/testing/selftests/bpf/progs/test_queue_stack_map.h index 0fcd3ff0e38a..648e8cab7a23 100644 --- a/tools/testing/selftests/bpf/progs/test_queue_stack_map.h +++ b/tools/testing/selftests/bpf/progs/test_queue_stack_map.h @@ -24,7 +24,7 @@ struct { 
__uint(value_size, sizeof(__u32)); } map_out SEC(".maps"); -SEC("test") +SEC("tc") int _test(struct __sk_buff *skb) { void *data_end = (void *)(long)skb->data_end; diff --git a/tools/testing/selftests/bpf/progs/test_skb_ctx.c b/tools/testing/selftests/bpf/progs/test_skb_ctx.c index 1d61b36e6067..c482110cfc95 100644 --- a/tools/testing/selftests/bpf/progs/test_skb_ctx.c +++ b/tools/testing/selftests/bpf/progs/test_skb_ctx.c @@ -5,7 +5,7 @@ char _license[] SEC("license") = "GPL"; -SEC("skb_ctx") +SEC("tc") int process(struct __sk_buff *skb) { #pragma clang loop unroll(full) diff --git a/tools/testing/selftests/bpf/progs/test_spin_lock.c b/tools/testing/selftests/bpf/progs/test_spin_lock.c index 0d31a3b3505f..7e88309d3229 100644 --- a/tools/testing/selftests/bpf/progs/test_spin_lock.c +++ b/tools/testing/selftests/bpf/progs/test_spin_lock.c @@ -45,7 +45,7 @@ struct { #define CREDIT_PER_NS(delta, rate) (((delta) * rate) >> 20) -SEC("spin_lock_demo") +SEC("tc") int bpf_sping_lock_test(struct __sk_buff *skb) { volatile int credit = 0, max_credit = 100, pkt_len = 64; diff --git a/tools/testing/selftests/bpf/progs/test_tcp_estats.c b/tools/testing/selftests/bpf/progs/test_tcp_estats.c index 2c5c602c6011..e2ae049c2f85 100644 --- a/tools/testing/selftests/bpf/progs/test_tcp_estats.c +++ b/tools/testing/selftests/bpf/progs/test_tcp_estats.c @@ -244,7 +244,7 @@ static __always_inline void send_basic_event(struct sock *sk, bpf_map_update_elem(&ev_record_map, &key, &ev, BPF_ANY); } -SEC("dummy_tracepoint") +SEC("tp/dummy/tracepoint") int _dummy_tracepoint(struct dummy_tracepoint_args *arg) { if (!arg->sock) -- cgit v1.2.3 From d8e86407e5fc6c3da1e336f89bd3e9bbc1c0cf60 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 3 Nov 2021 15:08:42 -0700 Subject: selftests/bpf: Convert legacy prog load APIs to bpf_prog_load() Convert all the uses of legacy low-level BPF program loading APIs (mostly bpf_load_program_xattr(), but also some bpf_verify_program()) to bpf_prog_load() uses. 
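As a condensed sketch of the conversion pattern (illustrative; prog, prog_len, and log_buf are assumed to exist, as in the align.c hunk below), the positional prog_flags and log arguments of bpf_verify_program() become bpf_prog_load_opts fields:

  /* before */
  fd = bpf_verify_program(BPF_PROG_TYPE_SOCKET_FILTER, prog, prog_len,
  			BPF_F_STRICT_ALIGNMENT, "GPL", 0,
  			log_buf, sizeof(log_buf), 2);

  /* after */
  LIBBPF_OPTS(bpf_prog_load_opts, opts,
  	.prog_flags = BPF_F_STRICT_ALIGNMENT,
  	.log_buf = log_buf,
  	.log_size = sizeof(log_buf),
  	.log_level = 2,
  );
  fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL",
  		   prog, prog_len, &opts);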
Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20211103220845.2676888-10-andrii@kernel.org --- tools/testing/selftests/bpf/prog_tests/align.c | 11 +++++-- .../selftests/bpf/prog_tests/fexit_stress.c | 33 ++++++++++--------- .../raw_tp_writable_reject_nbd_invalid.c | 14 ++++---- .../bpf/prog_tests/raw_tp_writable_test_run.c | 29 ++++++++--------- tools/testing/selftests/bpf/prog_tests/sockopt.c | 19 +++++------ tools/testing/selftests/bpf/test_lru_map.c | 9 +---- tools/testing/selftests/bpf/test_sock.c | 23 +++++++------ tools/testing/selftests/bpf/test_sock_addr.c | 13 +++----- tools/testing/selftests/bpf/test_sysctl.c | 22 +++++-------- tools/testing/selftests/bpf/test_verifier.c | 38 +++++++++++----------- 10 files changed, 99 insertions(+), 112 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/align.c b/tools/testing/selftests/bpf/prog_tests/align.c index 5861446d0777..837f67c6bfda 100644 --- a/tools/testing/selftests/bpf/prog_tests/align.c +++ b/tools/testing/selftests/bpf/prog_tests/align.c @@ -594,6 +594,12 @@ static int do_test_single(struct bpf_align_test *test) struct bpf_insn *prog = test->insns; int prog_type = test->prog_type; char bpf_vlog_copy[32768]; + LIBBPF_OPTS(bpf_prog_load_opts, opts, + .prog_flags = BPF_F_STRICT_ALIGNMENT, + .log_buf = bpf_vlog, + .log_size = sizeof(bpf_vlog), + .log_level = 2, + ); const char *line_ptr; int cur_line = -1; int prog_len, i; @@ -601,9 +607,8 @@ static int do_test_single(struct bpf_align_test *test) int ret; prog_len = probe_filter_length(prog); - fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER, - prog, prog_len, BPF_F_STRICT_ALIGNMENT, - "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 2); + fd_prog = bpf_prog_load(prog_type ? 
: BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", + prog, prog_len, &opts); if (fd_prog < 0 && test->result != REJECT) { printf("Failed to load program.\n"); printf("%s", bpf_vlog); diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_stress.c b/tools/testing/selftests/bpf/prog_tests/fexit_stress.c index 7c9b62e971f1..e4cede6b4b2d 100644 --- a/tools/testing/selftests/bpf/prog_tests/fexit_stress.c +++ b/tools/testing/selftests/bpf/prog_tests/fexit_stress.c @@ -20,34 +20,33 @@ void test_fexit_stress(void) BPF_EXIT_INSN(), }; - struct bpf_load_program_attr load_attr = { - .prog_type = BPF_PROG_TYPE_TRACING, - .license = "GPL", - .insns = trace_program, - .insns_cnt = sizeof(trace_program) / sizeof(struct bpf_insn), + LIBBPF_OPTS(bpf_prog_load_opts, trace_opts, .expected_attach_type = BPF_TRACE_FEXIT, - }; + .log_buf = error, + .log_size = sizeof(error), + ); const struct bpf_insn skb_program[] = { BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }; - struct bpf_load_program_attr skb_load_attr = { - .prog_type = BPF_PROG_TYPE_SOCKET_FILTER, - .license = "GPL", - .insns = skb_program, - .insns_cnt = sizeof(skb_program) / sizeof(struct bpf_insn), - }; + LIBBPF_OPTS(bpf_prog_load_opts, skb_opts, + .log_buf = error, + .log_size = sizeof(error), + ); err = libbpf_find_vmlinux_btf_id("bpf_fentry_test1", - load_attr.expected_attach_type); + trace_opts.expected_attach_type); if (CHECK(err <= 0, "find_vmlinux_btf_id", "failed: %d\n", err)) goto out; - load_attr.attach_btf_id = err; + trace_opts.attach_btf_id = err; for (i = 0; i < CNT; i++) { - fexit_fd[i] = bpf_load_program_xattr(&load_attr, error, sizeof(error)); + fexit_fd[i] = bpf_prog_load(BPF_PROG_TYPE_TRACING, NULL, "GPL", + trace_program, + sizeof(trace_program) / sizeof(struct bpf_insn), + &trace_opts); if (CHECK(fexit_fd[i] < 0, "fexit loaded", "failed: %d errno %d\n", fexit_fd[i], errno)) goto out; @@ -57,7 +56,9 @@ void test_fexit_stress(void) goto out; } - filter_fd = bpf_load_program_xattr(&skb_load_attr, error, sizeof(error)); + filter_fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", + skb_program, sizeof(skb_program) / sizeof(struct bpf_insn), + &skb_opts); if (CHECK(filter_fd < 0, "test_program_loaded", "failed: %d errno %d\n", filter_fd, errno)) goto out; diff --git a/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_reject_nbd_invalid.c b/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_reject_nbd_invalid.c index 9807336a3016..e2f1445b0e10 100644 --- a/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_reject_nbd_invalid.c +++ b/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_reject_nbd_invalid.c @@ -18,15 +18,15 @@ void test_raw_tp_writable_reject_nbd_invalid(void) BPF_EXIT_INSN(), }; - struct bpf_load_program_attr load_attr = { - .prog_type = BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, - .license = "GPL v2", - .insns = program, - .insns_cnt = sizeof(program) / sizeof(struct bpf_insn), + LIBBPF_OPTS(bpf_prog_load_opts, opts, .log_level = 2, - }; + .log_buf = error, + .log_size = sizeof(error), + ); - bpf_fd = bpf_load_program_xattr(&load_attr, error, sizeof(error)); + bpf_fd = bpf_prog_load(BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, NULL, "GPL v2", + program, sizeof(program) / sizeof(struct bpf_insn), + &opts); if (CHECK(bpf_fd < 0, "bpf_raw_tracepoint_writable load", "failed: %d errno %d\n", bpf_fd, errno)) return; diff --git a/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c b/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c index ddefa1192e5d..239baccabccb 100644 --- 
a/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c +++ b/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c @@ -17,15 +17,15 @@ void serial_test_raw_tp_writable_test_run(void) BPF_EXIT_INSN(), }; - struct bpf_load_program_attr load_attr = { - .prog_type = BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, - .license = "GPL v2", - .insns = trace_program, - .insns_cnt = sizeof(trace_program) / sizeof(struct bpf_insn), + LIBBPF_OPTS(bpf_prog_load_opts, trace_opts, .log_level = 2, - }; + .log_buf = error, + .log_size = sizeof(error), + ); - int bpf_fd = bpf_load_program_xattr(&load_attr, error, sizeof(error)); + int bpf_fd = bpf_prog_load(BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, NULL, "GPL v2", + trace_program, sizeof(trace_program) / sizeof(struct bpf_insn), + &trace_opts); if (CHECK(bpf_fd < 0, "bpf_raw_tracepoint_writable loaded", "failed: %d errno %d\n", bpf_fd, errno)) return; @@ -35,15 +35,14 @@ void serial_test_raw_tp_writable_test_run(void) BPF_EXIT_INSN(), }; - struct bpf_load_program_attr skb_load_attr = { - .prog_type = BPF_PROG_TYPE_SOCKET_FILTER, - .license = "GPL v2", - .insns = skb_program, - .insns_cnt = sizeof(skb_program) / sizeof(struct bpf_insn), - }; + LIBBPF_OPTS(bpf_prog_load_opts, skb_opts, + .log_buf = error, + .log_size = sizeof(error), + ); - int filter_fd = - bpf_load_program_xattr(&skb_load_attr, error, sizeof(error)); + int filter_fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL v2", + skb_program, sizeof(skb_program) / sizeof(struct bpf_insn), + &skb_opts); if (CHECK(filter_fd < 0, "test_program_loaded", "failed: %d errno %d\n", filter_fd, errno)) goto out_bpffd; diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt.c b/tools/testing/selftests/bpf/prog_tests/sockopt.c index 3e8517a8395a..cd09f4c7dd92 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockopt.c +++ b/tools/testing/selftests/bpf/prog_tests/sockopt.c @@ -852,22 +852,21 @@ static struct sockopt_test { static int load_prog(const struct bpf_insn *insns, enum bpf_attach_type expected_attach_type) { - struct bpf_load_program_attr attr = { - .prog_type = BPF_PROG_TYPE_CGROUP_SOCKOPT, + LIBBPF_OPTS(bpf_prog_load_opts, opts, .expected_attach_type = expected_attach_type, - .insns = insns, - .license = "GPL", .log_level = 2, - }; - int fd; + .log_buf = bpf_log_buf, + .log_size = sizeof(bpf_log_buf), + ); + int fd, insns_cnt = 0; for (; - insns[attr.insns_cnt].code != (BPF_JMP | BPF_EXIT); - attr.insns_cnt++) { + insns[insns_cnt].code != (BPF_JMP | BPF_EXIT); + insns_cnt++) { } - attr.insns_cnt++; + insns_cnt++; - fd = bpf_load_program_xattr(&attr, bpf_log_buf, sizeof(bpf_log_buf)); + fd = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SOCKOPT, NULL, "GPL", insns, insns_cnt, &opts); if (verbose && fd < 0) fprintf(stderr, "%s\n", bpf_log_buf); diff --git a/tools/testing/selftests/bpf/test_lru_map.c b/tools/testing/selftests/bpf/test_lru_map.c index 7e9049fa3edf..7f3d1d8460b4 100644 --- a/tools/testing/selftests/bpf/test_lru_map.c +++ b/tools/testing/selftests/bpf/test_lru_map.c @@ -42,7 +42,6 @@ static int create_map(int map_type, int map_flags, unsigned int size) static int bpf_map_lookup_elem_with_ref_bit(int fd, unsigned long long key, void *value) { - struct bpf_load_program_attr prog; struct bpf_create_map_attr map; struct bpf_insn insns[] = { BPF_LD_MAP_VALUE(BPF_REG_9, 0, 0), @@ -76,13 +75,7 @@ static int bpf_map_lookup_elem_with_ref_bit(int fd, unsigned long long key, insns[0].imm = mfd; - memset(&prog, 0, sizeof(prog)); - prog.prog_type = BPF_PROG_TYPE_SCHED_CLS; - prog.insns = 
insns; - prog.insns_cnt = ARRAY_SIZE(insns); - prog.license = "GPL"; - - pfd = bpf_load_program_xattr(&prog, NULL, 0); + pfd = bpf_prog_load(BPF_PROG_TYPE_SCHED_CLS, NULL, "GPL", insns, ARRAY_SIZE(insns), NULL); if (pfd < 0) { close(mfd); return -1; diff --git a/tools/testing/selftests/bpf/test_sock.c b/tools/testing/selftests/bpf/test_sock.c index 9613f7538840..e8edd3dd3ec2 100644 --- a/tools/testing/selftests/bpf/test_sock.c +++ b/tools/testing/selftests/bpf/test_sock.c @@ -328,18 +328,17 @@ static size_t probe_prog_length(const struct bpf_insn *fp) static int load_sock_prog(const struct bpf_insn *prog, enum bpf_attach_type attach_type) { - struct bpf_load_program_attr attr; - int ret; - - memset(&attr, 0, sizeof(struct bpf_load_program_attr)); - attr.prog_type = BPF_PROG_TYPE_CGROUP_SOCK; - attr.expected_attach_type = attach_type; - attr.insns = prog; - attr.insns_cnt = probe_prog_length(attr.insns); - attr.license = "GPL"; - attr.log_level = 2; - - ret = bpf_load_program_xattr(&attr, bpf_log_buf, BPF_LOG_BUF_SIZE); + LIBBPF_OPTS(bpf_prog_load_opts, opts); + int ret, insn_cnt; + + insn_cnt = probe_prog_length(prog); + + opts.expected_attach_type = attach_type; + opts.log_buf = bpf_log_buf; + opts.log_size = BPF_LOG_BUF_SIZE; + opts.log_level = 2; + + ret = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SOCK, NULL, "GPL", prog, insn_cnt, &opts); if (verbose && ret < 0) fprintf(stderr, "%s\n", bpf_log_buf); diff --git a/tools/testing/selftests/bpf/test_sock_addr.c b/tools/testing/selftests/bpf/test_sock_addr.c index aa3f185fcb89..05c9e4944c01 100644 --- a/tools/testing/selftests/bpf/test_sock_addr.c +++ b/tools/testing/selftests/bpf/test_sock_addr.c @@ -645,17 +645,14 @@ static int mk_sockaddr(int domain, const char *ip, unsigned short port, static int load_insns(const struct sock_addr_test *test, const struct bpf_insn *insns, size_t insns_cnt) { - struct bpf_load_program_attr load_attr; + LIBBPF_OPTS(bpf_prog_load_opts, opts); int ret; - memset(&load_attr, 0, sizeof(struct bpf_load_program_attr)); - load_attr.prog_type = BPF_PROG_TYPE_CGROUP_SOCK_ADDR; - load_attr.expected_attach_type = test->expected_attach_type; - load_attr.insns = insns; - load_attr.insns_cnt = insns_cnt; - load_attr.license = "GPL"; + opts.expected_attach_type = test->expected_attach_type; + opts.log_buf = bpf_log_buf; + opts.log_size = BPF_LOG_BUF_SIZE; - ret = bpf_load_program_xattr(&load_attr, bpf_log_buf, BPF_LOG_BUF_SIZE); + ret = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SOCK_ADDR, NULL, "GPL", insns, insns_cnt, &opts); if (ret < 0 && test->expected_result != LOAD_REJECT) { log_err(">>> Loading program error.\n" ">>> Verifier output:\n%s\n-------\n", bpf_log_buf); diff --git a/tools/testing/selftests/bpf/test_sysctl.c b/tools/testing/selftests/bpf/test_sysctl.c index a3bb6d399daa..4a395d7a8ea9 100644 --- a/tools/testing/selftests/bpf/test_sysctl.c +++ b/tools/testing/selftests/bpf/test_sysctl.c @@ -1435,14 +1435,10 @@ static int load_sysctl_prog_insns(struct sysctl_test *test, const char *sysctl_path) { struct bpf_insn *prog = test->insns; - struct bpf_load_program_attr attr; - int ret; + LIBBPF_OPTS(bpf_prog_load_opts, opts); + int ret, insn_cnt; - memset(&attr, 0, sizeof(struct bpf_load_program_attr)); - attr.prog_type = BPF_PROG_TYPE_CGROUP_SYSCTL; - attr.insns = prog; - attr.insns_cnt = probe_prog_length(attr.insns); - attr.license = "GPL"; + insn_cnt = probe_prog_length(prog); if (test->fixup_value_insn) { char buf[128]; @@ -1465,7 +1461,10 @@ static int load_sysctl_prog_insns(struct sysctl_test *test, return -1; } - ret = 
bpf_load_program_xattr(&attr, bpf_log_buf, BPF_LOG_BUF_SIZE); + opts.log_buf = bpf_log_buf; + opts.log_size = BPF_LOG_BUF_SIZE; + + ret = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SYSCTL, NULL, "GPL", prog, insn_cnt, &opts); if (ret < 0 && test->result != LOAD_REJECT) { log_err(">>> Loading program error.\n" ">>> Verifier output:\n%s\n-------\n", bpf_log_buf); @@ -1476,15 +1475,10 @@ static int load_sysctl_prog_insns(struct sysctl_test *test, static int load_sysctl_prog_file(struct sysctl_test *test) { - struct bpf_prog_load_attr attr; struct bpf_object *obj; int prog_fd; - memset(&attr, 0, sizeof(struct bpf_prog_load_attr)); - attr.file = test->prog_file; - attr.prog_type = BPF_PROG_TYPE_CGROUP_SYSCTL; - - if (bpf_prog_load_xattr(&attr, &obj, &prog_fd)) { + if (bpf_prog_test_load(test->prog_file, BPF_PROG_TYPE_CGROUP_SYSCTL, &obj, &prog_fd)) { if (test->result != LOAD_REJECT) log_err(">>> Loading program (%s) error.\n", test->prog_file); diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 25afe423b3f0..e512b715a785 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -498,8 +498,7 @@ static int create_prog_dummy_simple(enum bpf_prog_type prog_type, int ret) BPF_EXIT_INSN(), }; - return bpf_load_program(prog_type, prog, - ARRAY_SIZE(prog), "GPL", 0, NULL, 0); + return bpf_prog_load(prog_type, NULL, "GPL", prog, ARRAY_SIZE(prog), NULL); } static int create_prog_dummy_loop(enum bpf_prog_type prog_type, int mfd, @@ -514,8 +513,7 @@ static int create_prog_dummy_loop(enum bpf_prog_type prog_type, int mfd, BPF_EXIT_INSN(), }; - return bpf_load_program(prog_type, prog, - ARRAY_SIZE(prog), "GPL", 0, NULL, 0); + return bpf_prog_load(prog_type, NULL, "GPL", prog, ARRAY_SIZE(prog), NULL); } static int create_prog_array(enum bpf_prog_type prog_type, uint32_t max_elem, @@ -1045,7 +1043,7 @@ static void do_test_single(struct bpf_test *test, bool unpriv, int fd_prog, expected_ret, alignment_prevented_execution; int prog_len, prog_type = test->prog_type; struct bpf_insn *prog = test->insns; - struct bpf_load_program_attr attr; + LIBBPF_OPTS(bpf_prog_load_opts, opts); int run_errs, run_successes; int map_fds[MAX_NR_MAPS]; const char *expected_err; @@ -1085,32 +1083,34 @@ static void do_test_single(struct bpf_test *test, bool unpriv, test->result_unpriv : test->result; expected_err = unpriv && test->errstr_unpriv ? 
test->errstr_unpriv : test->errstr; - memset(&attr, 0, sizeof(attr)); - attr.prog_type = prog_type; - attr.expected_attach_type = test->expected_attach_type; - attr.insns = prog; - attr.insns_cnt = prog_len; - attr.license = "GPL"; + + opts.expected_attach_type = test->expected_attach_type; if (verbose) - attr.log_level = 1; + opts.log_level = 1; else if (expected_ret == VERBOSE_ACCEPT) - attr.log_level = 2; + opts.log_level = 2; else - attr.log_level = 4; - attr.prog_flags = pflags; + opts.log_level = 4; + opts.prog_flags = pflags; if (prog_type == BPF_PROG_TYPE_TRACING && test->kfunc) { - attr.attach_btf_id = libbpf_find_vmlinux_btf_id(test->kfunc, - attr.expected_attach_type); - if (attr.attach_btf_id < 0) { + int attach_btf_id; + + attach_btf_id = libbpf_find_vmlinux_btf_id(test->kfunc, + opts.expected_attach_type); + if (attach_btf_id < 0) { printf("FAIL\nFailed to find BTF ID for '%s'!\n", test->kfunc); (*errors)++; return; } + + opts.attach_btf_id = attach_btf_id; } - fd_prog = bpf_load_program_xattr(&attr, bpf_vlog, sizeof(bpf_vlog)); + opts.log_buf = bpf_vlog; + opts.log_size = sizeof(bpf_vlog); + fd_prog = bpf_prog_load(prog_type, NULL, "GPL", prog, prog_len, &opts); saved_errno = errno; /* BPF_PROG_TYPE_TRACING requires more setup and -- cgit v1.2.3 From f87c1930ac2951d7fb3bacb523c24046c81015ed Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 3 Nov 2021 15:08:43 -0700 Subject: selftests/bpf: Merge test_stub.c into testing_helpers.c Move testing prog and object load wrappers (bpf_prog_test_load and bpf_test_load_program) into testing_helpers.{c,h} and get rid of otherwise useless test_stub.c. Make testing_helpers.c available to non-test_progs binaries as well. Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Acked-by: Dave Marchevsky Link: https://lore.kernel.org/bpf/20211103220845.2676888-11-andrii@kernel.org --- tools/testing/selftests/bpf/Makefile | 33 ++++++++-------- tools/testing/selftests/bpf/test_stub.c | 44 --------------------- tools/testing/selftests/bpf/testing_helpers.c | 55 +++++++++++++++++++++++++++ tools/testing/selftests/bpf/testing_helpers.h | 6 +++ 4 files changed, 78 insertions(+), 60 deletions(-) delete mode 100644 tools/testing/selftests/bpf/test_stub.c diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index c4497a4af3fe..5588c622d266 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -178,10 +178,6 @@ $(OUTPUT)/bpf_testmod.ko: $(VMLINUX_BTF) $(wildcard bpf_testmod/Makefile bpf_tes $(Q)$(MAKE) $(submake_extras) -C bpf_testmod $(Q)cp bpf_testmod/bpf_testmod.ko $@ -$(OUTPUT)/test_stub.o: test_stub.c $(BPFOBJ) - $(call msg,CC,,$@) - $(Q)$(CC) -c $(CFLAGS) -o $@ $< - DEFAULT_BPFTOOL := $(HOST_SCRATCH_DIR)/sbin/bpftool $(OUTPUT)/runqslower: $(BPFOBJ) | $(DEFAULT_BPFTOOL) $(RUNQSLOWER_OUTPUT) @@ -194,18 +190,23 @@ $(OUTPUT)/runqslower: $(BPFOBJ) | $(DEFAULT_BPFTOOL) $(RUNQSLOWER_OUTPUT) TEST_GEN_PROGS_EXTENDED += $(DEFAULT_BPFTOOL) -$(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED): $(OUTPUT)/test_stub.o $(BPFOBJ) - -$(OUTPUT)/test_dev_cgroup: cgroup_helpers.c -$(OUTPUT)/test_skb_cgroup_id_user: cgroup_helpers.c -$(OUTPUT)/test_sock: cgroup_helpers.c -$(OUTPUT)/test_sock_addr: cgroup_helpers.c -$(OUTPUT)/test_sockmap: cgroup_helpers.c -$(OUTPUT)/test_tcpnotify_user: cgroup_helpers.c trace_helpers.c -$(OUTPUT)/get_cgroup_id_user: cgroup_helpers.c -$(OUTPUT)/test_cgroup_storage: cgroup_helpers.c -$(OUTPUT)/test_sock_fields: cgroup_helpers.c 
-$(OUTPUT)/test_sysctl: cgroup_helpers.c +$(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED): $(BPFOBJ) + +$(OUTPUT)/test_dev_cgroup: cgroup_helpers.c testing_helpers.o +$(OUTPUT)/test_skb_cgroup_id_user: cgroup_helpers.c testing_helpers.o +$(OUTPUT)/test_sock: cgroup_helpers.c testing_helpers.o +$(OUTPUT)/test_sock_addr: cgroup_helpers.c testing_helpers.o +$(OUTPUT)/test_sockmap: cgroup_helpers.c testing_helpers.o +$(OUTPUT)/test_tcpnotify_user: cgroup_helpers.c trace_helpers.c testing_helpers.o +$(OUTPUT)/get_cgroup_id_user: cgroup_helpers.c testing_helpers.o +$(OUTPUT)/test_cgroup_storage: cgroup_helpers.c testing_helpers.o +$(OUTPUT)/test_sock_fields: cgroup_helpers.c testing_helpers.o +$(OUTPUT)/test_sysctl: cgroup_helpers.c testing_helpers.o +$(OUTPUT)/test_tag: testing_helpers.o +$(OUTPUT)/test_lirc_mode2_user: testing_helpers.o +$(OUTPUT)/xdping: testing_helpers.o +$(OUTPUT)/flow_dissector_load: testing_helpers.o +$(OUTPUT)/test_maps: testing_helpers.o BPFTOOL ?= $(DEFAULT_BPFTOOL) $(DEFAULT_BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) \ diff --git a/tools/testing/selftests/bpf/test_stub.c b/tools/testing/selftests/bpf/test_stub.c deleted file mode 100644 index 47e132726203..000000000000 --- a/tools/testing/selftests/bpf/test_stub.c +++ /dev/null @@ -1,44 +0,0 @@ -// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) -/* Copyright (C) 2019 Netronome Systems, Inc. */ - -#include -#include -#include - -int extra_prog_load_log_flags = 0; - -int bpf_prog_test_load(const char *file, enum bpf_prog_type type, - struct bpf_object **pobj, int *prog_fd) -{ - struct bpf_prog_load_attr attr; - - memset(&attr, 0, sizeof(struct bpf_prog_load_attr)); - attr.file = file; - attr.prog_type = type; - attr.expected_attach_type = 0; - attr.prog_flags = BPF_F_TEST_RND_HI32; - attr.log_level = extra_prog_load_log_flags; - - return bpf_prog_load_xattr(&attr, pobj, prog_fd); -} - -int bpf_test_load_program(enum bpf_prog_type type, const struct bpf_insn *insns, - size_t insns_cnt, const char *license, - __u32 kern_version, char *log_buf, - size_t log_buf_sz) -{ - struct bpf_load_program_attr load_attr; - - memset(&load_attr, 0, sizeof(struct bpf_load_program_attr)); - load_attr.prog_type = type; - load_attr.expected_attach_type = 0; - load_attr.name = NULL; - load_attr.insns = insns; - load_attr.insns_cnt = insns_cnt; - load_attr.license = license; - load_attr.kern_version = kern_version; - load_attr.prog_flags = BPF_F_TEST_RND_HI32; - load_attr.log_level = extra_prog_load_log_flags; - - return bpf_load_program_xattr(&load_attr, log_buf, log_buf_sz); -} diff --git a/tools/testing/selftests/bpf/testing_helpers.c b/tools/testing/selftests/bpf/testing_helpers.c index 800d503e5cb4..ef61d43adfe4 100644 --- a/tools/testing/selftests/bpf/testing_helpers.c +++ b/tools/testing/selftests/bpf/testing_helpers.c @@ -1,7 +1,11 @@ // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) +/* Copyright (C) 2019 Netronome Systems, Inc. */ /* Copyright (C) 2020 Facebook, Inc. 
*/ #include +#include #include +#include +#include #include "testing_helpers.h" int parse_num_list(const char *s, bool **num_set, int *num_set_len) @@ -78,3 +82,54 @@ __u32 link_info_prog_id(const struct bpf_link *link, struct bpf_link_info *info) } return info->prog_id; } + +int extra_prog_load_log_flags = 0; + +int bpf_prog_test_load(const char *file, enum bpf_prog_type type, + struct bpf_object **pobj, int *prog_fd) +{ + struct bpf_object *obj; + struct bpf_program *prog; + int err; + + obj = bpf_object__open(file); + if (!obj) + return -errno; + + prog = bpf_object__next_program(obj, NULL); + if (!prog) { + err = -ENOENT; + goto err_out; + } + + if (type != BPF_PROG_TYPE_UNSPEC) + bpf_program__set_type(prog, type); + + err = bpf_object__load(obj); + if (err) + goto err_out; + + *pobj = obj; + *prog_fd = bpf_program__fd(prog); + + return 0; +err_out: + bpf_object__close(obj); + return err; +} + +int bpf_test_load_program(enum bpf_prog_type type, const struct bpf_insn *insns, + size_t insns_cnt, const char *license, + __u32 kern_version, char *log_buf, + size_t log_buf_sz) +{ + LIBBPF_OPTS(bpf_prog_load_opts, opts, + .kern_version = kern_version, + .prog_flags = BPF_F_TEST_RND_HI32, + .log_level = extra_prog_load_log_flags, + .log_buf = log_buf, + .log_size = log_buf_sz, + ); + + return bpf_prog_load(type, NULL, license, insns, insns_cnt, &opts); +} diff --git a/tools/testing/selftests/bpf/testing_helpers.h b/tools/testing/selftests/bpf/testing_helpers.h index d4f8e749611b..f46ebc476ee8 100644 --- a/tools/testing/selftests/bpf/testing_helpers.h +++ b/tools/testing/selftests/bpf/testing_helpers.h @@ -6,3 +6,9 @@ int parse_num_list(const char *s, bool **set, int *set_len); __u32 link_info_prog_id(const struct bpf_link *link, struct bpf_link_info *info); +int bpf_prog_test_load(const char *file, enum bpf_prog_type type, + struct bpf_object **pobj, int *prog_fd); +int bpf_test_load_program(enum bpf_prog_type type, const struct bpf_insn *insns, + size_t insns_cnt, const char *license, + __u32 kern_version, char *log_buf, + size_t log_buf_sz); -- cgit v1.2.3 From cbdb1461dcf45765a036e9f6975ffe19e69bdc33 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 3 Nov 2021 15:08:44 -0700 Subject: selftests/bpf: Use explicit bpf_prog_test_load() calls everywhere -Dbpf_prog_load_deprecated=bpf_prog_test_load trick is both ugly and breaks when deprecation goes into effect due to macro magic. Convert all the uses to explicit bpf_prog_test_load() calls which avoid deprecation errors and makes everything less magical. 
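Concretely, a converted callsite ends up using the selftest-local wrapper declared in testing_helpers.h above; a rough sketch, with the object name and program type as placeholders rather than taken from any one test:

  struct bpf_object *obj;
  int prog_fd, err;

  err = bpf_prog_test_load("test_pkt_access.o", BPF_PROG_TYPE_SCHED_CLS,
                           &obj, &prog_fd);
  if (err)
      return;                /* open/load failed */
  /* ... exercise prog_fd ... */
  bpf_object__close(obj);
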
Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Acked-by: Dave Marchevsky Link: https://lore.kernel.org/bpf/20211103220845.2676888-12-andrii@kernel.org --- tools/testing/selftests/bpf/Makefile | 2 +- tools/testing/selftests/bpf/flow_dissector_load.h | 3 ++- tools/testing/selftests/bpf/get_cgroup_id_user.c | 5 +++-- tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c | 2 +- tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c | 8 ++++---- .../selftests/bpf/prog_tests/get_stack_raw_tp.c | 4 ++-- tools/testing/selftests/bpf/prog_tests/global_data.c | 2 +- .../selftests/bpf/prog_tests/global_func_args.c | 2 +- tools/testing/selftests/bpf/prog_tests/kfree_skb.c | 2 +- tools/testing/selftests/bpf/prog_tests/l4lb_all.c | 2 +- .../selftests/bpf/prog_tests/load_bytes_relative.c | 2 +- tools/testing/selftests/bpf/prog_tests/map_lock.c | 4 ++-- tools/testing/selftests/bpf/prog_tests/pkt_access.c | 2 +- tools/testing/selftests/bpf/prog_tests/pkt_md_access.c | 2 +- .../testing/selftests/bpf/prog_tests/queue_stack_map.c | 2 +- tools/testing/selftests/bpf/prog_tests/skb_ctx.c | 2 +- tools/testing/selftests/bpf/prog_tests/skb_helpers.c | 2 +- tools/testing/selftests/bpf/prog_tests/spinlock.c | 4 ++-- .../testing/selftests/bpf/prog_tests/stacktrace_map.c | 2 +- .../selftests/bpf/prog_tests/stacktrace_map_raw_tp.c | 2 +- tools/testing/selftests/bpf/prog_tests/tailcalls.c | 18 +++++++++--------- .../selftests/bpf/prog_tests/task_fd_query_rawtp.c | 2 +- .../selftests/bpf/prog_tests/task_fd_query_tp.c | 4 ++-- tools/testing/selftests/bpf/prog_tests/tcp_estats.c | 2 +- .../testing/selftests/bpf/prog_tests/tp_attach_query.c | 2 +- tools/testing/selftests/bpf/prog_tests/xdp.c | 2 +- .../testing/selftests/bpf/prog_tests/xdp_adjust_tail.c | 6 +++--- tools/testing/selftests/bpf/prog_tests/xdp_attach.c | 6 +++--- tools/testing/selftests/bpf/prog_tests/xdp_info.c | 2 +- tools/testing/selftests/bpf/prog_tests/xdp_perf.c | 2 +- tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c | 2 +- tools/testing/selftests/bpf/test_dev_cgroup.c | 3 ++- tools/testing/selftests/bpf/test_lirc_mode2_user.c | 6 ++++-- tools/testing/selftests/bpf/test_maps.c | 7 ++++--- tools/testing/selftests/bpf/test_sysctl.c | 1 + tools/testing/selftests/bpf/test_tcpnotify_user.c | 3 ++- tools/testing/selftests/bpf/xdping.c | 3 ++- 37 files changed, 68 insertions(+), 59 deletions(-) diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 5588c622d266..2016c583ed20 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -24,7 +24,6 @@ SAN_CFLAGS ?= CFLAGS += -g -O0 -rdynamic -Wall $(GENFLAGS) $(SAN_CFLAGS) \ -I$(CURDIR) -I$(INCLUDE_DIR) -I$(GENDIR) -I$(LIBDIR) \ -I$(TOOLSINCDIR) -I$(APIDIR) -I$(OUTPUT) \ - -Dbpf_prog_load_deprecated=bpf_prog_test_load \ -Dbpf_load_program=bpf_test_load_program LDLIBS += -lcap -lelf -lz -lrt -lpthread @@ -207,6 +206,7 @@ $(OUTPUT)/test_lirc_mode2_user: testing_helpers.o $(OUTPUT)/xdping: testing_helpers.o $(OUTPUT)/flow_dissector_load: testing_helpers.o $(OUTPUT)/test_maps: testing_helpers.o +$(OUTPUT)/test_verifier: testing_helpers.o BPFTOOL ?= $(DEFAULT_BPFTOOL) $(DEFAULT_BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) \ diff --git a/tools/testing/selftests/bpf/flow_dissector_load.h b/tools/testing/selftests/bpf/flow_dissector_load.h index 9d0acc2fc6cc..f40b585f4e7e 100644 --- a/tools/testing/selftests/bpf/flow_dissector_load.h +++ b/tools/testing/selftests/bpf/flow_dissector_load.h @@ -4,6 +4,7 @@ #include 
#include +#include "testing_helpers.h" static inline int bpf_flow_load(struct bpf_object **obj, const char *path, @@ -18,7 +19,7 @@ static inline int bpf_flow_load(struct bpf_object **obj, int prog_array_fd; int ret, fd, i; - ret = bpf_prog_load(path, BPF_PROG_TYPE_FLOW_DISSECTOR, obj, + ret = bpf_prog_test_load(path, BPF_PROG_TYPE_FLOW_DISSECTOR, obj, prog_fd); if (ret) return ret; diff --git a/tools/testing/selftests/bpf/get_cgroup_id_user.c b/tools/testing/selftests/bpf/get_cgroup_id_user.c index 99628e1a1e58..3a7b82bd9e94 100644 --- a/tools/testing/selftests/bpf/get_cgroup_id_user.c +++ b/tools/testing/selftests/bpf/get_cgroup_id_user.c @@ -19,6 +19,7 @@ #include #include "cgroup_helpers.h" +#include "testing_helpers.h" #include "bpf_rlimit.h" #define CHECK(condition, tag, format...) ({ \ @@ -66,8 +67,8 @@ int main(int argc, char **argv) if (CHECK(cgroup_fd < 0, "cgroup_setup_and_join", "err %d errno %d\n", cgroup_fd, errno)) return 1; - err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd); - if (CHECK(err, "bpf_prog_load", "err %d errno %d\n", err, errno)) + err = bpf_prog_test_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd); + if (CHECK(err, "bpf_prog_test_load", "err %d errno %d\n", err, errno)) goto cleanup_cgroup_env; cgidmap_fd = bpf_find_map(__func__, obj, "cg_ids"); diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c b/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c index eb8eeebe6935..0a6c5f00abd4 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c @@ -48,7 +48,7 @@ void serial_test_bpf_obj_id(void) bzero(zeros, sizeof(zeros)); for (i = 0; i < nr_iters; i++) { now = time(NULL); - err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, + err = bpf_prog_test_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &objs[i], &prog_fds[i]); /* test_obj_id.o is a dumb prog. It should never fail * to load. 
diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c index 9cff14a23bb7..fdd603ebda28 100644 --- a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c +++ b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c @@ -65,7 +65,7 @@ static void test_fexit_bpf2bpf_common(const char *obj_file, int err, tgt_fd, i; struct btf *btf; - err = bpf_prog_load(target_obj_file, BPF_PROG_TYPE_UNSPEC, + err = bpf_prog_test_load(target_obj_file, BPF_PROG_TYPE_UNSPEC, &tgt_obj, &tgt_fd); if (!ASSERT_OK(err, "tgt_prog_load")) return; @@ -224,7 +224,7 @@ static int test_second_attach(struct bpf_object *obj) if (CHECK(!prog, "find_prog", "prog %s not found\n", prog_name)) return -ENOENT; - err = bpf_prog_load(tgt_obj_file, BPF_PROG_TYPE_UNSPEC, + err = bpf_prog_test_load(tgt_obj_file, BPF_PROG_TYPE_UNSPEC, &tgt_obj, &tgt_fd); if (CHECK(err, "second_prog_load", "file %s err %d errno %d\n", tgt_obj_file, err, errno)) @@ -274,7 +274,7 @@ static void test_fmod_ret_freplace(void) __u32 duration = 0; int err, pkt_fd, attach_prog_fd; - err = bpf_prog_load(tgt_name, BPF_PROG_TYPE_UNSPEC, + err = bpf_prog_test_load(tgt_name, BPF_PROG_TYPE_UNSPEC, &pkt_obj, &pkt_fd); /* the target prog should load fine */ if (CHECK(err, "tgt_prog_load", "file %s err %d errno %d\n", @@ -341,7 +341,7 @@ static void test_obj_load_failure_common(const char *obj_file, int err, pkt_fd; __u32 duration = 0; - err = bpf_prog_load(target_obj_file, BPF_PROG_TYPE_UNSPEC, + err = bpf_prog_test_load(target_obj_file, BPF_PROG_TYPE_UNSPEC, &pkt_obj, &pkt_fd); /* the target prog should load fine */ if (CHECK(err, "tgt_prog_load", "file %s err %d errno %d\n", diff --git a/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c b/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c index 522237aa4470..569fcc6ed660 100644 --- a/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c +++ b/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c @@ -94,11 +94,11 @@ void test_get_stack_raw_tp(void) struct bpf_map *map; cpu_set_t cpu_set; - err = bpf_prog_load(file_err, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd); + err = bpf_prog_test_load(file_err, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd); if (CHECK(err >= 0, "prog_load raw tp", "err %d errno %d\n", err, errno)) return; - err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd); + err = bpf_prog_test_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd); if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno)) return; diff --git a/tools/testing/selftests/bpf/prog_tests/global_data.c b/tools/testing/selftests/bpf/prog_tests/global_data.c index afd8639f9a94..9da131b32e13 100644 --- a/tools/testing/selftests/bpf/prog_tests/global_data.c +++ b/tools/testing/selftests/bpf/prog_tests/global_data.c @@ -136,7 +136,7 @@ void test_global_data(void) struct bpf_object *obj; int err, prog_fd; - err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); + err = bpf_prog_test_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); if (CHECK(err, "load program", "error %d loading %s\n", err, file)) return; diff --git a/tools/testing/selftests/bpf/prog_tests/global_func_args.c b/tools/testing/selftests/bpf/prog_tests/global_func_args.c index 8bcc2869102f..93a2439237b0 100644 --- a/tools/testing/selftests/bpf/prog_tests/global_func_args.c +++ b/tools/testing/selftests/bpf/prog_tests/global_func_args.c @@ -44,7 +44,7 @@ void test_global_func_args(void) struct bpf_object *obj; int err, prog_fd; - err = 
bpf_prog_load(file, BPF_PROG_TYPE_CGROUP_SKB, &obj, &prog_fd); + err = bpf_prog_test_load(file, BPF_PROG_TYPE_CGROUP_SKB, &obj, &prog_fd); if (CHECK(err, "load program", "error %d loading %s\n", err, file)) return; diff --git a/tools/testing/selftests/bpf/prog_tests/kfree_skb.c b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c index 01e51d16c8b8..885413ed5c96 100644 --- a/tools/testing/selftests/bpf/prog_tests/kfree_skb.c +++ b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c @@ -74,7 +74,7 @@ void serial_test_kfree_skb(void) const int zero = 0; bool test_ok[2]; - err = bpf_prog_load("./test_pkt_access.o", BPF_PROG_TYPE_SCHED_CLS, + err = bpf_prog_test_load("./test_pkt_access.o", BPF_PROG_TYPE_SCHED_CLS, &obj, &tattr.prog_fd); if (CHECK(err, "prog_load sched cls", "err %d errno %d\n", err, errno)) return; diff --git a/tools/testing/selftests/bpf/prog_tests/l4lb_all.c b/tools/testing/selftests/bpf/prog_tests/l4lb_all.c index 8073105548ff..540ef28fabff 100644 --- a/tools/testing/selftests/bpf/prog_tests/l4lb_all.c +++ b/tools/testing/selftests/bpf/prog_tests/l4lb_all.c @@ -30,7 +30,7 @@ static void test_l4lb(const char *file) char buf[128]; u32 *magic = (u32 *)buf; - err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); + err = bpf_prog_test_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); if (CHECK_FAIL(err)) return; diff --git a/tools/testing/selftests/bpf/prog_tests/load_bytes_relative.c b/tools/testing/selftests/bpf/prog_tests/load_bytes_relative.c index 5a2a689dbb68..4e0b2ec057aa 100644 --- a/tools/testing/selftests/bpf/prog_tests/load_bytes_relative.c +++ b/tools/testing/selftests/bpf/prog_tests/load_bytes_relative.c @@ -27,7 +27,7 @@ void test_load_bytes_relative(void) if (CHECK_FAIL(server_fd < 0)) goto close_cgroup_fd; - err = bpf_prog_load("./load_bytes_relative.o", BPF_PROG_TYPE_CGROUP_SKB, + err = bpf_prog_test_load("./load_bytes_relative.o", BPF_PROG_TYPE_CGROUP_SKB, &obj, &prog_fd); if (CHECK_FAIL(err)) goto close_server_fd; diff --git a/tools/testing/selftests/bpf/prog_tests/map_lock.c b/tools/testing/selftests/bpf/prog_tests/map_lock.c index ce17b1ed8709..23d19e9cf26a 100644 --- a/tools/testing/selftests/bpf/prog_tests/map_lock.c +++ b/tools/testing/selftests/bpf/prog_tests/map_lock.c @@ -53,9 +53,9 @@ void test_map_lock(void) int err = 0, key = 0, i; void *ret; - err = bpf_prog_load(file, BPF_PROG_TYPE_CGROUP_SKB, &obj, &prog_fd); + err = bpf_prog_test_load(file, BPF_PROG_TYPE_CGROUP_SKB, &obj, &prog_fd); if (CHECK_FAIL(err)) { - printf("test_map_lock:bpf_prog_load errno %d\n", errno); + printf("test_map_lock:bpf_prog_test_load errno %d\n", errno); goto close_prog; } map_fd[0] = bpf_find_map(__func__, obj, "hash_map"); diff --git a/tools/testing/selftests/bpf/prog_tests/pkt_access.c b/tools/testing/selftests/bpf/prog_tests/pkt_access.c index 44b514fabccd..6628710ec3c6 100644 --- a/tools/testing/selftests/bpf/prog_tests/pkt_access.c +++ b/tools/testing/selftests/bpf/prog_tests/pkt_access.c @@ -9,7 +9,7 @@ void test_pkt_access(void) __u32 duration, retval; int err, prog_fd; - err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); + err = bpf_prog_test_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); if (CHECK_FAIL(err)) return; diff --git a/tools/testing/selftests/bpf/prog_tests/pkt_md_access.c b/tools/testing/selftests/bpf/prog_tests/pkt_md_access.c index 939015cd6dba..c9d2d6a1bfcc 100644 --- a/tools/testing/selftests/bpf/prog_tests/pkt_md_access.c +++ b/tools/testing/selftests/bpf/prog_tests/pkt_md_access.c @@ -9,7 +9,7 @@ void 
test_pkt_md_access(void) __u32 duration, retval; int err, prog_fd; - err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); + err = bpf_prog_test_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); if (CHECK_FAIL(err)) return; diff --git a/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c b/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c index f47e7b1cb32c..8ccba3ab70ee 100644 --- a/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c +++ b/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c @@ -27,7 +27,7 @@ static void test_queue_stack_map_by_type(int type) else return; - err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); + err = bpf_prog_test_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); if (CHECK_FAIL(err)) return; diff --git a/tools/testing/selftests/bpf/prog_tests/skb_ctx.c b/tools/testing/selftests/bpf/prog_tests/skb_ctx.c index c437e6ba8fe2..d3106078838c 100644 --- a/tools/testing/selftests/bpf/prog_tests/skb_ctx.c +++ b/tools/testing/selftests/bpf/prog_tests/skb_ctx.c @@ -32,7 +32,7 @@ void test_skb_ctx(void) int err; int i; - err = bpf_prog_load("./test_skb_ctx.o", BPF_PROG_TYPE_SCHED_CLS, &obj, + err = bpf_prog_test_load("./test_skb_ctx.o", BPF_PROG_TYPE_SCHED_CLS, &obj, &tattr.prog_fd); if (CHECK_ATTR(err, "load", "err %d errno %d\n", err, errno)) return; diff --git a/tools/testing/selftests/bpf/prog_tests/skb_helpers.c b/tools/testing/selftests/bpf/prog_tests/skb_helpers.c index f302ad84a298..6f802a1c0800 100644 --- a/tools/testing/selftests/bpf/prog_tests/skb_helpers.c +++ b/tools/testing/selftests/bpf/prog_tests/skb_helpers.c @@ -20,7 +20,7 @@ void test_skb_helpers(void) struct bpf_object *obj; int err; - err = bpf_prog_load("./test_skb_helpers.o", BPF_PROG_TYPE_SCHED_CLS, &obj, + err = bpf_prog_test_load("./test_skb_helpers.o", BPF_PROG_TYPE_SCHED_CLS, &obj, &tattr.prog_fd); if (CHECK_ATTR(err, "load", "err %d errno %d\n", err, errno)) return; diff --git a/tools/testing/selftests/bpf/prog_tests/spinlock.c b/tools/testing/selftests/bpf/prog_tests/spinlock.c index 7577a77a4c4c..6307f5d2b417 100644 --- a/tools/testing/selftests/bpf/prog_tests/spinlock.c +++ b/tools/testing/selftests/bpf/prog_tests/spinlock.c @@ -24,9 +24,9 @@ void test_spinlock(void) int err = 0, i; void *ret; - err = bpf_prog_load(file, BPF_PROG_TYPE_CGROUP_SKB, &obj, &prog_fd); + err = bpf_prog_test_load(file, BPF_PROG_TYPE_CGROUP_SKB, &obj, &prog_fd); if (CHECK_FAIL(err)) { - printf("test_spin_lock:bpf_prog_load errno %d\n", errno); + printf("test_spin_lock:bpf_prog_test_load errno %d\n", errno); goto close_prog; } for (i = 0; i < 4; i++) diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_map.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_map.c index 04b476bd62b9..337493d74ec5 100644 --- a/tools/testing/selftests/bpf/prog_tests/stacktrace_map.c +++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_map.c @@ -12,7 +12,7 @@ void test_stacktrace_map(void) struct bpf_object *obj; struct bpf_link *link; - err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd); + err = bpf_prog_test_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd); if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno)) return; diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_map_raw_tp.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_map_raw_tp.c index 4fd30bb651ad..063a14a2060d 100644 --- a/tools/testing/selftests/bpf/prog_tests/stacktrace_map_raw_tp.c +++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_map_raw_tp.c 
@@ -12,7 +12,7 @@ void test_stacktrace_map_raw_tp(void) struct bpf_object *obj; struct bpf_link *link = NULL; - err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd); + err = bpf_prog_test_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd); if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno)) return; diff --git a/tools/testing/selftests/bpf/prog_tests/tailcalls.c b/tools/testing/selftests/bpf/prog_tests/tailcalls.c index 9825f1f7bfcc..5dc0f425bd11 100644 --- a/tools/testing/selftests/bpf/prog_tests/tailcalls.c +++ b/tools/testing/selftests/bpf/prog_tests/tailcalls.c @@ -16,7 +16,7 @@ static void test_tailcall_1(void) char prog_name[32]; char buff[128] = {}; - err = bpf_prog_load("tailcall1.o", BPF_PROG_TYPE_SCHED_CLS, &obj, + err = bpf_prog_test_load("tailcall1.o", BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); if (CHECK_FAIL(err)) return; @@ -154,7 +154,7 @@ static void test_tailcall_2(void) char prog_name[32]; char buff[128] = {}; - err = bpf_prog_load("tailcall2.o", BPF_PROG_TYPE_SCHED_CLS, &obj, + err = bpf_prog_test_load("tailcall2.o", BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); if (CHECK_FAIL(err)) return; @@ -228,7 +228,7 @@ static void test_tailcall_count(const char *which) __u32 retval, duration; char buff[128] = {}; - err = bpf_prog_load(which, BPF_PROG_TYPE_SCHED_CLS, &obj, + err = bpf_prog_test_load(which, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); if (CHECK_FAIL(err)) return; @@ -324,7 +324,7 @@ static void test_tailcall_4(void) char buff[128] = {}; char prog_name[32]; - err = bpf_prog_load("tailcall4.o", BPF_PROG_TYPE_SCHED_CLS, &obj, + err = bpf_prog_test_load("tailcall4.o", BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); if (CHECK_FAIL(err)) return; @@ -412,7 +412,7 @@ static void test_tailcall_5(void) char buff[128] = {}; char prog_name[32]; - err = bpf_prog_load("tailcall5.o", BPF_PROG_TYPE_SCHED_CLS, &obj, + err = bpf_prog_test_load("tailcall5.o", BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); if (CHECK_FAIL(err)) return; @@ -498,7 +498,7 @@ static void test_tailcall_bpf2bpf_1(void) __u32 retval, duration; char prog_name[32]; - err = bpf_prog_load("tailcall_bpf2bpf1.o", BPF_PROG_TYPE_SCHED_CLS, + err = bpf_prog_test_load("tailcall_bpf2bpf1.o", BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); if (CHECK_FAIL(err)) return; @@ -582,7 +582,7 @@ static void test_tailcall_bpf2bpf_2(void) __u32 retval, duration; char buff[128] = {}; - err = bpf_prog_load("tailcall_bpf2bpf2.o", BPF_PROG_TYPE_SCHED_CLS, + err = bpf_prog_test_load("tailcall_bpf2bpf2.o", BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); if (CHECK_FAIL(err)) return; @@ -660,7 +660,7 @@ static void test_tailcall_bpf2bpf_3(void) __u32 retval, duration; char prog_name[32]; - err = bpf_prog_load("tailcall_bpf2bpf3.o", BPF_PROG_TYPE_SCHED_CLS, + err = bpf_prog_test_load("tailcall_bpf2bpf3.o", BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); if (CHECK_FAIL(err)) return; @@ -757,7 +757,7 @@ static void test_tailcall_bpf2bpf_4(bool noise) __u32 retval, duration; char prog_name[32]; - err = bpf_prog_load("tailcall_bpf2bpf4.o", BPF_PROG_TYPE_SCHED_CLS, + err = bpf_prog_test_load("tailcall_bpf2bpf4.o", BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); if (CHECK_FAIL(err)) return; diff --git a/tools/testing/selftests/bpf/prog_tests/task_fd_query_rawtp.c b/tools/testing/selftests/bpf/prog_tests/task_fd_query_rawtp.c index 1bdc1d86a50c..17947c9e1d66 100644 --- a/tools/testing/selftests/bpf/prog_tests/task_fd_query_rawtp.c +++ b/tools/testing/selftests/bpf/prog_tests/task_fd_query_rawtp.c @@ -11,7 +11,7 @@ void 
test_task_fd_query_rawtp(void) __u32 duration = 0; char buf[256]; - err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd); + err = bpf_prog_test_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd); if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno)) return; diff --git a/tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c b/tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c index 3f131b8fe328..c2a98a7a8dfc 100644 --- a/tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c +++ b/tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c @@ -13,8 +13,8 @@ static void test_task_fd_query_tp_core(const char *probe_name, __u32 duration = 0; char buf[256]; - err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd); - if (CHECK(err, "bpf_prog_load", "err %d errno %d\n", err, errno)) + err = bpf_prog_test_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd); + if (CHECK(err, "bpf_prog_test_load", "err %d errno %d\n", err, errno)) goto close_prog; snprintf(buf, sizeof(buf), diff --git a/tools/testing/selftests/bpf/prog_tests/tcp_estats.c b/tools/testing/selftests/bpf/prog_tests/tcp_estats.c index 594307dffd13..11bf755be4c9 100644 --- a/tools/testing/selftests/bpf/prog_tests/tcp_estats.c +++ b/tools/testing/selftests/bpf/prog_tests/tcp_estats.c @@ -8,7 +8,7 @@ void test_tcp_estats(void) struct bpf_object *obj; __u32 duration = 0; - err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd); + err = bpf_prog_test_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd); CHECK(err, "", "err %d errno %d\n", err, errno); if (err) return; diff --git a/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c b/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c index 8652d0a46c87..39e79291c82b 100644 --- a/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c +++ b/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c @@ -35,7 +35,7 @@ void serial_test_tp_attach_query(void) query = malloc(sizeof(*query) + sizeof(__u32) * num_progs); for (i = 0; i < num_progs; i++) { - err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj[i], + err = bpf_prog_test_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj[i], &prog_fd[i]); if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno)) goto cleanup1; diff --git a/tools/testing/selftests/bpf/prog_tests/xdp.c b/tools/testing/selftests/bpf/prog_tests/xdp.c index 48921ff74850..7a7ef9d4e151 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp.c @@ -16,7 +16,7 @@ void test_xdp(void) __u32 duration, retval, size; int err, prog_fd, map_fd; - err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd); + err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd); if (CHECK_FAIL(err)) return; diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c index f529e3c923ae..3f5a17c38be5 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c @@ -10,7 +10,7 @@ static void test_xdp_adjust_tail_shrink(void) int err, prog_fd; char buf[128]; - err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd); + err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd); if (CHECK_FAIL(err)) return; @@ -38,7 +38,7 @@ static void test_xdp_adjust_tail_grow(void) __u32 duration, retval, size, expect_sz; int err, prog_fd; - err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd); + err = 
bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd); if (CHECK_FAIL(err)) return; @@ -75,7 +75,7 @@ static void test_xdp_adjust_tail_grow2(void) .data_size_out = 0, /* Per test */ }; - err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &tattr.prog_fd); + err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &tattr.prog_fd); if (CHECK_ATTR(err, "load", "err %d errno %d\n", err, errno)) return; diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_attach.c b/tools/testing/selftests/bpf/prog_tests/xdp_attach.c index 4c4057262cd8..c6fa390e3aa1 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_attach.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_attach.c @@ -16,7 +16,7 @@ void serial_test_xdp_attach(void) len = sizeof(info); - err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj1, &fd1); + err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj1, &fd1); if (CHECK_FAIL(err)) return; err = bpf_obj_get_info_by_fd(fd1, &info, &len); @@ -24,7 +24,7 @@ void serial_test_xdp_attach(void) goto out_1; id1 = info.id; - err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj2, &fd2); + err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj2, &fd2); if (CHECK_FAIL(err)) goto out_1; @@ -34,7 +34,7 @@ void serial_test_xdp_attach(void) goto out_2; id2 = info.id; - err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj3, &fd3); + err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj3, &fd3); if (CHECK_FAIL(err)) goto out_2; diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_info.c b/tools/testing/selftests/bpf/prog_tests/xdp_info.c index 4e2a4fd56f67..abe48e82e1dc 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_info.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_info.c @@ -29,7 +29,7 @@ void serial_test_xdp_info(void) /* Setup prog */ - err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd); + err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd); if (CHECK_FAIL(err)) return; diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_perf.c b/tools/testing/selftests/bpf/prog_tests/xdp_perf.c index 7185bee16fe4..15a3900e4370 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_perf.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_perf.c @@ -9,7 +9,7 @@ void test_xdp_perf(void) char in[128], out[128]; int err, prog_fd; - err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd); + err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd); if (CHECK_FAIL(err)) return; diff --git a/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c b/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c index 49a84a3a2306..48cd14b43741 100644 --- a/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c +++ b/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c @@ -73,7 +73,7 @@ int test_subprog2(struct args_subprog2 *ctx) __builtin_preserve_access_index(&skb->len)); ret = ctx->ret; - /* bpf_prog_load() loads "test_pkt_access.o" with BPF_F_TEST_RND_HI32 + /* bpf_prog_test_load() loads "test_pkt_access.o" with BPF_F_TEST_RND_HI32 * which randomizes upper 32 bits after BPF_ALU32 insns. * Hence after 'w0 <<= 1' upper bits of $rax are random. * That is expected and correct. Trim them. 
diff --git a/tools/testing/selftests/bpf/test_dev_cgroup.c b/tools/testing/selftests/bpf/test_dev_cgroup.c index 804dddd97d4c..c299d3452695 100644 --- a/tools/testing/selftests/bpf/test_dev_cgroup.c +++ b/tools/testing/selftests/bpf/test_dev_cgroup.c @@ -14,6 +14,7 @@ #include #include "cgroup_helpers.h" +#include "testing_helpers.h" #include "bpf_rlimit.h" #define DEV_CGROUP_PROG "./dev_cgroup.o" @@ -27,7 +28,7 @@ int main(int argc, char **argv) int prog_fd, cgroup_fd; __u32 prog_cnt; - if (bpf_prog_load(DEV_CGROUP_PROG, BPF_PROG_TYPE_CGROUP_DEVICE, + if (bpf_prog_test_load(DEV_CGROUP_PROG, BPF_PROG_TYPE_CGROUP_DEVICE, &obj, &prog_fd)) { printf("Failed to load DEV_CGROUP program\n"); goto out; diff --git a/tools/testing/selftests/bpf/test_lirc_mode2_user.c b/tools/testing/selftests/bpf/test_lirc_mode2_user.c index fb5fd6841ef3..ebf68dce5504 100644 --- a/tools/testing/selftests/bpf/test_lirc_mode2_user.c +++ b/tools/testing/selftests/bpf/test_lirc_mode2_user.c @@ -45,6 +45,8 @@ #include #include +#include "testing_helpers.h" + int main(int argc, char **argv) { struct bpf_object *obj; @@ -58,8 +60,8 @@ int main(int argc, char **argv) return 2; } - ret = bpf_prog_load("test_lirc_mode2_kern.o", - BPF_PROG_TYPE_LIRC_MODE2, &obj, &progfd); + ret = bpf_prog_test_load("test_lirc_mode2_kern.o", + BPF_PROG_TYPE_LIRC_MODE2, &obj, &progfd); if (ret) { printf("Failed to load bpf program\n"); return 1; diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c index c7a36a9378f8..8b31bc1a801d 100644 --- a/tools/testing/selftests/bpf/test_maps.c +++ b/tools/testing/selftests/bpf/test_maps.c @@ -25,6 +25,7 @@ #include "bpf_util.h" #include "bpf_rlimit.h" #include "test_maps.h" +#include "testing_helpers.h" #ifndef ENOTSUPP #define ENOTSUPP 524 @@ -830,21 +831,21 @@ static void test_sockmap(unsigned int tasks, void *data) } /* Load SK_SKB program and Attach */ - err = bpf_prog_load(SOCKMAP_PARSE_PROG, + err = bpf_prog_test_load(SOCKMAP_PARSE_PROG, BPF_PROG_TYPE_SK_SKB, &obj, &parse_prog); if (err) { printf("Failed to load SK_SKB parse prog\n"); goto out_sockmap; } - err = bpf_prog_load(SOCKMAP_TCP_MSG_PROG, + err = bpf_prog_test_load(SOCKMAP_TCP_MSG_PROG, BPF_PROG_TYPE_SK_MSG, &obj, &msg_prog); if (err) { printf("Failed to load SK_SKB msg prog\n"); goto out_sockmap; } - err = bpf_prog_load(SOCKMAP_VERDICT_PROG, + err = bpf_prog_test_load(SOCKMAP_VERDICT_PROG, BPF_PROG_TYPE_SK_SKB, &obj, &verdict_prog); if (err) { printf("Failed to load SK_SKB verdict prog\n"); diff --git a/tools/testing/selftests/bpf/test_sysctl.c b/tools/testing/selftests/bpf/test_sysctl.c index 4a395d7a8ea9..4f6cf833b522 100644 --- a/tools/testing/selftests/bpf/test_sysctl.c +++ b/tools/testing/selftests/bpf/test_sysctl.c @@ -17,6 +17,7 @@ #include "bpf_rlimit.h" #include "bpf_util.h" #include "cgroup_helpers.h" +#include "testing_helpers.h" #define CG_PATH "/foo" #define MAX_INSNS 512 diff --git a/tools/testing/selftests/bpf/test_tcpnotify_user.c b/tools/testing/selftests/bpf/test_tcpnotify_user.c index 4a39304cc5a6..63111cb082fe 100644 --- a/tools/testing/selftests/bpf/test_tcpnotify_user.c +++ b/tools/testing/selftests/bpf/test_tcpnotify_user.c @@ -25,6 +25,7 @@ #include "test_tcpnotify.h" #include "trace_helpers.h" +#include "testing_helpers.h" #define SOCKET_BUFFER_SIZE (getpagesize() < 8192L ? 
getpagesize() : 8192L) @@ -92,7 +93,7 @@ int main(int argc, char **argv) if (cg_fd < 0) goto err; - if (bpf_prog_load(file, BPF_PROG_TYPE_SOCK_OPS, &obj, &prog_fd)) { + if (bpf_prog_test_load(file, BPF_PROG_TYPE_SOCK_OPS, &obj, &prog_fd)) { printf("FAILED: load_bpf_file failed for: %s\n", file); goto err; } diff --git a/tools/testing/selftests/bpf/xdping.c b/tools/testing/selftests/bpf/xdping.c index 30f12637f4e4..baa870a759a2 100644 --- a/tools/testing/selftests/bpf/xdping.c +++ b/tools/testing/selftests/bpf/xdping.c @@ -22,6 +22,7 @@ #include "bpf/libbpf.h" #include "xdping.h" +#include "testing_helpers.h" static int ifindex; static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST; @@ -173,7 +174,7 @@ int main(int argc, char **argv) snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); - if (bpf_prog_load(filename, BPF_PROG_TYPE_XDP, &obj, &prog_fd)) { + if (bpf_prog_test_load(filename, BPF_PROG_TYPE_XDP, &obj, &prog_fd)) { fprintf(stderr, "load of %s failed\n", filename); return 1; } -- cgit v1.2.3 From f19ddfe0360a1aa64db0b4a41f59e1ade3f6d288 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 3 Nov 2021 15:08:45 -0700 Subject: selftests/bpf: Use explicit bpf_test_load_program() helper calls Remove the second part of prog loading testing helper re-definition: -Dbpf_load_program=bpf_test_load_program This completes the clean up of deprecated libbpf program loading APIs. Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Acked-by: Dave Marchevsky Link: https://lore.kernel.org/bpf/20211103220845.2676888-13-andrii@kernel.org --- tools/testing/selftests/bpf/Makefile | 3 +-- tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c | 2 +- tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c | 2 +- tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c | 2 +- tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c | 2 +- tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c | 4 ++-- tools/testing/selftests/bpf/prog_tests/signal_pending.c | 2 +- tools/testing/selftests/bpf/test_cgroup_storage.c | 3 ++- tools/testing/selftests/bpf/test_tag.c | 3 ++- 9 files changed, 12 insertions(+), 11 deletions(-) diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 2016c583ed20..e19cc6473936 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -23,8 +23,7 @@ BPF_GCC ?= $(shell command -v bpf-gcc;) SAN_CFLAGS ?= CFLAGS += -g -O0 -rdynamic -Wall $(GENFLAGS) $(SAN_CFLAGS) \ -I$(CURDIR) -I$(INCLUDE_DIR) -I$(GENDIR) -I$(LIBDIR) \ - -I$(TOOLSINCDIR) -I$(APIDIR) -I$(OUTPUT) \ - -Dbpf_load_program=bpf_test_load_program + -I$(TOOLSINCDIR) -I$(APIDIR) -I$(OUTPUT) LDLIBS += -lcap -lelf -lz -lrt -lpthread # Silence some warnings when compiled with clang diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c index 5de485c7370f..858916d11e2e 100644 --- a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c @@ -16,7 +16,7 @@ static int prog_load(void) }; size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn); - return bpf_load_program(BPF_PROG_TYPE_CGROUP_SKB, + return bpf_test_load_program(BPF_PROG_TYPE_CGROUP_SKB, prog, insns_cnt, "GPL", 0, bpf_log_buf, BPF_LOG_BUF_SIZE); } diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c 
b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c index 731bea84d8ed..de9c3e12b0ea 100644 --- a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c @@ -66,7 +66,7 @@ static int prog_load_cnt(int verdict, int val) size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn); int ret; - ret = bpf_load_program(BPF_PROG_TYPE_CGROUP_SKB, + ret = bpf_test_load_program(BPF_PROG_TYPE_CGROUP_SKB, prog, insns_cnt, "GPL", 0, bpf_log_buf, BPF_LOG_BUF_SIZE); diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c index 10d3c33821a7..356547e849e2 100644 --- a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c @@ -18,7 +18,7 @@ static int prog_load(int verdict) }; size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn); - return bpf_load_program(BPF_PROG_TYPE_CGROUP_SKB, + return bpf_test_load_program(BPF_PROG_TYPE_CGROUP_SKB, prog, insns_cnt, "GPL", 0, bpf_log_buf, BPF_LOG_BUF_SIZE); } diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c index 6093728497c7..93ac3f28226c 100644 --- a/tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c +++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c @@ -30,7 +30,7 @@ void serial_test_flow_dissector_load_bytes(void) /* make sure bpf_skb_load_bytes is not allowed from skb-less context */ - fd = bpf_load_program(BPF_PROG_TYPE_FLOW_DISSECTOR, prog, + fd = bpf_test_load_program(BPF_PROG_TYPE_FLOW_DISSECTOR, prog, ARRAY_SIZE(prog), "GPL", 0, NULL, 0); CHECK(fd < 0, "flow_dissector-bpf_skb_load_bytes-load", diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c index f0c6c226aba8..7c79462d2702 100644 --- a/tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c +++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c @@ -47,9 +47,9 @@ static int load_prog(enum bpf_prog_type type) }; int fd; - fd = bpf_load_program(type, prog, ARRAY_SIZE(prog), "GPL", 0, NULL, 0); + fd = bpf_test_load_program(type, prog, ARRAY_SIZE(prog), "GPL", 0, NULL, 0); if (CHECK_FAIL(fd < 0)) - perror("bpf_load_program"); + perror("bpf_test_load_program"); return fd; } diff --git a/tools/testing/selftests/bpf/prog_tests/signal_pending.c b/tools/testing/selftests/bpf/prog_tests/signal_pending.c index fdfdcff6cbef..aecfe662c070 100644 --- a/tools/testing/selftests/bpf/prog_tests/signal_pending.c +++ b/tools/testing/selftests/bpf/prog_tests/signal_pending.c @@ -22,7 +22,7 @@ static void test_signal_pending_by_type(enum bpf_prog_type prog_type) prog[i] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0); prog[ARRAY_SIZE(prog) - 1] = BPF_EXIT_INSN(); - prog_fd = bpf_load_program(prog_type, prog, ARRAY_SIZE(prog), + prog_fd = bpf_test_load_program(prog_type, prog, ARRAY_SIZE(prog), "GPL", 0, NULL, 0); CHECK(prog_fd < 0, "test-run", "errno %d\n", errno); diff --git a/tools/testing/selftests/bpf/test_cgroup_storage.c b/tools/testing/selftests/bpf/test_cgroup_storage.c index 0cda61da5d39..a63787e7bb1a 100644 --- a/tools/testing/selftests/bpf/test_cgroup_storage.c +++ b/tools/testing/selftests/bpf/test_cgroup_storage.c @@ -8,6 +8,7 @@ #include "bpf_rlimit.h" #include "cgroup_helpers.h" +#include "testing_helpers.h" char 
bpf_log_buf[BPF_LOG_BUF_SIZE]; @@ -66,7 +67,7 @@ int main(int argc, char **argv) prog[0].imm = percpu_map_fd; prog[7].imm = map_fd; - prog_fd = bpf_load_program(BPF_PROG_TYPE_CGROUP_SKB, + prog_fd = bpf_test_load_program(BPF_PROG_TYPE_CGROUP_SKB, prog, insns_cnt, "GPL", 0, bpf_log_buf, BPF_LOG_BUF_SIZE); if (prog_fd < 0) { diff --git a/tools/testing/selftests/bpf/test_tag.c b/tools/testing/selftests/bpf/test_tag.c index 6272c784ca2a..5c7bea525626 100644 --- a/tools/testing/selftests/bpf/test_tag.c +++ b/tools/testing/selftests/bpf/test_tag.c @@ -21,6 +21,7 @@ #include "../../../include/linux/filter.h" #include "bpf_rlimit.h" +#include "testing_helpers.h" static struct bpf_insn prog[BPF_MAXINSNS]; @@ -57,7 +58,7 @@ static int bpf_try_load_prog(int insns, int fd_map, int fd_prog; bpf_filler(insns, fd_map); - fd_prog = bpf_load_program(BPF_PROG_TYPE_SCHED_CLS, prog, insns, "", 0, + fd_prog = bpf_test_load_program(BPF_PROG_TYPE_SCHED_CLS, prog, insns, "", 0, NULL, 0); assert(fd_prog > 0); if (fd_map > 0) -- cgit v1.2.3 From 2a2cb45b727b7a1041f3d3d93414b774e66454bb Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Sun, 7 Nov 2021 08:55:13 -0800 Subject: selftests/bpf: Pass sanitizer flags to linker through LDFLAGS When adding -fsanitize=address to SAN_CFLAGS, it has to be passed both to compiler through CFLAGS as well as linker through LDFLAGS. Add SAN_CFLAGS into LDFLAGS to allow building selftests with ASAN. Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Reviewed-by: Hengqi Chen Link: https://lore.kernel.org/bpf/20211107165521.9240-2-andrii@kernel.org --- tools/testing/selftests/bpf/Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index e19cc6473936..0468ea57650d 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -24,6 +24,7 @@ SAN_CFLAGS ?= CFLAGS += -g -O0 -rdynamic -Wall $(GENFLAGS) $(SAN_CFLAGS) \ -I$(CURDIR) -I$(INCLUDE_DIR) -I$(GENDIR) -I$(LIBDIR) \ -I$(TOOLSINCDIR) -I$(APIDIR) -I$(OUTPUT) +LDFLAGS += $(SAN_CFLAGS) LDLIBS += -lcap -lelf -lz -lrt -lpthread # Silence some warnings when compiled with clang -- cgit v1.2.3 From 8f7b239ea8cfdc8e64c875ee417fed41431a1f37 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Sun, 7 Nov 2021 08:55:14 -0800 Subject: libbpf: Free up resources used by inner map definition It's not enough to just free(map->inner_map), as inner_map itself can have extra memory allocated, like map name. 
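For context, the code path touched here is bpf_map__set_inner_map_fd(), which swaps the BTF-defined inner map template for a caller-provided map FD before load; a hedged usage sketch, where the object file, map name and inner_fd are illustrative:

  struct bpf_object *obj = bpf_object__open_file("map_in_map.bpf.o", NULL);
  struct bpf_map *outer = bpf_object__find_map_by_name(obj, "outer_arr");

  /* inner_fd: an already-created map compatible with the inner definition */
  bpf_map__set_inner_map_fd(outer, inner_fd); /* drops the parsed inner template */
  bpf_object__load(obj);
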
Fixes: 646f02ffdd49 ("libbpf: Add BTF-defined map-in-map support") Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Reviewed-by: Hengqi Chen Link: https://lore.kernel.org/bpf/20211107165521.9240-3-andrii@kernel.org --- tools/lib/bpf/libbpf.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index dfd15cc60ea7..d869ebee1e27 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -9009,7 +9009,10 @@ int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd) pr_warn("error: inner_map_fd already specified\n"); return libbpf_err(-EINVAL); } - zfree(&map->inner_map); + if (map->inner_map) { + bpf_map__destroy(map->inner_map); + zfree(&map->inner_map); + } map->inner_map_fd = fd; return 0; } -- cgit v1.2.3 From 8ba285874913da21ca39a46376e9cc5ce0f45f94 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Sun, 7 Nov 2021 08:55:15 -0800 Subject: selftests/bpf: Fix memory leaks in btf_type_c_dump() helper Free up memory and resources used by temporary allocated memstream and btf_dump instance. Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Reviewed-by: Hengqi Chen Link: https://lore.kernel.org/bpf/20211107165521.9240-4-andrii@kernel.org --- tools/testing/selftests/bpf/btf_helpers.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/bpf/btf_helpers.c b/tools/testing/selftests/bpf/btf_helpers.c index b5b6b013a245..3d1a748d09d8 100644 --- a/tools/testing/selftests/bpf/btf_helpers.c +++ b/tools/testing/selftests/bpf/btf_helpers.c @@ -251,18 +251,23 @@ const char *btf_type_c_dump(const struct btf *btf) d = btf_dump__new(btf, NULL, &opts, btf_dump_printf); if (libbpf_get_error(d)) { fprintf(stderr, "Failed to create btf_dump instance: %ld\n", libbpf_get_error(d)); - return NULL; + goto err_out; } for (i = 1; i < btf__type_cnt(btf); i++) { err = btf_dump__dump_type(d, i); if (err) { fprintf(stderr, "Failed to dump type [%d]: %d\n", i, err); - return NULL; + goto err_out; } } + btf_dump__free(d); fflush(buf_file); fclose(buf_file); return buf; +err_out: + btf_dump__free(d); + fclose(buf_file); + return NULL; } -- cgit v1.2.3 From b8b26e585f3a0fbcee1032c622f046787da57390 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Sun, 7 Nov 2021 08:55:16 -0800 Subject: selftests/bpf: Free per-cpu values array in bpf_iter selftest Array holding per-cpu values wasn't freed. Fix that. 
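The buffer in question holds one value slot per possible CPU; the lookup pattern these bpf_iter tests follow is roughly the sketch below (8-byte values assumed, matching the test's malloc):

  int nr_cpus = bpf_num_possible_cpus();
  __u64 *val = malloc(8 * nr_cpus);
  __u64 sum = 0;

  if (!bpf_map_lookup_elem(map_fd, &key, val)) {
      for (int i = 0; i < nr_cpus; i++)
          sum += val[i];     /* per-CPU maps return one slot per possible CPU */
  }
  free(val);                 /* the free() this patch adds */
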
Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20211107165521.9240-5-andrii@kernel.org --- tools/testing/selftests/bpf/prog_tests/bpf_iter.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c index 9454331aaf85..3e10abce3e5a 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c @@ -699,14 +699,13 @@ static void test_bpf_percpu_hash_map(void) char buf[64]; void *val; - val = malloc(8 * bpf_num_possible_cpus()); - skel = bpf_iter_bpf_percpu_hash_map__open(); if (CHECK(!skel, "bpf_iter_bpf_percpu_hash_map__open", "skeleton open failed\n")) return; skel->rodata->num_cpus = bpf_num_possible_cpus(); + val = malloc(8 * bpf_num_possible_cpus()); err = bpf_iter_bpf_percpu_hash_map__load(skel); if (CHECK(!skel, "bpf_iter_bpf_percpu_hash_map__load", @@ -770,6 +769,7 @@ free_link: bpf_link__destroy(link); out: bpf_iter_bpf_percpu_hash_map__destroy(skel); + free(val); } static void test_bpf_array_map(void) @@ -870,14 +870,13 @@ static void test_bpf_percpu_array_map(void) void *val; int len; - val = malloc(8 * bpf_num_possible_cpus()); - skel = bpf_iter_bpf_percpu_array_map__open(); if (CHECK(!skel, "bpf_iter_bpf_percpu_array_map__open", "skeleton open failed\n")) return; skel->rodata->num_cpus = bpf_num_possible_cpus(); + val = malloc(8 * bpf_num_possible_cpus()); err = bpf_iter_bpf_percpu_array_map__load(skel); if (CHECK(!skel, "bpf_iter_bpf_percpu_array_map__load", @@ -933,6 +932,7 @@ free_link: bpf_link__destroy(link); out: bpf_iter_bpf_percpu_array_map__destroy(skel); + free(val); } /* An iterator program deletes all local storage in a map. */ -- cgit v1.2.3 From 5309b516bcc6f76dda0e44a7a1824324277093d6 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Sun, 7 Nov 2021 08:55:17 -0800 Subject: selftests/bpf: Free inner strings index in btf selftest Inner array of allocated strings wasn't freed on success. Now it's always freed. Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Reviewed-by: Hengqi Chen Link: https://lore.kernel.org/bpf/20211107165521.9240-6-andrii@kernel.org --- tools/testing/selftests/bpf/prog_tests/btf.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c index ac596cb06e40..ebd1aa4d09d6 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf.c +++ b/tools/testing/selftests/bpf/prog_tests/btf.c @@ -4046,11 +4046,9 @@ static void *btf_raw_create(const struct btf_header *hdr, next_str_idx < strs_cnt ? strs_idx[next_str_idx] : NULL; done: + free(strs_idx); if (err) { - if (raw_btf) - free(raw_btf); - if (strs_idx) - free(strs_idx); + free(raw_btf); return NULL; } return raw_btf; -- cgit v1.2.3 From f79587520a6007a3734b23a3c2eb4c62aa457533 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Sun, 7 Nov 2021 08:55:18 -0800 Subject: selftests/bpf: Clean up btf and btf_dump in dump_datasec test Free up used resources at the end and on error. Also make it more obvious that there is btf__parse() call that creates struct btf instance. 
Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Reviewed-by: Hengqi Chen Link: https://lore.kernel.org/bpf/20211107165521.9240-7-andrii@kernel.org --- tools/testing/selftests/bpf/prog_tests/btf_dump.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/btf_dump.c b/tools/testing/selftests/bpf/prog_tests/btf_dump.c index aa76360d8f49..a04961942dfa 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_dump.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_dump.c @@ -814,21 +814,25 @@ static void test_btf_datasec(struct btf *btf, struct btf_dump *d, char *str, static void test_btf_dump_datasec_data(char *str) { - struct btf *btf = btf__parse("xdping_kern.o", NULL); + struct btf *btf; struct btf_dump_opts opts = { .ctx = str }; char license[4] = "GPL"; struct btf_dump *d; + btf = btf__parse("xdping_kern.o", NULL); if (!ASSERT_OK_PTR(btf, "xdping_kern.o BTF not found")) return; d = btf_dump__new(btf, NULL, &opts, btf_dump_snprintf); if (!ASSERT_OK_PTR(d, "could not create BTF dump")) - return; + goto out; test_btf_datasec(btf, d, str, "license", "SEC(\"license\") char[4] _license = (char[4])['G','P','L',];", license, sizeof(license)); +out: + btf_dump__free(d); + btf__free(btf); } void test_btf_dump() { -- cgit v1.2.3 From f92321d706a810b89a905e04658e38931c4bb0e0 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Sun, 7 Nov 2021 08:55:19 -0800 Subject: selftests/bpf: Avoid duplicate btf__parse() call btf__parse() is repeated after successful setup, leaving the first instance leaked. Remove redundant and premature call. Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Reviewed-by: Hengqi Chen Link: https://lore.kernel.org/bpf/20211107165521.9240-8-andrii@kernel.org --- tools/testing/selftests/bpf/prog_tests/core_reloc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/bpf/prog_tests/core_reloc.c b/tools/testing/selftests/bpf/prog_tests/core_reloc.c index 55ec85ba7375..1041d0c593f6 100644 --- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c +++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c @@ -433,7 +433,7 @@ static int setup_type_id_case_local(struct core_reloc_test_case *test) static int setup_type_id_case_success(struct core_reloc_test_case *test) { struct core_reloc_type_id_output *exp = (void *)test->output; - struct btf *targ_btf = btf__parse(test->btf_src_file, NULL); + struct btf *targ_btf; int err; err = setup_type_id_case_local(test); -- cgit v1.2.3 From f91231eeeed752119f49eb6620cae44ec745a007 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Sun, 7 Nov 2021 08:55:20 -0800 Subject: selftests/bpf: Destroy XDP link correctly bpf_link__detach() was confused with bpf_link__destroy() and leaves leaked FD in the process. Fix the problem. 
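The distinction, roughly: bpf_link__detach() only forces the kernel to detach the program and leaves the link FD plus the struct bpf_link allocation alive, while bpf_link__destroy() also closes the FD and frees the object, which is what test teardown wants. A sketch, where the XDP attach call and ifindex are illustrative:

  struct bpf_link *link = bpf_program__attach_xdp(prog, ifindex);

  bpf_link__detach(link);   /* detaches, but the FD and allocation remain */
  bpf_link__destroy(link);  /* detaches if needed, closes the FD, frees the link */
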
Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Reviewed-by: Hengqi Chen Link: https://lore.kernel.org/bpf/20211107165521.9240-9-andrii@kernel.org --- tools/testing/selftests/bpf/prog_tests/migrate_reuseport.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/migrate_reuseport.c b/tools/testing/selftests/bpf/prog_tests/migrate_reuseport.c index 7589c03fd26b..eb2feaac81fe 100644 --- a/tools/testing/selftests/bpf/prog_tests/migrate_reuseport.c +++ b/tools/testing/selftests/bpf/prog_tests/migrate_reuseport.c @@ -204,8 +204,8 @@ static int pass_ack(struct migrate_reuseport_test_case *test_case) { int err; - err = bpf_link__detach(test_case->link); - if (!ASSERT_OK(err, "bpf_link__detach")) + err = bpf_link__destroy(test_case->link); + if (!ASSERT_OK(err, "bpf_link__destroy")) return -1; test_case->link = NULL; -- cgit v1.2.3 From 8c7a95520184b6677ca6075e12df9c208d57d088 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Sun, 7 Nov 2021 08:55:21 -0800 Subject: selftests/bpf: Fix bpf_object leak in skb_ctx selftest skb_ctx selftest didn't close bpf_object implicitly allocated by bpf_prog_test_load() helper. Fix the problem by explicitly calling bpf_object__close() at the end of the test. Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Reviewed-by: Hengqi Chen Link: https://lore.kernel.org/bpf/20211107165521.9240-10-andrii@kernel.org --- tools/testing/selftests/bpf/prog_tests/skb_ctx.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/testing/selftests/bpf/prog_tests/skb_ctx.c b/tools/testing/selftests/bpf/prog_tests/skb_ctx.c index d3106078838c..b5319ba2ee27 100644 --- a/tools/testing/selftests/bpf/prog_tests/skb_ctx.c +++ b/tools/testing/selftests/bpf/prog_tests/skb_ctx.c @@ -111,4 +111,6 @@ void test_skb_ctx(void) "ctx_out_mark", "skb->mark == %u, expected %d\n", skb.mark, 10); + + bpf_object__close(obj); } -- cgit v1.2.3 From 7c7e3d31e7856a8260a254f8c71db416f7f9f5a1 Mon Sep 17 00:00:00 2001 From: Song Liu Date: Fri, 5 Nov 2021 16:23:29 -0700 Subject: bpf: Introduce helper bpf_find_vma In some profiler use cases, it is necessary to map an address to the backing file, e.g., a shared library. bpf_find_vma helper provides a flexible way to achieve this. bpf_find_vma maps an address of a task to the vma (vm_area_struct) for this address, and feed the vma to an callback BPF function. The callback function is necessary here, as we need to ensure mmap_sem is unlocked. It is necessary to lock mmap_sem for find_vma. To lock and unlock mmap_sem safely when irqs are disable, we use the same mechanism as stackmap with build_id. Specifically, when irqs are disabled, the unlocked is postponed in an irq_work. Refactor stackmap.c so that the irq_work is shared among bpf_find_vma and stackmap helpers. 
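To illustrate the calling convention from the BPF side, a minimal sketch (the section name, globals and callback are made up for the example, and it assumes helper definitions generated from a kernel with this patch; the selftest added later in this series is the authoritative usage):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

__u64 addr = 0;		/* address to look up, set by user space */
__u64 vm_start = 0;	/* start of the matching vma, reported back */

struct callback_ctx {
	int dummy;
};

/* Invoked by bpf_find_vma() for the vma containing 'addr', with the
 * task's mmap_lock held for read.
 */
static long record_vma(struct task_struct *task, struct vm_area_struct *vma,
		       struct callback_ctx *data)
{
	vm_start = vma->vm_start;
	return 0;
}

SEC("raw_tp/sys_enter")
int handle_sys_enter(void)
{
	struct task_struct *task = bpf_get_current_task_btf();
	struct callback_ctx data = {};

	/* Returns 0 on success, -ENOENT if no vma contains 'addr',
	 * -EBUSY if mmap_lock (or the shared irq_work) is busy.
	 */
	bpf_find_vma(task, addr, record_vma, &data, 0);
	return 0;
}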
Signed-off-by: Song Liu Signed-off-by: Alexei Starovoitov Tested-by: Hengqi Chen Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/20211105232330.1936330-2-songliubraving@fb.com --- include/linux/bpf.h | 1 + include/uapi/linux/bpf.h | 20 +++++++++++ kernel/bpf/btf.c | 5 ++- kernel/bpf/mmap_unlock_work.h | 65 ++++++++++++++++++++++++++++++++++ kernel/bpf/stackmap.c | 80 ++++-------------------------------------- kernel/bpf/task_iter.c | 76 +++++++++++++++++++++++++++++++++++---- kernel/bpf/verifier.c | 34 ++++++++++++++++++ kernel/trace/bpf_trace.c | 2 ++ tools/include/uapi/linux/bpf.h | 20 +++++++++++ 9 files changed, 222 insertions(+), 81 deletions(-) create mode 100644 kernel/bpf/mmap_unlock_work.h diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 2be6dfd68df9..df3410bff4b0 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -2157,6 +2157,7 @@ extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto; extern const struct bpf_func_proto bpf_sk_setsockopt_proto; extern const struct bpf_func_proto bpf_sk_getsockopt_proto; extern const struct bpf_func_proto bpf_kallsyms_lookup_name_proto; +extern const struct bpf_func_proto bpf_find_vma_proto; const struct bpf_func_proto *tracing_prog_func_proto( enum bpf_func_id func_id, const struct bpf_prog *prog); diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index ba5af15e25f5..509eee5f0393 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -4938,6 +4938,25 @@ union bpf_attr { * **-ENOENT** if symbol is not found. * * **-EPERM** if caller does not have permission to obtain kernel address. + * + * long bpf_find_vma(struct task_struct *task, u64 addr, void *callback_fn, void *callback_ctx, u64 flags) + * Description + * Find vma of *task* that contains *addr*, call *callback_fn* + * function with *task*, *vma*, and *callback_ctx*. + * The *callback_fn* should be a static function and + * the *callback_ctx* should be a pointer to the stack. + * The *flags* is used to control certain aspects of the helper. + * Currently, the *flags* must be 0. + * + * The expected callback signature is + * + * long (\*callback_fn)(struct task_struct \*task, struct vm_area_struct \*vma, void \*callback_ctx); + * + * Return + * 0 on success. + * **-ENOENT** if *task->mm* is NULL, or no vma contains *addr*. + * **-EBUSY** if failed to try lock mmap_lock. + * **-EINVAL** for invalid **flags**. 
*/ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -5120,6 +5139,7 @@ union bpf_attr { FN(trace_vprintk), \ FN(skc_to_unix_sock), \ FN(kallsyms_lookup_name), \ + FN(find_vma), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index dbc3ad07e21b..cdb0fba65600 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -6342,7 +6342,10 @@ const struct bpf_func_proto bpf_btf_find_by_name_kind_proto = { .arg4_type = ARG_ANYTHING, }; -BTF_ID_LIST_GLOBAL_SINGLE(btf_task_struct_ids, struct, task_struct) +BTF_ID_LIST_GLOBAL(btf_task_struct_ids) +BTF_ID(struct, task_struct) +BTF_ID(struct, file) +BTF_ID(struct, vm_area_struct) /* BTF ID set registration API for modules */ diff --git a/kernel/bpf/mmap_unlock_work.h b/kernel/bpf/mmap_unlock_work.h new file mode 100644 index 000000000000..5d18d7d85bef --- /dev/null +++ b/kernel/bpf/mmap_unlock_work.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2021 Facebook + */ + +#ifndef __MMAP_UNLOCK_WORK_H__ +#define __MMAP_UNLOCK_WORK_H__ +#include + +/* irq_work to run mmap_read_unlock() in irq_work */ +struct mmap_unlock_irq_work { + struct irq_work irq_work; + struct mm_struct *mm; +}; + +DECLARE_PER_CPU(struct mmap_unlock_irq_work, mmap_unlock_work); + +/* + * We cannot do mmap_read_unlock() when the irq is disabled, because of + * risk to deadlock with rq_lock. To look up vma when the irqs are + * disabled, we need to run mmap_read_unlock() in irq_work. We use a + * percpu variable to do the irq_work. If the irq_work is already used + * by another lookup, we fall over. + */ +static inline bool bpf_mmap_unlock_get_irq_work(struct mmap_unlock_irq_work **work_ptr) +{ + struct mmap_unlock_irq_work *work = NULL; + bool irq_work_busy = false; + + if (irqs_disabled()) { + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) { + work = this_cpu_ptr(&mmap_unlock_work); + if (irq_work_is_busy(&work->irq_work)) { + /* cannot queue more up_read, fallback */ + irq_work_busy = true; + } + } else { + /* + * PREEMPT_RT does not allow to trylock mmap sem in + * interrupt disabled context. Force the fallback code. + */ + irq_work_busy = true; + } + } + + *work_ptr = work; + return irq_work_busy; +} + +static inline void bpf_mmap_unlock_mm(struct mmap_unlock_irq_work *work, struct mm_struct *mm) +{ + if (!work) { + mmap_read_unlock(mm); + } else { + work->mm = mm; + + /* The lock will be released once we're out of interrupt + * context. Tell lockdep that we've released it now so + * it doesn't complain that we forgot to release it. 
+ */ + rwsem_release(&mm->mmap_lock.dep_map, _RET_IP_); + irq_work_queue(&work->irq_work); + } +} + +#endif /* __MMAP_UNLOCK_WORK_H__ */ diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c index 6e75bbee39f0..1de0a1b03636 100644 --- a/kernel/bpf/stackmap.c +++ b/kernel/bpf/stackmap.c @@ -7,10 +7,10 @@ #include #include #include -#include #include #include #include "percpu_freelist.h" +#include "mmap_unlock_work.h" #define STACK_CREATE_FLAG_MASK \ (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY | \ @@ -31,25 +31,6 @@ struct bpf_stack_map { struct stack_map_bucket *buckets[]; }; -/* irq_work to run up_read() for build_id lookup in nmi context */ -struct stack_map_irq_work { - struct irq_work irq_work; - struct mm_struct *mm; -}; - -static void do_up_read(struct irq_work *entry) -{ - struct stack_map_irq_work *work; - - if (WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_RT))) - return; - - work = container_of(entry, struct stack_map_irq_work, irq_work); - mmap_read_unlock_non_owner(work->mm); -} - -static DEFINE_PER_CPU(struct stack_map_irq_work, up_read_work); - static inline bool stack_map_use_build_id(struct bpf_map *map) { return (map->map_flags & BPF_F_STACK_BUILD_ID); @@ -149,35 +130,13 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs, u64 *ips, u32 trace_nr, bool user) { int i; + struct mmap_unlock_irq_work *work = NULL; + bool irq_work_busy = bpf_mmap_unlock_get_irq_work(&work); struct vm_area_struct *vma; - bool irq_work_busy = false; - struct stack_map_irq_work *work = NULL; - - if (irqs_disabled()) { - if (!IS_ENABLED(CONFIG_PREEMPT_RT)) { - work = this_cpu_ptr(&up_read_work); - if (irq_work_is_busy(&work->irq_work)) { - /* cannot queue more up_read, fallback */ - irq_work_busy = true; - } - } else { - /* - * PREEMPT_RT does not allow to trylock mmap sem in - * interrupt disabled context. Force the fallback code. - */ - irq_work_busy = true; - } - } - /* - * We cannot do up_read() when the irq is disabled, because of - * risk to deadlock with rq_lock. To do build_id lookup when the - * irqs are disabled, we need to run up_read() in irq_work. We use - * a percpu variable to do the irq_work. If the irq_work is - * already used by another lookup, we fall back to report ips. - * - * Same fallback is used for kernel stack (!user) on a stackmap - * with build_id. + /* If the irq_work is in use, fall back to report ips. Same + * fallback is used for kernel stack (!user) on a stackmap with + * build_id. */ if (!user || !current || !current->mm || irq_work_busy || !mmap_read_trylock(current->mm)) { @@ -203,19 +162,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs, - vma->vm_start; id_offs[i].status = BPF_STACK_BUILD_ID_VALID; } - - if (!work) { - mmap_read_unlock(current->mm); - } else { - work->mm = current->mm; - - /* The lock will be released once we're out of interrupt - * context. Tell lockdep that we've released it now so - * it doesn't complain that we forgot to release it. 
- */ - rwsem_release(¤t->mm->mmap_lock.dep_map, _RET_IP_); - irq_work_queue(&work->irq_work); - } + bpf_mmap_unlock_mm(work, current->mm); } static struct perf_callchain_entry * @@ -719,16 +666,3 @@ const struct bpf_map_ops stack_trace_map_ops = { .map_btf_name = "bpf_stack_map", .map_btf_id = &stack_trace_map_btf_id, }; - -static int __init stack_map_init(void) -{ - int cpu; - struct stack_map_irq_work *work; - - for_each_possible_cpu(cpu) { - work = per_cpu_ptr(&up_read_work, cpu); - init_irq_work(&work->irq_work, do_up_read); - } - return 0; -} -subsys_initcall(stack_map_init); diff --git a/kernel/bpf/task_iter.c b/kernel/bpf/task_iter.c index b48750bfba5a..f171479f7dd6 100644 --- a/kernel/bpf/task_iter.c +++ b/kernel/bpf/task_iter.c @@ -8,6 +8,7 @@ #include #include #include +#include "mmap_unlock_work.h" struct bpf_iter_seq_task_common { struct pid_namespace *ns; @@ -524,10 +525,6 @@ static const struct seq_operations task_vma_seq_ops = { .show = task_vma_seq_show, }; -BTF_ID_LIST(btf_task_file_ids) -BTF_ID(struct, file) -BTF_ID(struct, vm_area_struct) - static const struct bpf_iter_seq_info task_seq_info = { .seq_ops = &task_seq_ops, .init_seq_private = init_seq_pidns, @@ -586,9 +583,74 @@ static struct bpf_iter_reg task_vma_reg_info = { .seq_info = &task_vma_seq_info, }; +BPF_CALL_5(bpf_find_vma, struct task_struct *, task, u64, start, + bpf_callback_t, callback_fn, void *, callback_ctx, u64, flags) +{ + struct mmap_unlock_irq_work *work = NULL; + struct vm_area_struct *vma; + bool irq_work_busy = false; + struct mm_struct *mm; + int ret = -ENOENT; + + if (flags) + return -EINVAL; + + if (!task) + return -ENOENT; + + mm = task->mm; + if (!mm) + return -ENOENT; + + irq_work_busy = bpf_mmap_unlock_get_irq_work(&work); + + if (irq_work_busy || !mmap_read_trylock(mm)) + return -EBUSY; + + vma = find_vma(mm, start); + + if (vma && vma->vm_start <= start && vma->vm_end > start) { + callback_fn((u64)(long)task, (u64)(long)vma, + (u64)(long)callback_ctx, 0, 0); + ret = 0; + } + bpf_mmap_unlock_mm(work, mm); + return ret; +} + +const struct bpf_func_proto bpf_find_vma_proto = { + .func = bpf_find_vma, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_BTF_ID, + .arg1_btf_id = &btf_task_struct_ids[0], + .arg2_type = ARG_ANYTHING, + .arg3_type = ARG_PTR_TO_FUNC, + .arg4_type = ARG_PTR_TO_STACK_OR_NULL, + .arg5_type = ARG_ANYTHING, +}; + +DEFINE_PER_CPU(struct mmap_unlock_irq_work, mmap_unlock_work); + +static void do_mmap_read_unlock(struct irq_work *entry) +{ + struct mmap_unlock_irq_work *work; + + if (WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_RT))) + return; + + work = container_of(entry, struct mmap_unlock_irq_work, irq_work); + mmap_read_unlock_non_owner(work->mm); +} + static int __init task_iter_init(void) { - int ret; + struct mmap_unlock_irq_work *work; + int ret, cpu; + + for_each_possible_cpu(cpu) { + work = per_cpu_ptr(&mmap_unlock_work, cpu); + init_irq_work(&work->irq_work, do_mmap_read_unlock); + } task_reg_info.ctx_arg_info[0].btf_id = btf_task_struct_ids[0]; ret = bpf_iter_reg_target(&task_reg_info); @@ -596,13 +658,13 @@ static int __init task_iter_init(void) return ret; task_file_reg_info.ctx_arg_info[0].btf_id = btf_task_struct_ids[0]; - task_file_reg_info.ctx_arg_info[1].btf_id = btf_task_file_ids[0]; + task_file_reg_info.ctx_arg_info[1].btf_id = btf_task_struct_ids[1]; ret = bpf_iter_reg_target(&task_file_reg_info); if (ret) return ret; task_vma_reg_info.ctx_arg_info[0].btf_id = btf_task_struct_ids[0]; - task_vma_reg_info.ctx_arg_info[1].btf_id = btf_task_file_ids[1]; + 
task_vma_reg_info.ctx_arg_info[1].btf_id = btf_task_struct_ids[2]; return bpf_iter_reg_target(&task_vma_reg_info); } late_initcall(task_iter_init); diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index f0dca726ebfd..1aafb43f61d1 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -6132,6 +6132,33 @@ static int set_timer_callback_state(struct bpf_verifier_env *env, return 0; } +static int set_find_vma_callback_state(struct bpf_verifier_env *env, + struct bpf_func_state *caller, + struct bpf_func_state *callee, + int insn_idx) +{ + /* bpf_find_vma(struct task_struct *task, u64 addr, + * void *callback_fn, void *callback_ctx, u64 flags) + * (callback_fn)(struct task_struct *task, + * struct vm_area_struct *vma, void *callback_ctx); + */ + callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1]; + + callee->regs[BPF_REG_2].type = PTR_TO_BTF_ID; + __mark_reg_known_zero(&callee->regs[BPF_REG_2]); + callee->regs[BPF_REG_2].btf = btf_vmlinux; + callee->regs[BPF_REG_2].btf_id = btf_task_struct_ids[2]; + + /* pointer to stack or null */ + callee->regs[BPF_REG_3] = caller->regs[BPF_REG_4]; + + /* unused */ + __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); + __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); + callee->in_callback_fn = true; + return 0; +} + static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx) { struct bpf_verifier_state *state = env->cur_state; @@ -6489,6 +6516,13 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn return -EINVAL; } + if (func_id == BPF_FUNC_find_vma) { + err = __check_func_call(env, insn, insn_idx_p, meta.subprogno, + set_find_vma_callback_state); + if (err < 0) + return -EINVAL; + } + if (func_id == BPF_FUNC_snprintf) { err = check_bpf_snprintf_call(env, regs); if (err < 0) diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 7396488793ff..390176a3031a 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -1208,6 +1208,8 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_get_func_ip_proto_tracing; case BPF_FUNC_get_branch_snapshot: return &bpf_get_branch_snapshot_proto; + case BPF_FUNC_find_vma: + return &bpf_find_vma_proto; case BPF_FUNC_trace_vprintk: return bpf_get_trace_vprintk_proto(); default: diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index ba5af15e25f5..509eee5f0393 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -4938,6 +4938,25 @@ union bpf_attr { * **-ENOENT** if symbol is not found. * * **-EPERM** if caller does not have permission to obtain kernel address. + * + * long bpf_find_vma(struct task_struct *task, u64 addr, void *callback_fn, void *callback_ctx, u64 flags) + * Description + * Find vma of *task* that contains *addr*, call *callback_fn* + * function with *task*, *vma*, and *callback_ctx*. + * The *callback_fn* should be a static function and + * the *callback_ctx* should be a pointer to the stack. + * The *flags* is used to control certain aspects of the helper. + * Currently, the *flags* must be 0. + * + * The expected callback signature is + * + * long (\*callback_fn)(struct task_struct \*task, struct vm_area_struct \*vma, void \*callback_ctx); + * + * Return + * 0 on success. + * **-ENOENT** if *task->mm* is NULL, or no vma contains *addr*. + * **-EBUSY** if failed to try lock mmap_lock. + * **-EINVAL** for invalid **flags**. 
*/ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -5120,6 +5139,7 @@ union bpf_attr { FN(trace_vprintk), \ FN(skc_to_unix_sock), \ FN(kallsyms_lookup_name), \ + FN(find_vma), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper -- cgit v1.2.3 From f108662b27c96cdadfadd39f0c0d650704cd593d Mon Sep 17 00:00:00 2001 From: Song Liu Date: Fri, 5 Nov 2021 16:23:30 -0700 Subject: selftests/bpf: Add tests for bpf_find_vma Add tests for bpf_find_vma in perf_event program and kprobe program. The perf_event program is triggered from NMI context, so the second call of bpf_find_vma() will return -EBUSY (irq_work busy). The kprobe program, on the other hand, does not have this constraint. Also add tests for illegal writes to task or vma from the callback function. The verifier should reject both cases. Signed-off-by: Song Liu Signed-off-by: Alexei Starovoitov Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/20211105232330.1936330-3-songliubraving@fb.com --- tools/testing/selftests/bpf/prog_tests/find_vma.c | 117 +++++++++++++++++++++ tools/testing/selftests/bpf/progs/find_vma.c | 69 ++++++++++++ tools/testing/selftests/bpf/progs/find_vma_fail1.c | 29 +++++ tools/testing/selftests/bpf/progs/find_vma_fail2.c | 29 +++++ 4 files changed, 244 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/find_vma.c create mode 100644 tools/testing/selftests/bpf/progs/find_vma.c create mode 100644 tools/testing/selftests/bpf/progs/find_vma_fail1.c create mode 100644 tools/testing/selftests/bpf/progs/find_vma_fail2.c diff --git a/tools/testing/selftests/bpf/prog_tests/find_vma.c b/tools/testing/selftests/bpf/prog_tests/find_vma.c new file mode 100644 index 000000000000..b74b3c0c555a --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/find_vma.c @@ -0,0 +1,117 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ +#include +#include +#include +#include "find_vma.skel.h" +#include "find_vma_fail1.skel.h" +#include "find_vma_fail2.skel.h" + +static void test_and_reset_skel(struct find_vma *skel, int expected_find_zero_ret) +{ + ASSERT_EQ(skel->bss->found_vm_exec, 1, "found_vm_exec"); + ASSERT_EQ(skel->data->find_addr_ret, 0, "find_addr_ret"); + ASSERT_EQ(skel->data->find_zero_ret, expected_find_zero_ret, "find_zero_ret"); + ASSERT_OK_PTR(strstr(skel->bss->d_iname, "test_progs"), "find_test_progs"); + + skel->bss->found_vm_exec = 0; + skel->data->find_addr_ret = -1; + skel->data->find_zero_ret = -1; + skel->bss->d_iname[0] = 0; +} + +static int open_pe(void) +{ + struct perf_event_attr attr = {0}; + int pfd; + + /* create perf event */ + attr.size = sizeof(attr); + attr.type = PERF_TYPE_HARDWARE; + attr.config = PERF_COUNT_HW_CPU_CYCLES; + attr.freq = 1; + attr.sample_freq = 4000; + pfd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, PERF_FLAG_FD_CLOEXEC); + + return pfd >= 0 ? 
pfd : -errno; +} + +static void test_find_vma_pe(struct find_vma *skel) +{ + struct bpf_link *link = NULL; + volatile int j = 0; + int pfd, i; + + pfd = open_pe(); + if (pfd < 0) { + if (pfd == -ENOENT || pfd == -EOPNOTSUPP) { + printf("%s:SKIP:no PERF_COUNT_HW_CPU_CYCLES\n", __func__); + test__skip(); + goto cleanup; + } + if (!ASSERT_GE(pfd, 0, "perf_event_open")) + goto cleanup; + } + + link = bpf_program__attach_perf_event(skel->progs.handle_pe, pfd); + if (!ASSERT_OK_PTR(link, "attach_perf_event")) + goto cleanup; + + for (i = 0; i < 1000000; ++i) + ++j; + + test_and_reset_skel(skel, -EBUSY /* in nmi, irq_work is busy */); +cleanup: + bpf_link__destroy(link); + close(pfd); +} + +static void test_find_vma_kprobe(struct find_vma *skel) +{ + int err; + + err = find_vma__attach(skel); + if (!ASSERT_OK(err, "get_branch_snapshot__attach")) + return; + + getpgid(skel->bss->target_pid); + test_and_reset_skel(skel, -ENOENT /* could not find vma for ptr 0 */); +} + +static void test_illegal_write_vma(void) +{ + struct find_vma_fail1 *skel; + + skel = find_vma_fail1__open_and_load(); + if (!ASSERT_ERR_PTR(skel, "find_vma_fail1__open_and_load")) + find_vma_fail1__destroy(skel); +} + +static void test_illegal_write_task(void) +{ + struct find_vma_fail2 *skel; + + skel = find_vma_fail2__open_and_load(); + if (!ASSERT_ERR_PTR(skel, "find_vma_fail2__open_and_load")) + find_vma_fail2__destroy(skel); +} + +void serial_test_find_vma(void) +{ + struct find_vma *skel; + + skel = find_vma__open_and_load(); + if (!ASSERT_OK_PTR(skel, "find_vma__open_and_load")) + return; + + skel->bss->target_pid = getpid(); + skel->bss->addr = (__u64)(uintptr_t)test_find_vma_pe; + + test_find_vma_pe(skel); + usleep(100000); /* allow the irq_work to finish */ + test_find_vma_kprobe(skel); + + find_vma__destroy(skel); + test_illegal_write_vma(); + test_illegal_write_task(); +} diff --git a/tools/testing/selftests/bpf/progs/find_vma.c b/tools/testing/selftests/bpf/progs/find_vma.c new file mode 100644 index 000000000000..38034fb82530 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/find_vma.c @@ -0,0 +1,69 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ +#include "vmlinux.h" +#include +#include + +char _license[] SEC("license") = "GPL"; + +struct callback_ctx { + int dummy; +}; + +#define VM_EXEC 0x00000004 +#define DNAME_INLINE_LEN 32 + +pid_t target_pid = 0; +char d_iname[DNAME_INLINE_LEN] = {0}; +__u32 found_vm_exec = 0; +__u64 addr = 0; +int find_zero_ret = -1; +int find_addr_ret = -1; + +static long check_vma(struct task_struct *task, struct vm_area_struct *vma, + struct callback_ctx *data) +{ + if (vma->vm_file) + bpf_probe_read_kernel_str(d_iname, DNAME_INLINE_LEN - 1, + vma->vm_file->f_path.dentry->d_iname); + + /* check for VM_EXEC */ + if (vma->vm_flags & VM_EXEC) + found_vm_exec = 1; + + return 0; +} + +SEC("raw_tp/sys_enter") +int handle_getpid(void) +{ + struct task_struct *task = bpf_get_current_task_btf(); + struct callback_ctx data = {}; + + if (task->pid != target_pid) + return 0; + + find_addr_ret = bpf_find_vma(task, addr, check_vma, &data, 0); + + /* this should return -ENOENT */ + find_zero_ret = bpf_find_vma(task, 0, check_vma, &data, 0); + return 0; +} + +SEC("perf_event") +int handle_pe(void) +{ + struct task_struct *task = bpf_get_current_task_btf(); + struct callback_ctx data = {}; + + if (task->pid != target_pid) + return 0; + + find_addr_ret = bpf_find_vma(task, addr, check_vma, &data, 0); + + /* In NMI, this should return -EBUSY, as the previous call is using + * 
the irq_work. + */ + find_zero_ret = bpf_find_vma(task, 0, check_vma, &data, 0); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/find_vma_fail1.c b/tools/testing/selftests/bpf/progs/find_vma_fail1.c new file mode 100644 index 000000000000..b3b326b8e2d1 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/find_vma_fail1.c @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ +#include "vmlinux.h" +#include + +char _license[] SEC("license") = "GPL"; + +struct callback_ctx { + int dummy; +}; + +static long write_vma(struct task_struct *task, struct vm_area_struct *vma, + struct callback_ctx *data) +{ + /* writing to vma, which is illegal */ + vma->vm_flags |= 0x55; + + return 0; +} + +SEC("raw_tp/sys_enter") +int handle_getpid(void) +{ + struct task_struct *task = bpf_get_current_task_btf(); + struct callback_ctx data = {}; + + bpf_find_vma(task, 0, write_vma, &data, 0); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/find_vma_fail2.c b/tools/testing/selftests/bpf/progs/find_vma_fail2.c new file mode 100644 index 000000000000..9bcf3203e26b --- /dev/null +++ b/tools/testing/selftests/bpf/progs/find_vma_fail2.c @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ +#include "vmlinux.h" +#include + +char _license[] SEC("license") = "GPL"; + +struct callback_ctx { + int dummy; +}; + +static long write_task(struct task_struct *task, struct vm_area_struct *vma, + struct callback_ctx *data) +{ + /* writing to task, which is illegal */ + task->mm = NULL; + + return 0; +} + +SEC("raw_tp/sys_enter") +int handle_getpid(void) +{ + struct task_struct *task = bpf_get_current_task_btf(); + struct callback_ctx data = {}; + + bpf_find_vma(task, 0, write_task, &data, 0); + return 0; +} -- cgit v1.2.3 From b89ddf4cca43f1269093942cf5c4e457fd45c335 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 5 Nov 2021 16:50:45 +0000 Subject: arm64/bpf: Remove 128MB limit for BPF JIT programs Commit 91fc957c9b1d ("arm64/bpf: don't allocate BPF JIT programs in module memory") restricts BPF JIT program allocation to a 128MB region to ensure BPF programs are still in branching range of each other. However this restriction should not apply to the aarch64 JIT, since BPF_JMP | BPF_CALL are implemented as a 64-bit move into a register and then a BLR instruction - which has the effect of being able to call anything without proximity limitation. The practical reason to relax this restriction on JIT memory is that 128MB of JIT memory can be quickly exhausted, especially where PAGE_SIZE is 64KB - one page is needed per program. In cases where seccomp filters are applied to multiple VMs on VM launch - such filters are classic BPF but converted to BPF - this can severely limit the number of VMs that can be launched. In a world where we support BPF JIT always on, turning off the JIT isn't always an option either. 
Fixes: 91fc957c9b1d ("arm64/bpf: don't allocate BPF JIT programs in module memory") Suggested-by: Ard Biesheuvel Signed-off-by: Russell King Signed-off-by: Daniel Borkmann Tested-by: Alan Maguire Link: https://lore.kernel.org/bpf/1636131046-5982-2-git-send-email-alan.maguire@oracle.com --- arch/arm64/include/asm/extable.h | 9 --------- arch/arm64/include/asm/memory.h | 5 +---- arch/arm64/kernel/traps.c | 2 +- arch/arm64/mm/ptdump.c | 2 -- arch/arm64/net/bpf_jit_comp.c | 7 ++----- 5 files changed, 4 insertions(+), 21 deletions(-) diff --git a/arch/arm64/include/asm/extable.h b/arch/arm64/include/asm/extable.h index 8b300dd28def..72b0e71cc3de 100644 --- a/arch/arm64/include/asm/extable.h +++ b/arch/arm64/include/asm/extable.h @@ -33,15 +33,6 @@ do { \ (b)->data = (tmp).data; \ } while (0) -static inline bool in_bpf_jit(struct pt_regs *regs) -{ - if (!IS_ENABLED(CONFIG_BPF_JIT)) - return false; - - return regs->pc >= BPF_JIT_REGION_START && - regs->pc < BPF_JIT_REGION_END; -} - #ifdef CONFIG_BPF_JIT bool ex_handler_bpf(const struct exception_table_entry *ex, struct pt_regs *regs); diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 1b9a1e242612..0af70d9abede 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -44,11 +44,8 @@ #define _PAGE_OFFSET(va) (-(UL(1) << (va))) #define PAGE_OFFSET (_PAGE_OFFSET(VA_BITS)) #define KIMAGE_VADDR (MODULES_END) -#define BPF_JIT_REGION_START (_PAGE_END(VA_BITS_MIN)) -#define BPF_JIT_REGION_SIZE (SZ_128M) -#define BPF_JIT_REGION_END (BPF_JIT_REGION_START + BPF_JIT_REGION_SIZE) #define MODULES_END (MODULES_VADDR + MODULES_VSIZE) -#define MODULES_VADDR (BPF_JIT_REGION_END) +#define MODULES_VADDR (_PAGE_END(VA_BITS_MIN)) #define MODULES_VSIZE (SZ_128M) #define VMEMMAP_START (-(UL(1) << (VA_BITS - VMEMMAP_SHIFT))) #define VMEMMAP_END (VMEMMAP_START + VMEMMAP_SIZE) diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 7b21213a570f..e8986e6067a9 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -994,7 +994,7 @@ static struct break_hook bug_break_hook = { static int reserved_fault_handler(struct pt_regs *regs, unsigned int esr) { pr_err("%s generated an invalid instruction at %pS!\n", - in_bpf_jit(regs) ? 
"BPF JIT" : "Kernel text patching", + "Kernel text patching", (void *)instruction_pointer(regs)); /* We cannot handle this */ diff --git a/arch/arm64/mm/ptdump.c b/arch/arm64/mm/ptdump.c index 1c403536c9bb..9bc4066c5bf3 100644 --- a/arch/arm64/mm/ptdump.c +++ b/arch/arm64/mm/ptdump.c @@ -41,8 +41,6 @@ static struct addr_marker address_markers[] = { { 0 /* KASAN_SHADOW_START */, "Kasan shadow start" }, { KASAN_SHADOW_END, "Kasan shadow end" }, #endif - { BPF_JIT_REGION_START, "BPF start" }, - { BPF_JIT_REGION_END, "BPF end" }, { MODULES_VADDR, "Modules start" }, { MODULES_END, "Modules end" }, { VMALLOC_START, "vmalloc() area" }, diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index 3a8a7140a9bf..86c9dc0681cc 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -1141,15 +1141,12 @@ out: u64 bpf_jit_alloc_exec_limit(void) { - return BPF_JIT_REGION_SIZE; + return VMALLOC_END - VMALLOC_START; } void *bpf_jit_alloc_exec(unsigned long size) { - return __vmalloc_node_range(size, PAGE_SIZE, BPF_JIT_REGION_START, - BPF_JIT_REGION_END, GFP_KERNEL, - PAGE_KERNEL, 0, NUMA_NO_NODE, - __builtin_return_address(0)); + return vmalloc(size); } void bpf_jit_free_exec(void *addr) -- cgit v1.2.3 From c23551c9c36ae394f9c53a5adf1944a943c65e0b Mon Sep 17 00:00:00 2001 From: Alan Maguire Date: Fri, 5 Nov 2021 16:50:46 +0000 Subject: selftests/bpf: Add exception handling selftests for tp_bpf program Exception handling is triggered in BPF tracing programs when a NULL pointer is dereferenced; the exception handler zeroes the target register and execution of the BPF program progresses. To test exception handling then, we need to trigger a NULL pointer dereference for a field which should never be zero; if it is, the only explanation is the exception handler ran. task->task_works is the NULL pointer chosen (for a new task from fork() no work is associated), and the task_works->func field should not be zero if task_works is non-NULL. The test verifies that task_works and task_works->func are 0. Signed-off-by: Alan Maguire Signed-off-by: Daniel Borkmann Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/1636131046-5982-3-git-send-email-alan.maguire@oracle.com --- tools/testing/selftests/bpf/prog_tests/exhandler.c | 43 ++++++++++++++++++++++ tools/testing/selftests/bpf/progs/exhandler_kern.c | 43 ++++++++++++++++++++++ 2 files changed, 86 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/exhandler.c create mode 100644 tools/testing/selftests/bpf/progs/exhandler_kern.c diff --git a/tools/testing/selftests/bpf/prog_tests/exhandler.c b/tools/testing/selftests/bpf/prog_tests/exhandler.c new file mode 100644 index 000000000000..118bb182ee20 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/exhandler.c @@ -0,0 +1,43 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021, Oracle and/or its affiliates. */ + +#include + +/* Test that verifies exception handling is working. fork() + * triggers task_newtask tracepoint; that new task will have a + * NULL pointer task_works, and the associated task->task_works->func + * should not be NULL if task_works itself is non-NULL. + * + * So to verify exception handling we want to see a NULL task_works + * and task_works->func; if we see this we can conclude that the + * exception handler ran when we attempted to dereference task->task_works + * and zeroed the destination register. 
+ */ +#include "exhandler_kern.skel.h" + +void test_exhandler(void) +{ + int err = 0, duration = 0, status; + struct exhandler_kern *skel; + pid_t cpid; + + skel = exhandler_kern__open_and_load(); + if (CHECK(!skel, "skel_load", "skeleton failed: %d\n", err)) + goto cleanup; + + skel->bss->test_pid = getpid(); + + err = exhandler_kern__attach(skel); + if (!ASSERT_OK(err, "attach")) + goto cleanup; + cpid = fork(); + if (!ASSERT_GT(cpid, -1, "fork failed")) + goto cleanup; + if (cpid == 0) + _exit(0); + waitpid(cpid, &status, 0); + + ASSERT_NEQ(skel->bss->exception_triggered, 0, "verify exceptions occurred"); +cleanup: + exhandler_kern__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/progs/exhandler_kern.c b/tools/testing/selftests/bpf/progs/exhandler_kern.c new file mode 100644 index 000000000000..f5ca142abf8f --- /dev/null +++ b/tools/testing/selftests/bpf/progs/exhandler_kern.c @@ -0,0 +1,43 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021, Oracle and/or its affiliates. */ + +#include "vmlinux.h" + +#include +#include +#include + +char _license[] SEC("license") = "GPL"; + +unsigned int exception_triggered; +int test_pid; + +/* TRACE_EVENT(task_newtask, + * TP_PROTO(struct task_struct *p, u64 clone_flags) + */ +SEC("tp_btf/task_newtask") +int BPF_PROG(trace_task_newtask, struct task_struct *task, u64 clone_flags) +{ + int pid = bpf_get_current_pid_tgid() >> 32; + struct callback_head *work; + void *func; + + if (test_pid != pid) + return 0; + + /* To verify we hit an exception we dereference task->task_works->func. + * If task work has been added, + * - task->task_works is non-NULL; and + * - task->task_works->func is non-NULL also (the callback function + * must be specified for the task work. + * + * However, for a newly-created task, task->task_works is NULLed, + * so we know the exception handler triggered if task_works is + * NULL and func is NULL. + */ + work = task->task_works; + func = work->func; + if (!work && !func) + exception_triggered++; + return 0; +} -- cgit v1.2.3 From 3a74ac2d1159716f35c944639f71b33fa16084c8 Mon Sep 17 00:00:00 2001 From: Kumar Kartikeya Dwivedi Date: Sat, 6 Nov 2021 05:12:40 +0530 Subject: libbpf: Compile using -std=gnu89 The minimum supported C standard version is C89, with use of GNU extensions, hence make sure to catch any instances that would break the build for this mode by passing -std=gnu89. Signed-off-by: Kumar Kartikeya Dwivedi Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20211105234243.390179-4-memxor@gmail.com --- tools/lib/bpf/Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile index b393b5e82380..5f7086fae31c 100644 --- a/tools/lib/bpf/Makefile +++ b/tools/lib/bpf/Makefile @@ -84,6 +84,7 @@ else endif # Append required CFLAGS +override CFLAGS += -std=gnu89 override CFLAGS += $(EXTRA_WARNINGS) -Wno-switch-enum override CFLAGS += -Werror -Wall override CFLAGS += $(INCLUDES) -- cgit v1.2.3 From 1a8b597ddabe7dc25aa9defd33949d455ee9cde8 Mon Sep 17 00:00:00 2001 From: Quentin Monnet Date: Fri, 5 Nov 2021 22:19:04 +0000 Subject: bpftool: Fix SPDX tag for Makefiles and .gitignore Bpftool is dual-licensed under GPLv2 and BSD-2-Clause. In commit 907b22365115 ("tools: bpftool: dual license all files") we made sure that all its source files were indeed covered by the two licenses, and that they had the correct SPDX tags. 
However, bpftool's Makefile, the Makefile for its documentation, and the .gitignore file were skipped at the time (their GPL-2.0-only tag was added later). Let's update the tags. Signed-off-by: Quentin Monnet Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Acked-by: Tobias Klauser Acked-by: Joe Stringer Acked-by: Song Liu Acked-by: Jean-Philippe Brucker Acked-by: Jesper Dangaard Brouer Acked-by: Jakub Kicinski Link: https://lore.kernel.org/bpf/20211105221904.3536-1-quentin@isovalent.com --- tools/bpf/bpftool/.gitignore | 2 +- tools/bpf/bpftool/Documentation/Makefile | 2 +- tools/bpf/bpftool/Makefile | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/bpf/bpftool/.gitignore b/tools/bpf/bpftool/.gitignore index 05ce4446b780..a736f64dc5dc 100644 --- a/tools/bpf/bpftool/.gitignore +++ b/tools/bpf/bpftool/.gitignore @@ -1,4 +1,4 @@ -# SPDX-License-Identifier: GPL-2.0-only +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) *.d /bootstrap/ /bpftool diff --git a/tools/bpf/bpftool/Documentation/Makefile b/tools/bpf/bpftool/Documentation/Makefile index c49487905ceb..44b60784847b 100644 --- a/tools/bpf/bpftool/Documentation/Makefile +++ b/tools/bpf/bpftool/Documentation/Makefile @@ -1,4 +1,4 @@ -# SPDX-License-Identifier: GPL-2.0-only +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) include ../../../scripts/Makefile.include include ../../../scripts/utilities.mak diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile index c0c30e56988f..622568c7a9b8 100644 --- a/tools/bpf/bpftool/Makefile +++ b/tools/bpf/bpftool/Makefile @@ -1,4 +1,4 @@ -# SPDX-License-Identifier: GPL-2.0-only +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) include ../../scripts/Makefile.include include ../../scripts/utilities.mak -- cgit v1.2.3 From f89315650ba34ec6c91a8bded72796980bee2a4d Mon Sep 17 00:00:00 2001 From: Mark Pashmfouroush Date: Wed, 10 Nov 2021 11:10:15 +0000 Subject: bpf: Add ingress_ifindex to bpf_sk_lookup It may be helpful to have access to the ifindex during bpf socket lookup. An example may be to scope certain socket lookup logic to specific interfaces, i.e. an interface may be made exempt from custom lookup code. Add the ifindex of the arriving connection to the bpf_sk_lookup API. 
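As a minimal sketch of the use case mentioned above, a lookup program that exempts one interface from custom steering (the program name and the exempted ifindex value are illustrative, and this assumes UAPI headers that already carry the new field):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

/* Interface to exempt from the custom lookup logic (placeholder value). */
const volatile __u32 exempt_ifindex = 1;

SEC("sk_lookup")
int exempt_ifindex_lookup(struct bpf_sk_lookup *ctx)
{
	/* Packets arriving on the exempted interface fall through to the
	 * regular socket lookup, since no socket is selected here.
	 */
	if (ctx->ingress_ifindex == exempt_ifindex)
		return SK_PASS;

	/* ... custom steering would go here, e.g. via bpf_sk_assign() ... */
	return SK_PASS;
}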
Signed-off-by: Mark Pashmfouroush Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20211110111016.5670-2-markpash@cloudflare.com --- include/linux/filter.h | 7 +++++-- include/uapi/linux/bpf.h | 1 + net/core/filter.c | 7 +++++++ net/ipv4/inet_hashtables.c | 8 ++++---- net/ipv4/udp.c | 8 ++++---- net/ipv6/inet6_hashtables.c | 8 ++++---- net/ipv6/udp.c | 8 ++++---- tools/include/uapi/linux/bpf.h | 1 + 8 files changed, 30 insertions(+), 18 deletions(-) diff --git a/include/linux/filter.h b/include/linux/filter.h index 24b7ed2677af..b6a216eb217a 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -1374,6 +1374,7 @@ struct bpf_sk_lookup_kern { const struct in6_addr *daddr; } v6; struct sock *selected_sk; + u32 ingress_ifindex; bool no_reuseport; }; @@ -1436,7 +1437,7 @@ extern struct static_key_false bpf_sk_lookup_enabled; static inline bool bpf_sk_lookup_run_v4(struct net *net, int protocol, const __be32 saddr, const __be16 sport, const __be32 daddr, const u16 dport, - struct sock **psk) + const int ifindex, struct sock **psk) { struct bpf_prog_array *run_array; struct sock *selected_sk = NULL; @@ -1452,6 +1453,7 @@ static inline bool bpf_sk_lookup_run_v4(struct net *net, int protocol, .v4.daddr = daddr, .sport = sport, .dport = dport, + .ingress_ifindex = ifindex, }; u32 act; @@ -1474,7 +1476,7 @@ static inline bool bpf_sk_lookup_run_v6(struct net *net, int protocol, const __be16 sport, const struct in6_addr *daddr, const u16 dport, - struct sock **psk) + const int ifindex, struct sock **psk) { struct bpf_prog_array *run_array; struct sock *selected_sk = NULL; @@ -1490,6 +1492,7 @@ static inline bool bpf_sk_lookup_run_v6(struct net *net, int protocol, .v6.daddr = daddr, .sport = sport, .dport = dport, + .ingress_ifindex = ifindex, }; u32 act; diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 509eee5f0393..6297eafdc40f 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -6316,6 +6316,7 @@ struct bpf_sk_lookup { __u32 local_ip4; /* Network byte order */ __u32 local_ip6[4]; /* Network byte order */ __u32 local_port; /* Host byte order */ + __u32 ingress_ifindex; /* The arriving interface. Determined by inet_iif. 
*/ }; /* diff --git a/net/core/filter.c b/net/core/filter.c index 8e8d3b49c297..315a58466fc9 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -10491,6 +10491,7 @@ static bool sk_lookup_is_valid_access(int off, int size, case bpf_ctx_range_till(struct bpf_sk_lookup, local_ip6[0], local_ip6[3]): case bpf_ctx_range(struct bpf_sk_lookup, remote_port): case bpf_ctx_range(struct bpf_sk_lookup, local_port): + case bpf_ctx_range(struct bpf_sk_lookup, ingress_ifindex): bpf_ctx_record_field_size(info, sizeof(__u32)); return bpf_ctx_narrow_access_ok(off, size, sizeof(__u32)); @@ -10580,6 +10581,12 @@ static u32 sk_lookup_convert_ctx_access(enum bpf_access_type type, bpf_target_off(struct bpf_sk_lookup_kern, dport, 2, target_size)); break; + + case offsetof(struct bpf_sk_lookup, ingress_ifindex): + *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, + bpf_target_off(struct bpf_sk_lookup_kern, + ingress_ifindex, 4, target_size)); + break; } return insn - insn_buf; diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index 75737267746f..30ab717ff1b8 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -307,7 +307,7 @@ static inline struct sock *inet_lookup_run_bpf(struct net *net, struct inet_hashinfo *hashinfo, struct sk_buff *skb, int doff, __be32 saddr, __be16 sport, - __be32 daddr, u16 hnum) + __be32 daddr, u16 hnum, const int dif) { struct sock *sk, *reuse_sk; bool no_reuseport; @@ -315,8 +315,8 @@ static inline struct sock *inet_lookup_run_bpf(struct net *net, if (hashinfo != &tcp_hashinfo) return NULL; /* only TCP is supported */ - no_reuseport = bpf_sk_lookup_run_v4(net, IPPROTO_TCP, - saddr, sport, daddr, hnum, &sk); + no_reuseport = bpf_sk_lookup_run_v4(net, IPPROTO_TCP, saddr, sport, + daddr, hnum, dif, &sk); if (no_reuseport || IS_ERR_OR_NULL(sk)) return sk; @@ -340,7 +340,7 @@ struct sock *__inet_lookup_listener(struct net *net, /* Lookup redirect from BPF */ if (static_branch_unlikely(&bpf_sk_lookup_enabled)) { result = inet_lookup_run_bpf(net, hashinfo, skb, doff, - saddr, sport, daddr, hnum); + saddr, sport, daddr, hnum, dif); if (result) goto done; } diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 2fffcf2b54f3..5fceee3de65d 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -460,7 +460,7 @@ static struct sock *udp4_lookup_run_bpf(struct net *net, struct udp_table *udptable, struct sk_buff *skb, __be32 saddr, __be16 sport, - __be32 daddr, u16 hnum) + __be32 daddr, u16 hnum, const int dif) { struct sock *sk, *reuse_sk; bool no_reuseport; @@ -468,8 +468,8 @@ static struct sock *udp4_lookup_run_bpf(struct net *net, if (udptable != &udp_table) return NULL; /* only UDP is supported */ - no_reuseport = bpf_sk_lookup_run_v4(net, IPPROTO_UDP, - saddr, sport, daddr, hnum, &sk); + no_reuseport = bpf_sk_lookup_run_v4(net, IPPROTO_UDP, saddr, sport, + daddr, hnum, dif, &sk); if (no_reuseport || IS_ERR_OR_NULL(sk)) return sk; @@ -505,7 +505,7 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, /* Lookup redirect from BPF */ if (static_branch_unlikely(&bpf_sk_lookup_enabled)) { sk = udp4_lookup_run_bpf(net, udptable, skb, - saddr, sport, daddr, hnum); + saddr, sport, daddr, hnum, dif); if (sk) { result = sk; goto done; diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c index 67c9114835c8..4514444e96c8 100644 --- a/net/ipv6/inet6_hashtables.c +++ b/net/ipv6/inet6_hashtables.c @@ -165,7 +165,7 @@ static inline struct sock *inet6_lookup_run_bpf(struct net *net, const struct in6_addr *saddr, const __be16 sport, const 
struct in6_addr *daddr, - const u16 hnum) + const u16 hnum, const int dif) { struct sock *sk, *reuse_sk; bool no_reuseport; @@ -173,8 +173,8 @@ static inline struct sock *inet6_lookup_run_bpf(struct net *net, if (hashinfo != &tcp_hashinfo) return NULL; /* only TCP is supported */ - no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_TCP, - saddr, sport, daddr, hnum, &sk); + no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_TCP, saddr, sport, + daddr, hnum, dif, &sk); if (no_reuseport || IS_ERR_OR_NULL(sk)) return sk; @@ -198,7 +198,7 @@ struct sock *inet6_lookup_listener(struct net *net, /* Lookup redirect from BPF */ if (static_branch_unlikely(&bpf_sk_lookup_enabled)) { result = inet6_lookup_run_bpf(net, hashinfo, skb, doff, - saddr, sport, daddr, hnum); + saddr, sport, daddr, hnum, dif); if (result) goto done; } diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 12c12619ee35..ea4ea525f94a 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -195,7 +195,7 @@ static inline struct sock *udp6_lookup_run_bpf(struct net *net, const struct in6_addr *saddr, __be16 sport, const struct in6_addr *daddr, - u16 hnum) + u16 hnum, const int dif) { struct sock *sk, *reuse_sk; bool no_reuseport; @@ -203,8 +203,8 @@ static inline struct sock *udp6_lookup_run_bpf(struct net *net, if (udptable != &udp_table) return NULL; /* only UDP is supported */ - no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_UDP, - saddr, sport, daddr, hnum, &sk); + no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_UDP, saddr, sport, + daddr, hnum, dif, &sk); if (no_reuseport || IS_ERR_OR_NULL(sk)) return sk; @@ -240,7 +240,7 @@ struct sock *__udp6_lib_lookup(struct net *net, /* Lookup redirect from BPF */ if (static_branch_unlikely(&bpf_sk_lookup_enabled)) { sk = udp6_lookup_run_bpf(net, udptable, skb, - saddr, sport, daddr, hnum); + saddr, sport, daddr, hnum, dif); if (sk) { result = sk; goto done; diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 509eee5f0393..6297eafdc40f 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -6316,6 +6316,7 @@ struct bpf_sk_lookup { __u32 local_ip4; /* Network byte order */ __u32 local_ip6[4]; /* Network byte order */ __u32 local_port; /* Host byte order */ + __u32 ingress_ifindex; /* The arriving interface. Determined by inet_iif. */ }; /* -- cgit v1.2.3 From 8b4fd2bf1f47c3e3a63c327fca2ad5c4e2691ef8 Mon Sep 17 00:00:00 2001 From: Mark Pashmfouroush Date: Wed, 10 Nov 2021 11:10:16 +0000 Subject: selftests/bpf: Add tests for accessing ingress_ifindex in bpf_sk_lookup A new field was added to the bpf_sk_lookup data that users can access. Add tests that validate that the new ingress_ifindex field contains the right data. 
Signed-off-by: Mark Pashmfouroush Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20211110111016.5670-3-markpash@cloudflare.com --- tools/testing/selftests/bpf/prog_tests/sk_lookup.c | 31 +++++++++++++++++++++ tools/testing/selftests/bpf/progs/test_sk_lookup.c | 8 ++++++ .../testing/selftests/bpf/verifier/ctx_sk_lookup.c | 32 ++++++++++++++++++++++ 3 files changed, 71 insertions(+) diff --git a/tools/testing/selftests/bpf/prog_tests/sk_lookup.c b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c index 6db07401bc49..57846cc7ce36 100644 --- a/tools/testing/selftests/bpf/prog_tests/sk_lookup.c +++ b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c @@ -937,6 +937,37 @@ static void test_drop_on_lookup(struct test_sk_lookup *skel) .connect_to = { EXT_IP6, EXT_PORT }, .listen_at = { EXT_IP6, INT_PORT }, }, + /* The program will drop on success, meaning that the ifindex + * was 1. + */ + { + .desc = "TCP IPv4 drop on valid ifindex", + .lookup_prog = skel->progs.check_ifindex, + .sotype = SOCK_STREAM, + .connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { EXT_IP4, EXT_PORT }, + }, + { + .desc = "TCP IPv6 drop on valid ifindex", + .lookup_prog = skel->progs.check_ifindex, + .sotype = SOCK_STREAM, + .connect_to = { EXT_IP6, EXT_PORT }, + .listen_at = { EXT_IP6, EXT_PORT }, + }, + { + .desc = "UDP IPv4 drop on valid ifindex", + .lookup_prog = skel->progs.check_ifindex, + .sotype = SOCK_DGRAM, + .connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { EXT_IP4, EXT_PORT }, + }, + { + .desc = "UDP IPv6 drop on valid ifindex", + .lookup_prog = skel->progs.check_ifindex, + .sotype = SOCK_DGRAM, + .connect_to = { EXT_IP6, EXT_PORT }, + .listen_at = { EXT_IP6, EXT_PORT }, + }, }; const struct test *t; diff --git a/tools/testing/selftests/bpf/progs/test_sk_lookup.c b/tools/testing/selftests/bpf/progs/test_sk_lookup.c index 19d2465d9442..83b0aaa52ef7 100644 --- a/tools/testing/selftests/bpf/progs/test_sk_lookup.c +++ b/tools/testing/selftests/bpf/progs/test_sk_lookup.c @@ -84,6 +84,14 @@ int lookup_drop(struct bpf_sk_lookup *ctx) return SK_DROP; } +SEC("sk_lookup") +int check_ifindex(struct bpf_sk_lookup *ctx) +{ + if (ctx->ingress_ifindex == 1) + return SK_DROP; + return SK_PASS; +} + SEC("sk_reuseport") int reuseport_pass(struct sk_reuseport_md *ctx) { diff --git a/tools/testing/selftests/bpf/verifier/ctx_sk_lookup.c b/tools/testing/selftests/bpf/verifier/ctx_sk_lookup.c index d78627be060f..a2b006e2fd06 100644 --- a/tools/testing/selftests/bpf/verifier/ctx_sk_lookup.c +++ b/tools/testing/selftests/bpf/verifier/ctx_sk_lookup.c @@ -229,6 +229,24 @@ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct bpf_sk_lookup, local_port)), + /* 1-byte read from ingress_ifindex field */ + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, ingress_ifindex)), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, ingress_ifindex) + 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, ingress_ifindex) + 2), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, ingress_ifindex) + 3), + /* 2-byte read from ingress_ifindex field */ + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, ingress_ifindex)), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, ingress_ifindex) + 2), + /* 4-byte read from ingress_ifindex field */ + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, ingress_ifindex)), + /* 8-byte read from sk field */ 
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, offsetof(struct bpf_sk_lookup, sk)), @@ -351,6 +369,20 @@ .expected_attach_type = BPF_SK_LOOKUP, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, +{ + "invalid 8-byte read from bpf_sk_lookup ingress_ifindex field", + .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, ingress_ifindex)), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, /* invalid 1,2,4-byte reads from 8-byte fields in bpf_sk_lookup */ { "invalid 4-byte read from bpf_sk_lookup sk field", -- cgit v1.2.3 From a6ca71583137300f207343d5d950cb1c365ab911 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 10 Nov 2021 21:17:57 -0800 Subject: libbpf: Add ability to get/set per-program load flags Add bpf_program__flags() API to retrieve prog_flags that will be (or were) supplied to BPF_PROG_LOAD command. Also add bpf_program__set_extra_flags() API to allow to set *extra* flags, in addition to those determined by program's SEC() definition. Such flags are logically OR'ed with libbpf-derived flags. Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20211111051758.92283-2-andrii@kernel.org --- tools/lib/bpf/libbpf.c | 14 ++++++++++++++ tools/lib/bpf/libbpf.h | 3 +++ tools/lib/bpf/libbpf.map | 2 ++ 3 files changed, 19 insertions(+) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index d869ebee1e27..a823b5ed705b 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -8262,6 +8262,20 @@ void bpf_program__set_expected_attach_type(struct bpf_program *prog, prog->expected_attach_type = type; } +__u32 bpf_program__flags(const struct bpf_program *prog) +{ + return prog->prog_flags; +} + +int bpf_program__set_extra_flags(struct bpf_program *prog, __u32 extra_flags) +{ + if (prog->obj->loaded) + return libbpf_err(-EBUSY); + + prog->prog_flags |= extra_flags; + return 0; +} + #define SEC_DEF(sec_pfx, ptype, atype, flags, ...) 
{ \ .sec = sec_pfx, \ .prog_type = BPF_PROG_TYPE_##ptype, \ diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index 039058763173..f69512ae8505 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -493,6 +493,9 @@ LIBBPF_API void bpf_program__set_expected_attach_type(struct bpf_program *prog, enum bpf_attach_type type); +LIBBPF_API __u32 bpf_program__flags(const struct bpf_program *prog); +LIBBPF_API int bpf_program__set_extra_flags(struct bpf_program *prog, __u32 extra_flags); + LIBBPF_API int bpf_program__set_attach_target(struct bpf_program *prog, int attach_prog_fd, const char *attach_func_name); diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index b895861a13c0..256d5d4be951 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -397,8 +397,10 @@ LIBBPF_0.6.0 { bpf_object__prev_program; bpf_prog_load_deprecated; bpf_prog_load; + bpf_program__flags; bpf_program__insn_cnt; bpf_program__insns; + bpf_program__set_extra_flags; btf__add_btf; btf__add_decl_tag; btf__raw_data; -- cgit v1.2.3 From 50dee7078b66d881c62f6177844d625f7ead6003 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 10 Nov 2021 21:17:58 -0800 Subject: selftests/bpf: Fix bpf_prog_test_load() logic to pass extra log level After recent refactoring bpf_prog_test_load(), used across multiple selftests, lost ability to specify extra log_level 1 or 2 (for -vv and -vvv, respectively). Fix that problem by using bpf_object__load_xattr() API that supports extra log_level flags. Also restore BPF_F_TEST_RND_HI32 prog_flags by utilizing new bpf_program__set_extra_flags() API. Fixes: f87c1930ac29 ("selftests/bpf: Merge test_stub.c into testing_helpers.c") Reported-by: Alexei Starovoitov Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20211111051758.92283-3-andrii@kernel.org --- tools/testing/selftests/bpf/testing_helpers.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/bpf/testing_helpers.c b/tools/testing/selftests/bpf/testing_helpers.c index ef61d43adfe4..52c2f24e0898 100644 --- a/tools/testing/selftests/bpf/testing_helpers.c +++ b/tools/testing/selftests/bpf/testing_helpers.c @@ -88,6 +88,7 @@ int extra_prog_load_log_flags = 0; int bpf_prog_test_load(const char *file, enum bpf_prog_type type, struct bpf_object **pobj, int *prog_fd) { + struct bpf_object_load_attr attr = {}; struct bpf_object *obj; struct bpf_program *prog; int err; @@ -105,7 +106,11 @@ int bpf_prog_test_load(const char *file, enum bpf_prog_type type, if (type != BPF_PROG_TYPE_UNSPEC) bpf_program__set_type(prog, type); - err = bpf_object__load(obj); + bpf_program__set_extra_flags(prog, BPF_F_TEST_RND_HI32); + + attr.obj = obj; + attr.log_level = extra_prog_load_log_flags; + err = bpf_object__load_xattr(&attr); if (err) goto err_out; -- cgit v1.2.3 From 6501182c08f76e4118177b42614174547e1bd149 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 10 Nov 2021 21:36:16 -0800 Subject: bpftool: Normalize compile rules to specify output file last When dealing with verbose Makefile output, it's extremely confusing when compiler invocation commands don't specify -o as the last argument. Normalize bpftool's Makefile to do just that, as most other BPF-related Makefiles are already doing that. 
Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20211111053624.190580-2-andrii@kernel.org --- tools/bpf/bpftool/Makefile | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile index 622568c7a9b8..31dfef6a4121 100644 --- a/tools/bpf/bpftool/Makefile +++ b/tools/bpf/bpftool/Makefile @@ -177,7 +177,8 @@ $(OUTPUT)%.bpf.o: skeleton/%.bpf.c $(OUTPUT)vmlinux.h $(LIBBPF) -I$(if $(OUTPUT),$(OUTPUT),.) \ -I$(srctree)/tools/include/uapi/ \ -I$(LIBBPF_INCLUDE) \ - -g -O2 -Wall -target bpf -c $< -o $@ && $(LLVM_STRIP) -g $@ + -g -O2 -Wall -target bpf -c $< -o $@ + $(Q)$(LLVM_STRIP) -g $@ $(OUTPUT)%.skel.h: $(OUTPUT)%.bpf.o $(BPFTOOL_BOOTSTRAP) $(QUIET_GEN)$(BPFTOOL_BOOTSTRAP) gen skeleton $< > $@ @@ -192,10 +193,10 @@ endif CFLAGS += $(if $(BUILD_BPF_SKELS),,-DBPFTOOL_WITHOUT_SKELETONS) $(BOOTSTRAP_OUTPUT)disasm.o: $(srctree)/kernel/bpf/disasm.c - $(QUIET_CC)$(HOSTCC) $(CFLAGS) -c -MMD -o $@ $< + $(QUIET_CC)$(HOSTCC) $(CFLAGS) -c -MMD $< -o $@ $(OUTPUT)disasm.o: $(srctree)/kernel/bpf/disasm.c - $(QUIET_CC)$(CC) $(CFLAGS) -c -MMD -o $@ $< + $(QUIET_CC)$(CC) $(CFLAGS) -c -MMD $< -o $@ $(OUTPUT)feature.o: ifneq ($(feature-zlib), 1) @@ -203,17 +204,16 @@ ifneq ($(feature-zlib), 1) endif $(BPFTOOL_BOOTSTRAP): $(BOOTSTRAP_OBJS) $(LIBBPF_BOOTSTRAP) - $(QUIET_LINK)$(HOSTCC) $(CFLAGS) $(LDFLAGS) -o $@ $(BOOTSTRAP_OBJS) \ - $(LIBS_BOOTSTRAP) + $(QUIET_LINK)$(HOSTCC) $(CFLAGS) $(LDFLAGS) $(BOOTSTRAP_OBJS) $(LIBS_BOOTSTRAP) -o $@ $(OUTPUT)bpftool: $(OBJS) $(LIBBPF) - $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) -o $@ $(OBJS) $(LIBS) + $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $(OBJS) $(LIBS) -o $@ $(BOOTSTRAP_OUTPUT)%.o: %.c $(LIBBPF_INTERNAL_HDRS) | $(BOOTSTRAP_OUTPUT) - $(QUIET_CC)$(HOSTCC) $(CFLAGS) -c -MMD -o $@ $< + $(QUIET_CC)$(HOSTCC) $(CFLAGS) -c -MMD $< -o $@ $(OUTPUT)%.o: %.c - $(QUIET_CC)$(CC) $(CFLAGS) -c -MMD -o $@ $< + $(QUIET_CC)$(CC) $(CFLAGS) -c -MMD $< -o $@ feature-detect-clean: $(call QUIET_CLEAN, feature-detect) -- cgit v1.2.3 From de29e6bbb9ee674d639cd42fe565f28757208614 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 10 Nov 2021 21:36:17 -0800 Subject: selftests/bpf: Minor cleanups and normalization of Makefile Few clean ups and single-line simplifications. Also split CLEAN command into multiple $(RM) invocations as it gets dangerously close to too long argument list. Make sure that -o is used always as the last argument for saner verbose make output. 
Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20211111053624.190580-3-andrii@kernel.org --- tools/testing/selftests/bpf/Makefile | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 0468ea57650d..0470802c907c 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -45,10 +45,8 @@ ifneq ($(BPF_GCC),) TEST_GEN_PROGS += test_progs-bpf_gcc endif -TEST_GEN_FILES = test_lwt_ip_encap.o \ - test_tc_edt.o -TEST_FILES = xsk_prereqs.sh \ - $(wildcard progs/btf_dump_test_case_*.c) +TEST_GEN_FILES = test_lwt_ip_encap.o test_tc_edt.o +TEST_FILES = xsk_prereqs.sh $(wildcard progs/btf_dump_test_case_*.c) # Order correspond to 'make run_tests' order TEST_PROGS := test_kmod.sh \ @@ -107,7 +105,10 @@ endif OVERRIDE_TARGETS := 1 override define CLEAN $(call msg,CLEAN) - $(Q)$(RM) -r $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) $(EXTRA_CLEAN) + $(Q)$(RM) -r $(TEST_GEN_PROGS) + $(Q)$(RM) -r $(TEST_GEN_PROGS_EXTENDED) + $(Q)$(RM) -r $(TEST_GEN_FILES) + $(Q)$(RM) -r $(EXTRA_CLEAN) $(Q)$(MAKE) -C bpf_testmod clean $(Q)$(MAKE) docs-clean endef @@ -169,7 +170,7 @@ $(OUTPUT)/%:%.c $(OUTPUT)/urandom_read: urandom_read.c $(call msg,BINARY,,$@) - $(Q)$(CC) $(LDFLAGS) -o $@ $< $(LDLIBS) -Wl,--build-id=sha1 + $(Q)$(CC) $(LDFLAGS) $< $(LDLIBS) -Wl,--build-id=sha1 -o $@ $(OUTPUT)/bpf_testmod.ko: $(VMLINUX_BTF) $(wildcard bpf_testmod/Makefile bpf_testmod/*.[ch]) $(call msg,MOD,,$@) @@ -232,16 +233,16 @@ docs-clean: prefix= OUTPUT=$(OUTPUT)/ DESTDIR=$(OUTPUT)/ $@ $(BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \ - ../../../include/uapi/linux/bpf.h \ + $(APIDIR)/linux/bpf.h \ | $(BUILD_DIR)/libbpf $(Q)$(MAKE) $(submake_extras) -C $(BPFDIR) OUTPUT=$(BUILD_DIR)/libbpf/ \ EXTRA_CFLAGS='-g -O0' \ DESTDIR=$(SCRATCH_DIR) prefix= all install_headers ifneq ($(BPFOBJ),$(HOST_BPFOBJ)) -$(HOST_BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \ - ../../../include/uapi/linux/bpf.h \ - | $(HOST_BUILD_DIR)/libbpf +$(HOST_BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \ + $(APIDIR)/linux/bpf.h \ + | $(HOST_BUILD_DIR)/libbpf $(Q)$(MAKE) $(submake_extras) -C $(BPFDIR) \ EXTRA_CFLAGS='-g -O0' \ OUTPUT=$(HOST_BUILD_DIR)/libbpf/ CC=$(HOSTCC) LD=$(HOSTLD) \ @@ -305,12 +306,12 @@ $(OUTPUT)/flow_dissector_load.o: flow_dissector_load.h # $3 - CFLAGS define CLANG_BPF_BUILD_RULE $(call msg,CLNG-BPF,$(TRUNNER_BINARY),$2) - $(Q)$(CLANG) $3 -O2 -target bpf -c $1 -o $2 -mcpu=v3 + $(Q)$(CLANG) $3 -O2 -target bpf -c $1 -mcpu=v3 -o $2 endef # Similar to CLANG_BPF_BUILD_RULE, but with disabled alu32 define CLANG_NOALU32_BPF_BUILD_RULE $(call msg,CLNG-BPF,$(TRUNNER_BINARY),$2) - $(Q)$(CLANG) $3 -O2 -target bpf -c $1 -o $2 -mcpu=v2 + $(Q)$(CLANG) $3 -O2 -target bpf -c $1 -mcpu=v2 -o $2 endef # Build BPF object using GCC define GCC_BPF_BUILD_RULE @@ -472,13 +473,12 @@ TRUNNER_TESTS_DIR := prog_tests TRUNNER_BPF_PROGS_DIR := progs TRUNNER_EXTRA_SOURCES := test_progs.c cgroup_helpers.c trace_helpers.c \ network_helpers.c testing_helpers.c \ - btf_helpers.c flow_dissector_load.h + btf_helpers.c flow_dissector_load.h TRUNNER_EXTRA_FILES := $(OUTPUT)/urandom_read $(OUTPUT)/bpf_testmod.ko \ ima_setup.sh \ $(wildcard progs/btf_dump_test_case_*.c) TRUNNER_BPF_BUILD_RULE := CLANG_BPF_BUILD_RULE -TRUNNER_BPF_CFLAGS := $(BPF_CFLAGS) $(CLANG_CFLAGS) -TRUNNER_BPF_CFLAGS += -DENABLE_ATOMICS_TESTS 
+TRUNNER_BPF_CFLAGS := $(BPF_CFLAGS) $(CLANG_CFLAGS) -DENABLE_ATOMICS_TESTS $(eval $(call DEFINE_TEST_RUNNER,test_progs)) # Define test_progs-no_alu32 test runner. @@ -540,7 +540,7 @@ $(OUTPUT)/bench: $(OUTPUT)/bench.o $(OUTPUT)/testing_helpers.o \ $(OUTPUT)/bench_ringbufs.o \ $(OUTPUT)/bench_bloom_filter_map.o $(call msg,BINARY,,$@) - $(Q)$(CC) $(LDFLAGS) -o $@ $(filter %.a %.o,$^) $(LDLIBS) + $(Q)$(CC) $(LDFLAGS) $(filter %.a %.o,$^) $(LDLIBS) -o $@ EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) $(SCRATCH_DIR) $(HOST_SCRATCH_DIR) \ prog_tests/tests.h map_tests/tests.h verifier/tests.h \ -- cgit v1.2.3 From 957d350a8b94133d114a9b1ac3e79f1f77100681 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 10 Nov 2021 21:36:18 -0800 Subject: libbpf: Turn btf_dedup_opts into OPTS-based struct btf__dedup() and struct btf_dedup_opts were added before we figured out OPTS mechanism. As such, btf_dedup_opts is non-extensible without breaking an ABI and potentially crashing user application. Unfortunately, btf__dedup() and btf_dedup_opts are short and succinct names that would be great to preserve and use going forward. So we use ___libbpf_override() macro approach, used previously for bpf_prog_load() API, to define a new btf__dedup() variant that accepts only struct btf * and struct btf_dedup_opts * arguments, and rename the old btf__dedup() implementation into btf__dedup_deprecated(). This keeps both source and binary compatibility with old and new applications. The biggest problem was struct btf_dedup_opts, which wasn't OPTS-based, and as such doesn't have `size_t sz;` as a first field. But btf__dedup() is a pretty rarely used API and I believe that the only currently known users (besides selftests) are libbpf's own bpf_linker and pahole. Neither use case actually uses options and just passes NULL. So instead of doing extra hacks, just rewrite struct btf_dedup_opts into OPTS-based one, move btf_ext argument into those opts (only bpf_linker needs to dedup btf_ext, so it's not a typical thing to specify), and drop never used `dont_resolve_fwds` option (it was never used anywhere, AFAIK, it makes BTF dedup much less useful and efficient). Just in case, for old implementation, btf__dedup_deprecated(), detect non-NULL options and error out with helpful message, to help users migrate, if there are any user playing with btf__dedup(). The last remaining piece is dedup_table_size, which is another anachronism from very early days of BTF dedup. Since then it has been reduced to the only valid value, 1, to request forced hash collisions. This is only used during testing. So instead introduce a bool flag to force collisions explicitly. This patch also adapts selftests to new btf__dedup() and btf_dedup_opts use to avoid selftests breakage. 
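To make the new calling convention concrete, here is a minimal, hedged sketch of how a caller could invoke the OPTS-based btf__dedup(); the helper name and the assumption that 'btf' and 'btf_ext' are already loaded are illustrative, not part of this series:

#include <stdio.h>
#include <bpf/btf.h>

/* Illustrative helper (not from the patch): dedup 'btf', optionally together
 * with its .BTF.ext section, using the new two-argument btf__dedup().
 */
static int dedup_btf(struct btf *btf, struct btf_ext *btf_ext)
{
	LIBBPF_OPTS(btf_dedup_opts, opts,
		.btf_ext = btf_ext,	/* NULL is fine if there is no .BTF.ext */
	);
	int err;

	err = btf__dedup(btf, &opts);	/* passing NULL instead of &opts also works */
	if (err)
		fprintf(stderr, "BTF dedup failed: %d\n", err);
	return err;
}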
[0] Closes: https://github.com/libbpf/libbpf/issues/281 Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20211111053624.190580-4-andrii@kernel.org --- tools/lib/bpf/btf.c | 46 +++++++++++++--------- tools/lib/bpf/btf.h | 20 ++++++++-- tools/lib/bpf/libbpf.map | 2 + tools/lib/bpf/linker.c | 4 +- tools/testing/selftests/bpf/prog_tests/btf.c | 46 ++++------------------ .../selftests/bpf/prog_tests/btf_dedup_split.c | 6 +-- 6 files changed, 58 insertions(+), 66 deletions(-) diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index 7e4c5586bd87..fcec27622e7a 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -2846,8 +2846,7 @@ __u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext) struct btf_dedup; -static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext, - const struct btf_dedup_opts *opts); +static struct btf_dedup *btf_dedup_new(struct btf *btf, const struct btf_dedup_opts *opts); static void btf_dedup_free(struct btf_dedup *d); static int btf_dedup_prep(struct btf_dedup *d); static int btf_dedup_strings(struct btf_dedup *d); @@ -2994,12 +2993,17 @@ static int btf_dedup_remap_types(struct btf_dedup *d); * deduplicating structs/unions is described in greater details in comments for * `btf_dedup_is_equiv` function. */ -int btf__dedup(struct btf *btf, struct btf_ext *btf_ext, - const struct btf_dedup_opts *opts) + +DEFAULT_VERSION(btf__dedup_v0_6_0, btf__dedup, LIBBPF_0.6.0) +int btf__dedup_v0_6_0(struct btf *btf, const struct btf_dedup_opts *opts) { - struct btf_dedup *d = btf_dedup_new(btf, btf_ext, opts); + struct btf_dedup *d; int err; + if (!OPTS_VALID(opts, btf_dedup_opts)) + return libbpf_err(-EINVAL); + + d = btf_dedup_new(btf, opts); if (IS_ERR(d)) { pr_debug("btf_dedup_new failed: %ld", PTR_ERR(d)); return libbpf_err(-EINVAL); @@ -3051,6 +3055,19 @@ done: return libbpf_err(err); } +COMPAT_VERSION(bpf__dedup_deprecated, btf__dedup, LIBBPF_0.0.2) +int btf__dedup_deprecated(struct btf *btf, struct btf_ext *btf_ext, const void *unused_opts) +{ + LIBBPF_OPTS(btf_dedup_opts, opts, .btf_ext = btf_ext); + + if (unused_opts) { + pr_warn("please use new version of btf__dedup() that supports options\n"); + return libbpf_err(-ENOTSUP); + } + + return btf__dedup(btf, &opts); +} + #define BTF_UNPROCESSED_ID ((__u32)-1) #define BTF_IN_PROGRESS_ID ((__u32)-2) @@ -3163,8 +3180,7 @@ static bool btf_dedup_equal_fn(const void *k1, const void *k2, void *ctx) return k1 == k2; } -static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext, - const struct btf_dedup_opts *opts) +static struct btf_dedup *btf_dedup_new(struct btf *btf, const struct btf_dedup_opts *opts) { struct btf_dedup *d = calloc(1, sizeof(struct btf_dedup)); hashmap_hash_fn hash_fn = btf_dedup_identity_hash_fn; @@ -3173,13 +3189,11 @@ static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext, if (!d) return ERR_PTR(-ENOMEM); - d->opts.dont_resolve_fwds = opts && opts->dont_resolve_fwds; - /* dedup_table_size is now used only to force collisions in tests */ - if (opts && opts->dedup_table_size == 1) + if (OPTS_GET(opts, force_collisions, false)) hash_fn = btf_dedup_collision_hash_fn; d->btf = btf; - d->btf_ext = btf_ext; + d->btf_ext = OPTS_GET(opts, btf_ext, NULL); d->dedup_table = hashmap__new(hash_fn, btf_dedup_equal_fn, NULL); if (IS_ERR(d->dedup_table)) { @@ -3708,8 +3722,6 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id) new_id = cand_id; break; } - if 
(d->opts.dont_resolve_fwds) - continue; if (btf_compat_enum(t, cand)) { if (btf_is_enum_fwd(t)) { /* resolve fwd to full enum */ @@ -3952,8 +3964,7 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id, return 0; /* FWD <--> STRUCT/UNION equivalence check, if enabled */ - if (!d->opts.dont_resolve_fwds - && (cand_kind == BTF_KIND_FWD || canon_kind == BTF_KIND_FWD) + if ((cand_kind == BTF_KIND_FWD || canon_kind == BTF_KIND_FWD) && cand_kind != canon_kind) { __u16 real_kind; __u16 fwd_kind; @@ -3979,10 +3990,7 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id, return btf_equal_int_tag(cand_type, canon_type); case BTF_KIND_ENUM: - if (d->opts.dont_resolve_fwds) - return btf_equal_enum(cand_type, canon_type); - else - return btf_compat_enum(cand_type, canon_type); + return btf_compat_enum(cand_type, canon_type); case BTF_KIND_FWD: case BTF_KIND_FLOAT: diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h index bc005ba3ceec..6aae4f62ee0b 100644 --- a/tools/lib/bpf/btf.h +++ b/tools/lib/bpf/btf.h @@ -245,12 +245,24 @@ LIBBPF_API int btf__add_decl_tag(struct btf *btf, const char *value, int ref_typ int component_idx); struct btf_dedup_opts { - unsigned int dedup_table_size; - bool dont_resolve_fwds; + size_t sz; + /* optional .BTF.ext info to dedup along the main BTF info */ + struct btf_ext *btf_ext; + /* force hash collisions (used for testing) */ + bool force_collisions; + size_t :0; }; +#define btf_dedup_opts__last_field force_collisions + +LIBBPF_API int btf__dedup(struct btf *btf, const struct btf_dedup_opts *opts); + +LIBBPF_API int btf__dedup_v0_6_0(struct btf *btf, const struct btf_dedup_opts *opts); -LIBBPF_API int btf__dedup(struct btf *btf, struct btf_ext *btf_ext, - const struct btf_dedup_opts *opts); +LIBBPF_DEPRECATED_SINCE(0, 7, "use btf__dedup() instead") +LIBBPF_API int btf__dedup_deprecated(struct btf *btf, struct btf_ext *btf_ext, const void *opts); +#define btf__dedup(...) 
___libbpf_overload(___btf_dedup, __VA_ARGS__) +#define ___btf_dedup3(btf, btf_ext, opts) btf__dedup_deprecated(btf, btf_ext, opts) +#define ___btf_dedup2(btf, opts) btf__dedup(btf, opts) struct btf_dump; diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index 256d5d4be951..b8c8d14621d2 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -403,6 +403,8 @@ LIBBPF_0.6.0 { bpf_program__set_extra_flags; btf__add_btf; btf__add_decl_tag; + btf__dedup; + btf__dedup_deprecated; btf__raw_data; btf__type_cnt; } LIBBPF_0.5.0; diff --git a/tools/lib/bpf/linker.c b/tools/lib/bpf/linker.c index f677dccdeae4..594b206fa674 100644 --- a/tools/lib/bpf/linker.c +++ b/tools/lib/bpf/linker.c @@ -2650,6 +2650,7 @@ static int emit_elf_data_sec(struct bpf_linker *linker, const char *sec_name, static int finalize_btf(struct bpf_linker *linker) { + LIBBPF_OPTS(btf_dedup_opts, opts); struct btf *btf = linker->btf; const void *raw_data; int i, j, id, err; @@ -2686,7 +2687,8 @@ static int finalize_btf(struct bpf_linker *linker) return err; } - err = btf__dedup(linker->btf, linker->btf_ext, NULL); + opts.btf_ext = linker->btf_ext; + err = btf__dedup(linker->btf, &opts); if (err) { pr_warn("BTF dedup failed: %d\n", err); return err; diff --git a/tools/testing/selftests/bpf/prog_tests/btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c index ebd1aa4d09d6..1e8b36d74df2 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf.c +++ b/tools/testing/selftests/bpf/prog_tests/btf.c @@ -6627,7 +6627,7 @@ struct btf_dedup_test { struct btf_dedup_opts opts; }; -const struct btf_dedup_test dedup_tests[] = { +static struct btf_dedup_test dedup_tests[] = { { .descr = "dedup: unused strings filtering", @@ -6647,9 +6647,6 @@ const struct btf_dedup_test dedup_tests[] = { }, BTF_STR_SEC("\0int\0long"), }, - .opts = { - .dont_resolve_fwds = false, - }, }, { .descr = "dedup: strings deduplication", @@ -6672,9 +6669,6 @@ const struct btf_dedup_test dedup_tests[] = { }, BTF_STR_SEC("\0int\0long int"), }, - .opts = { - .dont_resolve_fwds = false, - }, }, { .descr = "dedup: struct example #1", @@ -6755,9 +6749,6 @@ const struct btf_dedup_test dedup_tests[] = { }, BTF_STR_SEC("\0a\0b\0c\0d\0int\0float\0next\0s"), }, - .opts = { - .dont_resolve_fwds = false, - }, }, { .descr = "dedup: struct <-> fwd resolution w/ hash collision", @@ -6800,8 +6791,7 @@ const struct btf_dedup_test dedup_tests[] = { BTF_STR_SEC("\0s\0x"), }, .opts = { - .dont_resolve_fwds = false, - .dedup_table_size = 1, /* force hash collisions */ + .force_collisions = true, /* force hash collisions */ }, }, { @@ -6847,8 +6837,7 @@ const struct btf_dedup_test dedup_tests[] = { BTF_STR_SEC("\0s\0x"), }, .opts = { - .dont_resolve_fwds = false, - .dedup_table_size = 1, /* force hash collisions */ + .force_collisions = true, /* force hash collisions */ }, }, { @@ -6911,9 +6900,6 @@ const struct btf_dedup_test dedup_tests[] = { }, BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M\0N\0O\0P\0Q"), }, - .opts = { - .dont_resolve_fwds = false, - }, }, { .descr = "dedup: no int/float duplicates", @@ -6965,9 +6951,6 @@ const struct btf_dedup_test dedup_tests[] = { }, BTF_STR_SEC("\0int\0some other int\0float"), }, - .opts = { - .dont_resolve_fwds = false, - }, }, { .descr = "dedup: enum fwd resolution", @@ -7009,9 +6992,6 @@ const struct btf_dedup_test dedup_tests[] = { }, BTF_STR_SEC("\0e1\0e1_val\0e2\0e2_val"), }, - .opts = { - .dont_resolve_fwds = false, - }, }, { .descr = "dedup: datasec and vars pass-through", @@ -7054,8 +7034,7 @@ const struct 
btf_dedup_test dedup_tests[] = { BTF_STR_SEC("\0.bss\0t"), }, .opts = { - .dont_resolve_fwds = false, - .dedup_table_size = 1 + .force_collisions = true }, }, { @@ -7099,9 +7078,6 @@ const struct btf_dedup_test dedup_tests[] = { }, BTF_STR_SEC("\0t\0a1\0a2\0f\0tag"), }, - .opts = { - .dont_resolve_fwds = false, - }, }, { .descr = "dedup: func/func_param tags", @@ -7152,9 +7128,6 @@ const struct btf_dedup_test dedup_tests[] = { }, BTF_STR_SEC("\0a1\0a2\0f\0tag1\0tag2\0tag3"), }, - .opts = { - .dont_resolve_fwds = false, - }, }, { .descr = "dedup: struct/struct_member tags", @@ -7200,9 +7173,6 @@ const struct btf_dedup_test dedup_tests[] = { }, BTF_STR_SEC("\0t\0m1\0m2\0tag1\0tag2\0tag3"), }, - .opts = { - .dont_resolve_fwds = false, - }, }, { .descr = "dedup: typedef tags", @@ -7233,9 +7203,6 @@ const struct btf_dedup_test dedup_tests[] = { }, BTF_STR_SEC("\0t\0tag1\0tag2\0tag3"), }, - .opts = { - .dont_resolve_fwds = false, - }, }, }; @@ -7293,7 +7260,7 @@ static void dump_btf_strings(const char *strs, __u32 len) static void do_test_dedup(unsigned int test_num) { - const struct btf_dedup_test *test = &dedup_tests[test_num - 1]; + struct btf_dedup_test *test = &dedup_tests[test_num - 1]; __u32 test_nr_types, expect_nr_types, test_btf_size, expect_btf_size; const struct btf_header *test_hdr, *expect_hdr; struct btf *test_btf = NULL, *expect_btf = NULL; @@ -7337,7 +7304,8 @@ static void do_test_dedup(unsigned int test_num) goto done; } - err = btf__dedup(test_btf, NULL, &test->opts); + test->opts.sz = sizeof(test->opts); + err = btf__dedup(test_btf, &test->opts); if (CHECK(err, "btf_dedup failed errno:%d", err)) { err = -1; goto done; diff --git a/tools/testing/selftests/bpf/prog_tests/btf_dedup_split.c b/tools/testing/selftests/bpf/prog_tests/btf_dedup_split.c index 64554fd33547..9d3b8d7a1537 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_dedup_split.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_dedup_split.c @@ -92,7 +92,7 @@ struct s2 {\n\ int *f3;\n\ };\n\n", "c_dump"); - err = btf__dedup(btf2, NULL, NULL); + err = btf__dedup(btf2, NULL); if (!ASSERT_OK(err, "btf_dedup")) goto cleanup; @@ -186,7 +186,7 @@ static void test_split_fwd_resolve() { "\t'f1' type_id=7 bits_offset=0\n" "\t'f2' type_id=9 bits_offset=64"); - err = btf__dedup(btf2, NULL, NULL); + err = btf__dedup(btf2, NULL); if (!ASSERT_OK(err, "btf_dedup")) goto cleanup; @@ -283,7 +283,7 @@ static void test_split_struct_duped() { "[13] STRUCT 's3' size=8 vlen=1\n" "\t'f1' type_id=12 bits_offset=0"); - err = btf__dedup(btf2, NULL, NULL); + err = btf__dedup(btf2, NULL); if (!ASSERT_OK(err, "btf_dedup")) goto cleanup; -- cgit v1.2.3 From 6084f5dc928f2ada4331ba9eda65542e94d86bc6 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 10 Nov 2021 21:36:19 -0800 Subject: libbpf: Ensure btf_dump__new() and btf_dump_opts are future-proof Change btf_dump__new() and corresponding struct btf_dump_ops structure to be extensible by using OPTS "framework" ([0]). Given we don't change the names, we use a similar approach as with bpf_prog_load(), but this time we ended up with two APIs with the same name and same number of arguments, so overloading based on number of arguments with ___libbpf_override() doesn't work. Instead, use "overloading" based on types. In this particular case, print callback has to be specified, so we detect which argument is a callback. If it's 4th (last) argument, old implementation of API is used by user code. If not, it must be 2nd, and thus new implementation is selected. 
The rest is handled by the same symbol versioning approach. btf_ext argument is dropped as it was never used and isn't necessary either. If in the future we'll need btf_ext, that will be added into OPTS-based struct btf_dump_opts. struct btf_dump_opts is reused for both old API and new APIs. ctx field is marked deprecated in v0.7+ and it's put at the same memory location as OPTS's sz field. Any user of new-style btf_dump__new() will have to set sz field and doesn't/shouldn't use ctx, as ctx is now passed along the callback as mandatory input argument, following the other APIs in libbpf that accept callbacks consistently. Again, this is quite ugly in implementation, but is done in the name of backwards compatibility and uniform and extensible future APIs (at the same time, sigh). And it will be gone in libbpf 1.0. [0] Closes: https://github.com/libbpf/libbpf/issues/283 Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20211111053624.190580-5-andrii@kernel.org --- tools/lib/bpf/btf.h | 51 ++++++++++++++++++++++++++++++++++++++++++++---- tools/lib/bpf/btf_dump.c | 31 ++++++++++++++++++++--------- tools/lib/bpf/libbpf.map | 2 ++ 3 files changed, 71 insertions(+), 13 deletions(-) diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h index 6aae4f62ee0b..45310c65e865 100644 --- a/tools/lib/bpf/btf.h +++ b/tools/lib/bpf/btf.h @@ -267,15 +267,58 @@ LIBBPF_API int btf__dedup_deprecated(struct btf *btf, struct btf_ext *btf_ext, c struct btf_dump; struct btf_dump_opts { - void *ctx; + union { + size_t sz; + void *ctx; /* DEPRECATED: will be gone in v1.0 */ + }; }; typedef void (*btf_dump_printf_fn_t)(void *ctx, const char *fmt, va_list args); LIBBPF_API struct btf_dump *btf_dump__new(const struct btf *btf, - const struct btf_ext *btf_ext, - const struct btf_dump_opts *opts, - btf_dump_printf_fn_t printf_fn); + btf_dump_printf_fn_t printf_fn, + void *ctx, + const struct btf_dump_opts *opts); + +LIBBPF_API struct btf_dump *btf_dump__new_v0_6_0(const struct btf *btf, + btf_dump_printf_fn_t printf_fn, + void *ctx, + const struct btf_dump_opts *opts); + +LIBBPF_API struct btf_dump *btf_dump__new_deprecated(const struct btf *btf, + const struct btf_ext *btf_ext, + const struct btf_dump_opts *opts, + btf_dump_printf_fn_t printf_fn); + +/* Choose either btf_dump__new() or btf_dump__new_deprecated() based on the + * type of 4th argument. If it's btf_dump's print callback, use deprecated + * API; otherwise, choose the new btf_dump__new(). ___libbpf_override() + * doesn't work here because both variants have 4 input arguments. + * + * (void *) casts are necessary to avoid compilation warnings about type + * mismatches, because even though __builtin_choose_expr() only ever evaluates + * one side the other side still has to satisfy type constraints (this is + * compiler implementation limitation which might be lifted eventually, + * according to the documentation). So passing struct btf_ext in place of + * btf_dump_printf_fn_t would be generating compilation warning. Casting to + * void * avoids this issue. + * + * Also, two type compatibility checks for a function and function pointer are + * required because passing function reference into btf_dump__new() as + * btf_dump__new(..., my_callback, ...) and as btf_dump__new(..., + * &my_callback, ...) (not explicit ampersand in the latter case) actually + * differs as far as __builtin_types_compatible_p() is concerned. Thus two + * checks are combined to detect callback argument. 
+ * + * The rest works just like in case of ___libbpf_override() usage with symbol + * versioning. + */ +#define btf_dump__new(a1, a2, a3, a4) __builtin_choose_expr( \ + __builtin_types_compatible_p(typeof(a4), btf_dump_printf_fn_t) || \ + __builtin_types_compatible_p(typeof(a4), void(void *, const char *, va_list)), \ + btf_dump__new_deprecated((void *)a1, (void *)a2, (void *)a3, (void *)a4), \ + btf_dump__new((void *)a1, (void *)a2, (void *)a3, (void *)a4)) + LIBBPF_API void btf_dump__free(struct btf_dump *d); LIBBPF_API int btf_dump__dump_type(struct btf_dump *d, __u32 id); diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c index 17db62b5002e..b8cd7e4f557a 100644 --- a/tools/lib/bpf/btf_dump.c +++ b/tools/lib/bpf/btf_dump.c @@ -77,9 +77,8 @@ struct btf_dump_data { struct btf_dump { const struct btf *btf; - const struct btf_ext *btf_ext; btf_dump_printf_fn_t printf_fn; - struct btf_dump_opts opts; + void *cb_ctx; int ptr_sz; bool strip_mods; bool skip_anon_defs; @@ -138,29 +137,32 @@ static void btf_dump_printf(const struct btf_dump *d, const char *fmt, ...) va_list args; va_start(args, fmt); - d->printf_fn(d->opts.ctx, fmt, args); + d->printf_fn(d->cb_ctx, fmt, args); va_end(args); } static int btf_dump_mark_referenced(struct btf_dump *d); static int btf_dump_resize(struct btf_dump *d); -struct btf_dump *btf_dump__new(const struct btf *btf, - const struct btf_ext *btf_ext, - const struct btf_dump_opts *opts, - btf_dump_printf_fn_t printf_fn) +DEFAULT_VERSION(btf_dump__new_v0_6_0, btf_dump__new, LIBBPF_0.6.0) +struct btf_dump *btf_dump__new_v0_6_0(const struct btf *btf, + btf_dump_printf_fn_t printf_fn, + void *ctx, + const struct btf_dump_opts *opts) { struct btf_dump *d; int err; + if (!printf_fn) + return libbpf_err_ptr(-EINVAL); + d = calloc(1, sizeof(struct btf_dump)); if (!d) return libbpf_err_ptr(-ENOMEM); d->btf = btf; - d->btf_ext = btf_ext; d->printf_fn = printf_fn; - d->opts.ctx = opts ? opts->ctx : NULL; + d->cb_ctx = ctx; d->ptr_sz = btf__pointer_size(btf) ? : sizeof(void *); d->type_names = hashmap__new(str_hash_fn, str_equal_fn, NULL); @@ -186,6 +188,17 @@ err: return libbpf_err_ptr(err); } +COMPAT_VERSION(btf_dump__new_deprecated, btf_dump__new, LIBBPF_0.0.4) +struct btf_dump *btf_dump__new_deprecated(const struct btf *btf, + const struct btf_ext *btf_ext, + const struct btf_dump_opts *opts, + btf_dump_printf_fn_t printf_fn) +{ + if (!printf_fn) + return libbpf_err_ptr(-EINVAL); + return btf_dump__new_v0_6_0(btf, printf_fn, opts ? opts->ctx : NULL, opts); +} + static int btf_dump_resize(struct btf_dump *d) { int err, last_id = btf__type_cnt(d->btf) - 1; diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index b8c8d14621d2..07762d6723ef 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -407,4 +407,6 @@ LIBBPF_0.6.0 { btf__dedup_deprecated; btf__raw_data; btf__type_cnt; + btf_dump__new; + btf_dump__new_deprecated; } LIBBPF_0.5.0; -- cgit v1.2.3 From 4178893465774f91dcd49465ae6f4e3cc036b7b2 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 10 Nov 2021 21:36:20 -0800 Subject: libbpf: Make perf_buffer__new() use OPTS-based interface Add new variants of perf_buffer__new() and perf_buffer__new_raw() that use OPTS-based options for future extensibility ([0]). Given all the currently used API names are best fits, re-use them and use ___libbpf_override() approach and symbol versioning to preserve ABI and source code compatibility. 
struct perf_buffer_opts and struct perf_buffer_raw_opts are kept as well, but they are restructured such that they are OPTS-based when used with new APIs. For struct perf_buffer_raw_opts we keep few fields intact, so we have to also preserve the memory location of them both when used as OPTS and for legacy API variants. This is achieved with anonymous padding for OPTS "incarnation" of the struct. These pads can be eventually used for new options. [0] Closes: https://github.com/libbpf/libbpf/issues/311 Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20211111053624.190580-6-andrii@kernel.org --- tools/lib/bpf/libbpf.c | 70 ++++++++++++++++++++++++++++++--------- tools/lib/bpf/libbpf.h | 86 ++++++++++++++++++++++++++++++++++++++++-------- tools/lib/bpf/libbpf.map | 4 +++ 3 files changed, 132 insertions(+), 28 deletions(-) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index a823b5ed705b..7db14859b27c 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -10578,11 +10578,18 @@ error: static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt, struct perf_buffer_params *p); -struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt, - const struct perf_buffer_opts *opts) +DEFAULT_VERSION(perf_buffer__new_v0_6_0, perf_buffer__new, LIBBPF_0.6.0) +struct perf_buffer *perf_buffer__new_v0_6_0(int map_fd, size_t page_cnt, + perf_buffer_sample_fn sample_cb, + perf_buffer_lost_fn lost_cb, + void *ctx, + const struct perf_buffer_opts *opts) { struct perf_buffer_params p = {}; - struct perf_event_attr attr = { 0, }; + struct perf_event_attr attr = {}; + + if (!OPTS_VALID(opts, perf_buffer_opts)) + return libbpf_err_ptr(-EINVAL); attr.config = PERF_COUNT_SW_BPF_OUTPUT; attr.type = PERF_TYPE_SOFTWARE; @@ -10591,29 +10598,62 @@ struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt, attr.wakeup_events = 1; p.attr = &attr; - p.sample_cb = opts ? opts->sample_cb : NULL; - p.lost_cb = opts ? opts->lost_cb : NULL; - p.ctx = opts ? opts->ctx : NULL; + p.sample_cb = sample_cb; + p.lost_cb = lost_cb; + p.ctx = ctx; return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p)); } -struct perf_buffer * -perf_buffer__new_raw(int map_fd, size_t page_cnt, - const struct perf_buffer_raw_opts *opts) +COMPAT_VERSION(perf_buffer__new_deprecated, perf_buffer__new, LIBBPF_0.0.4) +struct perf_buffer *perf_buffer__new_deprecated(int map_fd, size_t page_cnt, + const struct perf_buffer_opts *opts) +{ + return perf_buffer__new_v0_6_0(map_fd, page_cnt, + opts ? opts->sample_cb : NULL, + opts ? opts->lost_cb : NULL, + opts ? 
opts->ctx : NULL, + NULL); +} + +DEFAULT_VERSION(perf_buffer__new_raw_v0_6_0, perf_buffer__new_raw, LIBBPF_0.6.0) +struct perf_buffer *perf_buffer__new_raw_v0_6_0(int map_fd, size_t page_cnt, + struct perf_event_attr *attr, + perf_buffer_event_fn event_cb, void *ctx, + const struct perf_buffer_raw_opts *opts) { struct perf_buffer_params p = {}; - p.attr = opts->attr; - p.event_cb = opts->event_cb; - p.ctx = opts->ctx; - p.cpu_cnt = opts->cpu_cnt; - p.cpus = opts->cpus; - p.map_keys = opts->map_keys; + if (page_cnt == 0 || !attr) + return libbpf_err_ptr(-EINVAL); + + if (!OPTS_VALID(opts, perf_buffer_raw_opts)) + return libbpf_err_ptr(-EINVAL); + + p.attr = attr; + p.event_cb = event_cb; + p.ctx = ctx; + p.cpu_cnt = OPTS_GET(opts, cpu_cnt, 0); + p.cpus = OPTS_GET(opts, cpus, NULL); + p.map_keys = OPTS_GET(opts, map_keys, NULL); return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p)); } +COMPAT_VERSION(perf_buffer__new_raw_deprecated, perf_buffer__new_raw, LIBBPF_0.0.4) +struct perf_buffer *perf_buffer__new_raw_deprecated(int map_fd, size_t page_cnt, + const struct perf_buffer_raw_opts *opts) +{ + LIBBPF_OPTS(perf_buffer_raw_opts, inner_opts, + .cpu_cnt = opts->cpu_cnt, + .cpus = opts->cpus, + .map_keys = opts->map_keys, + ); + + return perf_buffer__new_raw_v0_6_0(map_fd, page_cnt, opts->attr, + opts->event_cb, opts->ctx, &inner_opts); +} + static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt, struct perf_buffer_params *p) { diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index f69512ae8505..4ec69f224342 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -778,18 +778,52 @@ typedef void (*perf_buffer_lost_fn)(void *ctx, int cpu, __u64 cnt); /* common use perf buffer options */ struct perf_buffer_opts { - /* if specified, sample_cb is called for each sample */ - perf_buffer_sample_fn sample_cb; - /* if specified, lost_cb is called for each batch of lost samples */ - perf_buffer_lost_fn lost_cb; - /* ctx is provided to sample_cb and lost_cb */ - void *ctx; + union { + size_t sz; + struct { /* DEPRECATED: will be removed in v1.0 */ + /* if specified, sample_cb is called for each sample */ + perf_buffer_sample_fn sample_cb; + /* if specified, lost_cb is called for each batch of lost samples */ + perf_buffer_lost_fn lost_cb; + /* ctx is provided to sample_cb and lost_cb */ + void *ctx; + }; + }; }; +#define perf_buffer_opts__last_field sz +/** + * @brief **perf_buffer__new()** creates BPF perfbuf manager for a specified + * BPF_PERF_EVENT_ARRAY map + * @param map_fd FD of BPF_PERF_EVENT_ARRAY BPF map that will be used by BPF + * code to send data over to user-space + * @param page_cnt number of memory pages allocated for each per-CPU buffer + * @param sample_cb function called on each received data record + * @param lost_cb function called when record loss has occurred + * @param ctx user-provided extra context passed into *sample_cb* and *lost_cb* + * @return a new instance of struct perf_buffer on success, NULL on error with + * *errno* containing an error code + */ LIBBPF_API struct perf_buffer * perf_buffer__new(int map_fd, size_t page_cnt, + perf_buffer_sample_fn sample_cb, perf_buffer_lost_fn lost_cb, void *ctx, const struct perf_buffer_opts *opts); +LIBBPF_API struct perf_buffer * +perf_buffer__new_v0_6_0(int map_fd, size_t page_cnt, + perf_buffer_sample_fn sample_cb, perf_buffer_lost_fn lost_cb, void *ctx, + const struct perf_buffer_opts *opts); + +LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "use new variant of perf_buffer__new() 
instead") +struct perf_buffer *perf_buffer__new_deprecated(int map_fd, size_t page_cnt, + const struct perf_buffer_opts *opts); + +#define perf_buffer__new(...) ___libbpf_overload(___perf_buffer_new, __VA_ARGS__) +#define ___perf_buffer_new6(map_fd, page_cnt, sample_cb, lost_cb, ctx, opts) \ + perf_buffer__new(map_fd, page_cnt, sample_cb, lost_cb, ctx, opts) +#define ___perf_buffer_new3(map_fd, page_cnt, opts) \ + perf_buffer__new_deprecated(map_fd, page_cnt, opts) + enum bpf_perf_event_ret { LIBBPF_PERF_EVENT_DONE = 0, LIBBPF_PERF_EVENT_ERROR = -1, @@ -803,12 +837,21 @@ typedef enum bpf_perf_event_ret /* raw perf buffer options, giving most power and control */ struct perf_buffer_raw_opts { - /* perf event attrs passed directly into perf_event_open() */ - struct perf_event_attr *attr; - /* raw event callback */ - perf_buffer_event_fn event_cb; - /* ctx is provided to event_cb */ - void *ctx; + union { + struct { + size_t sz; + long :0; + long :0; + }; + struct { /* DEPRECATED: will be removed in v1.0 */ + /* perf event attrs passed directly into perf_event_open() */ + struct perf_event_attr *attr; + /* raw event callback */ + perf_buffer_event_fn event_cb; + /* ctx is provided to event_cb */ + void *ctx; + }; + }; /* if cpu_cnt == 0, open all on all possible CPUs (up to the number of * max_entries of given PERF_EVENT_ARRAY map) */ @@ -818,11 +861,28 @@ struct perf_buffer_raw_opts { /* if cpu_cnt > 0, map_keys specify map keys to set per-CPU FDs for */ int *map_keys; }; +#define perf_buffer_raw_opts__last_field map_keys LIBBPF_API struct perf_buffer * -perf_buffer__new_raw(int map_fd, size_t page_cnt, +perf_buffer__new_raw(int map_fd, size_t page_cnt, struct perf_event_attr *attr, + perf_buffer_event_fn event_cb, void *ctx, const struct perf_buffer_raw_opts *opts); +LIBBPF_API struct perf_buffer * +perf_buffer__new_raw_v0_6_0(int map_fd, size_t page_cnt, struct perf_event_attr *attr, + perf_buffer_event_fn event_cb, void *ctx, + const struct perf_buffer_raw_opts *opts); + +LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "use new variant of perf_buffer__new_raw() instead") +struct perf_buffer *perf_buffer__new_raw_deprecated(int map_fd, size_t page_cnt, + const struct perf_buffer_raw_opts *opts); + +#define perf_buffer__new_raw(...) ___libbpf_overload(___perf_buffer_new_raw, __VA_ARGS__) +#define ___perf_buffer_new_raw6(map_fd, page_cnt, attr, event_cb, ctx, opts) \ + perf_buffer__new_raw(map_fd, page_cnt, attr, event_cb, ctx, opts) +#define ___perf_buffer_new_raw3(map_fd, page_cnt, opts) \ + perf_buffer__new_raw_deprecated(map_fd, page_cnt, opts) + LIBBPF_API void perf_buffer__free(struct perf_buffer *pb); LIBBPF_API int perf_buffer__epoll_fd(const struct perf_buffer *pb); LIBBPF_API int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms); diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index 07762d6723ef..9e91aa8da303 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -409,4 +409,8 @@ LIBBPF_0.6.0 { btf__type_cnt; btf_dump__new; btf_dump__new_deprecated; + perf_buffer__new; + perf_buffer__new_deprecated; + perf_buffer__new_raw; + perf_buffer__new_raw_deprecated; } LIBBPF_0.5.0; -- cgit v1.2.3 From 0b52a5f4b994c05070237271c7fac3265b640ffb Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 10 Nov 2021 21:36:21 -0800 Subject: selftests/bpf: Migrate all deprecated perf_buffer uses Migrate all old-style perf_buffer__new() and perf_buffer__new_raw() calls to new v1.0+ variants. 
Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20211111053624.190580-7-andrii@kernel.org --- tools/testing/selftests/bpf/benchs/bench_ringbufs.c | 8 ++------ tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c | 5 ++--- tools/testing/selftests/bpf/prog_tests/kfree_skb.c | 6 ++---- tools/testing/selftests/bpf/prog_tests/perf_buffer.c | 6 ++---- tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c | 7 ++----- tools/testing/selftests/bpf/test_tcpnotify_user.c | 4 +--- 6 files changed, 11 insertions(+), 25 deletions(-) diff --git a/tools/testing/selftests/bpf/benchs/bench_ringbufs.c b/tools/testing/selftests/bpf/benchs/bench_ringbufs.c index d167bffac679..52d4a2f91dbd 100644 --- a/tools/testing/selftests/bpf/benchs/bench_ringbufs.c +++ b/tools/testing/selftests/bpf/benchs/bench_ringbufs.c @@ -394,11 +394,6 @@ static void perfbuf_libbpf_setup() { struct perfbuf_libbpf_ctx *ctx = &perfbuf_libbpf_ctx; struct perf_event_attr attr; - struct perf_buffer_raw_opts pb_opts = { - .event_cb = perfbuf_process_sample_raw, - .ctx = (void *)(long)0, - .attr = &attr, - }; struct bpf_link *link; ctx->skel = perfbuf_setup_skeleton(); @@ -423,7 +418,8 @@ static void perfbuf_libbpf_setup() } ctx->perfbuf = perf_buffer__new_raw(bpf_map__fd(ctx->skel->maps.perfbuf), - args.perfbuf_sz, &pb_opts); + args.perfbuf_sz, &attr, + perfbuf_process_sample_raw, NULL, NULL); if (!ctx->perfbuf) { fprintf(stderr, "failed to create perfbuf\n"); exit(1); diff --git a/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c b/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c index 569fcc6ed660..4184c399d4c6 100644 --- a/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c +++ b/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c @@ -85,7 +85,6 @@ void test_get_stack_raw_tp(void) const char *file_err = "./test_get_stack_rawtp_err.o"; const char *prog_name = "raw_tracepoint/sys_enter"; int i, err, prog_fd, exp_cnt = MAX_CNT_RAWTP; - struct perf_buffer_opts pb_opts = {}; struct perf_buffer *pb = NULL; struct bpf_link *link = NULL; struct timespec tv = {0, 10}; @@ -124,8 +123,8 @@ void test_get_stack_raw_tp(void) if (!ASSERT_OK_PTR(link, "attach_raw_tp")) goto close_prog; - pb_opts.sample_cb = get_stack_print_output; - pb = perf_buffer__new(bpf_map__fd(map), 8, &pb_opts); + pb = perf_buffer__new(bpf_map__fd(map), 8, get_stack_print_output, + NULL, NULL, NULL); if (!ASSERT_OK_PTR(pb, "perf_buf__new")) goto close_prog; diff --git a/tools/testing/selftests/bpf/prog_tests/kfree_skb.c b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c index 885413ed5c96..2a49f8fcde06 100644 --- a/tools/testing/selftests/bpf/prog_tests/kfree_skb.c +++ b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c @@ -66,7 +66,6 @@ void serial_test_kfree_skb(void) struct bpf_map *perf_buf_map, *global_data; struct bpf_program *prog, *fentry, *fexit; struct bpf_object *obj, *obj2 = NULL; - struct perf_buffer_opts pb_opts = {}; struct perf_buffer *pb = NULL; int err, kfree_skb_fd; bool passed = false; @@ -112,9 +111,8 @@ void serial_test_kfree_skb(void) goto close_prog; /* set up perf buffer */ - pb_opts.sample_cb = on_sample; - pb_opts.ctx = &passed; - pb = perf_buffer__new(bpf_map__fd(perf_buf_map), 1, &pb_opts); + pb = perf_buffer__new(bpf_map__fd(perf_buf_map), 1, + on_sample, NULL, &passed, NULL); if (!ASSERT_OK_PTR(pb, "perf_buf__new")) goto close_prog; diff --git a/tools/testing/selftests/bpf/prog_tests/perf_buffer.c b/tools/testing/selftests/bpf/prog_tests/perf_buffer.c index 
4e32f3586a75..5fc2b3a0711e 100644 --- a/tools/testing/selftests/bpf/prog_tests/perf_buffer.c +++ b/tools/testing/selftests/bpf/prog_tests/perf_buffer.c @@ -47,7 +47,6 @@ void serial_test_perf_buffer(void) { int err, on_len, nr_on_cpus = 0, nr_cpus, i, j; int zero = 0, my_pid = getpid(); - struct perf_buffer_opts pb_opts = {}; struct test_perf_buffer *skel; cpu_set_t cpu_seen; struct perf_buffer *pb; @@ -82,9 +81,8 @@ void serial_test_perf_buffer(void) goto out_close; /* set up perf buffer */ - pb_opts.sample_cb = on_sample; - pb_opts.ctx = &cpu_seen; - pb = perf_buffer__new(bpf_map__fd(skel->maps.perf_buf_map), 1, &pb_opts); + pb = perf_buffer__new(bpf_map__fd(skel->maps.perf_buf_map), 1, + on_sample, NULL, &cpu_seen, NULL); if (!ASSERT_OK_PTR(pb, "perf_buf__new")) goto out_close; diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c b/tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c index 3bd5904b4db5..f99386d1dc4c 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c @@ -49,7 +49,6 @@ void test_xdp_bpf2bpf(void) struct vip key4 = {.protocol = 6, .family = AF_INET}; struct bpf_program *prog; struct perf_buffer *pb = NULL; - struct perf_buffer_opts pb_opts = {}; /* Load XDP program to introspect */ pkt_skel = test_xdp__open_and_load(); @@ -86,10 +85,8 @@ void test_xdp_bpf2bpf(void) goto out; /* Set up perf buffer */ - pb_opts.sample_cb = on_sample; - pb_opts.ctx = &passed; - pb = perf_buffer__new(bpf_map__fd(ftrace_skel->maps.perf_buf_map), - 1, &pb_opts); + pb = perf_buffer__new(bpf_map__fd(ftrace_skel->maps.perf_buf_map), 1, + on_sample, NULL, &passed, NULL); if (!ASSERT_OK_PTR(pb, "perf_buf__new")) goto out; diff --git a/tools/testing/selftests/bpf/test_tcpnotify_user.c b/tools/testing/selftests/bpf/test_tcpnotify_user.c index 63111cb082fe..4c5114765b23 100644 --- a/tools/testing/selftests/bpf/test_tcpnotify_user.c +++ b/tools/testing/selftests/bpf/test_tcpnotify_user.c @@ -72,7 +72,6 @@ int main(int argc, char **argv) { const char *file = "test_tcpnotify_kern.o"; struct bpf_map *perf_map, *global_map; - struct perf_buffer_opts pb_opts = {}; struct tcpnotify_globals g = {0}; struct perf_buffer *pb = NULL; const char *cg_path = "/foo"; @@ -117,8 +116,7 @@ int main(int argc, char **argv) return -1; } - pb_opts.sample_cb = dummyfn; - pb = perf_buffer__new(bpf_map__fd(perf_map), 8, &pb_opts); + pb = perf_buffer__new(bpf_map__fd(perf_map), 8, dummyfn, NULL, NULL, NULL); if (!pb) goto err; -- cgit v1.2.3 From 60ba87bb6bafaaa6e8ef9a73834cf701194d1923 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 10 Nov 2021 21:36:22 -0800 Subject: selftests/bpf: Update btf_dump__new() uses to v1.0+ variant Update to-be-deprecated forms of btf_dump__new(). 
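As with perf_buffer above, a hedged sketch of the v1.0+ btf_dump__new() argument order these updates switch to; the printer callback and helper below are placeholders, not from the selftests:

#include <stdio.h>
#include <stdarg.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>

/* Illustrative printer: 'ctx' is the FILE * passed to btf_dump__new(). */
static void dump_printf(void *ctx, const char *fmt, va_list args)
{
	vfprintf(ctx, fmt, args);
}

static int dump_one_type(const struct btf *btf, __u32 id)
{
	/* new order: btf, print callback, callback ctx, opts (may be NULL) */
	struct btf_dump *d = btf_dump__new(btf, dump_printf, stdout, NULL);
	int err = libbpf_get_error(d);

	if (err)
		return err;
	err = btf_dump__dump_type(d, id);
	btf_dump__free(d);
	return err;
}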
Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20211111053624.190580-8-andrii@kernel.org --- tools/testing/selftests/bpf/btf_helpers.c | 4 +-- tools/testing/selftests/bpf/prog_tests/btf_dump.c | 33 +++++++++------------- tools/testing/selftests/bpf/prog_tests/btf_split.c | 4 +-- 3 files changed, 15 insertions(+), 26 deletions(-) diff --git a/tools/testing/selftests/bpf/btf_helpers.c b/tools/testing/selftests/bpf/btf_helpers.c index 3d1a748d09d8..acb59202486d 100644 --- a/tools/testing/selftests/bpf/btf_helpers.c +++ b/tools/testing/selftests/bpf/btf_helpers.c @@ -238,7 +238,6 @@ const char *btf_type_c_dump(const struct btf *btf) static char buf[16 * 1024]; FILE *buf_file; struct btf_dump *d = NULL; - struct btf_dump_opts opts = {}; int err, i; buf_file = fmemopen(buf, sizeof(buf) - 1, "w"); @@ -247,8 +246,7 @@ const char *btf_type_c_dump(const struct btf *btf) return NULL; } - opts.ctx = buf_file; - d = btf_dump__new(btf, NULL, &opts, btf_dump_printf); + d = btf_dump__new(btf, btf_dump_printf, buf_file, NULL); if (libbpf_get_error(d)) { fprintf(stderr, "Failed to create btf_dump instance: %ld\n", libbpf_get_error(d)); goto err_out; diff --git a/tools/testing/selftests/bpf/prog_tests/btf_dump.c b/tools/testing/selftests/bpf/prog_tests/btf_dump.c index a04961942dfa..d6272013a5a3 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_dump.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_dump.c @@ -13,25 +13,23 @@ static struct btf_dump_test_case { const char *name; const char *file; bool known_ptr_sz; - struct btf_dump_opts opts; } btf_dump_test_cases[] = { - {"btf_dump: syntax", "btf_dump_test_case_syntax", true, {}}, - {"btf_dump: ordering", "btf_dump_test_case_ordering", false, {}}, - {"btf_dump: padding", "btf_dump_test_case_padding", true, {}}, - {"btf_dump: packing", "btf_dump_test_case_packing", true, {}}, - {"btf_dump: bitfields", "btf_dump_test_case_bitfields", true, {}}, - {"btf_dump: multidim", "btf_dump_test_case_multidim", false, {}}, - {"btf_dump: namespacing", "btf_dump_test_case_namespacing", false, {}}, + {"btf_dump: syntax", "btf_dump_test_case_syntax", true}, + {"btf_dump: ordering", "btf_dump_test_case_ordering", false}, + {"btf_dump: padding", "btf_dump_test_case_padding", true}, + {"btf_dump: packing", "btf_dump_test_case_packing", true}, + {"btf_dump: bitfields", "btf_dump_test_case_bitfields", true}, + {"btf_dump: multidim", "btf_dump_test_case_multidim", false}, + {"btf_dump: namespacing", "btf_dump_test_case_namespacing", false}, }; -static int btf_dump_all_types(const struct btf *btf, - const struct btf_dump_opts *opts) +static int btf_dump_all_types(const struct btf *btf, void *ctx) { size_t type_cnt = btf__type_cnt(btf); struct btf_dump *d; int err = 0, id; - d = btf_dump__new(btf, NULL, opts, btf_dump_printf); + d = btf_dump__new(btf, btf_dump_printf, ctx, NULL); err = libbpf_get_error(d); if (err) return err; @@ -88,8 +86,7 @@ static int test_btf_dump_case(int n, struct btf_dump_test_case *t) goto done; } - t->opts.ctx = f; - err = btf_dump_all_types(btf, &t->opts); + err = btf_dump_all_types(btf, f); fclose(f); close(fd); if (CHECK(err, "btf_dump", "failure during C dumping: %d\n", err)) { @@ -137,7 +134,6 @@ static void test_btf_dump_incremental(void) { struct btf *btf = NULL; struct btf_dump *d = NULL; - struct btf_dump_opts opts; int id, err, i; dump_buf_file = open_memstream(&dump_buf, &dump_buf_sz); @@ -146,8 +142,7 @@ static void test_btf_dump_incremental(void) btf = btf__new_empty(); if 
(!ASSERT_OK_PTR(btf, "new_empty")) goto err_out; - opts.ctx = dump_buf_file; - d = btf_dump__new(btf, NULL, &opts, btf_dump_printf); + d = btf_dump__new(btf, btf_dump_printf, dump_buf_file, NULL); if (!ASSERT_OK(libbpf_get_error(d), "btf_dump__new")) goto err_out; @@ -815,7 +810,6 @@ static void test_btf_datasec(struct btf *btf, struct btf_dump *d, char *str, static void test_btf_dump_datasec_data(char *str) { struct btf *btf; - struct btf_dump_opts opts = { .ctx = str }; char license[4] = "GPL"; struct btf_dump *d; @@ -823,7 +817,7 @@ static void test_btf_dump_datasec_data(char *str) if (!ASSERT_OK_PTR(btf, "xdping_kern.o BTF not found")) return; - d = btf_dump__new(btf, NULL, &opts, btf_dump_snprintf); + d = btf_dump__new(btf, btf_dump_snprintf, str, NULL); if (!ASSERT_OK_PTR(d, "could not create BTF dump")) goto out; @@ -837,7 +831,6 @@ out: void test_btf_dump() { char str[STRSIZE]; - struct btf_dump_opts opts = { .ctx = str }; struct btf_dump *d; struct btf *btf; int i; @@ -857,7 +850,7 @@ void test_btf_dump() { if (!ASSERT_OK_PTR(btf, "no kernel BTF found")) return; - d = btf_dump__new(btf, NULL, &opts, btf_dump_snprintf); + d = btf_dump__new(btf, btf_dump_snprintf, str, NULL); if (!ASSERT_OK_PTR(d, "could not create BTF dump")) return; diff --git a/tools/testing/selftests/bpf/prog_tests/btf_split.c b/tools/testing/selftests/bpf/prog_tests/btf_split.c index b1ffe61f2aa9..eef1158676ed 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_split.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_split.c @@ -13,7 +13,6 @@ static void btf_dump_printf(void *ctx, const char *fmt, va_list args) } void test_btf_split() { - struct btf_dump_opts opts; struct btf_dump *d = NULL; const struct btf_type *t; struct btf *btf1, *btf2; @@ -68,8 +67,7 @@ void test_btf_split() { dump_buf_file = open_memstream(&dump_buf, &dump_buf_sz); if (!ASSERT_OK_PTR(dump_buf_file, "dump_memstream")) return; - opts.ctx = dump_buf_file; - d = btf_dump__new(btf2, NULL, &opts, btf_dump_printf); + d = btf_dump__new(btf2, btf_dump_printf, dump_buf_file, NULL); if (!ASSERT_OK_PTR(d, "btf_dump__new")) goto cleanup; for (i = 1; i < btf__type_cnt(btf2); i++) { -- cgit v1.2.3 From eda8bfa5b7c76d332ece1f24a3662ca843fd880a Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 10 Nov 2021 21:36:23 -0800 Subject: tools/runqslower: Update perf_buffer__new() calls Use v1.0+ compatible variant of perf_buffer__new() call to prepare for deprecation. 
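The shape of the resulting consumer loop, sketched here in hedged form; the map fd and the two callbacks are assumed to come from the surrounding tool (as they do in runqslower), and the -EINTR handling is a common convention rather than something mandated by the patch:

#include <errno.h>
#include <bpf/libbpf.h>

/* Illustrative only; handle_event/handle_lost_events are the tool's callbacks. */
static int consume_events(int events_map_fd, perf_buffer_sample_fn handle_event,
			  perf_buffer_lost_fn handle_lost_events)
{
	struct perf_buffer *pb;
	int err;

	pb = perf_buffer__new(events_map_fd, 64 /* pages per CPU */,
			      handle_event, handle_lost_events, NULL, NULL);
	err = libbpf_get_error(pb);
	if (err)
		return err;

	/* perf_buffer__poll() runs the callbacks for every ready record */
	while ((err = perf_buffer__poll(pb, 100 /* ms */)) >= 0)
		;

	perf_buffer__free(pb);
	return err == -EINTR ? 0 : err;
}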
Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20211111053624.190580-9-andrii@kernel.org --- tools/bpf/runqslower/runqslower.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/tools/bpf/runqslower/runqslower.c b/tools/bpf/runqslower/runqslower.c index d89715844952..2414cc764461 100644 --- a/tools/bpf/runqslower/runqslower.c +++ b/tools/bpf/runqslower/runqslower.c @@ -123,7 +123,6 @@ int main(int argc, char **argv) .parser = parse_arg, .doc = argp_program_doc, }; - struct perf_buffer_opts pb_opts; struct perf_buffer *pb = NULL; struct runqslower_bpf *obj; int err; @@ -165,9 +164,8 @@ int main(int argc, char **argv) printf("Tracing run queue latency higher than %llu us\n", env.min_us); printf("%-8s %-16s %-6s %14s\n", "TIME", "COMM", "PID", "LAT(us)"); - pb_opts.sample_cb = handle_event; - pb_opts.lost_cb = handle_lost_events; - pb = perf_buffer__new(bpf_map__fd(obj->maps.events), 64, &pb_opts); + pb = perf_buffer__new(bpf_map__fd(obj->maps.events), 64, + handle_event, handle_lost_events, NULL, NULL); err = libbpf_get_error(pb); if (err) { pb = NULL; -- cgit v1.2.3 From 164b04f27fbd769f57905dfddd2a8953974eeef4 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 10 Nov 2021 21:36:24 -0800 Subject: bpftool: Update btf_dump__new() and perf_buffer__new_raw() calls Use v1.0-compatible variants of btf_dump and perf_buffer "constructors". This is also a demonstration of reusing struct perf_buffer_raw_opts as OPTS-style option struct for new perf_buffer__new_raw() API. Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20211111053624.190580-10-andrii@kernel.org --- tools/bpf/bpftool/btf.c | 2 +- tools/bpf/bpftool/gen.c | 2 +- tools/bpf/bpftool/map_perf_ring.c | 9 +++------ 3 files changed, 5 insertions(+), 8 deletions(-) diff --git a/tools/bpf/bpftool/btf.c b/tools/bpf/bpftool/btf.c index 015d2758f826..223ac7676027 100644 --- a/tools/bpf/bpftool/btf.c +++ b/tools/bpf/bpftool/btf.c @@ -418,7 +418,7 @@ static int dump_btf_c(const struct btf *btf, struct btf_dump *d; int err = 0, i; - d = btf_dump__new(btf, NULL, NULL, btf_dump_printf); + d = btf_dump__new(btf, btf_dump_printf, NULL, NULL); if (IS_ERR(d)) return PTR_ERR(d); diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c index 5c18351290f0..89f0e828bbfa 100644 --- a/tools/bpf/bpftool/gen.c +++ b/tools/bpf/bpftool/gen.c @@ -218,7 +218,7 @@ static int codegen_datasecs(struct bpf_object *obj, const char *obj_name) char sec_ident[256], map_ident[256]; int i, err = 0; - d = btf_dump__new(btf, NULL, NULL, codegen_btf_dump_printf); + d = btf_dump__new(btf, codegen_btf_dump_printf, NULL, NULL); if (IS_ERR(d)) return PTR_ERR(d); diff --git a/tools/bpf/bpftool/map_perf_ring.c b/tools/bpf/bpftool/map_perf_ring.c index b98ea702d284..6b0c410152de 100644 --- a/tools/bpf/bpftool/map_perf_ring.c +++ b/tools/bpf/bpftool/map_perf_ring.c @@ -124,7 +124,7 @@ int do_event_pipe(int argc, char **argv) .wakeup_events = 1, }; struct bpf_map_info map_info = {}; - struct perf_buffer_raw_opts opts = {}; + LIBBPF_OPTS(perf_buffer_raw_opts, opts); struct event_pipe_ctx ctx = { .all_cpus = true, .cpu = -1, @@ -190,14 +190,11 @@ int do_event_pipe(int argc, char **argv) ctx.idx = 0; } - opts.attr = &perf_attr; - opts.event_cb = print_bpf_output; - opts.ctx = &ctx; opts.cpu_cnt = ctx.all_cpus ? 
0 : 1; opts.cpus = &ctx.cpu; opts.map_keys = &ctx.idx; - - pb = perf_buffer__new_raw(map_fd, MMAP_PAGE_CNT, &opts); + pb = perf_buffer__new_raw(map_fd, MMAP_PAGE_CNT, &perf_attr, + print_bpf_output, &ctx, &opts); err = libbpf_get_error(pb); if (err) { p_err("failed to create perf buffer: %s (%d)", -- cgit v1.2.3 From 8c42d2fa4eeab6c37a0b1b1aa7a2715248ef4f34 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Thu, 11 Nov 2021 17:26:09 -0800 Subject: bpf: Support BTF_KIND_TYPE_TAG for btf_type_tag attributes LLVM patches ([1] for clang, [2] and [3] for BPF backend) added support for btf_type_tag attributes. This patch added support for the kernel. The main motivation for btf_type_tag is to bring kernel annotations __user, __rcu etc. to btf. With such information available in btf, bpf verifier can detect mis-usages and reject the program. For example, for __user tagged pointer, developers can then use proper helper like bpf_probe_read_user() etc. to read the data. BTF_KIND_TYPE_TAG may also useful for other tracing facility where instead of to require user to specify kernel/user address type, the kernel can detect it by itself with btf. [1] https://reviews.llvm.org/D111199 [2] https://reviews.llvm.org/D113222 [3] https://reviews.llvm.org/D113496 Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20211112012609.1505032-1-yhs@fb.com --- include/uapi/linux/btf.h | 3 ++- kernel/bpf/btf.c | 14 +++++++++++++- tools/include/uapi/linux/btf.h | 3 ++- 3 files changed, 17 insertions(+), 3 deletions(-) diff --git a/include/uapi/linux/btf.h b/include/uapi/linux/btf.h index deb12f755f0f..b0d8fea1951d 100644 --- a/include/uapi/linux/btf.h +++ b/include/uapi/linux/btf.h @@ -43,7 +43,7 @@ struct btf_type { * "size" tells the size of the type it is describing. * * "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT, - * FUNC, FUNC_PROTO, VAR and DECL_TAG. + * FUNC, FUNC_PROTO, VAR, DECL_TAG and TYPE_TAG. * "type" is a type_id referring to another type. 
*/ union { @@ -75,6 +75,7 @@ enum { BTF_KIND_DATASEC = 15, /* Section */ BTF_KIND_FLOAT = 16, /* Floating point */ BTF_KIND_DECL_TAG = 17, /* Decl Tag */ + BTF_KIND_TYPE_TAG = 18, /* Type Tag */ NR_BTF_KINDS, BTF_KIND_MAX = NR_BTF_KINDS - 1, diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index cdb0fba65600..1dd9ba82da1e 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -282,6 +282,7 @@ static const char * const btf_kind_str[NR_BTF_KINDS] = { [BTF_KIND_DATASEC] = "DATASEC", [BTF_KIND_FLOAT] = "FLOAT", [BTF_KIND_DECL_TAG] = "DECL_TAG", + [BTF_KIND_TYPE_TAG] = "TYPE_TAG", }; const char *btf_type_str(const struct btf_type *t) @@ -418,6 +419,7 @@ static bool btf_type_is_modifier(const struct btf_type *t) case BTF_KIND_VOLATILE: case BTF_KIND_CONST: case BTF_KIND_RESTRICT: + case BTF_KIND_TYPE_TAG: return true; } @@ -1737,6 +1739,7 @@ __btf_resolve_size(const struct btf *btf, const struct btf_type *type, case BTF_KIND_VOLATILE: case BTF_KIND_CONST: case BTF_KIND_RESTRICT: + case BTF_KIND_TYPE_TAG: id = type->type; type = btf_type_by_id(btf, type->type); break; @@ -2345,6 +2348,8 @@ static int btf_ref_type_check_meta(struct btf_verifier_env *env, const struct btf_type *t, u32 meta_left) { + const char *value; + if (btf_type_vlen(t)) { btf_verifier_log_type(env, t, "vlen != 0"); return -EINVAL; @@ -2360,7 +2365,7 @@ static int btf_ref_type_check_meta(struct btf_verifier_env *env, return -EINVAL; } - /* typedef type must have a valid name, and other ref types, + /* typedef/type_tag type must have a valid name, and other ref types, * volatile, const, restrict, should have a null name. */ if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF) { @@ -2369,6 +2374,12 @@ static int btf_ref_type_check_meta(struct btf_verifier_env *env, btf_verifier_log_type(env, t, "Invalid name"); return -EINVAL; } + } else if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPE_TAG) { + value = btf_name_by_offset(env->btf, t->name_off); + if (!value || !value[0]) { + btf_verifier_log_type(env, t, "Invalid name"); + return -EINVAL; + } } else { if (t->name_off) { btf_verifier_log_type(env, t, "Invalid name"); @@ -4059,6 +4070,7 @@ static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS] = { [BTF_KIND_DATASEC] = &datasec_ops, [BTF_KIND_FLOAT] = &float_ops, [BTF_KIND_DECL_TAG] = &decl_tag_ops, + [BTF_KIND_TYPE_TAG] = &modifier_ops, }; static s32 btf_check_meta(struct btf_verifier_env *env, diff --git a/tools/include/uapi/linux/btf.h b/tools/include/uapi/linux/btf.h index deb12f755f0f..b0d8fea1951d 100644 --- a/tools/include/uapi/linux/btf.h +++ b/tools/include/uapi/linux/btf.h @@ -43,7 +43,7 @@ struct btf_type { * "size" tells the size of the type it is describing. * * "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT, - * FUNC, FUNC_PROTO, VAR and DECL_TAG. + * FUNC, FUNC_PROTO, VAR, DECL_TAG and TYPE_TAG. * "type" is a type_id referring to another type. */ union { @@ -75,6 +75,7 @@ enum { BTF_KIND_DATASEC = 15, /* Section */ BTF_KIND_FLOAT = 16, /* Floating point */ BTF_KIND_DECL_TAG = 17, /* Decl Tag */ + BTF_KIND_TYPE_TAG = 18, /* Type Tag */ NR_BTF_KINDS, BTF_KIND_MAX = NR_BTF_KINDS - 1, -- cgit v1.2.3 From 2dc1e488e5cdfd937554ca81fd46ad874d244b3f Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Thu, 11 Nov 2021 17:26:14 -0800 Subject: libbpf: Support BTF_KIND_TYPE_TAG Add libbpf support for BTF_KIND_TYPE_TAG. 
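A hedged sketch of the new btf__add_type_tag() in use, mirroring the int -> type_tag -> ptr chain that the feature probe added further down in this patch encodes by hand; the tag value "user" and the helper name are illustrative:

#include <bpf/btf.h>

/* Builds ptr -> type_tag("user") -> int in an otherwise empty BTF and
 * returns the pointer's type ID (or a negative error). Illustrative only.
 */
static int add_user_tagged_ptr(struct btf *btf)
{
	int int_id, tag_id;

	int_id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);	/* expected ID 1 */
	if (int_id < 0)
		return int_id;

	tag_id = btf__add_type_tag(btf, "user", int_id);	/* expected ID 2 */
	if (tag_id < 0)
		return tag_id;

	return btf__add_ptr(btf, tag_id);			/* expected ID 3 */
}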
Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20211112012614.1505315-1-yhs@fb.com --- tools/lib/bpf/btf.c | 23 +++++++++++++++++++++++ tools/lib/bpf/btf.h | 9 ++++++++- tools/lib/bpf/btf_dump.c | 9 +++++++++ tools/lib/bpf/libbpf.c | 31 ++++++++++++++++++++++++++++++- tools/lib/bpf/libbpf.map | 1 + tools/lib/bpf/libbpf_internal.h | 2 ++ 6 files changed, 73 insertions(+), 2 deletions(-) diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index fcec27622e7a..fadf089ae8fe 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -299,6 +299,7 @@ static int btf_type_size(const struct btf_type *t) case BTF_KIND_TYPEDEF: case BTF_KIND_FUNC: case BTF_KIND_FLOAT: + case BTF_KIND_TYPE_TAG: return base_size; case BTF_KIND_INT: return base_size + sizeof(__u32); @@ -349,6 +350,7 @@ static int btf_bswap_type_rest(struct btf_type *t) case BTF_KIND_TYPEDEF: case BTF_KIND_FUNC: case BTF_KIND_FLOAT: + case BTF_KIND_TYPE_TAG: return 0; case BTF_KIND_INT: *(__u32 *)(t + 1) = bswap_32(*(__u32 *)(t + 1)); @@ -649,6 +651,7 @@ int btf__align_of(const struct btf *btf, __u32 id) case BTF_KIND_VOLATILE: case BTF_KIND_CONST: case BTF_KIND_RESTRICT: + case BTF_KIND_TYPE_TAG: return btf__align_of(btf, t->type); case BTF_KIND_ARRAY: return btf__align_of(btf, btf_array(t)->type); @@ -2235,6 +2238,22 @@ int btf__add_restrict(struct btf *btf, int ref_type_id) return btf_add_ref_kind(btf, BTF_KIND_RESTRICT, NULL, ref_type_id); } +/* + * Append new BTF_KIND_TYPE_TAG type with: + * - *value*, non-empty/non-NULL tag value; + * - *ref_type_id* - referenced type ID, it might not exist yet; + * Returns: + * - >0, type ID of newly added BTF type; + * - <0, on error. + */ +int btf__add_type_tag(struct btf *btf, const char *value, int ref_type_id) +{ + if (!value|| !value[0]) + return libbpf_err(-EINVAL); + + return btf_add_ref_kind(btf, BTF_KIND_TYPE_TAG, value, ref_type_id); +} + /* * Append new BTF_KIND_FUNC type with: * - *name*, non-empty/non-NULL name; @@ -3639,6 +3658,7 @@ static int btf_dedup_prep(struct btf_dedup *d) case BTF_KIND_TYPEDEF: case BTF_KIND_FUNC: case BTF_KIND_FLOAT: + case BTF_KIND_TYPE_TAG: h = btf_hash_common(t); break; case BTF_KIND_INT: @@ -3699,6 +3719,7 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id) case BTF_KIND_VAR: case BTF_KIND_DATASEC: case BTF_KIND_DECL_TAG: + case BTF_KIND_TYPE_TAG: return 0; case BTF_KIND_INT: @@ -4297,6 +4318,7 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id) case BTF_KIND_PTR: case BTF_KIND_TYPEDEF: case BTF_KIND_FUNC: + case BTF_KIND_TYPE_TAG: ref_type_id = btf_dedup_ref_type(d, t->type); if (ref_type_id < 0) return ref_type_id; @@ -4603,6 +4625,7 @@ int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ct case BTF_KIND_FUNC: case BTF_KIND_VAR: case BTF_KIND_DECL_TAG: + case BTF_KIND_TYPE_TAG: return visit(&t->type, ctx); case BTF_KIND_ARRAY: { diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h index 45310c65e865..5c73a5b0a044 100644 --- a/tools/lib/bpf/btf.h +++ b/tools/lib/bpf/btf.h @@ -227,6 +227,7 @@ LIBBPF_API int btf__add_typedef(struct btf *btf, const char *name, int ref_type_ LIBBPF_API int btf__add_volatile(struct btf *btf, int ref_type_id); LIBBPF_API int btf__add_const(struct btf *btf, int ref_type_id); LIBBPF_API int btf__add_restrict(struct btf *btf, int ref_type_id); +LIBBPF_API int btf__add_type_tag(struct btf *btf, const char *value, int ref_type_id); /* func and func_proto construction APIs */ LIBBPF_API int 
btf__add_func(struct btf *btf, const char *name, @@ -458,7 +459,8 @@ static inline bool btf_is_mod(const struct btf_type *t) return kind == BTF_KIND_VOLATILE || kind == BTF_KIND_CONST || - kind == BTF_KIND_RESTRICT; + kind == BTF_KIND_RESTRICT || + kind == BTF_KIND_TYPE_TAG; } static inline bool btf_is_func(const struct btf_type *t) @@ -491,6 +493,11 @@ static inline bool btf_is_decl_tag(const struct btf_type *t) return btf_kind(t) == BTF_KIND_DECL_TAG; } +static inline bool btf_is_type_tag(const struct btf_type *t) +{ + return btf_kind(t) == BTF_KIND_TYPE_TAG; +} + static inline __u8 btf_int_encoding(const struct btf_type *t) { return BTF_INT_ENCODING(*(__u32 *)(t + 1)); diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c index b8cd7e4f557a..05f3e7dfec0a 100644 --- a/tools/lib/bpf/btf_dump.c +++ b/tools/lib/bpf/btf_dump.c @@ -330,6 +330,7 @@ static int btf_dump_mark_referenced(struct btf_dump *d) case BTF_KIND_FUNC: case BTF_KIND_VAR: case BTF_KIND_DECL_TAG: + case BTF_KIND_TYPE_TAG: d->type_states[t->type].referenced = 1; break; @@ -573,6 +574,7 @@ static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr) case BTF_KIND_VOLATILE: case BTF_KIND_CONST: case BTF_KIND_RESTRICT: + case BTF_KIND_TYPE_TAG: return btf_dump_order_type(d, t->type, through_ptr); case BTF_KIND_FUNC_PROTO: { @@ -747,6 +749,7 @@ static void btf_dump_emit_type(struct btf_dump *d, __u32 id, __u32 cont_id) case BTF_KIND_VOLATILE: case BTF_KIND_CONST: case BTF_KIND_RESTRICT: + case BTF_KIND_TYPE_TAG: btf_dump_emit_type(d, t->type, cont_id); break; case BTF_KIND_ARRAY: @@ -1167,6 +1170,7 @@ skip_mod: case BTF_KIND_CONST: case BTF_KIND_RESTRICT: case BTF_KIND_FUNC_PROTO: + case BTF_KIND_TYPE_TAG: id = t->type; break; case BTF_KIND_ARRAY: @@ -1335,6 +1339,11 @@ static void btf_dump_emit_type_chain(struct btf_dump *d, case BTF_KIND_RESTRICT: btf_dump_printf(d, " restrict"); break; + case BTF_KIND_TYPE_TAG: + btf_dump_emit_mods(d, decls); + name = btf_name_of(d, t->name_off); + btf_dump_printf(d, " __attribute__((btf_type_tag(\"%s\")))", name); + break; case BTF_KIND_ARRAY: { const struct btf_array *a = btf_array(t); const struct btf_type *next_t; diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 7db14859b27c..de7e09a6b5ec 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -197,6 +197,8 @@ enum kern_feature_id { FEAT_PERF_LINK, /* BTF_KIND_DECL_TAG support */ FEAT_BTF_DECL_TAG, + /* BTF_KIND_TYPE_TAG support */ + FEAT_BTF_TYPE_TAG, __FEAT_CNT, }; @@ -2076,6 +2078,7 @@ static const char *__btf_kind_str(__u16 kind) case BTF_KIND_DATASEC: return "datasec"; case BTF_KIND_FLOAT: return "float"; case BTF_KIND_DECL_TAG: return "decl_tag"; + case BTF_KIND_TYPE_TAG: return "type_tag"; default: return "unknown"; } } @@ -2588,8 +2591,10 @@ static bool btf_needs_sanitization(struct bpf_object *obj) bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT); bool has_func = kernel_supports(obj, FEAT_BTF_FUNC); bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG); + bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG); - return !has_func || !has_datasec || !has_func_global || !has_float || !has_decl_tag; + return !has_func || !has_datasec || !has_func_global || !has_float || + !has_decl_tag || !has_type_tag; } static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf) @@ -2599,6 +2604,7 @@ static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf) bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT); bool has_func = 
kernel_supports(obj, FEAT_BTF_FUNC); bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG); + bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG); struct btf_type *t; int i, j, vlen; @@ -2657,6 +2663,10 @@ static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf) */ t->name_off = 0; t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 0); + } else if (!has_type_tag && btf_is_type_tag(t)) { + /* replace TYPE_TAG with a CONST */ + t->name_off = 0; + t->info = BTF_INFO_ENC(BTF_KIND_CONST, 0, 0); } } } @@ -4460,6 +4470,22 @@ static int probe_kern_btf_decl_tag(void) strs, sizeof(strs))); } +static int probe_kern_btf_type_tag(void) +{ + static const char strs[] = "\0tag"; + __u32 types[] = { + /* int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + /* attr */ + BTF_TYPE_TYPE_TAG_ENC(1, 1), /* [2] */ + /* ptr */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2), /* [3] */ + }; + + return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types), + strs, sizeof(strs))); +} + static int probe_kern_array_mmap(void) { struct bpf_create_map_attr attr = { @@ -4657,6 +4683,9 @@ static struct kern_feature_desc { [FEAT_BTF_DECL_TAG] = { "BTF_KIND_DECL_TAG support", probe_kern_btf_decl_tag, }, + [FEAT_BTF_TYPE_TAG] = { + "BTF_KIND_TYPE_TAG support", probe_kern_btf_type_tag, + }, }; static bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id) diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index 9e91aa8da303..6a59514a48cf 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -403,6 +403,7 @@ LIBBPF_0.6.0 { bpf_program__set_extra_flags; btf__add_btf; btf__add_decl_tag; + btf__add_type_tag; btf__dedup; btf__dedup_deprecated; btf__raw_data; diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h index c1e34794b829..f7ac349650a1 100644 --- a/tools/lib/bpf/libbpf_internal.h +++ b/tools/lib/bpf/libbpf_internal.h @@ -73,6 +73,8 @@ BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_FLOAT, 0, 0), sz) #define BTF_TYPE_DECL_TAG_ENC(value, type, component_idx) \ BTF_TYPE_ENC(value, BTF_INFO_ENC(BTF_KIND_DECL_TAG, 0, 0), type), (component_idx) +#define BTF_TYPE_TYPE_TAG_ENC(value, type) \ + BTF_TYPE_ENC(value, BTF_INFO_ENC(BTF_KIND_TYPE_TAG, 0, 0), type) #ifndef likely #define likely(x) __builtin_expect(!!(x), 1) -- cgit v1.2.3 From 3da5ba6f0509ace03cad38b554c89797129e90be Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Thu, 11 Nov 2021 17:26:20 -0800 Subject: bpftool: Support BTF_KIND_TYPE_TAG Add bpftool support for BTF_KIND_TYPE_TAG. 
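As a quick illustration (editor's sketch, not part of the patch): with this change, a tagged pointer such as "int __attribute__((btf_type_tag("tag1"))) *p" is expected to show up in the raw `bpftool btf dump` output along these lines (the type IDs are hypothetical):

    [1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED
    [2] TYPE_TAG 'tag1' type_id=1
    [3] PTR '(anon)' type_id=2

In JSON mode the TYPE_TAG entry carries a "type_id" field, mirroring how TYPEDEF/CONST/VOLATILE/RESTRICT are already printed.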
Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20211112012620.1505506-1-yhs@fb.com --- tools/bpf/bpftool/btf.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/bpf/bpftool/btf.c b/tools/bpf/bpftool/btf.c index 223ac7676027..c7e3b0b0029e 100644 --- a/tools/bpf/bpftool/btf.c +++ b/tools/bpf/bpftool/btf.c @@ -39,6 +39,7 @@ static const char * const btf_kind_str[NR_BTF_KINDS] = { [BTF_KIND_DATASEC] = "DATASEC", [BTF_KIND_FLOAT] = "FLOAT", [BTF_KIND_DECL_TAG] = "DECL_TAG", + [BTF_KIND_TYPE_TAG] = "TYPE_TAG", }; struct btf_attach_point { @@ -142,6 +143,7 @@ static int dump_btf_type(const struct btf *btf, __u32 id, case BTF_KIND_VOLATILE: case BTF_KIND_RESTRICT: case BTF_KIND_TYPEDEF: + case BTF_KIND_TYPE_TAG: if (json_output) jsonw_uint_field(w, "type_id", t->type); else -- cgit v1.2.3 From 0dc85872203bf7b15c56c7eb228b8f3fabb17ac2 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Thu, 11 Nov 2021 17:26:25 -0800 Subject: selftests/bpf: Test libbpf API function btf__add_type_tag() Add unit tests for btf__add_type_tag(). Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20211112012625.1505748-1-yhs@fb.com --- tools/testing/selftests/bpf/btf_helpers.c | 4 +- tools/testing/selftests/bpf/prog_tests/btf_write.c | 67 +++++++++++++--------- 2 files changed, 43 insertions(+), 28 deletions(-) diff --git a/tools/testing/selftests/bpf/btf_helpers.c b/tools/testing/selftests/bpf/btf_helpers.c index acb59202486d..b5941d514e17 100644 --- a/tools/testing/selftests/bpf/btf_helpers.c +++ b/tools/testing/selftests/bpf/btf_helpers.c @@ -25,11 +25,12 @@ static const char * const btf_kind_str_mapping[] = { [BTF_KIND_DATASEC] = "DATASEC", [BTF_KIND_FLOAT] = "FLOAT", [BTF_KIND_DECL_TAG] = "DECL_TAG", + [BTF_KIND_TYPE_TAG] = "TYPE_TAG", }; static const char *btf_kind_str(__u16 kind) { - if (kind > BTF_KIND_DECL_TAG) + if (kind > BTF_KIND_TYPE_TAG) return "UNKNOWN"; return btf_kind_str_mapping[kind]; } @@ -109,6 +110,7 @@ int fprintf_btf_type_raw(FILE *out, const struct btf *btf, __u32 id) case BTF_KIND_VOLATILE: case BTF_KIND_RESTRICT: case BTF_KIND_TYPEDEF: + case BTF_KIND_TYPE_TAG: fprintf(out, " type_id=%u", t->type); break; case BTF_KIND_ARRAY: { diff --git a/tools/testing/selftests/bpf/prog_tests/btf_write.c b/tools/testing/selftests/bpf/prog_tests/btf_write.c index b912eeb0b6b4..addf99c05896 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_write.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_write.c @@ -297,6 +297,16 @@ static void gen_btf(struct btf *btf) ASSERT_EQ(btf_decl_tag(t)->component_idx, 1, "tag_component_idx"); ASSERT_STREQ(btf_type_raw_dump(btf, 19), "[19] DECL_TAG 'tag2' type_id=14 component_idx=1", "raw_dump"); + + /* TYPE_TAG */ + id = btf__add_type_tag(btf, "tag1", 1); + ASSERT_EQ(id, 20, "tag_id"); + t = btf__type_by_id(btf, 20); + ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "tag1", "tag_value"); + ASSERT_EQ(btf_kind(t), BTF_KIND_TYPE_TAG, "tag_kind"); + ASSERT_EQ(t->type, 1, "tag_type"); + ASSERT_STREQ(btf_type_raw_dump(btf, 20), + "[20] TYPE_TAG 'tag1' type_id=1", "raw_dump"); } static void test_btf_add() @@ -337,7 +347,8 @@ static void test_btf_add() "[17] DATASEC 'datasec1' size=12 vlen=1\n" "\ttype_id=1 offset=4 size=8", "[18] DECL_TAG 'tag1' type_id=16 component_idx=-1", - "[19] DECL_TAG 'tag2' type_id=14 component_idx=1"); + "[19] DECL_TAG 'tag2' type_id=14 component_idx=1", + "[20] TYPE_TAG 'tag1' type_id=1"); 
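For orientation, a minimal sketch of how the new API is driven (mirroring the unit test added below; error handling omitted, and the numeric IDs in the comments assume an empty BTF object):

    #include <bpf/btf.h>

    static void type_tag_sketch(void)
    {
            struct btf *btf = btf__new_empty();

            btf__add_int(btf, "int", 4, BTF_INT_SIGNED);  /* [1] */
            btf__add_type_tag(btf, "tag1", 1);            /* [2] tag on [1] */
            btf__add_ptr(btf, 2);                         /* [3] ptr to [2] */
            btf__free(btf);
    }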
btf__free(btf); } @@ -359,7 +370,7 @@ static void test_btf_add_btf() gen_btf(btf2); id = btf__add_btf(btf1, btf2); - if (!ASSERT_EQ(id, 20, "id")) + if (!ASSERT_EQ(id, 21, "id")) goto cleanup; VALIDATE_RAW_BTF( @@ -391,35 +402,37 @@ static void test_btf_add_btf() "\ttype_id=1 offset=4 size=8", "[18] DECL_TAG 'tag1' type_id=16 component_idx=-1", "[19] DECL_TAG 'tag2' type_id=14 component_idx=1", + "[20] TYPE_TAG 'tag1' type_id=1", /* types appended from the second BTF */ - "[20] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", - "[21] PTR '(anon)' type_id=20", - "[22] CONST '(anon)' type_id=24", - "[23] VOLATILE '(anon)' type_id=22", - "[24] RESTRICT '(anon)' type_id=23", - "[25] ARRAY '(anon)' type_id=21 index_type_id=20 nr_elems=10", - "[26] STRUCT 's1' size=8 vlen=2\n" - "\t'f1' type_id=20 bits_offset=0\n" - "\t'f2' type_id=20 bits_offset=32 bitfield_size=16", - "[27] UNION 'u1' size=8 vlen=1\n" - "\t'f1' type_id=20 bits_offset=0 bitfield_size=16", - "[28] ENUM 'e1' size=4 vlen=2\n" + "[21] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[22] PTR '(anon)' type_id=21", + "[23] CONST '(anon)' type_id=25", + "[24] VOLATILE '(anon)' type_id=23", + "[25] RESTRICT '(anon)' type_id=24", + "[26] ARRAY '(anon)' type_id=22 index_type_id=21 nr_elems=10", + "[27] STRUCT 's1' size=8 vlen=2\n" + "\t'f1' type_id=21 bits_offset=0\n" + "\t'f2' type_id=21 bits_offset=32 bitfield_size=16", + "[28] UNION 'u1' size=8 vlen=1\n" + "\t'f1' type_id=21 bits_offset=0 bitfield_size=16", + "[29] ENUM 'e1' size=4 vlen=2\n" "\t'v1' val=1\n" "\t'v2' val=2", - "[29] FWD 'struct_fwd' fwd_kind=struct", - "[30] FWD 'union_fwd' fwd_kind=union", - "[31] ENUM 'enum_fwd' size=4 vlen=0", - "[32] TYPEDEF 'typedef1' type_id=20", - "[33] FUNC 'func1' type_id=34 linkage=global", - "[34] FUNC_PROTO '(anon)' ret_type_id=20 vlen=2\n" - "\t'p1' type_id=20\n" - "\t'p2' type_id=21", - "[35] VAR 'var1' type_id=20, linkage=global-alloc", - "[36] DATASEC 'datasec1' size=12 vlen=1\n" - "\ttype_id=20 offset=4 size=8", - "[37] DECL_TAG 'tag1' type_id=35 component_idx=-1", - "[38] DECL_TAG 'tag2' type_id=33 component_idx=1"); + "[30] FWD 'struct_fwd' fwd_kind=struct", + "[31] FWD 'union_fwd' fwd_kind=union", + "[32] ENUM 'enum_fwd' size=4 vlen=0", + "[33] TYPEDEF 'typedef1' type_id=21", + "[34] FUNC 'func1' type_id=35 linkage=global", + "[35] FUNC_PROTO '(anon)' ret_type_id=21 vlen=2\n" + "\t'p1' type_id=21\n" + "\t'p2' type_id=22", + "[36] VAR 'var1' type_id=21, linkage=global-alloc", + "[37] DATASEC 'datasec1' size=12 vlen=1\n" + "\ttype_id=21 offset=4 size=8", + "[38] DECL_TAG 'tag1' type_id=36 component_idx=-1", + "[39] DECL_TAG 'tag2' type_id=34 component_idx=1", + "[40] TYPE_TAG 'tag1' type_id=21"); cleanup: btf__free(btf1); -- cgit v1.2.3 From 6aa5dabc9d0ef722905e4ca4f9751d70cf3ec8a4 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Thu, 11 Nov 2021 17:26:30 -0800 Subject: selftests/bpf: Add BTF_KIND_TYPE_TAG unit tests Add BTF_KIND_TYPE_TAG unit tests. 
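The raw chain exercised by "type_tag test #1" below corresponds roughly to the following C declaration (editor's sketch for orientation only; the test itself feeds raw BTF to the kernel and only checks that it loads):

    /* [1] int, [2] type tag on [1], [3] pointer to [2] */
    int __attribute__((btf_type_tag("tag"))) *p;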
Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20211112012630.1506095-1-yhs@fb.com --- tools/testing/selftests/bpf/prog_tests/btf.c | 18 ++++++++++++++++++ tools/testing/selftests/bpf/test_btf.h | 3 +++ 2 files changed, 21 insertions(+) diff --git a/tools/testing/selftests/bpf/prog_tests/btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c index 1e8b36d74df2..88510a2d9858 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf.c +++ b/tools/testing/selftests/bpf/prog_tests/btf.c @@ -3939,6 +3939,23 @@ static struct btf_raw_test raw_tests[] = { .btf_load_err = true, .err_str = "Invalid component_idx", }, +{ + .descr = "type_tag test #1", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_TAG_ENC(NAME_TBD, 1), /* [2] */ + BTF_PTR_ENC(2), /* [3] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0tag"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "tag_type_check_btf", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 1, +}, }; /* struct btf_raw_test raw_tests[] */ @@ -7222,6 +7239,7 @@ static int btf_type_size(const struct btf_type *t) case BTF_KIND_TYPEDEF: case BTF_KIND_FUNC: case BTF_KIND_FLOAT: + case BTF_KIND_TYPE_TAG: return base_size; case BTF_KIND_INT: return base_size + sizeof(__u32); diff --git a/tools/testing/selftests/bpf/test_btf.h b/tools/testing/selftests/bpf/test_btf.h index 32c7a57867da..128989bed8b7 100644 --- a/tools/testing/selftests/bpf/test_btf.h +++ b/tools/testing/selftests/bpf/test_btf.h @@ -72,4 +72,7 @@ #define BTF_DECL_TAG_ENC(value, type, component_idx) \ BTF_TYPE_ENC(value, BTF_INFO_ENC(BTF_KIND_DECL_TAG, 0, 0), type), (component_idx) +#define BTF_TYPE_TAG_ENC(value, type) \ + BTF_TYPE_ENC(value, BTF_INFO_ENC(BTF_KIND_TYPE_TAG, 0, 0), type) + #endif /* _TEST_BTF_H */ -- cgit v1.2.3 From 846f4826d18e660ab668eb26e83c6adf0ceb24d2 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Thu, 11 Nov 2021 17:26:35 -0800 Subject: selftests/bpf: Test BTF_KIND_TYPE_TAG for deduplication Add BTF_KIND_TYPE_TAG deduplication unit tests.
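To make the expected behaviour concrete, here is a simplified sketch (editor's illustration, reduced from dedup test #1 below): identical tag chains hanging off the same base type collapse into a single chain, and references are rewritten to the surviving IDs.

    Input:
      [1] INT 'int'
      [2] TYPE_TAG 'tag1' type_id=1
      [3] PTR '(anon)' type_id=2
      [4] TYPE_TAG 'tag1' type_id=1
      [5] PTR '(anon)' type_id=4
    Expected after dedup:
      [1] INT 'int'
      [2] TYPE_TAG 'tag1' type_id=1
      [3] PTR '(anon)' type_id=2

Chains with the same tag values but in a different order (tests #2 and #3 below) are deliberately left unmerged, since the position of a type tag in the chain is significant.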
Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20211112012635.1506853-1-yhs@fb.com --- tools/testing/selftests/bpf/prog_tests/btf.c | 139 ++++++++++++++++++++++++++- 1 file changed, 135 insertions(+), 4 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c index 88510a2d9858..4aa6343dc4c8 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf.c +++ b/tools/testing/selftests/bpf/prog_tests/btf.c @@ -6878,15 +6878,16 @@ static struct btf_dedup_test dedup_tests[] = { BTF_RESTRICT_ENC(8), /* [11] restrict */ BTF_FUNC_PROTO_ENC(1, 2), /* [12] func_proto */ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), - BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 8), + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 18), BTF_FUNC_ENC(NAME_TBD, 12), /* [13] func */ BTF_TYPE_FLOAT_ENC(NAME_TBD, 2), /* [14] float */ BTF_DECL_TAG_ENC(NAME_TBD, 13, -1), /* [15] decl_tag */ BTF_DECL_TAG_ENC(NAME_TBD, 13, 1), /* [16] decl_tag */ BTF_DECL_TAG_ENC(NAME_TBD, 7, -1), /* [17] decl_tag */ + BTF_TYPE_TAG_ENC(NAME_TBD, 8), /* [18] type_tag */ BTF_END_RAW, }, - BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M\0N\0O\0P\0Q"), + BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M\0N\0O\0P\0Q\0R"), }, .expect = { .raw_types = { @@ -6907,15 +6908,16 @@ static struct btf_dedup_test dedup_tests[] = { BTF_RESTRICT_ENC(8), /* [11] restrict */ BTF_FUNC_PROTO_ENC(1, 2), /* [12] func_proto */ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), - BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 8), + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 18), BTF_FUNC_ENC(NAME_TBD, 12), /* [13] func */ BTF_TYPE_FLOAT_ENC(NAME_TBD, 2), /* [14] float */ BTF_DECL_TAG_ENC(NAME_TBD, 13, -1), /* [15] decl_tag */ BTF_DECL_TAG_ENC(NAME_TBD, 13, 1), /* [16] decl_tag */ BTF_DECL_TAG_ENC(NAME_TBD, 7, -1), /* [17] decl_tag */ + BTF_TYPE_TAG_ENC(NAME_TBD, 8), /* [18] type_tag */ BTF_END_RAW, }, - BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M\0N\0O\0P\0Q"), + BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M\0N\0O\0P\0Q\0R"), }, }, { @@ -7221,6 +7223,135 @@ static struct btf_dedup_test dedup_tests[] = { BTF_STR_SEC("\0t\0tag1\0tag2\0tag3"), }, }, +{ + .descr = "dedup: btf_type_tag #1", + .input = { + .raw_types = { + /* ptr -> tag2 -> tag1 -> int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */ + BTF_TYPE_TAG_ENC(NAME_NTH(2), 2), /* [3] */ + BTF_PTR_ENC(3), /* [4] */ + /* ptr -> tag2 -> tag1 -> int */ + BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [5] */ + BTF_TYPE_TAG_ENC(NAME_NTH(2), 5), /* [6] */ + BTF_PTR_ENC(6), /* [7] */ + /* ptr -> tag1 -> int */ + BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [8] */ + BTF_PTR_ENC(8), /* [9] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0tag1\0tag2"), + }, + .expect = { + .raw_types = { + /* ptr -> tag2 -> tag1 -> int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */ + BTF_TYPE_TAG_ENC(NAME_NTH(2), 2), /* [3] */ + BTF_PTR_ENC(3), /* [4] */ + /* ptr -> tag1 -> int */ + BTF_PTR_ENC(2), /* [5] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0tag1\0tag2"), + }, +}, +{ + .descr = "dedup: btf_type_tag #2", + .input = { + .raw_types = { + /* ptr -> tag2 -> tag1 -> int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */ + BTF_TYPE_TAG_ENC(NAME_NTH(2), 2), /* [3] */ + BTF_PTR_ENC(3), /* [4] */ + /* ptr -> tag2 -> int */ + BTF_TYPE_TAG_ENC(NAME_NTH(2), 1), /* [5] */ + BTF_PTR_ENC(5), /* [6] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0tag1\0tag2"), 
+ }, + .expect = { + .raw_types = { + /* ptr -> tag2 -> tag1 -> int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */ + BTF_TYPE_TAG_ENC(NAME_NTH(2), 2), /* [3] */ + BTF_PTR_ENC(3), /* [4] */ + /* ptr -> tag2 -> int */ + BTF_TYPE_TAG_ENC(NAME_NTH(2), 1), /* [5] */ + BTF_PTR_ENC(5), /* [6] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0tag1\0tag2"), + }, +}, +{ + .descr = "dedup: btf_type_tag #3", + .input = { + .raw_types = { + /* ptr -> tag2 -> tag1 -> int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */ + BTF_TYPE_TAG_ENC(NAME_NTH(2), 2), /* [3] */ + BTF_PTR_ENC(3), /* [4] */ + /* ptr -> tag1 -> tag2 -> int */ + BTF_TYPE_TAG_ENC(NAME_NTH(2), 1), /* [5] */ + BTF_TYPE_TAG_ENC(NAME_NTH(1), 5), /* [6] */ + BTF_PTR_ENC(6), /* [7] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0tag1\0tag2"), + }, + .expect = { + .raw_types = { + /* ptr -> tag2 -> tag1 -> int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */ + BTF_TYPE_TAG_ENC(NAME_NTH(2), 2), /* [3] */ + BTF_PTR_ENC(3), /* [4] */ + /* ptr -> tag1 -> tag2 -> int */ + BTF_TYPE_TAG_ENC(NAME_NTH(2), 1), /* [5] */ + BTF_TYPE_TAG_ENC(NAME_NTH(1), 5), /* [6] */ + BTF_PTR_ENC(6), /* [7] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0tag1\0tag2"), + }, +}, +{ + .descr = "dedup: btf_type_tag #4", + .input = { + .raw_types = { + /* ptr -> tag1 -> int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */ + BTF_PTR_ENC(2), /* [3] */ + /* ptr -> tag1 -> long */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 64, 8), /* [4] */ + BTF_TYPE_TAG_ENC(NAME_NTH(1), 4), /* [5] */ + BTF_PTR_ENC(5), /* [6] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0tag1"), + }, + .expect = { + .raw_types = { + /* ptr -> tag1 -> int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */ + BTF_PTR_ENC(2), /* [3] */ + /* ptr -> tag1 -> long */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 64, 8), /* [4] */ + BTF_TYPE_TAG_ENC(NAME_NTH(1), 4), /* [5] */ + BTF_PTR_ENC(5), /* [6] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0tag1"), + }, +}, }; -- cgit v1.2.3 From 26c79fcbfa64b18ca1407a3be7ac3442aef51073 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Thu, 11 Nov 2021 17:26:41 -0800 Subject: selftests/bpf: Rename progs/tag.c to progs/btf_decl_tag.c Rename progs/tag.c to progs/btf_decl_tag.c so we can introduce progs/btf_type_tag.c in the next patch. Also create a subtest for btf_decl_tag in prog_tests/btf_tag.c so we can introduce btf_type_tag subtest in the next patch. I also took opportunity to remove the check whether __has_attribute is defined or not in progs/btf_decl_tag.c since all recent clangs should already support this macro. 
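The attribute-detection pattern carried over into the renamed file boils down to the following (sketch; the complete program appears in the diff below):

    #if __has_attribute(btf_decl_tag)
    #define __tag1 __attribute__((btf_decl_tag("tag1")))
    volatile const bool skip_tests = false;
    #else
    #define __tag1
    volatile const bool skip_tests = true;  /* lets the runner skip on older clang */
    #endif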
Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20211112012641.1507144-1-yhs@fb.com --- tools/testing/selftests/bpf/prog_tests/btf_tag.c | 20 ++++++--- tools/testing/selftests/bpf/progs/btf_decl_tag.c | 50 ++++++++++++++++++++++ tools/testing/selftests/bpf/progs/tag.c | 54 ------------------------ 3 files changed, 63 insertions(+), 61 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/btf_decl_tag.c delete mode 100644 tools/testing/selftests/bpf/progs/tag.c diff --git a/tools/testing/selftests/bpf/prog_tests/btf_tag.c b/tools/testing/selftests/bpf/prog_tests/btf_tag.c index 91821f42714d..d15cc7a88182 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_tag.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_tag.c @@ -1,20 +1,26 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2021 Facebook */ #include -#include "tag.skel.h" +#include "btf_decl_tag.skel.h" -void test_btf_tag(void) +static void test_btf_decl_tag(void) { - struct tag *skel; + struct btf_decl_tag *skel; - skel = tag__open_and_load(); - if (!ASSERT_OK_PTR(skel, "btf_tag")) + skel = btf_decl_tag__open_and_load(); + if (!ASSERT_OK_PTR(skel, "btf_decl_tag")) return; if (skel->rodata->skip_tests) { - printf("%s:SKIP: btf_tag attribute not supported", __func__); + printf("%s:SKIP: btf_decl_tag attribute not supported", __func__); test__skip(); } - tag__destroy(skel); + btf_decl_tag__destroy(skel); +} + +void test_btf_tag(void) +{ + if (test__start_subtest("btf_decl_tag")) + test_btf_decl_tag(); } diff --git a/tools/testing/selftests/bpf/progs/btf_decl_tag.c b/tools/testing/selftests/bpf/progs/btf_decl_tag.c new file mode 100644 index 000000000000..c88ccc53529a --- /dev/null +++ b/tools/testing/selftests/bpf/progs/btf_decl_tag.c @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ +#include "vmlinux.h" +#include +#include + +#if __has_attribute(btf_decl_tag) +#define __tag1 __attribute__((btf_decl_tag("tag1"))) +#define __tag2 __attribute__((btf_decl_tag("tag2"))) +volatile const bool skip_tests __tag1 __tag2 = false; +#else +#define __tag1 +#define __tag2 +volatile const bool skip_tests = true; +#endif + +struct key_t { + int a; + int b __tag1 __tag2; + int c; +} __tag1 __tag2; + +typedef struct { + int a; + int b; +} value_t __tag1 __tag2; + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 3); + __type(key, struct key_t); + __type(value, value_t); +} hashmap1 SEC(".maps"); + + +static __noinline int foo(int x __tag1 __tag2) __tag1 __tag2 +{ + struct key_t key; + value_t val = {}; + + key.a = key.b = key.c = x; + bpf_map_update_elem(&hashmap1, &key, &val, 0); + return 0; +} + +SEC("fentry/bpf_fentry_test1") +int BPF_PROG(sub, int x) +{ + return foo(x); +} diff --git a/tools/testing/selftests/bpf/progs/tag.c b/tools/testing/selftests/bpf/progs/tag.c deleted file mode 100644 index 1792f4eda095..000000000000 --- a/tools/testing/selftests/bpf/progs/tag.c +++ /dev/null @@ -1,54 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright (c) 2021 Facebook */ -#include "vmlinux.h" -#include -#include - -#ifndef __has_attribute -#define __has_attribute(x) 0 -#endif - -#if __has_attribute(btf_decl_tag) -#define __tag1 __attribute__((btf_decl_tag("tag1"))) -#define __tag2 __attribute__((btf_decl_tag("tag2"))) -volatile const bool skip_tests __tag1 __tag2 = false; -#else -#define __tag1 -#define __tag2 -volatile const bool skip_tests = true; -#endif - -struct key_t { - int a; - int b __tag1 
__tag2; - int c; -} __tag1 __tag2; - -typedef struct { - int a; - int b; -} value_t __tag1 __tag2; - -struct { - __uint(type, BPF_MAP_TYPE_HASH); - __uint(max_entries, 3); - __type(key, struct key_t); - __type(value, value_t); -} hashmap1 SEC(".maps"); - - -static __noinline int foo(int x __tag1 __tag2) __tag1 __tag2 -{ - struct key_t key; - value_t val = {}; - - key.a = key.b = key.c = x; - bpf_map_update_elem(&hashmap1, &key, &val, 0); - return 0; -} - -SEC("fentry/bpf_fentry_test1") -int BPF_PROG(sub, int x) -{ - return foo(x); -} -- cgit v1.2.3 From 5698a42a73a1d9cb7efd31ca1bf35daa87f5e1a9 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Thu, 11 Nov 2021 17:26:46 -0800 Subject: selftests/bpf: Add a C test for btf_type_tag The following is the main btf_type_tag usage in the C test: #define __tag1 __attribute__((btf_type_tag("tag1"))) #define __tag2 __attribute__((btf_type_tag("tag2"))) struct btf_type_tag_test { int __tag1 * __tag1 __tag2 *p; } g; The bpftool raw dump with related types: [4] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED [11] STRUCT 'btf_type_tag_test' size=8 vlen=1 'p' type_id=14 bits_offset=0 [12] TYPE_TAG 'tag1' type_id=16 [13] TYPE_TAG 'tag2' type_id=12 [14] PTR '(anon)' type_id=13 [15] TYPE_TAG 'tag1' type_id=4 [16] PTR '(anon)' type_id=15 [17] VAR 'g' type_id=11, linkage=global With format C dump, we have struct btf_type_tag_test { int __attribute__((btf_type_tag("tag1"))) * __attribute__((btf_type_tag("tag1"))) __attribute__((btf_type_tag("tag2"))) *p; }; The result C code is identical to the original definition except macro's are gone. Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20211112012646.1508231-1-yhs@fb.com --- tools/testing/selftests/bpf/prog_tests/btf_tag.c | 24 +++++++++++++++++++++++ tools/testing/selftests/bpf/progs/btf_type_tag.c | 25 ++++++++++++++++++++++++ 2 files changed, 49 insertions(+) create mode 100644 tools/testing/selftests/bpf/progs/btf_type_tag.c diff --git a/tools/testing/selftests/bpf/prog_tests/btf_tag.c b/tools/testing/selftests/bpf/prog_tests/btf_tag.c index d15cc7a88182..88d63e23e35f 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_tag.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_tag.c @@ -3,6 +3,12 @@ #include #include "btf_decl_tag.skel.h" +/* struct btf_type_tag_test is referenced in btf_type_tag.skel.h */ +struct btf_type_tag_test { + int **p; +}; +#include "btf_type_tag.skel.h" + static void test_btf_decl_tag(void) { struct btf_decl_tag *skel; @@ -19,8 +25,26 @@ static void test_btf_decl_tag(void) btf_decl_tag__destroy(skel); } +static void test_btf_type_tag(void) +{ + struct btf_type_tag *skel; + + skel = btf_type_tag__open_and_load(); + if (!ASSERT_OK_PTR(skel, "btf_type_tag")) + return; + + if (skel->rodata->skip_tests) { + printf("%s:SKIP: btf_type_tag attribute not supported", __func__); + test__skip(); + } + + btf_type_tag__destroy(skel); +} + void test_btf_tag(void) { if (test__start_subtest("btf_decl_tag")) test_btf_decl_tag(); + if (test__start_subtest("btf_type_tag")) + test_btf_type_tag(); } diff --git a/tools/testing/selftests/bpf/progs/btf_type_tag.c b/tools/testing/selftests/bpf/progs/btf_type_tag.c new file mode 100644 index 000000000000..1d488da7e920 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/btf_type_tag.c @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ +#include "vmlinux.h" +#include +#include + +#if __has_attribute(btf_type_tag) +#define __tag1 
__attribute__((btf_type_tag("tag1"))) +#define __tag2 __attribute__((btf_type_tag("tag2"))) +volatile const bool skip_tests = false; +#else +#define __tag1 +#define __tag2 +volatile const bool skip_tests = true; +#endif + +struct btf_type_tag_test { + int __tag1 * __tag1 __tag2 *p; +} g; + +SEC("fentry/bpf_fentry_test1") +int BPF_PROG(sub, int x) +{ + return 0; +} -- cgit v1.2.3 From 3f1d0dc0ba290aab357083a0abfe267c8cffdc8d Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Thu, 11 Nov 2021 17:26:51 -0800 Subject: selftests/bpf: Clarify llvm dependency with btf_tag selftest btf_tag selftest needs certain llvm versions (>= llvm14). Make it clear in the selftests README.rst file. Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20211112012651.1508549-1-yhs@fb.com --- tools/testing/selftests/bpf/README.rst | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/tools/testing/selftests/bpf/README.rst b/tools/testing/selftests/bpf/README.rst index 5e287e445f75..42ef250c7acc 100644 --- a/tools/testing/selftests/bpf/README.rst +++ b/tools/testing/selftests/bpf/README.rst @@ -204,16 +204,17 @@ __ https://reviews.llvm.org/D93563 btf_tag test and Clang version ============================== -The btf_tag selftest require LLVM support to recognize the btf_decl_tag attribute. -It was introduced in `Clang 14`__. +The btf_tag selftest requires LLVM support to recognize the btf_decl_tag and +btf_type_tag attributes. They are introduced in `Clang 14` [0_, 1_]. -Without it, the btf_tag selftest will be skipped and you will observe: +Without them, the btf_tag selftest will be skipped and you will observe: .. code-block:: console # btf_tag:SKIP -__ https://reviews.llvm.org/D111588 +.. _0: https://reviews.llvm.org/D111588 +.. _1: https://reviews.llvm.org/D111199 Clang dependencies for static linking tests =========================================== -- cgit v1.2.3 From d52f5c639dd8605d2563b77b190e278f615a2b8a Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Thu, 11 Nov 2021 17:26:56 -0800 Subject: docs/bpf: Update documentation for BTF_KIND_TYPE_TAG support Add BTF_KIND_TYPE_TAG documentation in btf.rst. Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20211112012656.1509082-1-yhs@fb.com --- Documentation/bpf/btf.rst | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/Documentation/bpf/btf.rst b/Documentation/bpf/btf.rst index 9ad4218a751f..d0ec40d00c28 100644 --- a/Documentation/bpf/btf.rst +++ b/Documentation/bpf/btf.rst @@ -86,6 +86,7 @@ sequentially and type id is assigned to each recognized type starting from id #define BTF_KIND_DATASEC 15 /* Section */ #define BTF_KIND_FLOAT 16 /* Floating point */ #define BTF_KIND_DECL_TAG 17 /* Decl Tag */ + #define BTF_KIND_TYPE_TAG 18 /* Type Tag */ Note that the type section encodes debug info, not just pure types. ``BTF_KIND_FUNC`` is not a type, and it represents a defined subprogram. @@ -107,7 +108,7 @@ Each type contains the following common data:: * "size" tells the size of the type it is describing. * * "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT, - * FUNC, FUNC_PROTO and DECL_TAG. + * FUNC, FUNC_PROTO, DECL_TAG and TYPE_TAG. * "type" is a type_id referring to another type. 
*/ union { @@ -492,6 +493,16 @@ the attribute is applied to a ``struct``/``union`` member or a ``func`` argument, and ``btf_decl_tag.component_idx`` should be a valid index (starting from 0) pointing to a member or an argument. +2.2.17 BTF_KIND_TYPE_TAG +~~~~~~~~~~~~~~~~~~~~~~~~ + +``struct btf_type`` encoding requirement: + * ``name_off``: offset to a non-empty string + * ``info.kind_flag``: 0 + * ``info.kind``: BTF_KIND_TYPE_TAG + * ``info.vlen``: 0 + * ``type``: the type with ``btf_type_tag`` attribute + 3. BTF Kernel API ***************** -- cgit v1.2.3 From 314f14abdeca78de6b16f97d796a9966ce4b90ae Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Wed, 10 Nov 2021 11:23:24 -0800 Subject: bpftool: Enable libbpf's strict mode by default Otherwise, attaching with bpftool doesn't work with strict section names. Also: - Add --legacy option to switch back to pre-1.0 behavior - Print a warning when program fails to load in strict mode to point to --legacy flag - By default, don't append / to the section name; in strict mode it's relevant only for a small subset of prog types + bpftool --legacy prog loadall tools/testing/selftests/bpf/test_cgroup_link.o /sys/fs/bpf/kprobe type kprobe libbpf: failed to pin program: File exists Error: failed to pin all programs + bpftool prog loadall tools/testing/selftests/bpf/test_cgroup_link.o /sys/fs/bpf/kprobe type kprobe v1 -> v2: - strict by default (Quentin Monnet) - add more info to --legacy description (Quentin Monnet) - add bash completion (Quentin Monnet) Signed-off-by: Stanislav Fomichev Signed-off-by: Daniel Borkmann Reviewed-by: Quentin Monnet Link: https://lore.kernel.org/bpf/20211110192324.920934-1-sdf@google.com --- tools/bpf/bpftool/Documentation/common_options.rst | 9 +++++ tools/bpf/bpftool/bash-completion/bpftool | 2 +- tools/bpf/bpftool/main.c | 13 ++++++- tools/bpf/bpftool/main.h | 3 +- tools/bpf/bpftool/prog.c | 40 +++++++++++++--------- 5 files changed, 48 insertions(+), 19 deletions(-) diff --git a/tools/bpf/bpftool/Documentation/common_options.rst b/tools/bpf/bpftool/Documentation/common_options.rst index 05d06c74dcaa..75adf23202d8 100644 --- a/tools/bpf/bpftool/Documentation/common_options.rst +++ b/tools/bpf/bpftool/Documentation/common_options.rst @@ -20,3 +20,12 @@ Print all logs available, even debug-level information. This includes logs from libbpf as well as from the verifier, when attempting to load programs. + +-l, --legacy + Use legacy libbpf mode which has more relaxed BPF program + requirements. By default, bpftool has more strict requirements + about section names, changes pinning logic and doesn't support + some of the older non-BTF map declarations. + + See https://github.com/libbpf/libbpf/wiki/Libbpf:-the-road-to-v1.0 + for details. 
diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool index 88e2bcf16cca..4a1b02ff72c1 100644 --- a/tools/bpf/bpftool/bash-completion/bpftool +++ b/tools/bpf/bpftool/bash-completion/bpftool @@ -261,7 +261,7 @@ _bpftool() # Deal with options if [[ ${words[cword]} == -* ]]; then local c='--version --json --pretty --bpffs --mapcompat --debug \ - --use-loader --base-btf' + --use-loader --base-btf --legacy' COMPREPLY=( $( compgen -W "$c" -- "$cur" ) ) return 0 fi diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c index 28237d7cef67..473791e87f7d 100644 --- a/tools/bpf/bpftool/main.c +++ b/tools/bpf/bpftool/main.c @@ -31,6 +31,7 @@ bool block_mount; bool verifier_logs; bool relaxed_maps; bool use_loader; +bool legacy_libbpf; struct btf *base_btf; struct hashmap *refs_table; @@ -396,6 +397,7 @@ int main(int argc, char **argv) { "debug", no_argument, NULL, 'd' }, { "use-loader", no_argument, NULL, 'L' }, { "base-btf", required_argument, NULL, 'B' }, + { "legacy", no_argument, NULL, 'l' }, { 0 } }; int opt, ret; @@ -408,7 +410,7 @@ int main(int argc, char **argv) bin_name = argv[0]; opterr = 0; - while ((opt = getopt_long(argc, argv, "VhpjfLmndB:", + while ((opt = getopt_long(argc, argv, "VhpjfLmndB:l", options, NULL)) >= 0) { switch (opt) { case 'V': @@ -454,6 +456,9 @@ int main(int argc, char **argv) case 'L': use_loader = true; break; + case 'l': + legacy_libbpf = true; + break; default: p_err("unrecognized option '%s'", argv[optind - 1]); if (json_output) @@ -463,6 +468,12 @@ int main(int argc, char **argv) } } + if (!legacy_libbpf) { + ret = libbpf_set_strict_mode(LIBBPF_STRICT_ALL); + if (ret) + p_err("failed to enable libbpf strict mode: %d", ret); + } + argc -= optind; argv += optind; if (argc < 0) diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h index 383835c2604d..8d76d937a62b 100644 --- a/tools/bpf/bpftool/main.h +++ b/tools/bpf/bpftool/main.h @@ -57,7 +57,7 @@ static inline void *u64_to_ptr(__u64 ptr) #define HELP_SPEC_PROGRAM \ "PROG := { id PROG_ID | pinned FILE | tag PROG_TAG | name PROG_NAME }" #define HELP_SPEC_OPTIONS \ - "OPTIONS := { {-j|--json} [{-p|--pretty}] | {-d|--debug}" + "OPTIONS := { {-j|--json} [{-p|--pretty}] | {-d|--debug} | {-l|--legacy}" #define HELP_SPEC_MAP \ "MAP := { id MAP_ID | pinned FILE | name MAP_NAME }" #define HELP_SPEC_LINK \ @@ -90,6 +90,7 @@ extern bool block_mount; extern bool verifier_logs; extern bool relaxed_maps; extern bool use_loader; +extern bool legacy_libbpf; extern struct btf *base_btf; extern struct hashmap *refs_table; diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c index dea7a49ec26e..bf85c914f2fa 100644 --- a/tools/bpf/bpftool/prog.c +++ b/tools/bpf/bpftool/prog.c @@ -1483,8 +1483,6 @@ static int load_with_options(int argc, char **argv, bool first_prog_only) while (argc) { if (is_prefix(*argv, "type")) { - char *type; - NEXT_ARG(); if (common_prog_type != BPF_PROG_TYPE_UNSPEC) { @@ -1494,21 +1492,26 @@ static int load_with_options(int argc, char **argv, bool first_prog_only) if (!REQ_ARGS(1)) goto err_free_reuse_maps; - /* Put a '/' at the end of type to appease libbpf */ - type = malloc(strlen(*argv) + 2); - if (!type) { - p_err("mem alloc failed"); - goto err_free_reuse_maps; - } - *type = 0; - strcat(type, *argv); - strcat(type, "/"); + err = libbpf_prog_type_by_name(*argv, &common_prog_type, + &expected_attach_type); + if (err < 0) { + /* Put a '/' at the end of type to appease libbpf */ + char *type = malloc(strlen(*argv) + 2); - err = 
get_prog_type_by_name(type, &common_prog_type, - &expected_attach_type); - free(type); - if (err < 0) - goto err_free_reuse_maps; + if (!type) { + p_err("mem alloc failed"); + goto err_free_reuse_maps; + } + *type = 0; + strcat(type, *argv); + strcat(type, "/"); + + err = get_prog_type_by_name(type, &common_prog_type, + &expected_attach_type); + free(type); + if (err < 0) + goto err_free_reuse_maps; + } NEXT_ARG(); } else if (is_prefix(*argv, "map")) { @@ -1731,6 +1734,11 @@ err_unpin: else bpf_object__unpin_programs(obj, pinfile); err_close_obj: + if (!legacy_libbpf) { + p_info("Warning: bpftool is now running in libbpf strict mode and has more stringent requirements about BPF programs.\n" + "If it used to work for this object file but now doesn't, see --legacy option for more details.\n"); + } + bpf_object__close(obj); err_free_reuse_maps: for (i = 0; i < old_map_fds; i++) -- cgit v1.2.3 From 9e2ad638ae3632ef916ceb39f70e3104bf8fdc97 Mon Sep 17 00:00:00 2001 From: Song Liu Date: Fri, 12 Nov 2021 07:02:42 -0800 Subject: bpf: Extend BTF_ID_LIST_GLOBAL with parameter for number of IDs syzbot reported the following BUG w/o CONFIG_DEBUG_INFO_BTF BUG: KASAN: global-out-of-bounds in task_iter_init+0x212/0x2e7 kernel/bpf/task_iter.c:661 Read of size 4 at addr ffffffff90297404 by task swapper/0/1 CPU: 1 PID: 1 Comm: swapper/0 Not tainted 5.15.0-syzkaller #0 Hardware name: ... Google Compute Engine, BIOS Google 01/01/2011 Call Trace: __dump_stack lib/dump_stack.c:88 [inline] dump_stack_lvl+0xcd/0x134 lib/dump_stack.c:106 print_address_description.constprop.0.cold+0xf/0x309 mm/kasan/report.c:256 __kasan_report mm/kasan/report.c:442 [inline] kasan_report.cold+0x83/0xdf mm/kasan/report.c:459 task_iter_init+0x212/0x2e7 kernel/bpf/task_iter.c:661 do_one_initcall+0x103/0x650 init/main.c:1295 do_initcall_level init/main.c:1368 [inline] do_initcalls init/main.c:1384 [inline] do_basic_setup init/main.c:1403 [inline] kernel_init_freeable+0x6b1/0x73a init/main.c:1606 kernel_init+0x1a/0x1d0 init/main.c:1497 ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:295 This is caused by hard-coded name[1] in BTF_ID_LIST_GLOBAL (w/o CONFIG_DEBUG_INFO_BTF). Fix this by adding a parameter n to BTF_ID_LIST_GLOBAL. This avoids ifdef CONFIG_DEBUG_INFO_BTF in btf.c and filter.c. 
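In short (editor's sketch of the non-BTF stub after this change): the !CONFIG_DEBUG_INFO_BTF fallback now reserves the requested number of slots instead of a single element, so array-style users keep working:

    #define BTF_ID_LIST_GLOBAL(name, n) u32 name[n];

    BTF_ID_LIST_GLOBAL(btf_sock_ids, MAX_BTF_SOCK_TYPE)
    /* expands to: u32 btf_sock_ids[MAX_BTF_SOCK_TYPE]; */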
Fixes: 7c7e3d31e785 ("bpf: Introduce helper bpf_find_vma") Reported-by: syzbot+e0d81ec552a21d9071aa@syzkaller.appspotmail.com Reported-by: Eric Dumazet Suggested-by: Eric Dumazet Signed-off-by: Song Liu Signed-off-by: Alexei Starovoitov Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/20211112150243.1270987-2-songliubraving@fb.com --- include/linux/btf_ids.h | 6 +++--- kernel/bpf/btf.c | 2 +- net/core/filter.c | 6 +----- 3 files changed, 5 insertions(+), 9 deletions(-) diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h index 47d9abfbdb55..6bb42b785293 100644 --- a/include/linux/btf_ids.h +++ b/include/linux/btf_ids.h @@ -73,7 +73,7 @@ asm( \ __BTF_ID_LIST(name, local) \ extern u32 name[]; -#define BTF_ID_LIST_GLOBAL(name) \ +#define BTF_ID_LIST_GLOBAL(name, n) \ __BTF_ID_LIST(name, globl) /* The BTF_ID_LIST_SINGLE macro defines a BTF_ID_LIST with @@ -83,7 +83,7 @@ __BTF_ID_LIST(name, globl) BTF_ID_LIST(name) \ BTF_ID(prefix, typename) #define BTF_ID_LIST_GLOBAL_SINGLE(name, prefix, typename) \ - BTF_ID_LIST_GLOBAL(name) \ + BTF_ID_LIST_GLOBAL(name, 1) \ BTF_ID(prefix, typename) /* @@ -149,7 +149,7 @@ extern struct btf_id_set name; #define BTF_ID_LIST(name) static u32 name[5]; #define BTF_ID(prefix, name) #define BTF_ID_UNUSED -#define BTF_ID_LIST_GLOBAL(name) u32 name[1]; +#define BTF_ID_LIST_GLOBAL(name, n) u32 name[n]; #define BTF_ID_LIST_SINGLE(name, prefix, typename) static u32 name[1]; #define BTF_ID_LIST_GLOBAL_SINGLE(name, prefix, typename) u32 name[1]; #define BTF_SET_START(name) static struct btf_id_set name = { 0 }; diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 1dd9ba82da1e..2a9d8a1fee1d 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -6354,7 +6354,7 @@ const struct bpf_func_proto bpf_btf_find_by_name_kind_proto = { .arg4_type = ARG_ANYTHING, }; -BTF_ID_LIST_GLOBAL(btf_task_struct_ids) +BTF_ID_LIST_GLOBAL(btf_task_struct_ids, 3) BTF_ID(struct, task_struct) BTF_ID(struct, file) BTF_ID(struct, vm_area_struct) diff --git a/net/core/filter.c b/net/core/filter.c index 315a58466fc9..46f09a8fba20 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -10611,14 +10611,10 @@ void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog) bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(xdp), prev_prog, prog); } -#ifdef CONFIG_DEBUG_INFO_BTF -BTF_ID_LIST_GLOBAL(btf_sock_ids) +BTF_ID_LIST_GLOBAL(btf_sock_ids, MAX_BTF_SOCK_TYPE) #define BTF_SOCK_TYPE(name, type) BTF_ID(struct, type) BTF_SOCK_TYPE_xxx #undef BTF_SOCK_TYPE -#else -u32 btf_sock_ids[MAX_BTF_SOCK_TYPE]; -#endif BPF_CALL_1(bpf_skc_to_tcp6_sock, struct sock *, sk) { -- cgit v1.2.3 From d19ddb476a539fd78ad1028ae13bb38506286931 Mon Sep 17 00:00:00 2001 From: Song Liu Date: Fri, 12 Nov 2021 07:02:43 -0800 Subject: bpf: Introduce btf_tracing_ids Similar to btf_sock_ids, btf_tracing_ids provides btf ID for task_struct, file, and vm_area_struct via easy to understand format like btf_tracing_ids[BTF_TRACING_TYPE_[TASK|file|VMA]]. 
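Concretely, the BTF_TRACING_TYPE_xxx X-macro expands to the following enum (sketch of the expansion), so call sites can index btf_tracing_ids by a readable name, e.g. btf_tracing_ids[BTF_TRACING_TYPE_VMA] instead of btf_task_struct_ids[2]:

    enum {
            BTF_TRACING_TYPE_TASK,  /* task_struct */
            BTF_TRACING_TYPE_FILE,  /* file */
            BTF_TRACING_TYPE_VMA,   /* vm_area_struct */
            MAX_BTF_TRACING_TYPE,
    };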
Suggested-by: Alexei Starovoitov Signed-off-by: Song Liu Signed-off-by: Alexei Starovoitov Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/20211112150243.1270987-3-songliubraving@fb.com --- include/linux/btf_ids.h | 14 +++++++++++++- kernel/bpf/bpf_task_storage.c | 4 ++-- kernel/bpf/btf.c | 8 ++++---- kernel/bpf/stackmap.c | 2 +- kernel/bpf/task_iter.c | 12 ++++++------ kernel/bpf/verifier.c | 2 +- kernel/trace/bpf_trace.c | 4 ++-- 7 files changed, 29 insertions(+), 17 deletions(-) diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h index 6bb42b785293..919c0fde1c51 100644 --- a/include/linux/btf_ids.h +++ b/include/linux/btf_ids.h @@ -189,6 +189,18 @@ MAX_BTF_SOCK_TYPE, extern u32 btf_sock_ids[]; #endif -extern u32 btf_task_struct_ids[]; +#define BTF_TRACING_TYPE_xxx \ + BTF_TRACING_TYPE(BTF_TRACING_TYPE_TASK, task_struct) \ + BTF_TRACING_TYPE(BTF_TRACING_TYPE_FILE, file) \ + BTF_TRACING_TYPE(BTF_TRACING_TYPE_VMA, vm_area_struct) + +enum { +#define BTF_TRACING_TYPE(name, type) name, +BTF_TRACING_TYPE_xxx +#undef BTF_TRACING_TYPE +MAX_BTF_TRACING_TYPE, +}; + +extern u32 btf_tracing_ids[]; #endif diff --git a/kernel/bpf/bpf_task_storage.c b/kernel/bpf/bpf_task_storage.c index ebfa8bc90892..bb69aea1a777 100644 --- a/kernel/bpf/bpf_task_storage.c +++ b/kernel/bpf/bpf_task_storage.c @@ -323,7 +323,7 @@ const struct bpf_func_proto bpf_task_storage_get_proto = { .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL, .arg1_type = ARG_CONST_MAP_PTR, .arg2_type = ARG_PTR_TO_BTF_ID, - .arg2_btf_id = &btf_task_struct_ids[0], + .arg2_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK], .arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL, .arg4_type = ARG_ANYTHING, }; @@ -334,5 +334,5 @@ const struct bpf_func_proto bpf_task_storage_delete_proto = { .ret_type = RET_INTEGER, .arg1_type = ARG_CONST_MAP_PTR, .arg2_type = ARG_PTR_TO_BTF_ID, - .arg2_btf_id = &btf_task_struct_ids[0], + .arg2_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK], }; diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 2a9d8a1fee1d..6b9d23be1e99 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -6354,10 +6354,10 @@ const struct bpf_func_proto bpf_btf_find_by_name_kind_proto = { .arg4_type = ARG_ANYTHING, }; -BTF_ID_LIST_GLOBAL(btf_task_struct_ids, 3) -BTF_ID(struct, task_struct) -BTF_ID(struct, file) -BTF_ID(struct, vm_area_struct) +BTF_ID_LIST_GLOBAL(btf_tracing_ids, MAX_BTF_TRACING_TYPE) +#define BTF_TRACING_TYPE(name, type) BTF_ID(struct, type) +BTF_TRACING_TYPE_xxx +#undef BTF_TRACING_TYPE /* BTF ID set registration API for modules */ diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c index 1de0a1b03636..49e567209c6b 100644 --- a/kernel/bpf/stackmap.c +++ b/kernel/bpf/stackmap.c @@ -489,7 +489,7 @@ const struct bpf_func_proto bpf_get_task_stack_proto = { .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_BTF_ID, - .arg1_btf_id = &btf_task_struct_ids[0], + .arg1_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK], .arg2_type = ARG_PTR_TO_UNINIT_MEM, .arg3_type = ARG_CONST_SIZE_OR_ZERO, .arg4_type = ARG_ANYTHING, diff --git a/kernel/bpf/task_iter.c b/kernel/bpf/task_iter.c index f171479f7dd6..d94696198ef8 100644 --- a/kernel/bpf/task_iter.c +++ b/kernel/bpf/task_iter.c @@ -622,7 +622,7 @@ const struct bpf_func_proto bpf_find_vma_proto = { .func = bpf_find_vma, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_BTF_ID, - .arg1_btf_id = &btf_task_struct_ids[0], + .arg1_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK], .arg2_type = ARG_ANYTHING, .arg3_type = ARG_PTR_TO_FUNC, .arg4_type = 
ARG_PTR_TO_STACK_OR_NULL, @@ -652,19 +652,19 @@ static int __init task_iter_init(void) init_irq_work(&work->irq_work, do_mmap_read_unlock); } - task_reg_info.ctx_arg_info[0].btf_id = btf_task_struct_ids[0]; + task_reg_info.ctx_arg_info[0].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_TASK]; ret = bpf_iter_reg_target(&task_reg_info); if (ret) return ret; - task_file_reg_info.ctx_arg_info[0].btf_id = btf_task_struct_ids[0]; - task_file_reg_info.ctx_arg_info[1].btf_id = btf_task_struct_ids[1]; + task_file_reg_info.ctx_arg_info[0].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_TASK]; + task_file_reg_info.ctx_arg_info[1].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_FILE]; ret = bpf_iter_reg_target(&task_file_reg_info); if (ret) return ret; - task_vma_reg_info.ctx_arg_info[0].btf_id = btf_task_struct_ids[0]; - task_vma_reg_info.ctx_arg_info[1].btf_id = btf_task_struct_ids[2]; + task_vma_reg_info.ctx_arg_info[0].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_TASK]; + task_vma_reg_info.ctx_arg_info[1].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_VMA]; return bpf_iter_reg_target(&task_vma_reg_info); } late_initcall(task_iter_init); diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 1aafb43f61d1..d31a031ab377 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -6147,7 +6147,7 @@ static int set_find_vma_callback_state(struct bpf_verifier_env *env, callee->regs[BPF_REG_2].type = PTR_TO_BTF_ID; __mark_reg_known_zero(&callee->regs[BPF_REG_2]); callee->regs[BPF_REG_2].btf = btf_vmlinux; - callee->regs[BPF_REG_2].btf_id = btf_task_struct_ids[2]; + callee->regs[BPF_REG_2].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_VMA], /* pointer to stack or null */ callee->regs[BPF_REG_3] = caller->regs[BPF_REG_4]; diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 390176a3031a..25ea521fb8f1 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -764,7 +764,7 @@ const struct bpf_func_proto bpf_get_current_task_btf_proto = { .func = bpf_get_current_task_btf, .gpl_only = true, .ret_type = RET_PTR_TO_BTF_ID, - .ret_btf_id = &btf_task_struct_ids[0], + .ret_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK], }; BPF_CALL_1(bpf_task_pt_regs, struct task_struct *, task) @@ -779,7 +779,7 @@ const struct bpf_func_proto bpf_task_pt_regs_proto = { .func = bpf_task_pt_regs, .gpl_only = true, .arg1_type = ARG_PTR_TO_BTF_ID, - .arg1_btf_id = &btf_task_struct_ids[0], + .arg1_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK], .ret_type = RET_PTR_TO_BTF_ID, .ret_btf_id = &bpf_task_pt_regs_ids[0], }; -- cgit v1.2.3 From 21c6ec3d5275a77348b1af0e78cbbed0ee1558d4 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Fri, 12 Nov 2021 12:48:33 -0800 Subject: selftests/bpf: Fix an unused-but-set-variable compiler warning When using clang to build selftests with LLVM=1 on the make command line, I hit the following compiler warning: xdpxceiver.c:747:6: warning: variable 'total' set but not used [-Wunused-but-set-variable] u32 total = 0; ^ This patch fixed the issue by removing that declaration and its associated unused operation.
Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20211112204833.3579457-1-yhs@fb.com --- tools/testing/selftests/bpf/xdpxceiver.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/tools/testing/selftests/bpf/xdpxceiver.c b/tools/testing/selftests/bpf/xdpxceiver.c index 6c7cf8aadc79..fe7f423b8c3f 100644 --- a/tools/testing/selftests/bpf/xdpxceiver.c +++ b/tools/testing/selftests/bpf/xdpxceiver.c @@ -744,7 +744,6 @@ static void receive_pkts(struct pkt_stream *pkt_stream, struct xsk_socket_info * struct pkt *pkt = pkt_stream_get_next_rx_pkt(pkt_stream); struct xsk_umem_info *umem = xsk->umem; u32 idx_rx = 0, idx_fq = 0, rcvd, i; - u32 total = 0; int ret; while (pkt) { @@ -799,7 +798,6 @@ static void receive_pkts(struct pkt_stream *pkt_stream, struct xsk_socket_info * pthread_mutex_lock(&pacing_mutex); pkts_in_flight -= rcvd; - total += rcvd; if (pkts_in_flight < umem->num_frames) pthread_cond_signal(&pacing_cond); pthread_mutex_unlock(&pacing_mutex); -- cgit v1.2.3 From 325d956d67178af92b5b12ff950a2f93a433f2c4 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Fri, 12 Nov 2021 12:48:38 -0800 Subject: selftests/bpf: Fix a tautological-constant-out-of-range-compare compiler warning When using clang to build selftests with LLVM=1 on the make command line, I hit the following compiler warning: benchs/bench_bloom_filter_map.c:84:46: warning: result of comparison of constant 256 with expression of type '__u8' (aka 'unsigned char') is always false [-Wtautological-constant-out-of-range-compare] if (args.value_size < 2 || args.value_size > 256) { ~~~~~~~~~~~~~~~ ^ ~~~ The reason is args.value_size has type __u8, so the comparison "args.value_size > 256" is always false. This patch fixed the issue by doing a proper range check before assigning the value to args.value_size. The patch also fixed the same issue in two other places. Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20211112204838.3579953-1-yhs@fb.com --- .../selftests/bpf/benchs/bench_bloom_filter_map.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/tools/testing/selftests/bpf/benchs/bench_bloom_filter_map.c b/tools/testing/selftests/bpf/benchs/bench_bloom_filter_map.c index 6eeeed2913e6..5bcb8a8cdeb2 100644 --- a/tools/testing/selftests/bpf/benchs/bench_bloom_filter_map.c +++ b/tools/testing/selftests/bpf/benchs/bench_bloom_filter_map.c @@ -63,29 +63,34 @@ static const struct argp_option opts[] = { static error_t parse_arg(int key, char *arg, struct argp_state *state) { + long ret; + switch (key) { case ARG_NR_ENTRIES: - args.nr_entries = strtol(arg, NULL, 10); - if (args.nr_entries == 0) { + ret = strtol(arg, NULL, 10); + if (ret < 1 || ret > UINT_MAX) { fprintf(stderr, "Invalid nr_entries count."); argp_usage(state); } + args.nr_entries = ret; break; case ARG_NR_HASH_FUNCS: - args.nr_hash_funcs = strtol(arg, NULL, 10); - if (args.nr_hash_funcs == 0 || args.nr_hash_funcs > 15) { + ret = strtol(arg, NULL, 10); + if (ret < 1 || ret > 15) { fprintf(stderr, "The bloom filter must use 1 to 15 hash functions."); argp_usage(state); } + args.nr_hash_funcs = ret; break; case ARG_VALUE_SIZE: - args.value_size = strtol(arg, NULL, 10); - if (args.value_size < 2 || args.value_size > 256) { + ret = strtol(arg, NULL, 10); + if (ret < 2 || ret > 256) { fprintf(stderr, "Invalid value size.
Must be between 2 and 256 bytes"); argp_usage(state); } + args.value_size = ret; break; default: return ARGP_ERR_UNKNOWN; -- cgit v1.2.3 From ebbd7f64a3fbe9e0f235e39fc244ee9735e2a52a Mon Sep 17 00:00:00 2001 From: Quentin Monnet Date: Wed, 10 Nov 2021 11:46:27 +0000 Subject: bpftool: Fix memory leak in prog_dump() Following the extraction of prog_dump() from do_dump(), the struct btf allocated in prog_dump() is no longer freed on error; the struct bpf_prog_linfo is not freed at all. Make sure we release them before exiting the function. Fixes: ec2025095cf6 ("bpftool: Match several programs with same tag") Signed-off-by: Quentin Monnet Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20211110114632.24537-2-quentin@isovalent.com --- tools/bpf/bpftool/prog.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c index bf85c914f2fa..e47e8b06cc3d 100644 --- a/tools/bpf/bpftool/prog.c +++ b/tools/bpf/bpftool/prog.c @@ -709,8 +709,8 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode, char func_sig[1024]; unsigned char *buf; __u32 member_len; + int fd, err = -1; ssize_t n; - int fd; if (mode == DUMP_JITED) { if (info->jited_prog_len == 0 || !info->jited_prog_insns) { @@ -749,7 +749,7 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode, if (fd < 0) { p_err("can't open file %s: %s", filepath, strerror(errno)); - return -1; + goto exit_free; } n = write(fd, buf, member_len); @@ -757,7 +757,7 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode, if (n != (ssize_t)member_len) { p_err("error writing output file: %s", n < 0 ? strerror(errno) : "short write"); - return -1; + goto exit_free; } if (json_output) @@ -771,7 +771,7 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode, info->netns_ino, &disasm_opt); if (!name) - return -1; + goto exit_free; } if (info->nr_jited_func_lens && info->jited_func_lens) { @@ -866,9 +866,12 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode, kernel_syms_destroy(&dd); } - btf__free(btf); + err = 0; - return 0; +exit_free: + btf__free(btf); + bpf_prog_linfo__free(prog_linfo); + return err; } static int do_dump(int argc, char **argv) -- cgit v1.2.3 From 48f5aef4c458c19ab337eed8c95a6486cc014aa3 Mon Sep 17 00:00:00 2001 From: Quentin Monnet Date: Wed, 10 Nov 2021 11:46:28 +0000 Subject: bpftool: Remove inclusion of utilities.mak from Makefiles Bpftool's Makefile, and the Makefile for its documentation, both include scripts/utilities.mak, but they use none of the items defined in this file. Remove the includes. 
Fixes: 71bb428fe2c1 ("tools: bpf: add bpftool") Signed-off-by: Quentin Monnet Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20211110114632.24537-3-quentin@isovalent.com --- tools/bpf/bpftool/Documentation/Makefile | 1 - tools/bpf/bpftool/Makefile | 1 - 2 files changed, 2 deletions(-) diff --git a/tools/bpf/bpftool/Documentation/Makefile b/tools/bpf/bpftool/Documentation/Makefile index 44b60784847b..692e1b947490 100644 --- a/tools/bpf/bpftool/Documentation/Makefile +++ b/tools/bpf/bpftool/Documentation/Makefile @@ -1,6 +1,5 @@ # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) include ../../../scripts/Makefile.include -include ../../../scripts/utilities.mak INSTALL ?= install RM ?= rm -f diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile index 31dfef6a4121..1b36c11a2a26 100644 --- a/tools/bpf/bpftool/Makefile +++ b/tools/bpf/bpftool/Makefile @@ -1,6 +1,5 @@ # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) include ../../scripts/Makefile.include -include ../../scripts/utilities.mak ifeq ($(srctree),) srctree := $(patsubst %/,%,$(dir $(CURDIR))) -- cgit v1.2.3 From 986dec18bbf41f50edc2e0aa4ac5ef8e0f64f328 Mon Sep 17 00:00:00 2001 From: Quentin Monnet Date: Wed, 10 Nov 2021 11:46:30 +0000 Subject: bpftool: Fix indent in option lists in the documentation Mixed indentation levels in the lists of options in bpftool's documentation produces some unexpected results. For the "bpftool" man page, it prints a warning: $ make -C bpftool.8 GEN bpftool.8 :26: (ERROR/3) Unexpected indentation. For other pages, there is no warning, but it results in a line break appearing in the option lists in the generated man pages. RST paragraphs should have a uniform indentation level. Let's fix it. Fixes: c07ba629df97 ("tools: bpftool: Update and synchronise option list in doc and help msg") Fixes: 8cc8c6357c8f ("tools: bpftool: Document and add bash completion for -L, -B options") Signed-off-by: Quentin Monnet Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20211110114632.24537-5-quentin@isovalent.com --- tools/bpf/bpftool/Documentation/bpftool-btf.rst | 2 +- tools/bpf/bpftool/Documentation/bpftool-cgroup.rst | 2 +- tools/bpf/bpftool/Documentation/bpftool-gen.rst | 2 +- tools/bpf/bpftool/Documentation/bpftool-link.rst | 2 +- tools/bpf/bpftool/Documentation/bpftool-map.rst | 6 +++--- tools/bpf/bpftool/Documentation/bpftool-prog.rst | 8 ++++---- tools/bpf/bpftool/Documentation/bpftool.rst | 6 +++--- 7 files changed, 14 insertions(+), 14 deletions(-) diff --git a/tools/bpf/bpftool/Documentation/bpftool-btf.rst b/tools/bpf/bpftool/Documentation/bpftool-btf.rst index 88b28aa7431f..4425d942dd39 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-btf.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-btf.rst @@ -13,7 +13,7 @@ SYNOPSIS **bpftool** [*OPTIONS*] **btf** *COMMAND* *OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | {**-d** | **--debug** } | - { **-B** | **--base-btf** } } + { **-B** | **--base-btf** } } *COMMANDS* := { **dump** | **help** } diff --git a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst index 3e4395eede4f..13a217a2503d 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst @@ -13,7 +13,7 @@ SYNOPSIS **bpftool** [*OPTIONS*] **cgroup** *COMMAND* *OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** } | - { **-f** | **--bpffs** } } + { **-f** | **--bpffs** } } 
*COMMANDS* := { **show** | **list** | **tree** | **attach** | **detach** | **help** } diff --git a/tools/bpf/bpftool/Documentation/bpftool-gen.rst b/tools/bpf/bpftool/Documentation/bpftool-gen.rst index 2ef2f2df0279..2a137f8a4cea 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-gen.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-gen.rst @@ -13,7 +13,7 @@ SYNOPSIS **bpftool** [*OPTIONS*] **gen** *COMMAND* *OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** } | - { **-L** | **--use-loader** } } + { **-L** | **--use-loader** } } *COMMAND* := { **object** | **skeleton** | **help** } diff --git a/tools/bpf/bpftool/Documentation/bpftool-link.rst b/tools/bpf/bpftool/Documentation/bpftool-link.rst index 0de90f086238..9434349636a5 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-link.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-link.rst @@ -13,7 +13,7 @@ SYNOPSIS **bpftool** [*OPTIONS*] **link** *COMMAND* *OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** } | - { **-f** | **--bpffs** } | { **-n** | **--nomount** } } + { **-f** | **--bpffs** } | { **-n** | **--nomount** } } *COMMANDS* := { **show** | **list** | **pin** | **help** } diff --git a/tools/bpf/bpftool/Documentation/bpftool-map.rst b/tools/bpf/bpftool/Documentation/bpftool-map.rst index d0c4abe08aba..1445cadc15d4 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-map.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-map.rst @@ -13,11 +13,11 @@ SYNOPSIS **bpftool** [*OPTIONS*] **map** *COMMAND* *OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** } | - { **-f** | **--bpffs** } | { **-n** | **--nomount** } } + { **-f** | **--bpffs** } | { **-n** | **--nomount** } } *COMMANDS* := - { **show** | **list** | **create** | **dump** | **update** | **lookup** | **getnext** - | **delete** | **pin** | **help** } + { **show** | **list** | **create** | **dump** | **update** | **lookup** | **getnext** | + **delete** | **pin** | **help** } MAP COMMANDS ============= diff --git a/tools/bpf/bpftool/Documentation/bpftool-prog.rst b/tools/bpf/bpftool/Documentation/bpftool-prog.rst index 91608cb7e44a..f27265bd589b 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-prog.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-prog.rst @@ -13,12 +13,12 @@ SYNOPSIS **bpftool** [*OPTIONS*] **prog** *COMMAND* *OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** } | - { **-f** | **--bpffs** } | { **-m** | **--mapcompat** } | { **-n** | **--nomount** } | - { **-L** | **--use-loader** } } + { **-f** | **--bpffs** } | { **-m** | **--mapcompat** } | { **-n** | **--nomount** } | + { **-L** | **--use-loader** } } *COMMANDS* := - { **show** | **list** | **dump xlated** | **dump jited** | **pin** | **load** - | **loadall** | **help** } + { **show** | **list** | **dump xlated** | **dump jited** | **pin** | **load** | + **loadall** | **help** } PROG COMMANDS ============= diff --git a/tools/bpf/bpftool/Documentation/bpftool.rst b/tools/bpf/bpftool/Documentation/bpftool.rst index bb23f55bb05a..8ac86565c501 100644 --- a/tools/bpf/bpftool/Documentation/bpftool.rst +++ b/tools/bpf/bpftool/Documentation/bpftool.rst @@ -19,14 +19,14 @@ SYNOPSIS *OBJECT* := { **map** | **program** | **cgroup** | **perf** | **net** | **feature** } *OPTIONS* := { { **-V** | **--version** } | - { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** } } + { **-j** | **--json** } [{ **-p** | **--pretty** }] | { 
**-d** | **--debug** } } *MAP-COMMANDS* := { **show** | **list** | **create** | **dump** | **update** | **lookup** | **getnext** | - **delete** | **pin** | **event_pipe** | **help** } + **delete** | **pin** | **event_pipe** | **help** } *PROG-COMMANDS* := { **show** | **list** | **dump jited** | **dump xlated** | **pin** | - **load** | **attach** | **detach** | **help** } + **load** | **attach** | **detach** | **help** } *CGROUP-COMMANDS* := { **show** | **list** | **attach** | **detach** | **help** } -- cgit v1.2.3 From 3811e2753a39efb8aa5b8c133dc24f6d26f6cd96 Mon Sep 17 00:00:00 2001 From: Quentin Monnet Date: Wed, 10 Nov 2021 11:46:31 +0000 Subject: bpftool: Update the lists of names for maps and prog-attach types To support the different BPF map or attach types, bpftool must remain up-to-date with the types supported by the kernel. Let's update the lists, by adding the missing Bloom filter map type and the perf_event attach type. Both missing items were found with test_bpftool_synctypes.py. Signed-off-by: Quentin Monnet Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20211110114632.24537-6-quentin@isovalent.com --- tools/bpf/bpftool/Documentation/bpftool-map.rst | 2 +- tools/bpf/bpftool/bash-completion/bpftool | 3 ++- tools/bpf/bpftool/common.c | 1 + tools/bpf/bpftool/map.c | 3 ++- 4 files changed, 6 insertions(+), 3 deletions(-) diff --git a/tools/bpf/bpftool/Documentation/bpftool-map.rst b/tools/bpf/bpftool/Documentation/bpftool-map.rst index 1445cadc15d4..991d18fd84f2 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-map.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-map.rst @@ -52,7 +52,7 @@ MAP COMMANDS | | **devmap** | **devmap_hash** | **sockmap** | **cpumap** | **xskmap** | **sockhash** | | **cgroup_storage** | **reuseport_sockarray** | **percpu_cgroup_storage** | | **queue** | **stack** | **sk_storage** | **struct_ops** | **ringbuf** | **inode_storage** - | **task_storage** } +| | **task_storage** | **bloom_filter** } DESCRIPTION =========== diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool index 4a1b02ff72c1..493753a4962e 100644 --- a/tools/bpf/bpftool/bash-completion/bpftool +++ b/tools/bpf/bpftool/bash-completion/bpftool @@ -710,7 +710,8 @@ _bpftool() hash_of_maps devmap devmap_hash sockmap cpumap \ xskmap sockhash cgroup_storage reuseport_sockarray \ percpu_cgroup_storage queue stack sk_storage \ - struct_ops inode_storage task_storage ringbuf' + struct_ops ringbuf inode_storage task_storage \ + bloom_filter' COMPREPLY=( $( compgen -W "$BPFTOOL_MAP_CREATE_TYPES" -- "$cur" ) ) return 0 ;; diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c index 511eccdbdfe6..fa8eb8134344 100644 --- a/tools/bpf/bpftool/common.c +++ b/tools/bpf/bpftool/common.c @@ -74,6 +74,7 @@ const char * const attach_type_name[__MAX_BPF_ATTACH_TYPE] = { [BPF_XDP] = "xdp", [BPF_SK_REUSEPORT_SELECT] = "sk_skb_reuseport_select", [BPF_SK_REUSEPORT_SELECT_OR_MIGRATE] = "sk_skb_reuseport_select_or_migrate", + [BPF_PERF_EVENT] = "perf_event", }; void p_err(const char *fmt, ...) 
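For context on why these tables must track the kernel: attach_type_name[] above and map_type_name[] in the map.c change below are plain arrays indexed by the kernel's enum values, so any type without an entry is printed without a name. The following is only a rough sketch of that lookup pattern, not bpftool's actual code; the demo_* identifiers are invented for the example.

    /* Minimal illustration of a name table indexed by a kernel enum value. */
    #include <stdio.h>

    #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

    /* Abbreviated stand-in for the kernel's enum bpf_map_type. */
    enum demo_map_type {
        DEMO_MAP_TYPE_UNSPEC = 0,
        DEMO_MAP_TYPE_HASH,
        DEMO_MAP_TYPE_ARRAY,
        DEMO_MAP_TYPE_BLOOM_FILTER,
    };

    static const char * const demo_map_type_name[] = {
        [DEMO_MAP_TYPE_UNSPEC]       = "unspec",
        [DEMO_MAP_TYPE_HASH]         = "hash",
        [DEMO_MAP_TYPE_ARRAY]        = "array",
        [DEMO_MAP_TYPE_BLOOM_FILTER] = "bloom_filter",
    };

    /* Return the name for a numeric type, or NULL when the table has no
     * entry (e.g. a newer kernel type that the tool does not know yet). */
    static const char *demo_map_type_str(unsigned int type)
    {
        if (type >= ARRAY_SIZE(demo_map_type_name))
            return NULL;
        return demo_map_type_name[type];
    }

    int main(void)
    {
        unsigned int type = DEMO_MAP_TYPE_BLOOM_FILTER;
        const char *name = demo_map_type_str(type);

        if (name)
            printf("map type %u -> %s\n", type, name);
        else
            printf("map type %u -> (unknown)\n", type);
        return 0;
    }

As the commit message notes, test_bpftool_synctypes.py performs this kind of cross-check between bpftool's tables and the kernel headers, which is how the missing bloom_filter and perf_event entries were found.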
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c index cae1f1119296..68cb121e65c4 100644 --- a/tools/bpf/bpftool/map.c +++ b/tools/bpf/bpftool/map.c @@ -53,6 +53,7 @@ const char * const map_type_name[] = { [BPF_MAP_TYPE_RINGBUF] = "ringbuf", [BPF_MAP_TYPE_INODE_STORAGE] = "inode_storage", [BPF_MAP_TYPE_TASK_STORAGE] = "task_storage", + [BPF_MAP_TYPE_BLOOM_FILTER] = "bloom_filter", }; const size_t map_type_name_size = ARRAY_SIZE(map_type_name); @@ -1477,7 +1478,7 @@ static int do_help(int argc, char **argv) " devmap | devmap_hash | sockmap | cpumap | xskmap | sockhash |\n" " cgroup_storage | reuseport_sockarray | percpu_cgroup_storage |\n" " queue | stack | sk_storage | struct_ops | ringbuf | inode_storage |\n" - " task_storage }\n" + " task_storage | bloom_filter }\n" " " HELP_SPEC_OPTIONS " |\n" " {-f|--bpffs} | {-n|--nomount} }\n" "", -- cgit v1.2.3 From b06be5651f08bc5bc305e2a3c722ddb33b783ee5 Mon Sep 17 00:00:00 2001 From: Quentin Monnet Date: Wed, 10 Nov 2021 11:46:32 +0000 Subject: bpftool: Fix mixed indentation in documentation Some paragraphs in bpftool's documentation have a mix of tabs and spaces for indentation. Let's make it consistent. This patch brings no change to the text content. Signed-off-by: Quentin Monnet Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20211110114632.24537-7-quentin@isovalent.com --- tools/bpf/bpftool/Documentation/bpftool-cgroup.rst | 10 ++-- tools/bpf/bpftool/Documentation/bpftool-net.rst | 66 +++++++++++----------- 2 files changed, 38 insertions(+), 38 deletions(-) diff --git a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst index 13a217a2503d..8069d37dd991 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst @@ -30,9 +30,9 @@ CGROUP COMMANDS | *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* } | *ATTACH_TYPE* := { **ingress** | **egress** | **sock_create** | **sock_ops** | **device** | | **bind4** | **bind6** | **post_bind4** | **post_bind6** | **connect4** | **connect6** | -| **getpeername4** | **getpeername6** | **getsockname4** | **getsockname6** | **sendmsg4** | -| **sendmsg6** | **recvmsg4** | **recvmsg6** | **sysctl** | **getsockopt** | **setsockopt** | -| **sock_release** } +| **getpeername4** | **getpeername6** | **getsockname4** | **getsockname6** | **sendmsg4** | +| **sendmsg6** | **recvmsg4** | **recvmsg6** | **sysctl** | **getsockopt** | **setsockopt** | +| **sock_release** } | *ATTACH_FLAGS* := { **multi** | **override** } DESCRIPTION @@ -98,9 +98,9 @@ DESCRIPTION **sendmsg6** call to sendto(2), sendmsg(2), sendmmsg(2) for an unconnected udp6 socket (since 4.18); **recvmsg4** call to recvfrom(2), recvmsg(2), recvmmsg(2) for - an unconnected udp4 socket (since 5.2); + an unconnected udp4 socket (since 5.2); **recvmsg6** call to recvfrom(2), recvmsg(2), recvmmsg(2) for - an unconnected udp6 socket (since 5.2); + an unconnected udp6 socket (since 5.2); **sysctl** sysctl access (since 5.2); **getsockopt** call to getsockopt (since 5.3); **setsockopt** call to setsockopt (since 5.3); diff --git a/tools/bpf/bpftool/Documentation/bpftool-net.rst b/tools/bpf/bpftool/Documentation/bpftool-net.rst index 1ae0375e8fea..7ec57535a7c1 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-net.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-net.rst @@ -31,44 +31,44 @@ NET COMMANDS DESCRIPTION =========== **bpftool net { show | list }** [ **dev** *NAME* ] - List bpf program 
attachments in the kernel networking subsystem. - - Currently, only device driver xdp attachments and tc filter - classification/action attachments are implemented, i.e., for - program types **BPF_PROG_TYPE_SCHED_CLS**, - **BPF_PROG_TYPE_SCHED_ACT** and **BPF_PROG_TYPE_XDP**. - For programs attached to a particular cgroup, e.g., - **BPF_PROG_TYPE_CGROUP_SKB**, **BPF_PROG_TYPE_CGROUP_SOCK**, - **BPF_PROG_TYPE_SOCK_OPS** and **BPF_PROG_TYPE_CGROUP_SOCK_ADDR**, - users can use **bpftool cgroup** to dump cgroup attachments. - For sk_{filter, skb, msg, reuseport} and lwt/seg6 - bpf programs, users should consult other tools, e.g., iproute2. - - The current output will start with all xdp program attachments, followed by - all tc class/qdisc bpf program attachments. Both xdp programs and - tc programs are ordered based on ifindex number. If multiple bpf - programs attached to the same networking device through **tc filter**, - the order will be first all bpf programs attached to tc classes, then - all bpf programs attached to non clsact qdiscs, and finally all - bpf programs attached to root and clsact qdisc. + List bpf program attachments in the kernel networking subsystem. + + Currently, only device driver xdp attachments and tc filter + classification/action attachments are implemented, i.e., for + program types **BPF_PROG_TYPE_SCHED_CLS**, + **BPF_PROG_TYPE_SCHED_ACT** and **BPF_PROG_TYPE_XDP**. + For programs attached to a particular cgroup, e.g., + **BPF_PROG_TYPE_CGROUP_SKB**, **BPF_PROG_TYPE_CGROUP_SOCK**, + **BPF_PROG_TYPE_SOCK_OPS** and **BPF_PROG_TYPE_CGROUP_SOCK_ADDR**, + users can use **bpftool cgroup** to dump cgroup attachments. + For sk_{filter, skb, msg, reuseport} and lwt/seg6 + bpf programs, users should consult other tools, e.g., iproute2. + + The current output will start with all xdp program attachments, followed by + all tc class/qdisc bpf program attachments. Both xdp programs and + tc programs are ordered based on ifindex number. If multiple bpf + programs attached to the same networking device through **tc filter**, + the order will be first all bpf programs attached to tc classes, then + all bpf programs attached to non clsact qdiscs, and finally all + bpf programs attached to root and clsact qdisc. **bpftool** **net attach** *ATTACH_TYPE* *PROG* **dev** *NAME* [ **overwrite** ] - Attach bpf program *PROG* to network interface *NAME* with - type specified by *ATTACH_TYPE*. Previously attached bpf program - can be replaced by the command used with **overwrite** option. - Currently, only XDP-related modes are supported for *ATTACH_TYPE*. + Attach bpf program *PROG* to network interface *NAME* with + type specified by *ATTACH_TYPE*. Previously attached bpf program + can be replaced by the command used with **overwrite** option. + Currently, only XDP-related modes are supported for *ATTACH_TYPE*. - *ATTACH_TYPE* can be of: - **xdp** - try native XDP and fallback to generic XDP if NIC driver does not support it; - **xdpgeneric** - Generic XDP. runs at generic XDP hook when packet already enters receive path as skb; - **xdpdrv** - Native XDP. runs earliest point in driver's receive path; - **xdpoffload** - Offload XDP. runs directly on NIC on each packet reception; + *ATTACH_TYPE* can be of: + **xdp** - try native XDP and fallback to generic XDP if NIC driver does not support it; + **xdpgeneric** - Generic XDP. runs at generic XDP hook when packet already enters receive path as skb; + **xdpdrv** - Native XDP. 
runs earliest point in driver's receive path; + **xdpoffload** - Offload XDP. runs directly on NIC on each packet reception; **bpftool** **net detach** *ATTACH_TYPE* **dev** *NAME* - Detach bpf program attached to network interface *NAME* with - type specified by *ATTACH_TYPE*. To detach bpf program, same - *ATTACH_TYPE* previously used for attach must be specified. - Currently, only XDP-related modes are supported for *ATTACH_TYPE*. + Detach bpf program attached to network interface *NAME* with + type specified by *ATTACH_TYPE*. To detach bpf program, same + *ATTACH_TYPE* previously used for attach must be specified. + Currently, only XDP-related modes are supported for *ATTACH_TYPE*. **bpftool net help** Print short help message. -- cgit v1.2.3 From e5043894b21f7d99d3db31ad06308d6c5726caa6 Mon Sep 17 00:00:00 2001 From: Hengqi Chen Date: Mon, 15 Nov 2021 09:24:36 +0800 Subject: bpftool: Use libbpf_get_error() to check error Currently, LIBBPF_STRICT_ALL mode is enabled by default for bpftool which means on error cases, some libbpf APIs would return NULL pointers. This makes IS_ERR check failed to detect such cases and result in segfault error. Use libbpf_get_error() instead like we do in libbpf itself. Signed-off-by: Hengqi Chen Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20211115012436.3143318-1-hengqi.chen@gmail.com --- tools/bpf/bpftool/btf.c | 9 +++++---- tools/bpf/bpftool/gen.c | 10 ++++++---- tools/bpf/bpftool/iter.c | 7 ++++--- tools/bpf/bpftool/map.c | 10 +++++----- tools/bpf/bpftool/struct_ops.c | 14 +++++++------- 5 files changed, 27 insertions(+), 23 deletions(-) diff --git a/tools/bpf/bpftool/btf.c b/tools/bpf/bpftool/btf.c index c7e3b0b0029e..59833125ac0a 100644 --- a/tools/bpf/bpftool/btf.c +++ b/tools/bpf/bpftool/btf.c @@ -421,8 +421,9 @@ static int dump_btf_c(const struct btf *btf, int err = 0, i; d = btf_dump__new(btf, btf_dump_printf, NULL, NULL); - if (IS_ERR(d)) - return PTR_ERR(d); + err = libbpf_get_error(d); + if (err) + return err; printf("#ifndef __VMLINUX_H__\n"); printf("#define __VMLINUX_H__\n"); @@ -549,8 +550,8 @@ static int do_dump(int argc, char **argv) } btf = btf__parse_split(*argv, base ?: base_btf); - if (IS_ERR(btf)) { - err = -PTR_ERR(btf); + err = libbpf_get_error(btf); + if (err) { btf = NULL; p_err("failed to load BTF from %s: %s", *argv, strerror(err)); diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c index 89f0e828bbfa..997a2865e04a 100644 --- a/tools/bpf/bpftool/gen.c +++ b/tools/bpf/bpftool/gen.c @@ -219,8 +219,9 @@ static int codegen_datasecs(struct bpf_object *obj, const char *obj_name) int i, err = 0; d = btf_dump__new(btf, codegen_btf_dump_printf, NULL, NULL); - if (IS_ERR(d)) - return PTR_ERR(d); + err = libbpf_get_error(d); + if (err) + return err; bpf_object__for_each_map(map, obj) { /* only generate definitions for memory-mapped internal maps */ @@ -719,10 +720,11 @@ static int do_skeleton(int argc, char **argv) get_obj_name(obj_name, file); opts.object_name = obj_name; obj = bpf_object__open_mem(obj_data, file_sz, &opts); - if (IS_ERR(obj)) { + err = libbpf_get_error(obj); + if (err) { char err_buf[256]; - libbpf_strerror(PTR_ERR(obj), err_buf, sizeof(err_buf)); + libbpf_strerror(err, err_buf, sizeof(err_buf)); p_err("failed to open BPF object file: %s", err_buf); obj = NULL; goto out; diff --git a/tools/bpf/bpftool/iter.c b/tools/bpf/bpftool/iter.c index 6c0de647b8ad..f88fdc820d23 100644 --- a/tools/bpf/bpftool/iter.c +++ b/tools/bpf/bpftool/iter.c @@ -46,7 +46,8 @@ static int do_pin(int argc, char 
**argv) } obj = bpf_object__open(objfile); - if (IS_ERR(obj)) { + err = libbpf_get_error(obj); + if (err) { p_err("can't open objfile %s", objfile); goto close_map_fd; } @@ -64,8 +65,8 @@ static int do_pin(int argc, char **argv) } link = bpf_program__attach_iter(prog, &iter_opts); - if (IS_ERR(link)) { - err = PTR_ERR(link); + err = libbpf_get_error(link); + if (err) { p_err("attach_iter failed for program %s", bpf_program__name(prog)); goto close_obj; diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c index 68cb121e65c4..25b258804f11 100644 --- a/tools/bpf/bpftool/map.c +++ b/tools/bpf/bpftool/map.c @@ -812,7 +812,7 @@ static struct btf *get_map_kv_btf(const struct bpf_map_info *info) if (info->btf_vmlinux_value_type_id) { if (!btf_vmlinux) { btf_vmlinux = libbpf_find_kernel_btf(); - if (IS_ERR(btf_vmlinux)) + if (libbpf_get_error(btf_vmlinux)) p_err("failed to get kernel btf"); } return btf_vmlinux; @@ -832,13 +832,13 @@ static struct btf *get_map_kv_btf(const struct bpf_map_info *info) static void free_map_kv_btf(struct btf *btf) { - if (!IS_ERR(btf) && btf != btf_vmlinux) + if (!libbpf_get_error(btf) && btf != btf_vmlinux) btf__free(btf); } static void free_btf_vmlinux(void) { - if (!IS_ERR(btf_vmlinux)) + if (!libbpf_get_error(btf_vmlinux)) btf__free(btf_vmlinux); } @@ -863,8 +863,8 @@ map_dump(int fd, struct bpf_map_info *info, json_writer_t *wtr, if (wtr) { btf = get_map_kv_btf(info); - if (IS_ERR(btf)) { - err = PTR_ERR(btf); + err = libbpf_get_error(btf); + if (err) { goto exit_free; } diff --git a/tools/bpf/bpftool/struct_ops.c b/tools/bpf/bpftool/struct_ops.c index 20f803dce2e4..cbdca37a53f0 100644 --- a/tools/bpf/bpftool/struct_ops.c +++ b/tools/bpf/bpftool/struct_ops.c @@ -32,7 +32,7 @@ static const struct btf *get_btf_vmlinux(void) return btf_vmlinux; btf_vmlinux = libbpf_find_kernel_btf(); - if (IS_ERR(btf_vmlinux)) + if (libbpf_get_error(btf_vmlinux)) p_err("struct_ops requires kernel CONFIG_DEBUG_INFO_BTF=y"); return btf_vmlinux; @@ -45,7 +45,7 @@ static const char *get_kern_struct_ops_name(const struct bpf_map_info *info) const char *st_ops_name; kern_btf = get_btf_vmlinux(); - if (IS_ERR(kern_btf)) + if (libbpf_get_error(kern_btf)) return ""; t = btf__type_by_id(kern_btf, info->btf_vmlinux_value_type_id); @@ -63,7 +63,7 @@ static __s32 get_map_info_type_id(void) return map_info_type_id; kern_btf = get_btf_vmlinux(); - if (IS_ERR(kern_btf)) { + if (libbpf_get_error(kern_btf)) { map_info_type_id = PTR_ERR(kern_btf); return map_info_type_id; } @@ -415,7 +415,7 @@ static int do_dump(int argc, char **argv) } kern_btf = get_btf_vmlinux(); - if (IS_ERR(kern_btf)) + if (libbpf_get_error(kern_btf)) return -1; if (!json_output) { @@ -495,7 +495,7 @@ static int do_register(int argc, char **argv) file = GET_ARG(); obj = bpf_object__open(file); - if (IS_ERR_OR_NULL(obj)) + if (libbpf_get_error(obj)) return -1; set_max_rlimit(); @@ -516,7 +516,7 @@ static int do_register(int argc, char **argv) continue; link = bpf_map__attach_struct_ops(map); - if (IS_ERR(link)) { + if (libbpf_get_error(link)) { p_err("can't register struct_ops %s: %s", bpf_map__name(map), strerror(-PTR_ERR(link))); @@ -596,7 +596,7 @@ int do_struct_ops(int argc, char **argv) err = cmd_select(cmds, argc, argv, do_help); - if (!IS_ERR(btf_vmlinux)) + if (!libbpf_get_error(btf_vmlinux)) btf__free(btf_vmlinux); return err; -- cgit v1.2.3
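For reference, the error-handling pattern this last patch converges on can be sketched as follows. With LIBBPF_STRICT_ALL, pointer-returning libbpf APIs may report failure by returning NULL and setting errno rather than an ERR_PTR-encoded pointer, so an IS_ERR()-only check can miss the failure and a later dereference crashes; libbpf_get_error() covers both conventions. This is a simplified sketch, not code from the patch, and the object file name is a placeholder.

    #include <stdio.h>
    #include <bpf/libbpf.h>

    int main(void)
    {
        struct bpf_object *obj;
        char buf[256];
        long err;

        libbpf_set_strict_mode(LIBBPF_STRICT_ALL);

        /* Placeholder object file name, for illustration only. */
        obj = bpf_object__open("example_prog.bpf.o");

        /* Works whether the API returned NULL (errno set) or ERR_PTR(-err). */
        err = libbpf_get_error(obj);
        if (err) {
            libbpf_strerror(err, buf, sizeof(buf));
            fprintf(stderr, "failed to open BPF object: %s\n", buf);
            return 1;
        }

        /* ... load programs, attach, etc. ... */
        bpf_object__close(obj);
        return 0;
    }

The call sites converted above (btf_dump__new(), btf__parse_split(), bpf_object__open(), bpf_program__attach_iter(), libbpf_find_kernel_btf(), bpf_map__attach_struct_ops()) all follow this shape: obtain the pointer, run it through libbpf_get_error(), and only use it once the check passes.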