From e8b012e9fabe7eafc1b2c72414e174547683860d Mon Sep 17 00:00:00 2001
From: Stanislav Fomichev
Date: Mon, 6 Jul 2020 16:01:26 -0700
Subject: libbpf: Add support for BPF_CGROUP_INET_SOCK_RELEASE

Add auto-detection for the cgroup/sock_release programs.

Signed-off-by: Stanislav Fomichev
Signed-off-by: Daniel Borkmann
Acked-by: Andrii Nakryiko
Link: https://lore.kernel.org/bpf/20200706230128.4073544-3-sdf@google.com
---
 tools/lib/bpf/libbpf.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 4ea7f4f1a691..88a483627a2b 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -6917,6 +6917,10 @@ static const struct bpf_sec_def section_defs[] = {
 	BPF_APROG_SEC("cgroup_skb/egress",	BPF_PROG_TYPE_CGROUP_SKB,
 						BPF_CGROUP_INET_EGRESS),
 	BPF_APROG_COMPAT("cgroup/skb",		BPF_PROG_TYPE_CGROUP_SKB),
+	BPF_EAPROG_SEC("cgroup/sock_create",	BPF_PROG_TYPE_CGROUP_SOCK,
+						BPF_CGROUP_INET_SOCK_CREATE),
+	BPF_EAPROG_SEC("cgroup/sock_release",	BPF_PROG_TYPE_CGROUP_SOCK,
+						BPF_CGROUP_INET_SOCK_RELEASE),
 	BPF_APROG_SEC("cgroup/sock",		BPF_PROG_TYPE_CGROUP_SOCK,
 						BPF_CGROUP_INET_SOCK_CREATE),
 	BPF_EAPROG_SEC("cgroup/post_bind4",	BPF_PROG_TYPE_CGROUP_SOCK,
-- cgit v1.2.3

From bfc96656a766bd0566565d8a7b6232b2b036fdfb Mon Sep 17 00:00:00 2001
From: Andrii Nakryiko
Date: Tue, 7 Jul 2020 18:53:13 -0700
Subject: libbpf: Make BTF finalization strict

With valid ELF and valid BTF, there is no reason (apart from bugs) why BTF
finalization should fail. So make it strict and return an error if it fails.
This makes CO-RE relocations more reliable, as they are not silently skipped
if BTF finalization fails.

Signed-off-by: Andrii Nakryiko
Signed-off-by: Daniel Borkmann
Link: https://lore.kernel.org/bpf/20200708015318.3827358-2-andriin@fb.com
---
 tools/lib/bpf/libbpf.c | 16 ++++------------
 1 file changed, 4 insertions(+), 12 deletions(-)

diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 88a483627a2b..efd857ebd5ee 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -2473,19 +2473,11 @@ static int bpf_object__finalize_btf(struct bpf_object *obj)
 		return 0;
 
 	err = btf__finalize_data(obj, obj->btf);
-	if (!err)
-		return 0;
-
-	pr_warn("Error finalizing %s: %d.\n", BTF_ELF_SEC, err);
-	btf__free(obj->btf);
-	obj->btf = NULL;
-	btf_ext__free(obj->btf_ext);
-	obj->btf_ext = NULL;
-
-	if (libbpf_needs_btf(obj)) {
-		pr_warn("BTF is required, but is missing or corrupted.\n");
-		return -ENOENT;
+	if (err) {
+		pr_warn("Error finalizing %s: %d.\n", BTF_ELF_SEC, err);
+		return err;
 	}
+
 	return 0;
 }
-- cgit v1.2.3
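As context for the BPF_CGROUP_INET_SOCK_RELEASE patch above, here is a
minimal sketch of a program that the new section definition would
auto-detect; the program and license variable names are illustrative, not
taken from the patch:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    /* From the section name alone, libbpf now infers prog type
     * BPF_PROG_TYPE_CGROUP_SOCK and expected attach type
     * BPF_CGROUP_INET_SOCK_RELEASE.
     */
    SEC("cgroup/sock_release")
    int sock_release_prog(struct bpf_sock *ctx)
    {
            return 1;
    }

    char LICENSE[] SEC("license") = "GPL";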
From 0f0e55d8247c0742a9b026fc097fcc605383b9db Mon Sep 17 00:00:00 2001
From: Andrii Nakryiko
Date: Tue, 7 Jul 2020 18:53:15 -0700
Subject: libbpf: Improve BTF sanitization handling

Change the sanitization process to preserve the original BTF, which might be
used by libbpf itself for Kconfig externs, CO-RE relocations, etc., even if
the kernel is old and doesn't support BTF. To achieve that, if libbpf detects
the need for BTF sanitization, it clones the original BTF, sanitizes the copy
in-place, and attempts to load it into the kernel; if that succeeds, it
preserves the loaded BTF FD in the original `struct btf` and frees the
sanitized local copy. If the kernel doesn't support BTF at all, the original
btf and btf_ext are still preserved for later use by CO-RE relocation and
other BTF-dependent libbpf features, which don't depend on kernel BTF
support.

The patch takes care not to specify BTF and BTF.ext info when loading BPF
programs and/or maps if it was detected that the kernel doesn't support BTF
features.

Signed-off-by: Andrii Nakryiko
Signed-off-by: Daniel Borkmann
Link: https://lore.kernel.org/bpf/20200708015318.3827358-4-andriin@fb.com
---
 tools/lib/bpf/libbpf.c | 103 ++++++++++++++++++++++++++++---------------------
 1 file changed, 58 insertions(+), 45 deletions(-)

diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index efd857ebd5ee..460e5790a95f 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -2338,18 +2338,23 @@ static bool section_have_execinstr(struct bpf_object *obj, int idx)
 	return false;
 }
 
-static void bpf_object__sanitize_btf(struct bpf_object *obj)
+static bool btf_needs_sanitization(struct bpf_object *obj)
+{
+	bool has_func_global = obj->caps.btf_func_global;
+	bool has_datasec = obj->caps.btf_datasec;
+	bool has_func = obj->caps.btf_func;
+
+	return !has_func || !has_datasec || !has_func_global;
+}
+
+static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
 {
 	bool has_func_global = obj->caps.btf_func_global;
 	bool has_datasec = obj->caps.btf_datasec;
 	bool has_func = obj->caps.btf_func;
-	struct btf *btf = obj->btf;
 	struct btf_type *t;
 	int i, j, vlen;
 
-	if (!obj->btf || (has_func && has_datasec && has_func_global))
-		return;
-
 	for (i = 1; i <= btf__get_nr_types(btf); i++) {
 		t = (struct btf_type *)btf__type_by_id(btf, i);
 
@@ -2402,17 +2407,6 @@ static void bpf_object__sanitize_btf(struct bpf_object *obj)
 	}
 }
 
-static void bpf_object__sanitize_btf_ext(struct bpf_object *obj)
-{
-	if (!obj->btf_ext)
-		return;
-
-	if (!obj->caps.btf_func) {
-		btf_ext__free(obj->btf_ext);
-		obj->btf_ext = NULL;
-	}
-}
-
 static bool libbpf_needs_btf(const struct bpf_object *obj)
 {
 	return obj->efile.btf_maps_shndx >= 0 ||
@@ -2530,30 +2524,50 @@ static int bpf_object__load_vmlinux_btf(struct bpf_object *obj)
 
 static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
 {
+	struct btf *kern_btf = obj->btf;
+	bool btf_mandatory, sanitize;
 	int err = 0;
 
 	if (!obj->btf)
 		return 0;
 
-	bpf_object__sanitize_btf(obj);
-	bpf_object__sanitize_btf_ext(obj);
+	sanitize = btf_needs_sanitization(obj);
+	if (sanitize) {
+		const void *orig_data;
+		void *san_data;
+		__u32 sz;
 
-	err = btf__load(obj->btf);
-	if (err) {
-		pr_warn("Error loading %s into kernel: %d.\n",
-			BTF_ELF_SEC, err);
-		btf__free(obj->btf);
-		obj->btf = NULL;
-		/* btf_ext can't exist without btf, so free it as well */
-		if (obj->btf_ext) {
-			btf_ext__free(obj->btf_ext);
-			obj->btf_ext = NULL;
-		}
+		/* clone BTF to sanitize a copy and leave the original intact */
+		orig_data = btf__get_raw_data(obj->btf, &sz);
+		san_data = malloc(sz);
+		if (!san_data)
+			return -ENOMEM;
+		memcpy(san_data, orig_data, sz);
+		kern_btf = btf__new(san_data, sz);
+		if (IS_ERR(kern_btf))
+			return PTR_ERR(kern_btf);
 
-		if (kernel_needs_btf(obj))
-			return err;
+		bpf_object__sanitize_btf(obj, kern_btf);
 	}
-	return 0;
+
+	err = btf__load(kern_btf);
+	if (sanitize) {
+		if (!err) {
+			/* move fd to libbpf's BTF */
+			btf__set_fd(obj->btf, btf__fd(kern_btf));
+			btf__set_fd(kern_btf, -1);
+		}
+		btf__free(kern_btf);
+	}
+	if (err) {
+		btf_mandatory = kernel_needs_btf(obj);
+		pr_warn("Error loading .BTF into kernel: %d. %s\n", err,
+			btf_mandatory ? "BTF is mandatory, can't proceed."
+				      : "BTF is optional, ignoring.");
+		if (!btf_mandatory)
+			err = 0;
+	}
+	return err;
 }
 
 static int bpf_object__elf_collect(struct bpf_object *obj)
@@ -3777,7 +3791,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map)
 	create_attr.btf_fd = 0;
 	create_attr.btf_key_type_id = 0;
 	create_attr.btf_value_type_id = 0;
-	if (obj->btf && !bpf_map_find_btf_info(obj, map)) {
+	if (obj->btf && btf__fd(obj->btf) >= 0 && !bpf_map_find_btf_info(obj, map)) {
 		create_attr.btf_fd = btf__fd(obj->btf);
 		create_attr.btf_key_type_id = map->btf_key_type_id;
 		create_attr.btf_value_type_id = map->btf_value_type_id;
@@ -5361,18 +5375,17 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
 		load_attr.kern_version = kern_version;
 		load_attr.prog_ifindex = prog->prog_ifindex;
 	}
-	/* if .BTF.ext was loaded, kernel supports associated BTF for prog */
-	if (prog->obj->btf_ext)
-		btf_fd = bpf_object__btf_fd(prog->obj);
-	else
-		btf_fd = -1;
-	load_attr.prog_btf_fd = btf_fd >= 0 ? btf_fd : 0;
-	load_attr.func_info = prog->func_info;
-	load_attr.func_info_rec_size = prog->func_info_rec_size;
-	load_attr.func_info_cnt = prog->func_info_cnt;
-	load_attr.line_info = prog->line_info;
-	load_attr.line_info_rec_size = prog->line_info_rec_size;
-	load_attr.line_info_cnt = prog->line_info_cnt;
+	/* specify func_info/line_info only if kernel supports them */
+	btf_fd = bpf_object__btf_fd(prog->obj);
+	if (btf_fd >= 0 && prog->obj->caps.btf_func) {
+		load_attr.prog_btf_fd = btf_fd;
+		load_attr.func_info = prog->func_info;
+		load_attr.func_info_rec_size = prog->func_info_rec_size;
+		load_attr.func_info_cnt = prog->func_info_cnt;
+		load_attr.line_info = prog->line_info;
+		load_attr.line_info_rec_size = prog->line_info_rec_size;
+		load_attr.line_info_cnt = prog->line_info_cnt;
+	}
 	load_attr.log_level = prog->log_level;
 	load_attr.prog_flags = prog->prog_flags;
-- cgit v1.2.3
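A caller-visible consequence of the sanitization patch above, sketched
against the libbpf API of this era (the helper name is illustrative): after
bpf_object__load(), the original BTF is kept even on old kernels, and
btf__fd() reports whether the kernel accepted it.

    #include <stdio.h>
    #include <bpf/libbpf.h>
    #include <bpf/btf.h>

    /* Illustrative helper: inspect what happened to an object's BTF. */
    void report_btf_state(struct bpf_object *obj)
    {
            struct btf *btf = bpf_object__btf(obj);

            if (btf && btf__fd(btf) >= 0)
                    printf("kernel accepted BTF, fd=%d\n", btf__fd(btf));
            else if (btf)
                    printf("BTF kept locally only; kernel lacks BTF support\n");
            else
                    printf("object has no BTF at all\n");
    }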
From 0e289487308236903b19273f2ddb4f0adf732b9e Mon Sep 17 00:00:00 2001
From: Andrii Nakryiko
Date: Tue, 7 Jul 2020 18:53:17 -0700
Subject: libbpf: Handle missing BPF_OBJ_GET_INFO_BY_FD gracefully in perf_buffer

perf_buffer__new() relies on BPF_OBJ_GET_INFO_BY_FD availability for a few
sanity checks. OBJ_GET_INFO for maps is actually a much more recent feature
than perf_buffer support itself, so this causes unnecessary problems on old
kernels, before BPF_OBJ_GET_INFO_BY_FD was added. This patch makes those
sanity checks optional and just assumes the best if the command is not
supported. If the user specified something incorrectly (e.g., the wrong map
type), the kernel will reject it later anyway, except the user won't get a
nice explanation as to why it failed. This seems like a good trade-off for
supporting perf_buffer on old kernels.

Signed-off-by: Andrii Nakryiko
Signed-off-by: Daniel Borkmann
Link: https://lore.kernel.org/bpf/20200708015318.3827358-6-andriin@fb.com
---
 tools/lib/bpf/libbpf.c | 31 ++++++++++++++++++++-----------
 1 file changed, 20 insertions(+), 11 deletions(-)

diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 460e5790a95f..6602eb479596 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -8591,7 +8591,7 @@ static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
 					      struct perf_buffer_params *p)
 {
 	const char *online_cpus_file = "/sys/devices/system/cpu/online";
-	struct bpf_map_info map = {};
+	struct bpf_map_info map;
 	char msg[STRERR_BUFSIZE];
 	struct perf_buffer *pb;
 	bool *online = NULL;
@@ -8604,19 +8604,28 @@ static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
 		return ERR_PTR(-EINVAL);
 	}
 
+	/* best-effort sanity checks */
+	memset(&map, 0, sizeof(map));
 	map_info_len = sizeof(map);
 	err = bpf_obj_get_info_by_fd(map_fd, &map, &map_info_len);
 	if (err) {
 		err = -errno;
-		pr_warn("failed to get map info for map FD %d: %s\n",
-			map_fd, libbpf_strerror_r(err, msg, sizeof(msg)));
-		return ERR_PTR(err);
-	}
-
-	if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
-		pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
-			map.name);
-		return ERR_PTR(-EINVAL);
+		/* if BPF_OBJ_GET_INFO_BY_FD is supported, will return
+		 * -EBADFD, -EFAULT, or -E2BIG on real error
+		 */
+		if (err != -EINVAL) {
+			pr_warn("failed to get map info for map FD %d: %s\n",
+				map_fd, libbpf_strerror_r(err, msg, sizeof(msg)));
+			return ERR_PTR(err);
+		}
+		pr_debug("failed to get map info for FD %d; API not supported? Ignoring...\n",
+			 map_fd);
+	} else {
+		if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
+			pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
+				map.name);
+			return ERR_PTR(-EINVAL);
+		}
 	}
 
 	pb = calloc(1, sizeof(*pb));
@@ -8648,7 +8657,7 @@ static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
 			err = pb->cpu_cnt;
 			goto error;
 		}
-		if (map.max_entries < pb->cpu_cnt)
+		if (map.max_entries && map.max_entries < pb->cpu_cnt)
 			pb->cpu_cnt = map.max_entries;
 	}
 
-- cgit v1.2.3
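For context on the perf_buffer patch above, a usage sketch against the
perf_buffer API of this libbpf version (the function and variable names
other than the libbpf calls are illustrative). On kernels without
BPF_OBJ_GET_INFO_BY_FD, this call now proceeds instead of failing the
sanity checks:

    #include <bpf/libbpf.h>

    static void on_sample(void *ctx, int cpu, void *data, __u32 size)
    {
            /* consume one event delivered for this CPU */
    }

    /* map_fd must refer to a BPF_MAP_TYPE_PERF_EVENT_ARRAY map */
    int run_perf_buffer(int map_fd)
    {
            struct perf_buffer_opts opts = { .sample_cb = on_sample };
            struct perf_buffer *pb;

            pb = perf_buffer__new(map_fd, 8 /* pages per CPU */, &opts);
            if (libbpf_get_error(pb))
                    return -1;

            while (perf_buffer__poll(pb, 100 /* timeout, ms */) >= 0)
                    ; /* callbacks fire from inside poll */

            perf_buffer__free(pb);
            return 0;
    }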
From 5c3320d7fece4612d4a413aa3c8e82cdb5b49fcb Mon Sep 17 00:00:00 2001
From: Andrii Nakryiko
Date: Thu, 9 Jul 2020 18:10:23 -0700
Subject: libbpf: Fix memory leak and optimize BTF sanitization

Coverity's static analysis helpfully reported a memory leak introduced by
0f0e55d8247c ("libbpf: Improve BTF sanitization handling"). While fixing it,
I realized that btf__new() already creates a memory copy, so there is no need
to do that manually. This patch therefore also fixes the misleading
btf__new() signature, making data a `const void *` input parameter, and
avoids the unnecessary memory allocation and copy in the BTF sanitization
code altogether.

Fixes: 0f0e55d8247c ("libbpf: Improve BTF sanitization handling")
Signed-off-by: Andrii Nakryiko
Signed-off-by: Daniel Borkmann
Link: https://lore.kernel.org/bpf/20200710011023.1655008-1-andriin@fb.com
---
 tools/lib/bpf/btf.c    |  2 +-
 tools/lib/bpf/btf.h    |  2 +-
 tools/lib/bpf/libbpf.c | 11 +++--------
 3 files changed, 5 insertions(+), 10 deletions(-)

diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index c8861c9e3635..c9e760e120dc 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -397,7 +397,7 @@ void btf__free(struct btf *btf)
 	free(btf);
 }
 
-struct btf *btf__new(__u8 *data, __u32 size)
+struct btf *btf__new(const void *data, __u32 size)
 {
 	struct btf *btf;
 	int err;
diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
index 173eff23c472..a3b7ef9b737f 100644
--- a/tools/lib/bpf/btf.h
+++ b/tools/lib/bpf/btf.h
@@ -63,7 +63,7 @@ struct btf_ext_header {
 };
 
 LIBBPF_API void btf__free(struct btf *btf);
-LIBBPF_API struct btf *btf__new(__u8 *data, __u32 size);
+LIBBPF_API struct btf *btf__new(const void *data, __u32 size);
 LIBBPF_API struct btf *btf__parse_elf(const char *path,
 				      struct btf_ext **btf_ext);
 LIBBPF_API int btf__finalize_data(struct bpf_object *obj, struct btf *btf);
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 6602eb479596..25e4f77be8d7 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -2533,17 +2533,12 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
 
 	sanitize = btf_needs_sanitization(obj);
 	if (sanitize) {
-		const void *orig_data;
-		void *san_data;
+		const void *raw_data;
 		__u32 sz;
 
 		/* clone BTF to sanitize a copy and leave the original intact */
-		orig_data = btf__get_raw_data(obj->btf, &sz);
-		san_data = malloc(sz);
-		if (!san_data)
-			return -ENOMEM;
-		memcpy(san_data, orig_data, sz);
-		kern_btf = btf__new(san_data, sz);
+		raw_data = btf__get_raw_data(obj->btf, &sz);
+		kern_btf = btf__new(raw_data, sz);
 		if (IS_ERR(kern_btf))
 			return PTR_ERR(kern_btf);
 
-- cgit v1.2.3
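Finally, a small sketch of what the const-corrected btf__new() enables for
callers (the clone_btf() helper is illustrative): raw data obtained from an
existing BTF object can be passed straight through, since btf__new() makes
its own internal copy; this is exactly what the simplified sanitization path
now relies on.

    #include <bpf/btf.h>
    #include <bpf/libbpf.h>

    /* Illustrative helper: duplicate a BTF object via its raw data. */
    struct btf *clone_btf(const struct btf *base)
    {
            const void *raw_data;
            struct btf *clone;
            __u32 sz;

            raw_data = btf__get_raw_data(base, &sz);
            clone = btf__new(raw_data, sz);
            if (libbpf_get_error(clone))
                    return NULL;
            return clone;
    }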