From f5836749c9c04a10decd2742845ad4870965fdef Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Mon, 6 Jul 2020 16:01:25 -0700 Subject: bpf: Add BPF_CGROUP_INET_SOCK_RELEASE hook Sometimes it's handy to know when the socket gets freed. In particular, we'd like to try to use a smarter allocation of ports for bpf_bind and explore the possibility of limiting the number of SOCK_DGRAM sockets the process can have. Implement BPF_CGROUP_INET_SOCK_RELEASE hook that triggers on inet socket release. It triggers only for userspace sockets (not in-kernel ones) and therefore has the same semantics as the existing BPF_CGROUP_INET_SOCK_CREATE. Signed-off-by: Stanislav Fomichev Signed-off-by: Daniel Borkmann Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200706230128.4073544-2-sdf@google.com --- include/linux/bpf-cgroup.h | 4 ++++ include/uapi/linux/bpf.h | 1 + kernel/bpf/syscall.c | 3 +++ net/core/filter.c | 1 + net/ipv4/af_inet.c | 3 +++ 5 files changed, 12 insertions(+) diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index c66c545e161a..2c6f26670acc 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h @@ -210,6 +210,9 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, #define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) \ BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE) +#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) \ + BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_RELEASE) + #define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) \ BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND) @@ -401,6 +404,7 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map, #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; }) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index da9bf35a26f8..548a749aebb3 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -226,6 +226,7 @@ enum bpf_attach_type { BPF_CGROUP_INET4_GETSOCKNAME, BPF_CGROUP_INET6_GETSOCKNAME, BPF_XDP_DEVMAP, + BPF_CGROUP_INET_SOCK_RELEASE, __MAX_BPF_ATTACH_TYPE }; diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 8da159936bab..156f51ffada2 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -1981,6 +1981,7 @@ bpf_prog_load_check_attach(enum bpf_prog_type prog_type, case BPF_PROG_TYPE_CGROUP_SOCK: switch (expected_attach_type) { case BPF_CGROUP_INET_SOCK_CREATE: + case BPF_CGROUP_INET_SOCK_RELEASE: case BPF_CGROUP_INET4_POST_BIND: case BPF_CGROUP_INET6_POST_BIND: return 0; @@ -2779,6 +2780,7 @@ attach_type_to_prog_type(enum bpf_attach_type attach_type) return BPF_PROG_TYPE_CGROUP_SKB; break; case BPF_CGROUP_INET_SOCK_CREATE: + case BPF_CGROUP_INET_SOCK_RELEASE: case BPF_CGROUP_INET4_POST_BIND: case BPF_CGROUP_INET6_POST_BIND: return BPF_PROG_TYPE_CGROUP_SOCK; @@ -2929,6 +2931,7 @@ static int bpf_prog_query(const union bpf_attr *attr, case BPF_CGROUP_INET_INGRESS: case BPF_CGROUP_INET_EGRESS: case BPF_CGROUP_INET_SOCK_CREATE: + case BPF_CGROUP_INET_SOCK_RELEASE: case BPF_CGROUP_INET4_BIND: case BPF_CGROUP_INET6_BIND: case BPF_CGROUP_INET4_POST_BIND: diff --git a/net/core/filter.c b/net/core/filter.c index c5e696e6c315..ddcc0d6209e1 100644 --- a/net/core/filter.c 
+++ b/net/core/filter.c @@ -6890,6 +6890,7 @@ static bool __sock_filter_check_attach_type(int off, case offsetof(struct bpf_sock, priority): switch (attach_type) { case BPF_CGROUP_INET_SOCK_CREATE: + case BPF_CGROUP_INET_SOCK_RELEASE: goto full_access; default: return false; diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index ea6ed6d487ed..ff141d630bdf 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -411,6 +411,9 @@ int inet_release(struct socket *sock) if (sk) { long timeout; + if (!sk->sk_kern_sock) + BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk); + /* Applications forget to leave groups before exiting */ ip_mc_drop_socket(sk); -- cgit v1.2.3 From e8b012e9fabe7eafc1b2c72414e174547683860d Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Mon, 6 Jul 2020 16:01:26 -0700 Subject: libbpf: Add support for BPF_CGROUP_INET_SOCK_RELEASE Add auto-detection for the cgroup/sock_release programs. Signed-off-by: Stanislav Fomichev Signed-off-by: Daniel Borkmann Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200706230128.4073544-3-sdf@google.com --- tools/include/uapi/linux/bpf.h | 1 + tools/lib/bpf/libbpf.c | 4 ++++ 2 files changed, 5 insertions(+) diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index da9bf35a26f8..548a749aebb3 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -226,6 +226,7 @@ enum bpf_attach_type { BPF_CGROUP_INET4_GETSOCKNAME, BPF_CGROUP_INET6_GETSOCKNAME, BPF_XDP_DEVMAP, + BPF_CGROUP_INET_SOCK_RELEASE, __MAX_BPF_ATTACH_TYPE }; diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 4ea7f4f1a691..88a483627a2b 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -6917,6 +6917,10 @@ static const struct bpf_sec_def section_defs[] = { BPF_APROG_SEC("cgroup_skb/egress", BPF_PROG_TYPE_CGROUP_SKB, BPF_CGROUP_INET_EGRESS), BPF_APROG_COMPAT("cgroup/skb", BPF_PROG_TYPE_CGROUP_SKB), + BPF_EAPROG_SEC("cgroup/sock_create", BPF_PROG_TYPE_CGROUP_SOCK, + BPF_CGROUP_INET_SOCK_CREATE), + BPF_EAPROG_SEC("cgroup/sock_release", BPF_PROG_TYPE_CGROUP_SOCK, + BPF_CGROUP_INET_SOCK_RELEASE), BPF_APROG_SEC("cgroup/sock", BPF_PROG_TYPE_CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE), BPF_EAPROG_SEC("cgroup/post_bind4", BPF_PROG_TYPE_CGROUP_SOCK, -- cgit v1.2.3 From db94cc0b4805968a0357ed2507730cdf77adf174 Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Mon, 6 Jul 2020 16:01:27 -0700 Subject: bpftool: Add support for BPF_CGROUP_INET_SOCK_RELEASE Support attaching to BPF_CGROUP_INET_SOCK_RELEASE and properly display attach type upon prog dump. 
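For illustration, a minimal program using the new section name could look like the sketch below (the udp_limit selftest added later in this series is the authoritative example; the file and program names here are placeholders). libbpf auto-detects it as BPF_PROG_TYPE_CGROUP_SOCK with expected attach type BPF_CGROUP_INET_SOCK_RELEASE, and it can then be attached with bpf_program__attach_cgroup() against a cgroup FD, or via bpftool's new "sock_release" attach type.

/* sock_release_min.bpf.c -- illustrative sketch, not part of this series */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

int releases = 0;

SEC("cgroup/sock_release")
int count_release(struct bpf_sock *ctx)
{
	__sync_fetch_and_add(&releases, 1);
	/* follow the cgroup/sock convention and return 1 (allow) */
	return 1;
}

char _license[] SEC("license") = "GPL";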
Signed-off-by: Stanislav Fomichev Signed-off-by: Daniel Borkmann Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200706230128.4073544-4-sdf@google.com --- tools/bpf/bpftool/common.c | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c index 18e5604fe260..29f4e7611ae8 100644 --- a/tools/bpf/bpftool/common.c +++ b/tools/bpf/bpftool/common.c @@ -33,6 +33,7 @@ const char * const attach_type_name[__MAX_BPF_ATTACH_TYPE] = { [BPF_CGROUP_INET_INGRESS] = "ingress", [BPF_CGROUP_INET_EGRESS] = "egress", [BPF_CGROUP_INET_SOCK_CREATE] = "sock_create", + [BPF_CGROUP_INET_SOCK_RELEASE] = "sock_release", [BPF_CGROUP_SOCK_OPS] = "sock_ops", [BPF_CGROUP_DEVICE] = "device", [BPF_CGROUP_INET4_BIND] = "bind4", -- cgit v1.2.3 From 65ffd797861a44ff97081de1db01e4aef716ed46 Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Mon, 6 Jul 2020 16:01:28 -0700 Subject: selftests/bpf: Test BPF_CGROUP_INET_SOCK_RELEASE Simple test that enforces a single SOCK_DGRAM socket per cgroup. Signed-off-by: Stanislav Fomichev Signed-off-by: Daniel Borkmann Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200706230128.4073544-5-sdf@google.com --- tools/testing/selftests/bpf/prog_tests/udp_limit.c | 75 ++++++++++++++++++++++ tools/testing/selftests/bpf/progs/udp_limit.c | 42 ++++++++++++ 2 files changed, 117 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/udp_limit.c create mode 100644 tools/testing/selftests/bpf/progs/udp_limit.c diff --git a/tools/testing/selftests/bpf/prog_tests/udp_limit.c b/tools/testing/selftests/bpf/prog_tests/udp_limit.c new file mode 100644 index 000000000000..2aba09d4d01b --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/udp_limit.c @@ -0,0 +1,75 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include "udp_limit.skel.h" + +#include +#include + +static int duration; + +void test_udp_limit(void) +{ + struct udp_limit *skel; + int fd1 = -1, fd2 = -1; + int cgroup_fd; + + cgroup_fd = test__join_cgroup("/udp_limit"); + if (CHECK(cgroup_fd < 0, "cg-join", "errno %d", errno)) + return; + + skel = udp_limit__open_and_load(); + if (CHECK(!skel, "skel-load", "errno %d", errno)) + goto close_cgroup_fd; + + skel->links.sock = bpf_program__attach_cgroup(skel->progs.sock, cgroup_fd); + skel->links.sock_release = bpf_program__attach_cgroup(skel->progs.sock_release, cgroup_fd); + if (CHECK(IS_ERR(skel->links.sock) || IS_ERR(skel->links.sock_release), + "cg-attach", "sock %ld sock_release %ld", + PTR_ERR(skel->links.sock), + PTR_ERR(skel->links.sock_release))) + goto close_skeleton; + + /* BPF program enforces a single UDP socket per cgroup, + * verify that. + */ + fd1 = socket(AF_INET, SOCK_DGRAM, 0); + if (CHECK(fd1 < 0, "fd1", "errno %d", errno)) + goto close_skeleton; + + fd2 = socket(AF_INET, SOCK_DGRAM, 0); + if (CHECK(fd2 >= 0, "fd2", "errno %d", errno)) + goto close_skeleton; + + /* We can reopen again after close. 
*/ + close(fd1); + fd1 = -1; + + fd1 = socket(AF_INET, SOCK_DGRAM, 0); + if (CHECK(fd1 < 0, "fd1-again", "errno %d", errno)) + goto close_skeleton; + + /* Make sure the program was invoked the expected + * number of times: + * - open fd1 - BPF_CGROUP_INET_SOCK_CREATE + * - attempt to openfd2 - BPF_CGROUP_INET_SOCK_CREATE + * - close fd1 - BPF_CGROUP_INET_SOCK_RELEASE + * - open fd1 again - BPF_CGROUP_INET_SOCK_CREATE + */ + if (CHECK(skel->bss->invocations != 4, "bss-invocations", + "invocations=%d", skel->bss->invocations)) + goto close_skeleton; + + /* We should still have a single socket in use */ + if (CHECK(skel->bss->in_use != 1, "bss-in_use", + "in_use=%d", skel->bss->in_use)) + goto close_skeleton; + +close_skeleton: + if (fd1 >= 0) + close(fd1); + if (fd2 >= 0) + close(fd2); + udp_limit__destroy(skel); +close_cgroup_fd: + close(cgroup_fd); +} diff --git a/tools/testing/selftests/bpf/progs/udp_limit.c b/tools/testing/selftests/bpf/progs/udp_limit.c new file mode 100644 index 000000000000..8429b22525a7 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/udp_limit.c @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include +#include +#include + +int invocations = 0, in_use = 0; + +SEC("cgroup/sock_create") +int sock(struct bpf_sock *ctx) +{ + __u32 key; + + if (ctx->type != SOCK_DGRAM) + return 1; + + __sync_fetch_and_add(&invocations, 1); + + if (in_use > 0) { + /* BPF_CGROUP_INET_SOCK_RELEASE is _not_ called + * when we return an error from the BPF + * program! + */ + return 0; + } + + __sync_fetch_and_add(&in_use, 1); + return 1; +} + +SEC("cgroup/sock_release") +int sock_release(struct bpf_sock *ctx) +{ + __u32 key; + + if (ctx->type != SOCK_DGRAM) + return 1; + + __sync_fetch_and_add(&invocations, 1); + __sync_fetch_and_add(&in_use, -1); + return 1; +} -- cgit v1.2.3 From af9bd3e3331b8af42b6606c75797d041ab39380c Mon Sep 17 00:00:00 2001 From: "Daniel T. Lee" Date: Wed, 8 Jul 2020 03:48:52 +0900 Subject: samples: bpf: Fix bpf programs with kprobe/sys_connect event Currently, BPF programs with kprobe/sys_connect does not work properly. Commit 34745aed515c ("samples/bpf: fix kprobe attachment issue on x64") This commit modifies the bpf_load behavior of kprobe events in the x64 architecture. If the current kprobe event target starts with "sys_*", add the prefix "__x64_" to the front of the event. Appending "__x64_" prefix with kprobe/sys_* event was appropriate as a solution to most of the problems caused by the commit below. commit d5a00528b58c ("syscalls/core, syscalls/x86: Rename struct pt_regs-based sys_*() to __x64_sys_*()") However, there is a problem with the sys_connect kprobe event that does not work properly. For __sys_connect event, parameters can be fetched normally, but for __x64_sys_connect, parameters cannot be fetched. ffffffff818d3520 <__x64_sys_connect>: ffffffff818d3520: e8 fb df 32 00 callq 0xffffffff81c01520 <__fentry__> ffffffff818d3525: 48 8b 57 60 movq 96(%rdi), %rdx ffffffff818d3529: 48 8b 77 68 movq 104(%rdi), %rsi ffffffff818d352d: 48 8b 7f 70 movq 112(%rdi), %rdi ffffffff818d3531: e8 1a ff ff ff callq 0xffffffff818d3450 <__sys_connect> ffffffff818d3536: 48 98 cltq ffffffff818d3538: c3 retq ffffffff818d3539: 0f 1f 80 00 00 00 00 nopl (%rax) As the assembly code for __x64_sys_connect shows, parameters should be fetched and set into rdi, rsi, rdx registers prior to calling __sys_connect. 
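Put differently, a kprobe placed on __x64_sys_connect only sees the wrapper's single struct pt_regs * argument, and the user-visible parameters have to be read through it. Condensed into a sketch from the changes below (the real programs additionally build the section name with the SYSCALL() helper from trace_common.h):

SEC("kprobe/__x64_sys_connect")
int trace_connect(struct pt_regs *ctx)
{
	/* PARM1 of the wrapper is the pt_regs holding the real syscall args */
	struct pt_regs *real_regs = (struct pt_regs *)PT_REGS_PARM1_CORE(ctx);
	void *uaddr = (void *)PT_REGS_PARM2_CORE(real_regs);	/* struct sockaddr * */
	int addrlen = (int)PT_REGS_PARM3_CORE(real_regs);

	/* ... */
	return 0;
}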
Because of this problem, this commit fixes the sys_connect event by first getting the value of the rdi register and then the value of the rdi, rsi, and rdx register through an offset based on that value. Fixes: 34745aed515c ("samples/bpf: fix kprobe attachment issue on x64") Signed-off-by: Daniel T. Lee Signed-off-by: Daniel Borkmann Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200707184855.30968-2-danieltimlee@gmail.com --- samples/bpf/map_perf_test_kern.c | 9 ++++++--- samples/bpf/test_map_in_map_kern.c | 9 ++++++--- samples/bpf/test_probe_write_user_kern.c | 9 ++++++--- 3 files changed, 18 insertions(+), 9 deletions(-) diff --git a/samples/bpf/map_perf_test_kern.c b/samples/bpf/map_perf_test_kern.c index 12e91ae64d4d..c9b31193ca12 100644 --- a/samples/bpf/map_perf_test_kern.c +++ b/samples/bpf/map_perf_test_kern.c @@ -11,6 +11,8 @@ #include #include "bpf_legacy.h" #include +#include +#include "trace_common.h" #define MAX_ENTRIES 1000 #define MAX_NR_CPUS 1024 @@ -154,9 +156,10 @@ int stress_percpu_hmap_alloc(struct pt_regs *ctx) return 0; } -SEC("kprobe/sys_connect") +SEC("kprobe/" SYSCALL(sys_connect)) int stress_lru_hmap_alloc(struct pt_regs *ctx) { + struct pt_regs *real_regs = (struct pt_regs *)PT_REGS_PARM1_CORE(ctx); char fmt[] = "Failed at stress_lru_hmap_alloc. ret:%dn"; union { u16 dst6[8]; @@ -175,8 +178,8 @@ int stress_lru_hmap_alloc(struct pt_regs *ctx) long val = 1; u32 key = 0; - in6 = (struct sockaddr_in6 *)PT_REGS_PARM2(ctx); - addrlen = (int)PT_REGS_PARM3(ctx); + in6 = (struct sockaddr_in6 *)PT_REGS_PARM2_CORE(real_regs); + addrlen = (int)PT_REGS_PARM3_CORE(real_regs); if (addrlen != sizeof(*in6)) return 0; diff --git a/samples/bpf/test_map_in_map_kern.c b/samples/bpf/test_map_in_map_kern.c index 6cee61e8ce9b..36a203e69064 100644 --- a/samples/bpf/test_map_in_map_kern.c +++ b/samples/bpf/test_map_in_map_kern.c @@ -13,6 +13,8 @@ #include #include "bpf_legacy.h" #include +#include +#include "trace_common.h" #define MAX_NR_PORTS 65536 @@ -102,9 +104,10 @@ static __always_inline int do_inline_hash_lookup(void *inner_map, u32 port) return result ? *result : -ENOENT; } -SEC("kprobe/sys_connect") +SEC("kprobe/" SYSCALL(sys_connect)) int trace_sys_connect(struct pt_regs *ctx) { + struct pt_regs *real_regs = (struct pt_regs *)PT_REGS_PARM1_CORE(ctx); struct sockaddr_in6 *in6; u16 test_case, port, dst6[8]; int addrlen, ret, inline_ret, ret_key = 0; @@ -112,8 +115,8 @@ int trace_sys_connect(struct pt_regs *ctx) void *outer_map, *inner_map; bool inline_hash = false; - in6 = (struct sockaddr_in6 *)PT_REGS_PARM2(ctx); - addrlen = (int)PT_REGS_PARM3(ctx); + in6 = (struct sockaddr_in6 *)PT_REGS_PARM2_CORE(real_regs); + addrlen = (int)PT_REGS_PARM3_CORE(real_regs); if (addrlen != sizeof(*in6)) return 0; diff --git a/samples/bpf/test_probe_write_user_kern.c b/samples/bpf/test_probe_write_user_kern.c index f033f36a13a3..fd651a65281e 100644 --- a/samples/bpf/test_probe_write_user_kern.c +++ b/samples/bpf/test_probe_write_user_kern.c @@ -10,6 +10,8 @@ #include #include #include +#include +#include "trace_common.h" struct bpf_map_def SEC("maps") dnat_map = { .type = BPF_MAP_TYPE_HASH, @@ -26,13 +28,14 @@ struct bpf_map_def SEC("maps") dnat_map = { * This example sits on a syscall, and the syscall ABI is relatively stable * of course, across platforms, and over time, the ABI may change. 
*/ -SEC("kprobe/sys_connect") +SEC("kprobe/" SYSCALL(sys_connect)) int bpf_prog1(struct pt_regs *ctx) { + struct pt_regs *real_regs = (struct pt_regs *)PT_REGS_PARM1_CORE(ctx); + void *sockaddr_arg = (void *)PT_REGS_PARM2_CORE(real_regs); + int sockaddr_len = (int)PT_REGS_PARM3_CORE(real_regs); struct sockaddr_in new_addr, orig_addr = {}; struct sockaddr_in *mapped_addr; - void *sockaddr_arg = (void *)PT_REGS_PARM2(ctx); - int sockaddr_len = (int)PT_REGS_PARM3(ctx); if (sockaddr_len > sizeof(orig_addr)) return 0; -- cgit v1.2.3 From 88795b4adb01a30fbfd75ef1c1ef73b4442e38b2 Mon Sep 17 00:00:00 2001 From: "Daniel T. Lee" Date: Wed, 8 Jul 2020 03:48:53 +0900 Subject: samples: bpf: Refactor BPF map in map test with libbpf From commit 646f02ffdd49 ("libbpf: Add BTF-defined map-in-map support"), a way to define internal map in BTF-defined map has been added. Instead of using previous 'inner_map_idx' definition, the structure to be used for the inner map can be directly defined using array directive. __array(values, struct inner_map) This commit refactors map in map test program with libbpf by explicitly defining inner map with BTF-defined format. Signed-off-by: Daniel T. Lee Signed-off-by: Daniel Borkmann Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200707184855.30968-3-danieltimlee@gmail.com --- samples/bpf/Makefile | 2 +- samples/bpf/test_map_in_map_kern.c | 85 +++++++++++++++++++------------------- samples/bpf/test_map_in_map_user.c | 53 +++++++++++++++++++++--- 3 files changed, 91 insertions(+), 49 deletions(-) diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile index 8403e4762306..f87ee02073ba 100644 --- a/samples/bpf/Makefile +++ b/samples/bpf/Makefile @@ -93,7 +93,7 @@ sampleip-objs := sampleip_user.o $(TRACE_HELPERS) tc_l2_redirect-objs := bpf_load.o tc_l2_redirect_user.o lwt_len_hist-objs := bpf_load.o lwt_len_hist_user.o xdp_tx_iptunnel-objs := xdp_tx_iptunnel_user.o -test_map_in_map-objs := bpf_load.o test_map_in_map_user.o +test_map_in_map-objs := test_map_in_map_user.o per_socket_stats_example-objs := cookie_uid_helper_example.o xdp_redirect-objs := xdp_redirect_user.o xdp_redirect_map-objs := xdp_redirect_map_user.o diff --git a/samples/bpf/test_map_in_map_kern.c b/samples/bpf/test_map_in_map_kern.c index 36a203e69064..8def45c5b697 100644 --- a/samples/bpf/test_map_in_map_kern.c +++ b/samples/bpf/test_map_in_map_kern.c @@ -11,7 +11,6 @@ #include #include #include -#include "bpf_legacy.h" #include #include #include "trace_common.h" @@ -19,60 +18,60 @@ #define MAX_NR_PORTS 65536 /* map #0 */ -struct bpf_map_def_legacy SEC("maps") port_a = { - .type = BPF_MAP_TYPE_ARRAY, - .key_size = sizeof(u32), - .value_size = sizeof(int), - .max_entries = MAX_NR_PORTS, -}; +struct inner_a { + __uint(type, BPF_MAP_TYPE_ARRAY); + __type(key, u32); + __type(value, int); + __uint(max_entries, MAX_NR_PORTS); +} port_a SEC(".maps"); /* map #1 */ -struct bpf_map_def_legacy SEC("maps") port_h = { - .type = BPF_MAP_TYPE_HASH, - .key_size = sizeof(u32), - .value_size = sizeof(int), - .max_entries = 1, -}; +struct inner_h { + __uint(type, BPF_MAP_TYPE_HASH); + __type(key, u32); + __type(value, int); + __uint(max_entries, 1); +} port_h SEC(".maps"); /* map #2 */ -struct bpf_map_def_legacy SEC("maps") reg_result_h = { - .type = BPF_MAP_TYPE_HASH, - .key_size = sizeof(u32), - .value_size = sizeof(int), - .max_entries = 1, -}; +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __type(key, u32); + __type(value, int); + __uint(max_entries, 1); +} reg_result_h SEC(".maps"); /* map #3 */ -struct 
bpf_map_def_legacy SEC("maps") inline_result_h = { - .type = BPF_MAP_TYPE_HASH, - .key_size = sizeof(u32), - .value_size = sizeof(int), - .max_entries = 1, -}; +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __type(key, u32); + __type(value, int); + __uint(max_entries, 1); +} inline_result_h SEC(".maps"); /* map #4 */ /* Test case #0 */ -struct bpf_map_def_legacy SEC("maps") a_of_port_a = { - .type = BPF_MAP_TYPE_ARRAY_OF_MAPS, - .key_size = sizeof(u32), - .inner_map_idx = 0, /* map_fd[0] is port_a */ - .max_entries = MAX_NR_PORTS, -}; +struct { + __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS); + __uint(max_entries, MAX_NR_PORTS); + __uint(key_size, sizeof(u32)); + __array(values, struct inner_a); /* use inner_a as inner map */ +} a_of_port_a SEC(".maps"); /* map #5 */ /* Test case #1 */ -struct bpf_map_def_legacy SEC("maps") h_of_port_a = { - .type = BPF_MAP_TYPE_HASH_OF_MAPS, - .key_size = sizeof(u32), - .inner_map_idx = 0, /* map_fd[0] is port_a */ - .max_entries = 1, -}; +struct { + __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS); + __uint(max_entries, 1); + __uint(key_size, sizeof(u32)); + __array(values, struct inner_a); /* use inner_a as inner map */ +} h_of_port_a SEC(".maps"); /* map #6 */ /* Test case #2 */ -struct bpf_map_def_legacy SEC("maps") h_of_port_h = { - .type = BPF_MAP_TYPE_HASH_OF_MAPS, - .key_size = sizeof(u32), - .inner_map_idx = 1, /* map_fd[1] is port_h */ - .max_entries = 1, -}; +struct { + __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS); + __uint(max_entries, 1); + __uint(key_size, sizeof(u32)); + __array(values, struct inner_h); /* use inner_h as inner map */ +} h_of_port_h SEC(".maps"); static __always_inline int do_reg_lookup(void *inner_map, u32 port) { diff --git a/samples/bpf/test_map_in_map_user.c b/samples/bpf/test_map_in_map_user.c index eb29bcb76f3f..98656de56b83 100644 --- a/samples/bpf/test_map_in_map_user.c +++ b/samples/bpf/test_map_in_map_user.c @@ -11,7 +11,9 @@ #include #include #include -#include "bpf_load.h" +#include + +static int map_fd[7]; #define PORT_A (map_fd[0]) #define PORT_H (map_fd[1]) @@ -113,18 +115,59 @@ static void test_map_in_map(void) int main(int argc, char **argv) { struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY}; + struct bpf_link *link = NULL; + struct bpf_program *prog; + struct bpf_object *obj; char filename[256]; - assert(!setrlimit(RLIMIT_MEMLOCK, &r)); + if (setrlimit(RLIMIT_MEMLOCK, &r)) { + perror("setrlimit(RLIMIT_MEMLOCK)"); + return 1; + } snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); + obj = bpf_object__open_file(filename, NULL); + if (libbpf_get_error(obj)) { + fprintf(stderr, "ERROR: opening BPF object file failed\n"); + return 0; + } - if (load_bpf_file(filename)) { - printf("%s", bpf_log_buf); - return 1; + prog = bpf_object__find_program_by_name(obj, "trace_sys_connect"); + if (!prog) { + printf("finding a prog in obj file failed\n"); + goto cleanup; + } + + /* load BPF program */ + if (bpf_object__load(obj)) { + fprintf(stderr, "ERROR: loading BPF object file failed\n"); + goto cleanup; + } + + map_fd[0] = bpf_object__find_map_fd_by_name(obj, "port_a"); + map_fd[1] = bpf_object__find_map_fd_by_name(obj, "port_h"); + map_fd[2] = bpf_object__find_map_fd_by_name(obj, "reg_result_h"); + map_fd[3] = bpf_object__find_map_fd_by_name(obj, "inline_result_h"); + map_fd[4] = bpf_object__find_map_fd_by_name(obj, "a_of_port_a"); + map_fd[5] = bpf_object__find_map_fd_by_name(obj, "h_of_port_a"); + map_fd[6] = bpf_object__find_map_fd_by_name(obj, "h_of_port_h"); + if (map_fd[0] < 0 || map_fd[1] < 0 || map_fd[2] < 0 || + map_fd[3] 
< 0 || map_fd[4] < 0 || map_fd[5] < 0 || map_fd[6] < 0) { + fprintf(stderr, "ERROR: finding a map in obj file failed\n"); + goto cleanup; + } + + link = bpf_program__attach(prog); + if (libbpf_get_error(link)) { + fprintf(stderr, "ERROR: bpf_program__attach failed\n"); + link = NULL; + goto cleanup; } test_map_in_map(); +cleanup: + bpf_link__destroy(link); + bpf_object__close(obj); return 0; } -- cgit v1.2.3 From cc7f641d637bef0b31194d28667553f77c4ef947 Mon Sep 17 00:00:00 2001 From: "Daniel T. Lee" Date: Wed, 8 Jul 2020 03:48:54 +0900 Subject: samples: bpf: Refactor BPF map performance test with libbpf Previously, in order to set the numa_node attribute at the time of map creation using "libbpf", it was necessary to call bpf_create_map_node() directly (bpf_load approach), instead of calling bpf_object_load() that handles everything on its own, including map creation. And because of this problem, this sample had problems with refactoring from bpf_load to libbbpf. However, by commit 1bdb6c9a1c43 ("libbpf: Add a bunch of attribute getters/setters for map definitions") added the numa_node attribute and allowed it to be set in the map. By using libbpf instead of bpf_load, the inner map definition has been explicitly declared with BTF-defined format. Also, the element of ARRAY_OF_MAPS was also statically specified using the BTF format. And for this reason some logic in fixup_map() was not needed and changed or removed. Signed-off-by: Daniel T. Lee Signed-off-by: Daniel Borkmann Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200707184855.30968-4-danieltimlee@gmail.com --- samples/bpf/map_perf_test_kern.c | 179 ++++++++++++++++++++------------------- samples/bpf/map_perf_test_user.c | 164 ++++++++++++++++++++++------------- 2 files changed, 196 insertions(+), 147 deletions(-) diff --git a/samples/bpf/map_perf_test_kern.c b/samples/bpf/map_perf_test_kern.c index c9b31193ca12..8773f22b6a98 100644 --- a/samples/bpf/map_perf_test_kern.c +++ b/samples/bpf/map_perf_test_kern.c @@ -9,7 +9,6 @@ #include #include #include -#include "bpf_legacy.h" #include #include #include "trace_common.h" @@ -17,89 +16,93 @@ #define MAX_ENTRIES 1000 #define MAX_NR_CPUS 1024 -struct bpf_map_def_legacy SEC("maps") hash_map = { - .type = BPF_MAP_TYPE_HASH, - .key_size = sizeof(u32), - .value_size = sizeof(long), - .max_entries = MAX_ENTRIES, +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __type(key, u32); + __type(value, long); + __uint(max_entries, MAX_ENTRIES); +} hash_map SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_LRU_HASH); + __type(key, u32); + __type(value, long); + __uint(max_entries, 10000); +} lru_hash_map SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_LRU_HASH); + __type(key, u32); + __type(value, long); + __uint(max_entries, 10000); + __uint(map_flags, BPF_F_NO_COMMON_LRU); +} nocommon_lru_hash_map SEC(".maps"); + +struct inner_lru { + __uint(type, BPF_MAP_TYPE_LRU_HASH); + __type(key, u32); + __type(value, long); + __uint(max_entries, MAX_ENTRIES); + __uint(map_flags, BPF_F_NUMA_NODE); + __uint(numa_node, 0); +} inner_lru_hash_map SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS); + __uint(max_entries, MAX_NR_CPUS); + __uint(key_size, sizeof(u32)); + __array(values, struct inner_lru); /* use inner_lru as inner map */ +} array_of_lru_hashs SEC(".maps") = { + /* statically initialize the first element */ + .values = { &inner_lru_hash_map }, }; -struct bpf_map_def_legacy SEC("maps") lru_hash_map = { - .type = BPF_MAP_TYPE_LRU_HASH, - .key_size = sizeof(u32), - 
.value_size = sizeof(long), - .max_entries = 10000, -}; - -struct bpf_map_def_legacy SEC("maps") nocommon_lru_hash_map = { - .type = BPF_MAP_TYPE_LRU_HASH, - .key_size = sizeof(u32), - .value_size = sizeof(long), - .max_entries = 10000, - .map_flags = BPF_F_NO_COMMON_LRU, -}; - -struct bpf_map_def_legacy SEC("maps") inner_lru_hash_map = { - .type = BPF_MAP_TYPE_LRU_HASH, - .key_size = sizeof(u32), - .value_size = sizeof(long), - .max_entries = MAX_ENTRIES, - .map_flags = BPF_F_NUMA_NODE, - .numa_node = 0, -}; - -struct bpf_map_def_legacy SEC("maps") array_of_lru_hashs = { - .type = BPF_MAP_TYPE_ARRAY_OF_MAPS, - .key_size = sizeof(u32), - .max_entries = MAX_NR_CPUS, -}; - -struct bpf_map_def_legacy SEC("maps") percpu_hash_map = { - .type = BPF_MAP_TYPE_PERCPU_HASH, - .key_size = sizeof(u32), - .value_size = sizeof(long), - .max_entries = MAX_ENTRIES, -}; - -struct bpf_map_def_legacy SEC("maps") hash_map_alloc = { - .type = BPF_MAP_TYPE_HASH, - .key_size = sizeof(u32), - .value_size = sizeof(long), - .max_entries = MAX_ENTRIES, - .map_flags = BPF_F_NO_PREALLOC, -}; - -struct bpf_map_def_legacy SEC("maps") percpu_hash_map_alloc = { - .type = BPF_MAP_TYPE_PERCPU_HASH, - .key_size = sizeof(u32), - .value_size = sizeof(long), - .max_entries = MAX_ENTRIES, - .map_flags = BPF_F_NO_PREALLOC, -}; - -struct bpf_map_def_legacy SEC("maps") lpm_trie_map_alloc = { - .type = BPF_MAP_TYPE_LPM_TRIE, - .key_size = 8, - .value_size = sizeof(long), - .max_entries = 10000, - .map_flags = BPF_F_NO_PREALLOC, -}; - -struct bpf_map_def_legacy SEC("maps") array_map = { - .type = BPF_MAP_TYPE_ARRAY, - .key_size = sizeof(u32), - .value_size = sizeof(long), - .max_entries = MAX_ENTRIES, -}; - -struct bpf_map_def_legacy SEC("maps") lru_hash_lookup_map = { - .type = BPF_MAP_TYPE_LRU_HASH, - .key_size = sizeof(u32), - .value_size = sizeof(long), - .max_entries = MAX_ENTRIES, -}; - -SEC("kprobe/sys_getuid") +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_HASH); + __uint(key_size, sizeof(u32)); + __uint(value_size, sizeof(long)); + __uint(max_entries, MAX_ENTRIES); +} percpu_hash_map SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __type(key, u32); + __type(value, long); + __uint(max_entries, MAX_ENTRIES); + __uint(map_flags, BPF_F_NO_PREALLOC); +} hash_map_alloc SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_HASH); + __uint(key_size, sizeof(u32)); + __uint(value_size, sizeof(long)); + __uint(max_entries, MAX_ENTRIES); + __uint(map_flags, BPF_F_NO_PREALLOC); +} percpu_hash_map_alloc SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_LPM_TRIE); + __uint(key_size, 8); + __uint(value_size, sizeof(long)); + __uint(max_entries, 10000); + __uint(map_flags, BPF_F_NO_PREALLOC); +} lpm_trie_map_alloc SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __type(key, u32); + __type(value, long); + __uint(max_entries, MAX_ENTRIES); +} array_map SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_LRU_HASH); + __type(key, u32); + __type(value, long); + __uint(max_entries, MAX_ENTRIES); +} lru_hash_lookup_map SEC(".maps"); + +SEC("kprobe/" SYSCALL(sys_getuid)) int stress_hmap(struct pt_regs *ctx) { u32 key = bpf_get_current_pid_tgid(); @@ -114,7 +117,7 @@ int stress_hmap(struct pt_regs *ctx) return 0; } -SEC("kprobe/sys_geteuid") +SEC("kprobe/" SYSCALL(sys_geteuid)) int stress_percpu_hmap(struct pt_regs *ctx) { u32 key = bpf_get_current_pid_tgid(); @@ -128,7 +131,7 @@ int stress_percpu_hmap(struct pt_regs *ctx) return 0; } -SEC("kprobe/sys_getgid") +SEC("kprobe/" SYSCALL(sys_getgid)) int 
stress_hmap_alloc(struct pt_regs *ctx) { u32 key = bpf_get_current_pid_tgid(); @@ -142,7 +145,7 @@ int stress_hmap_alloc(struct pt_regs *ctx) return 0; } -SEC("kprobe/sys_getegid") +SEC("kprobe/" SYSCALL(sys_getegid)) int stress_percpu_hmap_alloc(struct pt_regs *ctx) { u32 key = bpf_get_current_pid_tgid(); @@ -236,7 +239,7 @@ done: return 0; } -SEC("kprobe/sys_gettid") +SEC("kprobe/" SYSCALL(sys_gettid)) int stress_lpm_trie_map_alloc(struct pt_regs *ctx) { union { @@ -258,7 +261,7 @@ int stress_lpm_trie_map_alloc(struct pt_regs *ctx) return 0; } -SEC("kprobe/sys_getpgid") +SEC("kprobe/" SYSCALL(sys_getpgid)) int stress_hash_map_lookup(struct pt_regs *ctx) { u32 key = 1, i; @@ -271,7 +274,7 @@ int stress_hash_map_lookup(struct pt_regs *ctx) return 0; } -SEC("kprobe/sys_getppid") +SEC("kprobe/" SYSCALL(sys_getppid)) int stress_array_map_lookup(struct pt_regs *ctx) { u32 key = 1, i; diff --git a/samples/bpf/map_perf_test_user.c b/samples/bpf/map_perf_test_user.c index fe5564bff39b..8b13230b4c46 100644 --- a/samples/bpf/map_perf_test_user.c +++ b/samples/bpf/map_perf_test_user.c @@ -11,7 +11,6 @@ #include #include #include -#include #include #include #include @@ -19,7 +18,7 @@ #include #include -#include "bpf_load.h" +#include #define TEST_BIT(t) (1U << (t)) #define MAX_NR_CPUS 1024 @@ -61,12 +60,18 @@ const char *test_map_names[NR_TESTS] = { [LRU_HASH_LOOKUP] = "lru_hash_lookup_map", }; +enum map_idx { + array_of_lru_hashs_idx, + hash_map_alloc_idx, + lru_hash_lookup_idx, + NR_IDXES, +}; + +static int map_fd[NR_IDXES]; + static int test_flags = ~0; static uint32_t num_map_entries; static uint32_t inner_lru_hash_size; -static int inner_lru_hash_idx = -1; -static int array_of_lru_hashs_idx = -1; -static int lru_hash_lookup_idx = -1; static int lru_hash_lookup_test_entries = 32; static uint32_t max_cnt = 1000000; @@ -122,30 +127,30 @@ static void do_test_lru(enum test_type test, int cpu) __u64 start_time; int i, ret; - if (test == INNER_LRU_HASH_PREALLOC) { + if (test == INNER_LRU_HASH_PREALLOC && cpu) { + /* If CPU is not 0, create inner_lru hash map and insert the fd + * value into the array_of_lru_hash map. 
In case of CPU 0, + * 'inner_lru_hash_map' was statically inserted on the map init + */ int outer_fd = map_fd[array_of_lru_hashs_idx]; unsigned int mycpu, mynode; assert(cpu < MAX_NR_CPUS); - if (cpu) { - ret = syscall(__NR_getcpu, &mycpu, &mynode, NULL); - assert(!ret); - - inner_lru_map_fds[cpu] = - bpf_create_map_node(BPF_MAP_TYPE_LRU_HASH, - test_map_names[INNER_LRU_HASH_PREALLOC], - sizeof(uint32_t), - sizeof(long), - inner_lru_hash_size, 0, - mynode); - if (inner_lru_map_fds[cpu] == -1) { - printf("cannot create BPF_MAP_TYPE_LRU_HASH %s(%d)\n", - strerror(errno), errno); - exit(1); - } - } else { - inner_lru_map_fds[cpu] = map_fd[inner_lru_hash_idx]; + ret = syscall(__NR_getcpu, &mycpu, &mynode, NULL); + assert(!ret); + + inner_lru_map_fds[cpu] = + bpf_create_map_node(BPF_MAP_TYPE_LRU_HASH, + test_map_names[INNER_LRU_HASH_PREALLOC], + sizeof(uint32_t), + sizeof(long), + inner_lru_hash_size, 0, + mynode); + if (inner_lru_map_fds[cpu] == -1) { + printf("cannot create BPF_MAP_TYPE_LRU_HASH %s(%d)\n", + strerror(errno), errno); + exit(1); } ret = bpf_map_update_elem(outer_fd, &cpu, @@ -377,7 +382,8 @@ static void fill_lpm_trie(void) key->data[1] = rand() & 0xff; key->data[2] = rand() & 0xff; key->data[3] = rand() & 0xff; - r = bpf_map_update_elem(map_fd[6], key, &value, 0); + r = bpf_map_update_elem(map_fd[hash_map_alloc_idx], + key, &value, 0); assert(!r); } @@ -388,59 +394,52 @@ static void fill_lpm_trie(void) key->data[3] = 1; value = 128; - r = bpf_map_update_elem(map_fd[6], key, &value, 0); + r = bpf_map_update_elem(map_fd[hash_map_alloc_idx], key, &value, 0); assert(!r); } -static void fixup_map(struct bpf_map_data *map, int idx) +static void fixup_map(struct bpf_object *obj) { + struct bpf_map *map; int i; - if (!strcmp("inner_lru_hash_map", map->name)) { - inner_lru_hash_idx = idx; - inner_lru_hash_size = map->def.max_entries; - } + bpf_object__for_each_map(map, obj) { + const char *name = bpf_map__name(map); - if (!strcmp("array_of_lru_hashs", map->name)) { - if (inner_lru_hash_idx == -1) { - printf("inner_lru_hash_map must be defined before array_of_lru_hashs\n"); - exit(1); + /* Only change the max_entries for the enabled test(s) */ + for (i = 0; i < NR_TESTS; i++) { + if (!strcmp(test_map_names[i], name) && + (check_test_flags(i))) { + bpf_map__resize(map, num_map_entries); + continue; + } } - map->def.inner_map_idx = inner_lru_hash_idx; - array_of_lru_hashs_idx = idx; } - if (!strcmp("lru_hash_lookup_map", map->name)) - lru_hash_lookup_idx = idx; - - if (num_map_entries <= 0) - return; - inner_lru_hash_size = num_map_entries; - - /* Only change the max_entries for the enabled test(s) */ - for (i = 0; i < NR_TESTS; i++) { - if (!strcmp(test_map_names[i], map->name) && - (check_test_flags(i))) { - map->def.max_entries = num_map_entries; - } - } } int main(int argc, char **argv) { struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY}; + int nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); + struct bpf_link *links[8]; + struct bpf_program *prog; + struct bpf_object *obj; + struct bpf_map *map; char filename[256]; - int num_cpu = 8; + int i = 0; - snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); - setrlimit(RLIMIT_MEMLOCK, &r); + if (setrlimit(RLIMIT_MEMLOCK, &r)) { + perror("setrlimit(RLIMIT_MEMLOCK)"); + return 1; + } if (argc > 1) test_flags = atoi(argv[1]) ? : test_flags; if (argc > 2) - num_cpu = atoi(argv[2]) ? : num_cpu; + nr_cpus = atoi(argv[2]) ? 
: nr_cpus; if (argc > 3) num_map_entries = atoi(argv[3]); @@ -448,14 +447,61 @@ int main(int argc, char **argv) if (argc > 4) max_cnt = atoi(argv[4]); - if (load_bpf_file_fixup_map(filename, fixup_map)) { - printf("%s", bpf_log_buf); - return 1; + snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); + obj = bpf_object__open_file(filename, NULL); + if (libbpf_get_error(obj)) { + fprintf(stderr, "ERROR: opening BPF object file failed\n"); + return 0; + } + + map = bpf_object__find_map_by_name(obj, "inner_lru_hash_map"); + if (libbpf_get_error(map)) { + fprintf(stderr, "ERROR: finding a map in obj file failed\n"); + goto cleanup; + } + + inner_lru_hash_size = bpf_map__max_entries(map); + if (!inner_lru_hash_size) { + fprintf(stderr, "ERROR: failed to get map attribute\n"); + goto cleanup; + } + + /* resize BPF map prior to loading */ + if (num_map_entries > 0) + fixup_map(obj); + + /* load BPF program */ + if (bpf_object__load(obj)) { + fprintf(stderr, "ERROR: loading BPF object file failed\n"); + goto cleanup; + } + + map_fd[0] = bpf_object__find_map_fd_by_name(obj, "array_of_lru_hashs"); + map_fd[1] = bpf_object__find_map_fd_by_name(obj, "hash_map_alloc"); + map_fd[2] = bpf_object__find_map_fd_by_name(obj, "lru_hash_lookup_map"); + if (map_fd[0] < 0 || map_fd[1] < 0 || map_fd[2] < 0) { + fprintf(stderr, "ERROR: finding a map in obj file failed\n"); + goto cleanup; + } + + bpf_object__for_each_program(prog, obj) { + links[i] = bpf_program__attach(prog); + if (libbpf_get_error(links[i])) { + fprintf(stderr, "ERROR: bpf_program__attach failed\n"); + links[i] = NULL; + goto cleanup; + } + i++; } fill_lpm_trie(); - run_perf_test(num_cpu); + run_perf_test(nr_cpus); + +cleanup: + for (i--; i >= 0; i--) + bpf_link__destroy(links[i]); + bpf_object__close(obj); return 0; } -- cgit v1.2.3 From 5cfd607b49db2b7c05fccc2915a528d97bc8f17d Mon Sep 17 00:00:00 2001 From: "Daniel T. Lee" Date: Wed, 8 Jul 2020 03:48:55 +0900 Subject: selftests: bpf: Remove unused bpf_map_def_legacy struct samples/bpf no longer use bpf_map_def_legacy and instead use the libbpf's bpf_map_def or new BTF-defined MAP format. This commit removes unused bpf_map_def_legacy struct from selftests/bpf/bpf_legacy.h. Signed-off-by: Daniel T. 
Lee Signed-off-by: Daniel Borkmann Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200707184855.30968-5-danieltimlee@gmail.com --- tools/testing/selftests/bpf/bpf_legacy.h | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/tools/testing/selftests/bpf/bpf_legacy.h b/tools/testing/selftests/bpf/bpf_legacy.h index 6f8988738bc1..719ab56cdb5d 100644 --- a/tools/testing/selftests/bpf/bpf_legacy.h +++ b/tools/testing/selftests/bpf/bpf_legacy.h @@ -2,20 +2,6 @@ #ifndef __BPF_LEGACY__ #define __BPF_LEGACY__ -/* - * legacy bpf_map_def with extra fields supported only by bpf_load(), do not - * use outside of samples/bpf - */ -struct bpf_map_def_legacy { - unsigned int type; - unsigned int key_size; - unsigned int value_size; - unsigned int max_entries; - unsigned int map_flags; - unsigned int inner_map_idx; - unsigned int numa_node; -}; - #define BPF_ANNOTATE_KV_PAIR(name, type_key, type_val) \ struct ____btf_map_##name { \ type_key key; \ -- cgit v1.2.3 From 625eb8e85e913273cd9441329a82b4e3496b30cd Mon Sep 17 00:00:00 2001 From: Louis Peens Date: Wed, 8 Jul 2020 13:08:27 +0200 Subject: bpf: Fix another bpftool segfault without skeleton code enabled emit_obj_refs_json needs to added the same as with emit_obj_refs_plain to prevent segfaults, similar to Commit "8ae4121bd89e bpf: Fix bpftool without skeleton code enabled"). See the error below: # ./bpftool -p prog { "error": "bpftool built without PID iterator support" },[{ "id": 2, "type": "cgroup_skb", "tag": "7be49e3934a125ba", "gpl_compatible": true, "loaded_at": 1594052789, "uid": 0, "bytes_xlated": 296, "jited": true, "bytes_jited": 203, "bytes_memlock": 4096, "map_ids": [2,3 Segmentation fault (core dumped) The same happens for ./bpftool -p map, as well as ./bpftool -j prog/map. Fixes: d53dee3fe013 ("tools/bpftool: Show info for processes holding BPF map/prog/link/btf FDs") Signed-off-by: Louis Peens Signed-off-by: Daniel Borkmann Reviewed-by: Simon Horman Reviewed-by: Quentin Monnet Link: https://lore.kernel.org/bpf/20200708110827.7673-1-louis.peens@netronome.com --- tools/bpf/bpftool/pids.c | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/bpf/bpftool/pids.c b/tools/bpf/bpftool/pids.c index 7d5416667c85..c0d23ce4a6f4 100644 --- a/tools/bpf/bpftool/pids.c +++ b/tools/bpf/bpftool/pids.c @@ -20,6 +20,7 @@ int build_obj_refs_table(struct obj_refs_table *table, enum bpf_obj_type type) } void delete_obj_refs_table(struct obj_refs_table *table) {} void emit_obj_refs_plain(struct obj_refs_table *table, __u32 id, const char *prefix) {} +void emit_obj_refs_json(struct obj_refs_table *table, __u32 id, json_writer_t *json_writer) {} #else /* BPFTOOL_WITHOUT_SKELETONS */ -- cgit v1.2.3 From 3220fb667842a9725cbb71656f406eadb03c094b Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Tue, 7 Jul 2020 09:12:19 +0200 Subject: selftests/bpf: test_progs use another shell exit on non-actions This is a follow up adjustment to commit 6c92bd5cd465 ("selftests/bpf: Test_progs indicate to shell on non-actions"), that returns shell exit indication EXIT_FAILURE (value 1) when user selects a non-existing test. The problem with using EXIT_FAILURE is that a shell script cannot tell the difference between a non-existing test and the test failing. This patch uses value 2 as shell exit indication. (Aside note unrecognized option parameters use value 64). 
Fixes: 6c92bd5cd465 ("selftests/bpf: Test_progs indicate to shell on non-actions") Signed-off-by: Jesper Dangaard Brouer Signed-off-by: Daniel Borkmann Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/159410593992.1093222.90072558386094370.stgit@firesoul --- tools/testing/selftests/bpf/test_progs.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c index 104e833d0087..65d3f8686e29 100644 --- a/tools/testing/selftests/bpf/test_progs.c +++ b/tools/testing/selftests/bpf/test_progs.c @@ -12,6 +12,8 @@ #include #include /* backtrace */ +#define EXIT_NO_TEST 2 + /* defined in test_progs.h */ struct test_env env = {}; @@ -740,7 +742,7 @@ out: close(env.saved_netns_fd); if (env.succ_cnt + env.fail_cnt + env.skip_cnt == 0) - return EXIT_FAILURE; + return EXIT_NO_TEST; return env.fail_cnt ? EXIT_FAILURE : EXIT_SUCCESS; } -- cgit v1.2.3 From b8c50df0cb3eb9008f8372e4ff0317eee993b8d1 Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Tue, 7 Jul 2020 09:12:25 +0200 Subject: selftests/bpf: test_progs avoid minus shell exit codes There are a number of places in test_progs that use minus-1 as the argument to exit(). This is confusing as a process exit status is masked to be a number between 0 and 255 as defined in man exit(3). Thus, users will see status 255 instead of minus-1. This patch use positive exit code 3 instead of minus-1. These cases are put in the same group of infrastructure setup errors. Fixes: fd27b1835e70 ("selftests/bpf: Reset process and thread affinity after each test/sub-test") Fixes: 811d7e375d08 ("bpf: selftests: Restore netns after each test") Signed-off-by: Jesper Dangaard Brouer Signed-off-by: Daniel Borkmann Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/159410594499.1093222.11080787853132708654.stgit@firesoul --- tools/testing/selftests/bpf/test_progs.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c index 65d3f8686e29..b1e4dadacd9b 100644 --- a/tools/testing/selftests/bpf/test_progs.c +++ b/tools/testing/selftests/bpf/test_progs.c @@ -13,6 +13,7 @@ #include /* backtrace */ #define EXIT_NO_TEST 2 +#define EXIT_ERR_SETUP_INFRA 3 /* defined in test_progs.h */ struct test_env env = {}; @@ -113,13 +114,13 @@ static void reset_affinity() { if (err < 0) { stdio_restore(); fprintf(stderr, "Failed to reset process affinity: %d!\n", err); - exit(-1); + exit(EXIT_ERR_SETUP_INFRA); } err = pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset); if (err < 0) { stdio_restore(); fprintf(stderr, "Failed to reset thread affinity: %d!\n", err); - exit(-1); + exit(EXIT_ERR_SETUP_INFRA); } } @@ -128,7 +129,7 @@ static void save_netns(void) env.saved_netns_fd = open("/proc/self/ns/net", O_RDONLY); if (env.saved_netns_fd == -1) { perror("open(/proc/self/ns/net)"); - exit(-1); + exit(EXIT_ERR_SETUP_INFRA); } } @@ -137,7 +138,7 @@ static void restore_netns(void) if (setns(env.saved_netns_fd, CLONE_NEWNET) == -1) { stdio_restore(); perror("setns(CLONE_NEWNS)"); - exit(-1); + exit(EXIT_ERR_SETUP_INFRA); } } -- cgit v1.2.3 From bfc96656a766bd0566565d8a7b6232b2b036fdfb Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 7 Jul 2020 18:53:13 -0700 Subject: libbpf: Make BTF finalization strict With valid ELF and valid BTF, there is no reason (apart from bugs) why BTF finalization should fail. So make it strict and return error if it fails. 
This makes CO-RE relocation more reliable, as they are not going to be just silently skipped, if BTF finalization failed. Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20200708015318.3827358-2-andriin@fb.com --- tools/lib/bpf/libbpf.c | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 88a483627a2b..efd857ebd5ee 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -2473,19 +2473,11 @@ static int bpf_object__finalize_btf(struct bpf_object *obj) return 0; err = btf__finalize_data(obj, obj->btf); - if (!err) - return 0; - - pr_warn("Error finalizing %s: %d.\n", BTF_ELF_SEC, err); - btf__free(obj->btf); - obj->btf = NULL; - btf_ext__free(obj->btf_ext); - obj->btf_ext = NULL; - - if (libbpf_needs_btf(obj)) { - pr_warn("BTF is required, but is missing or corrupted.\n"); - return -ENOENT; + if (err) { + pr_warn("Error finalizing %s: %d.\n", BTF_ELF_SEC, err); + return err; } + return 0; } -- cgit v1.2.3 From 81372e121802fd57892a0b44d93cc747d9568627 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 7 Jul 2020 18:53:14 -0700 Subject: libbpf: Add btf__set_fd() for more control over loaded BTF FD Add setter for BTF FD to allow application more fine-grained control in more advanced scenarios. Storing BTF FD inside `struct btf` provides little benefit and probably would be better done differently (e.g., btf__load() could just return FD on success), but we are stuck with this due to backwards compatibility. The main problem is that it's impossible to load BTF and than free user-space memory, but keep FD intact, because `struct btf` assumes ownership of that FD upon successful load and will attempt to close it during btf__free(). To allow callers (e.g., libbpf itself for BTF sanitization) to have more control over this, add btf__set_fd() to allow to reset FD arbitrarily, if necessary. 
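As an example of the intended use (mirroring the sanitization rework in the next patch), a caller can load a temporary copy of the BTF and hand the resulting FD over to the long-lived struct btf -- a sketch:

/* kern_btf is a temporary, kernel-loadable copy of orig_btf */
if (!btf__load(kern_btf)) {
	btf__set_fd(orig_btf, btf__fd(kern_btf));	/* orig_btf now owns the FD */
	btf__set_fd(kern_btf, -1);			/* btf__free() won't close it */
}
btf__free(kern_btf);	/* frees memory only; the kernel FD stays with orig_btf */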
Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20200708015318.3827358-3-andriin@fb.com --- tools/lib/bpf/btf.c | 7 ++++++- tools/lib/bpf/btf.h | 1 + tools/lib/bpf/libbpf.map | 1 + 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index bfef3d606b54..c8861c9e3635 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -389,7 +389,7 @@ void btf__free(struct btf *btf) if (!btf) return; - if (btf->fd != -1) + if (btf->fd >= 0) close(btf->fd); free(btf->data); @@ -700,6 +700,11 @@ int btf__fd(const struct btf *btf) return btf->fd; } +void btf__set_fd(struct btf *btf, int fd) +{ + btf->fd = fd; +} + const void *btf__get_raw_data(const struct btf *btf, __u32 *size) { *size = btf->data_size; diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h index 06cd1731c154..173eff23c472 100644 --- a/tools/lib/bpf/btf.h +++ b/tools/lib/bpf/btf.h @@ -79,6 +79,7 @@ LIBBPF_API __s64 btf__resolve_size(const struct btf *btf, __u32 type_id); LIBBPF_API int btf__resolve_type(const struct btf *btf, __u32 type_id); LIBBPF_API int btf__align_of(const struct btf *btf, __u32 id); LIBBPF_API int btf__fd(const struct btf *btf); +LIBBPF_API void btf__set_fd(struct btf *btf, int fd); LIBBPF_API const void *btf__get_raw_data(const struct btf *btf, __u32 *size); LIBBPF_API const char *btf__name_by_offset(const struct btf *btf, __u32 offset); LIBBPF_API int btf__get_from_id(__u32 id, struct btf **btf); diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index 6544d2cd1ed6..c5d5c7664c3b 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -288,4 +288,5 @@ LIBBPF_0.1.0 { bpf_map__value_size; bpf_program__autoload; bpf_program__set_autoload; + btf__set_fd; } LIBBPF_0.0.9; -- cgit v1.2.3 From 0f0e55d8247c0742a9b026fc097fcc605383b9db Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 7 Jul 2020 18:53:15 -0700 Subject: libbpf: Improve BTF sanitization handling Change sanitization process to preserve original BTF, which might be used by libbpf itself for Kconfig externs, CO-RE relocs, etc, even if kernel is old and doesn't support BTF. To achieve that, if libbpf detects the need for BTF sanitization, it would clone original BTF, sanitize it in-place, attempt to load it into kernel, and if successful, will preserve loaded BTF FD in original `struct btf`, while freeing sanitized local copy. If kernel doesn't support any BTF, original btf and btf_ext will still be preserved to be used later for CO-RE relocation and other BTF-dependent libbpf features, which don't dependon kernel BTF support. Patch takes care to not specify BTF and BTF.ext features when loading BPF programs and/or maps, if it was detected that kernel doesn't support BTF features. 
Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20200708015318.3827358-4-andriin@fb.com --- tools/lib/bpf/libbpf.c | 103 ++++++++++++++++++++++++++++--------------------- 1 file changed, 58 insertions(+), 45 deletions(-) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index efd857ebd5ee..460e5790a95f 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -2338,18 +2338,23 @@ static bool section_have_execinstr(struct bpf_object *obj, int idx) return false; } -static void bpf_object__sanitize_btf(struct bpf_object *obj) +static bool btf_needs_sanitization(struct bpf_object *obj) +{ + bool has_func_global = obj->caps.btf_func_global; + bool has_datasec = obj->caps.btf_datasec; + bool has_func = obj->caps.btf_func; + + return !has_func || !has_datasec || !has_func_global; +} + +static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf) { bool has_func_global = obj->caps.btf_func_global; bool has_datasec = obj->caps.btf_datasec; bool has_func = obj->caps.btf_func; - struct btf *btf = obj->btf; struct btf_type *t; int i, j, vlen; - if (!obj->btf || (has_func && has_datasec && has_func_global)) - return; - for (i = 1; i <= btf__get_nr_types(btf); i++) { t = (struct btf_type *)btf__type_by_id(btf, i); @@ -2402,17 +2407,6 @@ static void bpf_object__sanitize_btf(struct bpf_object *obj) } } -static void bpf_object__sanitize_btf_ext(struct bpf_object *obj) -{ - if (!obj->btf_ext) - return; - - if (!obj->caps.btf_func) { - btf_ext__free(obj->btf_ext); - obj->btf_ext = NULL; - } -} - static bool libbpf_needs_btf(const struct bpf_object *obj) { return obj->efile.btf_maps_shndx >= 0 || @@ -2530,30 +2524,50 @@ static int bpf_object__load_vmlinux_btf(struct bpf_object *obj) static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj) { + struct btf *kern_btf = obj->btf; + bool btf_mandatory, sanitize; int err = 0; if (!obj->btf) return 0; - bpf_object__sanitize_btf(obj); - bpf_object__sanitize_btf_ext(obj); + sanitize = btf_needs_sanitization(obj); + if (sanitize) { + const void *orig_data; + void *san_data; + __u32 sz; - err = btf__load(obj->btf); - if (err) { - pr_warn("Error loading %s into kernel: %d.\n", - BTF_ELF_SEC, err); - btf__free(obj->btf); - obj->btf = NULL; - /* btf_ext can't exist without btf, so free it as well */ - if (obj->btf_ext) { - btf_ext__free(obj->btf_ext); - obj->btf_ext = NULL; - } + /* clone BTF to sanitize a copy and leave the original intact */ + orig_data = btf__get_raw_data(obj->btf, &sz); + san_data = malloc(sz); + if (!san_data) + return -ENOMEM; + memcpy(san_data, orig_data, sz); + kern_btf = btf__new(san_data, sz); + if (IS_ERR(kern_btf)) + return PTR_ERR(kern_btf); - if (kernel_needs_btf(obj)) - return err; + bpf_object__sanitize_btf(obj, kern_btf); } - return 0; + + err = btf__load(kern_btf); + if (sanitize) { + if (!err) { + /* move fd to libbpf's BTF */ + btf__set_fd(obj->btf, btf__fd(kern_btf)); + btf__set_fd(kern_btf, -1); + } + btf__free(kern_btf); + } + if (err) { + btf_mandatory = kernel_needs_btf(obj); + pr_warn("Error loading .BTF into kernel: %d. %s\n", err, + btf_mandatory ? "BTF is mandatory, can't proceed." 
+ : "BTF is optional, ignoring."); + if (!btf_mandatory) + err = 0; + } + return err; } static int bpf_object__elf_collect(struct bpf_object *obj) @@ -3777,7 +3791,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map) create_attr.btf_fd = 0; create_attr.btf_key_type_id = 0; create_attr.btf_value_type_id = 0; - if (obj->btf && !bpf_map_find_btf_info(obj, map)) { + if (obj->btf && btf__fd(obj->btf) >= 0 && !bpf_map_find_btf_info(obj, map)) { create_attr.btf_fd = btf__fd(obj->btf); create_attr.btf_key_type_id = map->btf_key_type_id; create_attr.btf_value_type_id = map->btf_value_type_id; @@ -5361,18 +5375,17 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt, load_attr.kern_version = kern_version; load_attr.prog_ifindex = prog->prog_ifindex; } - /* if .BTF.ext was loaded, kernel supports associated BTF for prog */ - if (prog->obj->btf_ext) - btf_fd = bpf_object__btf_fd(prog->obj); - else - btf_fd = -1; - load_attr.prog_btf_fd = btf_fd >= 0 ? btf_fd : 0; - load_attr.func_info = prog->func_info; - load_attr.func_info_rec_size = prog->func_info_rec_size; - load_attr.func_info_cnt = prog->func_info_cnt; - load_attr.line_info = prog->line_info; - load_attr.line_info_rec_size = prog->line_info_rec_size; - load_attr.line_info_cnt = prog->line_info_cnt; + /* specify func_info/line_info only if kernel supports them */ + btf_fd = bpf_object__btf_fd(prog->obj); + if (btf_fd >= 0 && prog->obj->caps.btf_func) { + load_attr.prog_btf_fd = btf_fd; + load_attr.func_info = prog->func_info; + load_attr.func_info_rec_size = prog->func_info_rec_size; + load_attr.func_info_cnt = prog->func_info_cnt; + load_attr.line_info = prog->line_info; + load_attr.line_info_rec_size = prog->line_info_rec_size; + load_attr.line_info_cnt = prog->line_info_cnt; + } load_attr.log_level = prog->log_level; load_attr.prog_flags = prog->prog_flags; -- cgit v1.2.3 From fcda189a5133b6fe50eb63d1062a15be4df9e3de Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 7 Jul 2020 18:53:16 -0700 Subject: selftests/bpf: Add test relying only on CO-RE and no recent kernel features Add a test that relies on CO-RE, but doesn't expect any of the recent features, not available on old kernels. This is useful for Travis CI tests running against very old kernels (e.g., libbpf has 4.9 kernel testing now), to verify that CO-RE still works, even if kernel itself doesn't support BTF yet, as long as there is .BTF embedded into vmlinux image by pahole. Given most of CO-RE doesn't require any kernel awareness of BTF, it is a useful test to validate that libbpf's BTF sanitization is working well even with ancient kernels. 
Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20200708015318.3827358-5-andriin@fb.com --- .../testing/selftests/bpf/prog_tests/core_retro.c | 33 ++++++++++++++++++++++ .../testing/selftests/bpf/progs/test_core_retro.c | 30 ++++++++++++++++++++ 2 files changed, 63 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/core_retro.c create mode 100644 tools/testing/selftests/bpf/progs/test_core_retro.c diff --git a/tools/testing/selftests/bpf/prog_tests/core_retro.c b/tools/testing/selftests/bpf/prog_tests/core_retro.c new file mode 100644 index 000000000000..78e30d3a23d5 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/core_retro.c @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2020 Facebook +#define _GNU_SOURCE +#include +#include "test_core_retro.skel.h" + +void test_core_retro(void) +{ + int err, zero = 0, res, duration = 0; + struct test_core_retro *skel; + + /* load program */ + skel = test_core_retro__open_and_load(); + if (CHECK(!skel, "skel_load", "skeleton open/load failed\n")) + goto out_close; + + /* attach probe */ + err = test_core_retro__attach(skel); + if (CHECK(err, "attach_kprobe", "err %d\n", err)) + goto out_close; + + /* trigger */ + usleep(1); + + err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.results), &zero, &res); + if (CHECK(err, "map_lookup", "failed to lookup result: %d\n", errno)) + goto out_close; + + CHECK(res != getpid(), "pid_check", "got %d != exp %d\n", res, getpid()); + +out_close: + test_core_retro__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/progs/test_core_retro.c b/tools/testing/selftests/bpf/progs/test_core_retro.c new file mode 100644 index 000000000000..75c60c3c29cf --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_core_retro.c @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2020 Facebook +#include +#include +#include + +struct task_struct { + int tgid; +} __attribute__((preserve_access_index)); + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, int); + __type(value, int); +} results SEC(".maps"); + +SEC("tp/raw_syscalls/sys_enter") +int handle_sys_enter(void *ctx) +{ + struct task_struct *task = (void *)bpf_get_current_task(); + int tgid = BPF_CORE_READ(task, tgid); + int zero = 0; + + bpf_map_update_elem(&results, &zero, &tgid, 0); + + return 0; +} + +char _license[] SEC("license") = "GPL"; -- cgit v1.2.3 From 0e289487308236903b19273f2ddb4f0adf732b9e Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 7 Jul 2020 18:53:17 -0700 Subject: libbpf: Handle missing BPF_OBJ_GET_INFO_BY_FD gracefully in perf_buffer perf_buffer__new() is relying on BPF_OBJ_GET_INFO_BY_FD availability for few sanity checks. OBJ_GET_INFO for maps is actually much more recent feature than perf_buffer support itself, so this causes unnecessary problems on old kernels before BPF_OBJ_GET_INFO_BY_FD was added. This patch makes those sanity checks optional and just assumes best if command is not supported. If user specified something incorrectly (e.g., wrong map type), kernel will reject it later anyway, except user won't get a nice explanation as to why it failed. This seems like a good trade off for supporting perf_buffer on old kernels. 
Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20200708015318.3827358-6-andriin@fb.com --- tools/lib/bpf/libbpf.c | 31 ++++++++++++++++++++----------- 1 file changed, 20 insertions(+), 11 deletions(-) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 460e5790a95f..6602eb479596 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -8591,7 +8591,7 @@ static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt, struct perf_buffer_params *p) { const char *online_cpus_file = "/sys/devices/system/cpu/online"; - struct bpf_map_info map = {}; + struct bpf_map_info map; char msg[STRERR_BUFSIZE]; struct perf_buffer *pb; bool *online = NULL; @@ -8604,19 +8604,28 @@ static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt, return ERR_PTR(-EINVAL); } + /* best-effort sanity checks */ + memset(&map, 0, sizeof(map)); map_info_len = sizeof(map); err = bpf_obj_get_info_by_fd(map_fd, &map, &map_info_len); if (err) { err = -errno; - pr_warn("failed to get map info for map FD %d: %s\n", - map_fd, libbpf_strerror_r(err, msg, sizeof(msg))); - return ERR_PTR(err); - } - - if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) { - pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n", - map.name); - return ERR_PTR(-EINVAL); + /* if BPF_OBJ_GET_INFO_BY_FD is supported, will return + * -EBADFD, -EFAULT, or -E2BIG on real error + */ + if (err != -EINVAL) { + pr_warn("failed to get map info for map FD %d: %s\n", + map_fd, libbpf_strerror_r(err, msg, sizeof(msg))); + return ERR_PTR(err); + } + pr_debug("failed to get map info for FD %d; API not supported? Ignoring...\n", + map_fd); + } else { + if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) { + pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n", + map.name); + return ERR_PTR(-EINVAL); + } } pb = calloc(1, sizeof(*pb)); @@ -8648,7 +8657,7 @@ static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt, err = pb->cpu_cnt; goto error; } - if (map.max_entries < pb->cpu_cnt) + if (map.max_entries && map.max_entries < pb->cpu_cnt) pb->cpu_cnt = map.max_entries; } -- cgit v1.2.3 From 6984cbc6dfa280687367b9660d8c830518239851 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 7 Jul 2020 18:53:18 -0700 Subject: selftests/bpf: Switch perf_buffer test to tracepoint and skeleton Switch perf_buffer test to use skeleton to avoid use of bpf_prog_load() and make test a bit more succinct. Also switch BPF program to use tracepoint instead of kprobe, as that allows to support older kernels, which had tracepoint support before kprobe support in the form that libbpf expects (i.e., libbpf expects /sys/bus/event_source/devices/kprobe/type, which doesn't always exist on old kernels). 
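The dependency on that sysfs file can be illustrated with a small stand-alone check; this is a sketch for illustration only (the function name is made up, it is not libbpf code):

	#include <stdio.h>
	#include <stdbool.h>

	/* Returns true if the dynamic kprobe PMU exists, i.e. kprobe
	 * attachment via perf_event_open() is expected to work. On kernels
	 * without it (e.g. 4.9), tests fall back to tracepoints instead.
	 */
	static bool kprobe_pmu_available(void)
	{
		FILE *f = fopen("/sys/bus/event_source/devices/kprobe/type", "r");
		int type = -1;

		if (!f)
			return false;
		if (fscanf(f, "%d", &type) != 1)
			type = -1;
		fclose(f);
		return type >= 0;
	}
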
Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20200708015318.3827358-7-andriin@fb.com --- .../testing/selftests/bpf/prog_tests/perf_buffer.c | 42 +++++++--------------- .../testing/selftests/bpf/progs/test_perf_buffer.c | 4 +-- 2 files changed, 14 insertions(+), 32 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/perf_buffer.c b/tools/testing/selftests/bpf/prog_tests/perf_buffer.c index a122ce3b360e..c33ec180b3f2 100644 --- a/tools/testing/selftests/bpf/prog_tests/perf_buffer.c +++ b/tools/testing/selftests/bpf/prog_tests/perf_buffer.c @@ -4,6 +4,7 @@ #include #include #include +#include "test_perf_buffer.skel.h" #include "bpf/libbpf_internal.h" /* AddressSanitizer sometimes crashes due to data dereference below, due to @@ -25,16 +26,11 @@ static void on_sample(void *ctx, int cpu, void *data, __u32 size) void test_perf_buffer(void) { - int err, prog_fd, on_len, nr_on_cpus = 0, nr_cpus, i, duration = 0; - const char *prog_name = "kprobe/sys_nanosleep"; - const char *file = "./test_perf_buffer.o"; + int err, on_len, nr_on_cpus = 0, nr_cpus, i, duration = 0; struct perf_buffer_opts pb_opts = {}; - struct bpf_map *perf_buf_map; + struct test_perf_buffer *skel; cpu_set_t cpu_set, cpu_seen; - struct bpf_program *prog; - struct bpf_object *obj; struct perf_buffer *pb; - struct bpf_link *link; bool *online; nr_cpus = libbpf_num_possible_cpus(); @@ -51,33 +47,21 @@ void test_perf_buffer(void) nr_on_cpus++; /* load program */ - err = bpf_prog_load(file, BPF_PROG_TYPE_KPROBE, &obj, &prog_fd); - if (CHECK(err, "obj_load", "err %d errno %d\n", err, errno)) { - obj = NULL; - goto out_close; - } - - prog = bpf_object__find_program_by_title(obj, prog_name); - if (CHECK(!prog, "find_probe", "prog '%s' not found\n", prog_name)) + skel = test_perf_buffer__open_and_load(); + if (CHECK(!skel, "skel_load", "skeleton open/load failed\n")) goto out_close; - /* load map */ - perf_buf_map = bpf_object__find_map_by_name(obj, "perf_buf_map"); - if (CHECK(!perf_buf_map, "find_perf_buf_map", "not found\n")) - goto out_close; - - /* attach kprobe */ - link = bpf_program__attach_kprobe(prog, false /* retprobe */, - SYS_NANOSLEEP_KPROBE_NAME); - if (CHECK(IS_ERR(link), "attach_kprobe", "err %ld\n", PTR_ERR(link))) + /* attach probe */ + err = test_perf_buffer__attach(skel); + if (CHECK(err, "attach_kprobe", "err %d\n", err)) goto out_close; /* set up perf buffer */ pb_opts.sample_cb = on_sample; pb_opts.ctx = &cpu_seen; - pb = perf_buffer__new(bpf_map__fd(perf_buf_map), 1, &pb_opts); + pb = perf_buffer__new(bpf_map__fd(skel->maps.perf_buf_map), 1, &pb_opts); if (CHECK(IS_ERR(pb), "perf_buf__new", "err %ld\n", PTR_ERR(pb))) - goto out_detach; + goto out_close; /* trigger kprobe on every CPU */ CPU_ZERO(&cpu_seen); @@ -94,7 +78,7 @@ void test_perf_buffer(void) &cpu_set); if (err && CHECK(err, "set_affinity", "cpu #%d, err %d\n", i, err)) - goto out_detach; + goto out_close; usleep(1); } @@ -110,9 +94,7 @@ void test_perf_buffer(void) out_free_pb: perf_buffer__free(pb); -out_detach: - bpf_link__destroy(link); out_close: - bpf_object__close(obj); + test_perf_buffer__destroy(skel); free(online); } diff --git a/tools/testing/selftests/bpf/progs/test_perf_buffer.c b/tools/testing/selftests/bpf/progs/test_perf_buffer.c index ad59c4c9aba8..8207a2dc2f9d 100644 --- a/tools/testing/selftests/bpf/progs/test_perf_buffer.c +++ b/tools/testing/selftests/bpf/progs/test_perf_buffer.c @@ -12,8 +12,8 @@ struct { __uint(value_size, sizeof(int)); } perf_buf_map SEC(".maps"); 
-SEC("kprobe/sys_nanosleep") -int BPF_KPROBE(handle_sys_nanosleep_entry) +SEC("tp/raw_syscalls/sys_enter") +int handle_sys_enter(void *ctx) { int cpu = bpf_get_smp_processor_id(); -- cgit v1.2.3 From 5c3320d7fece4612d4a413aa3c8e82cdb5b49fcb Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Thu, 9 Jul 2020 18:10:23 -0700 Subject: libbpf: Fix memory leak and optimize BTF sanitization Coverity's static analysis helpfully reported a memory leak introduced by 0f0e55d8247c ("libbpf: Improve BTF sanitization handling"). While fixing it, I realized that btf__new() already creates a memory copy, so there is no need to do this. So this patch also fixes misleading btf__new() signature to make data into a `const void *` input parameter. And it avoids unnecessary memory allocation and copy in BTF sanitization code altogether. Fixes: 0f0e55d8247c ("libbpf: Improve BTF sanitization handling") Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20200710011023.1655008-1-andriin@fb.com --- tools/lib/bpf/btf.c | 2 +- tools/lib/bpf/btf.h | 2 +- tools/lib/bpf/libbpf.c | 11 +++-------- 3 files changed, 5 insertions(+), 10 deletions(-) diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index c8861c9e3635..c9e760e120dc 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -397,7 +397,7 @@ void btf__free(struct btf *btf) free(btf); } -struct btf *btf__new(__u8 *data, __u32 size) +struct btf *btf__new(const void *data, __u32 size) { struct btf *btf; int err; diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h index 173eff23c472..a3b7ef9b737f 100644 --- a/tools/lib/bpf/btf.h +++ b/tools/lib/bpf/btf.h @@ -63,7 +63,7 @@ struct btf_ext_header { }; LIBBPF_API void btf__free(struct btf *btf); -LIBBPF_API struct btf *btf__new(__u8 *data, __u32 size); +LIBBPF_API struct btf *btf__new(const void *data, __u32 size); LIBBPF_API struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext); LIBBPF_API int btf__finalize_data(struct bpf_object *obj, struct btf *btf); diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 6602eb479596..25e4f77be8d7 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -2533,17 +2533,12 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj) sanitize = btf_needs_sanitization(obj); if (sanitize) { - const void *orig_data; - void *san_data; + const void *raw_data; __u32 sz; /* clone BTF to sanitize a copy and leave the original intact */ - orig_data = btf__get_raw_data(obj->btf, &sz); - san_data = malloc(sz); - if (!san_data) - return -ENOMEM; - memcpy(san_data, orig_data, sz); - kern_btf = btf__new(san_data, sz); + raw_data = btf__get_raw_data(obj->btf, &sz); + kern_btf = btf__new(raw_data, sz); if (IS_ERR(kern_btf)) return PTR_ERR(kern_btf); -- cgit v1.2.3 From eef8a42d6ce087d1c81c960ae0d14f955b742feb Mon Sep 17 00:00:00 2001 From: Wenbo Zhang Date: Fri, 10 Jul 2020 05:20:35 -0400 Subject: bpf: Fix fds_example SIGSEGV error The `BPF_LOG_BUF_SIZE`'s value is `UINT32_MAX >> 8`, so define an array with it on stack caused an overflow. 
Signed-off-by: Wenbo Zhang Signed-off-by: Daniel Borkmann Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200710092035.28919-1-ethercflow@gmail.com --- samples/bpf/fds_example.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/samples/bpf/fds_example.c b/samples/bpf/fds_example.c index d5992f787232..59f45fef5110 100644 --- a/samples/bpf/fds_example.c +++ b/samples/bpf/fds_example.c @@ -30,6 +30,8 @@ #define BPF_M_MAP 1 #define BPF_M_PROG 2 +char bpf_log_buf[BPF_LOG_BUF_SIZE]; + static void usage(void) { printf("Usage: fds_example [...]\n"); @@ -57,7 +59,6 @@ static int bpf_prog_create(const char *object) BPF_EXIT_INSN(), }; size_t insns_cnt = sizeof(insns) / sizeof(struct bpf_insn); - char bpf_log_buf[BPF_LOG_BUF_SIZE]; struct bpf_object *obj; int prog_fd; -- cgit v1.2.3 From fbbb68de80a45ad66b66600bee275485c5073aa7 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Sat, 11 Jul 2020 23:53:21 +0200 Subject: bpf: Add resolve_btfids tool to resolve BTF IDs in ELF object The resolve_btfids tool scans elf object for .BTF_ids section and resolves its symbols with BTF ID values. It will be used to during linking time to resolve arrays of BTF ID values used in verifier, so these IDs do not need to be resolved in runtime. The expected layout of .BTF_ids section is described in main.c header. Related kernel changes are coming in following changes. Build issue reported by 0-DAY CI Kernel Test Service. Signed-off-by: Jiri Olsa Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200711215329.41165-2-jolsa@kernel.org --- tools/bpf/resolve_btfids/Build | 10 + tools/bpf/resolve_btfids/Makefile | 77 ++++ tools/bpf/resolve_btfids/main.c | 721 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 808 insertions(+) create mode 100644 tools/bpf/resolve_btfids/Build create mode 100644 tools/bpf/resolve_btfids/Makefile create mode 100644 tools/bpf/resolve_btfids/main.c diff --git a/tools/bpf/resolve_btfids/Build b/tools/bpf/resolve_btfids/Build new file mode 100644 index 000000000000..ae82da03f9bf --- /dev/null +++ b/tools/bpf/resolve_btfids/Build @@ -0,0 +1,10 @@ +resolve_btfids-y += main.o +resolve_btfids-y += rbtree.o +resolve_btfids-y += zalloc.o +resolve_btfids-y += string.o +resolve_btfids-y += ctype.o +resolve_btfids-y += str_error_r.o + +$(OUTPUT)%.o: ../../lib/%.c FORCE + $(call rule_mkdir) + $(call if_changed_dep,cc_o_c) diff --git a/tools/bpf/resolve_btfids/Makefile b/tools/bpf/resolve_btfids/Makefile new file mode 100644 index 000000000000..948378ca73d4 --- /dev/null +++ b/tools/bpf/resolve_btfids/Makefile @@ -0,0 +1,77 @@ +# SPDX-License-Identifier: GPL-2.0-only +include ../../scripts/Makefile.include + +ifeq ($(srctree),) +srctree := $(patsubst %/,%,$(dir $(CURDIR))) +srctree := $(patsubst %/,%,$(dir $(srctree))) +srctree := $(patsubst %/,%,$(dir $(srctree))) +endif + +ifeq ($(V),1) + Q = + msg = +else + Q = @ + msg = @printf ' %-8s %s%s\n' "$(1)" "$(notdir $(2))" "$(if $(3), $(3))"; + MAKEFLAGS=--no-print-directory +endif + +OUTPUT ?= $(srctree)/tools/bpf/resolve_btfids/ + +LIBBPF_SRC := $(srctree)/tools/lib/bpf/ +SUBCMD_SRC := $(srctree)/tools/lib/subcmd/ + +BPFOBJ := $(OUTPUT)/libbpf.a +SUBCMDOBJ := $(OUTPUT)/libsubcmd.a + +BINARY := $(OUTPUT)/resolve_btfids +BINARY_IN := $(BINARY)-in.o + +all: $(BINARY) + +$(OUTPUT): + $(call msg,MKDIR,,$@) + $(Q)mkdir -p $(OUTPUT) + +$(SUBCMDOBJ): fixdep FORCE + $(Q)$(MAKE) -C $(SUBCMD_SRC) OUTPUT=$(OUTPUT) + +$(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(OUTPUT) + $(Q)$(MAKE) 
$(submake_extras) -C $(LIBBPF_SRC) OUTPUT=$(abspath $(dir $@))/ $(abspath $@) + +CFLAGS := -g \ + -I$(srctree)/tools/include \ + -I$(srctree)/tools/include/uapi \ + -I$(LIBBPF_SRC) \ + -I$(SUBCMD_SRC) + +LIBS = -lelf -lz + +export srctree OUTPUT CFLAGS Q +include $(srctree)/tools/build/Makefile.include + +$(BINARY_IN): fixdep FORCE + $(Q)$(MAKE) $(build)=resolve_btfids + +$(BINARY): $(BPFOBJ) $(SUBCMDOBJ) $(BINARY_IN) + $(call msg,LINK,$@) + $(Q)$(CC) $(BINARY_IN) $(LDFLAGS) -o $@ $(BPFOBJ) $(SUBCMDOBJ) $(LIBS) + +libsubcmd-clean: + $(Q)$(MAKE) -C $(SUBCMD_SRC) OUTPUT=$(OUTPUT) clean + +libbpf-clean: + $(Q)$(MAKE) -C $(LIBBPF_SRC) OUTPUT=$(OUTPUT) clean + +clean: libsubcmd-clean libbpf-clean fixdep-clean + $(call msg,CLEAN,$(BINARY)) + $(Q)$(RM) -f $(BINARY); \ + find $(if $(OUTPUT),$(OUTPUT),.) -name \*.o -or -name \*.o.cmd -or -name \*.o.d | xargs $(RM) + +tags: + $(call msg,GEN,,tags) + $(Q)ctags -R . $(LIBBPF_SRC) $(SUBCMD_SRC) + +FORCE: + +.PHONY: all FORCE clean tags diff --git a/tools/bpf/resolve_btfids/main.c b/tools/bpf/resolve_btfids/main.c new file mode 100644 index 000000000000..6956b6350cad --- /dev/null +++ b/tools/bpf/resolve_btfids/main.c @@ -0,0 +1,721 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) + +/* + * resolve_btfids scans Elf object for .BTF_ids section and resolves + * its symbols with BTF ID values. + * + * Each symbol points to 4 bytes data and is expected to have + * following name syntax: + * + * __BTF_ID____[__] + * + * type is: + * + * func - lookup BTF_KIND_FUNC symbol with name + * and store its ID into the data: + * + * __BTF_ID__func__vfs_close__1: + * .zero 4 + * + * struct - lookup BTF_KIND_STRUCT symbol with name + * and store its ID into the data: + * + * __BTF_ID__struct__sk_buff__1: + * .zero 4 + * + * union - lookup BTF_KIND_UNION symbol with name + * and store its ID into the data: + * + * __BTF_ID__union__thread_union__1: + * .zero 4 + * + * typedef - lookup BTF_KIND_TYPEDEF symbol with name + * and store its ID into the data: + * + * __BTF_ID__typedef__pid_t__1: + * .zero 4 + * + * set - store symbol size into first 4 bytes and sort following + * ID list + * + * __BTF_ID__set__list: + * .zero 4 + * list: + * __BTF_ID__func__vfs_getattr__3: + * .zero 4 + * __BTF_ID__func__vfs_fallocate__4: + * .zero 4 + */ + +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define BTF_IDS_SECTION ".BTF_ids" +#define BTF_ID "__BTF_ID__" + +#define BTF_STRUCT "struct" +#define BTF_UNION "union" +#define BTF_TYPEDEF "typedef" +#define BTF_FUNC "func" +#define BTF_SET "set" + +#define ADDR_CNT 100 + +struct btf_id { + struct rb_node rb_node; + char *name; + union { + int id; + int cnt; + }; + int addr_cnt; + Elf64_Addr addr[ADDR_CNT]; +}; + +struct object { + const char *path; + const char *btf; + + struct { + int fd; + Elf *elf; + Elf_Data *symbols; + Elf_Data *idlist; + int symbols_shndx; + int idlist_shndx; + size_t strtabidx; + unsigned long idlist_addr; + } efile; + + struct rb_root sets; + struct rb_root structs; + struct rb_root unions; + struct rb_root typedefs; + struct rb_root funcs; + + int nr_funcs; + int nr_structs; + int nr_unions; + int nr_typedefs; +}; + +static int verbose; + +int eprintf(int level, int var, const char *fmt, ...) 
+{ + va_list args; + int ret; + + if (var >= level) { + va_start(args, fmt); + ret = vfprintf(stderr, fmt, args); + va_end(args); + } + return ret; +} + +#ifndef pr_fmt +#define pr_fmt(fmt) fmt +#endif + +#define pr_debug(fmt, ...) \ + eprintf(1, verbose, pr_fmt(fmt), ##__VA_ARGS__) +#define pr_debugN(n, fmt, ...) \ + eprintf(n, verbose, pr_fmt(fmt), ##__VA_ARGS__) +#define pr_debug2(fmt, ...) pr_debugN(2, pr_fmt(fmt), ##__VA_ARGS__) +#define pr_err(fmt, ...) \ + eprintf(0, verbose, pr_fmt(fmt), ##__VA_ARGS__) + +static bool is_btf_id(const char *name) +{ + return name && !strncmp(name, BTF_ID, sizeof(BTF_ID) - 1); +} + +static struct btf_id *btf_id__find(struct rb_root *root, const char *name) +{ + struct rb_node *p = root->rb_node; + struct btf_id *id; + int cmp; + + while (p) { + id = rb_entry(p, struct btf_id, rb_node); + cmp = strcmp(id->name, name); + if (cmp < 0) + p = p->rb_left; + else if (cmp > 0) + p = p->rb_right; + else + return id; + } + return NULL; +} + +static struct btf_id* +btf_id__add(struct rb_root *root, char *name, bool unique) +{ + struct rb_node **p = &root->rb_node; + struct rb_node *parent = NULL; + struct btf_id *id; + int cmp; + + while (*p != NULL) { + parent = *p; + id = rb_entry(parent, struct btf_id, rb_node); + cmp = strcmp(id->name, name); + if (cmp < 0) + p = &(*p)->rb_left; + else if (cmp > 0) + p = &(*p)->rb_right; + else + return unique ? NULL : id; + } + + id = zalloc(sizeof(*id)); + if (id) { + pr_debug("adding symbol %s\n", name); + id->name = name; + rb_link_node(&id->rb_node, parent, p); + rb_insert_color(&id->rb_node, root); + } + return id; +} + +static char *get_id(const char *prefix_end) +{ + /* + * __BTF_ID__func__vfs_truncate__0 + * prefix_end = ^ + */ + char *p, *id = strdup(prefix_end + sizeof("__") - 1); + + if (id) { + /* + * __BTF_ID__func__vfs_truncate__0 + * id = ^ + * + * cut the unique id part + */ + p = strrchr(id, '_'); + p--; + if (*p != '_') { + free(id); + return NULL; + } + *p = '\0'; + } + return id; +} + +static struct btf_id *add_symbol(struct rb_root *root, char *name, size_t size) +{ + char *id; + + id = get_id(name + size); + if (!id) { + pr_err("FAILED to parse symbol name: %s\n", name); + return NULL; + } + + return btf_id__add(root, id, false); +} + +static int elf_collect(struct object *obj) +{ + Elf_Scn *scn = NULL; + size_t shdrstrndx; + int idx = 0; + Elf *elf; + int fd; + + fd = open(obj->path, O_RDWR, 0666); + if (fd == -1) { + pr_err("FAILED cannot open %s: %s\n", + obj->path, strerror(errno)); + return -1; + } + + elf_version(EV_CURRENT); + + elf = elf_begin(fd, ELF_C_RDWR_MMAP, NULL); + if (!elf) { + pr_err("FAILED cannot create ELF descriptor: %s\n", + elf_errmsg(-1)); + return -1; + } + + obj->efile.fd = fd; + obj->efile.elf = elf; + + elf_flagelf(elf, ELF_C_SET, ELF_F_LAYOUT); + + if (elf_getshdrstrndx(elf, &shdrstrndx) != 0) { + pr_err("FAILED cannot get shdr str ndx\n"); + return -1; + } + + /* + * Scan all the elf sections and look for save data + * from .BTF_ids section and symbols. 
+ */ + while ((scn = elf_nextscn(elf, scn)) != NULL) { + Elf_Data *data; + GElf_Shdr sh; + char *name; + + idx++; + if (gelf_getshdr(scn, &sh) != &sh) { + pr_err("FAILED get section(%d) header\n", idx); + return -1; + } + + name = elf_strptr(elf, shdrstrndx, sh.sh_name); + if (!name) { + pr_err("FAILED get section(%d) name\n", idx); + return -1; + } + + data = elf_getdata(scn, 0); + if (!data) { + pr_err("FAILED to get section(%d) data from %s\n", + idx, name); + return -1; + } + + pr_debug2("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n", + idx, name, (unsigned long) data->d_size, + (int) sh.sh_link, (unsigned long) sh.sh_flags, + (int) sh.sh_type); + + if (sh.sh_type == SHT_SYMTAB) { + obj->efile.symbols = data; + obj->efile.symbols_shndx = idx; + obj->efile.strtabidx = sh.sh_link; + } else if (!strcmp(name, BTF_IDS_SECTION)) { + obj->efile.idlist = data; + obj->efile.idlist_shndx = idx; + obj->efile.idlist_addr = sh.sh_addr; + } + } + + return 0; +} + +static int symbols_collect(struct object *obj) +{ + Elf_Scn *scn = NULL; + int n, i, err = 0; + GElf_Shdr sh; + char *name; + + scn = elf_getscn(obj->efile.elf, obj->efile.symbols_shndx); + if (!scn) + return -1; + + if (gelf_getshdr(scn, &sh) != &sh) + return -1; + + n = sh.sh_size / sh.sh_entsize; + + /* + * Scan symbols and look for the ones starting with + * __BTF_ID__* over .BTF_ids section. + */ + for (i = 0; !err && i < n; i++) { + char *tmp, *prefix; + struct btf_id *id; + GElf_Sym sym; + int err = -1; + + if (!gelf_getsym(obj->efile.symbols, i, &sym)) + return -1; + + if (sym.st_shndx != obj->efile.idlist_shndx) + continue; + + name = elf_strptr(obj->efile.elf, obj->efile.strtabidx, + sym.st_name); + + if (!is_btf_id(name)) + continue; + + /* + * __BTF_ID__TYPE__vfs_truncate__0 + * prefix = ^ + */ + prefix = name + sizeof(BTF_ID) - 1; + + /* struct */ + if (!strncmp(prefix, BTF_STRUCT, sizeof(BTF_STRUCT) - 1)) { + obj->nr_structs++; + id = add_symbol(&obj->structs, prefix, sizeof(BTF_STRUCT) - 1); + /* union */ + } else if (!strncmp(prefix, BTF_UNION, sizeof(BTF_UNION) - 1)) { + obj->nr_unions++; + id = add_symbol(&obj->unions, prefix, sizeof(BTF_UNION) - 1); + /* typedef */ + } else if (!strncmp(prefix, BTF_TYPEDEF, sizeof(BTF_TYPEDEF) - 1)) { + obj->nr_typedefs++; + id = add_symbol(&obj->typedefs, prefix, sizeof(BTF_TYPEDEF) - 1); + /* func */ + } else if (!strncmp(prefix, BTF_FUNC, sizeof(BTF_FUNC) - 1)) { + obj->nr_funcs++; + id = add_symbol(&obj->funcs, prefix, sizeof(BTF_FUNC) - 1); + /* set */ + } else if (!strncmp(prefix, BTF_SET, sizeof(BTF_SET) - 1)) { + id = add_symbol(&obj->sets, prefix, sizeof(BTF_SET) - 1); + /* + * SET objects store list's count, which is encoded + * in symbol's size, together with 'cnt' field hence + * that - 1. 
+ */ + if (id) + id->cnt = sym.st_size / sizeof(int) - 1; + } else { + pr_err("FAILED unsupported prefix %s\n", prefix); + return -1; + } + + if (!id) + return -ENOMEM; + + if (id->addr_cnt >= ADDR_CNT) { + pr_err("FAILED symbol %s crossed the number of allowed lists", + id->name); + return -1; + } + id->addr[id->addr_cnt++] = sym.st_value; + } + + return 0; +} + +static struct btf *btf__parse_raw(const char *file) +{ + struct btf *btf; + struct stat st; + __u8 *buf; + FILE *f; + + if (stat(file, &st)) + return NULL; + + f = fopen(file, "rb"); + if (!f) + return NULL; + + buf = malloc(st.st_size); + if (!buf) { + btf = ERR_PTR(-ENOMEM); + goto exit_close; + } + + if ((size_t) st.st_size != fread(buf, 1, st.st_size, f)) { + btf = ERR_PTR(-EINVAL); + goto exit_free; + } + + btf = btf__new(buf, st.st_size); + +exit_free: + free(buf); +exit_close: + fclose(f); + return btf; +} + +static bool is_btf_raw(const char *file) +{ + __u16 magic = 0; + int fd, nb_read; + + fd = open(file, O_RDONLY); + if (fd < 0) + return false; + + nb_read = read(fd, &magic, sizeof(magic)); + close(fd); + return nb_read == sizeof(magic) && magic == BTF_MAGIC; +} + +static struct btf *btf_open(const char *path) +{ + if (is_btf_raw(path)) + return btf__parse_raw(path); + else + return btf__parse_elf(path, NULL); +} + +static int symbols_resolve(struct object *obj) +{ + int nr_typedefs = obj->nr_typedefs; + int nr_structs = obj->nr_structs; + int nr_unions = obj->nr_unions; + int nr_funcs = obj->nr_funcs; + int err, type_id; + struct btf *btf; + __u32 nr; + + btf = btf_open(obj->btf ?: obj->path); + err = libbpf_get_error(btf); + if (err) { + pr_err("FAILED: load BTF from %s: %s", + obj->path, strerror(err)); + return -1; + } + + err = -1; + nr = btf__get_nr_types(btf); + + /* + * Iterate all the BTF types and search for collected symbol IDs. 
+ */ + for (type_id = 1; type_id <= nr; type_id++) { + const struct btf_type *type; + struct rb_root *root; + struct btf_id *id; + const char *str; + int *nr; + + type = btf__type_by_id(btf, type_id); + if (!type) { + pr_err("FAILED: malformed BTF, can't resolve type for ID %d\n", + type_id); + goto out; + } + + if (btf_is_func(type) && nr_funcs) { + nr = &nr_funcs; + root = &obj->funcs; + } else if (btf_is_struct(type) && nr_structs) { + nr = &nr_structs; + root = &obj->structs; + } else if (btf_is_union(type) && nr_unions) { + nr = &nr_unions; + root = &obj->unions; + } else if (btf_is_typedef(type) && nr_typedefs) { + nr = &nr_typedefs; + root = &obj->typedefs; + } else + continue; + + str = btf__name_by_offset(btf, type->name_off); + if (!str) { + pr_err("FAILED: malformed BTF, can't resolve name for ID %d\n", + type_id); + goto out; + } + + id = btf_id__find(root, str); + if (id) { + id->id = type_id; + (*nr)--; + } + } + + err = 0; +out: + btf__free(btf); + return err; +} + +static int id_patch(struct object *obj, struct btf_id *id) +{ + Elf_Data *data = obj->efile.idlist; + int *ptr = data->d_buf; + int i; + + if (!id->id) { + pr_err("FAILED unresolved symbol %s\n", id->name); + return -EINVAL; + } + + for (i = 0; i < id->addr_cnt; i++) { + unsigned long addr = id->addr[i]; + unsigned long idx = addr - obj->efile.idlist_addr; + + pr_debug("patching addr %5lu: ID %7d [%s]\n", + idx, id->id, id->name); + + if (idx >= data->d_size) { + pr_err("FAILED patching index %lu out of bounds %lu\n", + idx, data->d_size); + return -1; + } + + idx = idx / sizeof(int); + ptr[idx] = id->id; + } + + return 0; +} + +static int __symbols_patch(struct object *obj, struct rb_root *root) +{ + struct rb_node *next; + struct btf_id *id; + + next = rb_first(root); + while (next) { + id = rb_entry(next, struct btf_id, rb_node); + + if (id_patch(obj, id)) + return -1; + + next = rb_next(next); + } + return 0; +} + +static int cmp_id(const void *pa, const void *pb) +{ + const int *a = pa, *b = pb; + + return *a - *b; +} + +static int sets_patch(struct object *obj) +{ + Elf_Data *data = obj->efile.idlist; + int *ptr = data->d_buf; + struct rb_node *next; + + next = rb_first(&obj->sets); + while (next) { + unsigned long addr, idx; + struct btf_id *id; + int *base; + int cnt; + + id = rb_entry(next, struct btf_id, rb_node); + addr = id->addr[0]; + idx = addr - obj->efile.idlist_addr; + + /* sets are unique */ + if (id->addr_cnt != 1) { + pr_err("FAILED malformed data for set '%s'\n", + id->name); + return -1; + } + + idx = idx / sizeof(int); + base = &ptr[idx] + 1; + cnt = ptr[idx]; + + pr_debug("sorting addr %5lu: cnt %6d [%s]\n", + (idx + 1) * sizeof(int), cnt, id->name); + + qsort(base, cnt, sizeof(int), cmp_id); + + next = rb_next(next); + } +} + +static int symbols_patch(struct object *obj) +{ + int err; + + if (__symbols_patch(obj, &obj->structs) || + __symbols_patch(obj, &obj->unions) || + __symbols_patch(obj, &obj->typedefs) || + __symbols_patch(obj, &obj->funcs) || + __symbols_patch(obj, &obj->sets)) + return -1; + + if (sets_patch(obj)) + return -1; + + elf_flagdata(obj->efile.idlist, ELF_C_SET, ELF_F_DIRTY); + + err = elf_update(obj->efile.elf, ELF_C_WRITE); + if (err < 0) { + pr_err("FAILED elf_update(WRITE): %s\n", + elf_errmsg(-1)); + } + + pr_debug("update %s for %s\n", + err >= 0 ? "ok" : "failed", obj->path); + return err < 0 ? 
-1 : 0; +} + +static const char * const resolve_btfids_usage[] = { + "resolve_btfids [] ", + NULL +}; + +int main(int argc, const char **argv) +{ + bool no_fail = false; + struct object obj = { + .efile = { + .idlist_shndx = -1, + .symbols_shndx = -1, + }, + .structs = RB_ROOT, + .unions = RB_ROOT, + .typedefs = RB_ROOT, + .funcs = RB_ROOT, + .sets = RB_ROOT, + }; + struct option btfid_options[] = { + OPT_INCR('v', "verbose", &verbose, + "be more verbose (show errors, etc)"), + OPT_STRING(0, "btf", &obj.btf, "BTF data", + "BTF data"), + OPT_BOOLEAN(0, "no-fail", &no_fail, + "do not fail if " BTF_IDS_SECTION " section is not found"), + OPT_END() + }; + int err = -1; + + argc = parse_options(argc, argv, btfid_options, resolve_btfids_usage, + PARSE_OPT_STOP_AT_NON_OPTION); + if (argc != 1) + usage_with_options(resolve_btfids_usage, btfid_options); + + obj.path = argv[0]; + + if (elf_collect(&obj)) + goto out; + + /* + * We did not find .BTF_ids section or symbols section, + * nothing to do.. + */ + if (obj.efile.idlist_shndx == -1 || + obj.efile.symbols_shndx == -1) { + if (no_fail) + return 0; + pr_err("FAILED to find needed sections\n"); + return -1; + } + + if (symbols_collect(&obj)) + goto out; + + if (symbols_resolve(&obj)) + goto out; + + if (symbols_patch(&obj)) + goto out; + + err = 0; +out: + if (obj.efile.elf) + elf_end(obj.efile.elf); + close(obj.efile.fd); + return err; +} -- cgit v1.2.3 From 33a57ce0a54d498275f432db04850001175dfdfa Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Sat, 11 Jul 2020 23:53:22 +0200 Subject: bpf: Compile resolve_btfids tool at kernel compilation start The resolve_btfids tool will be used during the vmlinux linking, so it's necessary it's ready for it. Signed-off-by: Jiri Olsa Signed-off-by: Alexei Starovoitov Tested-by: Andrii Nakryiko Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200711215329.41165-3-jolsa@kernel.org --- Makefile | 22 ++++++++++++++++++---- tools/Makefile | 3 +++ tools/bpf/Makefile | 9 ++++++++- 3 files changed, 29 insertions(+), 5 deletions(-) diff --git a/Makefile b/Makefile index ac2c61c37a73..017e775b3288 100644 --- a/Makefile +++ b/Makefile @@ -1053,9 +1053,10 @@ export mod_sign_cmd HOST_LIBELF_LIBS = $(shell pkg-config libelf --libs 2>/dev/null || echo -lelf) +has_libelf = $(call try-run,\ + echo "int main() {}" | $(HOSTCC) -xc -o /dev/null $(HOST_LIBELF_LIBS) -,1,0) + ifdef CONFIG_STACK_VALIDATION - has_libelf := $(call try-run,\ - echo "int main() {}" | $(HOSTCC) -xc -o /dev/null $(HOST_LIBELF_LIBS) -,1,0) ifeq ($(has_libelf),1) objtool_target := tools/objtool FORCE else @@ -1064,6 +1065,14 @@ ifdef CONFIG_STACK_VALIDATION endif endif +ifdef CONFIG_DEBUG_INFO_BTF + ifeq ($(has_libelf),1) + resolve_btfids_target := tools/bpf/resolve_btfids FORCE + else + ERROR_RESOLVE_BTFIDS := 1 + endif +endif + PHONY += prepare0 export MODORDER := $(extmod-prefix)modules.order @@ -1175,7 +1184,7 @@ prepare0: archprepare $(Q)$(MAKE) $(build)=. # All the preparing.. 
-prepare: prepare0 prepare-objtool +prepare: prepare0 prepare-objtool prepare-resolve_btfids # Support for using generic headers in asm-generic asm-generic := -f $(srctree)/scripts/Makefile.asm-generic obj @@ -1188,7 +1197,7 @@ uapi-asm-generic: $(Q)$(MAKE) $(asm-generic)=arch/$(SRCARCH)/include/generated/uapi/asm \ generic=include/uapi/asm-generic -PHONY += prepare-objtool +PHONY += prepare-objtool prepare-resolve_btfids prepare-objtool: $(objtool_target) ifeq ($(SKIP_STACK_VALIDATION),1) ifdef CONFIG_UNWINDER_ORC @@ -1199,6 +1208,11 @@ else endif endif +prepare-resolve_btfids: $(resolve_btfids_target) +ifeq ($(ERROR_RESOLVE_BTFIDS),1) + @echo "error: Cannot resolve BTF IDs for CONFIG_DEBUG_INFO_BTF, please install libelf-dev, libelf-devel or elfutils-libelf-devel" >&2 + @false +endif # Generate some files # --------------------------------------------------------------------------- diff --git a/tools/Makefile b/tools/Makefile index bd778812e915..85af6ebbce91 100644 --- a/tools/Makefile +++ b/tools/Makefile @@ -67,6 +67,9 @@ cpupower: FORCE cgroup firewire hv guest bootconfig spi usb virtio vm bpf iio gpio objtool leds wmi pci firmware debugging: FORCE $(call descend,$@) +bpf/%: FORCE + $(call descend,$@) + liblockdep: FORCE $(call descend,lib/lockdep) diff --git a/tools/bpf/Makefile b/tools/bpf/Makefile index 6df1850f8353..74bc9a105225 100644 --- a/tools/bpf/Makefile +++ b/tools/bpf/Makefile @@ -123,5 +123,12 @@ runqslower_install: runqslower_clean: $(call descend,runqslower,clean) +resolve_btfids: + $(call descend,resolve_btfids) + +resolve_btfids_clean: + $(call descend,resolve_btfids,clean) + .PHONY: all install clean bpftool bpftool_install bpftool_clean \ - runqslower runqslower_install runqslower_clean + runqslower runqslower_install runqslower_clean \ + resolve_btfids resolve_btfids_clean -- cgit v1.2.3 From 5a2798ab32ba2952cfe25701ee460bccbd434c75 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Sat, 11 Jul 2020 23:53:23 +0200 Subject: bpf: Add BTF_ID_LIST/BTF_ID/BTF_ID_UNUSED macros Adding support to generate .BTF_ids section that will hold BTF ID lists for verifier. Adding macros that will help to define lists of BTF ID values placed in .BTF_ids section. They are initially filled with zeros (during compilation) and resolved later during the linking phase by resolve_btfids tool. Following defines list of one BTF ID value: BTF_ID_LIST(bpf_skb_output_btf_ids) BTF_ID(struct, sk_buff) It also defines following variable to access the list: extern u32 bpf_skb_output_btf_ids[]; The BTF_ID_UNUSED macro defines 4 zero bytes. It's used when we want to define 'unused' entry in BTF_ID_LIST, like: BTF_ID_LIST(bpf_skb_output_btf_ids) BTF_ID(struct, sk_buff) BTF_ID_UNUSED BTF_ID(struct, task_struct) Suggested-by: Andrii Nakryiko Signed-off-by: Jiri Olsa Signed-off-by: Alexei Starovoitov Tested-by: Andrii Nakryiko Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200711215329.41165-4-jolsa@kernel.org --- include/asm-generic/vmlinux.lds.h | 4 ++ include/linux/btf_ids.h | 87 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 91 insertions(+) create mode 100644 include/linux/btf_ids.h diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index db600ef218d7..0be2ee265931 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -641,6 +641,10 @@ __start_BTF = .; \ *(.BTF) \ __stop_BTF = .; \ + } \ + . 
= ALIGN(4); \ + .BTF_ids : AT(ADDR(.BTF_ids) - LOAD_OFFSET) { \ + *(.BTF_ids) \ } #else #define BTF diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h new file mode 100644 index 000000000000..fe019774f8a7 --- /dev/null +++ b/include/linux/btf_ids.h @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _LINUX_BTF_IDS_H +#define _LINUX_BTF_IDS_H + +#include /* for __PASTE */ + +/* + * Following macros help to define lists of BTF IDs placed + * in .BTF_ids section. They are initially filled with zeros + * (during compilation) and resolved later during the + * linking phase by resolve_btfids tool. + * + * Any change in list layout must be reflected in resolve_btfids + * tool logic. + */ + +#define BTF_IDS_SECTION ".BTF_ids" + +#define ____BTF_ID(symbol) \ +asm( \ +".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \ +".local " #symbol " ; \n" \ +".type " #symbol ", @object; \n" \ +".size " #symbol ", 4; \n" \ +#symbol ": \n" \ +".zero 4 \n" \ +".popsection; \n"); + +#define __BTF_ID(symbol) \ + ____BTF_ID(symbol) + +#define __ID(prefix) \ + __PASTE(prefix, __COUNTER__) + +/* + * The BTF_ID defines unique symbol for each ID pointing + * to 4 zero bytes. + */ +#define BTF_ID(prefix, name) \ + __BTF_ID(__ID(__BTF_ID__##prefix##__##name##__)) + +/* + * The BTF_ID_LIST macro defines pure (unsorted) list + * of BTF IDs, with following layout: + * + * BTF_ID_LIST(list1) + * BTF_ID(type1, name1) + * BTF_ID(type2, name2) + * + * list1: + * __BTF_ID__type1__name1__1: + * .zero 4 + * __BTF_ID__type2__name2__2: + * .zero 4 + * + */ +#define __BTF_ID_LIST(name) \ +asm( \ +".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \ +".local " #name "; \n" \ +#name ":; \n" \ +".popsection; \n"); \ + +#define BTF_ID_LIST(name) \ +__BTF_ID_LIST(name) \ +extern u32 name[]; + +/* + * The BTF_ID_UNUSED macro defines 4 zero bytes. + * It's used when we want to define 'unused' entry + * in BTF_ID_LIST, like: + * + * BTF_ID_LIST(bpf_skb_output_btf_ids) + * BTF_ID(struct, sk_buff) + * BTF_ID_UNUSED + * BTF_ID(struct, task_struct) + */ + +#define BTF_ID_UNUSED \ +asm( \ +".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \ +".zero 4 \n" \ +".popsection; \n"); + + +#endif -- cgit v1.2.3 From c9a0f3b85e09dd16665b639cb884490410619434 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Sat, 11 Jul 2020 23:53:24 +0200 Subject: bpf: Resolve BTF IDs in vmlinux image Using BTF_ID_LIST macro to define lists for several helpers using BTF arguments. And running resolve_btfids on vmlinux elf object during linking, so the .BTF_ids section gets the IDs resolved. 
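Usage follows the pattern documented in btf_ids.h: define a list at file scope, and after resolve_btfids has run at link time, index into it to read the resolved IDs. An illustrative kernel-style sketch (the list name and types are examples, not taken from the patch below):

	#include <linux/types.h>
	#include <linux/btf_ids.h>

	BTF_ID_LIST(example_btf_ids)
	BTF_ID(struct, task_struct)
	BTF_ID(struct, seq_file)

	/* After linking, example_btf_ids[0] holds the BTF ID of
	 * struct task_struct and example_btf_ids[1] that of struct seq_file.
	 */
	static bool is_task_struct_id(u32 id)
	{
		return id == example_btf_ids[0];
	}
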
Signed-off-by: Jiri Olsa Signed-off-by: Alexei Starovoitov Tested-by: Andrii Nakryiko Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200711215329.41165-5-jolsa@kernel.org --- Makefile | 3 ++- kernel/bpf/stackmap.c | 5 ++++- kernel/trace/bpf_trace.c | 9 +++++++-- net/core/filter.c | 9 +++++++-- scripts/link-vmlinux.sh | 6 ++++++ 5 files changed, 26 insertions(+), 6 deletions(-) diff --git a/Makefile b/Makefile index 017e775b3288..462f1a75fcb3 100644 --- a/Makefile +++ b/Makefile @@ -448,6 +448,7 @@ OBJSIZE = $(CROSS_COMPILE)size STRIP = $(CROSS_COMPILE)strip endif PAHOLE = pahole +RESOLVE_BTFIDS = $(objtree)/tools/bpf/resolve_btfids/resolve_btfids LEX = flex YACC = bison AWK = awk @@ -510,7 +511,7 @@ GCC_PLUGINS_CFLAGS := CLANG_FLAGS := export ARCH SRCARCH CONFIG_SHELL BASH HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE LD CC -export CPP AR NM STRIP OBJCOPY OBJDUMP OBJSIZE READELF PAHOLE LEX YACC AWK INSTALLKERNEL +export CPP AR NM STRIP OBJCOPY OBJDUMP OBJSIZE READELF PAHOLE RESOLVE_BTFIDS LEX YACC AWK INSTALLKERNEL export PERL PYTHON PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX export KGZIP KBZIP2 KLZOP LZMA LZ4 XZ export KBUILD_HOSTCXXFLAGS KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS LDFLAGS_MODULE diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c index a6c361ed7937..48d8e739975f 100644 --- a/kernel/bpf/stackmap.c +++ b/kernel/bpf/stackmap.c @@ -9,6 +9,7 @@ #include #include #include +#include #include "percpu_freelist.h" #define STACK_CREATE_FLAG_MASK \ @@ -576,7 +577,9 @@ BPF_CALL_4(bpf_get_task_stack, struct task_struct *, task, void *, buf, return __bpf_get_stack(regs, task, buf, size, flags); } -static int bpf_get_task_stack_btf_ids[5]; +BTF_ID_LIST(bpf_get_task_stack_btf_ids) +BTF_ID(struct, task_struct) + const struct bpf_func_proto bpf_get_task_stack_proto = { .func = bpf_get_task_stack, .gpl_only = false, diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index e0b7775039ab..e178e8e32b33 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -13,6 +13,7 @@ #include #include #include +#include #include @@ -710,7 +711,9 @@ out: return err; } -static int bpf_seq_printf_btf_ids[5]; +BTF_ID_LIST(bpf_seq_printf_btf_ids) +BTF_ID(struct, seq_file) + static const struct bpf_func_proto bpf_seq_printf_proto = { .func = bpf_seq_printf, .gpl_only = true, @@ -728,7 +731,9 @@ BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len) return seq_write(m, data, len) ? 
-EOVERFLOW : 0; } -static int bpf_seq_write_btf_ids[5]; +BTF_ID_LIST(bpf_seq_write_btf_ids) +BTF_ID(struct, seq_file) + static const struct bpf_func_proto bpf_seq_write_proto = { .func = bpf_seq_write, .gpl_only = true, diff --git a/net/core/filter.c b/net/core/filter.c index ddcc0d6209e1..4e572441e64a 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -75,6 +75,7 @@ #include #include #include +#include /** * sk_filter_trim_cap - run a packet through a socket filter @@ -3779,7 +3780,9 @@ static const struct bpf_func_proto bpf_skb_event_output_proto = { .arg5_type = ARG_CONST_SIZE_OR_ZERO, }; -static int bpf_skb_output_btf_ids[5]; +BTF_ID_LIST(bpf_skb_output_btf_ids) +BTF_ID(struct, sk_buff) + const struct bpf_func_proto bpf_skb_output_proto = { .func = bpf_skb_event_output, .gpl_only = true, @@ -4173,7 +4176,9 @@ static const struct bpf_func_proto bpf_xdp_event_output_proto = { .arg5_type = ARG_CONST_SIZE_OR_ZERO, }; -static int bpf_xdp_output_btf_ids[5]; +BTF_ID_LIST(bpf_xdp_output_btf_ids) +BTF_ID(struct, xdp_buff) + const struct bpf_func_proto bpf_xdp_output_proto = { .func = bpf_xdp_event_output, .gpl_only = true, diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh index 92dd745906f4..e26f02dbedee 100755 --- a/scripts/link-vmlinux.sh +++ b/scripts/link-vmlinux.sh @@ -336,6 +336,12 @@ fi vmlinux_link vmlinux "${kallsymso}" ${btf_vmlinux_bin_o} +# fill in BTF IDs +if [ -n "${CONFIG_DEBUG_INFO_BTF}" ]; then +info BTFIDS vmlinux +${RESOLVE_BTFIDS} vmlinux +fi + if [ -n "${CONFIG_BUILDTIME_TABLE_SORT}" ]; then info SORTTAB vmlinux if ! sorttable vmlinux; then -- cgit v1.2.3 From 138b9a0511c789f2451ff1d80e7fd3f9eef3a9e3 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Sat, 11 Jul 2020 23:53:25 +0200 Subject: bpf: Remove btf_id helpers resolving Now when we moved the helpers btf_id arrays into .BTF_ids section, we can remove the code that resolve those IDs in runtime. 
Signed-off-by: Jiri Olsa Signed-off-by: Alexei Starovoitov Tested-by: Andrii Nakryiko Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200711215329.41165-6-jolsa@kernel.org --- kernel/bpf/btf.c | 89 ++++---------------------------------------------------- 1 file changed, 5 insertions(+), 84 deletions(-) diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 4c3007f428b1..71140b73ae3c 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -4079,96 +4079,17 @@ error: return -EINVAL; } -static int __btf_resolve_helper_id(struct bpf_verifier_log *log, void *fn, - int arg) -{ - char fnname[KSYM_SYMBOL_LEN + 4] = "btf_"; - const struct btf_param *args; - const struct btf_type *t; - const char *tname, *sym; - u32 btf_id, i; - - if (IS_ERR(btf_vmlinux)) { - bpf_log(log, "btf_vmlinux is malformed\n"); - return -EINVAL; - } - - sym = kallsyms_lookup((long)fn, NULL, NULL, NULL, fnname + 4); - if (!sym) { - bpf_log(log, "kernel doesn't have kallsyms\n"); - return -EFAULT; - } - - for (i = 1; i <= btf_vmlinux->nr_types; i++) { - t = btf_type_by_id(btf_vmlinux, i); - if (BTF_INFO_KIND(t->info) != BTF_KIND_TYPEDEF) - continue; - tname = __btf_name_by_offset(btf_vmlinux, t->name_off); - if (!strcmp(tname, fnname)) - break; - } - if (i > btf_vmlinux->nr_types) { - bpf_log(log, "helper %s type is not found\n", fnname); - return -ENOENT; - } - - t = btf_type_by_id(btf_vmlinux, t->type); - if (!btf_type_is_ptr(t)) - return -EFAULT; - t = btf_type_by_id(btf_vmlinux, t->type); - if (!btf_type_is_func_proto(t)) - return -EFAULT; - - args = (const struct btf_param *)(t + 1); - if (arg >= btf_type_vlen(t)) { - bpf_log(log, "bpf helper %s doesn't have %d-th argument\n", - fnname, arg); - return -EINVAL; - } - - t = btf_type_by_id(btf_vmlinux, args[arg].type); - if (!btf_type_is_ptr(t) || !t->type) { - /* anything but the pointer to struct is a helper config bug */ - bpf_log(log, "ARG_PTR_TO_BTF is misconfigured\n"); - return -EFAULT; - } - btf_id = t->type; - t = btf_type_by_id(btf_vmlinux, t->type); - /* skip modifiers */ - while (btf_type_is_modifier(t)) { - btf_id = t->type; - t = btf_type_by_id(btf_vmlinux, t->type); - } - if (!btf_type_is_struct(t)) { - bpf_log(log, "ARG_PTR_TO_BTF is not a struct\n"); - return -EFAULT; - } - bpf_log(log, "helper %s arg%d has btf_id %d struct %s\n", fnname + 4, - arg, btf_id, __btf_name_by_offset(btf_vmlinux, t->name_off)); - return btf_id; -} - int btf_resolve_helper_id(struct bpf_verifier_log *log, const struct bpf_func_proto *fn, int arg) { - int *btf_id = &fn->btf_id[arg]; - int ret; + int id; if (fn->arg_type[arg] != ARG_PTR_TO_BTF_ID) return -EINVAL; - - ret = READ_ONCE(*btf_id); - if (ret) - return ret; - /* ok to race the search. The result is the same */ - ret = __btf_resolve_helper_id(log, fn->func, arg); - if (!ret) { - /* Function argument cannot be type 'void' */ - bpf_log(log, "BTF resolution bug\n"); - return -EFAULT; - } - WRITE_ONCE(*btf_id, ret); - return ret; + id = fn->btf_id[arg]; + if (!id || id > btf_vmlinux->nr_types) + return -EINVAL; + return id; } static int __get_type_size(struct btf *btf, u32 btf_id, -- cgit v1.2.3 From 49f4e6720748c778795df62d222d0200b61345be Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Sat, 11 Jul 2020 23:53:26 +0200 Subject: bpf: Use BTF_ID to resolve bpf_ctx_convert struct This way the ID is resolved during compile time, and we can remove the runtime name search. 
Signed-off-by: Jiri Olsa Signed-off-by: Alexei Starovoitov Tested-by: Andrii Nakryiko Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200711215329.41165-7-jolsa@kernel.org --- kernel/bpf/btf.c | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 71140b73ae3c..a710e3ee1f18 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -3621,12 +3622,15 @@ static int btf_translate_to_vmlinux(struct bpf_verifier_log *log, return kern_ctx_type->type; } +BTF_ID_LIST(bpf_ctx_convert_btf_id) +BTF_ID(struct, bpf_ctx_convert) + struct btf *btf_parse_vmlinux(void) { struct btf_verifier_env *env = NULL; struct bpf_verifier_log *log; struct btf *btf = NULL; - int err, btf_id; + int err; env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN); if (!env) @@ -3659,14 +3663,8 @@ struct btf *btf_parse_vmlinux(void) if (err) goto errout; - /* find struct bpf_ctx_convert for type checking later */ - btf_id = btf_find_by_name_kind(btf, "bpf_ctx_convert", BTF_KIND_STRUCT); - if (btf_id < 0) { - err = btf_id; - goto errout; - } /* btf_parse_vmlinux() runs under bpf_verifier_lock */ - bpf_ctx_convert.t = btf_type_by_id(btf, btf_id); + bpf_ctx_convert.t = btf_type_by_id(btf, bpf_ctx_convert_btf_id[0]); /* find bpf map structs for map_ptr access checking */ err = btf_vmlinux_map_ids_init(btf, log); -- cgit v1.2.3 From 232ce4be295743deb2cf9a7cf9c60a4fde367964 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Sat, 11 Jul 2020 23:53:27 +0200 Subject: bpf: Add info about .BTF_ids section to btf.rst Updating btf.rst doc with info about .BTF_ids section Signed-off-by: Jiri Olsa Signed-off-by: Alexei Starovoitov Tested-by: Andrii Nakryiko Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200711215329.41165-8-jolsa@kernel.org --- Documentation/bpf/btf.rst | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/Documentation/bpf/btf.rst b/Documentation/bpf/btf.rst index 4d565d202ce3..b5361b8621c9 100644 --- a/Documentation/bpf/btf.rst +++ b/Documentation/bpf/btf.rst @@ -691,6 +691,42 @@ kernel API, the ``insn_off`` is the instruction offset in the unit of ``struct bpf_insn``. For ELF API, the ``insn_off`` is the byte offset from the beginning of section (``btf_ext_info_sec->sec_name_off``). +4.2 .BTF_ids section +==================== + +The .BTF_ids section encodes BTF ID values that are used within the kernel. + +This section is created during the kernel compilation with the help of +macros defined in ``include/linux/btf_ids.h`` header file. Kernel code can +use them to create lists and sets (sorted lists) of BTF ID values. + +The ``BTF_ID_LIST`` and ``BTF_ID`` macros define unsorted list of BTF ID values, +with following syntax:: + + BTF_ID_LIST(list) + BTF_ID(type1, name1) + BTF_ID(type2, name2) + +resulting in following layout in .BTF_ids section:: + + __BTF_ID__type1__name1__1: + .zero 4 + __BTF_ID__type2__name2__2: + .zero 4 + +The ``u32 list[];`` variable is defined to access the list. + +The ``BTF_ID_UNUSED`` macro defines 4 zero bytes. It's used when we +want to define unused entry in BTF_ID_LIST, like:: + + BTF_ID_LIST(bpf_skb_output_btf_ids) + BTF_ID(struct, sk_buff) + BTF_ID_UNUSED + BTF_ID(struct, task_struct) + +All the BTF ID lists and sets are compiled in the .BTF_ids section and +resolved during the linking phase of kernel build by ``resolve_btfids`` tool. + 5. 
Using BTF ************ -- cgit v1.2.3 From e5a0516ec9681daf5c1f0d05144d21430b6ca6d7 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Sat, 11 Jul 2020 23:53:28 +0200 Subject: tools headers: Adopt verbatim copy of btf_ids.h from kernel sources It will be needed by bpf selftest for resolve_btfids tool. Also adding __PASTE macro as btf_ids.h dependency, which is defined in: include/linux/compiler_types.h but because tools/include do not have this header, I'm putting the macro into linux/compiler.h header. Signed-off-by: Jiri Olsa Signed-off-by: Alexei Starovoitov Tested-by: Andrii Nakryiko Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200711215329.41165-9-jolsa@kernel.org --- tools/include/linux/btf_ids.h | 87 ++++++++++++++++++++++++++++++++++++++++++ tools/include/linux/compiler.h | 4 ++ 2 files changed, 91 insertions(+) create mode 100644 tools/include/linux/btf_ids.h diff --git a/tools/include/linux/btf_ids.h b/tools/include/linux/btf_ids.h new file mode 100644 index 000000000000..fe019774f8a7 --- /dev/null +++ b/tools/include/linux/btf_ids.h @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _LINUX_BTF_IDS_H +#define _LINUX_BTF_IDS_H + +#include /* for __PASTE */ + +/* + * Following macros help to define lists of BTF IDs placed + * in .BTF_ids section. They are initially filled with zeros + * (during compilation) and resolved later during the + * linking phase by resolve_btfids tool. + * + * Any change in list layout must be reflected in resolve_btfids + * tool logic. + */ + +#define BTF_IDS_SECTION ".BTF_ids" + +#define ____BTF_ID(symbol) \ +asm( \ +".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \ +".local " #symbol " ; \n" \ +".type " #symbol ", @object; \n" \ +".size " #symbol ", 4; \n" \ +#symbol ": \n" \ +".zero 4 \n" \ +".popsection; \n"); + +#define __BTF_ID(symbol) \ + ____BTF_ID(symbol) + +#define __ID(prefix) \ + __PASTE(prefix, __COUNTER__) + +/* + * The BTF_ID defines unique symbol for each ID pointing + * to 4 zero bytes. + */ +#define BTF_ID(prefix, name) \ + __BTF_ID(__ID(__BTF_ID__##prefix##__##name##__)) + +/* + * The BTF_ID_LIST macro defines pure (unsorted) list + * of BTF IDs, with following layout: + * + * BTF_ID_LIST(list1) + * BTF_ID(type1, name1) + * BTF_ID(type2, name2) + * + * list1: + * __BTF_ID__type1__name1__1: + * .zero 4 + * __BTF_ID__type2__name2__2: + * .zero 4 + * + */ +#define __BTF_ID_LIST(name) \ +asm( \ +".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \ +".local " #name "; \n" \ +#name ":; \n" \ +".popsection; \n"); \ + +#define BTF_ID_LIST(name) \ +__BTF_ID_LIST(name) \ +extern u32 name[]; + +/* + * The BTF_ID_UNUSED macro defines 4 zero bytes. + * It's used when we want to define 'unused' entry + * in BTF_ID_LIST, like: + * + * BTF_ID_LIST(bpf_skb_output_btf_ids) + * BTF_ID(struct, sk_buff) + * BTF_ID_UNUSED + * BTF_ID(struct, task_struct) + */ + +#define BTF_ID_UNUSED \ +asm( \ +".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \ +".zero 4 \n" \ +".popsection; \n"); + + +#endif diff --git a/tools/include/linux/compiler.h b/tools/include/linux/compiler.h index 9f9002734e19..6eac24d44e81 100644 --- a/tools/include/linux/compiler.h +++ b/tools/include/linux/compiler.h @@ -201,4 +201,8 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s # define __fallthrough #endif +/* Indirect macros required for expanded argument pasting, eg. __LINE__. 
*/ +#define ___PASTE(a, b) a##b +#define __PASTE(a, b) ___PASTE(a, b) + #endif /* _TOOLS_LINUX_COMPILER_H */ -- cgit v1.2.3 From cc15a20d5f3abc3cbd7911b70156b7b9e2bc7d41 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Sat, 11 Jul 2020 23:53:29 +0200 Subject: selftests/bpf: Add test for resolve_btfids Adding resolve_btfids test under test_progs suite. It's possible to use btf_ids.h header and its logic in user space application, so we can add easy test for it. The test defines BTF_ID_LIST and checks it gets properly resolved. For this reason the test_progs binary (and other binaries that use TRUNNER* macros) is processed with resolve_btfids tool, which resolves BTF IDs in .BTF_ids section. The BTF data are taken from btf_data.o object rceated from progs/btf_data.c. Signed-off-by: Jiri Olsa Signed-off-by: Alexei Starovoitov Tested-by: Andrii Nakryiko Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200711215329.41165-10-jolsa@kernel.org --- tools/testing/selftests/bpf/Makefile | 15 ++- .../selftests/bpf/prog_tests/resolve_btfids.c | 111 +++++++++++++++++++++ tools/testing/selftests/bpf/progs/btf_data.c | 50 ++++++++++ 3 files changed, 175 insertions(+), 1 deletion(-) create mode 100644 tools/testing/selftests/bpf/prog_tests/resolve_btfids.c create mode 100644 tools/testing/selftests/bpf/progs/btf_data.c diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 1f9c696b3edf..e7a8cf83ba48 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -111,6 +111,7 @@ SCRATCH_DIR := $(OUTPUT)/tools BUILD_DIR := $(SCRATCH_DIR)/build INCLUDE_DIR := $(SCRATCH_DIR)/include BPFOBJ := $(BUILD_DIR)/libbpf/libbpf.a +RESOLVE_BTFIDS := $(BUILD_DIR)/resolve_btfids/resolve_btfids # Define simple and short `make test_progs`, `make test_sysctl`, etc targets # to build individual tests. @@ -177,7 +178,7 @@ $(BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \ $(Q)$(MAKE) $(submake_extras) -C $(BPFDIR) OUTPUT=$(BUILD_DIR)/libbpf/ \ DESTDIR=$(SCRATCH_DIR) prefix= all install_headers -$(BUILD_DIR)/libbpf $(BUILD_DIR)/bpftool $(INCLUDE_DIR): +$(BUILD_DIR)/libbpf $(BUILD_DIR)/bpftool $(BUILD_DIR)/resolve_btfids $(INCLUDE_DIR): $(call msg,MKDIR,,$@) mkdir -p $@ @@ -190,6 +191,16 @@ else cp "$(VMLINUX_H)" $@ endif +$(RESOLVE_BTFIDS): $(BPFOBJ) | $(BUILD_DIR)/resolve_btfids \ + $(TOOLSDIR)/bpf/resolve_btfids/main.c \ + $(TOOLSDIR)/lib/rbtree.c \ + $(TOOLSDIR)/lib/zalloc.c \ + $(TOOLSDIR)/lib/string.c \ + $(TOOLSDIR)/lib/ctype.c \ + $(TOOLSDIR)/lib/str_error_r.c + $(Q)$(MAKE) $(submake_extras) -C $(TOOLSDIR)/bpf/resolve_btfids \ + OUTPUT=$(BUILD_DIR)/resolve_btfids/ BPFOBJ=$(BPFOBJ) + # Get Clang's default includes on this system, as opposed to those seen by # '-target bpf'. This fixes "missing" files on some architectures/distros, # such as asm/byteorder.h, asm/socket.h, asm/sockios.h, sys/cdefs.h etc. 
@@ -352,9 +363,11 @@ endif $(OUTPUT)/$(TRUNNER_BINARY): $(TRUNNER_TEST_OBJS) \ $(TRUNNER_EXTRA_OBJS) $$(BPFOBJ) \ + $(RESOLVE_BTFIDS) \ | $(TRUNNER_BINARY)-extras $$(call msg,BINARY,,$$@) $$(CC) $$(CFLAGS) $$(filter %.a %.o,$$^) $$(LDLIBS) -o $$@ + $(RESOLVE_BTFIDS) --no-fail --btf btf_data.o $$@ endef diff --git a/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c b/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c new file mode 100644 index 000000000000..403be6f36cba --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c @@ -0,0 +1,111 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include +#include +#include +#include +#include "test_progs.h" + +static int duration; + +struct symbol { + const char *name; + int type; + int id; +}; + +struct symbol test_symbols[] = { + { "unused", BTF_KIND_UNKN, 0 }, + { "S", BTF_KIND_TYPEDEF, -1 }, + { "T", BTF_KIND_TYPEDEF, -1 }, + { "U", BTF_KIND_TYPEDEF, -1 }, + { "S", BTF_KIND_STRUCT, -1 }, + { "U", BTF_KIND_UNION, -1 }, + { "func", BTF_KIND_FUNC, -1 }, +}; + +BTF_ID_LIST(test_list) +BTF_ID_UNUSED +BTF_ID(typedef, S) +BTF_ID(typedef, T) +BTF_ID(typedef, U) +BTF_ID(struct, S) +BTF_ID(union, U) +BTF_ID(func, func) + +static int +__resolve_symbol(struct btf *btf, int type_id) +{ + const struct btf_type *type; + const char *str; + unsigned int i; + + type = btf__type_by_id(btf, type_id); + if (!type) { + PRINT_FAIL("Failed to get type for ID %d\n", type_id); + return -1; + } + + for (i = 0; i < ARRAY_SIZE(test_symbols); i++) { + if (test_symbols[i].id != -1) + continue; + + if (BTF_INFO_KIND(type->info) != test_symbols[i].type) + continue; + + str = btf__name_by_offset(btf, type->name_off); + if (!str) { + PRINT_FAIL("Failed to get name for BTF ID %d\n", type_id); + return -1; + } + + if (!strcmp(str, test_symbols[i].name)) + test_symbols[i].id = type_id; + } + + return 0; +} + +static int resolve_symbols(void) +{ + struct btf *btf; + int type_id; + __u32 nr; + + btf = btf__parse_elf("btf_data.o", NULL); + if (CHECK(libbpf_get_error(btf), "resolve", + "Failed to load BTF from btf_data.o\n")) + return -1; + + nr = btf__get_nr_types(btf); + + for (type_id = 1; type_id <= nr; type_id++) { + if (__resolve_symbol(btf, type_id)) + break; + } + + btf__free(btf); + return 0; +} + +int test_resolve_btfids(void) +{ + unsigned int i; + int ret = 0; + + if (resolve_symbols()) + return -1; + + /* Check BTF_ID_LIST(test_list) IDs */ + for (i = 0; i < ARRAY_SIZE(test_symbols) && !ret; i++) { + ret = CHECK(test_list[i] != test_symbols[i].id, + "id_check", + "wrong ID for %s (%d != %d)\n", test_symbols[i].name, + test_list[i], test_symbols[i].id); + } + + return ret; +} diff --git a/tools/testing/selftests/bpf/progs/btf_data.c b/tools/testing/selftests/bpf/progs/btf_data.c new file mode 100644 index 000000000000..baa525275bde --- /dev/null +++ b/tools/testing/selftests/bpf/progs/btf_data.c @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) + +struct S { + int a; + int b; + int c; +}; + +union U { + int a; + int b; + int c; +}; + +struct S1 { + int a; + int b; + int c; +}; + +union U1 { + int a; + int b; + int c; +}; + +typedef int T; +typedef int S; +typedef int U; +typedef int T1; +typedef int S1; +typedef int U1; + +struct root_struct { + S m_1; + T m_2; + U m_3; + S1 m_4; + T1 m_5; + U1 m_6; + struct S m_7; + struct S1 m_8; + union U m_9; + union U1 m_10; +}; + +int func(struct root_struct *root) +{ + return 0; +} -- cgit v1.2.3 From 8aa5a33578e9685d06020bd10d1637557423e945 Mon Sep 17 
00:00:00 2001 From: Ciara Loftus Date: Wed, 8 Jul 2020 07:28:33 +0000 Subject: xsk: Add new statistics It can be useful for the user to know the reason behind a dropped packet. Introduce new counters which track drops on the receive path caused by: 1. rx ring being full 2. fill ring being empty Also, on the tx path introduce a counter which tracks the number of times we attempt pull from the tx ring when it is empty. Signed-off-by: Ciara Loftus Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200708072835.4427-2-ciara.loftus@intel.com --- include/net/xdp_sock.h | 4 ++++ include/uapi/linux/if_xdp.h | 5 ++++- net/xdp/xsk.c | 36 +++++++++++++++++++++++++++++++----- net/xdp/xsk_buff_pool.c | 1 + net/xdp/xsk_queue.h | 6 ++++++ tools/include/uapi/linux/if_xdp.h | 5 ++++- 6 files changed, 50 insertions(+), 7 deletions(-) diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h index 96bfc5f5f24e..c9d87cc40c11 100644 --- a/include/net/xdp_sock.h +++ b/include/net/xdp_sock.h @@ -69,7 +69,11 @@ struct xdp_sock { spinlock_t tx_completion_lock; /* Protects generic receive. */ spinlock_t rx_lock; + + /* Statistics */ u64 rx_dropped; + u64 rx_queue_full; + struct list_head map_list; /* Protects map_list */ spinlock_t map_list_lock; diff --git a/include/uapi/linux/if_xdp.h b/include/uapi/linux/if_xdp.h index be328c59389d..a78a8096f4ce 100644 --- a/include/uapi/linux/if_xdp.h +++ b/include/uapi/linux/if_xdp.h @@ -73,9 +73,12 @@ struct xdp_umem_reg { }; struct xdp_statistics { - __u64 rx_dropped; /* Dropped for reasons other than invalid desc */ + __u64 rx_dropped; /* Dropped for other reasons */ __u64 rx_invalid_descs; /* Dropped due to invalid descriptor */ __u64 tx_invalid_descs; /* Dropped due to invalid descriptor */ + __u64 rx_ring_full; /* Dropped due to rx ring being full */ + __u64 rx_fill_ring_empty_descs; /* Failed to retrieve item from fill ring */ + __u64 tx_ring_empty_descs; /* Failed to retrieve item from tx ring */ }; struct xdp_options { diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c index 3700266229f6..26e3bba8c204 100644 --- a/net/xdp/xsk.c +++ b/net/xdp/xsk.c @@ -123,7 +123,7 @@ static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len) addr = xp_get_handle(xskb); err = xskq_prod_reserve_desc(xs->rx, addr, len); if (err) { - xs->rx_dropped++; + xs->rx_queue_full++; return err; } @@ -274,8 +274,10 @@ bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc) rcu_read_lock(); list_for_each_entry_rcu(xs, &umem->xsk_tx_list, list) { - if (!xskq_cons_peek_desc(xs->tx, desc, umem)) + if (!xskq_cons_peek_desc(xs->tx, desc, umem)) { + xs->tx->queue_empty_descs++; continue; + } /* This is the backpressure mechanism for the Tx path. 
* Reserve space in the completion queue and only proceed @@ -387,6 +389,8 @@ static int xsk_generic_xmit(struct sock *sk) sent_frame = true; } + xs->tx->queue_empty_descs++; + out: if (sent_frame) sk->sk_write_space(sk); @@ -812,6 +816,12 @@ static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring) ring->desc = offsetof(struct xdp_umem_ring, desc); } +struct xdp_statistics_v1 { + __u64 rx_dropped; + __u64 rx_invalid_descs; + __u64 tx_invalid_descs; +}; + static int xsk_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { @@ -831,19 +841,35 @@ static int xsk_getsockopt(struct socket *sock, int level, int optname, case XDP_STATISTICS: { struct xdp_statistics stats; + bool extra_stats = true; + size_t stats_size; - if (len < sizeof(stats)) + if (len < sizeof(struct xdp_statistics_v1)) { return -EINVAL; + } else if (len < sizeof(stats)) { + extra_stats = false; + stats_size = sizeof(struct xdp_statistics_v1); + } else { + stats_size = sizeof(stats); + } mutex_lock(&xs->mutex); stats.rx_dropped = xs->rx_dropped; + if (extra_stats) { + stats.rx_ring_full = xs->rx_queue_full; + stats.rx_fill_ring_empty_descs = + xs->umem ? xskq_nb_queue_empty_descs(xs->umem->fq) : 0; + stats.tx_ring_empty_descs = xskq_nb_queue_empty_descs(xs->tx); + } else { + stats.rx_dropped += xs->rx_queue_full; + } stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx); stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx); mutex_unlock(&xs->mutex); - if (copy_to_user(optval, &stats, sizeof(stats))) + if (copy_to_user(optval, &stats, stats_size)) return -EFAULT; - if (put_user(sizeof(stats), optlen)) + if (put_user(stats_size, optlen)) return -EFAULT; return 0; diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c index 540ed75e4482..89cf3551d3e9 100644 --- a/net/xdp/xsk_buff_pool.c +++ b/net/xdp/xsk_buff_pool.c @@ -235,6 +235,7 @@ static struct xdp_buff_xsk *__xp_alloc(struct xsk_buff_pool *pool) for (;;) { if (!xskq_cons_peek_addr_unchecked(pool->fq, &addr)) { + pool->fq->queue_empty_descs++; xp_release(xskb); return NULL; } diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h index 5b5d24d2dd37..bf42cfd74b89 100644 --- a/net/xdp/xsk_queue.h +++ b/net/xdp/xsk_queue.h @@ -38,6 +38,7 @@ struct xsk_queue { u32 cached_cons; struct xdp_ring *ring; u64 invalid_descs; + u64 queue_empty_descs; }; /* The structure of the shared state of the rings are the same as the @@ -354,6 +355,11 @@ static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q) return q ? q->invalid_descs : 0; } +static inline u64 xskq_nb_queue_empty_descs(struct xsk_queue *q) +{ + return q ? 
q->queue_empty_descs : 0; +} + struct xsk_queue *xskq_create(u32 nentries, bool umem_queue); void xskq_destroy(struct xsk_queue *q_ops); diff --git a/tools/include/uapi/linux/if_xdp.h b/tools/include/uapi/linux/if_xdp.h index be328c59389d..a78a8096f4ce 100644 --- a/tools/include/uapi/linux/if_xdp.h +++ b/tools/include/uapi/linux/if_xdp.h @@ -73,9 +73,12 @@ struct xdp_umem_reg { }; struct xdp_statistics { - __u64 rx_dropped; /* Dropped for reasons other than invalid desc */ + __u64 rx_dropped; /* Dropped for other reasons */ __u64 rx_invalid_descs; /* Dropped due to invalid descriptor */ __u64 tx_invalid_descs; /* Dropped due to invalid descriptor */ + __u64 rx_ring_full; /* Dropped due to rx ring being full */ + __u64 rx_fill_ring_empty_descs; /* Failed to retrieve item from fill ring */ + __u64 tx_ring_empty_descs; /* Failed to retrieve item from tx ring */ }; struct xdp_options { -- cgit v1.2.3 From b36c3206f9ef3ea2844e9092c12d29c0d1f56c54 Mon Sep 17 00:00:00 2001 From: Ciara Loftus Date: Wed, 8 Jul 2020 07:28:34 +0000 Subject: samples: bpf: Add an option for printing extra statistics in xdpsock Introduce the --extra-stats (or simply -x) flag to the xdpsock application which prints additional statistics alongside the regular rx and tx counters. The new statistics printed report error conditions eg. rx ring full, invalid descriptors, etc. Signed-off-by: Ciara Loftus Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200708072835.4427-3-ciara.loftus@intel.com --- samples/bpf/xdpsock_user.c | 87 ++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 85 insertions(+), 2 deletions(-) diff --git a/samples/bpf/xdpsock_user.c b/samples/bpf/xdpsock_user.c index c91e91362a0c..19c679456a0e 100644 --- a/samples/bpf/xdpsock_user.c +++ b/samples/bpf/xdpsock_user.c @@ -77,6 +77,7 @@ static u32 opt_batch_size = 64; static int opt_pkt_count; static u16 opt_pkt_size = MIN_PKT_SIZE; static u32 opt_pkt_fill_pattern = 0x12345678; +static bool opt_extra_stats; static int opt_poll; static int opt_interval = 1; static u32 opt_xdp_bind_flags = XDP_USE_NEED_WAKEUP; @@ -103,8 +104,20 @@ struct xsk_socket_info { struct xsk_socket *xsk; unsigned long rx_npkts; unsigned long tx_npkts; + unsigned long rx_dropped_npkts; + unsigned long rx_invalid_npkts; + unsigned long tx_invalid_npkts; + unsigned long rx_full_npkts; + unsigned long rx_fill_empty_npkts; + unsigned long tx_empty_npkts; unsigned long prev_rx_npkts; unsigned long prev_tx_npkts; + unsigned long prev_rx_dropped_npkts; + unsigned long prev_rx_invalid_npkts; + unsigned long prev_tx_invalid_npkts; + unsigned long prev_rx_full_npkts; + unsigned long prev_rx_fill_empty_npkts; + unsigned long prev_tx_empty_npkts; u32 outstanding_tx; }; @@ -147,6 +160,30 @@ static void print_benchmark(bool running) } } +static int xsk_get_xdp_stats(int fd, struct xsk_socket_info *xsk) +{ + struct xdp_statistics stats; + socklen_t optlen; + int err; + + optlen = sizeof(stats); + err = getsockopt(fd, SOL_XDP, XDP_STATISTICS, &stats, &optlen); + if (err) + return err; + + if (optlen == sizeof(struct xdp_statistics)) { + xsk->rx_dropped_npkts = stats.rx_dropped; + xsk->rx_invalid_npkts = stats.rx_invalid_descs; + xsk->tx_invalid_npkts = stats.tx_invalid_descs; + xsk->rx_full_npkts = stats.rx_ring_full; + xsk->rx_fill_empty_npkts = stats.rx_fill_ring_empty_descs; + xsk->tx_empty_npkts = stats.tx_ring_empty_descs; + return 0; + } + + return -EINVAL; +} + static void dump_stats(void) { unsigned long now = get_nsecs(); @@ -157,7 +194,8 @@ static void 
dump_stats(void) for (i = 0; i < num_socks && xsks[i]; i++) { char *fmt = "%-15s %'-11.0f %'-11lu\n"; - double rx_pps, tx_pps; + double rx_pps, tx_pps, dropped_pps, rx_invalid_pps, full_pps, fill_empty_pps, + tx_invalid_pps, tx_empty_pps; rx_pps = (xsks[i]->rx_npkts - xsks[i]->prev_rx_npkts) * 1000000000. / dt; @@ -175,6 +213,46 @@ static void dump_stats(void) xsks[i]->prev_rx_npkts = xsks[i]->rx_npkts; xsks[i]->prev_tx_npkts = xsks[i]->tx_npkts; + + if (opt_extra_stats) { + if (!xsk_get_xdp_stats(xsk_socket__fd(xsks[i]->xsk), xsks[i])) { + dropped_pps = (xsks[i]->rx_dropped_npkts - + xsks[i]->prev_rx_dropped_npkts) * 1000000000. / dt; + rx_invalid_pps = (xsks[i]->rx_invalid_npkts - + xsks[i]->prev_rx_invalid_npkts) * 1000000000. / dt; + tx_invalid_pps = (xsks[i]->tx_invalid_npkts - + xsks[i]->prev_tx_invalid_npkts) * 1000000000. / dt; + full_pps = (xsks[i]->rx_full_npkts - + xsks[i]->prev_rx_full_npkts) * 1000000000. / dt; + fill_empty_pps = (xsks[i]->rx_fill_empty_npkts - + xsks[i]->prev_rx_fill_empty_npkts) + * 1000000000. / dt; + tx_empty_pps = (xsks[i]->tx_empty_npkts - + xsks[i]->prev_tx_empty_npkts) * 1000000000. / dt; + + printf(fmt, "rx dropped", dropped_pps, + xsks[i]->rx_dropped_npkts); + printf(fmt, "rx invalid", rx_invalid_pps, + xsks[i]->rx_invalid_npkts); + printf(fmt, "tx invalid", tx_invalid_pps, + xsks[i]->tx_invalid_npkts); + printf(fmt, "rx queue full", full_pps, + xsks[i]->rx_full_npkts); + printf(fmt, "fill ring empty", fill_empty_pps, + xsks[i]->rx_fill_empty_npkts); + printf(fmt, "tx ring empty", tx_empty_pps, + xsks[i]->tx_empty_npkts); + + xsks[i]->prev_rx_dropped_npkts = xsks[i]->rx_dropped_npkts; + xsks[i]->prev_rx_invalid_npkts = xsks[i]->rx_invalid_npkts; + xsks[i]->prev_tx_invalid_npkts = xsks[i]->tx_invalid_npkts; + xsks[i]->prev_rx_full_npkts = xsks[i]->rx_full_npkts; + xsks[i]->prev_rx_fill_empty_npkts = xsks[i]->rx_fill_empty_npkts; + xsks[i]->prev_tx_empty_npkts = xsks[i]->tx_empty_npkts; + } else { + printf("%-15s\n", "Error retrieving extra stats"); + } + } } } @@ -630,6 +708,7 @@ static struct option long_options[] = { {"tx-pkt-count", required_argument, 0, 'C'}, {"tx-pkt-size", required_argument, 0, 's'}, {"tx-pkt-pattern", required_argument, 0, 'P'}, + {"extra-stats", no_argument, 0, 'x'}, {0, 0, 0, 0} }; @@ -664,6 +743,7 @@ static void usage(const char *prog) " (Default: %d bytes)\n" " Min size: %d, Max size %d.\n" " -P, --tx-pkt-pattern=nPacket fill pattern. 
Default: 0x%x\n" + " -x, --extra-stats Display extra statistics.\n" "\n"; fprintf(stderr, str, prog, XSK_UMEM__DEFAULT_FRAME_SIZE, opt_batch_size, MIN_PKT_SIZE, MIN_PKT_SIZE, @@ -679,7 +759,7 @@ static void parse_command_line(int argc, char **argv) opterr = 0; for (;;) { - c = getopt_long(argc, argv, "Frtli:q:pSNn:czf:muMd:b:C:s:P:", + c = getopt_long(argc, argv, "Frtli:q:pSNn:czf:muMd:b:C:s:P:x", long_options, &option_index); if (c == -1) break; @@ -760,6 +840,9 @@ static void parse_command_line(int argc, char **argv) case 'P': opt_pkt_fill_pattern = strtol(optarg, NULL, 16); break; + case 'x': + opt_extra_stats = 1; + break; default: usage(basename(argv[0])); } -- cgit v1.2.3 From 0d80cb4612aa32dc0faa17fa3ab6f96f33e2b4a7 Mon Sep 17 00:00:00 2001 From: Ciara Loftus Date: Wed, 8 Jul 2020 07:28:35 +0000 Subject: xsk: Add xdp statistics to xsk_diag Add xdp statistics to the information dumped through the xsk_diag interface Signed-off-by: Ciara Loftus Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200708072835.4427-4-ciara.loftus@intel.com --- include/uapi/linux/xdp_diag.h | 11 +++++++++++ net/xdp/xsk_diag.c | 17 +++++++++++++++++ 2 files changed, 28 insertions(+) diff --git a/include/uapi/linux/xdp_diag.h b/include/uapi/linux/xdp_diag.h index 78b2591a7782..66b9973b4f4c 100644 --- a/include/uapi/linux/xdp_diag.h +++ b/include/uapi/linux/xdp_diag.h @@ -30,6 +30,7 @@ struct xdp_diag_msg { #define XDP_SHOW_RING_CFG (1 << 1) #define XDP_SHOW_UMEM (1 << 2) #define XDP_SHOW_MEMINFO (1 << 3) +#define XDP_SHOW_STATS (1 << 4) enum { XDP_DIAG_NONE, @@ -41,6 +42,7 @@ enum { XDP_DIAG_UMEM_FILL_RING, XDP_DIAG_UMEM_COMPLETION_RING, XDP_DIAG_MEMINFO, + XDP_DIAG_STATS, __XDP_DIAG_MAX, }; @@ -69,4 +71,13 @@ struct xdp_diag_umem { __u32 refs; }; +struct xdp_diag_stats { + __u64 n_rx_dropped; + __u64 n_rx_invalid; + __u64 n_rx_full; + __u64 n_fill_ring_empty; + __u64 n_tx_invalid; + __u64 n_tx_ring_empty; +}; + #endif /* _LINUX_XDP_DIAG_H */ diff --git a/net/xdp/xsk_diag.c b/net/xdp/xsk_diag.c index 0163b26aaf63..21e9c2d123ee 100644 --- a/net/xdp/xsk_diag.c +++ b/net/xdp/xsk_diag.c @@ -76,6 +76,19 @@ static int xsk_diag_put_umem(const struct xdp_sock *xs, struct sk_buff *nlskb) return err; } +static int xsk_diag_put_stats(const struct xdp_sock *xs, struct sk_buff *nlskb) +{ + struct xdp_diag_stats du = {}; + + du.n_rx_dropped = xs->rx_dropped; + du.n_rx_invalid = xskq_nb_invalid_descs(xs->rx); + du.n_rx_full = xs->rx_queue_full; + du.n_fill_ring_empty = xs->umem ? xskq_nb_queue_empty_descs(xs->umem->fq) : 0; + du.n_tx_invalid = xskq_nb_invalid_descs(xs->tx); + du.n_tx_ring_empty = xskq_nb_queue_empty_descs(xs->tx); + return nla_put(nlskb, XDP_DIAG_STATS, sizeof(du), &du); +} + static int xsk_diag_fill(struct sock *sk, struct sk_buff *nlskb, struct xdp_diag_req *req, struct user_namespace *user_ns, @@ -118,6 +131,10 @@ static int xsk_diag_fill(struct sock *sk, struct sk_buff *nlskb, sock_diag_put_meminfo(sk, nlskb, XDP_DIAG_MEMINFO)) goto out_nlmsg_trim; + if ((req->xdiag_show & XDP_SHOW_STATS) && + xsk_diag_put_stats(xs, nlskb)) + goto out_nlmsg_trim; + mutex_unlock(&xs->mutex); nlmsg_end(nlskb, nlh); return 0; -- cgit v1.2.3 From 93776cb9ee91aeed43ba53dcec97ffed4ae6f1f7 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Fri, 10 Jul 2020 16:26:04 -0700 Subject: tools/bpftool: Remove warning about PID iterator support Don't emit warning that bpftool was built without PID iterator support. This error garbles JSON output of otherwise perfectly valid show commands. 
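As a usage illustration for the XDP_STATISTICS extension added by the xsk patches above, here is a minimal user-space sketch. It is not part of any patch in this series; it assumes an already-created AF_XDP socket file descriptor and mirrors the optlen-based compatibility handling in xsk_getsockopt():

/* Query XDP socket statistics, tolerating kernels that only know the
 * shorter xdp_statistics_v1 layout. 'fd' is an AF_XDP socket.
 */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/if_xdp.h>

static int print_xdp_stats(int fd)
{
	struct xdp_statistics stats;
	socklen_t optlen = sizeof(stats);

	memset(&stats, 0, sizeof(stats));
	if (getsockopt(fd, SOL_XDP, XDP_STATISTICS, &stats, &optlen))
		return -1;

	printf("rx_dropped:               %llu\n",
	       (unsigned long long)stats.rx_dropped);
	printf("rx_invalid_descs:         %llu\n",
	       (unsigned long long)stats.rx_invalid_descs);
	printf("tx_invalid_descs:         %llu\n",
	       (unsigned long long)stats.tx_invalid_descs);

	/* The kernel reports how much it filled in; only trust the new
	 * counters when the full struct came back.
	 */
	if (optlen == sizeof(stats)) {
		printf("rx_ring_full:             %llu\n",
		       (unsigned long long)stats.rx_ring_full);
		printf("rx_fill_ring_empty_descs: %llu\n",
		       (unsigned long long)stats.rx_fill_ring_empty_descs);
		printf("tx_ring_empty_descs:      %llu\n",
		       (unsigned long long)stats.tx_ring_empty_descs);
	}
	return 0;
}

Older kernels return only the three original fields, so checking the returned optlen before reading the new counters keeps the same binary working across kernel versions, exactly as xdpsock_user.c does above.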
Reported-by: Andrey Ignatov Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/20200710232605.20918-1-andriin@fb.com --- tools/bpf/bpftool/pids.c | 1 - 1 file changed, 1 deletion(-) diff --git a/tools/bpf/bpftool/pids.c b/tools/bpf/bpftool/pids.c index c0d23ce4a6f4..e3b116325403 100644 --- a/tools/bpf/bpftool/pids.c +++ b/tools/bpf/bpftool/pids.c @@ -15,7 +15,6 @@ int build_obj_refs_table(struct obj_refs_table *table, enum bpf_obj_type type) { - p_err("bpftool built without PID iterator support"); return -ENOTSUP; } void delete_obj_refs_table(struct obj_refs_table *table) {} -- cgit v1.2.3 From ac5a72ea5c8989871e61f6bb0852e0f91de51ebe Mon Sep 17 00:00:00 2001 From: Alan Maguire Date: Mon, 13 Jul 2020 12:52:33 +0100 Subject: bpf: Use dedicated bpf_trace_printk event instead of trace_printk() The bpf helper bpf_trace_printk() uses trace_printk() under the hood. This leads to an alarming warning message originating from trace buffer allocation which occurs the first time a program using bpf_trace_printk() is loaded. We can instead create a trace event for bpf_trace_printk() and enable it in-kernel when/if we encounter a program using the bpf_trace_printk() helper. With this approach, trace_printk() is not used directly and no warning message appears. This work was started by Steven (see Link) and finished by Alan; added Steven's Signed-off-by with his permission. Signed-off-by: Steven Rostedt (VMware) Signed-off-by: Alan Maguire Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20200628194334.6238b933@oasis.local.home Link: https://lore.kernel.org/bpf/1594641154-18897-2-git-send-email-alan.maguire@oracle.com --- kernel/trace/Makefile | 2 ++ kernel/trace/bpf_trace.c | 42 +++++++++++++++++++++++++++++++++++++----- kernel/trace/bpf_trace.h | 34 ++++++++++++++++++++++++++++++++++ 3 files changed, 73 insertions(+), 5 deletions(-) create mode 100644 kernel/trace/bpf_trace.h diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile index 6575bb0a0434..aeba5ee7325a 100644 --- a/kernel/trace/Makefile +++ b/kernel/trace/Makefile @@ -31,6 +31,8 @@ ifdef CONFIG_GCOV_PROFILE_FTRACE GCOV_PROFILE := y endif +CFLAGS_bpf_trace.o := -I$(src) + CFLAGS_trace_benchmark.o := -I$(src) CFLAGS_trace_events_filter.o := -I$(src) diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index e178e8e32b33..3cc0dcb60ca2 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -20,6 +21,9 @@ #include "trace_probe.h" #include "trace.h" +#define CREATE_TRACE_POINTS +#include "bpf_trace.h" + #define bpf_event_rcu_dereference(p) \ rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex)) @@ -375,6 +379,30 @@ static void bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype, } } +static DEFINE_RAW_SPINLOCK(trace_printk_lock); + +#define BPF_TRACE_PRINTK_SIZE 1024 + +static inline __printf(1, 0) int bpf_do_trace_printk(const char *fmt, ...) 
+{ + static char buf[BPF_TRACE_PRINTK_SIZE]; + unsigned long flags; + va_list ap; + int ret; + + raw_spin_lock_irqsave(&trace_printk_lock, flags); + va_start(ap, fmt); + ret = vsnprintf(buf, sizeof(buf), fmt, ap); + va_end(ap); + /* vsnprintf() will not append null for zero-length strings */ + if (ret == 0) + buf[0] = '\0'; + trace_bpf_trace_printk(buf); + raw_spin_unlock_irqrestore(&trace_printk_lock, flags); + + return ret; +} + /* * Only limited trace_printk() conversion specifiers allowed: * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %pB %pks %pus %s @@ -484,8 +512,7 @@ fmt_next: */ #define __BPF_TP_EMIT() __BPF_ARG3_TP() #define __BPF_TP(...) \ - __trace_printk(0 /* Fake ip */, \ - fmt, ##__VA_ARGS__) + bpf_do_trace_printk(fmt, ##__VA_ARGS__) #define __BPF_ARG1_TP(...) \ ((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64)) \ @@ -522,10 +549,15 @@ static const struct bpf_func_proto bpf_trace_printk_proto = { const struct bpf_func_proto *bpf_get_trace_printk_proto(void) { /* - * this program might be calling bpf_trace_printk, - * so allocate per-cpu printk buffers + * This program might be calling bpf_trace_printk, + * so enable the associated bpf_trace/bpf_trace_printk event. + * Repeat this each time as it is possible a user has + * disabled bpf_trace_printk events. By loading a program + * calling bpf_trace_printk() however the user has expressed + * the intent to see such events. */ - trace_printk_init_buffers(); + if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1)) + pr_warn_ratelimited("could not enable bpf_trace_printk events"); return &bpf_trace_printk_proto; } diff --git a/kernel/trace/bpf_trace.h b/kernel/trace/bpf_trace.h new file mode 100644 index 000000000000..9acbc11ac7bb --- /dev/null +++ b/kernel/trace/bpf_trace.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM bpf_trace + +#if !defined(_TRACE_BPF_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) + +#define _TRACE_BPF_TRACE_H + +#include + +TRACE_EVENT(bpf_trace_printk, + + TP_PROTO(const char *bpf_string), + + TP_ARGS(bpf_string), + + TP_STRUCT__entry( + __string(bpf_string, bpf_string) + ), + + TP_fast_assign( + __assign_str(bpf_string, bpf_string); + ), + + TP_printk("%s", __get_str(bpf_string)) +); + +#endif /* _TRACE_BPF_TRACE_H */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#define TRACE_INCLUDE_FILE bpf_trace + +#include -- cgit v1.2.3 From 59e8b60bf068180fcadb0ae06ce8f6f835132ce6 Mon Sep 17 00:00:00 2001 From: Alan Maguire Date: Mon, 13 Jul 2020 12:52:34 +0100 Subject: selftests/bpf: Add selftests verifying bpf_trace_printk() behaviour Simple selftests that verifies bpf_trace_printk() returns a sensible value and tracing messages appear. 
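Since bpf_trace_printk() output now flows through a dedicated trace event rather than trace_printk(), it can also be inspected by hand. The sketch below is an illustration, not part of the patch; it assumes tracefs is mounted at /sys/kernel/debug/tracing and enables the bpf_trace/bpf_trace_printk event explicitly, which the kernel otherwise does automatically when a program using the helper is loaded:

#include <stdio.h>

/* Turn on the bpf_trace_printk trace event so its output appears in
 * trace_pipe (hypothetical helper; path assumes the usual tracefs mount).
 */
static int enable_bpf_trace_printk_event(void)
{
	const char *path =
		"/sys/kernel/debug/tracing/events/bpf_trace/bpf_trace_printk/enable";
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs("1", f);
	fclose(f);
	return 0;
}

After that, the formatted strings show up in trace_pipe, which is exactly what the selftest below scans for.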
Signed-off-by: Alan Maguire Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/1594641154-18897-3-git-send-email-alan.maguire@oracle.com --- .../selftests/bpf/prog_tests/trace_printk.c | 75 ++++++++++++++++++++++ tools/testing/selftests/bpf/progs/trace_printk.c | 21 ++++++ 2 files changed, 96 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/trace_printk.c create mode 100644 tools/testing/selftests/bpf/progs/trace_printk.c diff --git a/tools/testing/selftests/bpf/prog_tests/trace_printk.c b/tools/testing/selftests/bpf/prog_tests/trace_printk.c new file mode 100644 index 000000000000..39b0decb1bb2 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/trace_printk.c @@ -0,0 +1,75 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020, Oracle and/or its affiliates. */ + +#include + +#include "trace_printk.skel.h" + +#define TRACEBUF "/sys/kernel/debug/tracing/trace_pipe" +#define SEARCHMSG "testing,testing" + +void test_trace_printk(void) +{ + int err, iter = 0, duration = 0, found = 0; + struct trace_printk__bss *bss; + struct trace_printk *skel; + char *buf = NULL; + FILE *fp = NULL; + size_t buflen; + + skel = trace_printk__open(); + if (CHECK(!skel, "skel_open", "failed to open skeleton\n")) + return; + + err = trace_printk__load(skel); + if (CHECK(err, "skel_load", "failed to load skeleton: %d\n", err)) + goto cleanup; + + bss = skel->bss; + + err = trace_printk__attach(skel); + if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err)) + goto cleanup; + + fp = fopen(TRACEBUF, "r"); + if (CHECK(fp == NULL, "could not open trace buffer", + "error %d opening %s", errno, TRACEBUF)) + goto cleanup; + + /* We do not want to wait forever if this test fails... */ + fcntl(fileno(fp), F_SETFL, O_NONBLOCK); + + /* wait for tracepoint to trigger */ + usleep(1); + trace_printk__detach(skel); + + if (CHECK(bss->trace_printk_ran == 0, + "bpf_trace_printk never ran", + "ran == %d", bss->trace_printk_ran)) + goto cleanup; + + if (CHECK(bss->trace_printk_ret <= 0, + "bpf_trace_printk returned <= 0 value", + "got %d", bss->trace_printk_ret)) + goto cleanup; + + /* verify our search string is in the trace buffer */ + while (getline(&buf, &buflen, fp) >= 0 || errno == EAGAIN) { + if (strstr(buf, SEARCHMSG) != NULL) + found++; + if (found == bss->trace_printk_ran) + break; + if (++iter > 1000) + break; + } + + if (CHECK(!found, "message from bpf_trace_printk not found", + "no instance of %s in %s", SEARCHMSG, TRACEBUF)) + goto cleanup; + +cleanup: + trace_printk__destroy(skel); + free(buf); + if (fp) + fclose(fp); +} diff --git a/tools/testing/selftests/bpf/progs/trace_printk.c b/tools/testing/selftests/bpf/progs/trace_printk.c new file mode 100644 index 000000000000..8ca7f399b670 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/trace_printk.c @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2020, Oracle and/or its affiliates. 
+ +#include "vmlinux.h" +#include +#include + +char _license[] SEC("license") = "GPL"; + +int trace_printk_ret = 0; +int trace_printk_ran = 0; + +SEC("tp/raw_syscalls/sys_enter") +int sys_enter(void *ctx) +{ + static const char fmt[] = "testing,testing %d\n"; + + trace_printk_ret = bpf_trace_printk(fmt, sizeof(fmt), + ++trace_printk_ran); + return 0; +} -- cgit v1.2.3 From 7c819e701382d4969ca4837b8cbe157895f5d0bf Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Mon, 13 Jul 2020 16:24:08 -0700 Subject: libbpf: Support stripping modifiers for btf_dump One important use case when emitting const/volatile/restrict is undesirable is BPF skeleton generation of DATASEC layout. These are further memory-mapped and can be written/read from user-space directly. For important case of .rodata variables, bpftool strips away first-level modifiers, to make their use on user-space side simple and not requiring extra type casts to override compiler complaining about writing to const variables. This logic works mostly fine, but breaks in some more complicated cases. E.g.: const volatile int params[10]; Because in BTF it's a chain of ARRAY -> CONST -> VOLATILE -> INT, bpftool stops at ARRAY and doesn't strip CONST and VOLATILE. In skeleton this variable will be emitted as is. So when used from user-space, compiler will complain about writing to const array. This is problematic, as also mentioned in [0]. To solve this for arrays and other non-trivial cases (e.g., inner const/volatile fields inside the struct), teach btf_dump to strip away any modifier, when requested. This is done as an extra option on btf_dump__emit_type_decl() API. Reported-by: Anton Protopopov Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200713232409.3062144-2-andriin@fb.com --- tools/lib/bpf/btf.h | 2 ++ tools/lib/bpf/btf_dump.c | 10 ++++++++-- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h index a3b7ef9b737f..227d3f844801 100644 --- a/tools/lib/bpf/btf.h +++ b/tools/lib/bpf/btf.h @@ -144,6 +144,8 @@ struct btf_dump_emit_type_decl_opts { * necessary indentation already */ int indent_level; + /* strip all the const/volatile/restrict mods */ + bool strip_mods; }; #define btf_dump_emit_type_decl_opts__last_field indent_level diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c index bbb430317260..e1c344504cae 100644 --- a/tools/lib/bpf/btf_dump.c +++ b/tools/lib/bpf/btf_dump.c @@ -60,6 +60,7 @@ struct btf_dump { const struct btf_ext *btf_ext; btf_dump_printf_fn_t printf_fn; struct btf_dump_opts opts; + bool strip_mods; /* per-type auxiliary state */ struct btf_dump_type_aux_state *type_states; @@ -1032,7 +1033,9 @@ int btf_dump__emit_type_decl(struct btf_dump *d, __u32 id, fname = OPTS_GET(opts, field_name, ""); lvl = OPTS_GET(opts, indent_level, 0); + d->strip_mods = OPTS_GET(opts, strip_mods, false); btf_dump_emit_type_decl(d, id, fname, lvl); + d->strip_mods = false; return 0; } @@ -1045,6 +1048,10 @@ static void btf_dump_emit_type_decl(struct btf_dump *d, __u32 id, stack_start = d->decl_stack_cnt; for (;;) { + t = btf__type_by_id(d->btf, id); + if (d->strip_mods && btf_is_mod(t)) + goto skip_mod; + err = btf_dump_push_decl_stack_id(d, id); if (err < 0) { /* @@ -1056,12 +1063,11 @@ static void btf_dump_emit_type_decl(struct btf_dump *d, __u32 id, d->decl_stack_cnt = stack_start; return; } - +skip_mod: /* VOID */ if (id == 0) break; - t = btf__type_by_id(d->btf, id); switch (btf_kind(t)) { case BTF_KIND_PTR: case 
BTF_KIND_VOLATILE: -- cgit v1.2.3 From 0b20933d8cfe2bf6473e9b581b5d1ed9a2117ecc Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Mon, 13 Jul 2020 16:24:09 -0700 Subject: tools/bpftool: Strip away modifiers from global variables Reliably remove all the type modifiers from read-only (.rodata) global variable definitions, including cases of inner field const modifiers and arrays of const values. Also modify one of selftests to ensure that const volatile struct doesn't prevent user-space from modifying .rodata variable. Fixes: 985ead416df3 ("bpftool: Add skeleton codegen command") Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200713232409.3062144-3-andriin@fb.com --- tools/bpf/bpftool/gen.c | 23 ++++++++++------------- tools/lib/bpf/btf.h | 2 +- tools/testing/selftests/bpf/prog_tests/skeleton.c | 6 +++--- tools/testing/selftests/bpf/progs/test_skeleton.c | 6 ++++-- 4 files changed, 18 insertions(+), 19 deletions(-) diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c index 10de76b296ba..b59d26e89367 100644 --- a/tools/bpf/bpftool/gen.c +++ b/tools/bpf/bpftool/gen.c @@ -88,7 +88,7 @@ static const char *get_map_ident(const struct bpf_map *map) return NULL; } -static void codegen_btf_dump_printf(void *ct, const char *fmt, va_list args) +static void codegen_btf_dump_printf(void *ctx, const char *fmt, va_list args) { vprintf(fmt, args); } @@ -104,17 +104,20 @@ static int codegen_datasec_def(struct bpf_object *obj, int i, err, off = 0, pad_cnt = 0, vlen = btf_vlen(sec); const char *sec_ident; char var_ident[256]; + bool strip_mods = false; - if (strcmp(sec_name, ".data") == 0) + if (strcmp(sec_name, ".data") == 0) { sec_ident = "data"; - else if (strcmp(sec_name, ".bss") == 0) + } else if (strcmp(sec_name, ".bss") == 0) { sec_ident = "bss"; - else if (strcmp(sec_name, ".rodata") == 0) + } else if (strcmp(sec_name, ".rodata") == 0) { sec_ident = "rodata"; - else if (strcmp(sec_name, ".kconfig") == 0) + strip_mods = true; + } else if (strcmp(sec_name, ".kconfig") == 0) { sec_ident = "kconfig"; - else + } else { return 0; + } printf(" struct %s__%s {\n", obj_name, sec_ident); for (i = 0; i < vlen; i++, sec_var++) { @@ -123,16 +126,10 @@ static int codegen_datasec_def(struct bpf_object *obj, DECLARE_LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts, .field_name = var_ident, .indent_level = 2, + .strip_mods = strip_mods, ); int need_off = sec_var->offset, align_off, align; __u32 var_type_id = var->type; - const struct btf_type *t; - - t = btf__type_by_id(btf, var_type_id); - while (btf_is_mod(t)) { - var_type_id = t->type; - t = btf__type_by_id(btf, var_type_id); - } if (off > need_off) { p_err("Something is wrong for %s's variable #%d: need offset %d, already at %d.\n", diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h index 227d3f844801..491c7b41ffdc 100644 --- a/tools/lib/bpf/btf.h +++ b/tools/lib/bpf/btf.h @@ -147,7 +147,7 @@ struct btf_dump_emit_type_decl_opts { /* strip all the const/volatile/restrict mods */ bool strip_mods; }; -#define btf_dump_emit_type_decl_opts__last_field indent_level +#define btf_dump_emit_type_decl_opts__last_field strip_mods LIBBPF_API int btf_dump__emit_type_decl(struct btf_dump *d, __u32 id, diff --git a/tools/testing/selftests/bpf/prog_tests/skeleton.c b/tools/testing/selftests/bpf/prog_tests/skeleton.c index fa153cf67b1b..fe87b77af459 100644 --- a/tools/testing/selftests/bpf/prog_tests/skeleton.c +++ b/tools/testing/selftests/bpf/prog_tests/skeleton.c @@ -41,7 +41,7 @@ void test_skeleton(void) 
CHECK(bss->in4 != 0, "in4", "got %lld != exp %lld\n", bss->in4, 0LL); CHECK(bss->out4 != 0, "out4", "got %lld != exp %lld\n", bss->out4, 0LL); - CHECK(rodata->in6 != 0, "in6", "got %d != exp %d\n", rodata->in6, 0); + CHECK(rodata->in.in6 != 0, "in6", "got %d != exp %d\n", rodata->in.in6, 0); CHECK(bss->out6 != 0, "out6", "got %d != exp %d\n", bss->out6, 0); /* validate we can pre-setup global variables, even in .bss */ @@ -49,7 +49,7 @@ void test_skeleton(void) data->in2 = 11; bss->in3 = 12; bss->in4 = 13; - rodata->in6 = 14; + rodata->in.in6 = 14; err = test_skeleton__load(skel); if (CHECK(err, "skel_load", "failed to load skeleton: %d\n", err)) @@ -60,7 +60,7 @@ void test_skeleton(void) CHECK(data->in2 != 11, "in2", "got %lld != exp %lld\n", data->in2, 11LL); CHECK(bss->in3 != 12, "in3", "got %d != exp %d\n", bss->in3, 12); CHECK(bss->in4 != 13, "in4", "got %lld != exp %lld\n", bss->in4, 13LL); - CHECK(rodata->in6 != 14, "in6", "got %d != exp %d\n", rodata->in6, 14); + CHECK(rodata->in.in6 != 14, "in6", "got %d != exp %d\n", rodata->in.in6, 14); /* now set new values and attach to get them into outX variables */ data->in1 = 1; diff --git a/tools/testing/selftests/bpf/progs/test_skeleton.c b/tools/testing/selftests/bpf/progs/test_skeleton.c index 77ae86f44db5..374ccef704e1 100644 --- a/tools/testing/selftests/bpf/progs/test_skeleton.c +++ b/tools/testing/selftests/bpf/progs/test_skeleton.c @@ -20,7 +20,9 @@ long long in4 __attribute__((aligned(64))) = 0; struct s in5 = {}; /* .rodata section */ -const volatile int in6 = 0; +const volatile struct { + const int in6; +} in = {}; /* .data section */ int out1 = -1; @@ -46,7 +48,7 @@ int handler(const void *ctx) out3 = in3; out4 = in4; out5 = in5; - out6 = in6; + out6 = in.in6; bpf_syscall = CONFIG_BPF_SYSCALL; kern_ver = LINUX_KERNEL_VERSION; -- cgit v1.2.3
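To make the effect of stripping modifiers concrete, here is a sketch of the user-space side. It is illustrative only and assumes the generated test_skeleton.skel.h header from the selftest above; with the const/volatile qualifiers dropped from the emitted .rodata declaration, rodata->in.in6 is a plain int and can be assigned before load without casts:

#include "test_skeleton.skel.h"

/* Set a .rodata value before the object is loaded; after load the
 * underlying map is frozen and further writes from user space fail.
 */
static int run_with_rodata(void)
{
	struct test_skeleton *skel;
	int err;

	skel = test_skeleton__open();
	if (!skel)
		return -1;

	skel->rodata->in.in6 = 14;	/* writable between open and load */

	err = test_skeleton__load(skel);
	if (err)
		goto out;

	err = test_skeleton__attach(skel);
out:
	test_skeleton__destroy(skel);
	return err;
}

Before this change the generated skeleton kept the const volatile qualifiers on such variables, so the same assignment drew a compiler error unless it was cast away.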