 samples/bpf/Makefile           |  42 ++++++++-
 samples/bpf/xdp_monitor.bpf.c  |   8 +
 samples/bpf/xdp_monitor_kern.c | 257 ----------------------
 3 files changed, 49 insertions(+), 258 deletions(-)
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index ff1932e16bc5..0d7086a2a393 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -164,7 +164,6 @@ always-y += xdp_redirect_kern.o
always-y += xdp_redirect_map_kern.o
always-y += xdp_redirect_map_multi_kern.o
always-y += xdp_redirect_cpu_kern.o
-always-y += xdp_monitor_kern.o
always-y += xdp_rxq_info_kern.o
always-y += xdp2skb_meta_kern.o
always-y += syscall_tp_kern.o
@@ -338,6 +337,47 @@ endif
clean-files += vmlinux.h
+# Get Clang's default includes on this system, as opposed to those seen by
+# '-target bpf'. This fixes "missing" files on some architectures/distros,
+# such as asm/byteorder.h, asm/socket.h, asm/sockios.h, sys/cdefs.h etc.
+#
+# Use '-idirafter': Don't interfere with include mechanics except where the
+# build would have failed anyways.
+define get_sys_includes
+$(shell $(1) -v -E - </dev/null 2>&1 \
+ | sed -n '/<...> search starts here:/,/End of search list./{ s| \(/.*\)|-idirafter \1|p }') \
+$(shell $(1) -dM -E - </dev/null | grep '#define __riscv_xlen ' | sed 's/#define /-D/' | sed 's/ /=/')
+endef
+
+CLANG_SYS_INCLUDES = $(call get_sys_includes,$(CLANG))
+
+$(obj)/xdp_monitor.bpf.o: $(obj)/xdp_sample.bpf.o
+
+$(obj)/%.bpf.o: $(src)/%.bpf.c $(obj)/vmlinux.h $(src)/xdp_sample.bpf.h $(src)/xdp_sample_shared.h
+ @echo " CLANG-BPF " $@
+ $(Q)$(CLANG) -g -O2 -target bpf -D__TARGET_ARCH_$(SRCARCH) \
+ -Wno-compare-distinct-pointer-types -I$(srctree)/include \
+ -I$(srctree)/samples/bpf -I$(srctree)/tools/include \
+ -I$(srctree)/tools/lib $(CLANG_SYS_INCLUDES) \
+ -c $(filter %.bpf.c,$^) -o $@
+
+LINKED_SKELS := xdp_monitor.skel.h
+clean-files += $(LINKED_SKELS)
+
+xdp_monitor.skel.h-deps := xdp_monitor.bpf.o xdp_sample.bpf.o
+
+LINKED_BPF_SRCS := $(patsubst %.bpf.o,%.bpf.c,$(foreach skel,$(LINKED_SKELS),$($(skel)-deps)))
+
+BPF_SRCS_LINKED := $(notdir $(wildcard $(src)/*.bpf.c))
+BPF_OBJS_LINKED := $(patsubst %.bpf.c,$(obj)/%.bpf.o, $(BPF_SRCS_LINKED))
+BPF_SKELS_LINKED := $(addprefix $(obj)/,$(LINKED_SKELS))
+
+$(BPF_SKELS_LINKED): $(BPF_OBJS_LINKED) $(BPFTOOL)
+ @echo " BPF GEN-OBJ " $(@:.skel.h=)
+ $(Q)$(BPFTOOL) gen object $(@:.skel.h=.lbpf.o) $(addprefix $(obj)/,$($(@F)-deps))
+ @echo " BPF GEN-SKEL" $(@:.skel.h=)
+ $(Q)$(BPFTOOL) gen skeleton $(@:.skel.h=.lbpf.o) name $(notdir $(@:.skel.h=)) > $@
+
# asm/sysreg.h - inline assembly used by it is incompatible with llvm.
# But, there is no easy way to fix it, so just exclude it since it is
# useless for BPF samples.
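
[Editor's note, not part of the patch: the Makefile hunk above links xdp_monitor.bpf.o and xdp_sample.bpf.o into a single object with "bpftool gen object" and then emits a skeleton header with "bpftool gen skeleton". A minimal sketch of how a userspace loader typically consumes the generated xdp_monitor.skel.h follows; the xdp_monitor__* calls are libbpf's standard skeleton naming convention, and which programs actually attach depends on xdp_sample.bpf.c, which is not shown in this diff.]

/* Hedged sketch: assumes the xdp_monitor.skel.h generated by the rules above. */
#include <stdio.h>
#include "xdp_monitor.skel.h"

int main(void)
{
	struct xdp_monitor *skel;
	int err;

	skel = xdp_monitor__open_and_load();	/* open BPF object and load it into the kernel */
	if (!skel) {
		fprintf(stderr, "failed to open/load skeleton\n");
		return 1;
	}

	err = xdp_monitor__attach(skel);	/* attach all auto-attachable programs */
	if (err) {
		fprintf(stderr, "failed to attach: %d\n", err);
		xdp_monitor__destroy(skel);
		return 1;
	}

	/* ... poll the shared stats maps here ... */

	xdp_monitor__destroy(skel);		/* detach programs and unload the object */
	return 0;
}
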
diff --git a/samples/bpf/xdp_monitor.bpf.c b/samples/bpf/xdp_monitor.bpf.c
new file mode 100644
index 000000000000..cfb41e2205f4
--- /dev/null
+++ b/samples/bpf/xdp_monitor.bpf.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2017-2018 Jesper Dangaard Brouer, Red Hat Inc.
+ *
+ * XDP monitor tool, based on tracepoints
+ */
+#include "xdp_sample.bpf.h"
+
+char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/xdp_monitor_kern.c b/samples/bpf/xdp_monitor_kern.c
deleted file mode 100644
index 5c955b812c47..000000000000
--- a/samples/bpf/xdp_monitor_kern.c
+++ /dev/null
@@ -1,257 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Copyright(c) 2017-2018 Jesper Dangaard Brouer, Red Hat Inc.
- *
- * XDP monitor tool, based on tracepoints
- */
-#include <uapi/linux/bpf.h>
-#include <bpf/bpf_helpers.h>
-
-struct {
- __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
- __type(key, u32);
- __type(value, u64);
- __uint(max_entries, 2);
- /* TODO: have entries for all possible errno's */
-} redirect_err_cnt SEC(".maps");
-
-#define XDP_UNKNOWN XDP_REDIRECT + 1
-struct {
- __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
- __type(key, u32);
- __type(value, u64);
- __uint(max_entries, XDP_UNKNOWN + 1);
-} exception_cnt SEC(".maps");
-
-/* Tracepoint format: /sys/kernel/debug/tracing/events/xdp/xdp_redirect/format
- * Code in: kernel/include/trace/events/xdp.h
- */
-struct xdp_redirect_ctx {
- u64 __pad; // First 8 bytes are not accessible by bpf code
- int prog_id; // offset:8; size:4; signed:1;
- u32 act; // offset:12 size:4; signed:0;
- int ifindex; // offset:16 size:4; signed:1;
- int err; // offset:20 size:4; signed:1;
- int to_ifindex; // offset:24 size:4; signed:1;
- u32 map_id; // offset:28 size:4; signed:0;
- int map_index; // offset:32 size:4; signed:1;
-}; // offset:36
-
-enum {
- XDP_REDIRECT_SUCCESS = 0,
- XDP_REDIRECT_ERROR = 1
-};
-
-static __always_inline
-int xdp_redirect_collect_stat(struct xdp_redirect_ctx *ctx)
-{
- u32 key = XDP_REDIRECT_ERROR;
- int err = ctx->err;
- u64 *cnt;
-
- if (!err)
- key = XDP_REDIRECT_SUCCESS;
-
- cnt = bpf_map_lookup_elem(&redirect_err_cnt, &key);
- if (!cnt)
- return 1;
- *cnt += 1;
-
- return 0; /* Indicate event was filtered (no further processing)*/
- /*
- * Returning 1 here would allow e.g. a perf-record tracepoint
- * to see and record these events, but it doesn't work well
- * in-practice as stopping perf-record also unload this
- * bpf_prog. Plus, there is additional overhead of doing so.
- */
-}
-
-SEC("tracepoint/xdp/xdp_redirect_err")
-int trace_xdp_redirect_err(struct xdp_redirect_ctx *ctx)
-{
- return xdp_redirect_collect_stat(ctx);
-}
-
-
-SEC("tracepoint/xdp/xdp_redirect_map_err")
-int trace_xdp_redirect_map_err(struct xdp_redirect_ctx *ctx)
-{
- return xdp_redirect_collect_stat(ctx);
-}
-
-/* Likely unloaded when prog starts */
-SEC("tracepoint/xdp/xdp_redirect")
-int trace_xdp_redirect(struct xdp_redirect_ctx *ctx)
-{
- return xdp_redirect_collect_stat(ctx);
-}
-
-/* Likely unloaded when prog starts */
-SEC("tracepoint/xdp/xdp_redirect_map")
-int trace_xdp_redirect_map(struct xdp_redirect_ctx *ctx)
-{
- return xdp_redirect_collect_stat(ctx);
-}
-
-/* Tracepoint format: /sys/kernel/debug/tracing/events/xdp/xdp_exception/format
- * Code in: kernel/include/trace/events/xdp.h
- */
-struct xdp_exception_ctx {
- u64 __pad; // First 8 bytes are not accessible by bpf code
- int prog_id; // offset:8; size:4; signed:1;
- u32 act; // offset:12; size:4; signed:0;
- int ifindex; // offset:16; size:4; signed:1;
-};
-
-SEC("tracepoint/xdp/xdp_exception")
-int trace_xdp_exception(struct xdp_exception_ctx *ctx)
-{
- u64 *cnt;
- u32 key;
-
- key = ctx->act;
- if (key > XDP_REDIRECT)
- key = XDP_UNKNOWN;
-
- cnt = bpf_map_lookup_elem(&exception_cnt, &key);
- if (!cnt)
- return 1;
- *cnt += 1;
-
- return 0;
-}
-
-/* Common stats data record shared with _user.c */
-struct datarec {
- u64 processed;
- u64 dropped;
- u64 info;
- u64 err;
-};
-#define MAX_CPUS 64
-
-struct {
- __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
- __type(key, u32);
- __type(value, struct datarec);
- __uint(max_entries, MAX_CPUS);
-} cpumap_enqueue_cnt SEC(".maps");
-
-struct {
- __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
- __type(key, u32);
- __type(value, struct datarec);
- __uint(max_entries, 1);
-} cpumap_kthread_cnt SEC(".maps");
-
-/* Tracepoint: /sys/kernel/debug/tracing/events/xdp/xdp_cpumap_enqueue/format
- * Code in: kernel/include/trace/events/xdp.h
- */
-struct cpumap_enqueue_ctx {
- u64 __pad; // First 8 bytes are not accessible by bpf code
- int map_id; // offset:8; size:4; signed:1;
- u32 act; // offset:12; size:4; signed:0;
- int cpu; // offset:16; size:4; signed:1;
- unsigned int drops; // offset:20; size:4; signed:0;
- unsigned int processed; // offset:24; size:4; signed:0;
- int to_cpu; // offset:28; size:4; signed:1;
-};
-
-SEC("tracepoint/xdp/xdp_cpumap_enqueue")
-int trace_xdp_cpumap_enqueue(struct cpumap_enqueue_ctx *ctx)
-{
- u32 to_cpu = ctx->to_cpu;
- struct datarec *rec;
-
- if (to_cpu >= MAX_CPUS)
- return 1;
-
- rec = bpf_map_lookup_elem(&cpumap_enqueue_cnt, &to_cpu);
- if (!rec)
- return 0;
- rec->processed += ctx->processed;
- rec->dropped += ctx->drops;
-
- /* Record bulk events, then userspace can calc average bulk size */
- if (ctx->processed > 0)
- rec->info += 1;
-
- return 0;
-}
-
-/* Tracepoint: /sys/kernel/debug/tracing/events/xdp/xdp_cpumap_kthread/format
- * Code in: kernel/include/trace/events/xdp.h
- */
-struct cpumap_kthread_ctx {
- u64 __pad; // First 8 bytes are not accessible by bpf code
- int map_id; // offset:8; size:4; signed:1;
- u32 act; // offset:12; size:4; signed:0;
- int cpu; // offset:16; size:4; signed:1;
- unsigned int drops; // offset:20; size:4; signed:0;
- unsigned int processed; // offset:24; size:4; signed:0;
- int sched; // offset:28; size:4; signed:1;
-};
-
-SEC("tracepoint/xdp/xdp_cpumap_kthread")
-int trace_xdp_cpumap_kthread(struct cpumap_kthread_ctx *ctx)
-{
- struct datarec *rec;
- u32 key = 0;
-
- rec = bpf_map_lookup_elem(&cpumap_kthread_cnt, &key);
- if (!rec)
- return 0;
- rec->processed += ctx->processed;
- rec->dropped += ctx->drops;
-
- /* Count times kthread yielded CPU via schedule call */
- if (ctx->sched)
- rec->info++;
-
- return 0;
-}
-
-struct {
- __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
- __type(key, u32);
- __type(value, struct datarec);
- __uint(max_entries, 1);
-} devmap_xmit_cnt SEC(".maps");
-
-/* Tracepoint: /sys/kernel/debug/tracing/events/xdp/xdp_devmap_xmit/format
- * Code in: kernel/include/trace/events/xdp.h
- */
-struct devmap_xmit_ctx {
- u64 __pad; // First 8 bytes are not accessible by bpf code
- int from_ifindex; // offset:8; size:4; signed:1;
- u32 act; // offset:12; size:4; signed:0;
- int to_ifindex; // offset:16; size:4; signed:1;
- int drops; // offset:20; size:4; signed:1;
- int sent; // offset:24; size:4; signed:1;
- int err; // offset:28; size:4; signed:1;
-};
-
-SEC("tracepoint/xdp/xdp_devmap_xmit")
-int trace_xdp_devmap_xmit(struct devmap_xmit_ctx *ctx)
-{
- struct datarec *rec;
- u32 key = 0;
-
- rec = bpf_map_lookup_elem(&devmap_xmit_cnt, &key);
- if (!rec)
- return 0;
- rec->processed += ctx->sent;
- rec->dropped += ctx->drops;
-
- /* Record bulk events, then userspace can calc average bulk size */
- rec->info += 1;
-
- /* Record error cases, where no frame were sent */
- if (ctx->err)
- rec->err++;
-
- /* Catch API error of drv ndo_xdp_xmit sent more than count */
- if (ctx->drops < 0)
- rec->err++;
-
- return 1;
-}
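
[Editor's note, not part of the patch: the deleted _kern.c variant exported its counters through BPF_MAP_TYPE_PERCPU_ARRAY maps that the old xdp_monitor_user.c had to sum by hand. A minimal sketch of that per-CPU read pattern follows; "map_fd" is assumed to be the fd of one of the maps above, e.g. devmap_xmit_cnt, and libbpf returns one value per possible CPU.]

/* Hedged sketch of the userspace per-CPU read pattern implied by the deleted maps. */
#include <linux/types.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

struct datarec {
	__u64 processed;
	__u64 dropped;
	__u64 info;
	__u64 err;
};

static int sum_datarec(int map_fd, __u32 key, struct datarec *sum)
{
	int nr_cpus = libbpf_num_possible_cpus();
	int i, err;

	if (nr_cpus < 0)
		return nr_cpus;

	struct datarec values[nr_cpus];

	err = bpf_map_lookup_elem(map_fd, &key, values);	/* fills one entry per possible CPU */
	if (err)
		return err;

	for (i = 0; i < nr_cpus; i++) {		/* fold per-CPU counters into a single record */
		sum->processed += values[i].processed;
		sum->dropped   += values[i].dropped;
		sum->info      += values[i].info;
		sum->err       += values[i].err;
	}
	return 0;
}
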