author     Lorenzo Bianconi <lorenzo@kernel.org>    2020-07-14 16:56:38 +0300
committer  Daniel Borkmann <daniel@iogearbox.net>   2020-07-16 18:00:32 +0300
commit     9216477449f33cdbc9c9a99d49f500b7fbb81702 (patch)
tree       e4ee557ea02621c4b98694a53f3b94c4e03cdc73 /include
parent     644bfe51fa49c22244d24e896cd3fe3ee2f2cfd1 (diff)
bpf: cpumap: Add the possibility to attach an eBPF program to cpumap
Introduce the capability to attach an eBPF program to cpumap entries. The idea behind this feature is to make it possible to define on which CPU the eBPF program should run if the underlying hw does not support RSS. The currently supported verdicts are XDP_DROP and XDP_PASS.

This patch has been tested on a Marvell ESPRESSObin using the xdp_redirect_cpu sample available in the kernel tree, in order to identify possible performance regressions. The results show no observable difference in packets per second:

$ ./xdp_redirect_cpu --progname xdp_cpu_map0 --dev eth0 --cpu 1
rx: 354.8 Kpps
rx: 356.0 Kpps
rx: 356.8 Kpps
rx: 356.3 Kpps
rx: 356.6 Kpps
rx: 356.6 Kpps
rx: 356.7 Kpps
rx: 355.8 Kpps
rx: 356.8 Kpps
rx: 356.8 Kpps

Co-developed-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Link: https://lore.kernel.org/bpf/5c9febdf903d810b3415732e5cd98491d7d9067a.1594734381.git.lorenzo@kernel.org
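[Editor's illustration, not part of the patch: a minimal sketch of the kind of program that could live in a cpumap entry. Since only XDP_DROP and XDP_PASS are honoured there, such a program filters frames before they continue up the stack on the remote CPU. The section name, the filter logic, and the program name are assumptions for the example; the program has to be loaded with expected_attach_type BPF_XDP_CPUMAP, which libbpf of this era derived from the "xdp_cpumap/" section prefix.]

/* Illustrative cpumap entry program: drop frames shorter than an
 * Ethernet header, pass everything else up the stack.
 * Compile with: clang -O2 -target bpf -c cpumap_filter.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp_cpumap/filter")
int cpumap_filter(struct xdp_md *ctx)
{
	void *data     = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;

	if (data + 14 > data_end)	/* 14 = Ethernet header size */
		return XDP_DROP;

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";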
Diffstat (limited to 'include')
-rw-r--r--  include/linux/bpf.h          6
-rw-r--r--  include/net/xdp.h            5
-rw-r--r--  include/trace/events/xdp.h  14
-rw-r--r--  include/uapi/linux/bpf.h     5
4 files changed, 26 insertions, 4 deletions
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index c67c88ad35f8..54ad426dbea1 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1272,6 +1272,7 @@ struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
void __cpu_map_flush(void);
int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
struct net_device *dev_rx);
+bool cpu_map_prog_allowed(struct bpf_map *map);

/* Return map's numa specified by userspace */
static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
@@ -1432,6 +1433,11 @@ static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
return 0;
}

+static inline bool cpu_map_prog_allowed(struct bpf_map *map)
+{
+ return false;
+}
+
static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
enum bpf_prog_type type)
{
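[Editor's aside, illustrative rather than from this patch: cpu_map_prog_allowed() reports whether a map is a cpumap whose entries may carry their own program, and the !CONFIG_BPF_SYSCALL stub above simply answers "no". A plausible caller, sketched with hypothetical names below, is a setup path that must refuse configurations it cannot honour, e.g. generic (skb-mode) XDP, which cannot run per-entry programs.]

/* Hypothetical caller: walk the maps referenced by a program and
 * fail if any cpumap may have entry programs attached. */
static int generic_xdp_check_maps(struct bpf_map **maps, u32 cnt)
{
	u32 i;

	for (i = 0; i < cnt; i++)
		if (cpu_map_prog_allowed(maps[i]))
			return -EINVAL;	/* needs native XDP */

	return 0;
}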
diff --git a/include/net/xdp.h b/include/net/xdp.h
index 5b383c450858..83b9e0142b52 100644
--- a/include/net/xdp.h
+++ b/include/net/xdp.h
@@ -98,6 +98,11 @@ struct xdp_frame {
struct net_device *dev_rx; /* used by cpumap */
};

+struct xdp_cpumap_stats {
+ unsigned int pass;
+ unsigned int drop;
+};
+
/* Clear kernel pointers in xdp_frame */
static inline void xdp_scrub_frame(struct xdp_frame *frame)
{
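[Editor's illustration of the new struct's role; this is a kernel-context sketch, not code from the patch, and the function name is hypothetical: the cpumap kthread could run the attached program on one frame and account for the verdict in struct xdp_cpumap_stats, which the extended tracepoint below then reports.]

#include <linux/filter.h>	/* bpf_prog_run_xdp() */
#include <net/xdp.h>		/* struct xdp_buff, xdp_cpumap_stats */

static u32 cpu_map_run_one(struct bpf_prog *prog, struct xdp_buff *xdp,
			   struct xdp_cpumap_stats *stats)
{
	u32 act = bpf_prog_run_xdp(prog, xdp);

	switch (act) {
	case XDP_PASS:
		stats->pass++;
		break;
	case XDP_DROP:
	default:
		/* only DROP and PASS are supported in cpumap, so
		 * anything else counts as a drop */
		stats->drop++;
		break;
	}

	return act;
}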
diff --git a/include/trace/events/xdp.h b/include/trace/events/xdp.h
index b73d3e141323..e2c99f5bee39 100644
--- a/include/trace/events/xdp.h
+++ b/include/trace/events/xdp.h
@@ -177,9 +177,9 @@ DEFINE_EVENT(xdp_redirect_template, xdp_redirect_map_err,
TRACE_EVENT(xdp_cpumap_kthread,

TP_PROTO(int map_id, unsigned int processed, unsigned int drops,
- int sched),
+ int sched, struct xdp_cpumap_stats *xdp_stats),

- TP_ARGS(map_id, processed, drops, sched),
+ TP_ARGS(map_id, processed, drops, sched, xdp_stats),

TP_STRUCT__entry(
__field(int, map_id)
@@ -188,6 +188,8 @@ TRACE_EVENT(xdp_cpumap_kthread,
__field(unsigned int, drops)
__field(unsigned int, processed)
__field(int, sched)
+ __field(unsigned int, xdp_pass)
+ __field(unsigned int, xdp_drop)
),

TP_fast_assign(
@@ -197,16 +199,20 @@ TRACE_EVENT(xdp_cpumap_kthread,
__entry->drops = drops;
__entry->processed = processed;
__entry->sched = sched;
+ __entry->xdp_pass = xdp_stats->pass;
+ __entry->xdp_drop = xdp_stats->drop;
),

TP_printk("kthread"
" cpu=%d map_id=%d action=%s"
" processed=%u drops=%u"
- " sched=%d",
+ " sched=%d"
+ " xdp_pass=%u xdp_drop=%u",
__entry->cpu, __entry->map_id,
__print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
__entry->processed, __entry->drops,
- __entry->sched)
+ __entry->sched,
+ __entry->xdp_pass, __entry->xdp_drop)
);

TRACE_EVENT(xdp_cpumap_enqueue,
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 109623527358..c010b57fce3f 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -227,6 +227,7 @@ enum bpf_attach_type {
BPF_CGROUP_INET6_GETSOCKNAME,
BPF_XDP_DEVMAP,
BPF_CGROUP_INET_SOCK_RELEASE,
+ BPF_XDP_CPUMAP,
__MAX_BPF_ATTACH_TYPE
};
@@ -3856,6 +3857,10 @@ struct bpf_devmap_val {
*/
struct bpf_cpumap_val {
__u32 qsize; /* queue size to remote target CPU */
+ union {
+ int fd; /* prog fd on map write */
+ __u32 id; /* prog id on map read */
+ } bpf_prog;
};

enum sk_action {
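[Editor's illustration of the new UAPI: on map write the bpf_prog.fd member carries the program fd alongside the queue size, while map reads report bpf_prog.id instead. The sketch below uses libbpf's bpf_map_update_elem(); the wrapper function is an assumption for the example, and prog_fd must refer to a program loaded with expected_attach_type BPF_XDP_CPUMAP.]

/* Sketch: configure one cpumap entry with both a queue size and an
 * attached program, using the extended bpf_cpumap_val layout. */
#include <linux/bpf.h>
#include <bpf/bpf.h>

static int cpumap_set_entry(int cpumap_fd, __u32 cpu,
			    __u32 qsize, int prog_fd)
{
	struct bpf_cpumap_val val = {
		.qsize       = qsize,	/* ring size toward the remote CPU */
		.bpf_prog.fd = prog_fd,	/* fd on write; reads return the id */
	};

	return bpf_map_update_elem(cpumap_fd, &cpu, &val, 0);
}

A plain qsize-only update keeps working: leaving bpf_prog.fd at zero configures the entry without a program, preserving the old behaviour.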