path: root/tools/lib/bpf/xsk.c
author     Fletcher Dunn <fletcherd@valvesoftware.com>    2020-03-27 06:24:07 +0300
committer  Daniel Borkmann <daniel@iogearbox.net>         2020-03-28 19:12:56 +0300
commit     291cfe365bd3d5002a9c49e9fca8f30136eebc28 (patch)
tree       1470d902a48b8056ab3006894c8865e724322e0b /tools/lib/bpf/xsk.c
parent     2cf69d3fa50e15fd628c512a7d34ad759ac476cb (diff)
download   linux-291cfe365bd3d5002a9c49e9fca8f30136eebc28.tar.xz
libbpf, xsk: Init all ring members in xsk_umem__create and xsk_socket__create
Fix a sharp edge in xsk_umem__create and xsk_socket__create. Almost all of the members of the ring buffer structs are initialized, but the "cached_xxx" variables are not all initialized; the caller is required to zero them. This is needlessly dangerous, and the consequences of not doing it can be severe. For example, it can cause xsk_prod_nb_free and xsk_cons_nb_avail to return values greater than the size of the queue, and xsk_ring_cons__peek can return an index that does not refer to an item that has been queued.

I have confirmed that without this change, my program misbehaves unless I memset the ring buffers to zero before calling these functions. With the change, my program works whether or not it does the memset.

Signed-off-by: Fletcher Dunn <fletcherd@valvesoftware.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Magnus Karlsson <magnus.karlsson@intel.com>
Link: https://lore.kernel.org/bpf/85f12913cde94b19bfcb598344701c38@valvesoftware.com
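For context, the cached indices this patch initializes feed the free/available computations in the AF_XDP ring helpers. The sketch below is a simplified rendering of that logic, not the libbpf source verbatim; the demo_* names are illustrative stand-ins (see xsk_prod_nb_free and xsk_cons_nb_avail in tools/lib/bpf/xsk.h for the real code). It shows why cached_cons must start out as *consumer plus the ring size, and cached_prod as *producer, rather than whatever happens to be in the caller's memory.

/* Simplified sketch of the cached-index logic; illustrative only, not
 * the libbpf source verbatim.  The arithmetic relies on unsigned
 * 32-bit wraparound.
 */
#include <linux/types.h>

struct demo_ring {
	__u32 cached_prod;
	__u32 cached_cons;	/* producers keep this "size" ahead of *consumer */
	__u32 size;
	__u32 *producer;
	__u32 *consumer;
};

/* How many entries a producer may still write (cf. xsk_prod_nb_free). */
static __u32 demo_prod_nb_free(struct demo_ring *r, __u32 nb)
{
	__u32 free_entries = r->cached_cons - r->cached_prod;

	if (free_entries >= nb)
		return free_entries;

	/* Refresh the cached consumer index, keeping it r->size ahead of
	 * the real consumer pointer so the fast path above can skip the
	 * "+ r->size".  If cached_cons was never set to *consumer + size,
	 * the difference computed above can exceed the ring size.
	 */
	r->cached_cons = *r->consumer + r->size;

	return r->cached_cons - r->cached_prod;
}

/* How many entries a consumer may read (cf. xsk_cons_nb_avail). */
static __u32 demo_cons_nb_avail(struct demo_ring *r, __u32 nb)
{
	__u32 entries = r->cached_prod - r->cached_cons;

	if (entries == 0) {
		r->cached_prod = *r->producer;	/* refresh from the shared ring */
		entries = r->cached_prod - r->cached_cons;
	}

	return (entries > nb) ? nb : entries;
}

With stale or random values in the cached members, demo_prod_nb_free() can report more free slots than the ring holds and demo_cons_nb_avail() can report entries that were never produced, which is exactly the failure mode described above.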
Diffstat (limited to 'tools/lib/bpf/xsk.c')
-rw-r--r--   tools/lib/bpf/xsk.c   16
1 file changed, 14 insertions, 2 deletions
diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c
index 9807903f121e..f7f4efb70a4c 100644
--- a/tools/lib/bpf/xsk.c
+++ b/tools/lib/bpf/xsk.c
@@ -280,7 +280,11 @@ int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area,
fill->consumer = map + off.fr.consumer;
fill->flags = map + off.fr.flags;
fill->ring = map + off.fr.desc;
- fill->cached_cons = umem->config.fill_size;
+ fill->cached_prod = *fill->producer;
+ /* cached_cons is "size" bigger than the real consumer pointer
+  * See xsk_prod_nb_free
+  */
+ fill->cached_cons = *fill->consumer + umem->config.fill_size;
map = mmap(NULL, off.cr.desc + umem->config.comp_size * sizeof(__u64),
PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, umem->fd,
@@ -297,6 +301,8 @@ int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area,
comp->consumer = map + off.cr.consumer;
comp->flags = map + off.cr.flags;
comp->ring = map + off.cr.desc;
+ comp->cached_prod = *comp->producer;
+ comp->cached_cons = *comp->consumer;
*umem_ptr = umem;
return 0;
@@ -672,6 +678,8 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
rx->consumer = rx_map + off.rx.consumer;
rx->flags = rx_map + off.rx.flags;
rx->ring = rx_map + off.rx.desc;
+ rx->cached_prod = *rx->producer;
+ rx->cached_cons = *rx->consumer;
}
xsk->rx = rx;
@@ -691,7 +699,11 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
tx->consumer = tx_map + off.tx.consumer;
tx->flags = tx_map + off.tx.flags;
tx->ring = tx_map + off.tx.desc;
- tx->cached_cons = xsk->config.tx_size;
+ tx->cached_prod = *tx->producer;
+ /* cached_cons is r->size bigger than the real consumer pointer
+  * See xsk_prod_nb_free
+  */
+ tx->cached_cons = *tx->consumer + xsk->config.tx_size;
}
xsk->tx = tx;
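As a usage note, the sketch below (a hypothetical caller helper, not part of the commit or of libbpf) illustrates the workaround mentioned in the commit message: before this fix, zeroing the ring structs before xsk_umem__create() was the way to avoid stale cached indices; with the fix, the library initializes them itself and the memset becomes redundant.

/* Hypothetical caller sketch; setup_umem() is not a libbpf API. */
#include <string.h>
#include <bpf/xsk.h>

static int setup_umem(void *buf, __u64 size, struct xsk_umem **umem,
		      struct xsk_ring_prod *fill, struct xsk_ring_cons *comp)
{
	/* Before this commit the memset was needed so the cached_* members
	 * did not start out as garbage; after it, xsk_umem__create()
	 * initializes them and the memset is merely harmless.
	 */
	memset(fill, 0, sizeof(*fill));
	memset(comp, 0, sizeof(*comp));

	return xsk_umem__create(umem, buf, size, fill, comp, NULL);
}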