author	Jalal Mostafa <jalal.a.mostapha@gmail.com>	2022-09-21 16:57:01 +0300
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2022-10-15 08:55:51 +0300
commit	132590d776e26491fa85af727f7980ac24563337 (patch)
tree	03aad2c7e4b7899422cf2ba0caf64943dff42b00 /net/xdp
parent	beffc38dc6b208dd32957299e4f83a2f5731d0f9 (diff)
download	linux-132590d776e26491fa85af727f7980ac24563337.tar.xz
xsk: Inherit need_wakeup flag for shared sockets
commit 60240bc26114543fcbfcd8a28466e67e77b20388 upstream.

The need_wakeup flag is not set for xsks that are bound with the
XDP_SHARED_UMEM flag to a different queue id and/or device. Such
sockets should inherit the flag from the first socket's buffer pool,
since no other flags can be specified once XDP_SHARED_UMEM is given.

Fixes: b5aea28dca134 ("xsk: Add shared umem support between queue ids")
Signed-off-by: Jalal Mostafa <jalal.a.mostapha@gmail.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Magnus Karlsson <magnus.karlsson@intel.com>
Link: https://lore.kernel.org/bpf/20220921135701.10199-1-jalal.a.mostapha@gmail.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
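For context, here is a minimal userspace sketch (not part of this patch;
error handling and ring setup are elided, and bind_shared/xsk1_fd are
illustrative names) of how a second socket shares the first socket's umem
via struct sockaddr_xdp from <linux/if_xdp.h>. Because sxdp_flags may carry
nothing but XDP_SHARED_UMEM here, the second socket cannot request
XDP_USE_NEED_WAKEUP itself, which is why the kernel must inherit it:

#include <linux/if_xdp.h>
#include <sys/socket.h>
#include <string.h>

/* Assume xsk1_fd was bound earlier with XDP_USE_NEED_WAKEUP and its
 * umem and rings fully configured. Returns the new fd or -1 on error. */
static int bind_shared(int xsk1_fd, unsigned int ifindex, unsigned int queue_id)
{
	struct sockaddr_xdp sxdp;
	int fd = socket(AF_XDP, SOCK_RAW, 0);

	if (fd < 0)
		return -1;

	/* A different queue id/device still needs its own fill and
	 * completion rings, registered via the XDP_UMEM_FILL_RING and
	 * XDP_UMEM_COMPLETION_RING setsockopts (elided here). */

	memset(&sxdp, 0, sizeof(sxdp));
	sxdp.sxdp_family = AF_XDP;
	sxdp.sxdp_ifindex = ifindex;
	sxdp.sxdp_queue_id = queue_id;
	sxdp.sxdp_shared_umem_fd = xsk1_fd; /* share the first socket's umem */
	sxdp.sxdp_flags = XDP_SHARED_UMEM;  /* no other flags allowed here */

	return bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp)) ? -1 : fd;
}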
Diffstat (limited to 'net/xdp')
-rw-r--r--	net/xdp/xsk.c	4
-rw-r--r--	net/xdp/xsk_buff_pool.c	5
2 files changed, 5 insertions(+), 4 deletions(-)
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index ca4716b92774..691841dc6d33 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -742,8 +742,8 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
 			goto out_unlock;
 		}
 
-		err = xp_assign_dev_shared(xs->pool, umem_xs->umem,
-					   dev, qid);
+		err = xp_assign_dev_shared(xs->pool, umem_xs, dev,
+					   qid);
 		if (err) {
 			xp_destroy(xs->pool);
 			xs->pool = NULL;
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index e63a285a9856..c347e52f58df 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -198,17 +198,18 @@ int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
 	return __xp_assign_dev(pool, dev, queue_id, flags);
 }
 
-int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_umem *umem,
+int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_sock *umem_xs,
 			 struct net_device *dev, u16 queue_id)
 {
 	u16 flags;
+	struct xdp_umem *umem = umem_xs->umem;
 
 	/* One fill and completion ring required for each queue id. */
 	if (!pool->fq || !pool->cq)
 		return -EINVAL;
 
 	flags = umem->zc ? XDP_ZEROCOPY : XDP_COPY;
-	if (pool->uses_need_wakeup)
+	if (umem_xs->pool->uses_need_wakeup)
 		flags |= XDP_USE_NEED_WAKEUP;
 
 	return __xp_assign_dev(pool, dev, queue_id, flags);
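With the fix, a shared socket's rings carry the same need_wakeup semantics
as the first socket's, so the usual userspace pattern works for it too. A
sketch of that pattern, using the xsk_ring_prod__needs_wakeup() helper from
libbpf's/libxdp's xsk.h (kick_tx_if_needed is an illustrative name, and the
header path may differ between library versions):

#include <sys/socket.h>
#include <xdp/xsk.h> /* <bpf/xsk.h> with older libbpf */

/* Kick the kernel Tx path only when it asked to be woken up. */
static void kick_tx_if_needed(int xsk_fd, struct xsk_ring_prod *tx)
{
	if (xsk_ring_prod__needs_wakeup(tx))
		sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
}

Without the inheritance, the driver never sets the need-wakeup bit on the
shared socket's rings, so such a check can stall the application.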