author    Eric Dumazet <edumazet@google.com>  2024-03-28 17:40:30 +0300
committer Jakub Kicinski <kuba@kernel.org>    2024-03-30 01:03:10 +0300
commit    6a1f12dd85a8b24f871dfcf467378660af9c064d (patch)
tree      cb52518234de5cc5eabe543f60265f8b3699ae35 /net
parent    60557969951304dad829f2829019907dfb43ecb3 (diff)
download  linux-6a1f12dd85a8b24f871dfcf467378660af9c064d.tar.xz
udp: relax atomic operation on sk->sk_rmem_alloc
atomic_add_return() is more expensive than atomic_add() and seems
overkill in UDP rx fast path.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Link: https://lore.kernel.org/r/20240328144032.1864988-3-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'net')
 net/ipv4/udp.c | 7 +------
 1 file changed, 1 insertion(+), 6 deletions(-)
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 6a39e7fa0616..19d7db4563ac 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1516,12 +1516,7 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
 	size = skb->truesize;
 	udp_set_dev_scratch(skb);
 
-	/* we drop only if the receive buf is full and the receive
-	 * queue contains some other skb
-	 */
-	rmem = atomic_add_return(size, &sk->sk_rmem_alloc);
-	if (rmem > (size + (unsigned int)sk->sk_rcvbuf))
-		goto uncharge_drop;
+	atomic_add(size, &sk->sk_rmem_alloc);
 
 	spin_lock(&list->lock);
 	err = udp_rmem_schedule(sk, size);
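
For context: atomic_add() does not have to return the updated counter,
so architectures can implement it with a cheaper instruction (e.g. a
plain "lock add" instead of "lock xadd" on x86). The userspace C11
program below is a minimal sketch of the before/after charging logic,
not kernel code; the helper names rmem_charge_old()/rmem_charge_new()
and the rcvbuf value are hypothetical stand-ins, and in the real patch
the rcvbuf enforcement moves into udp_rmem_schedule() under the queue
lock.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int sk_rmem_alloc;        /* stand-in for sk->sk_rmem_alloc */
static const int sk_rcvbuf = 1 << 20;   /* hypothetical receive buffer cap */

/* Old scheme: charge, read back the new total, and drop (uncharging)
 * if the buffer was already full.  Consuming the updated value forces
 * the more expensive returning form of the atomic add. */
static bool rmem_charge_old(int size)
{
	int rmem = atomic_fetch_add(&sk_rmem_alloc, size) + size;

	if (rmem > size + sk_rcvbuf) {
		atomic_fetch_sub(&sk_rmem_alloc, size);  /* uncharge */
		return false;                            /* drop */
	}
	return true;
}

/* New scheme: just charge.  The result is ignored and relaxed ordering
 * suffices, mirroring the cheaper atomic_add() in the patch; the limit
 * is checked later under the queue lock. */
static void rmem_charge_new(int size)
{
	atomic_fetch_add_explicit(&sk_rmem_alloc, size,
				  memory_order_relaxed);
}

int main(void)
{
	printf("old: accepted=%d rmem=%d\n",
	       (int)rmem_charge_old(1500), atomic_load(&sk_rmem_alloc));
	rmem_charge_new(1500);
	printf("new: rmem=%d\n", atomic_load(&sk_rmem_alloc));
	return 0;
}

Built with "cc -std=c11 sketch.c", both paths charge 1500 bytes; the
difference is that the old path must use a fetching add because it
consumes the result, while the new path's relaxed, result-ignoring add
leaves the compiler and architecture free to pick a cheaper
read-modify-write.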