author		Eric Dumazet <edumazet@google.com>	2024-03-29 18:42:20 +0300
committer	David S. Miller <davem@davemloft.net>	2024-04-01 13:28:31 +0300
commit		95e48d862ada73188be6d91a33c49d1712815bd2 (patch)
tree		b8d6429d9e0e50d7a1bc6510e4f2fb8af7b93272 /net
parent		2fe50a4d7225cf10748775e290361896637091a9 (diff)
download	linux-95e48d862ada73188be6d91a33c49d1712815bd2.tar.xz
net: enqueue_to_backlog() change vs not running device
If the device attached to the packet given to enqueue_to_backlog() is not
running, we drop the packet.

But we accidentally increment sd->dropped, giving false signals to admins:
sd->dropped should be reserved for cpu backlog pressure, not for temporary
glitches while a device is being dismantled.

While we are at it, perform the netif_running() test before grabbing the
rps lock, and use the SKB_DROP_REASON_DEV_READY drop reason instead of
NOT_SPECIFIED.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--	net/core/dev.c	| 9
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index c136e80dea61..4ad7836365e6 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4801,12 +4801,13 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
 	unsigned long flags;
 	unsigned int qlen;
 
-	reason = SKB_DROP_REASON_NOT_SPECIFIED;
+	reason = SKB_DROP_REASON_DEV_READY;
+	if (!netif_running(skb->dev))
+		goto bad_dev;
+
 	sd = &per_cpu(softnet_data, cpu);
 
 	backlog_lock_irq_save(sd, &flags);
-	if (!netif_running(skb->dev))
-		goto drop;
 	qlen = skb_queue_len(&sd->input_pkt_queue);
 	if (qlen <= READ_ONCE(net_hotdata.max_backlog) &&
 	    !skb_flow_limit(skb, qlen)) {
@@ -4827,10 +4828,10 @@ enqueue:
 	}
 	reason = SKB_DROP_REASON_CPU_BACKLOG;
 
-drop:
 	sd->dropped++;
 	backlog_unlock_irq_restore(sd, &flags);
 
+bad_dev:
 	dev_core_stats_rx_dropped_inc(skb->dev);
 	kfree_skb_reason(skb, reason);
 	return NET_RX_DROP;
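
For readers following the two hunks together, below is a small userspace model of the resulting control flow: a packet whose device is not running takes the new bad_dev path, skipping both the backlog lock and the sd->dropped increment, while genuine backlog-pressure drops still bump the counter. This is only an illustrative sketch; the struct, helper names, and counters are simplified stand-ins, not the kernel implementation.

/*
 * Illustrative userspace model (not kernel code): after this patch a drop
 * caused by a device that is not running takes the "bad_dev" path and never
 * touches the per-cpu dropped counter; only real backlog pressure does.
 */
#include <stdbool.h>
#include <stdio.h>

enum drop_reason { REASON_DEV_READY, REASON_CPU_BACKLOG };

struct softnet_model {
	unsigned int qlen;		/* stand-in for input_pkt_queue length */
	unsigned int max_backlog;	/* stand-in for net_hotdata.max_backlog */
	unsigned long dropped;		/* stand-in for sd->dropped */
};

static int enqueue_model(struct softnet_model *sd, bool dev_running)
{
	enum drop_reason reason = REASON_DEV_READY;

	if (!dev_running)
		goto bad_dev;		/* skip the lock and sd->dropped++ */

	/* locking elided in this model */
	if (sd->qlen <= sd->max_backlog) {
		sd->qlen++;
		return 0;		/* NET_RX_SUCCESS */
	}

	reason = REASON_CPU_BACKLOG;
	sd->dropped++;			/* genuine cpu backlog pressure only */
bad_dev:
	printf("drop: reason=%d dropped=%lu\n", reason, sd->dropped);
	return 1;			/* NET_RX_DROP */
}

int main(void)
{
	struct softnet_model sd = { .qlen = 0, .max_backlog = 1, .dropped = 0 };

	enqueue_model(&sd, false);	/* device down: dropped stays 0 */
	enqueue_model(&sd, true);	/* queued */
	enqueue_model(&sd, true);	/* queued */
	enqueue_model(&sd, true);	/* backlog full: dropped becomes 1 */
	return 0;
}

Run as written, the first call reports a drop while dropped stays at 0, and only the final call (queue full) raises it to 1, which is exactly the accounting distinction the commit message describes.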