From 84b6823cd96b38c40b3b30beabbfa48d92990e1a Mon Sep 17 00:00:00 2001
From: Jason Xing
Date: Thu, 18 Apr 2024 15:36:01 +0800
Subject: net: rps: protect last_qtail with rps_input_queue_tail_save() helper

Remove one unnecessary reader protection and add another writer
protection to finish the lockless protection job.

Note: the removed READ_ONCE() is not needed because we only have to
protect the lockless reader running in a different context
(rps_may_expire_flow()).

Signed-off-by: Jason Xing
Reviewed-by: Eric Dumazet
Signed-off-by: David S. Miller
---
 net/core/dev.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/net/core/dev.c b/net/core/dev.c
index f6c6e494f0a9..6f027f676243 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4507,7 +4507,7 @@ set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 	struct netdev_rx_queue *rxqueue;
 	struct rps_dev_flow_table *flow_table;
 	struct rps_dev_flow *old_rflow;
-	u32 flow_id;
+	u32 flow_id, head;
 	u16 rxq_index;
 	int rc;
 
@@ -4535,8 +4535,8 @@ set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 			old_rflow->filter = RPS_NO_FILTER;
 	out:
 #endif
-		rflow->last_qtail =
-			READ_ONCE(per_cpu(softnet_data, next_cpu).input_queue_head);
+		head = READ_ONCE(per_cpu(softnet_data, next_cpu).input_queue_head);
+		rps_input_queue_tail_save(&rflow->last_qtail, head);
 	}
 
 	rflow->cpu = next_cpu;
@@ -4619,7 +4619,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 		if (unlikely(tcpu != next_cpu) &&
 		    (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
 		     ((int)(READ_ONCE(per_cpu(softnet_data, tcpu).input_queue_head) -
-		      READ_ONCE(rflow->last_qtail))) >= 0)) {
+		      rflow->last_qtail)) >= 0)) {
			tcpu = next_cpu;
			rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
		}
-- 
cgit v1.2.3
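
For readers outside the kernel tree: rps_input_queue_tail_save() is the
writer-side half of a WRITE_ONCE()/READ_ONCE() pairing. The sketch below is
a minimal, self-contained userspace illustration of that pairing, not the
kernel implementation: the WRITE_ONCE()/READ_ONCE() macros are emulated with
volatile accesses, and input_queue_tail_save() and flow_is_stale() are
hypothetical stand-ins for the kernel helpers touched by this patch.

/* Userspace sketch of the single-writer / lockless-reader pattern that
 * rps_input_queue_tail_save() participates in. Illustration only; the
 * names and macro definitions here are stand-ins, not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

/* Emulate the kernel macros with volatile accesses so the compiler can
 * neither tear nor cache the load/store.
 */
#define WRITE_ONCE(x, val) (*(volatile typeof(x) *)&(x) = (val))
#define READ_ONCE(x)       (*(volatile typeof(x) *)&(x))

struct rflow { uint32_t last_qtail; };

/* Writer side: a marked store publishes the new tail so that a reader
 * running locklessly in another context sees a coherent u32 value.
 */
static void input_queue_tail_save(uint32_t *dest, uint32_t tail)
{
	WRITE_ONCE(*dest, tail);
}

/* Reader side in a different context (the rps_may_expire_flow() role):
 * the signed wraparound-safe comparison is true once the queue head has
 * advanced past the flow's last enqueued tail, i.e. the old packets of
 * this flow have all been drained.
 */
static int flow_is_stale(const struct rflow *rflow, uint32_t head)
{
	return (int)(head - READ_ONCE(rflow->last_qtail)) >= 0;
}

int main(void)
{
	struct rflow rflow = { 0 };

	input_queue_tail_save(&rflow.last_qtail, 42);
	printf("stale at head=100: %d\n", flow_is_stale(&rflow, 100));
	return 0;
}

The pairing explains both hunks above: set_rps_cpu() gains the marked
store, while get_rps_cpu() may drop its READ_ONCE() because, per the
commit message, the only lockless reader that still needs protection runs
in a different context, rps_may_expire_flow().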