author	Eric Dumazet <edumazet@google.com>	2022-07-07 22:18:46 +0300
committer	David S. Miller <davem@davemloft.net>	2022-07-08 16:21:08 +0300
commit	c2dd4059dc31ee6f5b83c8d2064bb1f1f465bcec (patch)
tree	ca262868da3fc1e3880a78ec3efc9beaa7522810 /net
parent	8e1514579246ddc36ba0b860fc8bdd03be085aee (diff)
net: minor optimization in __alloc_skb()
TCP allocates 'fast clones' skbs for packets in tx queues. Currently,
__alloc_skb() initializes the companion fclone field to SKB_FCLONE_CLONE,
and leaves other fields untouched. It makes sense to defer this init
until skb_clone(), because all fclone fields are copied and hot in cpu
caches at that time.

This removes one cache line miss in __alloc_skb(), a cost seen on a host
with 256 cpus all competing on memory accesses.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
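For context, both fast-clone skbs live in a single slab object, so the
companion's fclone field sits on a cache line that __alloc_skb() otherwise
has no reason to touch. A simplified sketch of that layout (abbreviated
from include/linux/skbuff.h of this era; the comments are added here):

	struct sk_buff_fclones {
		struct sk_buff	skb1;		/* the skb returned to the caller */
		struct sk_buff	skb2;		/* companion slot handed out by skb_clone() */
		refcount_t	fclone_ref;	/* 1 until skb2 is taken, then 2 */
	};

__alloc_skb() marks skb1 with SKB_FCLONE_ORIG; after this patch, skb2 is
only marked SKB_FCLONE_CLONE at the moment skb_clone() hands it out.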
Diffstat (limited to 'net')
-rw-r--r--	net/core/skbuff.c	3
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index c62e42d0c531..c4a751781581 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -454,8 +454,6 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 
 		skb->fclone = SKB_FCLONE_ORIG;
 		refcount_set(&fclones->fclone_ref, 1);
-
-		fclones->skb2.fclone = SKB_FCLONE_CLONE;
 	}
 
 	return skb;
@@ -1513,6 +1511,7 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
 	    refcount_read(&fclones->fclone_ref) == 1) {
 		n = &fclones->skb2;
 		refcount_set(&fclones->fclone_ref, 2);
+		n->fclone = SKB_FCLONE_CLONE;
 	} else {
 		if (skb_pfmemalloc(skb))
 			gfp_mask |= __GFP_MEMALLOC;
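After the patch, the fast-clone path in skb_clone() reads roughly as below
(a condensed sketch of net/core/skbuff.c; the skb_orphan_frags() check and
other details are elided):

	struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
	{
		struct sk_buff_fclones *fclones = container_of(skb,
							       struct sk_buff_fclones,
							       skb1);
		struct sk_buff *n;

		if (skb->fclone == SKB_FCLONE_ORIG &&
		    refcount_read(&fclones->fclone_ref) == 1) {
			/* Reuse the companion slot; its fields, including
			 * fclone, were just copied and are hot in this cpu's
			 * cache, so the deferred SKB_FCLONE_CLONE write is
			 * effectively free here.
			 */
			n = &fclones->skb2;
			refcount_set(&fclones->fclone_ref, 2);
			n->fclone = SKB_FCLONE_CLONE;
		} else {
			/* Slow path: the companion is already in use, so
			 * allocate a standalone clone from the slab cache.
			 */
			if (skb_pfmemalloc(skb))
				gfp_mask |= __GFP_MEMALLOC;

			n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
			if (!n)
				return NULL;

			n->fclone = SKB_FCLONE_UNAVAILABLE;
		}

		return __skb_clone(n, skb);
	}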