Diffstat (limited to 'include')
-rw-r--r--   include/linux/skbuff.h   19
-rw-r--r--   include/net/tcp.h         8
2 files changed, 24 insertions, 3 deletions
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index a63e13082397..686a666d073d 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -454,9 +454,15 @@ enum {
* all frags to avoid possible bad checksum
*/
SKBFL_SHARED_FRAG = BIT(1),
+
+ /* segment contains only zerocopy data and should not be
+ * charged to kernel memory.
+ */
+ SKBFL_PURE_ZEROCOPY = BIT(2),
};
#define SKBFL_ZEROCOPY_FRAG (SKBFL_ZEROCOPY_ENABLE | SKBFL_SHARED_FRAG)
+#define SKBFL_ALL_ZEROCOPY (SKBFL_ZEROCOPY_FRAG | SKBFL_PURE_ZEROCOPY)
/*
* The callback notifies userspace to release buffers when skb DMA is done in
@@ -1464,6 +1470,17 @@ static inline struct ubuf_info *skb_zcopy(struct sk_buff *skb)
return is_zcopy ? skb_uarg(skb) : NULL;
}
+static inline bool skb_zcopy_pure(const struct sk_buff *skb)
+{
+ return skb_shinfo(skb)->flags & SKBFL_PURE_ZEROCOPY;
+}
+
+static inline bool skb_pure_zcopy_same(const struct sk_buff *skb1,
+ const struct sk_buff *skb2)
+{
+ return skb_zcopy_pure(skb1) == skb_zcopy_pure(skb2);
+}
+
static inline void net_zcopy_get(struct ubuf_info *uarg)
{
refcount_inc(&uarg->refcnt);
@@ -1528,7 +1545,7 @@ static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy_success)
if (!skb_zcopy_is_nouarg(skb))
uarg->callback(skb, uarg, zerocopy_success);
- skb_shinfo(skb)->flags &= ~SKBFL_ZEROCOPY_FRAG;
+ skb_shinfo(skb)->flags &= ~SKBFL_ALL_ZEROCOPY;
}
}
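The skbuff.h hunks above add a third zerocopy flag plus two helpers: skb_zcopy_pure() reports whether an skb carries only user-owned zerocopy data, skb_pure_zcopy_same() reports whether two skbs fall in the same accounting class, and SKBFL_ALL_ZEROCOPY lets skb_zcopy_clear() drop the new bit together with the existing ones. The snippet below is a minimal standalone model of that flag arithmetic, not kernel code: struct fake_skb and the main() driver are illustrative stand-ins, and SKBFL_ZEROCOPY_ENABLE is assumed to be BIT(0) as in the surrounding enum (not shown in the hunk).

/* Standalone model of the flag arithmetic introduced above.
 * Not kernel code: the struct and driver are illustrative stand-ins;
 * only the SKBFL_* bit layout and the helper logic mirror the hunks.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n)                 (1u << (n))
#define SKBFL_ZEROCOPY_ENABLE  BIT(0)   /* assumed value, from the surrounding enum */
#define SKBFL_SHARED_FRAG      BIT(1)
#define SKBFL_PURE_ZEROCOPY    BIT(2)

#define SKBFL_ZEROCOPY_FRAG    (SKBFL_ZEROCOPY_ENABLE | SKBFL_SHARED_FRAG)
#define SKBFL_ALL_ZEROCOPY     (SKBFL_ZEROCOPY_FRAG | SKBFL_PURE_ZEROCOPY)

struct fake_skb {                       /* stand-in for skb_shared_info->flags */
	uint8_t flags;
};

static bool skb_zcopy_pure(const struct fake_skb *skb)
{
	return skb->flags & SKBFL_PURE_ZEROCOPY;
}

static bool skb_pure_zcopy_same(const struct fake_skb *a,
				const struct fake_skb *b)
{
	return skb_zcopy_pure(a) == skb_zcopy_pure(b);
}

int main(void)
{
	struct fake_skb pure  = { .flags = SKBFL_ZEROCOPY_FRAG | SKBFL_PURE_ZEROCOPY };
	struct fake_skb mixed = { .flags = SKBFL_ZEROCOPY_FRAG };

	/* Pure and charged skbs fall in different accounting classes. */
	printf("same accounting class: %d\n", skb_pure_zcopy_same(&pure, &mixed)); /* 0 */

	/* skb_zcopy_clear() now masks with SKBFL_ALL_ZEROCOPY, so the
	 * pure-zerocopy bit is dropped together with the frag bits. */
	pure.flags &= ~SKBFL_ALL_ZEROCOPY;
	printf("flags after clear: %#x\n", pure.flags); /* 0 */
	return 0;
}

Folding the new bit into SKBFL_ALL_ZEROCOPY means existing callers of skb_zcopy_clear() pick up the correct behaviour without having to learn about the new flag.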
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 70972f3ac8fa..4da22b41bde6 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -293,7 +293,10 @@ static inline bool tcp_out_of_memory(struct sock *sk)
static inline void tcp_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
{
sk_wmem_queued_add(sk, -skb->truesize);
- sk_mem_uncharge(sk, skb->truesize);
+ if (!skb_zcopy_pure(skb))
+ sk_mem_uncharge(sk, skb->truesize);
+ else
+ sk_mem_uncharge(sk, SKB_TRUESIZE(skb_end_offset(skb)));
__kfree_skb(skb);
}
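In the tcp_wmem_free_skb() hunk above, the write-queue accounting (sk_wmem_queued_add) still drops the skb's full truesize, but the amount released via sk_mem_uncharge() now depends on the new flag: a pure zerocopy skb only ever charged its own header storage, since the payload sits in user pages, so only SKB_TRUESIZE(skb_end_offset(skb)), roughly the size of the skb head allocation, is uncharged. Below is a small illustrative model of that split; it is not kernel code, the byte counts are made up, and wmem_uncharge() is a hypothetical helper.

/* Illustrative model of the uncharge split above (not kernel code).
 * The sizes are invented; the point is that a pure zerocopy skb only
 * charged its header storage, so only that part is uncharged.
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned int wmem_uncharge(unsigned int truesize,
				  unsigned int header_truesize,
				  bool pure_zerocopy)
{
	/* Mirrors the hunk: full truesize for normal skbs, header-only
	 * truesize (SKB_TRUESIZE(skb_end_offset(skb)) in the kernel)
	 * when the payload lives in user-owned pages. */
	return pure_zerocopy ? header_truesize : truesize;
}

int main(void)
{
	unsigned int truesize = 65536 + 1280;  /* payload + header, made up */
	unsigned int header_truesize = 1280;

	printf("normal skb uncharges %u bytes\n",
	       wmem_uncharge(truesize, header_truesize, false));
	printf("pure zerocopy skb uncharges %u bytes\n",
	       wmem_uncharge(truesize, header_truesize, true));
	return 0;
}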
@@ -974,7 +977,8 @@ static inline bool tcp_skb_can_collapse(const struct sk_buff *to,
const struct sk_buff *from)
{
return likely(tcp_skb_can_collapse_to(to) &&
- mptcp_skb_can_collapse(to, from));
+ mptcp_skb_can_collapse(to, from) &&
+ skb_pure_zcopy_same(to, from));
}
/* Events passed to congestion control interface */
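The final hunk extends tcp_skb_can_collapse() so that a pure zerocopy skb is never merged with a normally charged one; otherwise a single skb would mix payload that was charged to the socket with payload that was not, and tcp_wmem_free_skb() could no longer uncharge the right amount. A sketch of the predicate with the unrelated TCP/MPTCP conditions stubbed out (fake_skb and can_collapse() are illustrative names, not kernel APIs):

/* Illustrative collapse check (not kernel code): mirrors the predicate
 * added above, with the other conditions assumed to hold.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_skb {
	bool pure_zerocopy;   /* stands in for SKBFL_PURE_ZEROCOPY */
};

static bool skb_pure_zcopy_same(const struct fake_skb *to,
				const struct fake_skb *from)
{
	return to->pure_zerocopy == from->pure_zerocopy;
}

static bool can_collapse(const struct fake_skb *to, const struct fake_skb *from)
{
	/* The real check also requires tcp_skb_can_collapse_to() and
	 * mptcp_skb_can_collapse(); both are assumed true here. */
	return skb_pure_zcopy_same(to, from);
}

int main(void)
{
	struct fake_skb charged = { .pure_zerocopy = false };
	struct fake_skb pure    = { .pure_zerocopy = true };

	/* Merging a pure zerocopy skb into a charged one would mix the
	 * two accounting classes, so the predicate rejects it. */
	printf("charged + pure -> %d\n", can_collapse(&charged, &pure)); /* 0 */
	printf("pure + pure    -> %d\n", can_collapse(&pure, &pure));    /* 1 */
	return 0;
}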