-rw-r--r--  drivers/scsi/cxgbi/libcxgbi.c    4
-rw-r--r--  include/linux/skbuff.h          16
-rw-r--r--  net/Kconfig                     12
-rw-r--r--  net/packet/af_packet.c           4
4 files changed, 21 insertions, 15 deletions
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index af281e271f88..3e1de4c842cc 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -2314,9 +2314,9 @@ static int cxgbi_sock_tx_queue_up(struct cxgbi_sock *csk, struct sk_buff *skb)
frags++;
if (frags >= SKB_WR_LIST_SIZE) {
- pr_err("csk 0x%p, frags %u, %u,%u >%lu.\n",
+ pr_err("csk 0x%p, frags %u, %u,%u >%u.\n",
csk, skb_shinfo(skb)->nr_frags, skb->len,
- skb->data_len, SKB_WR_LIST_SIZE);
+ skb->data_len, (unsigned int)SKB_WR_LIST_SIZE);
return -EINVAL;
}
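Both driver-side hunks in this patch are printf-format fixups rather than behavioural changes: the old MAX_SKB_FRAGS was an unsigned long expression (16UL or PAGE_SIZE arithmetic), so %lu matched, while the Kconfig-provided replacement is a plain int literal, so values derived from it now want %u plus an explicit cast. A minimal standalone sketch of the mismatch, using made-up macro names rather than the kernel's:

    #include <stdio.h>

    /* Old style: the constant carries unsigned long type, so %lu matches. */
    #define OLD_MAX_FRAGS 16UL
    /* New style: a Kconfig symbol expands to a plain int literal. */
    #define NEW_MAX_FRAGS 17

    int main(void)
    {
            printf("old: %lu\n", OLD_MAX_FRAGS);               /* fine */
            /* printf("new: %lu\n", NEW_MAX_FRAGS); */         /* -Wformat warning */
            printf("new: %u\n", (unsigned int)NEW_MAX_FRAGS);  /* the fix applied above */
            return 0;
    }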
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index fe661011644b..82511b2f61ea 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -345,18 +345,12 @@ struct sk_buff_head {
struct sk_buff;
-/* To allow 64K frame to be packed as single skb without frag_list we
- * require 64K/PAGE_SIZE pages plus 1 additional page to allow for
- * buffers which do not start on a page boundary.
- *
- * Since GRO uses frags we allocate at least 16 regardless of page
- * size.
- */
-#if (65536/PAGE_SIZE + 1) < 16
-#define MAX_SKB_FRAGS 16UL
-#else
-#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
+#ifndef CONFIG_MAX_SKB_FRAGS
+# define CONFIG_MAX_SKB_FRAGS 17
#endif
+
+#define MAX_SKB_FRAGS CONFIG_MAX_SKB_FRAGS
+
extern int sysctl_max_skb_frags;
/* Set skb_shinfo(skb)->gso_size to this in case you want skb_segment to
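For reference, the new default of 17 is exactly what the removed formula produced on 4 KiB pages (65536/4096 + 1), while on 64 KiB pages the old code fell back to its floor of 16. A throwaway sketch of that arithmetic, with the page sizes supplied by hand rather than taken from a real build:

    #include <stdio.h>

    /* Reproduce the removed MAX_SKB_FRAGS formula for assumed page sizes. */
    static unsigned long old_max_skb_frags(unsigned long page_size)
    {
            unsigned long n = 65536 / page_size + 1;

            return n < 16 ? 16 : n;    /* the old code floored the result at 16 */
    }

    int main(void)
    {
            printf("4K pages:  %lu\n", old_max_skb_frags(4096));   /* 17, the new default */
            printf("64K pages: %lu\n", old_max_skb_frags(65536));  /* 2, floored to 16 */
            return 0;
    }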
diff --git a/net/Kconfig b/net/Kconfig
index 48c33c222199..f806722bccf4 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -251,6 +251,18 @@ config PCPU_DEV_REFCNT
network device refcount are using per cpu variables if this option is set.
This can be forced to N to detect underflows (with a performance drop).
+config MAX_SKB_FRAGS
+ int "Maximum number of fragments per skb_shared_info"
+ range 17 45
+ default 17
+ help
+ Having more fragments per skb_shared_info can help GRO efficiency.
+ This helps BIG TCP workloads, but might expose bugs in some
+ legacy drivers.
+ This also increases memory overhead of small packets,
+ and in drivers using build_skb().
+ If unsure, say 17.
+
config RPS
bool
depends on SMP && SYSFS
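The memory-overhead note in the help text above comes from skb_shared_info embedding a frags[MAX_SKB_FRAGS] array, so every extra slot costs one skb_frag_t per skb whether or not it is used. A back-of-the-envelope sketch, assuming a 16-byte skb_frag_t as on common 64-bit builds (the real size is architecture dependent):

    #include <stdio.h>

    /* Assumed size of skb_frag_t on a typical 64-bit build; not an ABI guarantee. */
    #define ASSUMED_SIZEOF_SKB_FRAG_T 16U

    static unsigned int extra_bytes_per_skb(unsigned int max_frags)
    {
            return (max_frags - 17U) * ASSUMED_SIZEOF_SKB_FRAG_T;
    }

    int main(void)
    {
            printf("CONFIG_MAX_SKB_FRAGS=17: +%u bytes per skb_shared_info\n",
                   extra_bytes_per_skb(17));
            printf("CONFIG_MAX_SKB_FRAGS=45: +%u bytes per skb_shared_info\n",
                   extra_bytes_per_skb(45));
            return 0;
    }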
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 497193f73030..568f8d76e3c1 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2622,8 +2622,8 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
nr_frags = skb_shinfo(skb)->nr_frags;
if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
- pr_err("Packet exceed the number of skb frags(%lu)\n",
- MAX_SKB_FRAGS);
+ pr_err("Packet exceed the number of skb frags(%u)\n",
+ (unsigned int)MAX_SKB_FRAGS);
return -EFAULT;
}