summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJarek Poplawski <jarkao2@gmail.com>2008-10-06 20:54:39 +0400
committerDavid S. Miller <davem@davemloft.net>2008-10-06 20:54:39 +0400
commit554794de7949d1a6279336404c066f974d4c2bde (patch)
tree90cb890a68bdf6c9947ddfc451f798bffb8f8990
parent13c1d18931ebb5cf407cb348ef2cd6284d68902d (diff)
downloadlinux-554794de7949d1a6279336404c066f974d4c2bde.tar.xz
pkt_sched: Fix handling of gso skbs on requeuing
Jay Cliburn noticed and diagnosed a bug triggered in dev_gso_skb_destructor() after last change from qdisc->gso_skb to qdisc->requeue list. Since gso_segmented skbs can't be queued to another list this patch brings back qdisc->gso_skb for them.

Reported-by: Jay Cliburn <jcliburn@gmail.com>
Signed-off-by: Jarek Poplawski <jarkao2@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--include/net/sch_generic.h1
-rw-r--r--net/sched/sch_generic.c22
2 files changed, 18 insertions(+), 5 deletions(-)
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 3b983e8a0555..3fe49d808957 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -52,6 +52,7 @@ struct Qdisc
u32 parent;
atomic_t refcnt;
unsigned long state;
+ struct sk_buff *gso_skb;
struct sk_buff_head requeue;
struct sk_buff_head q;
struct netdev_queue *dev_queue;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 5e7e0bd38fe8..3db4cf1bd263 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -44,7 +44,10 @@ static inline int qdisc_qlen(struct Qdisc *q)
static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{
- __skb_queue_head(&q->requeue, skb);
+ if (unlikely(skb->next))
+ q->gso_skb = skb;
+ else
+ __skb_queue_head(&q->requeue, skb);
__netif_schedule(q);
return 0;
@@ -52,7 +55,10 @@ static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
{
- struct sk_buff *skb = skb_peek(&q->requeue);
+ struct sk_buff *skb = q->gso_skb;
+
+ if (!skb)
+ skb = skb_peek(&q->requeue);
if (unlikely(skb)) {
struct net_device *dev = qdisc_dev(q);
@@ -60,10 +66,15 @@ static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
/* check the reason of requeuing without tx lock first */
txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
- if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq))
- __skb_unlink(skb, &q->requeue);
- else
+ if (!netif_tx_queue_stopped(txq) &&
+ !netif_tx_queue_frozen(txq)) {
+ if (q->gso_skb)
+ q->gso_skb = NULL;
+ else
+ __skb_unlink(skb, &q->requeue);
+ } else {
skb = NULL;
+ }
} else {
skb = q->dequeue(q);
}
@@ -548,6 +559,7 @@ void qdisc_destroy(struct Qdisc *qdisc)
module_put(ops->owner);
dev_put(qdisc_dev(qdisc));
+ kfree_skb(qdisc->gso_skb);
__skb_queue_purge(&qdisc->requeue);
kfree((char *) qdisc - qdisc->padded);