path: root/net/sched/sch_teql.c
author     David S. Miller <davem@davemloft.net>  2008-07-17 11:34:19 +0400
committer  David S. Miller <davem@davemloft.net>  2008-07-18 06:21:00 +0400
commit     e8a0464cc950972824e2e128028ae3db666ec1ed (patch)
tree       5022b95396c0f3b313531bc39b19543c03551b9a /net/sched/sch_teql.c
parent     070825b3840a743e21ebcc44f8279708a4fed977 (diff)
download   linux-e8a0464cc950972824e2e128028ae3db666ec1ed.tar.xz
netdev: Allocate multiple queues for TX.
alloc_netdev_mq() now allocates an array of netdev_queue structures for TX, based upon the queue_count argument.

Furthermore, all accesses to the TX queues are now vectored through the netdev_get_tx_queue() and netdev_for_each_tx_queue() interfaces. This makes it easy to grep the tree for all things that want to get to a TX queue of a net device.

Problem spots which are not really multiqueue aware yet, and only work with one queue, can easily be spotted by grepping for all netdev_get_tx_queue() calls that pass in a zero index.

Signed-off-by: David S. Miller <davem@davemloft.net>
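As a reading aid, here is a minimal sketch of the two access patterns described above. It is not part of the patch: netdev_get_tx_queue(), netdev_for_each_tx_queue(), txq->qdisc and txq->lock are taken from this commit and the diff below, while the example_* helpers and the iterator callback signature (dev, txq, arg) are assumptions made for illustration.

#include <linux/netdevice.h>
#include <net/pkt_sched.h>

/* Single-queue call site: reach TX queue 0 through the new accessor
 * instead of the removed dev->tx_queue member.  A hard-coded index of
 * zero marks code that is not yet multiqueue aware. */
static struct Qdisc *example_root_qdisc(struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	return txq->qdisc;
}

/* Multiqueue-aware call site: visit every TX queue via the iterator.
 * The lock/reset/unlock sequence mirrors the teql_destroy() hunk below;
 * the callback signature is an assumption. */
static void example_reset_txq(struct net_device *dev,
			      struct netdev_queue *txq,
			      void *unused)
{
	spin_lock_bh(&txq->lock);
	qdisc_reset(txq->qdisc);
	spin_unlock_bh(&txq->lock);
}

static void example_reset_all_txqs(struct net_device *dev)
{
	netdev_for_each_tx_queue(dev, example_reset_txq, NULL);
}

Grepping for call sites shaped like example_root_qdisc(), i.e. netdev_get_tx_queue(dev, 0), is exactly how the message above suggests locating code that still assumes a single TX queue.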
Diffstat (limited to 'net/sched/sch_teql.c')
-rw-r--r--  net/sched/sch_teql.c  21
1 file changed, 14 insertions(+), 7 deletions(-)
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 8ac05981be20..44a2c3451f4d 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -111,7 +111,7 @@ teql_dequeue(struct Qdisc* sch)
 	struct sk_buff *skb;
 
 	skb = __skb_dequeue(&dat->q);
-	dat_queue = &dat->m->dev->tx_queue;
+	dat_queue = netdev_get_tx_queue(dat->m->dev, 0);
 	if (skb == NULL) {
 		struct net_device *m = qdisc_dev(dat_queue->qdisc);
 		if (m) {
@@ -155,10 +155,13 @@ teql_destroy(struct Qdisc* sch)
 				if (q == master->slaves) {
 					master->slaves = NEXT_SLAVE(q);
 					if (q == master->slaves) {
+						struct netdev_queue *txq;
+
+						txq = netdev_get_tx_queue(master->dev, 0);
 						master->slaves = NULL;
-						spin_lock_bh(&master->dev->tx_queue.lock);
-						qdisc_reset(master->dev->tx_queue.qdisc);
-						spin_unlock_bh(&master->dev->tx_queue.lock);
+						spin_lock_bh(&txq->lock);
+						qdisc_reset(txq->qdisc);
+						spin_unlock_bh(&txq->lock);
 					}
 				}
 				skb_queue_purge(&dat->q);
@@ -218,7 +221,8 @@ static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt)
 static int
 __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *dev)
 {
-	struct teql_sched_data *q = qdisc_priv(dev->tx_queue.qdisc);
+	struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, 0);
+	struct teql_sched_data *q = qdisc_priv(dev_queue->qdisc);
 	struct neighbour *mn = skb->dst->neighbour;
 	struct neighbour *n = q->ncache;
 
@@ -254,7 +258,8 @@ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *
 static inline int teql_resolve(struct sk_buff *skb,
 			       struct sk_buff *skb_res, struct net_device *dev)
 {
-	if (dev->tx_queue.qdisc == &noop_qdisc)
+	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
+	if (txq->qdisc == &noop_qdisc)
 		return -ENODEV;
 
 	if (dev->header_ops == NULL ||
@@ -285,8 +290,10 @@ restart:
 
 	do {
 		struct net_device *slave = qdisc_dev(q);
+		struct netdev_queue *slave_txq;
 
-		if (slave->tx_queue.qdisc_sleeping != q)
+		slave_txq = netdev_get_tx_queue(slave, 0);
+		if (slave_txq->qdisc_sleeping != q)
 			continue;
 		if (netif_queue_stopped(slave) ||
 		    __netif_subqueue_stopped(slave, subq) ||