author		Phil Oester <kernel@linuxace.com>	2011-03-14 09:22:04 +0300
committer	David S. Miller <davem@davemloft.net>	2011-03-16 05:29:37 +0300
commit		fd0e435b0fe85622f167b84432552885a4856ac8 (patch)
tree		787a74b3a33ea940fffbee7a2e46fb14e9c27fa2 /drivers/net/bonding
parent		4a37390de98547e42ad0cb617bd2f2d452f2d4c7 (diff)
download	linux-fd0e435b0fe85622f167b84432552885a4856ac8.tar.xz
bonding: Incorrect TX queue offset
When packets come in from a device with >= 16 receive queues headed out a
bonding interface, syslog gets filled with this:

	kernel: bond0 selects TX queue 16, but real number of TX queues is 16

because queue_mapping is offset by 1. Adjust return value to account for
the offset.

This is a revision of my earlier patch (which did not use the
skb_rx_queue_* helpers - thanks to Ben for the suggestion). Andy submitted
a similar patch which emits a pr_warning on invalid queue selection, but I
believe the log spew is not useful. We can revisit that question in the
future, but in the interim I believe fixing the core problem is worthwhile.

Signed-off-by: Phil Oester <kernel@linuxace.com>
Signed-off-by: Andy Gospodarek <andy@greyhouse.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
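For reference, the skb_rx_queue_* helpers mentioned above live in
include/linux/skbuff.h; the abridged sketch below (paraphrased from that
header, not part of this patch) shows where the off-by-one in
queue_mapping comes from:

	static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
	{
		/* 0 means "no queue recorded", so the stored value is rx_queue + 1 */
		skb->queue_mapping = rx_queue + 1;
	}

	static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
	{
		/* undo the +1 offset applied by skb_record_rx_queue() */
		return skb->queue_mapping - 1;
	}

	static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
	{
		return skb->queue_mapping != 0;
	}

Reading skb->queue_mapping directly, as the old code did, therefore yields
the recorded RX queue plus one: a packet received on RX queue 15 of a
16-queue device is stored as 16, which is out of range for a bond exposing
16 TX queues (0-15) and triggers the warning quoted above.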
Diffstat (limited to 'drivers/net/bonding')
-rw-r--r--	drivers/net/bonding/bond_main.c	11
1 file changed, 9 insertions(+), 2 deletions(-)
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 3ad4f501949e..a93d9417dc15 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4341,11 +4341,18 @@ static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb)
 {
 	/*
 	 * This helper function exists to help dev_pick_tx get the correct
-	 * destination queue. Using a helper function skips the a call to
+	 * destination queue. Using a helper function skips a call to
 	 * skb_tx_hash and will put the skbs in the queue we expect on their
 	 * way down to the bonding driver.
 	 */
-	return skb->queue_mapping;
+	u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
+
+	if (unlikely(txq >= dev->real_num_tx_queues)) {
+		do {
+			txq -= dev->real_num_tx_queues;
+		} while (txq >= dev->real_num_tx_queues);
+	}
+	return txq;
 }
 
 static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
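The do/while in the new code is a cheap stand-in for a modulo, covering the
unlikely case where the recorded RX queue is still out of range for the
bond. A minimal, hypothetical userspace sketch (not kernel code) that
mirrors the same wrap logic:

	#include <stdio.h>
	#include <stdint.h>

	/* Fold txq into the range [0, real_num_tx_queues), as the patch does. */
	static uint16_t wrap_txq(uint16_t txq, uint16_t real_num_tx_queues)
	{
		if (txq >= real_num_tx_queues) {
			do {
				txq -= real_num_tx_queues;
			} while (txq >= real_num_tx_queues);
		}
		return txq;
	}

	int main(void)
	{
		/* RX queue 16 recorded, bond exposes 16 TX queues -> queue 0 */
		printf("%u\n", (unsigned int)wrap_txq(16, 16));
		/* Larger overshoots also land back in range: 37 % 16 -> 5 */
		printf("%u\n", (unsigned int)wrap_txq(37, 16));
		return 0;
	}

With this in place an out-of-range queue_mapping is silently folded back
into the valid range instead of being passed through and logged by the
stack.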