author		Amir Vadai <amirv@mellanox.com>	2012-05-17 04:58:10 +0400
committer	David S. Miller <davem@davemloft.net>	2012-05-18 00:17:50 +0400
commit		bc6a4744b827c5a78ca591acca81809bddb8b2db (patch)
tree		49328e3c1d680c5532605b2f701c01061958ebd3 /drivers/net/ethernet/mellanox/mlx4/en_tx.c
parent		cad456d5abbb6307be7a658d701bc44ca689e906 (diff)
download	linux-bc6a4744b827c5a78ca591acca81809bddb8b2db.tar.xz
net/mlx4_en: num cores tx rings for every UP
Change the TX ring scheme so that the number of rings for untagged packets and for tagged packets (per each of the VLAN priorities) is the same, unlike the current situation where tagged traffic gets one ring per priority while untagged traffic gets as many rings as there are cores.

Queue selection is done as follows: if the mqprio qdisc operates on the interface, such that the core networking code has invoked the device's setup_tc ndo callback, a mapping of skb->priority => queue set is forced, for both tagged and untagged traffic. Otherwise, the egress map skb->priority => user priority is used for tagged traffic, and all untagged traffic is sent through the TX rings of UP 0.

The patch follows the conclusions reached in discussing this issue with John Fastabend over this thread: http://comments.gmane.org/gmane.linux.network/229877

Cc: John Fastabend <john.r.fastabend@intel.com>
Cc: Liran Liss <liranl@mellanox.com>
Signed-off-by: Amir Vadai <amirv@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
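For illustration, here is a minimal userspace sketch of the ring-selection arithmetic the patch introduces, not the driver code itself: select_queue() and the example values are assumptions, and the hash-to-ring step is modeled with a simple modulo where the kernel's __skb_tx_hash() uses reciprocal scaling.

/* Sketch of the per-UP ring layout: assuming rings_p_up rings per user
 * priority, ring index = (hash within the UP's set) + up * rings_p_up. */
#include <stdio.h>
#include <stdint.h>

#define VLAN_PRIO_SHIFT	13	/* PCP sits in bits 15:13 of the VLAN TCI */

/* Hypothetical model of the new selection logic; the real driver calls
 * __skb_tx_hash() on the skb instead of taking a precomputed hash. */
static uint16_t select_queue(int tagged, uint16_t vlan_tci,
			     uint32_t hash, uint16_t rings_p_up)
{
	uint8_t up = 0;		/* untagged traffic stays on UP 0 */

	if (tagged)
		up = vlan_tci >> VLAN_PRIO_SHIFT;

	/* modulo stands in for __skb_tx_hash()'s reciprocal scaling */
	return (hash % rings_p_up) + up * rings_p_up;
}

int main(void)
{
	/* Assumed example: 8 rings per UP. A tagged frame with PCP 3 and
	 * flow hash 5 maps to ring 3 * 8 + 5 = 29; an untagged frame with
	 * the same hash maps to ring 5 in the UP 0 set. */
	printf("tagged:   ring %u\n", select_queue(1, 3 << VLAN_PRIO_SHIFT, 5, 8));
	printf("untagged: ring %u\n", select_queue(0, 0, 5, 8));
	return 0;
}

With eight rings per UP this reproduces the layout the commit message describes: rings 0-7 serve UP 0 (including all untagged traffic), rings 8-15 serve UP 1, and so on, while the mqprio/setup_tc path bypasses this arithmetic and uses skb_tx_hash() directly.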
Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx4/en_tx.c')
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/en_tx.c	15
1 file changed, 9 insertions(+), 6 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 9a38483feb92..019d856b1334 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -525,14 +525,17 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
 
 u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
 {
-	u16 vlan_tag = 0;
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	u16 rings_p_up = priv->mdev->profile.num_tx_rings_p_up;
+	u8 up = 0;
 
-	if (vlan_tx_tag_present(skb)) {
-		vlan_tag = vlan_tx_tag_get(skb);
-		return MLX4_EN_NUM_TX_RINGS + (vlan_tag >> 13);
-	}
+	if (dev->num_tc)
+		return skb_tx_hash(dev, skb);
+
+	if (vlan_tx_tag_present(skb))
+		up = vlan_tx_tag_get(skb) >> VLAN_PRIO_SHIFT;
 
-	return skb_tx_hash(dev, skb);
+	return __skb_tx_hash(dev, skb, rings_p_up) + up * rings_p_up;
 }
 
 static void mlx4_bf_copy(void __iomem *dst, unsigned long *src, unsigned bytecnt)