path: root/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
author      Yuval Atias <yuvala@mellanox.com>        2014-05-25 18:47:27 +0400
committer   David S. Miller <davem@davemloft.net>    2014-06-02 06:16:29 +0400
commit      70a640d0dae3a9b1b222ce673eb5d92c263ddd61 (patch)
tree        89dbe2c36c695f2e4c79429f4362bc07df2cc1de /drivers/net/ethernet/mellanox/mlx4/en_netdev.c
parent      c8865b64b05b2f4eeefd369373e9c8aeb069e7a1 (diff)
download    linux-70a640d0dae3a9b1b222ce673eb5d92c263ddd61.tar.xz
net/mlx4_en: Use affinity hint
The “affinity hint” mechanism is used by the user space daemon irqbalance to learn a preferred CPU mask for an IRQ. irqbalance can use this hint to balance the IRQs across the CPUs indicated by the mask.

We wish the HCA to preferentially map the IRQs it uses to NUMA cores close to it. To accomplish this, we use cpumask_set_cpu_local_first(), which sets the affinity hint according to the following policy: it first maps IRQs to “close” NUMA cores; once these are exhausted, the remaining IRQs are mapped to “far” NUMA cores.

Signed-off-by: Yuval Atias <yuvala@mellanox.com>
Signed-off-by: Amir Vadai <amirv@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
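For illustration only (this sketch is not part of the patch): a mask built with cpumask_set_cpu_local_first() is typically handed to the IRQ core through the standard irq_set_affinity_hint() API, which exposes it to irqbalance via /proc/irq/<n>/affinity_hint. The helper names example_install_affinity_hint()/example_remove_affinity_hint() and the irq/hint_mask parameters below are hypothetical; in this patch en_netdev.c only prepares the per-ring mask, and the hint itself is installed elsewhere in the series.

#include <linux/cpumask.h>
#include <linux/interrupt.h>

/*
 * Sketch: pick the ring_idx-th CPU, preferring cores on the device's
 * NUMA node, and publish it as the IRQ's affinity hint.  The caller owns
 * hint_mask and must keep it allocated until the hint is removed, since
 * irq_set_affinity_hint() stores the pointer rather than copying the mask.
 */
static int example_install_affinity_hint(unsigned int irq, int ring_idx,
					 int numa_node,
					 cpumask_var_t hint_mask)
{
	int err;

	err = cpumask_set_cpu_local_first(ring_idx, numa_node, hint_mask);
	if (err)
		return err;

	return irq_set_affinity_hint(irq, hint_mask);
}

static void example_remove_affinity_hint(unsigned int irq,
					 cpumask_var_t hint_mask)
{
	/* Clear the hint before its backing storage is freed */
	irq_set_affinity_hint(irq, NULL);
	free_cpumask_var(hint_mask);
}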
Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx4/en_netdev.c')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 30
1 file changed, 30 insertions(+), 0 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 58209bd0c94c..05d135572abc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1526,6 +1526,32 @@ static void mlx4_en_linkstate(struct work_struct *work)
mutex_unlock(&mdev->state_lock);
}
+static void mlx4_en_init_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
+{
+ struct mlx4_en_rx_ring *ring = priv->rx_ring[ring_idx];
+ int numa_node = priv->mdev->dev->numa_node;
+
+ if (numa_node == -1)
+ return;
+
+ if (!zalloc_cpumask_var(&ring->affinity_mask, GFP_KERNEL)) {
+ en_err(priv, "Failed to allocate core mask\n");
+ return;
+ }
+
+ if (cpumask_set_cpu_local_first(ring_idx, numa_node,
+ ring->affinity_mask)) {
+ en_err(priv, "Failed setting affinity hint\n");
+ free_cpumask_var(ring->affinity_mask);
+ ring->affinity_mask = NULL;
+ }
+}
+
+static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
+{
+ free_cpumask_var(priv->rx_ring[ring_idx]->affinity_mask);
+ priv->rx_ring[ring_idx]->affinity_mask = NULL;
+}
int mlx4_en_start_port(struct net_device *dev)
{
@@ -1567,6 +1593,8 @@ int mlx4_en_start_port(struct net_device *dev)
mlx4_en_cq_init_lock(cq);
+ mlx4_en_init_affinity_hint(priv, i);
+
err = mlx4_en_activate_cq(priv, cq, i);
if (err) {
en_err(priv, "Failed activating Rx CQ\n");
@@ -1847,6 +1875,8 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
msleep(1);
mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
mlx4_en_deactivate_cq(priv, cq);
+
+ mlx4_en_free_affinity_hint(priv, i);
}
}
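A note on the teardown ordering in the last hunk: assuming the per-ring mask is later installed as an IRQ affinity hint (as the commit message implies), it remains referenced by the IRQ layer for as long as the hint is set, so the mask allocated in mlx4_en_init_affinity_hint() has to stay alive for the lifetime of the port. mlx4_en_free_affinity_hint() is therefore called only in mlx4_en_stop_port(), after the RX ring and its CQ have been deactivated.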