author		Maher Sanalla <msanalla@nvidia.com>	2023-06-12 11:58:14 +0300
committer	Saeed Mahameed <saeedm@nvidia.com>	2023-08-07 20:53:51 +0300
commit		f3147015fa0769cf1dcbfdb9040ad380cc4daeb5 (patch)
tree		306e1a747d65f3899ff4944f468b95c41c6d11e2 /drivers/net/ethernet/mellanox/mlx5/core/eq.c
parent		ddd2c79da02021153dc8674e5a7e9748e56c1240 (diff)
download	linux-f3147015fa0769cf1dcbfdb9040ad380cc4daeb5.tar.xz
net/mlx5: Add IRQ vector to CPU lookup function
Currently, once driver load completes, IRQ requests have been performed for all
vectors. However, as we move to support dynamic creation of EQs, this will no
longer be the case, as some IRQs will not exist at this stage. Thus, in such a
case, use the default CPU-to-IRQ mapping, which is the serial mapping based on
the IRQ vector index: the n'th vector gets mapped to the n'th CPU.

Introduce an API function, mlx5_comp_vector_get_cpu(), that takes an IRQ
vector index and returns the corresponding CPU mapping. It uses the existing
IRQ affinity if defined, or falls back to the default serialized CPU mapping
otherwise.

Signed-off-by: Maher Sanalla <msanalla@nvidia.com>
Reviewed-by: Shay Drory <shayd@nvidia.com>
Reviewed-by: Moshe Shemesh <moshe@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
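[Editor's note] As a stand-alone illustration of the fallback the message
describes: with no IRQ affinity mask available yet, the n'th vector is mapped
serially to the n'th online CPU. This is a minimal user-space C model, not
driver code; the real mlx5_cpumask_default_spread() is NUMA-aware, which this
sketch reduces to a plain round-robin.

/*
 * Model of the serialized default mapping: vector n -> CPU n, wrapping
 * around once the vector index exceeds the number of online CPUs.
 * Hypothetical illustration only; not part of the mlx5 driver.
 */
#include <stdio.h>
#include <unistd.h>

static int default_spread_model(int ncpus, int vector)
{
	return vector % ncpus;
}

int main(void)
{
	int ncpus = (int)sysconf(_SC_NPROCESSORS_ONLN);
	int v;

	for (v = 0; v < 8; v++)
		printf("vector %d -> cpu %d\n", v, default_spread_model(ncpus, v));
	return 0;
}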
Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/eq.c')
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/eq.c	19
1 file changed, 16 insertions, 3 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index ad654d460d0c..2f5c0d00285f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -1058,7 +1058,7 @@ unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev)
 }
 EXPORT_SYMBOL(mlx5_comp_vectors_count);
 
-struct cpumask *
+static struct cpumask *
 mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector)
 {
 	struct mlx5_eq_table *table = dev->priv.eq_table;
@@ -1068,10 +1068,23 @@ mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector)
 	if (eq)
 		return mlx5_irq_get_affinity_mask(eq->core.irq);
 
-	WARN_ON_ONCE(1);
 	return NULL;
 }
-EXPORT_SYMBOL(mlx5_comp_irq_get_affinity_mask);
+
+int mlx5_comp_vector_get_cpu(struct mlx5_core_dev *dev, int vector)
+{
+	struct cpumask *mask;
+	int cpu;
+
+	mask = mlx5_comp_irq_get_affinity_mask(dev, vector);
+	if (mask)
+		cpu = cpumask_first(mask);
+	else
+		cpu = mlx5_cpumask_default_spread(dev->priv.numa_node, vector);
+
+	return cpu;
+}
+EXPORT_SYMBOL(mlx5_comp_vector_get_cpu);
 
 #ifdef CONFIG_RFS_ACCEL
 struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev)
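[Editor's note] For illustration, a hedged sketch of how a consumer might use
the new export. my_ulp_init_queue() and its surrounding context are
hypothetical and not part of this patch; only mlx5_comp_vector_get_cpu() comes
from the change above.

/* Hypothetical caller (not in this patch): discover which CPU services a
 * completion vector, e.g. to allocate queue memory on that CPU's NUMA node. */
static int my_ulp_init_queue(struct mlx5_core_dev *mdev, int vecidx)
{
	int cpu = mlx5_comp_vector_get_cpu(mdev, vecidx);

	pr_debug("comp vector %d -> CPU %d (node %d)\n",
		 vecidx, cpu, cpu_to_node(cpu));
	/* ... allocate per-queue resources with kzalloc_node() on that node ... */
	return 0;
}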