summary | refs | log | tree | commit | diff
path: root/drivers/net/ethernet/mellanox/mlx5/core/eq.c
diff options
context:
space:
mode:
author: Maher Sanalla <msanalla@nvidia.com> 2023-06-11 14:35:36 +0300
committer: Saeed Mahameed <saeedm@nvidia.com> 2023-08-07 20:53:50 +0300
commit: a1772de78d7303e33517d1741e0fce1c7247bec4 (patch)
tree: 9feab3f713830b9038f44ece3ad7117d144cc5b0 /drivers/net/ethernet/mellanox/mlx5/core/eq.c
parent: 18cf3d31f8292e3994ec972097419f1c23e0d2ff (diff)
download: linux-a1772de78d7303e33517d1741e0fce1c7247bec4.tar.xz
net/mlx5: Refactor completion IRQ request/release API
Introduce a per-vector completion IRQ request API that requests a single IRQ for a given vector index instead of multiple IRQs request API. On driver load, loop over all completion vectors and request an IRQ for each one via the newly introduced API. Symmetrically, introduce an IRQ release API per vector. On driver unload, loop over all vectors and release each completion IRQ via the new per-vector API. As IRQ vectors will be requested dynamically later in the patchset, add a cpumask of the bounded CPUs to avoid the possible mapping of two IRQs of the same device to the same cpu. Signed-off-by: Maher Sanalla <msanalla@nvidia.com> Reviewed-by: Shay Drory <shayd@nvidia.com> Reviewed-by: Moshe Shemesh <moshe@nvidia.com> Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/eq.c')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c  36
1 file changed, 29 insertions, 7 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 66257f7879b7..268fd61ae8c0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -64,6 +64,7 @@ struct mlx5_eq_table {
struct mlx5_irq **comp_irqs;
struct mlx5_irq *ctrl_irq;
struct cpu_rmap *rmap;
+ struct cpumask used_cpus;
};
#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG) | \
@@ -453,6 +454,7 @@ int mlx5_eq_table_init(struct mlx5_core_dev *dev)
ATOMIC_INIT_NOTIFIER_HEAD(&eq_table->nh[i]);
eq_table->irq_table = mlx5_irq_table_get(dev);
+ cpumask_clear(&eq_table->used_cpus);
eq_table->curr_comp_eqs = 0;
return 0;
}
@@ -808,8 +810,10 @@ EXPORT_SYMBOL(mlx5_eq_update_ci);
static void comp_irqs_release_pci(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
+ int i;
- mlx5_irqs_release_vectors(table->comp_irqs, table->max_comp_eqs);
+ for (i = 0; i < table->max_comp_eqs; i++)
+ mlx5_irq_release_vector(table->comp_irqs[i]);
}
static int comp_irqs_request_pci(struct mlx5_core_dev *dev)
@@ -817,9 +821,9 @@ static int comp_irqs_request_pci(struct mlx5_core_dev *dev)
struct mlx5_eq_table *table = dev->priv.eq_table;
const struct cpumask *prev = cpu_none_mask;
const struct cpumask *mask;
+ struct mlx5_irq *irq;
int ncomp_eqs;
u16 *cpus;
- int ret;
int cpu;
int i;
@@ -840,24 +844,42 @@ static int comp_irqs_request_pci(struct mlx5_core_dev *dev)
}
spread_done:
rcu_read_unlock();
- ret = mlx5_irqs_request_vectors(dev, cpus, ncomp_eqs, table->comp_irqs, &table->rmap);
+ for (i = 0; i < ncomp_eqs; i++) {
+ irq = mlx5_irq_request_vector(dev, cpus[i], i, &table->rmap);
+ if (IS_ERR(irq))
+ break;
+
+ table->comp_irqs[i] = irq;
+ }
+
kfree(cpus);
- return ret;
+ return i ? i : PTR_ERR(irq);
}
static void comp_irqs_release_sf(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
+ int i;
- mlx5_irq_affinity_irqs_release(dev, table->comp_irqs, table->max_comp_eqs);
+ for (i = 0; i < table->max_comp_eqs; i++)
+ mlx5_irq_affinity_irq_release(dev, table->comp_irqs[i]);
}
static int comp_irqs_request_sf(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
- int ncomp_eqs = table->max_comp_eqs;
+ struct mlx5_irq *irq;
+ int i;
+
+ for (i = 0; i < table->max_comp_eqs; i++) {
+ irq = mlx5_irq_affinity_irq_request_auto(dev, &table->used_cpus, i);
+ if (IS_ERR(irq))
+ break;
+
+ table->comp_irqs[i] = irq;
+ }
- return mlx5_irq_affinity_irqs_request_auto(dev, ncomp_eqs, table->comp_irqs);
+ return i ? i : PTR_ERR(irq);
}
static void comp_irqs_release(struct mlx5_core_dev *dev)