Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/eq.c')
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eq.c | 67
1 file changed, 66 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index ef0fe499eaed..898ae3d47f20 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -58,6 +58,9 @@ struct mlx5_eq_table {
struct mutex lock; /* sync async eqs creations */
int num_comp_eqs;
struct mlx5_irq_table *irq_table;
+#ifdef CONFIG_RFS_ACCEL
+ struct cpu_rmap *rmap;
+#endif
};

#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG) | \
@@ -899,7 +902,7 @@ EXPORT_SYMBOL(mlx5_comp_irq_get_affinity_mask);

#ifdef CONFIG_RFS_ACCEL
struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev)
{
- return mlx5_irq_get_rmap(dev->priv.eq_table->irq_table);
+ return dev->priv.eq_table->rmap;
}
#endif
@@ -916,12 +919,57 @@ struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn)
return ERR_PTR(-ENOENT);
}

+static void clear_rmap(struct mlx5_core_dev *dev)
+{
+#ifdef CONFIG_RFS_ACCEL
+ struct mlx5_eq_table *eq_table = dev->priv.eq_table;
+
+ free_irq_cpu_rmap(eq_table->rmap);
+#endif
+}
+
+static int set_rmap(struct mlx5_core_dev *mdev)
+{
+ int err = 0;
+#ifdef CONFIG_RFS_ACCEL
+ struct mlx5_eq_table *eq_table = mdev->priv.eq_table;
+ int vecidx;
+
+ eq_table->rmap = alloc_irq_cpu_rmap(eq_table->num_comp_eqs);
+ if (!eq_table->rmap) {
+ err = -ENOMEM;
+ mlx5_core_err(mdev, "Failed to allocate cpu_rmap. err %d", err);
+ goto err_out;
+ }
+
+ vecidx = MLX5_IRQ_VEC_COMP_BASE;
+ for (; vecidx < eq_table->num_comp_eqs + MLX5_IRQ_VEC_COMP_BASE;
+ vecidx++) {
+ err = irq_cpu_rmap_add(eq_table->rmap,
+ pci_irq_vector(mdev->pdev, vecidx));
+ if (err) {
+ mlx5_core_err(mdev, "irq_cpu_rmap_add failed. err %d",
+ err);
+ goto err_irq_cpu_rmap_add;
+ }
+ }
+ return 0;
+
+err_irq_cpu_rmap_add:
+ clear_rmap(mdev);
+err_out:
+#endif
+ return err;
+}
+
/* This function should only be called after mlx5_cmd_force_teardown_hca */
void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = dev->priv.eq_table;

mutex_lock(&table->lock); /* sync with create/destroy_async_eq */
+ if (!mlx5_core_is_sf(dev))
+ clear_rmap(dev);
mlx5_irq_table_destroy(dev);
mutex_unlock(&table->lock);
}
@@ -951,6 +999,18 @@ int mlx5_eq_table_create(struct mlx5_core_dev *dev)
goto err_async_eqs;
}

+ if (!mlx5_core_is_sf(dev)) {
+ /* rmap is a mapping between irq number and queue number.
+ * Each irq can be assigned to only a single rmap.
+ * Since SFs share IRQs, rmap mapping cannot function correctly
+ * for irqs that are shared between different core/netdev RX rings.
+ * Hence we don't allow a netdev rmap for SFs.
+ */
+ err = set_rmap(dev);
+ if (err)
+ goto err_rmap;
+ }
+
err = create_comp_eqs(dev);
if (err) {
mlx5_core_err(dev, "Failed to create completion EQs\n");
@@ -959,6 +1019,9 @@ int mlx5_eq_table_create(struct mlx5_core_dev *dev)
return 0;

err_comp_eqs:
+ if (!mlx5_core_is_sf(dev))
+ clear_rmap(dev);
+err_rmap:
destroy_async_eqs(dev);
err_async_eqs:
return err;
@@ -966,6 +1029,8 @@ err_async_eqs:

void mlx5_eq_table_destroy(struct mlx5_core_dev *dev)
{
+ if (!mlx5_core_is_sf(dev))
+ clear_rmap(dev);
destroy_comp_eqs(dev);
destroy_async_eqs(dev);
}
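
For context, the cpu_rmap that set_rmap() builds above is the object mlx5_eq_table_get_rmap() now returns directly. Under CONFIG_RFS_ACCEL the netdev side typically publishes that rmap through net_device->rx_cpu_rmap so that accelerated RFS can translate a flow's target CPU back to the IRQ, and therefore the RX queue, affine to it. The sketch below is illustrative only and is not part of this patch; the example_attach_rx_cpu_rmap() name and the mlx5_core.h include path are assumptions, while mlx5_eq_table_get_rmap() and rx_cpu_rmap are the real interfaces involved.

#include <linux/netdevice.h>
#include <linux/cpu_rmap.h>
#include "mlx5_core.h"	/* assumed location of the mlx5_eq_table_get_rmap() prototype */

/* Illustrative sketch: hand the EQ table's IRQ rmap to the stack so aRFS
 * can steer flows toward the RX ring whose IRQ is affine to the consumer.
 */
static void example_attach_rx_cpu_rmap(struct net_device *netdev,
				       struct mlx5_core_dev *mdev)
{
#ifdef CONFIG_RFS_ACCEL
	/* The rmap remains owned by the EQ table; clear_rmap() frees it on
	 * teardown, so the netdev must stop using it before that point.
	 */
	netdev->rx_cpu_rmap = mlx5_eq_table_get_rmap(mdev);
#endif
}

Because SFs never call set_rmap() (see the !mlx5_core_is_sf() checks above), the rmap pointer is expected to stay NULL for them, so a consumer like the sketch above simply gets NULL and runs without aRFS acceleration in that case.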