From 02d92f7903647119e125b24f5470f96cee0d4b4b Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Fri, 19 Jan 2018 16:13:01 -0800 Subject: net/mlx5: CQ Database per EQ Before this patch the driver had one CQ database protected by one spinlock; this spinlock is meant to synchronize between CQ adding/removing and CQ IRQ interrupt handling. On a system with a large number of CPUs and a workload that requires lots of interrupts, this global spinlock becomes a very nasty hotspot and introduces contention between the active cores, which significantly hurts performance and becomes a bottleneck that prevents seamless CPU scaling. To solve this we simply move the CQ database and its spinlock to be per EQ (IRQ), thus per core. Tested with: system: 2 sockets, 14 cores per socket, hyperthreading, 2x14x2=56 cores netperf command: ./super_netperf 200 -P 0 -t TCP_RR -H -l 30 -- -r 300,300 -o -s 1M,1M -S 1M,1M WITHOUT THIS PATCH: Average: CPU %usr %nice %sys %iowait %irq %soft %steal %guest %gnice %idle Average: all 4.32 0.00 36.15 0.09 0.00 34.02 0.00 0.00 0.00 25.41 Samples: 2M of event 'cycles:pp', Event count (approx.): 1554616897271 Overhead Command Shared Object Symbol + 14.28% swapper [kernel.vmlinux] [k] intel_idle + 12.25% swapper [kernel.vmlinux] [k] queued_spin_lock_slowpath + 10.29% netserver [kernel.vmlinux] [k] queued_spin_lock_slowpath + 1.32% netserver [kernel.vmlinux] [k] mlx5e_xmit WITH THIS PATCH: Average: CPU %usr %nice %sys %iowait %irq %soft %steal %guest %gnice %idle Average: all 4.27 0.00 34.31 0.01 0.00 18.71 0.00 0.00 0.00 42.69 Samples: 2M of event 'cycles:pp', Event count (approx.): 1498132937483 Overhead Command Shared Object Symbol + 23.33% swapper [kernel.vmlinux] [k] intel_idle + 1.69% netserver [kernel.vmlinux] [k] mlx5e_xmit Tested-by: Song Liu Signed-off-by: Saeed Mahameed Reviewed-by: Gal Pressman --- drivers/net/ethernet/mellanox/mlx5/core/cq.c | 69 +++++++++++++++----------- drivers/net/ethernet/mellanox/mlx5/core/eq.c | 10 +++- drivers/net/ethernet/mellanox/mlx5/core/main.c | 8 +-- 3 files changed, 52 insertions(+), 35 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c index 1016e05c7ec7..dfbeeaa43276 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c @@ -86,10 +86,10 @@ static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq) spin_unlock_irqrestore(&tasklet_ctx->lock, flags); } -void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn) +void mlx5_cq_completion(struct mlx5_eq *eq, u32 cqn) { + struct mlx5_cq_table *table = &eq->cq_table; struct mlx5_core_cq *cq; - struct mlx5_cq_table *table = &dev->priv.cq_table; spin_lock(&table->lock); cq = radix_tree_lookup(&table->tree, cqn); @@ -98,7 +98,7 @@ void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn) spin_unlock(&table->lock); if (!cq) { - mlx5_core_warn(dev, "Completion event for bogus CQ 0x%x\n", cqn); + mlx5_core_warn(eq->dev, "Completion event for bogus CQ 0x%x\n", cqn); return; } @@ -110,9 +110,9 @@ void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn) complete(&cq->free); } -void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type) +void mlx5_cq_event(struct mlx5_eq *eq, u32 cqn, int event_type) { - struct mlx5_cq_table *table = &dev->priv.cq_table; + struct mlx5_cq_table *table = &eq->cq_table; struct mlx5_core_cq *cq; spin_lock(&table->lock); @@ -124,7 +124,7 @@ void mlx5_cq_event(struct
mlx5_core_dev *dev, u32 cqn, int event_type) spin_unlock(&table->lock); if (!cq) { - mlx5_core_warn(dev, "Async event for bogus CQ 0x%x\n", cqn); + mlx5_core_warn(eq->dev, "Async event for bogus CQ 0x%x\n", cqn); return; } @@ -137,19 +137,22 @@ void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type) int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, u32 *in, int inlen) { - struct mlx5_cq_table *table = &dev->priv.cq_table; u32 out[MLX5_ST_SZ_DW(create_cq_out)]; u32 din[MLX5_ST_SZ_DW(destroy_cq_in)]; u32 dout[MLX5_ST_SZ_DW(destroy_cq_out)]; int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context), c_eqn); - struct mlx5_eq *eq; + struct mlx5_eq *eq, *async_eq; + struct mlx5_cq_table *table; int err; + async_eq = &dev->priv.eq_table.async_eq; eq = mlx5_eqn2eq(dev, eqn); if (IS_ERR(eq)) return PTR_ERR(eq); + table = &eq->cq_table; + memset(out, 0, sizeof(out)); MLX5_SET(create_cq_in, in, opcode, MLX5_CMD_OP_CREATE_CQ); err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); @@ -159,6 +162,7 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, cq->cqn = MLX5_GET(create_cq_out, out, cqn); cq->cons_index = 0; cq->arm_sn = 0; + cq->eq = eq; refcount_set(&cq->refcount, 1); init_completion(&cq->free); if (!cq->comp) @@ -167,12 +171,20 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, cq->tasklet_ctx.priv = &eq->tasklet_ctx; INIT_LIST_HEAD(&cq->tasklet_ctx.list); + /* Add to comp EQ CQ tree to recv comp events */ spin_lock_irq(&table->lock); err = radix_tree_insert(&table->tree, cq->cqn, cq); spin_unlock_irq(&table->lock); if (err) goto err_cmd; + /* Add to async EQ CQ tree to recv Async events */ + spin_lock_irq(&async_eq->cq_table.lock); + err = radix_tree_insert(&async_eq->cq_table.tree, cq->cqn, cq); + spin_unlock_irq(&async_eq->cq_table.lock); + if (err) + goto err_cq_table; + cq->pid = current->pid; err = mlx5_debug_cq_add(dev, cq); if (err) @@ -183,6 +195,10 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, return 0; +err_cq_table: + spin_lock_irq(&table->lock); + radix_tree_delete(&table->tree, cq->cqn); + spin_unlock_irq(&table->lock); err_cmd: memset(din, 0, sizeof(din)); memset(dout, 0, sizeof(dout)); @@ -195,21 +211,34 @@ EXPORT_SYMBOL(mlx5_core_create_cq); int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) { - struct mlx5_cq_table *table = &dev->priv.cq_table; + struct mlx5_cq_table *asyn_eq_cq_table = &dev->priv.eq_table.async_eq.cq_table; + struct mlx5_cq_table *table = &cq->eq->cq_table; u32 out[MLX5_ST_SZ_DW(destroy_cq_out)] = {0}; u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {0}; struct mlx5_core_cq *tmp; int err; + spin_lock_irq(&asyn_eq_cq_table->lock); + tmp = radix_tree_delete(&asyn_eq_cq_table->tree, cq->cqn); + spin_unlock_irq(&asyn_eq_cq_table->lock); + if (!tmp) { + mlx5_core_warn(dev, "cq 0x%x not found in async eq cq tree\n", cq->cqn); + return -EINVAL; + } + if (tmp != cq) { + mlx5_core_warn(dev, "corruption on cqn 0x%x in async eq cq tree\n", cq->cqn); + return -EINVAL; + } + spin_lock_irq(&table->lock); tmp = radix_tree_delete(&table->tree, cq->cqn); spin_unlock_irq(&table->lock); if (!tmp) { - mlx5_core_warn(dev, "cq 0x%x not found in tree\n", cq->cqn); + mlx5_core_warn(dev, "cq 0x%x not found in comp eq cq tree\n", cq->cqn); return -EINVAL; } if (tmp != cq) { - mlx5_core_warn(dev, "corruption on srqn 0x%x\n", cq->cqn); + mlx5_core_warn(dev, "corruption on cqn 0x%x in comp eq cq tree\n", cq->cqn); return -EINVAL; } @@ 
-270,21 +299,3 @@ int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev, return mlx5_core_modify_cq(dev, cq, in, sizeof(in)); } EXPORT_SYMBOL(mlx5_core_modify_cq_moderation); - -int mlx5_init_cq_table(struct mlx5_core_dev *dev) -{ - struct mlx5_cq_table *table = &dev->priv.cq_table; - int err; - - memset(table, 0, sizeof(*table)); - spin_lock_init(&table->lock); - INIT_RADIX_TREE(&table->tree, GFP_ATOMIC); - err = mlx5_cq_debugfs_init(dev); - - return err; -} - -void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev) -{ - mlx5_cq_debugfs_cleanup(dev); -} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 25106e996a96..328403ebf2f5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -415,7 +415,7 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr) switch (eqe->type) { case MLX5_EVENT_TYPE_COMP: cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff; - mlx5_cq_completion(dev, cqn); + mlx5_cq_completion(eq, cqn); break; case MLX5_EVENT_TYPE_DCT_DRAINED: rsn = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff; @@ -472,7 +472,7 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr) cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff; mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrome 0x%x\n", cqn, eqe->data.cq_err.syndrome); - mlx5_cq_event(dev, cqn, eqe->type); + mlx5_cq_event(eq, cqn, eqe->type); break; case MLX5_EVENT_TYPE_PAGE_REQUEST: @@ -567,6 +567,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, int nent, u64 mask, const char *name, enum mlx5_eq_type type) { + struct mlx5_cq_table *cq_table = &eq->cq_table; u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0}; struct mlx5_priv *priv = &dev->priv; irq_handler_t handler; @@ -576,6 +577,11 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, u32 *in; int err; + /* Init CQ table */ + memset(cq_table, 0, sizeof(*cq_table)); + spin_lock_init(&cq_table->lock); + INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC); + eq->type = type; eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE); eq->cons_index = 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 2ef641c91c26..8cc22bf80c87 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -942,9 +942,9 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv) goto out; } - err = mlx5_init_cq_table(dev); + err = mlx5_cq_debugfs_init(dev); if (err) { - dev_err(&pdev->dev, "failed to initialize cq table\n"); + dev_err(&pdev->dev, "failed to initialize cq debugfs\n"); goto err_eq_cleanup; } @@ -1002,7 +1002,7 @@ err_tables_cleanup: mlx5_cleanup_mkey_table(dev); mlx5_cleanup_srq_table(dev); mlx5_cleanup_qp_table(dev); - mlx5_cleanup_cq_table(dev); + mlx5_cq_debugfs_cleanup(dev); err_eq_cleanup: mlx5_eq_cleanup(dev); @@ -1023,7 +1023,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev) mlx5_cleanup_mkey_table(dev); mlx5_cleanup_srq_table(dev); mlx5_cleanup_qp_table(dev); - mlx5_cleanup_cq_table(dev); + mlx5_cq_debugfs_cleanup(dev); mlx5_eq_cleanup(dev); } -- cgit v1.2.3 From d2ff4fa575000058def5f5c602784e233211d4e7 Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Wed, 7 Feb 2018 20:48:43 -0800 Subject: net/mlx5: Add missing likely/unlikely hints to cq events If a hardware event is targeting a CQ, that CQ should exist. Add unlikely to error handling flows. 
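For context: likely()/unlikely() are the standard branch-prediction annotations from include/linux/compiler.h, wrapping GCC's __builtin_expect() so the compiler lays out the expected case as the straight-line fall-through path and moves the bogus-CQ warning path out of line. The upstream definitions, quoted here for reference only (they are not part of this patch), are:

	#define likely(x)	__builtin_expect(!!(x), 1)
	#define unlikely(x)	__builtin_expect(!!(x), 0)
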
Signed-off-by: Saeed Mahameed Reviewed-by: Gal Pressman --- drivers/net/ethernet/mellanox/mlx5/core/cq.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c index dfbeeaa43276..9feeb555e937 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c @@ -97,7 +97,7 @@ void mlx5_cq_completion(struct mlx5_eq *eq, u32 cqn) refcount_inc(&cq->refcount); spin_unlock(&table->lock); - if (!cq) { + if (unlikely(!cq)) { mlx5_core_warn(eq->dev, "Completion event for bogus CQ 0x%x\n", cqn); return; } @@ -118,12 +118,12 @@ void mlx5_cq_event(struct mlx5_eq *eq, u32 cqn, int event_type) spin_lock(&table->lock); cq = radix_tree_lookup(&table->tree, cqn); - if (cq) + if (likely(cq)) refcount_inc(&cq->refcount); spin_unlock(&table->lock); - if (!cq) { + if (unlikely(!cq)) { mlx5_core_warn(eq->dev, "Async event for bogus CQ 0x%x\n", cqn); return; } -- cgit v1.2.3 From d5c07157dd4f5ab9123eaab7db572ca360c19a55 Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Thu, 1 Feb 2018 04:28:17 -0800 Subject: net/mlx5: EQ add/del CQ API Add API to add/del CQ to/from EQs CQ table to be used in cq.c upon CQ creation/destruction, as CQ table is now private to eq.c. Signed-off-by: Saeed Mahameed Reviewed-by: Gal Pressman --- drivers/net/ethernet/mellanox/mlx5/core/cq.c | 60 ++++++---------------- drivers/net/ethernet/mellanox/mlx5/core/eq.c | 34 ++++++++++++ .../net/ethernet/mellanox/mlx5/core/mlx5_core.h | 4 ++ 3 files changed, 53 insertions(+), 45 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c index 9feeb555e937..f6e478d05ecc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c @@ -137,22 +137,17 @@ void mlx5_cq_event(struct mlx5_eq *eq, u32 cqn, int event_type) int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, u32 *in, int inlen) { + int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context), c_eqn); + u32 dout[MLX5_ST_SZ_DW(destroy_cq_out)]; u32 out[MLX5_ST_SZ_DW(create_cq_out)]; u32 din[MLX5_ST_SZ_DW(destroy_cq_in)]; - u32 dout[MLX5_ST_SZ_DW(destroy_cq_out)]; - int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context), - c_eqn); - struct mlx5_eq *eq, *async_eq; - struct mlx5_cq_table *table; + struct mlx5_eq *eq; int err; - async_eq = &dev->priv.eq_table.async_eq; eq = mlx5_eqn2eq(dev, eqn); if (IS_ERR(eq)) return PTR_ERR(eq); - table = &eq->cq_table; - memset(out, 0, sizeof(out)); MLX5_SET(create_cq_in, in, opcode, MLX5_CMD_OP_CREATE_CQ); err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); @@ -172,18 +167,14 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, INIT_LIST_HEAD(&cq->tasklet_ctx.list); /* Add to comp EQ CQ tree to recv comp events */ - spin_lock_irq(&table->lock); - err = radix_tree_insert(&table->tree, cq->cqn, cq); - spin_unlock_irq(&table->lock); + err = mlx5_eq_add_cq(eq, cq); if (err) goto err_cmd; - /* Add to async EQ CQ tree to recv Async events */ - spin_lock_irq(&async_eq->cq_table.lock); - err = radix_tree_insert(&async_eq->cq_table.tree, cq->cqn, cq); - spin_unlock_irq(&async_eq->cq_table.lock); + /* Add to async EQ CQ tree to recv async events */ + err = mlx5_eq_add_cq(&dev->priv.eq_table.async_eq, cq); if (err) - goto 
err_cq_table; + goto err_cq_add; cq->pid = current->pid; err = mlx5_debug_cq_add(dev, cq); @@ -195,10 +186,8 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, return 0; -err_cq_table: - spin_lock_irq(&table->lock); - radix_tree_delete(&table->tree, cq->cqn); - spin_unlock_irq(&table->lock); +err_cq_add: + mlx5_eq_del_cq(eq, cq); err_cmd: memset(din, 0, sizeof(din)); memset(dout, 0, sizeof(dout)); @@ -211,36 +200,17 @@ EXPORT_SYMBOL(mlx5_core_create_cq); int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) { - struct mlx5_cq_table *asyn_eq_cq_table = &dev->priv.eq_table.async_eq.cq_table; - struct mlx5_cq_table *table = &cq->eq->cq_table; u32 out[MLX5_ST_SZ_DW(destroy_cq_out)] = {0}; u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {0}; - struct mlx5_core_cq *tmp; int err; - spin_lock_irq(&asyn_eq_cq_table->lock); - tmp = radix_tree_delete(&asyn_eq_cq_table->tree, cq->cqn); - spin_unlock_irq(&asyn_eq_cq_table->lock); - if (!tmp) { - mlx5_core_warn(dev, "cq 0x%x not found in async eq cq tree\n", cq->cqn); - return -EINVAL; - } - if (tmp != cq) { - mlx5_core_warn(dev, "corruption on cqn 0x%x in async eq cq tree\n", cq->cqn); - return -EINVAL; - } + err = mlx5_eq_del_cq(&dev->priv.eq_table.async_eq, cq); + if (err) + return err; - spin_lock_irq(&table->lock); - tmp = radix_tree_delete(&table->tree, cq->cqn); - spin_unlock_irq(&table->lock); - if (!tmp) { - mlx5_core_warn(dev, "cq 0x%x not found in comp eq cq tree\n", cq->cqn); - return -EINVAL; - } - if (tmp != cq) { - mlx5_core_warn(dev, "corruption on cqn 0x%x in comp eq cq tree\n", cq->cqn); - return -EINVAL; - } + err = mlx5_eq_del_cq(cq->eq, cq); + if (err) + return err; MLX5_SET(destroy_cq_in, in, opcode, MLX5_CMD_OP_DESTROY_CQ); MLX5_SET(destroy_cq_in, in, cqn, cq->cqn); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 328403ebf2f5..c1f0468e95bd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -704,6 +704,40 @@ int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq) } EXPORT_SYMBOL_GPL(mlx5_destroy_unmap_eq); +int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq) +{ + struct mlx5_cq_table *table = &eq->cq_table; + int err; + + spin_lock_irq(&table->lock); + err = radix_tree_insert(&table->tree, cq->cqn, cq); + spin_unlock_irq(&table->lock); + + return err; +} + +int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq) +{ + struct mlx5_cq_table *table = &eq->cq_table; + struct mlx5_core_cq *tmp; + + spin_lock_irq(&table->lock); + tmp = radix_tree_delete(&table->tree, cq->cqn); + spin_unlock_irq(&table->lock); + + if (!tmp) { + mlx5_core_warn(eq->dev, "cq 0x%x not found in eq 0x%x tree\n", eq->eqn, cq->cqn); + return -ENOENT; + } + + if (tmp != cq) { + mlx5_core_warn(eq->dev, "corruption on cqn 0x%x in eq 0x%x\n", eq->eqn, cq->cqn); + return -EINVAL; + } + + return 0; +} + int mlx5_eq_init(struct mlx5_core_dev *dev) { int err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 394552f36fcf..54a1cbfb1b5a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -38,6 +38,7 @@ #include #include #include +#include #define DRIVER_NAME "mlx5_core" #define DRIVER_VERSION "5.0-0" @@ -115,6 +116,9 @@ int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy, u32 element_id); int 
mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev); u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev); + +int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq); +int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq); struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn); u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq *eq); void mlx5_cq_tasklet_cb(unsigned long data); -- cgit v1.2.3 From f105b45bf77ced96e516e1cd771c41bb7e8c830b Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Thu, 1 Feb 2018 03:32:00 -0800 Subject: net/mlx5: CQ hold/put API Now as the CQ table is per EQ, add an API to hold/put CQ to be used from eq.c in downstream patch. Signed-off-by: Saeed Mahameed Reviewed-by: Gal Pressman --- drivers/net/ethernet/mellanox/mlx5/core/cq.c | 42 +++++++++++++--------------- include/linux/mlx5/cq.h | 11 ++++++++ 2 files changed, 30 insertions(+), 23 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c index f6e478d05ecc..06dc7bd302ed 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c @@ -58,8 +58,7 @@ void mlx5_cq_tasklet_cb(unsigned long data) tasklet_ctx.list) { list_del_init(&mcq->tasklet_ctx.list); mcq->tasklet_ctx.comp(mcq); - if (refcount_dec_and_test(&mcq->refcount)) - complete(&mcq->free); + mlx5_cq_put(mcq); if (time_after(jiffies, end)) break; } @@ -80,23 +79,31 @@ static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq) * still arrive. */ if (list_empty_careful(&cq->tasklet_ctx.list)) { - refcount_inc(&cq->refcount); + mlx5_cq_hold(cq); list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list); } spin_unlock_irqrestore(&tasklet_ctx->lock, flags); } -void mlx5_cq_completion(struct mlx5_eq *eq, u32 cqn) +/* caller must eventually call mlx5_cq_put on the returned cq */ +static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn) { struct mlx5_cq_table *table = &eq->cq_table; - struct mlx5_core_cq *cq; + struct mlx5_core_cq *cq = NULL; spin_lock(&table->lock); cq = radix_tree_lookup(&table->tree, cqn); if (likely(cq)) - refcount_inc(&cq->refcount); + mlx5_cq_hold(cq); spin_unlock(&table->lock); + return cq; +} + +void mlx5_cq_completion(struct mlx5_eq *eq, u32 cqn) +{ + struct mlx5_core_cq *cq = mlx5_eq_cq_get(eq, cqn); + if (unlikely(!cq)) { mlx5_core_warn(eq->dev, "Completion event for bogus CQ 0x%x\n", cqn); return; @@ -106,22 +113,12 @@ void mlx5_cq_completion(struct mlx5_eq *eq, u32 cqn) cq->comp(cq); - if (refcount_dec_and_test(&cq->refcount)) - complete(&cq->free); + mlx5_cq_put(cq); } void mlx5_cq_event(struct mlx5_eq *eq, u32 cqn, int event_type) { - struct mlx5_cq_table *table = &eq->cq_table; - struct mlx5_core_cq *cq; - - spin_lock(&table->lock); - - cq = radix_tree_lookup(&table->tree, cqn); - if (likely(cq)) - refcount_inc(&cq->refcount); - - spin_unlock(&table->lock); + struct mlx5_core_cq *cq = mlx5_eq_cq_get(eq, cqn); if (unlikely(!cq)) { mlx5_core_warn(eq->dev, "Async event for bogus CQ 0x%x\n", cqn); @@ -130,8 +127,7 @@ void mlx5_cq_event(struct mlx5_eq *eq, u32 cqn, int event_type) cq->event(cq, event_type); - if (refcount_dec_and_test(&cq->refcount)) - complete(&cq->free); + mlx5_cq_put(cq); } int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, @@ -158,7 +154,8 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, cq->cons_index = 0; cq->arm_sn = 0; cq->eq = eq; - refcount_set(&cq->refcount, 
1); + refcount_set(&cq->refcount, 0); + mlx5_cq_hold(cq); init_completion(&cq->free); if (!cq->comp) cq->comp = mlx5_add_cq_to_tasklet; @@ -221,8 +218,7 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) synchronize_irq(cq->irqn); mlx5_debug_cq_remove(dev, cq); - if (refcount_dec_and_test(&cq->refcount)) - complete(&cq->free); + mlx5_cq_put(cq); wait_for_completion(&cq->free); return 0; diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h index 06ba425a6ad7..445ad194e0fe 100644 --- a/include/linux/mlx5/cq.h +++ b/include/linux/mlx5/cq.h @@ -172,6 +172,17 @@ static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd, mlx5_write64(doorbell, uar_page + MLX5_CQ_DOORBELL, NULL); } +static inline void mlx5_cq_hold(struct mlx5_core_cq *cq) +{ + refcount_inc(&cq->refcount); +} + +static inline void mlx5_cq_put(struct mlx5_core_cq *cq) +{ + if (refcount_dec_and_test(&cq->refcount)) + complete(&cq->free); +} + int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, u32 *in, int inlen); int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq); -- cgit v1.2.3 From 3ac7afdbcf243d6c79c1569d9e29aef0096e4743 Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Thu, 1 Feb 2018 04:37:07 -0800 Subject: net/mlx5: Move CQ completion and event forwarding logic to eq.c Since CQ tree is now per EQ, CQ completion and event forwarding became specific implementation of EQ logic, this patch moves that logic to eq.c and makes those functions static. Signed-off-by: Saeed Mahameed Reviewed-by: Gal Pressman --- drivers/net/ethernet/mellanox/mlx5/core/cq.c | 45 ------------------------- drivers/net/ethernet/mellanox/mlx5/core/eq.c | 49 ++++++++++++++++++++++++++-- include/linux/mlx5/driver.h | 2 -- 3 files changed, 47 insertions(+), 49 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c index 06dc7bd302ed..669ed16938b3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c @@ -85,51 +85,6 @@ static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq) spin_unlock_irqrestore(&tasklet_ctx->lock, flags); } -/* caller must eventually call mlx5_cq_put on the returned cq */ -static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn) -{ - struct mlx5_cq_table *table = &eq->cq_table; - struct mlx5_core_cq *cq = NULL; - - spin_lock(&table->lock); - cq = radix_tree_lookup(&table->tree, cqn); - if (likely(cq)) - mlx5_cq_hold(cq); - spin_unlock(&table->lock); - - return cq; -} - -void mlx5_cq_completion(struct mlx5_eq *eq, u32 cqn) -{ - struct mlx5_core_cq *cq = mlx5_eq_cq_get(eq, cqn); - - if (unlikely(!cq)) { - mlx5_core_warn(eq->dev, "Completion event for bogus CQ 0x%x\n", cqn); - return; - } - - ++cq->arm_sn; - - cq->comp(cq); - - mlx5_cq_put(cq); -} - -void mlx5_cq_event(struct mlx5_eq *eq, u32 cqn, int event_type) -{ - struct mlx5_core_cq *cq = mlx5_eq_cq_get(eq, cqn); - - if (unlikely(!cq)) { - mlx5_core_warn(eq->dev, "Async event for bogus CQ 0x%x\n", cqn); - return; - } - - cq->event(cq, event_type); - - mlx5_cq_put(cq); -} - int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, u32 *in, int inlen) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index c1f0468e95bd..7e442b38a8ca 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -393,6 +393,51 @@ static void general_event_handler(struct mlx5_core_dev *dev, } } +/* caller must eventually call mlx5_cq_put on the returned cq */ +static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn) +{ + struct mlx5_cq_table *table = &eq->cq_table; + struct mlx5_core_cq *cq = NULL; + + spin_lock(&table->lock); + cq = radix_tree_lookup(&table->tree, cqn); + if (likely(cq)) + mlx5_cq_hold(cq); + spin_unlock(&table->lock); + + return cq; +} + +static void mlx5_eq_cq_completion(struct mlx5_eq *eq, u32 cqn) +{ + struct mlx5_core_cq *cq = mlx5_eq_cq_get(eq, cqn); + + if (unlikely(!cq)) { + mlx5_core_warn(eq->dev, "Completion event for bogus CQ 0x%x\n", cqn); + return; + } + + ++cq->arm_sn; + + cq->comp(cq); + + mlx5_cq_put(cq); +} + +static void mlx5_eq_cq_event(struct mlx5_eq *eq, u32 cqn, int event_type) +{ + struct mlx5_core_cq *cq = mlx5_eq_cq_get(eq, cqn); + + if (unlikely(!cq)) { + mlx5_core_warn(eq->dev, "Async event for bogus CQ 0x%x\n", cqn); + return; + } + + cq->event(cq, event_type); + + mlx5_cq_put(cq); +} + static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr) { struct mlx5_eq *eq = eq_ptr; @@ -415,7 +460,7 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr) switch (eqe->type) { case MLX5_EVENT_TYPE_COMP: cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff; - mlx5_cq_completion(eq, cqn); + mlx5_eq_cq_completion(eq, cqn); break; case MLX5_EVENT_TYPE_DCT_DRAINED: rsn = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff; @@ -472,7 +517,7 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr) cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff; mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrome 0x%x\n", cqn, eqe->data.cq_err.syndrome); - mlx5_cq_event(eq, cqn, eqe->type); + mlx5_eq_cq_event(eq, cqn, eqe->type); break; case MLX5_EVENT_TYPE_PAGE_REQUEST: diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 96e003db2bcd..09e2f3e8753c 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -1049,12 +1049,10 @@ int mlx5_eq_init(struct mlx5_core_dev *dev); void mlx5_eq_cleanup(struct mlx5_core_dev *dev); void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas); void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas); -void mlx5_cq_completion(struct mlx5_eq *eq, u32 cqn); void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type); void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type); struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn); void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced); -void mlx5_cq_event(struct mlx5_eq *eq, u32 cqn, int event_type); int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, int nent, u64 mask, const char *name, enum mlx5_eq_type type); -- cgit v1.2.3 From 3ec5693b17314b58977ba3c8d720d1f9cfef39f8 Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Thu, 1 Feb 2018 05:42:06 -0800 Subject: net/mlx5: Remove redundant EQ API exports The EQ structure and API are private to the mlx5_core driver only; external drivers should not have access to EQ objects or the means to manipulate them. Remove redundant exports and move API functions out of the linux/mlx5 include directory into the driver's mlx5_core.h private include file.
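To illustrate the encapsulation this enforces, here is a hypothetical out-of-tree module fragment (not from this series; 'my_probe' and its parameters are made up) that could previously compile and link against the exported symbols, and after this patch fails both at compile time (the prototype is gone from linux/mlx5/driver.h) and at module load time (the EXPORT_SYMBOL_GPL entries are removed):

	#include <linux/mlx5/driver.h>

	/* hypothetical external module code */
	static int my_probe(struct mlx5_core_dev *dev)
	{
		static struct mlx5_eq my_eq;

		/* mlx5_create_map_eq() is now declared only in the
		 * driver-private mlx5_core.h and is no longer exported,
		 * so this neither builds nor loads anymore */
		return mlx5_create_map_eq(dev, &my_eq, 0, 64, 0, "my-eq",
					  MLX5_EQ_TYPE_COMP);
	}
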
Signed-off-by: Saeed Mahameed Reviewed-by: Gal Pressman --- drivers/net/ethernet/mellanox/mlx5/core/eq.c | 3 --- drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 17 +++++++++++++++++ include/linux/mlx5/driver.h | 17 ----------------- 3 files changed, 17 insertions(+), 20 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 7e442b38a8ca..c1c94974e16b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -720,7 +720,6 @@ err_buf: mlx5_buf_free(dev, &eq->buf); return err; } -EXPORT_SYMBOL_GPL(mlx5_create_map_eq); int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq) { @@ -747,7 +746,6 @@ int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq) return err; } -EXPORT_SYMBOL_GPL(mlx5_destroy_unmap_eq); int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq) { @@ -925,4 +923,3 @@ int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq, MLX5_SET(query_eq_in, in, eq_number, eq->eqn); return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen); } -EXPORT_SYMBOL_GPL(mlx5_core_eq_query); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 54a1cbfb1b5a..23e17ac0cba5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -117,11 +117,28 @@ int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy, int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev); u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev); +int mlx5_eq_init(struct mlx5_core_dev *dev); +void mlx5_eq_cleanup(struct mlx5_core_dev *dev); +int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, + int nent, u64 mask, const char *name, + enum mlx5_eq_type type); +int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq); int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq); int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq); +int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq, + u32 *out, int outlen); +int mlx5_start_eqs(struct mlx5_core_dev *dev); +void mlx5_stop_eqs(struct mlx5_core_dev *dev); struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn); u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq *eq); void mlx5_cq_tasklet_cb(unsigned long data); +void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced); +int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq); +void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq); +int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev); +void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev); +int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev); +void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev); int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group, u8 access_reg_group); diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 09e2f3e8753c..2860a253275b 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -1045,20 +1045,11 @@ int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot); int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev); void mlx5_register_debugfs(void); void mlx5_unregister_debugfs(void); -int mlx5_eq_init(struct mlx5_core_dev *dev); -void mlx5_eq_cleanup(struct 
mlx5_core_dev *dev); void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas); void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas); void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type); void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type); struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn); -void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced); -int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, - int nent, u64 mask, const char *name, - enum mlx5_eq_type type); -int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq); -int mlx5_start_eqs(struct mlx5_core_dev *dev); -void mlx5_stop_eqs(struct mlx5_core_dev *dev); int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, unsigned int *irqn); int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); @@ -1070,14 +1061,6 @@ int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in, int size_in, void *data_out, int size_out, u16 reg_num, int arg, int write); -int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq); -void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq); -int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq, - u32 *out, int outlen); -int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev); -void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev); -int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev); -void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev); int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db); int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db, int node); -- cgit v1.2.3 From 388ca8be00370db132464e27f745b8a0add19fcb Mon Sep 17 00:00:00 2001 From: Yonatan Cohen Date: Tue, 2 Jan 2018 16:08:06 +0200 Subject: IB/mlx5: Implement fragmented completion queue (CQ) The current implementation of create CQ requires contiguous memory; such a requirement is problematic once memory is fragmented or the system is low on memory, and it causes failures in dma_zalloc_coherent(). This patch implements a new scheme of fragmented CQs to overcome this issue by introducing a new type, 'struct mlx5_frag_buf_ctrl', used to allocate fragmented buffers rather than contiguous ones. Base the Completion Queues (CQs) on this new fragmented buffer. It fixes the following crashes: kworker/29:0: page allocation failure: order:6, mode:0x80d0 CPU: 29 PID: 8374 Comm: kworker/29:0 Tainted: G OE 3.10.0 Workqueue: ib_cm cm_work_handler [ib_cm] Call Trace: [<>] dump_stack+0x19/0x1b [<>] warn_alloc_failed+0x110/0x180 [<>] __alloc_pages_slowpath+0x6b7/0x725 [<>] __alloc_pages_nodemask+0x405/0x420 [<>] dma_generic_alloc_coherent+0x8f/0x140 [<>] x86_swiotlb_alloc_coherent+0x21/0x50 [<>] mlx5_dma_zalloc_coherent_node+0xad/0x110 [mlx5_core] [<>] ?
mlx5_db_alloc_node+0x69/0x1b0 [mlx5_core] [<>] mlx5_buf_alloc_node+0x3e/0xa0 [mlx5_core] [<>] mlx5_buf_alloc+0x14/0x20 [mlx5_core] [<>] create_cq_kernel+0x90/0x1f0 [mlx5_ib] [<>] mlx5_ib_create_cq+0x3b0/0x4e0 [mlx5_ib] Signed-off-by: Yonatan Cohen Reviewed-by: Tariq Toukan Signed-off-by: Leon Romanovsky Signed-off-by: Saeed Mahameed --- drivers/infiniband/hw/mlx5/cq.c | 64 +++++++++++++++---------- drivers/infiniband/hw/mlx5/mlx5_ib.h | 6 +-- drivers/net/ethernet/mellanox/mlx5/core/alloc.c | 37 +++++++++----- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 11 +++-- drivers/net/ethernet/mellanox/mlx5/core/wq.c | 18 +++---- drivers/net/ethernet/mellanox/mlx5/core/wq.h | 22 +++------ include/linux/mlx5/driver.h | 51 ++++++++++++++------ 7 files changed, 124 insertions(+), 85 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index 5b974fb97611..c4c7b82f4ac1 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -64,14 +64,9 @@ static void mlx5_ib_cq_event(struct mlx5_core_cq *mcq, enum mlx5_event type) } } -static void *get_cqe_from_buf(struct mlx5_ib_cq_buf *buf, int n, int size) -{ - return mlx5_buf_offset(&buf->buf, n * size); -} - static void *get_cqe(struct mlx5_ib_cq *cq, int n) { - return get_cqe_from_buf(&cq->buf, n, cq->mcq.cqe_sz); + return mlx5_frag_buf_get_wqe(&cq->buf.fbc, n); } static u8 sw_ownership_bit(int n, int nent) @@ -403,7 +398,7 @@ static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64, static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf) { - mlx5_buf_free(dev->mdev, &buf->buf); + mlx5_frag_buf_free(dev->mdev, &buf->fbc.frag_buf); } static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe, @@ -724,12 +719,25 @@ int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) return ret; } -static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf, - int nent, int cqe_size) +static int alloc_cq_frag_buf(struct mlx5_ib_dev *dev, + struct mlx5_ib_cq_buf *buf, + int nent, + int cqe_size) { + struct mlx5_frag_buf_ctrl *c = &buf->fbc; + struct mlx5_frag_buf *frag_buf = &c->frag_buf; + u32 cqc_buff[MLX5_ST_SZ_DW(cqc)] = {0}; int err; - err = mlx5_buf_alloc(dev->mdev, nent * cqe_size, &buf->buf); + MLX5_SET(cqc, cqc_buff, log_cq_size, ilog2(cqe_size)); + MLX5_SET(cqc, cqc_buff, cqe_sz, (cqe_size == 128) ? 1 : 0); + + mlx5_core_init_cq_frag_buf(&buf->fbc, cqc_buff); + + err = mlx5_frag_buf_alloc_node(dev->mdev, + nent * cqe_size, + frag_buf, + dev->mdev->priv.numa_node); if (err) return err; @@ -862,14 +870,15 @@ static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_ucontext *context) ib_umem_release(cq->buf.umem); } -static void init_cq_buf(struct mlx5_ib_cq *cq, struct mlx5_ib_cq_buf *buf) +static void init_cq_frag_buf(struct mlx5_ib_cq *cq, + struct mlx5_ib_cq_buf *buf) { int i; void *cqe; struct mlx5_cqe64 *cqe64; for (i = 0; i < buf->nent; i++) { - cqe = get_cqe_from_buf(buf, i, buf->cqe_size); + cqe = get_cqe(cq, i); cqe64 = buf->cqe_size == 64 ? 
cqe : cqe + 64; cqe64->op_own = MLX5_CQE_INVALID << 4; } @@ -891,14 +900,15 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, cq->mcq.arm_db = cq->db.db + 1; cq->mcq.cqe_sz = cqe_size; - err = alloc_cq_buf(dev, &cq->buf, entries, cqe_size); + err = alloc_cq_frag_buf(dev, &cq->buf, entries, cqe_size); if (err) goto err_db; - init_cq_buf(cq, &cq->buf); + init_cq_frag_buf(cq, &cq->buf); *inlen = MLX5_ST_SZ_BYTES(create_cq_in) + - MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * cq->buf.buf.npages; + MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * + cq->buf.fbc.frag_buf.npages; *cqb = kvzalloc(*inlen, GFP_KERNEL); if (!*cqb) { err = -ENOMEM; @@ -906,11 +916,12 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, } pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas); - mlx5_fill_page_array(&cq->buf.buf, pas); + mlx5_fill_page_frag_array(&cq->buf.fbc.frag_buf, pas); cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context); MLX5_SET(cqc, cqc, log_page_size, - cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); + cq->buf.fbc.frag_buf.page_shift - + MLX5_ADAPTER_PAGE_SHIFT); *index = dev->mdev->priv.uar->index; @@ -1207,11 +1218,11 @@ static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, if (!cq->resize_buf) return -ENOMEM; - err = alloc_cq_buf(dev, cq->resize_buf, entries, cqe_size); + err = alloc_cq_frag_buf(dev, cq->resize_buf, entries, cqe_size); if (err) goto ex; - init_cq_buf(cq, cq->resize_buf); + init_cq_frag_buf(cq, cq->resize_buf); return 0; @@ -1256,9 +1267,8 @@ static int copy_resize_cqes(struct mlx5_ib_cq *cq) } while ((scqe64->op_own >> 4) != MLX5_CQE_RESIZE_CQ) { - dcqe = get_cqe_from_buf(cq->resize_buf, - (i + 1) & (cq->resize_buf->nent), - dsize); + dcqe = mlx5_frag_buf_get_wqe(&cq->resize_buf->fbc, + (i + 1) & cq->resize_buf->nent); dcqe64 = dsize == 64 ? 
dcqe : dcqe + 64; sw_own = sw_ownership_bit(i + 1, cq->resize_buf->nent); memcpy(dcqe, scqe, dsize); @@ -1324,8 +1334,11 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) cqe_size = 64; err = resize_kernel(dev, cq, entries, cqe_size); if (!err) { - npas = cq->resize_buf->buf.npages; - page_shift = cq->resize_buf->buf.page_shift; + struct mlx5_frag_buf_ctrl *c; + + c = &cq->resize_buf->fbc; + npas = c->frag_buf.npages; + page_shift = c->frag_buf.page_shift; } } @@ -1346,7 +1359,8 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) mlx5_ib_populate_pas(dev, cq->resize_umem, page_shift, pas, 0); else - mlx5_fill_page_array(&cq->resize_buf->buf, pas); + mlx5_fill_page_frag_array(&cq->resize_buf->fbc.frag_buf, + pas); MLX5_SET(modify_cq_in, in, modify_field_select_resize_field_select.resize_field_select.resize_field_select, diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 139385129973..eafb9751daf6 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -371,7 +371,7 @@ struct mlx5_ib_qp { struct mlx5_ib_rss_qp rss_qp; struct mlx5_ib_dct dct; }; - struct mlx5_buf buf; + struct mlx5_frag_buf buf; struct mlx5_db db; struct mlx5_ib_wq rq; @@ -413,7 +413,7 @@ struct mlx5_ib_qp { }; struct mlx5_ib_cq_buf { - struct mlx5_buf buf; + struct mlx5_frag_buf_ctrl fbc; struct ib_umem *umem; int cqe_size; int nent; @@ -495,7 +495,7 @@ struct mlx5_ib_wc { struct mlx5_ib_srq { struct ib_srq ibsrq; struct mlx5_core_srq msrq; - struct mlx5_buf buf; + struct mlx5_frag_buf buf; struct mlx5_db db; u64 *wrid; /* protect SRQ hanlding diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c index 47239bf7bf43..323ffe8bf7e4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c @@ -71,19 +71,24 @@ static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev, } int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size, - struct mlx5_buf *buf, int node) + struct mlx5_frag_buf *buf, int node) { dma_addr_t t; buf->size = size; buf->npages = 1; buf->page_shift = (u8)get_order(size) + PAGE_SHIFT; - buf->direct.buf = mlx5_dma_zalloc_coherent_node(dev, size, - &t, node); - if (!buf->direct.buf) + + buf->frags = kzalloc(sizeof(*buf->frags), GFP_KERNEL); + if (!buf->frags) return -ENOMEM; - buf->direct.map = t; + buf->frags->buf = mlx5_dma_zalloc_coherent_node(dev, size, + &t, node); + if (!buf->frags->buf) + goto err_out; + + buf->frags->map = t; while (t & ((1 << buf->page_shift) - 1)) { --buf->page_shift; @@ -91,18 +96,24 @@ int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size, } return 0; +err_out: + kfree(buf->frags); + return -ENOMEM; } -int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf) +int mlx5_buf_alloc(struct mlx5_core_dev *dev, + int size, struct mlx5_frag_buf *buf) { return mlx5_buf_alloc_node(dev, size, buf, dev->priv.numa_node); } -EXPORT_SYMBOL_GPL(mlx5_buf_alloc); +EXPORT_SYMBOL(mlx5_buf_alloc); -void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf) +void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf) { - dma_free_coherent(&dev->pdev->dev, buf->size, buf->direct.buf, - buf->direct.map); + dma_free_coherent(&dev->pdev->dev, buf->size, buf->frags->buf, + buf->frags->map); + + kfree(buf->frags); } EXPORT_SYMBOL_GPL(mlx5_buf_free); @@ -147,6 +158,7 @@ err_free_buf: err_out: return 
-ENOMEM; } +EXPORT_SYMBOL_GPL(mlx5_frag_buf_alloc_node); void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf) { @@ -162,6 +174,7 @@ void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf) } kfree(buf->frags); } +EXPORT_SYMBOL_GPL(mlx5_frag_buf_free); static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev, int node) @@ -275,13 +288,13 @@ void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db) } EXPORT_SYMBOL_GPL(mlx5_db_free); -void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas) +void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas) { u64 addr; int i; for (i = 0; i < buf->npages; i++) { - addr = buf->direct.map + (i << buf->page_shift); + addr = buf->frags->map + (i << buf->page_shift); pas[i] = cpu_to_be64(addr); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 0d4bb0688faa..80b84f6af2a1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -52,7 +52,7 @@ static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config) static inline void mlx5e_read_cqe_slot(struct mlx5e_cq *cq, u32 cqcc, void *data) { - u32 ci = cqcc & cq->wq.sz_m1; + u32 ci = cqcc & cq->wq.fbc.sz_m1; memcpy(data, mlx5_cqwq_get_wqe(&cq->wq, ci), sizeof(struct mlx5_cqe64)); } @@ -74,9 +74,10 @@ static inline void mlx5e_read_mini_arr_slot(struct mlx5e_cq *cq, u32 cqcc) static inline void mlx5e_cqes_update_owner(struct mlx5e_cq *cq, u32 cqcc, int n) { - u8 op_own = (cqcc >> cq->wq.log_sz) & 1; - u32 wq_sz = 1 << cq->wq.log_sz; - u32 ci = cqcc & cq->wq.sz_m1; + struct mlx5_frag_buf_ctrl *fbc = &cq->wq.fbc; + u8 op_own = (cqcc >> fbc->log_sz) & 1; + u32 wq_sz = 1 << fbc->log_sz; + u32 ci = cqcc & fbc->sz_m1; u32 ci_top = min_t(u32, wq_sz, ci + n); for (; ci < ci_top; ci++, n--) { @@ -101,7 +102,7 @@ static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq, cq->title.byte_cnt = cq->mini_arr[cq->mini_arr_idx].byte_cnt; cq->title.check_sum = cq->mini_arr[cq->mini_arr_idx].checksum; cq->title.op_own &= 0xf0; - cq->title.op_own |= 0x01 & (cqcc >> cq->wq.log_sz); + cq->title.op_own |= 0x01 & (cqcc >> cq->wq.fbc.log_sz); cq->title.wqe_counter = cpu_to_be16(cq->decmprs_wqe_counter); if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c index 6bcfc25350f5..ea66448ba365 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c @@ -41,7 +41,7 @@ u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq) u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq) { - return wq->sz_m1 + 1; + return wq->fbc.sz_m1 + 1; } u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq) @@ -62,7 +62,7 @@ static u32 mlx5_wq_qp_get_byte_size(struct mlx5_wq_qp *wq) static u32 mlx5_cqwq_get_byte_size(struct mlx5_cqwq *wq) { - return mlx5_cqwq_get_size(wq) << wq->log_stride; + return mlx5_cqwq_get_size(wq) << wq->fbc.log_stride; } static u32 mlx5_wq_ll_get_byte_size(struct mlx5_wq_ll *wq) @@ -92,7 +92,7 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, goto err_db_free; } - wq->buf = wq_ctrl->buf.direct.buf; + wq->buf = wq_ctrl->buf.frags->buf; wq->db = wq_ctrl->db.db; wq_ctrl->mdev = mdev; @@ -130,7 +130,7 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, goto err_db_free; } - wq->rq.buf = wq_ctrl->buf.direct.buf; + wq->rq.buf = 
wq_ctrl->buf.frags->buf; wq->sq.buf = wq->rq.buf + mlx5_wq_cyc_get_byte_size(&wq->rq); wq->rq.db = &wq_ctrl->db.db[MLX5_RCV_DBR]; wq->sq.db = &wq_ctrl->db.db[MLX5_SND_DBR]; @@ -151,11 +151,7 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, { int err; - wq->log_stride = 6 + MLX5_GET(cqc, cqc, cqe_sz); - wq->log_sz = MLX5_GET(cqc, cqc, log_cq_size); - wq->sz_m1 = (1 << wq->log_sz) - 1; - wq->log_frag_strides = PAGE_SHIFT - wq->log_stride; - wq->frag_sz_m1 = (1 << wq->log_frag_strides) - 1; + mlx5_core_init_cq_frag_buf(&wq->fbc, cqc); err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node); if (err) { @@ -172,7 +168,7 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, goto err_db_free; } - wq->frag_buf = wq_ctrl->frag_buf; + wq->fbc.frag_buf = wq_ctrl->frag_buf; wq->db = wq_ctrl->db.db; wq_ctrl->mdev = mdev; @@ -209,7 +205,7 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, goto err_db_free; } - wq->buf = wq_ctrl->buf.direct.buf; + wq->buf = wq_ctrl->buf.frags->buf; wq->db = wq_ctrl->db.db; for (i = 0; i < wq->sz_m1; i++) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h index 718589d0cec2..fca90b94596d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h @@ -45,7 +45,7 @@ struct mlx5_wq_param { struct mlx5_wq_ctrl { struct mlx5_core_dev *mdev; - struct mlx5_buf buf; + struct mlx5_frag_buf buf; struct mlx5_db db; }; @@ -68,14 +68,9 @@ struct mlx5_wq_qp { }; struct mlx5_cqwq { - struct mlx5_frag_buf frag_buf; - __be32 *db; - u32 sz_m1; - u32 frag_sz_m1; - u32 cc; /* consumer counter */ - u8 log_sz; - u8 log_stride; - u8 log_frag_strides; + struct mlx5_frag_buf_ctrl fbc; + __be32 *db; + u32 cc; /* consumer counter */ }; struct mlx5_wq_ll { @@ -131,20 +126,17 @@ static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2) static inline u32 mlx5_cqwq_get_ci(struct mlx5_cqwq *wq) { - return wq->cc & wq->sz_m1; + return wq->cc & wq->fbc.sz_m1; } static inline void *mlx5_cqwq_get_wqe(struct mlx5_cqwq *wq, u32 ix) { - unsigned int frag = (ix >> wq->log_frag_strides); - - return wq->frag_buf.frags[frag].buf + - ((wq->frag_sz_m1 & ix) << wq->log_stride); + return mlx5_frag_buf_get_wqe(&wq->fbc, ix); } static inline u32 mlx5_cqwq_get_wrap_cnt(struct mlx5_cqwq *wq) { - return wq->cc >> wq->log_sz; + return wq->cc >> wq->fbc.log_sz; } static inline void mlx5_cqwq_pop(struct mlx5_cqwq *wq) diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 2860a253275b..bfea26af6de5 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -345,13 +345,6 @@ struct mlx5_buf_list { dma_addr_t map; }; -struct mlx5_buf { - struct mlx5_buf_list direct; - int npages; - int size; - u8 page_shift; -}; - struct mlx5_frag_buf { struct mlx5_buf_list *frags; int npages; @@ -359,6 +352,15 @@ struct mlx5_frag_buf { u8 page_shift; }; +struct mlx5_frag_buf_ctrl { + struct mlx5_frag_buf frag_buf; + u32 sz_m1; + u32 frag_sz_m1; + u8 log_sz; + u8 log_stride; + u8 log_frag_strides; +}; + struct mlx5_eq_tasklet { struct list_head list; struct list_head process_list; @@ -386,7 +388,7 @@ struct mlx5_eq { struct mlx5_cq_table cq_table; __be32 __iomem *doorbell; u32 cons_index; - struct mlx5_buf buf; + struct mlx5_frag_buf buf; int size; unsigned int irqn; u8 eqn; @@ -932,9 +934,9 @@ struct mlx5_hca_vport_context { bool grh_required; }; -static inline void *mlx5_buf_offset(struct mlx5_buf 
*buf, int offset) +static inline void *mlx5_buf_offset(struct mlx5_frag_buf *buf, int offset) { - return buf->direct.buf + offset; + return buf->frags->buf + offset; } #define STRUCT_FIELD(header, field) \ @@ -973,6 +975,25 @@ static inline u32 mlx5_base_mkey(const u32 key) return key & 0xffffff00u; } +static inline void mlx5_core_init_cq_frag_buf(struct mlx5_frag_buf_ctrl *fbc, + void *cqc) +{ + fbc->log_stride = 6 + MLX5_GET(cqc, cqc, cqe_sz); + fbc->log_sz = MLX5_GET(cqc, cqc, log_cq_size); + fbc->sz_m1 = (1 << fbc->log_sz) - 1; + fbc->log_frag_strides = PAGE_SHIFT - fbc->log_stride; + fbc->frag_sz_m1 = (1 << fbc->log_frag_strides) - 1; +} + +static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc, + u32 ix) +{ + unsigned int frag = (ix >> fbc->log_frag_strides); + + return fbc->frag_buf.frags[frag].buf + + ((fbc->frag_sz_m1 & ix) << fbc->log_stride); +} + int mlx5_cmd_init(struct mlx5_core_dev *dev); void mlx5_cmd_cleanup(struct mlx5_core_dev *dev); void mlx5_cmd_use_events(struct mlx5_core_dev *dev); @@ -998,9 +1019,10 @@ void mlx5_drain_health_wq(struct mlx5_core_dev *dev); void mlx5_trigger_health_work(struct mlx5_core_dev *dev); void mlx5_drain_health_recovery(struct mlx5_core_dev *dev); int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size, - struct mlx5_buf *buf, int node); -int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf); -void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf); + struct mlx5_frag_buf *buf, int node); +int mlx5_buf_alloc(struct mlx5_core_dev *dev, + int size, struct mlx5_frag_buf *buf); +void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf); int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size, struct mlx5_frag_buf *buf, int node); void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf); @@ -1045,7 +1067,8 @@ int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot); int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev); void mlx5_register_debugfs(void); void mlx5_unregister_debugfs(void); -void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas); + +void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas); void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas); void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type); void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type); -- cgit v1.2.3 From 22215908d81f61d293e8b128e819a8437f37cc20 Mon Sep 17 00:00:00 2001 From: Mark Bloch Date: Wed, 27 Sep 2017 07:24:18 +0000 Subject: net/mlx5: E-Switch, Add callback to get representor device Add a callback interface to get a protocol device (per representor type). The Ethernet representors will expose their netdev via this interface. This functionality can be later used by IB representor in order to find the corresponding net device representor. 
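As a usage sketch (hypothetical consumer code, not part of this patch; the in-tree IB representor user arrives only in a later series), a protocol driver can resolve the Ethernet representor's netdev for a given vport like this:

	/* returns rpriv->netdev via the REP_ETH get_proto_dev() callback,
	 * or NULL if no valid REP_ETH representor is registered */
	static struct net_device *
	my_rep_get_netdev(struct mlx5_eswitch *esw, int vport)
	{
		return mlx5_eswitch_get_proto_dev(esw, vport, REP_ETH);
	}
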
Signed-off-by: Mark Bloch Reviewed-by: Or Gerlitz Signed-off-by: Leon Romanovsky Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 11 ++++++++++ drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 5 +++++ .../ethernet/mellanox/mlx5/core/eswitch_offloads.c | 24 ++++++++++++++++++++++ 3 files changed, 40 insertions(+) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 363d8dcb7f17..ea4b255380a2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -1156,6 +1156,15 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep) kfree(ppriv); /* mlx5e_rep_priv */ } +static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep *rep) +{ + struct mlx5e_rep_priv *rpriv; + + rpriv = mlx5e_rep_to_rep_priv(rep); + + return rpriv->netdev; +} + static void mlx5e_rep_register_vf_vports(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; @@ -1168,6 +1177,7 @@ static void mlx5e_rep_register_vf_vports(struct mlx5e_priv *priv) rep_if.load = mlx5e_vport_rep_load; rep_if.unload = mlx5e_vport_rep_unload; + rep_if.get_proto_dev = mlx5e_vport_rep_get_proto_dev; mlx5_eswitch_register_vport_rep(esw, vport, &rep_if, REP_ETH); } } @@ -1195,6 +1205,7 @@ void mlx5e_register_vport_reps(struct mlx5e_priv *priv) rep_if.load = mlx5e_nic_rep_load; rep_if.unload = mlx5e_nic_rep_unload; + rep_if.get_proto_dev = mlx5e_vport_rep_get_proto_dev; rep_if.priv = rpriv; INIT_LIST_HEAD(&rpriv->vport_sqs_list); mlx5_eswitch_register_vport_rep(esw, 0, &rep_if, REP_ETH); /* UPLINK PF vport*/ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 2fa037066b2f..4dfb1da435a4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -150,6 +150,7 @@ struct mlx5_eswitch_rep_if { int (*load)(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep); void (*unload)(struct mlx5_eswitch_rep *rep); + void *(*get_proto_dev)(struct mlx5_eswitch_rep *rep); void *priv; bool valid; }; @@ -286,6 +287,10 @@ void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw, int vport_index, u8 rep_type); void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type); +void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw, + int vport, + u8 rep_type); +void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type); int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *attr); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 99f583a15cc3..06623c8e92a2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -1160,6 +1160,7 @@ void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw, rep_if->load = __rep_if->load; rep_if->unload = __rep_if->unload; + rep_if->get_proto_dev = __rep_if->get_proto_dev; rep_if->priv = __rep_if->priv; rep_if->valid = true; @@ -1188,3 +1189,26 @@ void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type) rep = &offloads->vport_reps[UPLINK_REP_INDEX]; return rep->rep_if[rep_type].priv; } + +void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw, + int vport, + u8 rep_type) +{ + struct mlx5_esw_offload *offloads = 
&esw->offloads; + struct mlx5_eswitch_rep *rep; + + if (vport == FDB_UPLINK_VPORT) + vport = UPLINK_REP_INDEX; + + rep = &offloads->vport_reps[vport]; + + if (rep->rep_if[rep_type].valid && + rep->rep_if[rep_type].get_proto_dev) + return rep->rep_if[rep_type].get_proto_dev(rep); + return NULL; +} + +void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type) +{ + return mlx5_eswitch_get_proto_dev(esw, UPLINK_REP_INDEX, rep_type); +} -- cgit v1.2.3 From 57cbd893c4c575a24594fa6c0835247506ce26e2 Mon Sep 17 00:00:00 2001 From: Mark Bloch Date: Tue, 16 Jan 2018 14:04:14 +0000 Subject: net/mlx5: E-Switch, Move representors definition to a global scope In preparation for IB representors, move representors structs to a global scope, also expose functions needed for registration, unregistration, eswitch mode and creating a flow rule to direct traffic from SQs to the right VF. Signed-off-by: Mark Bloch Reviewed-by: Or Gerlitz Signed-off-by: Leon Romanovsky Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 6 +++ drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 44 +---------------- .../ethernet/mellanox/mlx5/core/eswitch_offloads.c | 12 +++++ .../net/ethernet/mellanox/mlx5/core/mlx5_core.h | 6 --- include/linux/mlx5/driver.h | 6 +++ include/linux/mlx5/eswitch.h | 57 ++++++++++++++++++++++ 6 files changed, 82 insertions(+), 49 deletions(-) create mode 100644 include/linux/mlx5/eswitch.h (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 5ecf2cddc16d..aec4653d88bc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -2175,3 +2175,9 @@ free_out: kvfree(out); return err; } + +u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw) +{ + return esw->mode; +} +EXPORT_SYMBOL_GPL(mlx5_eswitch_mode); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 4dfb1da435a4..9c1e1a2d02ef 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -37,19 +37,9 @@ #include #include #include +#include #include "lib/mpfs.h" -enum { - SRIOV_NONE, - SRIOV_LEGACY, - SRIOV_OFFLOADS -}; - -enum { - REP_ETH, - NUM_REP_TYPES, -}; - #ifdef CONFIG_MLX5_ESWITCH #define MLX5_MAX_UC_PER_VPORT(dev) \ @@ -145,24 +135,6 @@ struct mlx5_eswitch_fdb { }; }; -struct mlx5_eswitch_rep; -struct mlx5_eswitch_rep_if { - int (*load)(struct mlx5_core_dev *dev, - struct mlx5_eswitch_rep *rep); - void (*unload)(struct mlx5_eswitch_rep *rep); - void *(*get_proto_dev)(struct mlx5_eswitch_rep *rep); - void *priv; - bool valid; -}; - -struct mlx5_eswitch_rep { - struct mlx5_eswitch_rep_if rep_if[NUM_REP_TYPES]; - u16 vport; - u8 hw_id[ETH_ALEN]; - u16 vlan; - u32 vlan_refcount; -}; - struct mlx5_esw_offload { struct mlx5_flow_table *ft_offloads; struct mlx5_flow_group *vport_rx_group; @@ -232,9 +204,6 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, int vport, struct ifla_vf_stats *vf_stats); -struct mlx5_flow_handle * -mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, - u32 sqn); void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule); struct mlx5_flow_spec; @@ -279,18 +248,7 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode); int 
mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode); int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap); int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap); -void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw, - int vport_index, - struct mlx5_eswitch_rep_if *rep_if, - u8 rep_type); -void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw, - int vport_index, - u8 rep_type); void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type); -void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw, - int vport, - u8 rep_type); -void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type); int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *attr); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 06623c8e92a2..92fdb10dd29f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -338,6 +338,7 @@ out: kvfree(spec); return flow_rule; } +EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule); void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule) { @@ -1165,6 +1166,7 @@ void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw, rep_if->valid = true; } +EXPORT_SYMBOL(mlx5_eswitch_register_vport_rep); void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw, int vport_index, u8 rep_type) @@ -1179,6 +1181,7 @@ void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw, rep->rep_if[rep_type].valid = false; } +EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_rep); void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type) { @@ -1207,8 +1210,17 @@ void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw, return rep->rep_if[rep_type].get_proto_dev(rep); return NULL; } +EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev); void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type) { return mlx5_eswitch_get_proto_dev(esw, UPLINK_REP_INDEX, rep_type); } +EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev); + +struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw, + int vport) +{ + return &esw->offloads.vport_reps[vport]; +} +EXPORT_SYMBOL(mlx5_eswitch_vport_rep); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 23e17ac0cba5..ee1a42a078ee 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -43,12 +43,6 @@ #define DRIVER_NAME "mlx5_core" #define DRIVER_VERSION "5.0-0" -#define MLX5_TOTAL_VPORTS(mdev) (1 + pci_sriov_get_totalvfs(mdev->pdev)) -#define MLX5_VPORT_MANAGER(mdev) \ - (MLX5_CAP_GEN(mdev, vport_group_manager) && \ - (MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && \ - mlx5_core_is_pf(mdev)) - extern uint mlx5_core_debug_mask; #define mlx5_core_dbg(__dev, format, ...) 
\ diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index bfea26af6de5..4814cad7456e 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -1224,6 +1224,12 @@ static inline int mlx5_core_is_pf(struct mlx5_core_dev *dev) return !(dev->priv.pci_dev_data & MLX5_PCI_DEV_IS_VF); } +#define MLX5_TOTAL_VPORTS(mdev) (1 + pci_sriov_get_totalvfs((mdev)->pdev)) +#define MLX5_VPORT_MANAGER(mdev) \ + (MLX5_CAP_GEN(mdev, vport_group_manager) && \ + (MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && \ + mlx5_core_is_pf(mdev)) + static inline int mlx5_get_gid_table_len(u16 param) { if (param > 4) { diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h new file mode 100644 index 000000000000..f62bf486c18c --- /dev/null +++ b/include/linux/mlx5/eswitch.h @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* + * Copyright (c) 2018 Mellanox Technologies. All rights reserved. + */ + +#ifndef _MLX5_ESWITCH_ +#define _MLX5_ESWITCH_ + +#include + +enum { + SRIOV_NONE, + SRIOV_LEGACY, + SRIOV_OFFLOADS +}; + +enum { + REP_ETH, + NUM_REP_TYPES, +}; + +struct mlx5_eswitch_rep; +struct mlx5_eswitch_rep_if { + int (*load)(struct mlx5_core_dev *dev, + struct mlx5_eswitch_rep *rep); + void (*unload)(struct mlx5_eswitch_rep *rep); + void *(*get_proto_dev)(struct mlx5_eswitch_rep *rep); + void *priv; + bool valid; +}; + +struct mlx5_eswitch_rep { + struct mlx5_eswitch_rep_if rep_if[NUM_REP_TYPES]; + u16 vport; + u8 hw_id[ETH_ALEN]; + u16 vlan; + u32 vlan_refcount; +}; + +void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw, + int vport_index, + struct mlx5_eswitch_rep_if *rep_if, + u8 rep_type); +void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw, + int vport_index, + u8 rep_type); +void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw, + int vport, + u8 rep_type); +struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw, + int vport); +void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type); +u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw); +struct mlx5_flow_handle * +mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, + int vport, u32 sqn); +#endif -- cgit v1.2.3 From cd3d07e7db69456beaeb90529270a95c7692f4b3 Mon Sep 17 00:00:00 2001 From: Mark Bloch Date: Tue, 23 Jan 2018 11:19:12 +0000 Subject: net/mlx5: E-Switch, Increase number of FTEs in FDB in switchdev mode The max FTE number should be the max number of SQs that can be opened. Ethernet representors open one SQ each. Once we add IB representors this will increase (depending on the user). For now let's start with 31 per IB representor and, if needed, increase in the future. This increase only affects the number of FTEs in the slow path FDB; offloaded rules (done via TC on the fast path portion of the FDB) aren't affected.
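To make the sizing concrete, here is a small sketch (illustrative only; it mirrors the table_size computation in esw_create_offloads_fdb_tables() using the constants this patch introduces):

/* Illustrative sketch of the new slow path FDB sizing. */
#define MAX_PF_SQ      256	/* FTEs reserved for PF SQs */
#define MAX_SQ_NVPORTS 32	/* FTEs per vport, to cover IB rep SQs */

static unsigned int slow_path_fdb_size(int nvports)
{
	/* send-to-vport FTEs per vport + PF SQs + one miss rule FTE */
	return nvports * MAX_SQ_NVPORTS + MAX_PF_SQ + 1;
}

With 4 VF vports, for example, this yields 4 * 32 + 256 + 1 = 385 FTEs, where the old formula (nvports + MAX_PF_SQ + 1) allocated only 261.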
Signed-off-by: Mark Bloch Reviewed-by: Or Gerlitz Signed-off-by: Leon Romanovsky Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 92fdb10dd29f..a5f5339a4e88 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -427,6 +427,7 @@ static void esw_destroy_offloads_fast_fdb_table(struct mlx5_eswitch *esw) } #define MAX_PF_SQ 256 +#define MAX_SQ_NVPORTS 32 static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports) { @@ -456,7 +457,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports) if (err) goto fast_fdb_err; - table_size = nvports + MAX_PF_SQ + 1; + table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ + 1; ft_attr.max_fte = table_size; ft_attr.prio = FDB_SLOW_PATH; @@ -479,7 +480,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports) MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn); MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port); - ix = nvports + MAX_PF_SQ; + ix = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ; MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1); -- cgit v1.2.3 From f80be5436de78d0231cfb79b368edb9b90c3b057 Mon Sep 17 00:00:00 2001 From: Mark Bloch Date: Tue, 30 Jan 2018 10:46:34 +0000 Subject: net/mlx5: E-Switch, Optimize HW steering tables in switchdev mode Under switchdev mode we insert an eswitch miss rule causing any unmatched traffic to be sent towards the PF vport. This miss rule can be optimized if we break it into two: one for multicast traffic and the other for unicast. Breaking the miss rule into two (unicast and multicast) allows the firmware to program the hardware in a more efficient way. Using ConnectX-5 Ex with IXIA and testpmd (which use IB representors): IXIA -> NIC -> PF -> IB representor -> NIC -> VF: - Without this optimization: 9.2 MPPS. - With this optimization: 18 MPPS. VF -> NIC -> IB representor -> PF -> NIC -> IXIA: - Without this optimization: 17 MPPS. - With this optimization: 23.4 MPPS.
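The split keys off the group (multicast) bit of the destination MAC: the patch masks the first octet of dmac_47_16 with 0x01, matching it clear for the unicast miss rule and set for the multicast one. A minimal sketch of the classification the two rules encode (standard IEEE 802.3 semantics, not mlx5-specific code):

#include <stdbool.h>

/* Sketch: bit 0 of the first DMAC octet is the group (multicast) bit. */
static bool dmac_is_multicast(const unsigned char dmac[6])
{
	return dmac[0] & 0x01;	/* set for multicast/broadcast frames */
}

/* unicast miss rule:   dmac_is_multicast() == false
 * multicast miss rule: dmac_is_multicast() == true
 */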
Signed-off-by: Mark Bloch Reviewed-by: Or Gerlitz Signed-off-by: Leon Romanovsky Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 3 +- .../ethernet/mellanox/mlx5/core/eswitch_offloads.c | 48 +++++++++++++++++++--- 2 files changed, 44 insertions(+), 7 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 9c1e1a2d02ef..98d2177d0806 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -129,7 +129,8 @@ struct mlx5_eswitch_fdb { struct mlx5_flow_table *fdb; struct mlx5_flow_group *send_to_vport_grp; struct mlx5_flow_group *miss_grp; - struct mlx5_flow_handle *miss_rule; + struct mlx5_flow_handle *miss_rule_uni; + struct mlx5_flow_handle *miss_rule_multi; int vlan_push_pop_refcount; } offloads; }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index a5f5339a4e88..0692d280883c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -351,7 +351,11 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw) struct mlx5_flow_destination dest = {}; struct mlx5_flow_handle *flow_rule = NULL; struct mlx5_flow_spec *spec; + void *headers_c; + void *headers_v; int err = 0; + u8 *dmac_c; + u8 *dmac_v; spec = kvzalloc(sizeof(*spec), GFP_KERNEL); if (!spec) { @@ -359,6 +363,13 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw) goto out; } + spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, + outer_headers); + dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c, + outer_headers.dmac_47_16); + dmac_c[0] = 0x01; + dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; dest.vport_num = 0; flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; @@ -367,11 +378,28 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw) &flow_act, &dest, 1); if (IS_ERR(flow_rule)) { err = PTR_ERR(flow_rule); - esw_warn(esw->dev, "FDB: Failed to add miss flow rule err %d\n", err); + esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err); goto out; } - esw->fdb_table.offloads.miss_rule = flow_rule; + esw->fdb_table.offloads.miss_rule_uni = flow_rule; + + headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, + outer_headers); + dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v, + outer_headers.dmac_47_16); + dmac_v[0] = 0x01; + flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec, + &flow_act, &dest, 1); + if (IS_ERR(flow_rule)) { + err = PTR_ERR(flow_rule); + esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err); + mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni); + goto out; + } + + esw->fdb_table.offloads.miss_rule_multi = flow_rule; + out: kvfree(spec); return err; @@ -440,6 +468,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports) struct mlx5_flow_group *g; void *match_criteria; u32 *flow_group_in; + u8 *dmac; esw_debug(esw->dev, "Create offloads FDB Tables\n"); flow_group_in = kvzalloc(inlen, GFP_KERNEL); @@ -457,7 +486,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports) if (err) goto fast_fdb_err; - table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ + 1; + table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ + 
2; ft_attr.max_fte = table_size; ft_attr.prio = FDB_SLOW_PATH; @@ -494,10 +523,16 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports) /* create miss group */ memset(flow_group_in, 0, inlen); - MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, 0); + MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, + MLX5_MATCH_OUTER_HEADERS); + match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, + match_criteria); + dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, + outer_headers.dmac_47_16); + dmac[0] = 0x01; MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix); - MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 1); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 2); g = mlx5_create_flow_group(fdb, flow_group_in); if (IS_ERR(g)) { @@ -533,7 +568,8 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw) return; esw_debug(esw->dev, "Destroy offloads FDB Tables\n"); - mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule); + mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi); + mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni); mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp); mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp); -- cgit v1.2.3 From c5447c70594b6a8e1e62a9dd9373813cb38f1c69 Mon Sep 17 00:00:00 2001 From: Mark Bloch Date: Tue, 23 Jan 2018 11:24:13 +0000 Subject: net/mlx5: E-Switch, Reload IB interface when switching devlink modes Up until this point it wasn't possible to activate IB representors when switching to switchdev mode; remove this limitation. We trigger a reload of the PF IB interface to make sure that already allocated resources are invalidated and new resources are opened correctly, with all the limitations of switchdev mode applied (only raw packet capabilities, without RoCE). We also move the remove/add to a place where the E-Switch mode is set/unset, to better control when to trigger this action; this allows the IB side to start in the correct mode. For better code reuse, create a function which reloads an interface and export it.
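A condensed sketch of the ordering this patch establishes (not a literal copy of the eswitch.c hunks below; error handling is trimmed):

/* Sketch: the IB reload happens while esw->mode is already
 * SRIOV_OFFLOADS, so the IB device re-registers with switchdev
 * limitations applied (raw packet only, no RoCE).
 */
static int sketch_enable_offloads(struct mlx5_eswitch *esw, int nvfs)
{
	int err;

	esw->mode = SRIOV_OFFLOADS;
	mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
	err = esw_offloads_init(esw, nvfs + 1);
	if (err) {
		/* fall back to NONE first, then reload IB again so it
		 * comes back up unrestricted
		 */
		esw->mode = SRIOV_NONE;
		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
	}
	return err;
}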
Signed-off-by: Mark Bloch Reviewed-by: Or Gerlitz Signed-off-by: Leon Romanovsky Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/dev.c | 8 ++++++++ drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 17 +++++++++++++++-- .../net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 17 ++--------------- drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 1 + 4 files changed, 26 insertions(+), 17 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c index 17b723218b0c..b994b80d5714 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c @@ -337,6 +337,14 @@ void mlx5_unregister_interface(struct mlx5_interface *intf) } EXPORT_SYMBOL(mlx5_unregister_interface); +void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol) +{ + mutex_lock(&mlx5_intf_mutex); + mlx5_remove_dev_by_protocol(mdev, protocol); + mlx5_add_dev_by_protocol(mdev, protocol); + mutex_unlock(&mlx5_intf_mutex); +} + void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol) { struct mlx5_priv *priv = &mdev->priv; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index aec4653d88bc..964cd8c4fdcc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1619,10 +1619,14 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d) mode (%d)\n", nvfs, mode); esw->mode = mode; - if (mode == SRIOV_LEGACY) + if (mode == SRIOV_LEGACY) { err = esw_create_legacy_fdb_table(esw, nvfs + 1); - else + } else { + mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB); + err = esw_offloads_init(esw, nvfs + 1); + } + if (err) goto abort; @@ -1644,12 +1648,17 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) abort: esw->mode = SRIOV_NONE; + + if (mode == SRIOV_OFFLOADS) + mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB); + return err; } void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) { struct esw_mc_addr *mc_promisc; + int old_mode; int nvports; int i; @@ -1675,7 +1684,11 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) else if (esw->mode == SRIOV_OFFLOADS) esw_offloads_cleanup(esw, nvports); + old_mode = esw->mode; esw->mode = SRIOV_NONE; + + if (old_mode == SRIOV_OFFLOADS) + mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB); } int mlx5_eswitch_init(struct mlx5_core_dev *dev) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 0692d280883c..0a8303c1b52f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -827,14 +827,9 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports) { int err; - /* disable PF RoCE so missed packets don't go through RoCE steering */ - mlx5_dev_list_lock(); - mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB); - mlx5_dev_list_unlock(); - err = esw_create_offloads_fdb_tables(esw, nvports); if (err) - goto create_fdb_err; + return err; err = esw_create_offloads_table(esw); if (err) @@ -859,12 +854,6 @@ create_fg_err: create_ft_err: esw_destroy_offloads_fdb_tables(esw); -create_fdb_err: - /* enable back PF RoCE */ - mlx5_dev_list_lock(); - 
mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB); - mlx5_dev_list_unlock(); - return err; } @@ -882,9 +871,7 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw) } /* enable back PF RoCE */ - mlx5_dev_list_lock(); - mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB); - mlx5_dev_list_unlock(); + mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index ee1a42a078ee..4e25f2b2e0bc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -201,4 +201,5 @@ static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev) int mlx5_lag_allow(struct mlx5_core_dev *dev); int mlx5_lag_forbid(struct mlx5_core_dev *dev); +void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol); #endif /* __MLX5_CORE_H__ */ -- cgit v1.2.3 From 46f3ee4f3a6e84c32e554c80d6533b465de7ff99 Mon Sep 17 00:00:00 2001 From: Aviad Yehezkel Date: Thu, 22 Feb 2018 17:40:52 +0200 Subject: net/mlx5: Fixed compilation issue when CONFIG_MLX5_ACCEL is disabled IPSec init and cleanup functions also depend on linux/mlx5/driver.h. Signed-off-by: Aviad Yehezkel Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h index d6e20fea9554..67cda8871f5a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h @@ -34,10 +34,10 @@ #ifndef __MLX5_ACCEL_IPSEC_H__ #define __MLX5_ACCEL_IPSEC_H__ -#ifdef CONFIG_MLX5_ACCEL - #include +#ifdef CONFIG_MLX5_ACCEL + enum { MLX5_ACCEL_IPSEC_DEVICE = BIT(1), MLX5_ACCEL_IPSEC_IPV6 = BIT(2), -- cgit v1.2.3 From ef927a9c168722c4ab5e40838d7fe6b63f978763 Mon Sep 17 00:00:00 2001 From: Aviad Yehezkel Date: Sun, 11 Feb 2018 17:12:44 +0200 Subject: net/mlx5e: Wait for FPGA command responses with a timeout Generally, FPGA IPSec commands must always complete. We want to wait up to one minute for them to complete gracefully, even when the waiting process is being killed.
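The fix below hinges on the return convention of wait_for_completion_timeout(): unlike wait_for_completion_killable(), which returns a negative errno on interruption, it returns 0 when the timeout elapses and the remaining jiffies (> 0) on completion. A minimal sketch of the resulting pattern:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

/* Sketch of the bounded wait used for FPGA IPSec commands. */
static int wait_bounded(struct completion *done, unsigned int msec)
{
	unsigned long left;

	left = wait_for_completion_timeout(done, msecs_to_jiffies(msec));
	if (!left)		/* 0 means the timeout elapsed */
		return -ETIMEDOUT;
	return 0;		/* > 0: completed with jiffies to spare */
}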
Signed-off-by: Aviad Yehezkel Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c index 35d0e33381ca..95f9c5a8619b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c @@ -39,6 +39,7 @@ #include "fpga/core.h" #define SBU_QP_QUEUE_SIZE 8 +#define MLX5_FPGA_IPSEC_CMD_TIMEOUT_MSEC (60 * 1000) enum mlx5_ipsec_response_syndrome { MLX5_IPSEC_RESPONSE_SUCCESS = 0, @@ -217,12 +218,14 @@ void *mlx5_fpga_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev, int mlx5_fpga_ipsec_sa_cmd_wait(void *ctx) { struct mlx5_ipsec_command_context *context = ctx; + unsigned long timeout = + msecs_to_jiffies(MLX5_FPGA_IPSEC_CMD_TIMEOUT_MSEC); int res; - res = wait_for_completion_killable(&context->complete); - if (res) { + res = wait_for_completion_timeout(&context->complete, timeout); + if (!res) { mlx5_fpga_warn(context->dev, "Failure waiting for IPSec command response\n"); - return -EINTR; + return -ETIMEDOUT; } if (context->status == MLX5_FPGA_IPSEC_SACMD_COMPLETE) -- cgit v1.2.3 From dc7debec07803f52d04dbd98d8047cae04dfe8ec Mon Sep 17 00:00:00 2001 From: Aviad Yehezkel Date: Sun, 28 Jan 2018 17:25:35 +0200 Subject: net/mlx5e: Fixed sleeping inside atomic context We can't allocate with GFP_KERNEL inside spinlock. Actually ida_simple doesn't require spinlock so remove it. Fixes: 547eede070eb ("net/mlx5e: IPSec, Innova IPSec offload infrastructure") Signed-off-by: Aviad Yehezkel Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index bac5103efad3..710521181143 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -74,18 +74,16 @@ static int mlx5e_ipsec_sadb_rx_add(struct mlx5e_ipsec_sa_entry *sa_entry) unsigned long flags; int ret; - spin_lock_irqsave(&ipsec->sadb_rx_lock, flags); ret = ida_simple_get(&ipsec->halloc, 1, 0, GFP_KERNEL); if (ret < 0) - goto out; + return ret; + spin_lock_irqsave(&ipsec->sadb_rx_lock, flags); sa_entry->handle = ret; hash_add_rcu(ipsec->sadb_rx, &sa_entry->hlist, sa_entry->handle); - ret = 0; - -out: spin_unlock_irqrestore(&ipsec->sadb_rx_lock, flags); - return ret; + + return 0; } static void mlx5e_ipsec_sadb_rx_del(struct mlx5e_ipsec_sa_entry *sa_entry) @@ -101,13 +99,10 @@ static void mlx5e_ipsec_sadb_rx_del(struct mlx5e_ipsec_sa_entry *sa_entry) static void mlx5e_ipsec_sadb_rx_free(struct mlx5e_ipsec_sa_entry *sa_entry) { struct mlx5e_ipsec *ipsec = sa_entry->ipsec; - unsigned long flags; /* Wait for the hash_del_rcu call in sadb_rx_del to affect data path */ synchronize_rcu(); - spin_lock_irqsave(&ipsec->sadb_rx_lock, flags); ida_simple_remove(&ipsec->halloc, sa_entry->handle); - spin_unlock_irqrestore(&ipsec->sadb_rx_lock, flags); } static enum mlx5_accel_ipsec_enc_mode mlx5e_ipsec_enc_mode(struct xfrm_state *x) -- cgit v1.2.3 From 1c9a10ebc77a6f123c701ba31d0c35bbf7414cde Mon Sep 17 00:00:00 2001 From: Aviad Yehezkel Date: Mon, 29 Jan 2018 13:09:12 +0200 Subject: net/mlx5e: 
Remove unneeded synchronize_rcu This is already done by the xfrm layer, between the state_dev_del and state_dev_free callbacks. Signed-off-by: Aviad Yehezkel Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index 710521181143..1b49afca65c0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -100,8 +100,8 @@ static void mlx5e_ipsec_sadb_rx_free(struct mlx5e_ipsec_sa_entry *sa_entry) { struct mlx5e_ipsec *ipsec = sa_entry->ipsec; - /* Wait for the hash_del_rcu call in sadb_rx_del to affect data path */ - synchronize_rcu(); + /* xfrm already doing sync rcu between del and free callbacks */ + ida_simple_remove(&ipsec->halloc, sa_entry->handle); } -- cgit v1.2.3 From 04e87170b0964d22c67245b739f56c3fe12fda54 Mon Sep 17 00:00:00 2001 From: Matan Barak Date: Sun, 19 Nov 2017 15:51:13 +0000 Subject: net/mlx5: FPGA and IPSec initialization to be before flow steering Some flow steering namespace initialization (e.g. the egress namespace) might depend on FPGA capabilities. Change the initialization order such that the FPGA is initialized before flow steering. Flow steering fs cmds initialization might depend on IPSec capabilities. Change the initialization order such that IPSec is initialized before flow steering as well. Signed-off-by: Aviad Yehezkel Signed-off-by: Matan Barak Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/main.c | 39 +++++++++++++------------- 1 file changed, 20 insertions(+), 19 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 8cc22bf80c87..03972eed02cd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1173,6 +1173,18 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, goto err_affinity_hints; } + err = mlx5_fpga_device_start(dev); + if (err) { + dev_err(&pdev->dev, "fpga device start failed %d\n", err); + goto err_fpga_start; + } + + err = mlx5_accel_ipsec_init(dev); + if (err) { + dev_err(&pdev->dev, "IPSec device start failed %d\n", err); + goto err_ipsec_start; + } + err = mlx5_init_fs(dev); if (err) { dev_err(&pdev->dev, "Failed to init flow steering\n"); @@ -1191,17 +1203,6 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, goto err_sriov; } - err = mlx5_fpga_device_start(dev); - if (err) { - dev_err(&pdev->dev, "fpga device start failed %d\n", err); - goto err_fpga_start; - } - err = mlx5_accel_ipsec_init(dev); - if (err) { - dev_err(&pdev->dev, "IPSec device start failed %d\n", err); - goto err_ipsec_start; - } - if (mlx5_device_registered(dev)) { mlx5_attach_device(dev); } else { @@ -1219,17 +1220,18 @@ out: return 0; err_reg_dev: - mlx5_accel_ipsec_cleanup(dev); -err_ipsec_start: - mlx5_fpga_device_stop(dev); - -err_fpga_start: mlx5_sriov_detach(dev); err_sriov: mlx5_cleanup_fs(dev); err_fs: + mlx5_accel_ipsec_cleanup(dev); + +err_ipsec_start: + mlx5_fpga_device_stop(dev); + +err_fpga_start: mlx5_irq_clear_affinity_hints(dev); err_affinity_hints: @@ -1296,11 +1298,10 @@ static int mlx5_unload_one(struct mlx5_core_dev 
*dev, struct mlx5_priv *priv, if (mlx5_device_registered(dev)) mlx5_detach_device(dev); - mlx5_accel_ipsec_cleanup(dev); - mlx5_fpga_device_stop(dev); - mlx5_sriov_detach(dev); mlx5_cleanup_fs(dev); + mlx5_accel_ipsec_cleanup(dev); + mlx5_fpga_device_stop(dev); mlx5_irq_clear_affinity_hints(dev); free_comp_eqs(dev); mlx5_stop_eqs(dev); -- cgit v1.2.3 From a9db0ecf1578894ea3405f3eb5a441508840d479 Mon Sep 17 00:00:00 2001 From: Matan Barak Date: Wed, 16 Aug 2017 09:43:48 +0300 Subject: {net,IB}/mlx5: Add has_tag to mlx5_flow_act The has_tag member will indicate whether a tag action was specified in the flow specification. A flow tag 0 = MLX5_FS_DEFAULT_FLOW_TAG is assumed to be a valid flow tag that is currently used by the mlx5 RDMA driver, whereas in HW flow_tag = 0 means that the user doesn't care about flow_tag. HW always provides a flow_tag = 0 if all flow tags requested on a specific flow are 0. So we need a way (in the driver) to differentiate between a user really requesting flow_tag = 0 and a user who does not care, in order to be able to report conflicting flow tags on a specific flow. Signed-off-by: Matan Barak Reviewed-by: Aviad Yehezkel Signed-off-by: Saeed Mahameed --- drivers/infiniband/hw/mlx5/main.c | 3 ++- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 1 + drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 2 +- include/linux/mlx5/fs.h | 1 + 4 files changed, 5 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 1b305367a817..d50ace805995 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -2535,6 +2535,7 @@ static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c, return -EINVAL; action->flow_tag = ib_spec->flow_tag.tag_id; + action->has_flow_tag = true; break; case IB_FLOW_SPEC_ACTION_DROP: if (FIELDS_NOT_SUPPORTED(ib_spec->drop, @@ -2847,7 +2848,7 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev, MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO; } - if (flow_act.flow_tag != MLX5_FS_DEFAULT_FLOW_TAG && + if (flow_act.has_flow_tag && (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT || flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) { mlx5_ib_warn(dev, "Flow tag %u and attribute type %x isn't allowed in leftovers\n", diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index fd98b0dc610f..eeff1fac77ef 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -675,6 +675,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, struct mlx5_flow_destination dest[2] = {}; struct mlx5_flow_act flow_act = { .action = attr->action, + .has_flow_tag = true, .flow_tag = attr->flow_tag, .encap_id = 0, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index c025c98700e4..d81da6920be8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -1443,7 +1443,7 @@ static int check_conflicting_ftes(struct fs_fte *fte, const struct mlx5_flow_act return -EEXIST; } - if (fte->flow_tag != flow_act->flow_tag) { + if (flow_act->has_flow_tag && fte->flow_tag != flow_act->flow_tag) { mlx5_core_warn(get_dev(&fte->node), "FTE flow tag %u already exists with different flow tag %u\n", fte->flow_tag, diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index a0b48afcb422..f580bc4c2443 
100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -141,6 +141,7 @@ void mlx5_destroy_flow_group(struct mlx5_flow_group *fg); struct mlx5_flow_act { u32 action; + bool has_flow_tag; u32 flow_tag; u32 encap_id; u32 modify_id; -- cgit v1.2.3 From af76c50198d1db136bbf293f3700c80722116831 Mon Sep 17 00:00:00 2001 From: Matan Barak Date: Sun, 20 Aug 2017 15:46:51 +0300 Subject: net/mlx5: Add shim layer between fs and cmd The shim layer allows each namespace to define possibly different functionality for add/delete/update commands. The shim layer introduced here, will be used to support flow steering with the FPGA. Signed-off-by: Matan Barak Signed-off-by: Aviad Yehezkel Signed-off-by: Boris Pismenny Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 191 ++++++++++++++++++---- drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h | 72 ++++---- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 84 ++++++---- drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 1 + 4 files changed, 248 insertions(+), 100 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index 881e2e55840c..c3eaddb43e57 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -39,9 +39,81 @@ #include "mlx5_core.h" #include "eswitch.h" -int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev, - struct mlx5_flow_table *ft, u32 underlay_qpn, - bool disconnect) +static int mlx5_cmd_stub_update_root_ft(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + u32 underlay_qpn, + bool disconnect) +{ + return 0; +} + +static int mlx5_cmd_stub_create_flow_table(struct mlx5_core_dev *dev, + u16 vport, + enum fs_flow_table_op_mod op_mod, + enum fs_flow_table_type type, + unsigned int level, + unsigned int log_size, + struct mlx5_flow_table *next_ft, + unsigned int *table_id, u32 flags) +{ + return 0; +} + +static int mlx5_cmd_stub_destroy_flow_table(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft) +{ + return 0; +} + +static int mlx5_cmd_stub_modify_flow_table(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + struct mlx5_flow_table *next_ft) +{ + return 0; +} + +static int mlx5_cmd_stub_create_flow_group(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + u32 *in, + unsigned int *group_id) +{ + return 0; +} + +static int mlx5_cmd_stub_destroy_flow_group(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + unsigned int group_id) +{ + return 0; +} + +static int mlx5_cmd_stub_create_fte(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + struct mlx5_flow_group *group, + struct fs_fte *fte) +{ + return 0; +} + +static int mlx5_cmd_stub_update_fte(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + unsigned int group_id, + int modify_mask, + struct fs_fte *fte) +{ + return -EOPNOTSUPP; +} + +static int mlx5_cmd_stub_delete_fte(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + unsigned int index) +{ + return 0; +} + +static int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, u32 underlay_qpn, + bool disconnect) { u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {0}; u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {0}; @@ -71,12 +143,14 @@ int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev, return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } -int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev, - u16 vport, 
- enum fs_flow_table_op_mod op_mod, - enum fs_flow_table_type type, unsigned int level, - unsigned int log_size, struct mlx5_flow_table - *next_ft, unsigned int *table_id, u32 flags) +static int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev, + u16 vport, + enum fs_flow_table_op_mod op_mod, + enum fs_flow_table_type type, + unsigned int level, + unsigned int log_size, + struct mlx5_flow_table *next_ft, + unsigned int *table_id, u32 flags) { int en_encap_decap = !!(flags & MLX5_FLOW_TABLE_TUNNEL_EN); u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0}; @@ -125,8 +199,8 @@ int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev, return err; } -int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev, - struct mlx5_flow_table *ft) +static int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft) { u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {0}; u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)] = {0}; @@ -143,9 +217,9 @@ int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev, return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } -int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev, - struct mlx5_flow_table *ft, - struct mlx5_flow_table *next_ft) +static int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + struct mlx5_flow_table *next_ft) { u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {0}; u32 out[MLX5_ST_SZ_DW(modify_flow_table_out)] = {0}; @@ -188,10 +262,10 @@ int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev, return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } -int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev, - struct mlx5_flow_table *ft, - u32 *in, - unsigned int *group_id) +static int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + u32 *in, + unsigned int *group_id) { u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0}; int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); @@ -213,9 +287,9 @@ int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev, return err; } -int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev, - struct mlx5_flow_table *ft, - unsigned int group_id) +static int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + unsigned int group_id) { u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)] = {0}; u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {0}; @@ -332,19 +406,21 @@ err_out: return err; } -int mlx5_cmd_create_fte(struct mlx5_core_dev *dev, - struct mlx5_flow_table *ft, - unsigned group_id, - struct fs_fte *fte) +static int mlx5_cmd_create_fte(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + struct mlx5_flow_group *group, + struct fs_fte *fte) { + unsigned int group_id = group->id; + return mlx5_cmd_set_fte(dev, 0, 0, ft, group_id, fte); } -int mlx5_cmd_update_fte(struct mlx5_core_dev *dev, - struct mlx5_flow_table *ft, - unsigned group_id, - int modify_mask, - struct fs_fte *fte) +static int mlx5_cmd_update_fte(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + unsigned int group_id, + int modify_mask, + struct fs_fte *fte) { int opmod; int atomic_mod_cap = MLX5_CAP_FLOWTABLE(dev, @@ -357,9 +433,9 @@ int mlx5_cmd_update_fte(struct mlx5_core_dev *dev, return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte); } -int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev, - struct mlx5_flow_table *ft, - unsigned int index) +static int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + unsigned int index) { u32 
out[MLX5_ST_SZ_DW(delete_fte_out)] = {0}; u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {0}; @@ -610,3 +686,52 @@ void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, u32 modify_header_id) mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } + +static const struct mlx5_flow_cmds mlx5_flow_cmds = { + .create_flow_table = mlx5_cmd_create_flow_table, + .destroy_flow_table = mlx5_cmd_destroy_flow_table, + .modify_flow_table = mlx5_cmd_modify_flow_table, + .create_flow_group = mlx5_cmd_create_flow_group, + .destroy_flow_group = mlx5_cmd_destroy_flow_group, + .create_fte = mlx5_cmd_create_fte, + .update_fte = mlx5_cmd_update_fte, + .delete_fte = mlx5_cmd_delete_fte, + .update_root_ft = mlx5_cmd_update_root_ft, +}; + +static const struct mlx5_flow_cmds mlx5_flow_cmd_stubs = { + .create_flow_table = mlx5_cmd_stub_create_flow_table, + .destroy_flow_table = mlx5_cmd_stub_destroy_flow_table, + .modify_flow_table = mlx5_cmd_stub_modify_flow_table, + .create_flow_group = mlx5_cmd_stub_create_flow_group, + .destroy_flow_group = mlx5_cmd_stub_destroy_flow_group, + .create_fte = mlx5_cmd_stub_create_fte, + .update_fte = mlx5_cmd_stub_update_fte, + .delete_fte = mlx5_cmd_stub_delete_fte, + .update_root_ft = mlx5_cmd_stub_update_root_ft, +}; + +static const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void) +{ + return &mlx5_flow_cmds; +} + +static const struct mlx5_flow_cmds *mlx5_fs_cmd_get_stub_cmds(void) +{ + return &mlx5_flow_cmd_stubs; +} + +const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type type) +{ + switch (type) { + case FS_FT_NIC_RX: + case FS_FT_ESW_EGRESS_ACL: + case FS_FT_ESW_INGRESS_ACL: + case FS_FT_FDB: + case FS_FT_SNIFFER_RX: + case FS_FT_SNIFFER_TX: + return mlx5_fs_cmd_get_fw_cmds(); + default: + return mlx5_fs_cmd_get_stub_cmds(); + } +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h index 71e2d0f37ad9..81c82f48d93e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h @@ -33,46 +33,52 @@ #ifndef _MLX5_FS_CMD_ #define _MLX5_FS_CMD_ -int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev, - u16 vport, - enum fs_flow_table_op_mod op_mod, - enum fs_flow_table_type type, unsigned int level, - unsigned int log_size, struct mlx5_flow_table - *next_ft, unsigned int *table_id, u32 flags); +#include "fs_core.h" -int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev, - struct mlx5_flow_table *ft); +struct mlx5_flow_cmds { + int (*create_flow_table)(struct mlx5_core_dev *dev, + u16 vport, + enum fs_flow_table_op_mod op_mod, + enum fs_flow_table_type type, + unsigned int level, unsigned int log_size, + struct mlx5_flow_table *next_ft, + unsigned int *table_id, u32 flags); + int (*destroy_flow_table)(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft); -int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev, - struct mlx5_flow_table *ft, - struct mlx5_flow_table *next_ft); + int (*modify_flow_table)(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + struct mlx5_flow_table *next_ft); -int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev, - struct mlx5_flow_table *ft, - u32 *in, unsigned int *group_id); + int (*create_flow_group)(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + u32 *in, + unsigned int *group_id); -int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev, - struct mlx5_flow_table *ft, - unsigned int group_id); + int (*destroy_flow_group)(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, 
+ unsigned int group_id); -int mlx5_cmd_create_fte(struct mlx5_core_dev *dev, - struct mlx5_flow_table *ft, - unsigned group_id, - struct fs_fte *fte); + int (*create_fte)(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + struct mlx5_flow_group *fg, + struct fs_fte *fte); -int mlx5_cmd_update_fte(struct mlx5_core_dev *dev, - struct mlx5_flow_table *ft, - unsigned group_id, - int modify_mask, - struct fs_fte *fte); + int (*update_fte)(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + unsigned int group_id, + int modify_mask, + struct fs_fte *fte); -int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev, - struct mlx5_flow_table *ft, - unsigned int index); + int (*delete_fte)(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + unsigned int index); -int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev, - struct mlx5_flow_table *ft, u32 underlay_qpn, - bool disconnect); + int (*update_root_ft)(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + u32 underlay_qpn, + bool disconnect); +}; int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id); int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id); @@ -90,4 +96,6 @@ void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b, u32 id, u64 *packets, u64 *bytes); +const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type type); + #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index d81da6920be8..f3a654b96b98 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -425,15 +425,17 @@ static void del_sw_prio(struct fs_node *node) static void del_hw_flow_table(struct fs_node *node) { + struct mlx5_flow_root_namespace *root; struct mlx5_flow_table *ft; struct mlx5_core_dev *dev; int err; fs_get_obj(ft, node); dev = get_dev(&ft->node); + root = find_root(&ft->node); if (node->active) { - err = mlx5_cmd_destroy_flow_table(dev, ft); + err = root->cmds->destroy_flow_table(dev, ft); if (err) mlx5_core_warn(dev, "flow steering can't destroy ft\n"); } @@ -454,6 +456,7 @@ static void del_sw_flow_table(struct fs_node *node) static void del_sw_hw_rule(struct fs_node *node) { + struct mlx5_flow_root_namespace *root; struct mlx5_flow_rule *rule; struct mlx5_flow_table *ft; struct mlx5_flow_group *fg; @@ -488,8 +491,9 @@ static void del_sw_hw_rule(struct fs_node *node) update_fte = true; } out: + root = find_root(&ft->node); if (update_fte && fte->dests_size) { - err = mlx5_cmd_update_fte(dev, ft, fg->id, modify_mask, fte); + err = root->cmds->update_fte(dev, ft, fg->id, modify_mask, fte); if (err) mlx5_core_warn(dev, "%s can't del rule fg id=%d fte_index=%d\n", @@ -500,6 +504,7 @@ out: static void del_hw_fte(struct fs_node *node) { + struct mlx5_flow_root_namespace *root; struct mlx5_flow_table *ft; struct mlx5_flow_group *fg; struct mlx5_core_dev *dev; @@ -512,9 +517,9 @@ static void del_hw_fte(struct fs_node *node) trace_mlx5_fs_del_fte(fte); dev = get_dev(&ft->node); + root = find_root(&ft->node); if (node->active) { - err = mlx5_cmd_delete_fte(dev, ft, - fte->index); + err = root->cmds->delete_fte(dev, ft, fte->index); if (err) mlx5_core_warn(dev, "flow steering can't delete fte in index %d of flow group id %d\n", @@ -542,6 +547,7 @@ static void del_sw_fte(struct fs_node *node) static void del_hw_flow_group(struct fs_node *node) { + struct mlx5_flow_root_namespace *root; struct mlx5_flow_group *fg; struct mlx5_flow_table *ft; struct mlx5_core_dev *dev; 
@@ -551,7 +557,8 @@ static void del_hw_flow_group(struct fs_node *node) dev = get_dev(&ft->node); trace_mlx5_fs_del_fg(fg); - if (fg->node.active && mlx5_cmd_destroy_flow_group(dev, ft, fg->id)) + root = find_root(&ft->node); + if (fg->node.active && root->cmds->destroy_flow_group(dev, ft, fg->id)) mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n", fg->id, ft->id); } @@ -797,15 +804,14 @@ static int connect_fts_in_prio(struct mlx5_core_dev *dev, struct fs_prio *prio, struct mlx5_flow_table *ft) { + struct mlx5_flow_root_namespace *root = find_root(&prio->node); struct mlx5_flow_table *iter; int i = 0; int err; fs_for_each_ft(iter, prio) { i++; - err = mlx5_cmd_modify_flow_table(dev, - iter, - ft); + err = root->cmds->modify_flow_table(dev, iter, ft); if (err) { mlx5_core_warn(dev, "Failed to modify flow table %d\n", iter->id); @@ -853,12 +859,12 @@ static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio if (list_empty(&root->underlay_qpns)) { /* Don't set any QPN (zero) in case QPN list is empty */ qpn = 0; - err = mlx5_cmd_update_root_ft(root->dev, ft, qpn, false); + err = root->cmds->update_root_ft(root->dev, ft, qpn, false); } else { list_for_each_entry(uqp, &root->underlay_qpns, list) { qpn = uqp->qpn; - err = mlx5_cmd_update_root_ft(root->dev, ft, qpn, - false); + err = root->cmds->update_root_ft(root->dev, ft, + qpn, false); if (err) break; } @@ -877,6 +883,7 @@ static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule, struct mlx5_flow_destination *dest) { + struct mlx5_flow_root_namespace *root; struct mlx5_flow_table *ft; struct mlx5_flow_group *fg; struct fs_fte *fte; @@ -891,10 +898,9 @@ static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule, fs_get_obj(ft, fg->node.parent); memcpy(&rule->dest_attr, dest, sizeof(*dest)); - err = mlx5_cmd_update_fte(get_dev(&ft->node), - ft, fg->id, - modify_mask, - fte); + root = find_root(&ft->node); + err = root->cmds->update_fte(get_dev(&ft->node), ft, fg->id, + modify_mask, fte); up_write_ref_node(&fte->node); return err; @@ -1035,9 +1041,9 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table); log_table_sz = ft->max_fte ? 
ilog2(ft->max_fte) : 0; next_ft = find_next_chained_ft(fs_prio); - err = mlx5_cmd_create_flow_table(root->dev, ft->vport, ft->op_mod, ft->type, - ft->level, log_table_sz, next_ft, &ft->id, - ft->flags); + err = root->cmds->create_flow_table(root->dev, ft->vport, ft->op_mod, + ft->type, ft->level, log_table_sz, + next_ft, &ft->id, ft->flags); if (err) goto free_ft; @@ -1053,7 +1059,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa mutex_unlock(&root->chain_lock); return ft; destroy_ft: - mlx5_cmd_destroy_flow_table(root->dev, ft); + root->cmds->destroy_flow_table(root->dev, ft); free_ft: kfree(ft); unlock_root: @@ -1125,6 +1131,7 @@ EXPORT_SYMBOL(mlx5_create_auto_grouped_flow_table); struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft, u32 *fg_in) { + struct mlx5_flow_root_namespace *root = find_root(&ft->node); void *match_criteria = MLX5_ADDR_OF(create_flow_group_in, fg_in, match_criteria); u8 match_criteria_enable = MLX5_GET(create_flow_group_in, @@ -1152,7 +1159,7 @@ struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft, if (IS_ERR(fg)) return fg; - err = mlx5_cmd_create_flow_group(dev, ft, fg_in, &fg->id); + err = root->cmds->create_flow_group(dev, ft, fg_in, &fg->id); if (err) { tree_put_node(&fg->node); return ERR_PTR(err); @@ -1275,6 +1282,7 @@ add_rule_fte(struct fs_fte *fte, int dest_num, bool update_action) { + struct mlx5_flow_root_namespace *root; struct mlx5_flow_handle *handle; struct mlx5_flow_table *ft; int modify_mask = 0; @@ -1290,12 +1298,13 @@ add_rule_fte(struct fs_fte *fte, modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION); fs_get_obj(ft, fg->node.parent); + root = find_root(&fg->node); if (!(fte->status & FS_FTE_STATUS_EXISTING)) - err = mlx5_cmd_create_fte(get_dev(&ft->node), - ft, fg->id, fte); + err = root->cmds->create_fte(get_dev(&ft->node), + ft, fg, fte); else - err = mlx5_cmd_update_fte(get_dev(&ft->node), - ft, fg->id, modify_mask, fte); + err = root->cmds->update_fte(get_dev(&ft->node), ft, fg->id, + modify_mask, fte); if (err) goto free_handle; @@ -1360,6 +1369,7 @@ out: static int create_auto_flow_group(struct mlx5_flow_table *ft, struct mlx5_flow_group *fg) { + struct mlx5_flow_root_namespace *root = find_root(&ft->node); struct mlx5_core_dev *dev = get_dev(&ft->node); int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); void *match_criteria_addr; @@ -1380,7 +1390,7 @@ static int create_auto_flow_group(struct mlx5_flow_table *ft, memcpy(match_criteria_addr, fg->mask.match_criteria, sizeof(fg->mask.match_criteria)); - err = mlx5_cmd_create_flow_group(dev, ft, in, &fg->id); + err = root->cmds->create_flow_group(dev, ft, in, &fg->id); if (!err) { fg->node.active = true; trace_mlx5_fs_add_fg(fg); @@ -1912,7 +1922,6 @@ static int update_root_ft_destroy(struct mlx5_flow_table *ft) return 0; new_root_ft = find_next_ft(ft); - if (!new_root_ft) { root->root_ft = NULL; return 0; @@ -1921,13 +1930,14 @@ static int update_root_ft_destroy(struct mlx5_flow_table *ft) if (list_empty(&root->underlay_qpns)) { /* Don't set any QPN (zero) in case QPN list is empty */ qpn = 0; - err = mlx5_cmd_update_root_ft(root->dev, new_root_ft, qpn, - false); + err = root->cmds->update_root_ft(root->dev, new_root_ft, + qpn, false); } else { list_for_each_entry(uqp, &root->underlay_qpns, list) { qpn = uqp->qpn; - err = mlx5_cmd_update_root_ft(root->dev, new_root_ft, - qpn, false); + err = root->cmds->update_root_ft(root->dev, + new_root_ft, qpn, + false); if (err) break; } @@ -2229,10 +2239,11 @@ static 
int init_root_tree(struct mlx5_flow_steering *steering, return 0; } -static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_flow_steering *steering, - enum fs_flow_table_type - table_type) +static struct mlx5_flow_root_namespace +*create_root_ns(struct mlx5_flow_steering *steering, + enum fs_flow_table_type table_type) { + const struct mlx5_flow_cmds *cmds = mlx5_fs_cmd_get_default(table_type); struct mlx5_flow_root_namespace *root_ns; struct mlx5_flow_namespace *ns; @@ -2243,6 +2254,7 @@ static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_flow_steering root_ns->dev = steering->dev; root_ns->table_type = table_type; + root_ns->cmds = cmds; INIT_LIST_HEAD(&root_ns->underlay_qpns); @@ -2634,7 +2646,8 @@ int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn) goto update_ft_fail; } - err = mlx5_cmd_update_root_ft(dev, root->root_ft, underlay_qpn, false); + err = root->cmds->update_root_ft(dev, root->root_ft, underlay_qpn, + false); if (err) { mlx5_core_warn(dev, "Failed adding underlay QPN (%u) to root FT err(%d)\n", underlay_qpn, err); @@ -2677,7 +2690,8 @@ int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn) goto out; } - err = mlx5_cmd_update_root_ft(dev, root->root_ft, underlay_qpn, true); + err = root->cmds->update_root_ft(dev, root->root_ft, underlay_qpn, + true); if (err) mlx5_core_warn(dev, "Failed removing underlay QPN (%u) from root FT err(%d)\n", underlay_qpn, err); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index 05262708f14b..45791c792296 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -224,6 +224,7 @@ struct mlx5_flow_root_namespace { /* Should be held when chaining flow tables */ struct mutex chain_lock; struct list_head underlay_qpns; + const struct mlx5_flow_cmds *cmds; }; int mlx5_init_fc_stats(struct mlx5_core_dev *dev); -- cgit v1.2.3 From 5f4183781a303da5ab6731b8c19328c5b9df89fa Mon Sep 17 00:00:00 2001 From: Aviad Yehezkel Date: Sun, 18 Feb 2018 13:17:17 +0200 Subject: net/mlx5: Add empty egress namespace to flow steering core Currently, we don't support an egress flow steering namespace in the mlx5 flow steering core implementation. However, when we want to encrypt a packet, we model it as a flow steering rule in the egress path. To overcome this, we add an empty egress namespace to flow steering. This namespace is initialized only when IPSec support exists. In the future, this will grow to a full-blown flow steering implementation, resembling the ingress path.
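Once the namespace exists, a consumer is expected to look it up the usual way; a sketch of a hypothetical call site (the lookup function and enum value are real, from the hunks below, but the caller itself is illustrative):

/* Hypothetical caller: the egress namespace only exists when IPSec
 * offload is supported, so the NULL check is mandatory.
 */
static int sketch_use_egress_ns(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_namespace *ns;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_EGRESS);
	if (!ns)	/* no IPSec offload -> no egress namespace */
		return -EOPNOTSUPP;
	/* ... create egress flow tables and rules under ns ... */
	return 0;
}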
Signed-off-by: Matan Barak Signed-off-by: Aviad Yehezkel Signed-off-by: Saeed Mahameed --- .../mellanox/mlx5/core/diag/fs_tracepoint.c | 3 +++ drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 1 + drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 28 ++++++++++++++++++++++ drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 2 ++ include/linux/mlx5/fs.h | 1 + include/linux/mlx5/mlx5_ifc.h | 1 + 6 files changed, 36 insertions(+) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c index 0be4575b58a2..3816b4506561 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c @@ -246,6 +246,9 @@ const char *parse_fs_dst(struct trace_seq *p, case MLX5_FLOW_DESTINATION_TYPE_COUNTER: trace_seq_printf(p, "counter_id=%u\n", counter_id); break; + case MLX5_FLOW_DESTINATION_TYPE_PORT: + trace_seq_printf(p, "port\n"); + break; } trace_seq_putc(p, 0); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index c3eaddb43e57..ed3ea80a24be 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -731,6 +731,7 @@ const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type typ case FS_FT_SNIFFER_RX: case FS_FT_SNIFFER_TX: return mlx5_fs_cmd_get_fw_cmds(); + case FS_FT_NIC_TX: default: return mlx5_fs_cmd_get_stub_cmds(); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index f3a654b96b98..5c111186d103 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -37,6 +37,7 @@ #include "fs_core.h" #include "fs_cmd.h" #include "diag/fs_tracepoint.h" +#include "accel/ipsec.h" #define INIT_TREE_NODE_ARRAY_SIZE(...) 
(sizeof((struct init_tree_node[]){__VA_ARGS__}) /\ sizeof(struct init_tree_node)) @@ -2049,6 +2050,11 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev, return &steering->sniffer_tx_root_ns->ns; else return NULL; + case MLX5_FLOW_NAMESPACE_EGRESS: + if (steering->egress_root_ns) + return &steering->egress_root_ns->ns; + else + return NULL; default: return NULL; } @@ -2413,6 +2419,7 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev) cleanup_root_ns(steering->fdb_root_ns); cleanup_root_ns(steering->sniffer_rx_root_ns); cleanup_root_ns(steering->sniffer_tx_root_ns); + cleanup_root_ns(steering->egress_root_ns); mlx5_cleanup_fc_stats(dev); kmem_cache_destroy(steering->ftes_cache); kmem_cache_destroy(steering->fgs_cache); @@ -2558,6 +2565,20 @@ cleanup_root_ns: return err; } +static int init_egress_root_ns(struct mlx5_flow_steering *steering) +{ + struct fs_prio *prio; + + steering->egress_root_ns = create_root_ns(steering, + FS_FT_NIC_TX); + if (!steering->egress_root_ns) + return -ENOMEM; + + /* create 1 prio*/ + prio = fs_create_prio(&steering->egress_root_ns->ns, 0, 1); + return PTR_ERR_OR_ZERO(prio); +} + int mlx5_init_fs(struct mlx5_core_dev *dev) { struct mlx5_flow_steering *steering; @@ -2623,6 +2644,13 @@ int mlx5_init_fs(struct mlx5_core_dev *dev) goto err; } + if (mlx5_accel_ipsec_device_caps(steering->dev) & + MLX5_ACCEL_IPSEC_DEVICE) { + err = init_egress_root_ns(steering); + if (err) + goto err; + } + return 0; err: mlx5_cleanup_fs(dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index 45791c792296..8586af9ce514 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -48,6 +48,7 @@ enum fs_node_type { enum fs_flow_table_type { FS_FT_NIC_RX = 0x0, + FS_FT_NIC_TX = 0x1, FS_FT_ESW_EGRESS_ACL = 0x2, FS_FT_ESW_INGRESS_ACL = 0x3, FS_FT_FDB = 0X4, @@ -75,6 +76,7 @@ struct mlx5_flow_steering { struct mlx5_flow_root_namespace **esw_ingress_root_ns; struct mlx5_flow_root_namespace *sniffer_tx_root_ns; struct mlx5_flow_root_namespace *sniffer_rx_root_ns; + struct mlx5_flow_root_namespace *egress_root_ns; }; struct fs_node { diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index f580bc4c2443..744ea228acea 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -69,6 +69,7 @@ enum mlx5_flow_namespace_type { MLX5_FLOW_NAMESPACE_ESW_INGRESS, MLX5_FLOW_NAMESPACE_SNIFFER_RX, MLX5_FLOW_NAMESPACE_SNIFFER_TX, + MLX5_FLOW_NAMESPACE_EGRESS, }; struct mlx5_flow_table; diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index f4e417686f62..9bc4ea0cf5a9 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -1091,6 +1091,7 @@ enum mlx5_flow_destination_type { MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1, MLX5_FLOW_DESTINATION_TYPE_TIR = 0x2, + MLX5_FLOW_DESTINATION_TYPE_PORT = 0x99, MLX5_FLOW_DESTINATION_TYPE_COUNTER = 0x100, }; -- cgit v1.2.3 From d2ec6a35e834f953790cfea8a4f8a1415d19a431 Mon Sep 17 00:00:00 2001 From: Matan Barak Date: Thu, 9 Nov 2017 12:12:15 +0000 Subject: net/mlx5: Embed mlx5_flow_act into fs_fte fte objects contain the match value and action. Currently, extending the actions requires adding them both to the API and to fs_fte.
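The mechanical effect on fs_fte users is small but pervasive; condensed from the hunks below, the copy in alloc_fte() collapses from per-field assignments to a single struct copy:

/* before: each action field copied (and declared in fs_fte) separately */
fte->flow_tag  = flow_act->flow_tag;
fte->action    = flow_act->action;
fte->encap_id  = flow_act->encap_id;
fte->modify_id = flow_act->modify_id;

/* after: fs_fte embeds struct mlx5_flow_act, so one copy suffices and a
 * new action field needs to be added only to mlx5_flow_act itself
 */
fte->action = *flow_act;

Readers then access fte->action.action, fte->action.flow_tag, and so on.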
Signed-off-by: Matan Barak Signed-off-by: Aviad Yehezkel Signed-off-by: Saeed Mahameed --- .../mellanox/mlx5/core/diag/fs_tracepoint.h | 4 ++-- drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 13 ++++++------ drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 24 ++++++++++------------ drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 5 +---- 4 files changed, 21 insertions(+), 25 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h index 80eef4163f52..a6ba57fbb414 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h @@ -163,9 +163,9 @@ TRACE_EVENT(mlx5_fs_set_fte, fs_get_obj(__entry->fg, fte->node.parent); __entry->group_index = __entry->fg->id; __entry->index = fte->index; - __entry->action = fte->action; + __entry->action = fte->action.action; __entry->mask_enable = __entry->fg->mask.match_criteria_enable; - __entry->flow_tag = fte->flow_tag; + __entry->flow_tag = fte->action.flow_tag; memcpy(__entry->mask_outer, MLX5_ADDR_OF(fte_match_param, &__entry->fg->mask.match_criteria, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index ed3ea80a24be..d9d7dd439bb9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -340,16 +340,17 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context); MLX5_SET(flow_context, in_flow_context, group_id, group_id); - MLX5_SET(flow_context, in_flow_context, flow_tag, fte->flow_tag); - MLX5_SET(flow_context, in_flow_context, action, fte->action); - MLX5_SET(flow_context, in_flow_context, encap_id, fte->encap_id); - MLX5_SET(flow_context, in_flow_context, modify_header_id, fte->modify_id); + MLX5_SET(flow_context, in_flow_context, flow_tag, fte->action.flow_tag); + MLX5_SET(flow_context, in_flow_context, action, fte->action.action); + MLX5_SET(flow_context, in_flow_context, encap_id, fte->action.encap_id); + MLX5_SET(flow_context, in_flow_context, modify_header_id, + fte->action.modify_id); in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context, match_value); memcpy(in_match_value, &fte->val, sizeof(fte->val)); in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination); - if (fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { + if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { int list_size = 0; list_for_each_entry(dst, &fte->node.children, node.list) { @@ -375,7 +376,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, list_size); } - if (fte->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { + if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev, log_max_flow_counter, ft->type)); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 5c111186d103..2e4a1d4e0cea 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -481,12 +481,12 @@ static void del_sw_hw_rule(struct fs_node *node) if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER && --fte->dests_size) { modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION); - fte->action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT; + fte->action.action &= 
~MLX5_FLOW_CONTEXT_ACTION_COUNT; update_fte = true; goto out; } - if ((fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) && + if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) && --fte->dests_size) { modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST), update_fte = true; @@ -623,10 +623,7 @@ static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft, memcpy(fte->val, match_value, sizeof(fte->val)); fte->node.type = FS_TYPE_FLOW_ENTRY; - fte->flow_tag = flow_act->flow_tag; - fte->action = flow_act->action; - fte->encap_id = flow_act->encap_id; - fte->modify_id = flow_act->modify_id; + fte->action = *flow_act; tree_init_node(&fte->node, del_hw_fte, del_sw_fte); @@ -892,7 +889,7 @@ static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule, int err = 0; fs_get_obj(fte, rule->node.parent); - if (!(fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST)) + if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST)) return -EINVAL; down_write_ref_node(&fte->node); fs_get_obj(fg, fte->node.parent); @@ -1448,16 +1445,17 @@ static bool check_conflicting_actions(u32 action1, u32 action2) static int check_conflicting_ftes(struct fs_fte *fte, const struct mlx5_flow_act *flow_act) { - if (check_conflicting_actions(flow_act->action, fte->action)) { + if (check_conflicting_actions(flow_act->action, fte->action.action)) { mlx5_core_warn(get_dev(&fte->node), "Found two FTEs with conflicting actions\n"); return -EEXIST; } - if (flow_act->has_flow_tag && fte->flow_tag != flow_act->flow_tag) { + if (flow_act->has_flow_tag && + fte->action.flow_tag != flow_act->flow_tag) { mlx5_core_warn(get_dev(&fte->node), "FTE flow tag %u already exists with different flow tag %u\n", - fte->flow_tag, + fte->action.flow_tag, flow_act->flow_tag); return -EEXIST; } @@ -1481,12 +1479,12 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg, if (ret) return ERR_PTR(ret); - old_action = fte->action; - fte->action |= flow_act->action; + old_action = fte->action.action; + fte->action.action |= flow_act->action; handle = add_rule_fte(fte, fg, dest, dest_num, old_action != flow_act->action); if (IS_ERR(handle)) { - fte->action = old_action; + fte->action.action = old_action; return handle; } trace_mlx5_fs_set_fte(fte, false); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index 8586af9ce514..e26d3e9d5f9f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -176,11 +176,8 @@ struct fs_fte { struct fs_node node; u32 val[MLX5_ST_SZ_DW_MATCH_PARAM]; u32 dests_size; - u32 flow_tag; u32 index; - u32 action; - u32 encap_id; - u32 modify_id; + struct mlx5_flow_act action; enum fs_fte_status status; struct mlx5_fc *counter; struct rhash_head hash; -- cgit v1.2.3 From e810bf5e96e327500cc6334f9d56c8047aaabcff Mon Sep 17 00:00:00 2001 From: Aviad Yehezkel Date: Sun, 18 Feb 2018 15:00:54 +0200 Subject: net/mlx5: Flow steering cmd interface should get the fte when deleting Previously, deleting a flow steering entry only got the index. Since the FPGA implementation of FTE's deletion might need to dig inside the FTE itself, we would like to get the FTE's context. Changing the interface to pass the FTE context. 
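As a rough sketch of the interface change (simplified signatures, hypothetical helper name), passing the entry instead of its index keeps the firmware path unchanged while letting other back ends inspect the whole context:

/* Old shape: only the index crossed the command boundary.
 *
 *      int (*delete_fte)(struct mlx5_core_dev *dev,
 *                        struct mlx5_flow_table *ft,
 *                        unsigned int index);
 *
 * New shape: the entry itself crosses it.  The firmware
 * implementation still reads only fte->index, as below; an FPGA
 * implementation is free to look at the rest of the FTE.
 */
static int example_delete_fte(struct mlx5_core_dev *dev,
                              struct mlx5_flow_table *ft,
                              struct fs_fte *fte)
{
        /* fw_delete_entry() is a hypothetical stand-in for the
         * DELETE_FLOW_TABLE_ENTRY command built in the patch below.
         */
        return fw_delete_entry(dev, ft->id, fte->index);
}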
Signed-off-by: Aviad Yehezkel Signed-off-by: Matan Barak Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 6 +++--- drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h | 2 +- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index d9d7dd439bb9..645f83cac34d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -106,7 +106,7 @@ static int mlx5_cmd_stub_update_fte(struct mlx5_core_dev *dev, static int mlx5_cmd_stub_delete_fte(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft, - unsigned int index) + struct fs_fte *fte) { return 0; } @@ -436,7 +436,7 @@ static int mlx5_cmd_update_fte(struct mlx5_core_dev *dev, static int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft, - unsigned int index) + struct fs_fte *fte) { u32 out[MLX5_ST_SZ_DW(delete_fte_out)] = {0}; u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {0}; @@ -444,7 +444,7 @@ static int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev, MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY); MLX5_SET(delete_fte_in, in, table_type, ft->type); MLX5_SET(delete_fte_in, in, table_id, ft->id); - MLX5_SET(delete_fte_in, in, flow_index, index); + MLX5_SET(delete_fte_in, in, flow_index, fte->index); if (ft->vport) { MLX5_SET(delete_fte_in, in, vport_number, ft->vport); MLX5_SET(delete_fte_in, in, other_vport, 1); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h index 81c82f48d93e..6228ba7bfa1a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h @@ -72,7 +72,7 @@ struct mlx5_flow_cmds { int (*delete_fte)(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft, - unsigned int index); + struct fs_fte *fte); int (*update_root_ft)(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 2e4a1d4e0cea..4e456c292ce4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -520,7 +520,7 @@ static void del_hw_fte(struct fs_node *node) dev = get_dev(&ft->node); root = find_root(&ft->node); if (node->active) { - err = root->cmds->delete_fte(dev, ft, fte->index); + err = root->cmds->delete_fte(dev, ft, fte); if (err) mlx5_core_warn(dev, "flow steering can't delete fte in index %d of flow group id %d\n", -- cgit v1.2.3 From d83a69c2b1b17a8aedc7a75ba8b620bfd2057e93 Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Wed, 7 Mar 2018 15:44:48 -0800 Subject: net/mlx5: Use MLX5_IPSEC_DEV macro for ipsec caps Fix build break of mlx5_accel_ipsec_device_caps is not defined when MLX5_ACCEL is not selected, use MLX5_IPSEC_DEV instead which handles such case. 
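The fix relies on the usual Kconfig-stub idiom, sketched below in reduced form; the driver's header achieves the same effect by defining MLX5_IPSEC_DEV(mdev) as false when MLX5_ACCEL is not selected, so the test compiles, and evaluates to false, in both configurations:

#ifdef CONFIG_MLX5_ACCEL
u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev);
#else
static inline u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev)
{
        return 0;       /* no accel support compiled in */
}
#endif

/* Safe for callers to use unconditionally: */
#define MLX5_IPSEC_DEV(mdev) (mlx5_accel_ipsec_device_caps(mdev) & \
                              MLX5_ACCEL_IPSEC_DEVICE)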
Signed-off-by: Saeed Mahameed Reported-by: Doug Ledford --- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 4e456c292ce4..f836e6b76f65 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -2642,8 +2642,7 @@ int mlx5_init_fs(struct mlx5_core_dev *dev) goto err; } - if (mlx5_accel_ipsec_device_caps(steering->dev) & - MLX5_ACCEL_IPSEC_DEVICE) { + if (MLX5_IPSEC_DEV(dev)) { err = init_egress_root_ns(steering); if (err) goto err; -- cgit v1.2.3 From 581fdddee420cebe2cb781cb3c84c82676a86949 Mon Sep 17 00:00:00 2001 From: Yossi Kuperman Date: Sun, 22 Oct 2017 19:43:58 +0300 Subject: net/mlx5: IPSec, Generalize sandbox QP commands The current code assume only SA QP commands. Refactor in order to pave the way for new QP commands: 1. Generic cmd response format. 2. SA cmd checks are in dedicated functions. 3. Aligned debug prints. Signed-off-by: Yossi Kuperman Signed-off-by: Aviad Yehezkel Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/fpga/ipsec.c | 116 ++++++++++++--------- include/linux/mlx5/mlx5_ifc_fpga.h | 16 +++ 2 files changed, 81 insertions(+), 51 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c index 95f9c5a8619b..e0f32b025e06 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c @@ -41,35 +41,23 @@ #define SBU_QP_QUEUE_SIZE 8 #define MLX5_FPGA_IPSEC_CMD_TIMEOUT_MSEC (60 * 1000) -enum mlx5_ipsec_response_syndrome { - MLX5_IPSEC_RESPONSE_SUCCESS = 0, - MLX5_IPSEC_RESPONSE_ILLEGAL_REQUEST = 1, - MLX5_IPSEC_RESPONSE_SADB_ISSUE = 2, - MLX5_IPSEC_RESPONSE_WRITE_RESPONSE_ISSUE = 3, -}; - -enum mlx5_fpga_ipsec_sacmd_status { - MLX5_FPGA_IPSEC_SACMD_PENDING, - MLX5_FPGA_IPSEC_SACMD_SEND_FAIL, - MLX5_FPGA_IPSEC_SACMD_COMPLETE, +enum mlx5_fpga_ipsec_cmd_status { + MLX5_FPGA_IPSEC_CMD_PENDING, + MLX5_FPGA_IPSEC_CMD_SEND_FAIL, + MLX5_FPGA_IPSEC_CMD_COMPLETE, }; struct mlx5_ipsec_command_context { struct mlx5_fpga_dma_buf buf; - struct mlx5_accel_ipsec_sa sa; - enum mlx5_fpga_ipsec_sacmd_status status; + enum mlx5_fpga_ipsec_cmd_status status; + struct mlx5_ifc_fpga_ipsec_cmd_resp resp; int status_code; struct completion complete; struct mlx5_fpga_device *dev; struct list_head list; /* Item in pending_cmds */ + u8 command[0]; }; -struct mlx5_ipsec_sadb_resp { - __be32 syndrome; - __be32 sw_sa_handle; - u8 reserved[24]; -} __packed; - struct mlx5_fpga_ipsec { struct list_head pending_cmds; spinlock_t pending_cmds_lock; /* Protects pending_cmds */ @@ -105,21 +93,22 @@ static void mlx5_fpga_ipsec_send_complete(struct mlx5_fpga_conn *conn, buf); mlx5_fpga_warn(fdev, "IPSec command send failed with status %u\n", status); - context->status = MLX5_FPGA_IPSEC_SACMD_SEND_FAIL; + context->status = MLX5_FPGA_IPSEC_CMD_SEND_FAIL; complete(&context->complete); } } -static inline int syndrome_to_errno(enum mlx5_ipsec_response_syndrome syndrome) +static inline +int syndrome_to_errno(enum mlx5_ifc_fpga_ipsec_response_syndrome syndrome) { switch (syndrome) { - case MLX5_IPSEC_RESPONSE_SUCCESS: + case MLX5_FPGA_IPSEC_RESPONSE_SUCCESS: return 0; - case MLX5_IPSEC_RESPONSE_SADB_ISSUE: + case 
MLX5_FPGA_IPSEC_RESPONSE_SADB_ISSUE: return -EEXIST; - case MLX5_IPSEC_RESPONSE_ILLEGAL_REQUEST: + case MLX5_FPGA_IPSEC_RESPONSE_ILLEGAL_REQUEST: return -EINVAL; - case MLX5_IPSEC_RESPONSE_WRITE_RESPONSE_ISSUE: + case MLX5_FPGA_IPSEC_RESPONSE_WRITE_RESPONSE_ISSUE: return -EIO; } return -EIO; @@ -127,9 +116,9 @@ static inline int syndrome_to_errno(enum mlx5_ipsec_response_syndrome syndrome) static void mlx5_fpga_ipsec_recv(void *cb_arg, struct mlx5_fpga_dma_buf *buf) { - struct mlx5_ipsec_sadb_resp *resp = buf->sg[0].data; + struct mlx5_ifc_fpga_ipsec_cmd_resp *resp = buf->sg[0].data; struct mlx5_ipsec_command_context *context; - enum mlx5_ipsec_response_syndrome syndrome; + enum mlx5_ifc_fpga_ipsec_response_syndrome syndrome; struct mlx5_fpga_device *fdev = cb_arg; unsigned long flags; @@ -139,8 +128,8 @@ static void mlx5_fpga_ipsec_recv(void *cb_arg, struct mlx5_fpga_dma_buf *buf) return; } - mlx5_fpga_dbg(fdev, "mlx5_ipsec recv_cb syndrome %08x sa_id %x\n", - ntohl(resp->syndrome), ntohl(resp->sw_sa_handle)); + mlx5_fpga_dbg(fdev, "mlx5_ipsec recv_cb syndrome %08x\n", + ntohl(resp->syndrome)); spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags); context = list_first_entry_or_null(&fdev->ipsec->pending_cmds, @@ -156,51 +145,48 @@ static void mlx5_fpga_ipsec_recv(void *cb_arg, struct mlx5_fpga_dma_buf *buf) } mlx5_fpga_dbg(fdev, "Handling response for %p\n", context); - if (context->sa.sw_sa_handle != resp->sw_sa_handle) { - mlx5_fpga_err(fdev, "mismatch SA handle. cmd 0x%08x vs resp 0x%08x\n", - ntohl(context->sa.sw_sa_handle), - ntohl(resp->sw_sa_handle)); - return; - } - syndrome = ntohl(resp->syndrome); context->status_code = syndrome_to_errno(syndrome); - context->status = MLX5_FPGA_IPSEC_SACMD_COMPLETE; + context->status = MLX5_FPGA_IPSEC_CMD_COMPLETE; + memcpy(&context->resp, resp, sizeof(*resp)); if (context->status_code) - mlx5_fpga_warn(fdev, "IPSec SADB command failed with syndrome %08x\n", + mlx5_fpga_warn(fdev, "IPSec command failed with syndrome %08x\n", syndrome); + complete(&context->complete); } -void *mlx5_fpga_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev, - struct mlx5_accel_ipsec_sa *cmd) +static void *mlx5_fpga_ipsec_cmd_exec(struct mlx5_core_dev *mdev, + const void *cmd, int cmd_size) { struct mlx5_ipsec_command_context *context; struct mlx5_fpga_device *fdev = mdev->fpga; unsigned long flags; - int res = 0; + int res; - BUILD_BUG_ON((sizeof(struct mlx5_accel_ipsec_sa) & 3) != 0); if (!fdev || !fdev->ipsec) return ERR_PTR(-EOPNOTSUPP); - context = kzalloc(sizeof(*context), GFP_ATOMIC); + if (cmd_size & 3) + return ERR_PTR(-EINVAL); + + context = kzalloc(sizeof(*context) + cmd_size, GFP_ATOMIC); if (!context) return ERR_PTR(-ENOMEM); - memcpy(&context->sa, cmd, sizeof(*cmd)); + context->status = MLX5_FPGA_IPSEC_CMD_PENDING; + context->dev = fdev; context->buf.complete = mlx5_fpga_ipsec_send_complete; - context->buf.sg[0].size = sizeof(context->sa); - context->buf.sg[0].data = &context->sa; init_completion(&context->complete); - context->dev = fdev; + memcpy(&context->command, cmd, cmd_size); + context->buf.sg[0].size = cmd_size; + context->buf.sg[0].data = &context->command; + spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags); list_add_tail(&context->list, &fdev->ipsec->pending_cmds); spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags); - context->status = MLX5_FPGA_IPSEC_SACMD_PENDING; - res = mlx5_fpga_sbu_conn_sendmsg(fdev->ipsec->conn, &context->buf); if (res) { mlx5_fpga_warn(fdev, "Failure sending IPSec command: %d\n", @@ -215,7 +201,7 @@ 
void *mlx5_fpga_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev, return context; } -int mlx5_fpga_ipsec_sa_cmd_wait(void *ctx) +static int mlx5_fpga_ipsec_cmd_wait(void *ctx) { struct mlx5_ipsec_command_context *context = ctx; unsigned long timeout = @@ -228,11 +214,39 @@ int mlx5_fpga_ipsec_sa_cmd_wait(void *ctx) return -ETIMEDOUT; } - if (context->status == MLX5_FPGA_IPSEC_SACMD_COMPLETE) + if (context->status == MLX5_FPGA_IPSEC_CMD_COMPLETE) res = context->status_code; else res = -EIO; + return res; +} + +void *mlx5_fpga_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev, + struct mlx5_accel_ipsec_sa *cmd) +{ + return mlx5_fpga_ipsec_cmd_exec(mdev, cmd, sizeof(*cmd)); +} + +int mlx5_fpga_ipsec_sa_cmd_wait(void *ctx) +{ + struct mlx5_ipsec_command_context *context = ctx; + struct mlx5_accel_ipsec_sa *sa; + int res; + + res = mlx5_fpga_ipsec_cmd_wait(ctx); + if (res) + goto out; + + sa = (struct mlx5_accel_ipsec_sa *)&context->command; + if (sa->sw_sa_handle != context->resp.sw_sa_handle) { + mlx5_fpga_err(context->dev, "mismatch SA handle. cmd 0x%08x vs resp 0x%08x\n", + ntohl(sa->sw_sa_handle), + ntohl(context->resp.sw_sa_handle)); + res = -EIO; + } + +out: kfree(context); return res; } diff --git a/include/linux/mlx5/mlx5_ifc_fpga.h b/include/linux/mlx5/mlx5_ifc_fpga.h index 255a88d08078..7283fe780f93 100644 --- a/include/linux/mlx5/mlx5_ifc_fpga.h +++ b/include/linux/mlx5/mlx5_ifc_fpga.h @@ -429,4 +429,20 @@ struct mlx5_ifc_ipsec_counters_bits { u8 dropped_cmd[0x40]; }; +enum mlx5_ifc_fpga_ipsec_response_syndrome { + MLX5_FPGA_IPSEC_RESPONSE_SUCCESS = 0, + MLX5_FPGA_IPSEC_RESPONSE_ILLEGAL_REQUEST = 1, + MLX5_FPGA_IPSEC_RESPONSE_SADB_ISSUE = 2, + MLX5_FPGA_IPSEC_RESPONSE_WRITE_RESPONSE_ISSUE = 3, +}; + +struct mlx5_ifc_fpga_ipsec_cmd_resp { + __be32 syndrome; + union { + __be32 sw_sa_handle; + __be32 flags; + }; + u8 reserved[24]; +} __packed; + #endif /* MLX5_IFC_FPGA_H */ -- cgit v1.2.3 From 788a8210764ce2977095010931959c87b60c2f51 Mon Sep 17 00:00:00 2001 From: Yossi Kuperman Date: Sun, 22 Oct 2017 19:45:45 +0300 Subject: net/mlx5e: IPSec, Add support for ESP trailer removal by hardware Current hardware decrypts and authenticates incoming ESP packets. Subsequently, the software extracts the nexthdr field, truncates the trailer and adjusts csum accordingly. With this patch and a capable device, the trailer is being removed by the hardware and the nexthdr field is conveyed via PET. This way we avoid both the need to access the trailer (cache miss) and to compute its relative checksum, which significantly improve the performance. Experiment shows that trailer removal improves the performance by 2Gbps, (netperf). Both forwarding and host-to-host configurations. 
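A condensed sketch of the RX-side change this patch makes (simplified from mlx5e_ipsec_build_sp(); error handling omitted): the metadata byte that used to be reserved now carries the next-header value, and the stack is told the trailer is already gone:

struct mlx5e_ipsec_rx_metadata {
        unsigned char nexthdr;  /* was reserved; now filled by hardware */
        __be32 sa_handle;
} __packed;

static void rx_decrypted(struct xfrm_offload *xo,
                         const struct mlx5e_ipsec_rx_metadata *rx,
                         bool no_trailer)
{
        xo->status = CRYPTO_SUCCESS;
        if (no_trailer) {
                /* Hardware stripped the ESP trailer: take the next
                 * protocol from metadata instead of reading (and
                 * checksumming) trailer bytes that are no longer there.
                 */
                xo->flags |= XFRM_ESP_NO_TRAILER;
                xo->proto = rx->nexthdr;
        }
}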
Signed-off-by: Yossi Kuperman Signed-off-by: Aviad Yehezkel Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/accel/ipsec.h | 2 + .../ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 2 + .../ethernet/mellanox/mlx5/core/en_accel/ipsec.h | 1 + .../mellanox/mlx5/core/en_accel/ipsec_rxtx.c | 10 +++- .../net/ethernet/mellanox/mlx5/core/fpga/ipsec.c | 54 ++++++++++++++++++++++ include/linux/mlx5/mlx5_ifc_fpga.h | 13 +++++- 6 files changed, 80 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h index 67cda8871f5a..4da9611a753d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h @@ -43,6 +43,7 @@ enum { MLX5_ACCEL_IPSEC_IPV6 = BIT(2), MLX5_ACCEL_IPSEC_ESP = BIT(3), MLX5_ACCEL_IPSEC_LSO = BIT(4), + MLX5_ACCEL_IPSEC_NO_TRAILER = BIT(5), }; #define MLX5_IPSEC_SADB_IP_AH BIT(7) @@ -55,6 +56,7 @@ enum { enum { MLX5_IPSEC_CMD_ADD_SA = 0, MLX5_IPSEC_CMD_DEL_SA = 1, + MLX5_IPSEC_CMD_SET_CAP = 5, }; enum mlx5_accel_ipsec_enc_mode { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index 1b49afca65c0..460a613059fe 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -378,6 +378,8 @@ int mlx5e_ipsec_init(struct mlx5e_priv *priv) ida_init(&ipsec->halloc); ipsec->en_priv = priv; ipsec->en_priv->ipsec = ipsec; + ipsec->no_trailer = !!(mlx5_accel_ipsec_device_caps(priv->mdev) & + MLX5_ACCEL_IPSEC_NO_TRAILER); netdev_dbg(priv->netdev, "IPSec attached to netdevice\n"); return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h index 56e00baf16cc..bffc3ed0574a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h @@ -77,6 +77,7 @@ struct mlx5e_ipsec_stats { struct mlx5e_ipsec { struct mlx5e_priv *en_priv; DECLARE_HASHTABLE(sadb_rx, MLX5E_IPSEC_SADB_RX_BITS); + bool no_trailer; spinlock_t sadb_rx_lock; /* Protects sadb_rx and halloc */ struct ida halloc; struct mlx5e_ipsec_sw_stats sw_stats; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c index 6a7c8b04447e..64c549a06678 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c @@ -42,10 +42,11 @@ enum { MLX5E_IPSEC_RX_SYNDROME_DECRYPTED = 0x11, MLX5E_IPSEC_RX_SYNDROME_AUTH_FAILED = 0x12, + MLX5E_IPSEC_RX_SYNDROME_BAD_PROTO = 0x17, }; struct mlx5e_ipsec_rx_metadata { - unsigned char reserved; + unsigned char nexthdr; __be32 sa_handle; } __packed; @@ -301,10 +302,17 @@ mlx5e_ipsec_build_sp(struct net_device *netdev, struct sk_buff *skb, switch (mdata->syndrome) { case MLX5E_IPSEC_RX_SYNDROME_DECRYPTED: xo->status = CRYPTO_SUCCESS; + if (likely(priv->ipsec->no_trailer)) { + xo->flags |= XFRM_ESP_NO_TRAILER; + xo->proto = mdata->content.rx.nexthdr; + } break; case MLX5E_IPSEC_RX_SYNDROME_AUTH_FAILED: xo->status = CRYPTO_TUNNEL_ESP_AUTH_FAILED; break; + case MLX5E_IPSEC_RX_SYNDROME_BAD_PROTO: + xo->status = CRYPTO_INVALID_PROTOCOL; + break; default: atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_syndrome); return NULL; diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c index e0f32b025e06..3b10d46dc821 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c @@ -273,6 +273,9 @@ u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev) if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, lso)) ret |= MLX5_ACCEL_IPSEC_LSO; + if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, rx_no_trailer)) + ret |= MLX5_ACCEL_IPSEC_NO_TRAILER; + return ret; } @@ -335,6 +338,46 @@ out: return ret; } +static int mlx5_fpga_ipsec_set_caps(struct mlx5_core_dev *mdev, u32 flags) +{ + struct mlx5_ipsec_command_context *context; + struct mlx5_ifc_fpga_ipsec_cmd_cap cmd = {0}; + int err; + + cmd.cmd = htonl(MLX5_IPSEC_CMD_SET_CAP); + cmd.flags = htonl(flags); + context = mlx5_fpga_ipsec_cmd_exec(mdev, &cmd, sizeof(cmd)); + if (IS_ERR(context)) { + err = PTR_ERR(context); + goto out; + } + + err = mlx5_fpga_ipsec_cmd_wait(context); + if (err) + goto out; + + if ((context->resp.flags & cmd.flags) != cmd.flags) { + mlx5_fpga_err(context->dev, "Failed to set capabilities. cmd 0x%08x vs resp 0x%08x\n", + cmd.flags, + context->resp.flags); + err = -EIO; + } + +out: + return err; +} + +static int mlx5_fpga_ipsec_enable_supported_caps(struct mlx5_core_dev *mdev) +{ + u32 dev_caps = mlx5_fpga_ipsec_device_caps(mdev); + u32 flags = 0; + + if (dev_caps & MLX5_ACCEL_IPSEC_NO_TRAILER) + flags |= MLX5_FPGA_IPSEC_CAP_NO_TRAILER; + + return mlx5_fpga_ipsec_set_caps(mdev, flags); +} + int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev) { struct mlx5_fpga_conn_attr init_attr = {0}; @@ -372,8 +415,19 @@ int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev) goto error; } fdev->ipsec->conn = conn; + + err = mlx5_fpga_ipsec_enable_supported_caps(mdev); + if (err) { + mlx5_fpga_err(fdev, "Failed to enable IPSec extended capabilities: %d\n", + err); + goto err_destroy_conn; + } + return 0; +err_destroy_conn: + mlx5_fpga_sbu_conn_destroy(conn); + error: kfree(fdev->ipsec); fdev->ipsec = NULL; diff --git a/include/linux/mlx5/mlx5_ifc_fpga.h b/include/linux/mlx5/mlx5_ifc_fpga.h index 7283fe780f93..643544db180b 100644 --- a/include/linux/mlx5/mlx5_ifc_fpga.h +++ b/include/linux/mlx5/mlx5_ifc_fpga.h @@ -373,7 +373,8 @@ struct mlx5_ifc_fpga_destroy_qp_out_bits { struct mlx5_ifc_ipsec_extended_cap_bits { u8 encapsulation[0x20]; - u8 reserved_0[0x15]; + u8 reserved_0[0x14]; + u8 rx_no_trailer[0x1]; u8 ipv4_fragment[0x1]; u8 ipv6[0x1]; u8 esn[0x1]; @@ -445,4 +446,14 @@ struct mlx5_ifc_fpga_ipsec_cmd_resp { u8 reserved[24]; } __packed; +enum mlx5_ifc_fpga_ipsec_cap { + MLX5_FPGA_IPSEC_CAP_NO_TRAILER = BIT(0), +}; + +struct mlx5_ifc_fpga_ipsec_cmd_cap { + __be32 cmd; + __be32 flags; + u8 reserved[24]; +} __packed; + #endif /* MLX5_IFC_FPGA_H */ -- cgit v1.2.3 From 65802f480008066636a43173b12388bb3fb7bd3a Mon Sep 17 00:00:00 2001 From: Aviad Yehezkel Date: Tue, 16 Jan 2018 16:12:22 +0200 Subject: net/mlx5: IPSec, Add command V2 support This patch adds V2 command support. New fpga devices support extended features (udp encap, esn etc...), this features require new hardware sadb format therefore we have a new version of commands to manipulate it. 
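The versioning scheme is easiest to see as a layout sketch (struct and field names abbreviated here; the real ones are in accel/ipsec.h below): the V1 command is kept as the leading member of the V2 command, so a single buffer serves both formats and only the number of bytes sent differs:

struct sa_cmd_v2 {
        struct sa_cmd_v1 v1;    /* wire-compatible V1 prefix */
        __be16 udp_sp, udp_dp;  /* V2-only fields follow */
        __be32 esn;
} __packed;

static size_t sa_cmd_size(struct mlx5_core_dev *mdev,
                          const struct sa_cmd_v2 *cmd)
{
        /* Devices advertising v2_command get the full layout;
         * older devices get just the embedded V1 prefix.
         */
        if (mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_V2_CMD)
                return sizeof(*cmd);
        return sizeof(cmd->v1);
}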
Signed-off-by: Yossef Efraim Signed-off-by: Aviad Yehezkel Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/accel/ipsec.c | 9 +++- .../net/ethernet/mellanox/mlx5/core/accel/ipsec.h | 21 ++++++-- .../ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 60 ++++++++++------------ .../net/ethernet/mellanox/mlx5/core/fpga/ipsec.c | 11 ++-- .../net/ethernet/mellanox/mlx5/core/fpga/ipsec.h | 5 +- include/linux/mlx5/mlx5_ifc_fpga.h | 4 +- 6 files changed, 66 insertions(+), 44 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c index 53e69edaedde..b88ae12d9066 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c @@ -40,10 +40,17 @@ void *mlx5_accel_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev, struct mlx5_accel_ipsec_sa *cmd) { + int cmd_size; + if (!MLX5_IPSEC_DEV(mdev)) return ERR_PTR(-EOPNOTSUPP); - return mlx5_fpga_ipsec_sa_cmd_exec(mdev, cmd); + if (mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_V2_CMD) + cmd_size = sizeof(*cmd); + else + cmd_size = sizeof(cmd->ipsec_sa_v1); + + return mlx5_fpga_ipsec_sa_cmd_exec(mdev, cmd, cmd_size); } int mlx5_accel_ipsec_sa_cmd_wait(void *ctx) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h index 4da9611a753d..14a2e95e82c3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h @@ -44,6 +44,7 @@ enum { MLX5_ACCEL_IPSEC_ESP = BIT(3), MLX5_ACCEL_IPSEC_LSO = BIT(4), MLX5_ACCEL_IPSEC_NO_TRAILER = BIT(5), + MLX5_ACCEL_IPSEC_V2_CMD = BIT(7), }; #define MLX5_IPSEC_SADB_IP_AH BIT(7) @@ -56,6 +57,9 @@ enum { enum { MLX5_IPSEC_CMD_ADD_SA = 0, MLX5_IPSEC_CMD_DEL_SA = 1, + MLX5_IPSEC_CMD_ADD_SA_V2 = 2, + MLX5_IPSEC_CMD_DEL_SA_V2 = 3, + MLX5_IPSEC_CMD_MOD_SA_V2 = 4, MLX5_IPSEC_CMD_SET_CAP = 5, }; @@ -68,7 +72,7 @@ enum mlx5_accel_ipsec_enc_mode { #define MLX5_IPSEC_DEV(mdev) (mlx5_accel_ipsec_device_caps(mdev) & \ MLX5_ACCEL_IPSEC_DEVICE) -struct mlx5_accel_ipsec_sa { +struct mlx5_accel_ipsec_sa_v1 { __be32 cmd; u8 key_enc[32]; u8 key_auth[32]; @@ -88,10 +92,19 @@ struct mlx5_accel_ipsec_sa { __be32 sw_sa_handle; __be16 tfclen; u8 enc_mode; - u8 sip_masklen; - u8 dip_masklen; + u8 reserved1[2]; u8 flags; - u8 reserved[2]; + u8 reserved2[2]; +}; + +struct mlx5_accel_ipsec_sa { + struct mlx5_accel_ipsec_sa_v1 ipsec_sa_v1; + __be16 udp_sp; + __be16 udp_dp; + u8 reserved1[4]; + __be32 esn; + __be16 vid; /* only 12 bits, rest is reserved */ + __be16 reserved2; } __packed; /** diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index 460a613059fe..a8c3fe7cff0f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -133,50 +133,46 @@ static void mlx5e_ipsec_build_hw_sa(u32 op, struct mlx5e_ipsec_sa_entry *sa_entr memset(hw_sa, 0, sizeof(*hw_sa)); - if (op == MLX5_IPSEC_CMD_ADD_SA) { - crypto_data_len = (x->aead->alg_key_len + 7) / 8; - key_len = crypto_data_len - 4; /* 4 bytes salt at end */ - aead = x->data; - geniv_ctx = crypto_aead_ctx(aead); - ivsize = crypto_aead_ivsize(aead); - - memcpy(&hw_sa->key_enc, x->aead->alg_key, key_len); - /* Duplicate 128 bit key twice according to HW layout */ - if (key_len == 16) - memcpy(&hw_sa->key_enc[16], 
x->aead->alg_key, key_len); - memcpy(&hw_sa->gcm.salt_iv, geniv_ctx->salt, ivsize); - hw_sa->gcm.salt = *((__be32 *)(x->aead->alg_key + key_len)); - } - - hw_sa->cmd = htonl(op); - hw_sa->flags |= MLX5_IPSEC_SADB_SA_VALID | MLX5_IPSEC_SADB_SPI_EN; + crypto_data_len = (x->aead->alg_key_len + 7) / 8; + key_len = crypto_data_len - 4; /* 4 bytes salt at end */ + aead = x->data; + geniv_ctx = crypto_aead_ctx(aead); + ivsize = crypto_aead_ivsize(aead); + + memcpy(&hw_sa->ipsec_sa_v1.key_enc, x->aead->alg_key, key_len); + /* Duplicate 128 bit key twice according to HW layout */ + if (key_len == 16) + memcpy(&hw_sa->ipsec_sa_v1.key_enc[16], x->aead->alg_key, key_len); + memcpy(&hw_sa->ipsec_sa_v1.gcm.salt_iv, geniv_ctx->salt, ivsize); + hw_sa->ipsec_sa_v1.gcm.salt = *((__be32 *)(x->aead->alg_key + key_len)); + + hw_sa->ipsec_sa_v1.cmd = htonl(op); + hw_sa->ipsec_sa_v1.flags |= MLX5_IPSEC_SADB_SA_VALID | MLX5_IPSEC_SADB_SPI_EN; if (x->props.family == AF_INET) { - hw_sa->sip[3] = x->props.saddr.a4; - hw_sa->dip[3] = x->id.daddr.a4; - hw_sa->sip_masklen = 32; - hw_sa->dip_masklen = 32; + hw_sa->ipsec_sa_v1.sip[3] = x->props.saddr.a4; + hw_sa->ipsec_sa_v1.dip[3] = x->id.daddr.a4; } else { - memcpy(hw_sa->sip, x->props.saddr.a6, sizeof(hw_sa->sip)); - memcpy(hw_sa->dip, x->id.daddr.a6, sizeof(hw_sa->dip)); - hw_sa->sip_masklen = 128; - hw_sa->dip_masklen = 128; - hw_sa->flags |= MLX5_IPSEC_SADB_IPV6; + memcpy(hw_sa->ipsec_sa_v1.sip, x->props.saddr.a6, + sizeof(hw_sa->ipsec_sa_v1.sip)); + memcpy(hw_sa->ipsec_sa_v1.dip, x->id.daddr.a6, + sizeof(hw_sa->ipsec_sa_v1.dip)); + hw_sa->ipsec_sa_v1.flags |= MLX5_IPSEC_SADB_IPV6; } - hw_sa->spi = x->id.spi; - hw_sa->sw_sa_handle = htonl(sa_entry->handle); + hw_sa->ipsec_sa_v1.spi = x->id.spi; + hw_sa->ipsec_sa_v1.sw_sa_handle = htonl(sa_entry->handle); switch (x->id.proto) { case IPPROTO_ESP: - hw_sa->flags |= MLX5_IPSEC_SADB_IP_ESP; + hw_sa->ipsec_sa_v1.flags |= MLX5_IPSEC_SADB_IP_ESP; break; case IPPROTO_AH: - hw_sa->flags |= MLX5_IPSEC_SADB_IP_AH; + hw_sa->ipsec_sa_v1.flags |= MLX5_IPSEC_SADB_IP_AH; break; default: break; } - hw_sa->enc_mode = mlx5e_ipsec_enc_mode(x); + hw_sa->ipsec_sa_v1.enc_mode = mlx5e_ipsec_enc_mode(x); if (!(x->xso.flags & XFRM_OFFLOAD_INBOUND)) - hw_sa->flags |= MLX5_IPSEC_SADB_DIR_SX; + hw_sa->ipsec_sa_v1.flags |= MLX5_IPSEC_SADB_DIR_SX; } static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c index 3b10d46dc821..fa5b5a0888ec 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c @@ -223,9 +223,9 @@ static int mlx5_fpga_ipsec_cmd_wait(void *ctx) } void *mlx5_fpga_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev, - struct mlx5_accel_ipsec_sa *cmd) + struct mlx5_accel_ipsec_sa *cmd, int cmd_size) { - return mlx5_fpga_ipsec_cmd_exec(mdev, cmd, sizeof(*cmd)); + return mlx5_fpga_ipsec_cmd_exec(mdev, cmd, cmd_size); } int mlx5_fpga_ipsec_sa_cmd_wait(void *ctx) @@ -239,9 +239,9 @@ int mlx5_fpga_ipsec_sa_cmd_wait(void *ctx) goto out; sa = (struct mlx5_accel_ipsec_sa *)&context->command; - if (sa->sw_sa_handle != context->resp.sw_sa_handle) { + if (sa->ipsec_sa_v1.sw_sa_handle != context->resp.sw_sa_handle) { mlx5_fpga_err(context->dev, "mismatch SA handle. 
cmd 0x%08x vs resp 0x%08x\n", - ntohl(sa->sw_sa_handle), + ntohl(sa->ipsec_sa_v1.sw_sa_handle), ntohl(context->resp.sw_sa_handle)); res = -EIO; } @@ -276,6 +276,9 @@ u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev) if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, rx_no_trailer)) ret |= MLX5_ACCEL_IPSEC_NO_TRAILER; + if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, v2_command)) + ret |= MLX5_ACCEL_IPSEC_V2_CMD; + return ret; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h index 26a3e4b56972..e5ec29f56532 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h @@ -39,7 +39,7 @@ #ifdef CONFIG_MLX5_FPGA void *mlx5_fpga_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev, - struct mlx5_accel_ipsec_sa *cmd); + struct mlx5_accel_ipsec_sa *cmd, int cmd_size); int mlx5_fpga_ipsec_sa_cmd_wait(void *context); u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev); @@ -53,7 +53,8 @@ void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev); #else static inline void *mlx5_fpga_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev, - struct mlx5_accel_ipsec_sa *cmd) + struct mlx5_accel_ipsec_sa *cmd, + int cmd_size) { return ERR_PTR(-EOPNOTSUPP); } diff --git a/include/linux/mlx5/mlx5_ifc_fpga.h b/include/linux/mlx5/mlx5_ifc_fpga.h index 643544db180b..dd7e4538159c 100644 --- a/include/linux/mlx5/mlx5_ifc_fpga.h +++ b/include/linux/mlx5/mlx5_ifc_fpga.h @@ -373,7 +373,9 @@ struct mlx5_ifc_fpga_destroy_qp_out_bits { struct mlx5_ifc_ipsec_extended_cap_bits { u8 encapsulation[0x20]; - u8 reserved_0[0x14]; + u8 reserved_0[0x12]; + u8 v2_command[0x1]; + u8 udp_encap[0x1]; u8 rx_no_trailer[0x1]; u8 ipv4_fragment[0x1]; u8 ipv6[0x1]; -- cgit v1.2.3 From 1d2005e2040b95af4c861e40cf806ff44cd7c883 Mon Sep 17 00:00:00 2001 From: Aviad Yehezkel Date: Mon, 29 Jan 2018 15:05:50 +0200 Subject: net/mlx5: Export ipsec capabilities We will need that for ipsec verbs. 
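A hypothetical out-of-module consumer (function name invented for illustration) shows what the export and the new shared header make possible; an RDMA driver can now gate its ESP verbs on the same capability bits:

#include <linux/mlx5/accel.h>

static bool hypothetical_esp_offload_usable(struct mlx5_core_dev *mdev)
{
        u32 caps = mlx5_accel_ipsec_device_caps(mdev);

        return (caps & MLX5_ACCEL_IPSEC_CAP_DEVICE) &&
               (caps & MLX5_ACCEL_IPSEC_CAP_ESP);
}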
Signed-off-by: Aviad Yehezkel Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/accel/ipsec.c | 3 +- .../net/ethernet/mellanox/mlx5/core/accel/ipsec.h | 14 +----- .../ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 9 ++-- .../net/ethernet/mellanox/mlx5/core/fpga/ipsec.c | 14 +++--- include/linux/mlx5/accel.h | 57 ++++++++++++++++++++++ 5 files changed, 73 insertions(+), 24 deletions(-) create mode 100644 include/linux/mlx5/accel.h (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c index b88ae12d9066..375ba438e7cf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c @@ -45,7 +45,7 @@ void *mlx5_accel_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev, if (!MLX5_IPSEC_DEV(mdev)) return ERR_PTR(-EOPNOTSUPP); - if (mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_V2_CMD) + if (mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_V2_CMD) cmd_size = sizeof(*cmd); else cmd_size = sizeof(cmd->ipsec_sa_v1); @@ -62,6 +62,7 @@ u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev) { return mlx5_fpga_ipsec_device_caps(mdev); } +EXPORT_SYMBOL_GPL(mlx5_accel_ipsec_device_caps); unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h index 14a2e95e82c3..421ed71a029b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h @@ -35,18 +35,10 @@ #define __MLX5_ACCEL_IPSEC_H__ #include +#include #ifdef CONFIG_MLX5_ACCEL -enum { - MLX5_ACCEL_IPSEC_DEVICE = BIT(1), - MLX5_ACCEL_IPSEC_IPV6 = BIT(2), - MLX5_ACCEL_IPSEC_ESP = BIT(3), - MLX5_ACCEL_IPSEC_LSO = BIT(4), - MLX5_ACCEL_IPSEC_NO_TRAILER = BIT(5), - MLX5_ACCEL_IPSEC_V2_CMD = BIT(7), -}; - #define MLX5_IPSEC_SADB_IP_AH BIT(7) #define MLX5_IPSEC_SADB_IP_ESP BIT(6) #define MLX5_IPSEC_SADB_SA_VALID BIT(5) @@ -70,7 +62,7 @@ enum mlx5_accel_ipsec_enc_mode { }; #define MLX5_IPSEC_DEV(mdev) (mlx5_accel_ipsec_device_caps(mdev) & \ - MLX5_ACCEL_IPSEC_DEVICE) + MLX5_ACCEL_IPSEC_CAP_DEVICE) struct mlx5_accel_ipsec_sa_v1 { __be32 cmd; @@ -126,8 +118,6 @@ void *mlx5_accel_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev, */ int mlx5_accel_ipsec_sa_cmd_wait(void *context); -u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev); - unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev); int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters, unsigned int count); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index a8c3fe7cff0f..6f4a01620cc3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -242,7 +242,8 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x) return -EINVAL; } if (x->props.family == AF_INET6 && - !(mlx5_accel_ipsec_device_caps(priv->mdev) & MLX5_ACCEL_IPSEC_IPV6)) { + !(mlx5_accel_ipsec_device_caps(priv->mdev) & + MLX5_ACCEL_IPSEC_CAP_IPV6)) { netdev_info(netdev, "IPv6 xfrm state offload is not supported by this device\n"); return -EINVAL; } @@ -375,7 +376,7 @@ int mlx5e_ipsec_init(struct mlx5e_priv *priv) ipsec->en_priv = priv; ipsec->en_priv->ipsec = ipsec; ipsec->no_trailer = 
!!(mlx5_accel_ipsec_device_caps(priv->mdev) & - MLX5_ACCEL_IPSEC_NO_TRAILER); + MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER); netdev_dbg(priv->netdev, "IPSec attached to netdevice\n"); return 0; } @@ -422,7 +423,7 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv) if (!priv->ipsec) return; - if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_ESP) || + if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_ESP) || !MLX5_CAP_ETH(mdev, swp)) { mlx5_core_dbg(mdev, "mlx5e: ESP and SWP offload not supported\n"); return; @@ -441,7 +442,7 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv) netdev->features |= NETIF_F_HW_ESP_TX_CSUM; netdev->hw_enc_features |= NETIF_F_HW_ESP_TX_CSUM; - if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_LSO) || + if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_LSO) || !MLX5_CAP_ETH(mdev, swp_lso)) { mlx5_core_dbg(mdev, "mlx5e: ESP LSO not supported\n"); return; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c index fa5b5a0888ec..e7e28277733d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c @@ -257,7 +257,7 @@ u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev) u32 ret = 0; if (mlx5_fpga_is_ipsec_device(mdev)) - ret |= MLX5_ACCEL_IPSEC_DEVICE; + ret |= MLX5_ACCEL_IPSEC_CAP_DEVICE; else return ret; @@ -265,19 +265,19 @@ u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev) return ret; if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, esp)) - ret |= MLX5_ACCEL_IPSEC_ESP; + ret |= MLX5_ACCEL_IPSEC_CAP_ESP; if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, ipv6)) - ret |= MLX5_ACCEL_IPSEC_IPV6; + ret |= MLX5_ACCEL_IPSEC_CAP_IPV6; if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, lso)) - ret |= MLX5_ACCEL_IPSEC_LSO; + ret |= MLX5_ACCEL_IPSEC_CAP_LSO; if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, rx_no_trailer)) - ret |= MLX5_ACCEL_IPSEC_NO_TRAILER; + ret |= MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER; if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, v2_command)) - ret |= MLX5_ACCEL_IPSEC_V2_CMD; + ret |= MLX5_ACCEL_IPSEC_CAP_V2_CMD; return ret; } @@ -375,7 +375,7 @@ static int mlx5_fpga_ipsec_enable_supported_caps(struct mlx5_core_dev *mdev) u32 dev_caps = mlx5_fpga_ipsec_device_caps(mdev); u32 flags = 0; - if (dev_caps & MLX5_ACCEL_IPSEC_NO_TRAILER) + if (dev_caps & MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER) flags |= MLX5_FPGA_IPSEC_CAP_NO_TRAILER; return mlx5_fpga_ipsec_set_caps(mdev, flags); diff --git a/include/linux/mlx5/accel.h b/include/linux/mlx5/accel.h new file mode 100644 index 000000000000..601280c782d3 --- /dev/null +++ b/include/linux/mlx5/accel.h @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2018 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#ifndef __MLX5_ACCEL_H__ +#define __MLX5_ACCEL_H__ + +#include + +enum mlx5_accel_ipsec_caps { + MLX5_ACCEL_IPSEC_CAP_DEVICE = 1 << 0, + MLX5_ACCEL_IPSEC_CAP_ESP = 1 << 2, + MLX5_ACCEL_IPSEC_CAP_IPV6 = 1 << 3, + MLX5_ACCEL_IPSEC_CAP_LSO = 1 << 4, + MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER = 1 << 5, + MLX5_ACCEL_IPSEC_CAP_V2_CMD = 1 << 6, +}; + +#ifdef CONFIG_MLX5_ACCEL + +u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev); + +#else + +static inline u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev) { return 0; } + +#endif +#endif -- cgit v1.2.3 From af9fe19d660e333ca9b0a6e1506e684a1126b9e7 Mon Sep 17 00:00:00 2001 From: Aviad Yehezkel Date: Wed, 17 Jan 2018 11:20:33 +0200 Subject: net/mlx5: Added required metadata capability for ipsec Currently our device requires additional metadata in packet to perform ipsec crypto offload. Signed-off-by: Aviad Yehezkel Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c | 6 ++++-- include/linux/mlx5/accel.h | 1 + 2 files changed, 5 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c index e7e28277733d..8de992ba7230 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c @@ -256,10 +256,12 @@ u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev) struct mlx5_fpga_device *fdev = mdev->fpga; u32 ret = 0; - if (mlx5_fpga_is_ipsec_device(mdev)) + if (mlx5_fpga_is_ipsec_device(mdev)) { ret |= MLX5_ACCEL_IPSEC_CAP_DEVICE; - else + ret |= MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA; + } else { return ret; + } if (!fdev->ipsec) return ret; diff --git a/include/linux/mlx5/accel.h b/include/linux/mlx5/accel.h index 601280c782d3..b674af63689b 100644 --- a/include/linux/mlx5/accel.h +++ b/include/linux/mlx5/accel.h @@ -38,6 +38,7 @@ enum mlx5_accel_ipsec_caps { MLX5_ACCEL_IPSEC_CAP_DEVICE = 1 << 0, + MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA = 1 << 1, MLX5_ACCEL_IPSEC_CAP_ESP = 1 << 2, MLX5_ACCEL_IPSEC_CAP_IPV6 = 1 << 3, MLX5_ACCEL_IPSEC_CAP_LSO = 1 << 4, -- cgit v1.2.3 From d6c4f0298cec8c4c88d33aca17c066995e92fe91 Mon Sep 17 00:00:00 2001 From: Aviad Yehezkel Date: Thu, 18 Jan 2018 13:05:48 +0200 Subject: net/mlx5: Refactor accel IPSec code The current code has one layer that executed FPGA commands and the Ethernet part directly used this code. Since downstream patches introduces support for IPSec in mlx5_ib, we need to provide some abstractions. This patch refactors the accel code into one layer that creates a software IPSec transformation and another one which creates the actual hardware context. The internal command implementation is now hidden in the FPGA core layer. 
The code also adds the ability to share FPGA hardware contexts. If two contexts are the same, only a reference count is taken. Signed-off-by: Aviad Yehezkel Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/accel/ipsec.c | 58 +-- .../net/ethernet/mellanox/mlx5/core/accel/ipsec.h | 97 ++--- .../ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 150 ++++---- .../net/ethernet/mellanox/mlx5/core/fpga/ipsec.c | 391 +++++++++++++++++++-- .../net/ethernet/mellanox/mlx5/core/fpga/ipsec.h | 55 ++- include/linux/mlx5/accel.h | 83 ++++- include/linux/mlx5/mlx5_ifc_fpga.h | 59 ++++ 7 files changed, 668 insertions(+), 225 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c index 375ba438e7cf..ab5bc82855fd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c @@ -37,27 +37,6 @@ #include "mlx5_core.h" #include "fpga/ipsec.h" -void *mlx5_accel_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev, - struct mlx5_accel_ipsec_sa *cmd) -{ - int cmd_size; - - if (!MLX5_IPSEC_DEV(mdev)) - return ERR_PTR(-EOPNOTSUPP); - - if (mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_V2_CMD) - cmd_size = sizeof(*cmd); - else - cmd_size = sizeof(cmd->ipsec_sa_v1); - - return mlx5_fpga_ipsec_sa_cmd_exec(mdev, cmd, cmd_size); -} - -int mlx5_accel_ipsec_sa_cmd_wait(void *ctx) -{ - return mlx5_fpga_ipsec_sa_cmd_wait(ctx); -} - u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev) { return mlx5_fpga_ipsec_device_caps(mdev); @@ -75,6 +54,21 @@ int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters, return mlx5_fpga_ipsec_counters_read(mdev, counters, count); } +void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev, + struct mlx5_accel_esp_xfrm *xfrm, + const __be32 saddr[4], + const __be32 daddr[4], + const __be32 spi, bool is_ipv6) +{ + return mlx5_fpga_ipsec_create_sa_ctx(mdev, xfrm, saddr, daddr, + spi, is_ipv6); +} + +void mlx5_accel_esp_free_hw_context(void *context) +{ + mlx5_fpga_ipsec_delete_sa_ctx(context); +} + int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev) { return mlx5_fpga_ipsec_init(mdev); @@ -84,3 +78,25 @@ void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev) { mlx5_fpga_ipsec_cleanup(mdev); } + +struct mlx5_accel_esp_xfrm * +mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev, + const struct mlx5_accel_esp_xfrm_attrs *attrs, + u32 flags) +{ + struct mlx5_accel_esp_xfrm *xfrm; + + xfrm = mlx5_fpga_esp_create_xfrm(mdev, attrs, flags); + if (IS_ERR(xfrm)) + return xfrm; + + xfrm->mdev = mdev; + return xfrm; +} +EXPORT_SYMBOL_GPL(mlx5_accel_esp_create_xfrm); + +void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm) +{ + mlx5_fpga_esp_destroy_xfrm(xfrm); +} +EXPORT_SYMBOL_GPL(mlx5_accel_esp_destroy_xfrm); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h index 421ed71a029b..024dbd22a89b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h @@ -39,89 +39,20 @@ #ifdef CONFIG_MLX5_ACCEL -#define MLX5_IPSEC_SADB_IP_AH BIT(7) -#define MLX5_IPSEC_SADB_IP_ESP BIT(6) -#define MLX5_IPSEC_SADB_SA_VALID BIT(5) -#define MLX5_IPSEC_SADB_SPI_EN BIT(4) -#define MLX5_IPSEC_SADB_DIR_SX BIT(3) -#define MLX5_IPSEC_SADB_IPV6 BIT(2) - -enum { - MLX5_IPSEC_CMD_ADD_SA = 0, - MLX5_IPSEC_CMD_DEL_SA = 1, - 
MLX5_IPSEC_CMD_ADD_SA_V2 = 2, - MLX5_IPSEC_CMD_DEL_SA_V2 = 3, - MLX5_IPSEC_CMD_MOD_SA_V2 = 4, - MLX5_IPSEC_CMD_SET_CAP = 5, -}; - -enum mlx5_accel_ipsec_enc_mode { - MLX5_IPSEC_SADB_MODE_NONE = 0, - MLX5_IPSEC_SADB_MODE_AES_GCM_128_AUTH_128 = 1, - MLX5_IPSEC_SADB_MODE_AES_GCM_256_AUTH_128 = 3, -}; - #define MLX5_IPSEC_DEV(mdev) (mlx5_accel_ipsec_device_caps(mdev) & \ MLX5_ACCEL_IPSEC_CAP_DEVICE) -struct mlx5_accel_ipsec_sa_v1 { - __be32 cmd; - u8 key_enc[32]; - u8 key_auth[32]; - __be32 sip[4]; - __be32 dip[4]; - union { - struct { - __be32 reserved; - u8 salt_iv[8]; - __be32 salt; - } __packed gcm; - struct { - u8 salt[16]; - } __packed cbc; - }; - __be32 spi; - __be32 sw_sa_handle; - __be16 tfclen; - u8 enc_mode; - u8 reserved1[2]; - u8 flags; - u8 reserved2[2]; -}; - -struct mlx5_accel_ipsec_sa { - struct mlx5_accel_ipsec_sa_v1 ipsec_sa_v1; - __be16 udp_sp; - __be16 udp_dp; - u8 reserved1[4]; - __be32 esn; - __be16 vid; /* only 12 bits, rest is reserved */ - __be16 reserved2; -} __packed; - -/** - * mlx5_accel_ipsec_sa_cmd_exec - Execute an IPSec SADB command - * @mdev: mlx5 device - * @cmd: command to execute - * May be called from atomic context. Returns context pointer, or error - * Caller must eventually call mlx5_accel_ipsec_sa_cmd_wait from non-atomic - * context, to cleanup the context pointer - */ -void *mlx5_accel_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev, - struct mlx5_accel_ipsec_sa *cmd); - -/** - * mlx5_accel_ipsec_sa_cmd_wait - Wait for command execution completion - * @context: Context pointer returned from call to mlx5_accel_ipsec_sa_cmd_exec - * Sleeps (killable) until command execution is complete. - * Returns the command result, or -EINTR if killed - */ -int mlx5_accel_ipsec_sa_cmd_wait(void *context); - unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev); int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters, unsigned int count); +void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev, + struct mlx5_accel_esp_xfrm *xfrm, + const __be32 saddr[4], + const __be32 daddr[4], + const __be32 spi, bool is_ipv6); +void mlx5_accel_esp_free_hw_context(void *context); + int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev); void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev); @@ -129,6 +60,20 @@ void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev); #define MLX5_IPSEC_DEV(mdev) false +static inline void * +mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev, + struct mlx5_accel_esp_xfrm *xfrm, + const __be32 saddr[4], + const __be32 daddr[4], + const __be32 spi, bool is_ipv6) +{ + return NULL; +} + +static inline void mlx5_accel_esp_free_hw_context(void *context) +{ +} + static inline int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev) { return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index 6f4a01620cc3..59df3dbd2e65 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -47,7 +47,8 @@ struct mlx5e_ipsec_sa_entry { unsigned int handle; /* Handle in SADB_RX */ struct xfrm_state *x; struct mlx5e_ipsec *ipsec; - void *context; + struct mlx5_accel_esp_xfrm *xfrm; + void *hw_context; }; struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *ipsec, @@ -105,74 +106,51 @@ static void mlx5e_ipsec_sadb_rx_free(struct mlx5e_ipsec_sa_entry *sa_entry) ida_simple_remove(&ipsec->halloc, sa_entry->handle); } -static enum 
mlx5_accel_ipsec_enc_mode mlx5e_ipsec_enc_mode(struct xfrm_state *x) -{ - unsigned int key_len = (x->aead->alg_key_len + 7) / 8 - 4; - - switch (key_len) { - case 16: - return MLX5_IPSEC_SADB_MODE_AES_GCM_128_AUTH_128; - case 32: - return MLX5_IPSEC_SADB_MODE_AES_GCM_256_AUTH_128; - default: - netdev_warn(x->xso.dev, "Bad key len: %d for alg %s\n", - key_len, x->aead->alg_name); - return -1; - } -} - -static void mlx5e_ipsec_build_hw_sa(u32 op, struct mlx5e_ipsec_sa_entry *sa_entry, - struct mlx5_accel_ipsec_sa *hw_sa) +static void +mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry, + struct mlx5_accel_esp_xfrm_attrs *attrs) { struct xfrm_state *x = sa_entry->x; + struct aes_gcm_keymat *aes_gcm = &attrs->keymat.aes_gcm; struct aead_geniv_ctx *geniv_ctx; - unsigned int crypto_data_len; struct crypto_aead *aead; - unsigned int key_len; + unsigned int crypto_data_len, key_len; int ivsize; - memset(hw_sa, 0, sizeof(*hw_sa)); + memset(attrs, 0, sizeof(*attrs)); + /* key */ crypto_data_len = (x->aead->alg_key_len + 7) / 8; key_len = crypto_data_len - 4; /* 4 bytes salt at end */ + + memcpy(aes_gcm->aes_key, x->aead->alg_key, key_len); + aes_gcm->key_len = key_len * 8; + + /* salt and seq_iv */ aead = x->data; geniv_ctx = crypto_aead_ctx(aead); ivsize = crypto_aead_ivsize(aead); - - memcpy(&hw_sa->ipsec_sa_v1.key_enc, x->aead->alg_key, key_len); - /* Duplicate 128 bit key twice according to HW layout */ - if (key_len == 16) - memcpy(&hw_sa->ipsec_sa_v1.key_enc[16], x->aead->alg_key, key_len); - memcpy(&hw_sa->ipsec_sa_v1.gcm.salt_iv, geniv_ctx->salt, ivsize); - hw_sa->ipsec_sa_v1.gcm.salt = *((__be32 *)(x->aead->alg_key + key_len)); - - hw_sa->ipsec_sa_v1.cmd = htonl(op); - hw_sa->ipsec_sa_v1.flags |= MLX5_IPSEC_SADB_SA_VALID | MLX5_IPSEC_SADB_SPI_EN; - if (x->props.family == AF_INET) { - hw_sa->ipsec_sa_v1.sip[3] = x->props.saddr.a4; - hw_sa->ipsec_sa_v1.dip[3] = x->id.daddr.a4; - } else { - memcpy(hw_sa->ipsec_sa_v1.sip, x->props.saddr.a6, - sizeof(hw_sa->ipsec_sa_v1.sip)); - memcpy(hw_sa->ipsec_sa_v1.dip, x->id.daddr.a6, - sizeof(hw_sa->ipsec_sa_v1.dip)); - hw_sa->ipsec_sa_v1.flags |= MLX5_IPSEC_SADB_IPV6; - } - hw_sa->ipsec_sa_v1.spi = x->id.spi; - hw_sa->ipsec_sa_v1.sw_sa_handle = htonl(sa_entry->handle); - switch (x->id.proto) { - case IPPROTO_ESP: - hw_sa->ipsec_sa_v1.flags |= MLX5_IPSEC_SADB_IP_ESP; - break; - case IPPROTO_AH: - hw_sa->ipsec_sa_v1.flags |= MLX5_IPSEC_SADB_IP_AH; - break; - default: - break; - } - hw_sa->ipsec_sa_v1.enc_mode = mlx5e_ipsec_enc_mode(x); - if (!(x->xso.flags & XFRM_OFFLOAD_INBOUND)) - hw_sa->ipsec_sa_v1.flags |= MLX5_IPSEC_SADB_DIR_SX; + memcpy(&aes_gcm->seq_iv, &geniv_ctx->salt, ivsize); + memcpy(&aes_gcm->salt, x->aead->alg_key + key_len, + sizeof(aes_gcm->salt)); + + /* iv len */ + aes_gcm->icv_len = x->aead->alg_icv_len; + + /* rx handle */ + attrs->sa_handle = sa_entry->handle; + + /* algo type */ + attrs->keymat_type = MLX5_ACCEL_ESP_KEYMAT_AES_GCM; + + /* action */ + attrs->action = (!(x->xso.flags & XFRM_OFFLOAD_INBOUND)) ? + MLX5_ACCEL_ESP_ACTION_ENCRYPT : + MLX5_ACCEL_ESP_ACTION_DECRYPT; + /* flags */ + attrs->flags |= (x->props.mode == XFRM_MODE_TRANSPORT) ? 
+ MLX5_ACCEL_ESP_FLAGS_TRANSPORT : + MLX5_ACCEL_ESP_FLAGS_TUNNEL; } static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x) @@ -254,9 +232,10 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x) { struct mlx5e_ipsec_sa_entry *sa_entry = NULL; struct net_device *netdev = x->xso.dev; - struct mlx5_accel_ipsec_sa hw_sa; + struct mlx5_accel_esp_xfrm_attrs attrs; struct mlx5e_priv *priv; - void *context; + __be32 saddr[4] = {0}, daddr[4] = {0}, spi; + bool is_ipv6 = false; int err; priv = netdev_priv(netdev); @@ -285,20 +264,41 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x) } } - mlx5e_ipsec_build_hw_sa(MLX5_IPSEC_CMD_ADD_SA, sa_entry, &hw_sa); - context = mlx5_accel_ipsec_sa_cmd_exec(sa_entry->ipsec->en_priv->mdev, &hw_sa); - if (IS_ERR(context)) { - err = PTR_ERR(context); + /* create xfrm */ + mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &attrs); + sa_entry->xfrm = + mlx5_accel_esp_create_xfrm(priv->mdev, &attrs, + MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA); + if (IS_ERR(sa_entry->xfrm)) { + err = PTR_ERR(sa_entry->xfrm); goto err_sadb_rx; } - err = mlx5_accel_ipsec_sa_cmd_wait(context); - if (err) - goto err_sadb_rx; + /* create hw context */ + if (x->props.family == AF_INET) { + saddr[3] = x->props.saddr.a4; + daddr[3] = x->id.daddr.a4; + } else { + memcpy(saddr, x->props.saddr.a6, sizeof(saddr)); + memcpy(daddr, x->id.daddr.a6, sizeof(daddr)); + is_ipv6 = true; + } + spi = x->id.spi; + sa_entry->hw_context = + mlx5_accel_esp_create_hw_context(priv->mdev, + sa_entry->xfrm, + saddr, daddr, spi, + is_ipv6); + if (IS_ERR(sa_entry->hw_context)) { + err = PTR_ERR(sa_entry->hw_context); + goto err_xfrm; + } x->xso.offload_handle = (unsigned long)sa_entry; goto out; +err_xfrm: + mlx5_accel_esp_destroy_xfrm(sa_entry->xfrm); err_sadb_rx: if (x->xso.flags & XFRM_OFFLOAD_INBOUND) { mlx5e_ipsec_sadb_rx_del(sa_entry); @@ -313,8 +313,6 @@ out: static void mlx5e_xfrm_del_state(struct xfrm_state *x) { struct mlx5e_ipsec_sa_entry *sa_entry; - struct mlx5_accel_ipsec_sa hw_sa; - void *context; if (!x->xso.offload_handle) return; @@ -324,19 +322,11 @@ static void mlx5e_xfrm_del_state(struct xfrm_state *x) if (x->xso.flags & XFRM_OFFLOAD_INBOUND) mlx5e_ipsec_sadb_rx_del(sa_entry); - - mlx5e_ipsec_build_hw_sa(MLX5_IPSEC_CMD_DEL_SA, sa_entry, &hw_sa); - context = mlx5_accel_ipsec_sa_cmd_exec(sa_entry->ipsec->en_priv->mdev, &hw_sa); - if (IS_ERR(context)) - return; - - sa_entry->context = context; } static void mlx5e_xfrm_free_state(struct xfrm_state *x) { struct mlx5e_ipsec_sa_entry *sa_entry; - int res; if (!x->xso.offload_handle) return; @@ -344,11 +334,9 @@ static void mlx5e_xfrm_free_state(struct xfrm_state *x) sa_entry = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle; WARN_ON(sa_entry->x != x); - res = mlx5_accel_ipsec_sa_cmd_wait(sa_entry->context); - sa_entry->context = NULL; - if (res) { - /* Leftover object will leak */ - return; + if (sa_entry->hw_context) { + mlx5_accel_esp_free_hw_context(sa_entry->hw_context); + mlx5_accel_esp_destroy_xfrm(sa_entry->xfrm); } if (x->xso.flags & XFRM_OFFLOAD_INBOUND) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c index 8de992ba7230..daae44c937f0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c @@ -31,6 +31,7 @@ * */ +#include #include #include "mlx5_core.h" @@ -47,7 +48,7 @@ enum mlx5_fpga_ipsec_cmd_status { MLX5_FPGA_IPSEC_CMD_COMPLETE, }; -struct mlx5_ipsec_command_context { +struct 
mlx5_fpga_ipsec_cmd_context { struct mlx5_fpga_dma_buf buf; enum mlx5_fpga_ipsec_cmd_status status; struct mlx5_ifc_fpga_ipsec_cmd_resp resp; @@ -58,11 +59,43 @@ struct mlx5_ipsec_command_context { u8 command[0]; }; +struct mlx5_fpga_esp_xfrm; + +struct mlx5_fpga_ipsec_sa_ctx { + struct rhash_head hash; + struct mlx5_ifc_fpga_ipsec_sa hw_sa; + struct mlx5_core_dev *dev; + struct mlx5_fpga_esp_xfrm *fpga_xfrm; +}; + +struct mlx5_fpga_esp_xfrm { + unsigned int num_rules; + struct mlx5_fpga_ipsec_sa_ctx *sa_ctx; + struct mutex lock; /* xfrm lock */ + struct mlx5_accel_esp_xfrm accel_xfrm; +}; + +static const struct rhashtable_params rhash_sa = { + .key_len = FIELD_SIZEOF(struct mlx5_fpga_ipsec_sa_ctx, hw_sa), + .key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa), + .head_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hash), + .automatic_shrinking = true, + .min_size = 1, +}; + struct mlx5_fpga_ipsec { struct list_head pending_cmds; spinlock_t pending_cmds_lock; /* Protects pending_cmds */ u32 caps[MLX5_ST_SZ_DW(ipsec_extended_cap)]; struct mlx5_fpga_conn *conn; + + /* Map hardware SA --> SA context + * (mlx5_fpga_ipsec_sa) (mlx5_fpga_ipsec_sa_ctx) + * We will use this hash to avoid SA duplication in the fpga, + * which is not allowed + */ + struct rhashtable sa_hash; /* hw_sa -> mlx5_fpga_ipsec_sa_ctx */ + struct mutex sa_hash_lock; }; static bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev) @@ -86,10 +119,10 @@ static void mlx5_fpga_ipsec_send_complete(struct mlx5_fpga_conn *conn, struct mlx5_fpga_dma_buf *buf, u8 status) { - struct mlx5_ipsec_command_context *context; + struct mlx5_fpga_ipsec_cmd_context *context; if (status) { - context = container_of(buf, struct mlx5_ipsec_command_context, + context = container_of(buf, struct mlx5_fpga_ipsec_cmd_context, buf); mlx5_fpga_warn(fdev, "IPSec command send failed with status %u\n", status); @@ -117,7 +150,7 @@ int syndrome_to_errno(enum mlx5_ifc_fpga_ipsec_response_syndrome syndrome) static void mlx5_fpga_ipsec_recv(void *cb_arg, struct mlx5_fpga_dma_buf *buf) { struct mlx5_ifc_fpga_ipsec_cmd_resp *resp = buf->sg[0].data; - struct mlx5_ipsec_command_context *context; + struct mlx5_fpga_ipsec_cmd_context *context; enum mlx5_ifc_fpga_ipsec_response_syndrome syndrome; struct mlx5_fpga_device *fdev = cb_arg; unsigned long flags; @@ -133,7 +166,7 @@ static void mlx5_fpga_ipsec_recv(void *cb_arg, struct mlx5_fpga_dma_buf *buf) spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags); context = list_first_entry_or_null(&fdev->ipsec->pending_cmds, - struct mlx5_ipsec_command_context, + struct mlx5_fpga_ipsec_cmd_context, list); if (context) list_del(&context->list); @@ -160,7 +193,7 @@ static void mlx5_fpga_ipsec_recv(void *cb_arg, struct mlx5_fpga_dma_buf *buf) static void *mlx5_fpga_ipsec_cmd_exec(struct mlx5_core_dev *mdev, const void *cmd, int cmd_size) { - struct mlx5_ipsec_command_context *context; + struct mlx5_fpga_ipsec_cmd_context *context; struct mlx5_fpga_device *fdev = mdev->fpga; unsigned long flags; int res; @@ -203,7 +236,7 @@ static void *mlx5_fpga_ipsec_cmd_exec(struct mlx5_core_dev *mdev, static int mlx5_fpga_ipsec_cmd_wait(void *ctx) { - struct mlx5_ipsec_command_context *context = ctx; + struct mlx5_fpga_ipsec_cmd_context *context = ctx; unsigned long timeout = msecs_to_jiffies(MLX5_FPGA_IPSEC_CMD_TIMEOUT_MSEC); int res; @@ -222,33 +255,49 @@ static int mlx5_fpga_ipsec_cmd_wait(void *ctx) return res; } -void *mlx5_fpga_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev, - struct mlx5_accel_ipsec_sa *cmd, int cmd_size)
+static inline bool is_v2_sadb_supported(struct mlx5_fpga_ipsec *fipsec) { - return mlx5_fpga_ipsec_cmd_exec(mdev, cmd, cmd_size); + if (MLX5_GET(ipsec_extended_cap, fipsec->caps, v2_command)) + return true; + return false; } -int mlx5_fpga_ipsec_sa_cmd_wait(void *ctx) +static int mlx5_fpga_ipsec_update_hw_sa(struct mlx5_fpga_device *fdev, + struct mlx5_ifc_fpga_ipsec_sa *hw_sa, + int opcode) { - struct mlx5_ipsec_command_context *context = ctx; - struct mlx5_accel_ipsec_sa *sa; - int res; + struct mlx5_core_dev *dev = fdev->mdev; + struct mlx5_ifc_fpga_ipsec_sa *sa; + struct mlx5_fpga_ipsec_cmd_context *cmd_context; + size_t sa_cmd_size; + int err; - res = mlx5_fpga_ipsec_cmd_wait(ctx); - if (res) + hw_sa->ipsec_sa_v1.cmd = htonl(opcode); + if (is_v2_sadb_supported(fdev->ipsec)) + sa_cmd_size = sizeof(*hw_sa); + else + sa_cmd_size = sizeof(hw_sa->ipsec_sa_v1); + + cmd_context = (struct mlx5_fpga_ipsec_cmd_context *) + mlx5_fpga_ipsec_cmd_exec(dev, hw_sa, sa_cmd_size); + if (IS_ERR(cmd_context)) + return PTR_ERR(cmd_context); + + err = mlx5_fpga_ipsec_cmd_wait(cmd_context); + if (err) goto out; - sa = (struct mlx5_accel_ipsec_sa *)&context->command; - if (sa->ipsec_sa_v1.sw_sa_handle != context->resp.sw_sa_handle) { - mlx5_fpga_err(context->dev, "mismatch SA handle. cmd 0x%08x vs resp 0x%08x\n", + sa = (struct mlx5_ifc_fpga_ipsec_sa *)&cmd_context->command; + if (sa->ipsec_sa_v1.sw_sa_handle != cmd_context->resp.sw_sa_handle) { + mlx5_fpga_err(fdev, "mismatch SA handle. cmd 0x%08x vs resp 0x%08x\n", ntohl(sa->ipsec_sa_v1.sw_sa_handle), - ntohl(context->resp.sw_sa_handle)); - res = -EIO; + ntohl(cmd_context->resp.sw_sa_handle)); + err = -EIO; } out: - kfree(context); - return res; + kfree(cmd_context); + return err; } u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev) @@ -278,9 +327,6 @@ u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev) if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, rx_no_trailer)) ret |= MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER; - if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, v2_command)) - ret |= MLX5_ACCEL_IPSEC_CAP_V2_CMD; - return ret; } @@ -345,11 +391,11 @@ out: static int mlx5_fpga_ipsec_set_caps(struct mlx5_core_dev *mdev, u32 flags) { - struct mlx5_ipsec_command_context *context; + struct mlx5_fpga_ipsec_cmd_context *context; struct mlx5_ifc_fpga_ipsec_cmd_cap cmd = {0}; int err; - cmd.cmd = htonl(MLX5_IPSEC_CMD_SET_CAP); + cmd.cmd = htonl(MLX5_FPGA_IPSEC_CMD_OP_SET_CAP); cmd.flags = htonl(flags); context = mlx5_fpga_ipsec_cmd_exec(mdev, &cmd, sizeof(cmd)); if (IS_ERR(context)) { @@ -383,6 +429,200 @@ static int mlx5_fpga_ipsec_enable_supported_caps(struct mlx5_core_dev *mdev) return mlx5_fpga_ipsec_set_caps(mdev, flags); } +static void +mlx5_fpga_ipsec_build_hw_xfrm(struct mlx5_core_dev *mdev, + const struct mlx5_accel_esp_xfrm_attrs *xfrm_attrs, + struct mlx5_ifc_fpga_ipsec_sa *hw_sa) +{ + const struct aes_gcm_keymat *aes_gcm = &xfrm_attrs->keymat.aes_gcm; + + /* key */ + memcpy(&hw_sa->ipsec_sa_v1.key_enc, aes_gcm->aes_key, + aes_gcm->key_len / 8); + /* Duplicate 128 bit key twice according to HW layout */ + if (aes_gcm->key_len == 128) + memcpy(&hw_sa->ipsec_sa_v1.key_enc[16], + aes_gcm->aes_key, aes_gcm->key_len / 8); + + /* salt and seq_iv */ + memcpy(&hw_sa->ipsec_sa_v1.gcm.salt_iv, &aes_gcm->seq_iv, + sizeof(aes_gcm->seq_iv)); + memcpy(&hw_sa->ipsec_sa_v1.gcm.salt, &aes_gcm->salt, + sizeof(aes_gcm->salt)); + + /* rx handle */ + hw_sa->ipsec_sa_v1.sw_sa_handle = htonl(xfrm_attrs->sa_handle); + + /* enc mode */ + switch 
(aes_gcm->key_len) { + case 128: + hw_sa->ipsec_sa_v1.enc_mode = + MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_128_AUTH_128; + break; + case 256: + hw_sa->ipsec_sa_v1.enc_mode = + MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_256_AUTH_128; + break; + } + + /* flags */ + hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_SA_VALID | + MLX5_FPGA_IPSEC_SA_SPI_EN | + MLX5_FPGA_IPSEC_SA_IP_ESP; + + if (xfrm_attrs->action & MLX5_ACCEL_ESP_ACTION_ENCRYPT) + hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_DIR_SX; + else + hw_sa->ipsec_sa_v1.flags &= ~MLX5_FPGA_IPSEC_SA_DIR_SX; +} + +static void +mlx5_fpga_ipsec_build_hw_sa(struct mlx5_core_dev *mdev, + struct mlx5_accel_esp_xfrm_attrs *xfrm_attrs, + const __be32 saddr[4], + const __be32 daddr[4], + const __be32 spi, bool is_ipv6, + struct mlx5_ifc_fpga_ipsec_sa *hw_sa) +{ + mlx5_fpga_ipsec_build_hw_xfrm(mdev, xfrm_attrs, hw_sa); + + /* IPs */ + memcpy(hw_sa->ipsec_sa_v1.sip, saddr, sizeof(hw_sa->ipsec_sa_v1.sip)); + memcpy(hw_sa->ipsec_sa_v1.dip, daddr, sizeof(hw_sa->ipsec_sa_v1.dip)); + + /* SPI */ + hw_sa->ipsec_sa_v1.spi = spi; + + /* flags */ + if (is_ipv6) + hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_IPV6; +} + +void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev, + struct mlx5_accel_esp_xfrm *accel_xfrm, + const __be32 saddr[4], + const __be32 daddr[4], + const __be32 spi, bool is_ipv6) +{ + struct mlx5_fpga_ipsec_sa_ctx *sa_ctx; + struct mlx5_fpga_esp_xfrm *fpga_xfrm = + container_of(accel_xfrm, typeof(*fpga_xfrm), + accel_xfrm); + struct mlx5_fpga_device *fdev = mdev->fpga; + struct mlx5_fpga_ipsec *fipsec = fdev->ipsec; + int opcode, err; + void *context; + + /* alloc SA */ + sa_ctx = kzalloc(sizeof(*sa_ctx), GFP_KERNEL); + if (!sa_ctx) + return ERR_PTR(-ENOMEM); + + sa_ctx->dev = mdev; + + /* build candidate SA */ + mlx5_fpga_ipsec_build_hw_sa(mdev, &accel_xfrm->attrs, + saddr, daddr, spi, is_ipv6, + &sa_ctx->hw_sa); + + mutex_lock(&fpga_xfrm->lock); + + if (fpga_xfrm->sa_ctx) { /* multiple rules for same accel_xfrm */ + /* all rules must have the same IPs and SPI */ + if (memcmp(&sa_ctx->hw_sa, &fpga_xfrm->sa_ctx->hw_sa, + sizeof(sa_ctx->hw_sa))) { + context = ERR_PTR(-EINVAL); + goto exists; + } + + ++fpga_xfrm->num_rules; + context = fpga_xfrm->sa_ctx; + goto exists; + } + + /* This is an unbound fpga_xfrm, try to add it to the hash */ + mutex_lock(&fipsec->sa_hash_lock); + + err = rhashtable_lookup_insert_fast(&fipsec->sa_hash, &sa_ctx->hash, + rhash_sa); + if (err) { + /* Can't bind a different accel_xfrm to an already existing sa_ctx. + * This is because we can't support multiple keymats for + * the same IPs and SPI + */ + context = ERR_PTR(-EEXIST); + goto unlock_hash; + } + + /* Bind accel_xfrm to sa_ctx */ + opcode = is_v2_sadb_supported(fdev->ipsec) ?
+ MLX5_FPGA_IPSEC_CMD_OP_ADD_SA_V2 : + MLX5_FPGA_IPSEC_CMD_OP_ADD_SA; + err = mlx5_fpga_ipsec_update_hw_sa(fdev, &sa_ctx->hw_sa, opcode); + sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0; + if (err) { + context = ERR_PTR(err); + goto delete_hash; + } + + mutex_unlock(&fipsec->sa_hash_lock); + + ++fpga_xfrm->num_rules; + fpga_xfrm->sa_ctx = sa_ctx; + sa_ctx->fpga_xfrm = fpga_xfrm; + + mutex_unlock(&fpga_xfrm->lock); + + return sa_ctx; + +delete_hash: + WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, &sa_ctx->hash, + rhash_sa)); +unlock_hash: + mutex_unlock(&fipsec->sa_hash_lock); + +exists: + mutex_unlock(&fpga_xfrm->lock); + kfree(sa_ctx); + return context; +} + +static void +mlx5_fpga_ipsec_release_sa_ctx(struct mlx5_fpga_ipsec_sa_ctx *sa_ctx) +{ + struct mlx5_fpga_device *fdev = sa_ctx->dev->fpga; + struct mlx5_fpga_ipsec *fipsec = fdev->ipsec; + int opcode = is_v2_sadb_supported(fdev->ipsec) ? + MLX5_FPGA_IPSEC_CMD_OP_DEL_SA_V2 : + MLX5_FPGA_IPSEC_CMD_OP_DEL_SA; + int err; + + err = mlx5_fpga_ipsec_update_hw_sa(fdev, &sa_ctx->hw_sa, opcode); + sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0; + if (err) { + WARN_ON(err); + return; + } + + mutex_lock(&fipsec->sa_hash_lock); + WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, &sa_ctx->hash, + rhash_sa)); + mutex_unlock(&fipsec->sa_hash_lock); +} + +void mlx5_fpga_ipsec_delete_sa_ctx(void *context) +{ + struct mlx5_fpga_esp_xfrm *fpga_xfrm = + ((struct mlx5_fpga_ipsec_sa_ctx *)context)->fpga_xfrm; + + mutex_lock(&fpga_xfrm->lock); + if (!--fpga_xfrm->num_rules) { + mlx5_fpga_ipsec_release_sa_ctx(fpga_xfrm->sa_ctx); + fpga_xfrm->sa_ctx = NULL; + } + mutex_unlock(&fpga_xfrm->lock); +} + int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev) { struct mlx5_fpga_conn_attr init_attr = {0}; @@ -421,15 +661,23 @@ int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev) } fdev->ipsec->conn = conn; + err = rhashtable_init(&fdev->ipsec->sa_hash, &rhash_sa); + if (err) + goto err_destroy_conn; + mutex_init(&fdev->ipsec->sa_hash_lock); + err = mlx5_fpga_ipsec_enable_supported_caps(mdev); if (err) { mlx5_fpga_err(fdev, "Failed to enable IPSec extended capabilities: %d\n", err); - goto err_destroy_conn; + goto err_destroy_hash; } return 0; +err_destroy_hash: + rhashtable_destroy(&fdev->ipsec->sa_hash); + err_destroy_conn: mlx5_fpga_sbu_conn_destroy(conn); @@ -446,7 +694,92 @@ void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev) if (!mlx5_fpga_is_ipsec_device(mdev)) return; + rhashtable_destroy(&fdev->ipsec->sa_hash); + mlx5_fpga_sbu_conn_destroy(fdev->ipsec->conn); kfree(fdev->ipsec); fdev->ipsec = NULL; } + +static int +mlx5_fpga_esp_validate_xfrm_attrs(struct mlx5_core_dev *mdev, + const struct mlx5_accel_esp_xfrm_attrs *attrs) +{ + if (attrs->tfc_pad) { + mlx5_core_err(mdev, "Cannot offload xfrm states with tfc padding\n"); + return -EOPNOTSUPP; + } + + if (attrs->replay_type != MLX5_ACCEL_ESP_REPLAY_NONE) { + mlx5_core_err(mdev, "Cannot offload xfrm states with anti replay\n"); + return -EOPNOTSUPP; + } + + if (attrs->keymat_type != MLX5_ACCEL_ESP_KEYMAT_AES_GCM) { + mlx5_core_err(mdev, "Only aes gcm keymat is supported\n"); + return -EOPNOTSUPP; + } + + if (attrs->keymat.aes_gcm.iv_algo != + MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ) { + mlx5_core_err(mdev, "Only iv sequence algo is supported\n"); + return -EOPNOTSUPP; + } + + if (attrs->keymat.aes_gcm.icv_len != 128) { + mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD ICV length other than 128bit\n"); + return -EOPNOTSUPP; + } + + if (attrs->keymat.aes_gcm.key_len != 128 && + attrs->keymat.aes_gcm.key_len != 256) { + 
mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD key length other than 128/256 bit\n"); + return -EOPNOTSUPP; + } + + if ((attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) && + (!MLX5_GET(ipsec_extended_cap, mdev->fpga->ipsec->caps, + v2_command))) { + mlx5_core_err(mdev, "Cannot offload xfrm states with ESN without v2 command support\n"); + return -EOPNOTSUPP; + } + + return 0; +} + +struct mlx5_accel_esp_xfrm * +mlx5_fpga_esp_create_xfrm(struct mlx5_core_dev *mdev, + const struct mlx5_accel_esp_xfrm_attrs *attrs, + u32 flags) +{ + struct mlx5_fpga_esp_xfrm *fpga_xfrm; + + if (!(flags & MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA)) { + mlx5_core_warn(mdev, "Tried to create an esp action without metadata\n"); + return ERR_PTR(-EINVAL); + } + + if (mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) { + mlx5_core_warn(mdev, "Tried to create an esp with unsupported attrs\n"); + return ERR_PTR(-EOPNOTSUPP); + } + + fpga_xfrm = kzalloc(sizeof(*fpga_xfrm), GFP_KERNEL); + if (!fpga_xfrm) + return ERR_PTR(-ENOMEM); + + mutex_init(&fpga_xfrm->lock); + memcpy(&fpga_xfrm->accel_xfrm.attrs, attrs, + sizeof(fpga_xfrm->accel_xfrm.attrs)); + + return &fpga_xfrm->accel_xfrm; +} + +void mlx5_fpga_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm) +{ + struct mlx5_fpga_esp_xfrm *fpga_xfrm = + container_of(xfrm, struct mlx5_fpga_esp_xfrm, + accel_xfrm); + /* assuming no sa_ctx are connected to this xfrm_ctx */ + kfree(fpga_xfrm); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h index e5ec29f56532..7ad1e2cd3fb0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h @@ -38,31 +38,28 @@ #ifdef CONFIG_MLX5_FPGA -void *mlx5_fpga_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev, - struct mlx5_accel_ipsec_sa *cmd, int cmd_size); -int mlx5_fpga_ipsec_sa_cmd_wait(void *context); - u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev); unsigned int mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev); int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters, unsigned int counters_count); +void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev, + struct mlx5_accel_esp_xfrm *accel_xfrm, + const __be32 saddr[4], + const __be32 daddr[4], + const __be32 spi, bool is_ipv6); +void mlx5_fpga_ipsec_delete_sa_ctx(void *context); + int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev); void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev); -#else - -static inline void *mlx5_fpga_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev, - struct mlx5_accel_ipsec_sa *cmd, - int cmd_size) -{ - return ERR_PTR(-EOPNOTSUPP); -} +struct mlx5_accel_esp_xfrm * +mlx5_fpga_esp_create_xfrm(struct mlx5_core_dev *mdev, + const struct mlx5_accel_esp_xfrm_attrs *attrs, + u32 flags); +void mlx5_fpga_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm); -static inline int mlx5_fpga_ipsec_sa_cmd_wait(void *context) -{ - return -EOPNOTSUPP; -} +#else static inline u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev) { @@ -81,6 +78,20 @@ static inline int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, return 0; } +static inline void * +mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev, + struct mlx5_accel_esp_xfrm *accel_xfrm, + const __be32 saddr[4], + const __be32 daddr[4], + const __be32 spi, bool is_ipv6) +{ + return NULL; +} + +static inline void mlx5_fpga_ipsec_delete_sa_ctx(void *context) +{ +} + static inline int mlx5_fpga_ipsec_init(struct 
mlx5_core_dev *mdev) { return 0; @@ -90,6 +101,18 @@ static inline void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev) { } +static inline struct mlx5_accel_esp_xfrm * +mlx5_fpga_esp_create_xfrm(struct mlx5_core_dev *mdev, + const struct mlx5_accel_esp_xfrm_attrs *attrs, + u32 flags) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline void mlx5_fpga_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm) +{ +} + #endif /* CONFIG_MLX5_FPGA */ #endif /* __MLX5_FPGA_SADB_H__ */ diff --git a/include/linux/mlx5/accel.h b/include/linux/mlx5/accel.h index b674af63689b..da6de465ea6d 100644 --- a/include/linux/mlx5/accel.h +++ b/include/linux/mlx5/accel.h @@ -36,23 +36,102 @@ #include -enum mlx5_accel_ipsec_caps { +enum mlx5_accel_esp_aes_gcm_keymat_iv_algo { + MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ, +}; + +enum mlx5_accel_esp_flags { + MLX5_ACCEL_ESP_FLAGS_TUNNEL = 0, /* Default */ + MLX5_ACCEL_ESP_FLAGS_TRANSPORT = 1UL << 0, + MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED = 1UL << 1, + MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP = 1UL << 2, +}; + +enum mlx5_accel_esp_action { + MLX5_ACCEL_ESP_ACTION_DECRYPT, + MLX5_ACCEL_ESP_ACTION_ENCRYPT, +}; + +enum mlx5_accel_esp_keymats { + MLX5_ACCEL_ESP_KEYMAT_AES_NONE, + MLX5_ACCEL_ESP_KEYMAT_AES_GCM, +}; + +enum mlx5_accel_esp_replay { + MLX5_ACCEL_ESP_REPLAY_NONE, + MLX5_ACCEL_ESP_REPLAY_BMP, +}; + +struct aes_gcm_keymat { + u64 seq_iv; + enum mlx5_accel_esp_aes_gcm_keymat_iv_algo iv_algo; + + u32 salt; + u32 icv_len; + + u32 key_len; + u32 aes_key[256 / 32]; +}; + +struct mlx5_accel_esp_xfrm_attrs { + enum mlx5_accel_esp_action action; + u32 esn; + u32 spi; + u32 seq; + u32 tfc_pad; + u32 flags; + u32 sa_handle; + enum mlx5_accel_esp_replay replay_type; + union { + struct { + u32 size; + + } bmp; + } replay; + enum mlx5_accel_esp_keymats keymat_type; + union { + struct aes_gcm_keymat aes_gcm; + } keymat; +}; + +struct mlx5_accel_esp_xfrm { + struct mlx5_core_dev *mdev; + struct mlx5_accel_esp_xfrm_attrs attrs; +}; + +enum { + MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA = 1UL << 0, +}; + +enum mlx5_accel_ipsec_cap { MLX5_ACCEL_IPSEC_CAP_DEVICE = 1 << 0, MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA = 1 << 1, MLX5_ACCEL_IPSEC_CAP_ESP = 1 << 2, MLX5_ACCEL_IPSEC_CAP_IPV6 = 1 << 3, MLX5_ACCEL_IPSEC_CAP_LSO = 1 << 4, MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER = 1 << 5, - MLX5_ACCEL_IPSEC_CAP_V2_CMD = 1 << 6, }; #ifdef CONFIG_MLX5_ACCEL u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev); +struct mlx5_accel_esp_xfrm * +mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev, + const struct mlx5_accel_esp_xfrm_attrs *attrs, + u32 flags); +void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm); + #else static inline u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev) { return 0; } +static inline struct mlx5_accel_esp_xfrm * +mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev, + const struct mlx5_accel_esp_xfrm_attrs *attrs, + u32 flags) { return ERR_PTR(-EOPNOTSUPP); } +static inline void +mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm) {} + #endif #endif diff --git a/include/linux/mlx5/mlx5_ifc_fpga.h b/include/linux/mlx5/mlx5_ifc_fpga.h index dd7e4538159c..debcc57de43a 100644 --- a/include/linux/mlx5/mlx5_ifc_fpga.h +++ b/include/linux/mlx5/mlx5_ifc_fpga.h @@ -448,6 +448,15 @@ struct mlx5_ifc_fpga_ipsec_cmd_resp { u8 reserved[24]; } __packed; +enum mlx5_ifc_fpga_ipsec_cmd_opcode { + MLX5_FPGA_IPSEC_CMD_OP_ADD_SA = 0, + MLX5_FPGA_IPSEC_CMD_OP_DEL_SA = 1, + MLX5_FPGA_IPSEC_CMD_OP_ADD_SA_V2 = 2, + MLX5_FPGA_IPSEC_CMD_OP_DEL_SA_V2 = 3, + 
MLX5_FPGA_IPSEC_CMD_OP_MOD_SA_V2 = 4, + MLX5_FPGA_IPSEC_CMD_OP_SET_CAP = 5, +}; + enum mlx5_ifc_fpga_ipsec_cap { MLX5_FPGA_IPSEC_CAP_NO_TRAILER = BIT(0), }; @@ -458,4 +467,54 @@ struct mlx5_ifc_fpga_ipsec_cmd_cap { u8 reserved[24]; } __packed; +enum mlx5_ifc_fpga_ipsec_sa_flags { + MLX5_FPGA_IPSEC_SA_IPV6 = BIT(2), + MLX5_FPGA_IPSEC_SA_DIR_SX = BIT(3), + MLX5_FPGA_IPSEC_SA_SPI_EN = BIT(4), + MLX5_FPGA_IPSEC_SA_SA_VALID = BIT(5), + MLX5_FPGA_IPSEC_SA_IP_ESP = BIT(6), + MLX5_FPGA_IPSEC_SA_IP_AH = BIT(7), +}; + +enum mlx5_ifc_fpga_ipsec_sa_enc_mode { + MLX5_FPGA_IPSEC_SA_ENC_MODE_NONE = 0, + MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_128_AUTH_128 = 1, + MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_256_AUTH_128 = 3, +}; + +struct mlx5_ifc_fpga_ipsec_sa_v1 { + __be32 cmd; + u8 key_enc[32]; + u8 key_auth[32]; + __be32 sip[4]; + __be32 dip[4]; + union { + struct { + __be32 reserved; + u8 salt_iv[8]; + __be32 salt; + } __packed gcm; + struct { + u8 salt[16]; + } __packed cbc; + }; + __be32 spi; + __be32 sw_sa_handle; + __be16 tfclen; + u8 enc_mode; + u8 reserved1[2]; + u8 flags; + u8 reserved2[2]; +}; + +struct mlx5_ifc_fpga_ipsec_sa { + struct mlx5_ifc_fpga_ipsec_sa_v1 ipsec_sa_v1; + __be16 udp_sp; + __be16 udp_dp; + u8 reserved1[4]; + __be32 esn; + __be16 vid; /* only 12 bits, rest is reserved */ + __be16 reserved2; +} __packed; + #endif /* MLX5_IFC_FPGA_H */ -- cgit v1.2.3 From 05564d0ae075b7a73339eaa05296c3034e439c32 Mon Sep 17 00:00:00 2001 From: Aviad Yehezkel Date: Sun, 18 Feb 2018 15:07:20 +0200 Subject: net/mlx5: Add flow-steering commands for FPGA IPSec implementation In order to add a context to the FPGA, we need to get both the software transform context (which includes the keys, etc.) and the source/destination IPs (which are included in the steering rule). Therefore, we register a new set of firmware-like commands for the FPGA. Each time a rule is added, the steering core infrastructure calls the FPGA command layer. If the rule is intended for the FPGA, it combines the IP information with the software transformation context and creates the respective hardware transform. Afterwards, it calls the standard steering command layer.
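To make the flow concrete, here is a minimal, self-contained user-space C sketch of the command-wrapping pattern described above. All types and names here (flow_cmds, default_create_fte, fpga_create_fte) are simplified stand-ins invented for illustration, not the driver's actual API:

    #include <stdio.h>

    /* Simplified stand-in for the driver's steering command table. */
    struct flow_cmds {
            int (*create_fte)(int rule_id, int is_esp_rule);
    };

    /* The "default" layer: talks straight to firmware. */
    static int default_create_fte(int rule_id, int is_esp_rule)
    {
            printf("firmware: rule %d installed\n", rule_id);
            return 0;
    }

    static const struct flow_cmds default_cmds = {
            .create_fte = default_create_fte,
    };

    /* The FPGA layer: for ESP rules, combine the rule's IPs/SPI with the
     * software transform context to build the hardware transform, then
     * fall through to the standard steering command.
     */
    static int fpga_create_fte(int rule_id, int is_esp_rule)
    {
            if (is_esp_rule)
                    printf("fpga: rule %d -> combine IPs/SPI with xfrm ctx\n",
                           rule_id);
            return default_cmds.create_fte(rule_id, is_esp_rule);
    }

    static const struct flow_cmds fpga_cmds = {
            .create_fte = fpga_create_fte,
    };

    int main(void)
    {
            /* Table types backed by the FPGA get the wrapping command set. */
            const struct flow_cmds *cmds = &fpga_cmds;

            cmds->create_fte(1, 1); /* ESP rule: FPGA work, then firmware */
            cmds->create_fte(2, 0); /* plain rule: straight to firmware */
            return 0;
    }

The patch applies the same idea with a full command table (create/destroy flow table, flow group, FTE, and so on): the FPGA layer overrides only the entry points it must intercept and delegates everything else to the default firmware commands.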
Signed-off-by: Aviad Yehezkel Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/accel/ipsec.c | 7 + .../net/ethernet/mellanox/mlx5/core/fpga/ipsec.c | 724 +++++++++++++++++++++ .../net/ethernet/mellanox/mlx5/core/fpga/ipsec.h | 24 + drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 5 + drivers/net/ethernet/mellanox/mlx5/core/main.c | 2 + include/linux/mlx5/accel.h | 5 + include/linux/mlx5/fs.h | 3 + 7 files changed, 770 insertions(+) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c index ab5bc82855fd..9f1b1939716a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c @@ -100,3 +100,10 @@ void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm) mlx5_fpga_esp_destroy_xfrm(xfrm); } EXPORT_SYMBOL_GPL(mlx5_accel_esp_destroy_xfrm); + +int mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, + const struct mlx5_accel_esp_xfrm_attrs *attrs) +{ + return mlx5_fpga_esp_modify_xfrm(xfrm, attrs); +} +EXPORT_SYMBOL_GPL(mlx5_accel_esp_modify_xfrm); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c index daae44c937f0..7b43fa269117 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c @@ -33,8 +33,12 @@ #include #include +#include +#include +#include #include "mlx5_core.h" +#include "fs_cmd.h" #include "fpga/ipsec.h" #include "fpga/sdk.h" #include "fpga/core.h" @@ -75,6 +79,12 @@ struct mlx5_fpga_esp_xfrm { struct mlx5_accel_esp_xfrm accel_xfrm; }; +struct mlx5_fpga_ipsec_rule { + struct rb_node node; + struct fs_fte *fte; + struct mlx5_fpga_ipsec_sa_ctx *ctx; +}; + static const struct rhashtable_params rhash_sa = { .key_len = FIELD_SIZEOF(struct mlx5_fpga_ipsec_sa_ctx, hw_sa), .key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa), @@ -84,11 +94,15 @@ static const struct rhashtable_params rhash_sa = { }; struct mlx5_fpga_ipsec { + struct mlx5_fpga_device *fdev; struct list_head pending_cmds; spinlock_t pending_cmds_lock; /* Protects pending_cmds */ u32 caps[MLX5_ST_SZ_DW(ipsec_extended_cap)]; struct mlx5_fpga_conn *conn; + struct notifier_block fs_notifier_ingress_bypass; + struct notifier_block fs_notifier_egress; + /* Map hardware SA --> SA context * (mlx5_fpga_ipsec_sa) (mlx5_fpga_ipsec_sa_ctx) * We will use this hash to avoid SAs duplication in fpga which @@ -96,6 +110,12 @@ struct mlx5_fpga_ipsec { */ struct rhashtable sa_hash; /* hw_sa -> mlx5_fpga_ipsec_sa_ctx */ struct mutex sa_hash_lock; + + /* Tree holding all rules for this fpga device + * Key for searching a rule (mlx5_fpga_ipsec_rule) is (ft, id) + */ + struct rb_root rules_rb; + struct mutex rules_rb_lock; /* rules lock */ }; static bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev) @@ -498,6 +518,127 @@ mlx5_fpga_ipsec_build_hw_sa(struct mlx5_core_dev *mdev, hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_IPV6; } +static bool is_full_mask(const void *p, size_t len) +{ + WARN_ON(len % 4); + + return !memchr_inv(p, 0xff, len); +} + +static bool validate_fpga_full_mask(struct mlx5_core_dev *dev, + const u32 *match_c, + const u32 *match_v) +{ + const void *misc_params_c = MLX5_ADDR_OF(fte_match_param, + match_c, + misc_parameters); + const void *headers_c = MLX5_ADDR_OF(fte_match_param, + match_c, + outer_headers); + const void *headers_v = 
MLX5_ADDR_OF(fte_match_param, + match_v, + outer_headers); + + if (mlx5_fs_is_outer_ipv4_flow(dev, headers_c, headers_v)) { + const void *s_ipv4_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4, + headers_c, + src_ipv4_src_ipv6.ipv4_layout.ipv4); + const void *d_ipv4_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4, + headers_c, + dst_ipv4_dst_ipv6.ipv4_layout.ipv4); + + if (!is_full_mask(s_ipv4_c, MLX5_FLD_SZ_BYTES(ipv4_layout, + ipv4)) || + !is_full_mask(d_ipv4_c, MLX5_FLD_SZ_BYTES(ipv4_layout, + ipv4))) + return false; + } else { + const void *s_ipv6_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4, + headers_c, + src_ipv4_src_ipv6.ipv6_layout.ipv6); + const void *d_ipv6_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4, + headers_c, + dst_ipv4_dst_ipv6.ipv6_layout.ipv6); + + if (!is_full_mask(s_ipv6_c, MLX5_FLD_SZ_BYTES(ipv6_layout, + ipv6)) || + !is_full_mask(d_ipv6_c, MLX5_FLD_SZ_BYTES(ipv6_layout, + ipv6))) + return false; + } + + if (!is_full_mask(MLX5_ADDR_OF(fte_match_set_misc, misc_params_c, + outer_esp_spi), + MLX5_FLD_SZ_BYTES(fte_match_set_misc, outer_esp_spi))) + return false; + + return true; +} + +static bool mlx5_is_fpga_ipsec_rule(struct mlx5_core_dev *dev, + u8 match_criteria_enable, + const u32 *match_c, + const u32 *match_v) +{ + u32 ipsec_dev_caps = mlx5_accel_ipsec_device_caps(dev); + bool ipv6_flow; + + ipv6_flow = mlx5_fs_is_outer_ipv6_flow(dev, match_c, match_v); + + if (!(match_criteria_enable & MLX5_MATCH_OUTER_HEADERS) || + mlx5_fs_is_outer_udp_flow(match_c, match_v) || + mlx5_fs_is_outer_tcp_flow(match_c, match_v) || + mlx5_fs_is_vxlan_flow(match_c) || + !(mlx5_fs_is_outer_ipv4_flow(dev, match_c, match_v) || + ipv6_flow)) + return false; + + if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_DEVICE)) + return false; + + if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_ESP) && + mlx5_fs_is_outer_ipsec_flow(match_c)) + return false; + + if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_IPV6) && + ipv6_flow) + return false; + + if (!validate_fpga_full_mask(dev, match_c, match_v)) + return false; + + return true; +} + +static bool mlx5_is_fpga_egress_ipsec_rule(struct mlx5_core_dev *dev, + u8 match_criteria_enable, + const u32 *match_c, + const u32 *match_v, + struct mlx5_flow_act *flow_act) +{ + const void *outer_c = MLX5_ADDR_OF(fte_match_param, match_c, + outer_headers); + bool is_dmac = MLX5_GET(fte_match_set_lyr_2_4, outer_c, dmac_47_16) || + MLX5_GET(fte_match_set_lyr_2_4, outer_c, dmac_15_0); + bool is_smac = MLX5_GET(fte_match_set_lyr_2_4, outer_c, smac_47_16) || + MLX5_GET(fte_match_set_lyr_2_4, outer_c, smac_15_0); + int ret; + + ret = mlx5_is_fpga_ipsec_rule(dev, match_criteria_enable, match_c, + match_v); + if (!ret) + return ret; + + if (is_dmac || is_smac || + (match_criteria_enable & + ~(MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS)) || + (flow_act->action & ~(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | MLX5_FLOW_CONTEXT_ACTION_ALLOW)) || + flow_act->has_flow_tag) + return false; + + return true; +} + void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev, struct mlx5_accel_esp_xfrm *accel_xfrm, const __be32 saddr[4], @@ -587,6 +728,73 @@ exists: return context; } +static void * +mlx5_fpga_ipsec_fs_create_sa_ctx(struct mlx5_core_dev *mdev, + struct fs_fte *fte, + bool is_egress) +{ + struct mlx5_accel_esp_xfrm *accel_xfrm; + __be32 saddr[4], daddr[4], spi; + struct mlx5_flow_group *fg; + bool is_ipv6 = false; + + fs_get_obj(fg, fte->node.parent); + /* validate */ + if (is_egress && + !mlx5_is_fpga_egress_ipsec_rule(mdev, + fg->mask.match_criteria_enable, + fg->mask.match_criteria, + fte->val, + 
&fte->action)) + return ERR_PTR(-EINVAL); + else if (!mlx5_is_fpga_ipsec_rule(mdev, + fg->mask.match_criteria_enable, + fg->mask.match_criteria, + fte->val)) + return ERR_PTR(-EINVAL); + + /* get xfrm context */ + accel_xfrm = + (struct mlx5_accel_esp_xfrm *)fte->action.esp_id; + + /* IPs */ + if (mlx5_fs_is_outer_ipv4_flow(mdev, fg->mask.match_criteria, + fte->val)) { + memcpy(&saddr[3], + MLX5_ADDR_OF(fte_match_set_lyr_2_4, + fte->val, + src_ipv4_src_ipv6.ipv4_layout.ipv4), + sizeof(saddr[3])); + memcpy(&daddr[3], + MLX5_ADDR_OF(fte_match_set_lyr_2_4, + fte->val, + dst_ipv4_dst_ipv6.ipv4_layout.ipv4), + sizeof(daddr[3])); + } else { + memcpy(saddr, + MLX5_ADDR_OF(fte_match_param, + fte->val, + outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), + sizeof(saddr)); + memcpy(daddr, + MLX5_ADDR_OF(fte_match_param, + fte->val, + outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), + sizeof(daddr)); + is_ipv6 = true; + } + + /* SPI */ + spi = MLX5_GET_BE(typeof(spi), + fte_match_param, fte->val, + misc_parameters.outer_esp_spi); + + /* create */ + return mlx5_fpga_ipsec_create_sa_ctx(mdev, accel_xfrm, + saddr, daddr, + spi, is_ipv6); +} + static void mlx5_fpga_ipsec_release_sa_ctx(struct mlx5_fpga_ipsec_sa_ctx *sa_ctx) { @@ -623,6 +831,389 @@ void mlx5_fpga_ipsec_delete_sa_ctx(void *context) mutex_unlock(&fpga_xfrm->lock); } +static inline struct mlx5_fpga_ipsec_rule * +_rule_search(struct rb_root *root, struct fs_fte *fte) +{ + struct rb_node *node = root->rb_node; + + while (node) { + struct mlx5_fpga_ipsec_rule *rule = + container_of(node, struct mlx5_fpga_ipsec_rule, + node); + + if (rule->fte < fte) + node = node->rb_left; + else if (rule->fte > fte) + node = node->rb_right; + else + return rule; + } + return NULL; +} + +static struct mlx5_fpga_ipsec_rule * +rule_search(struct mlx5_fpga_ipsec *ipsec_dev, struct fs_fte *fte) +{ + struct mlx5_fpga_ipsec_rule *rule; + + mutex_lock(&ipsec_dev->rules_rb_lock); + rule = _rule_search(&ipsec_dev->rules_rb, fte); + mutex_unlock(&ipsec_dev->rules_rb_lock); + + return rule; +} + +static inline int _rule_insert(struct rb_root *root, + struct mlx5_fpga_ipsec_rule *rule) +{ + struct rb_node **new = &root->rb_node, *parent = NULL; + + /* Figure out where to put new node */ + while (*new) { + struct mlx5_fpga_ipsec_rule *this = + container_of(*new, struct mlx5_fpga_ipsec_rule, + node); + + parent = *new; + if (rule->fte < this->fte) + new = &((*new)->rb_left); + else if (rule->fte > this->fte) + new = &((*new)->rb_right); + else + return -EEXIST; + } + + /* Add new node and rebalance tree. 
*/ + rb_link_node(&rule->node, parent, new); + rb_insert_color(&rule->node, root); + + return 0; +} + +static int rule_insert(struct mlx5_fpga_ipsec *ipsec_dev, + struct mlx5_fpga_ipsec_rule *rule) +{ + int ret; + + mutex_lock(&ipsec_dev->rules_rb_lock); + ret = _rule_insert(&ipsec_dev->rules_rb, rule); + mutex_unlock(&ipsec_dev->rules_rb_lock); + + return ret; +} + +static inline void _rule_delete(struct mlx5_fpga_ipsec *ipsec_dev, + struct mlx5_fpga_ipsec_rule *rule) +{ + struct rb_root *root = &ipsec_dev->rules_rb; + + mutex_lock(&ipsec_dev->rules_rb_lock); + rb_erase(&rule->node, root); + mutex_unlock(&ipsec_dev->rules_rb_lock); +} + +static void rule_delete(struct mlx5_fpga_ipsec *ipsec_dev, + struct mlx5_fpga_ipsec_rule *rule) +{ + _rule_delete(ipsec_dev, rule); + kfree(rule); +} + +struct mailbox_mod { + uintptr_t saved_esp_id; + u32 saved_action; + u32 saved_outer_esp_spi_value; +}; + +static void restore_spec_mailbox(struct fs_fte *fte, + struct mailbox_mod *mbox_mod) +{ + char *misc_params_v = MLX5_ADDR_OF(fte_match_param, + fte->val, + misc_parameters); + + MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi, + mbox_mod->saved_outer_esp_spi_value); + fte->action.action |= mbox_mod->saved_action; + fte->action.esp_id = (uintptr_t)mbox_mod->saved_esp_id; +} + +static void modify_spec_mailbox(struct mlx5_core_dev *mdev, + struct fs_fte *fte, + struct mailbox_mod *mbox_mod) +{ + char *misc_params_v = MLX5_ADDR_OF(fte_match_param, + fte->val, + misc_parameters); + + mbox_mod->saved_esp_id = fte->action.esp_id; + mbox_mod->saved_action = fte->action.action & + (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | + MLX5_FLOW_CONTEXT_ACTION_DECRYPT); + mbox_mod->saved_outer_esp_spi_value = + MLX5_GET(fte_match_set_misc, misc_params_v, + outer_esp_spi); + + fte->action.esp_id = 0; + fte->action.action &= ~(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | + MLX5_FLOW_CONTEXT_ACTION_DECRYPT); + if (!MLX5_CAP_FLOWTABLE(mdev, + flow_table_properties_nic_receive.ft_field_support.outer_esp_spi)) + MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi, 0); +} + +static enum fs_flow_table_type egress_to_fs_ft(bool egress) +{ + return egress ? 
FS_FT_NIC_TX : FS_FT_NIC_RX; +} + +static int fpga_ipsec_fs_create_flow_group(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + u32 *in, + unsigned int *group_id, + bool is_egress) +{ + int (*create_flow_group)(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, u32 *in, + unsigned int *group_id) = + mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->create_flow_group; + char *misc_params_c = MLX5_ADDR_OF(create_flow_group_in, in, + match_criteria.misc_parameters); + u32 saved_outer_esp_spi_mask; + u8 match_criteria_enable; + int ret; + + if (MLX5_CAP_FLOWTABLE(dev, + flow_table_properties_nic_receive.ft_field_support.outer_esp_spi)) + return create_flow_group(dev, ft, in, group_id); + + match_criteria_enable = + MLX5_GET(create_flow_group_in, in, match_criteria_enable); + saved_outer_esp_spi_mask = + MLX5_GET(fte_match_set_misc, misc_params_c, outer_esp_spi); + if (!match_criteria_enable || !saved_outer_esp_spi_mask) + return create_flow_group(dev, ft, in, group_id); + + MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, 0); + + if (!(*misc_params_c) && + !memcmp(misc_params_c, misc_params_c + 1, MLX5_ST_SZ_BYTES(fte_match_set_misc) - 1)) + MLX5_SET(create_flow_group_in, in, match_criteria_enable, + match_criteria_enable & ~MLX5_MATCH_MISC_PARAMETERS); + + ret = create_flow_group(dev, ft, in, group_id); + + MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, saved_outer_esp_spi_mask); + MLX5_SET(create_flow_group_in, in, match_criteria_enable, match_criteria_enable); + + return ret; +} + +static int fpga_ipsec_fs_create_fte(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + struct mlx5_flow_group *fg, + struct fs_fte *fte, + bool is_egress) +{ + int (*create_fte)(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + struct mlx5_flow_group *fg, + struct fs_fte *fte) = + mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->create_fte; + struct mlx5_fpga_device *fdev = dev->fpga; + struct mlx5_fpga_ipsec *fipsec = fdev->ipsec; + struct mlx5_fpga_ipsec_rule *rule; + bool is_esp = fte->action.esp_id; + struct mailbox_mod mbox_mod; + int ret; + + if (!is_esp || + !(fte->action.action & + (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | + MLX5_FLOW_CONTEXT_ACTION_DECRYPT))) + return create_fte(dev, ft, fg, fte); + + rule = kzalloc(sizeof(*rule), GFP_KERNEL); + if (!rule) + return -ENOMEM; + + rule->ctx = mlx5_fpga_ipsec_fs_create_sa_ctx(dev, fte, is_egress); + if (IS_ERR(rule->ctx)) { + kfree(rule); + return PTR_ERR(rule->ctx); + } + + rule->fte = fte; + WARN_ON(rule_insert(fipsec, rule)); + + modify_spec_mailbox(dev, fte, &mbox_mod); + ret = create_fte(dev, ft, fg, fte); + restore_spec_mailbox(fte, &mbox_mod); + if (ret) { + _rule_delete(fipsec, rule); + mlx5_fpga_ipsec_delete_sa_ctx(rule->ctx); + kfree(rule); + } + + return ret; +} + +static int fpga_ipsec_fs_update_fte(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + unsigned int group_id, + int modify_mask, + struct fs_fte *fte, + bool is_egress) +{ + int (*update_fte)(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + unsigned int group_id, + int modify_mask, + struct fs_fte *fte) = + mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->update_fte; + bool is_esp = fte->action.esp_id; + struct mailbox_mod mbox_mod; + int ret; + + if (!is_esp || + !(fte->action.action & + (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | + MLX5_FLOW_CONTEXT_ACTION_DECRYPT))) + return update_fte(dev, ft, group_id, modify_mask, fte); + + modify_spec_mailbox(dev, fte, &mbox_mod); + ret = update_fte(dev, ft, group_id, 
modify_mask, fte); + restore_spec_mailbox(fte, &mbox_mod); + + return ret; +} + +static int fpga_ipsec_fs_delete_fte(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + struct fs_fte *fte, + bool is_egress) +{ + int (*delete_fte)(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + struct fs_fte *fte) = + mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->delete_fte; + struct mlx5_fpga_device *fdev = dev->fpga; + struct mlx5_fpga_ipsec *fipsec = fdev->ipsec; + struct mlx5_fpga_ipsec_rule *rule; + bool is_esp = fte->action.esp_id; + struct mailbox_mod mbox_mod; + int ret; + + if (!is_esp || + !(fte->action.action & + (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | + MLX5_FLOW_CONTEXT_ACTION_DECRYPT))) + return delete_fte(dev, ft, fte); + + rule = rule_search(fipsec, fte); + if (!rule) + return -ENOENT; + + mlx5_fpga_ipsec_delete_sa_ctx(rule->ctx); + rule_delete(fipsec, rule); + + modify_spec_mailbox(dev, fte, &mbox_mod); + ret = delete_fte(dev, ft, fte); + restore_spec_mailbox(fte, &mbox_mod); + + return ret; +} + +static int +mlx5_fpga_ipsec_fs_create_flow_group_egress(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + u32 *in, + unsigned int *group_id) +{ + return fpga_ipsec_fs_create_flow_group(dev, ft, in, group_id, true); +} + +static int +mlx5_fpga_ipsec_fs_create_fte_egress(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + struct mlx5_flow_group *fg, + struct fs_fte *fte) +{ + return fpga_ipsec_fs_create_fte(dev, ft, fg, fte, true); +} + +static int +mlx5_fpga_ipsec_fs_update_fte_egress(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + unsigned int group_id, + int modify_mask, + struct fs_fte *fte) +{ + return fpga_ipsec_fs_update_fte(dev, ft, group_id, modify_mask, fte, + true); +} + +static int +mlx5_fpga_ipsec_fs_delete_fte_egress(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + struct fs_fte *fte) +{ + return fpga_ipsec_fs_delete_fte(dev, ft, fte, true); +} + +static int +mlx5_fpga_ipsec_fs_create_flow_group_ingress(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + u32 *in, + unsigned int *group_id) +{ + return fpga_ipsec_fs_create_flow_group(dev, ft, in, group_id, false); +} + +static int +mlx5_fpga_ipsec_fs_create_fte_ingress(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + struct mlx5_flow_group *fg, + struct fs_fte *fte) +{ + return fpga_ipsec_fs_create_fte(dev, ft, fg, fte, false); +} + +static int +mlx5_fpga_ipsec_fs_update_fte_ingress(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + unsigned int group_id, + int modify_mask, + struct fs_fte *fte) +{ + return fpga_ipsec_fs_update_fte(dev, ft, group_id, modify_mask, fte, + false); +} + +static int +mlx5_fpga_ipsec_fs_delete_fte_ingress(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + struct fs_fte *fte) +{ + return fpga_ipsec_fs_delete_fte(dev, ft, fte, false); +} + +static struct mlx5_flow_cmds fpga_ipsec_ingress; +static struct mlx5_flow_cmds fpga_ipsec_egress; + +const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type) +{ + switch (type) { + case FS_FT_NIC_RX: + return &fpga_ipsec_ingress; + case FS_FT_NIC_TX: + return &fpga_ipsec_egress; + default: + WARN_ON(true); + return NULL; + } +} + int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev) { struct mlx5_fpga_conn_attr init_attr = {0}; @@ -637,6 +1228,8 @@ int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev) if (!fdev->ipsec) return -ENOMEM; + fdev->ipsec->fdev = fdev; + err = mlx5_fpga_get_sbu_caps(fdev, sizeof(fdev->ipsec->caps), fdev->ipsec->caps); if 
(err) { @@ -666,6 +1259,9 @@ int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev) goto err_destroy_conn; mutex_init(&fdev->ipsec->sa_hash_lock); + fdev->ipsec->rules_rb = RB_ROOT; + mutex_init(&fdev->ipsec->rules_rb_lock); + err = mlx5_fpga_ipsec_enable_supported_caps(mdev); if (err) { mlx5_fpga_err(fdev, "Failed to enable IPSec extended capabilities: %d\n", @@ -687,6 +1283,17 @@ error: return err; } +static void destroy_rules_rb(struct rb_root *root) +{ + struct mlx5_fpga_ipsec_rule *r, *tmp; + + rbtree_postorder_for_each_entry_safe(r, tmp, root, node) { + rb_erase(&r->node, root); + mlx5_fpga_ipsec_delete_sa_ctx(r->ctx); + kfree(r); + } +} + void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev) { struct mlx5_fpga_device *fdev = mdev->fpga; @@ -694,6 +1301,7 @@ void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev) if (!mlx5_fpga_is_ipsec_device(mdev)) return; + destroy_rules_rb(&fdev->ipsec->rules_rb); rhashtable_destroy(&fdev->ipsec->sa_hash); mlx5_fpga_sbu_conn_destroy(fdev->ipsec->conn); @@ -701,6 +1309,49 @@ void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev) fdev->ipsec = NULL; } +void mlx5_fpga_ipsec_build_fs_cmds(void) +{ + /* ingress */ + fpga_ipsec_ingress.create_flow_table = + mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->create_flow_table; + fpga_ipsec_ingress.destroy_flow_table = + mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->destroy_flow_table; + fpga_ipsec_ingress.modify_flow_table = + mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->modify_flow_table; + fpga_ipsec_ingress.create_flow_group = + mlx5_fpga_ipsec_fs_create_flow_group_ingress; + fpga_ipsec_ingress.destroy_flow_group = + mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->destroy_flow_group; + fpga_ipsec_ingress.create_fte = + mlx5_fpga_ipsec_fs_create_fte_ingress; + fpga_ipsec_ingress.update_fte = + mlx5_fpga_ipsec_fs_update_fte_ingress; + fpga_ipsec_ingress.delete_fte = + mlx5_fpga_ipsec_fs_delete_fte_ingress; + fpga_ipsec_ingress.update_root_ft = + mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->update_root_ft; + + /* egress */ + fpga_ipsec_egress.create_flow_table = + mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->create_flow_table; + fpga_ipsec_egress.destroy_flow_table = + mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->destroy_flow_table; + fpga_ipsec_egress.modify_flow_table = + mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->modify_flow_table; + fpga_ipsec_egress.create_flow_group = + mlx5_fpga_ipsec_fs_create_flow_group_egress; + fpga_ipsec_egress.destroy_flow_group = + mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->destroy_flow_group; + fpga_ipsec_egress.create_fte = + mlx5_fpga_ipsec_fs_create_fte_egress; + fpga_ipsec_egress.update_fte = + mlx5_fpga_ipsec_fs_update_fte_egress; + fpga_ipsec_egress.delete_fte = + mlx5_fpga_ipsec_fs_delete_fte_egress; + fpga_ipsec_egress.update_root_ft = + mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->update_root_ft; +} + static int mlx5_fpga_esp_validate_xfrm_attrs(struct mlx5_core_dev *mdev, const struct mlx5_accel_esp_xfrm_attrs *attrs) @@ -783,3 +1434,76 @@ void mlx5_fpga_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm) /* assuming no sa_ctx are connected to this xfrm_ctx */ kfree(fpga_xfrm); } + +int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, + const struct mlx5_accel_esp_xfrm_attrs *attrs) +{ + struct mlx5_core_dev *mdev = xfrm->mdev; + struct mlx5_fpga_device *fdev = mdev->fpga; + struct mlx5_fpga_ipsec *fipsec = fdev->ipsec; + struct mlx5_fpga_esp_xfrm *fpga_xfrm; + struct mlx5_ifc_fpga_ipsec_sa org_hw_sa; + + int err = 0; + 
+ if (!memcmp(&xfrm->attrs, attrs, sizeof(xfrm->attrs))) + return 0; + + if (mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) { + mlx5_core_warn(mdev, "Tried to modify an esp with unsupported attrs\n"); + return -EOPNOTSUPP; + } + + if (!is_v2_sadb_supported(fipsec)) { + mlx5_core_warn(mdev, "Modify esp is not supported\n"); + return -EOPNOTSUPP; + } + + fpga_xfrm = container_of(xfrm, struct mlx5_fpga_esp_xfrm, accel_xfrm); + + mutex_lock(&fpga_xfrm->lock); + + if (!fpga_xfrm->sa_ctx) + /* Unbound xfrm, change only sw attrs */ + goto change_sw_xfrm_attrs; + + /* copy original hw sa */ + memcpy(&org_hw_sa, &fpga_xfrm->sa_ctx->hw_sa, sizeof(org_hw_sa)); + mutex_lock(&fipsec->sa_hash_lock); + /* remove original hw sa from hash */ + WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, + &fpga_xfrm->sa_ctx->hash, rhash_sa)); + /* update hw_sa with new xfrm attrs */ + mlx5_fpga_ipsec_build_hw_xfrm(xfrm->mdev, attrs, + &fpga_xfrm->sa_ctx->hw_sa); + /* try to insert new hw_sa to hash */ + err = rhashtable_insert_fast(&fipsec->sa_hash, + &fpga_xfrm->sa_ctx->hash, rhash_sa); + if (err) + goto rollback_sa; + + /* modify device with new hw_sa */ + err = mlx5_fpga_ipsec_update_hw_sa(fdev, &fpga_xfrm->sa_ctx->hw_sa, + MLX5_FPGA_IPSEC_CMD_OP_MOD_SA_V2); + fpga_xfrm->sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0; + if (err) + WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, + &fpga_xfrm->sa_ctx->hash, + rhash_sa)); +rollback_sa: + if (err) { + /* return original hw_sa to hash */ + memcpy(&fpga_xfrm->sa_ctx->hw_sa, &org_hw_sa, + sizeof(org_hw_sa)); + WARN_ON(rhashtable_insert_fast(&fipsec->sa_hash, + &fpga_xfrm->sa_ctx->hash, + rhash_sa)); + } + mutex_unlock(&fipsec->sa_hash_lock); + +change_sw_xfrm_attrs: + if (!err) + memcpy(&xfrm->attrs, attrs, sizeof(xfrm->attrs)); + mutex_unlock(&fpga_xfrm->lock); + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h index 7ad1e2cd3fb0..2b5e63b0d4d6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h @@ -35,6 +35,7 @@ #define __MLX5_FPGA_IPSEC_H__ #include "accel/ipsec.h" +#include "fs_cmd.h" #ifdef CONFIG_MLX5_FPGA @@ -52,12 +53,18 @@ void mlx5_fpga_ipsec_delete_sa_ctx(void *context); int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev); void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev); +void mlx5_fpga_ipsec_build_fs_cmds(void); struct mlx5_accel_esp_xfrm * mlx5_fpga_esp_create_xfrm(struct mlx5_core_dev *mdev, const struct mlx5_accel_esp_xfrm_attrs *attrs, u32 flags); void mlx5_fpga_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm); +int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, + const struct mlx5_accel_esp_xfrm_attrs *attrs); + +const struct mlx5_flow_cmds * +mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type); #else @@ -101,6 +108,10 @@ static inline void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev) { } +static inline void mlx5_fpga_ipsec_build_fs_cmds(void) +{ +} + static inline struct mlx5_accel_esp_xfrm * mlx5_fpga_esp_create_xfrm(struct mlx5_core_dev *mdev, const struct mlx5_accel_esp_xfrm_attrs *attrs, @@ -113,6 +124,19 @@ static inline void mlx5_fpga_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm) { } +static inline int +mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, + const struct mlx5_accel_esp_xfrm_attrs *attrs) +{ + return -EOPNOTSUPP; +} + +static inline const struct mlx5_flow_cmds * +mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type 
type) +{ + return mlx5_fs_cmd_get_default(type); +} + #endif /* CONFIG_MLX5_FPGA */ #endif /* __MLX5_FPGA_SADB_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index f836e6b76f65..09aad58c8e06 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -38,6 +38,7 @@ #include "fs_cmd.h" #include "diag/fs_tracepoint.h" #include "accel/ipsec.h" +#include "fpga/ipsec.h" #define INIT_TREE_NODE_ARRAY_SIZE(...) (sizeof((struct init_tree_node[]){__VA_ARGS__}) /\ sizeof(struct init_tree_node)) @@ -2251,6 +2252,10 @@ static struct mlx5_flow_root_namespace struct mlx5_flow_root_namespace *root_ns; struct mlx5_flow_namespace *ns; + if (mlx5_accel_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE && + (table_type == FS_FT_NIC_RX || table_type == FS_FT_NIC_TX)) + cmds = mlx5_fs_cmd_get_default_ipsec_fpga_cmds(table_type); + /* Create the root namespace */ root_ns = kvzalloc(sizeof(*root_ns), GFP_KERNEL); if (!root_ns) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 03972eed02cd..08c33657677c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -58,6 +58,7 @@ #include "eswitch.h" #include "lib/mlx5.h" #include "fpga/core.h" +#include "fpga/ipsec.h" #include "accel/ipsec.h" #include "lib/clock.h" @@ -1658,6 +1659,7 @@ static int __init init(void) get_random_bytes(&sw_owner_id, sizeof(sw_owner_id)); mlx5_core_verify_params(); + mlx5_fpga_ipsec_build_fs_cmds(); mlx5_register_debugfs(); err = pci_register_driver(&mlx5_core_driver); diff --git a/include/linux/mlx5/accel.h b/include/linux/mlx5/accel.h index da6de465ea6d..6c694709b0a2 100644 --- a/include/linux/mlx5/accel.h +++ b/include/linux/mlx5/accel.h @@ -121,6 +121,8 @@ mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev, const struct mlx5_accel_esp_xfrm_attrs *attrs, u32 flags); void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm); +int mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, + const struct mlx5_accel_esp_xfrm_attrs *attrs); #else @@ -132,6 +134,9 @@ mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev, u32 flags) { return ERR_PTR(-EOPNOTSUPP); } static inline void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm) {} +static inline int +mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, + const struct mlx5_accel_esp_xfrm_attrs *attrs) { return -EOPNOTSUPP; } #endif #endif diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index 744ea228acea..b957e52434f8 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -40,6 +40,8 @@ enum { MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO = 1 << 16, + MLX5_FLOW_CONTEXT_ACTION_ENCRYPT = 1 << 17, + MLX5_FLOW_CONTEXT_ACTION_DECRYPT = 1 << 18, }; enum { @@ -146,6 +148,7 @@ struct mlx5_flow_act { u32 flow_tag; u32 encap_id; u32 modify_id; + uintptr_t esp_id; }; #define MLX5_DECLARE_FLOW_ACT(name) \ -- cgit v1.2.3 From 75ef3f551572822a392ca9b03486bf09163cc668 Mon Sep 17 00:00:00 2001 From: Aviad Yehezkel Date: Thu, 18 Jan 2018 16:31:55 +0200 Subject: net/mlx5e: Add a common to_ipsec_sa_entry() helper Add a helper that retrieves the driver-internal SA entry from an xfrm state; all validity checks are done in one place, as sketched below.
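For illustration, a self-contained C sketch of the pattern this refactor applies: one accessor owns every check, so call sites reduce to a single NULL test. The types below (state, sa_entry) are simplified stand-ins, not the driver's:

    #include <assert.h>
    #include <stddef.h>

    /* Simplified stand-ins for xfrm_state / mlx5e_ipsec_sa_entry. */
    struct sa_entry;
    struct state    { struct sa_entry *offload_handle; };
    struct sa_entry { struct state *x; };

    /* All validity checks live here, so no caller can forget one. */
    static struct sa_entry *to_sa_entry(struct state *x)
    {
            struct sa_entry *sa;

            if (!x || !x->offload_handle)
                    return NULL;

            sa = x->offload_handle;
            assert(sa->x == x); /* mirrors the driver's WARN_ON(sa->x != x) */
            return sa;
    }

    /* Call sites shrink to a single guarded lookup. */
    static void del_state(struct state *x)
    {
            struct sa_entry *sa = to_sa_entry(x);

            if (!sa)
                    return;
            /* ... tear down resources referenced by sa ... */
    }

    int main(void)
    {
            struct state x = { 0 };

            del_state(&x); /* no handle: helper returns NULL, nothing to do */
            return 0;
    }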
Signed-off-by: Aviad Yehezkel Signed-off-by: Saeed Mahameed --- .../ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 29 ++++++++++++++-------- 1 file changed, 19 insertions(+), 10 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index 59df3dbd2e65..f5b1d60f96f5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -51,6 +51,21 @@ struct mlx5e_ipsec_sa_entry { void *hw_context; }; +static struct mlx5e_ipsec_sa_entry *to_ipsec_sa_entry(struct xfrm_state *x) +{ + struct mlx5e_ipsec_sa_entry *sa; + + if (!x) + return NULL; + + sa = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle; + if (!sa) + return NULL; + + WARN_ON(sa->x != x); + return sa; +} + struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *ipsec, unsigned int handle) { @@ -312,28 +327,22 @@ out: static void mlx5e_xfrm_del_state(struct xfrm_state *x) { - struct mlx5e_ipsec_sa_entry *sa_entry; + struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x); - if (!x->xso.offload_handle) + if (!sa_entry) return; - sa_entry = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle; - WARN_ON(sa_entry->x != x); - if (x->xso.flags & XFRM_OFFLOAD_INBOUND) mlx5e_ipsec_sadb_rx_del(sa_entry); } static void mlx5e_xfrm_free_state(struct xfrm_state *x) { - struct mlx5e_ipsec_sa_entry *sa_entry; + struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x); - if (!x->xso.offload_handle) + if (!sa_entry) return; - sa_entry = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle; - WARN_ON(sa_entry->x != x); - if (sa_entry->hw_context) { mlx5_accel_esp_free_hw_context(sa_entry->hw_context); mlx5_accel_esp_destroy_xfrm(sa_entry->xfrm); -- cgit v1.2.3 From cb01008390bb0645d4728c7f8825e32d4b540a30 Mon Sep 17 00:00:00 2001 From: Aviad Yehezkel Date: Thu, 18 Jan 2018 16:02:17 +0200 Subject: net/mlx5: IPSec, Add support for ESN Currently ESN is not supported with IPSec device offload. This patch adds ESN support to IPsec device offload, implementing a new xfrm device operation to synchronize the offloading device's ESN with the SN received by xfrm. A new QP command updates the SA state at the following points:

      ESN 1                   ESN 2                   ESN 3
|-----------*-----------|-----------*-----------|-----------*
^           ^           ^           ^           ^           ^

^ - marks where the QP command is invoked to update the SA ESN state
    machine.
| - marks the start of the ESN scope (0-2^32-1). At this point move the
    SA ESN overlap bit to zero and increment the ESN.
* - marks the middle of the ESN scope (2^31). At this point move the
    SA ESN overlap bit to one.
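The overlap state machine above fits in a few lines. Below is a self-contained C sketch of the trigger logic; it mirrors the mlx5e_ipsec_update_esn_state() added by this patch with the driver types stripped away, and the constant corresponds to MLX5E_IPSEC_ESN_SCOPE_MID:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ESN_SCOPE_MID 0x80000000u /* middle of the 2^32 sequence scope */

    struct esn_state { uint32_t esn; uint8_t overlap; };

    /* Decide whether the device SA must be updated: flip the overlap bit
     * when the bottom of the replay window crosses the start ('|') or the
     * middle ('*') of the scope.
     */
    static bool update_esn_state(struct esn_state *st, uint32_t seq_bottom)
    {
            if (st->overlap && seq_bottom < ESN_SCOPE_MID) {
                    st->esn++;       /* crossed '|': a new ESN period begins */
                    st->overlap = 0;
                    return true;
            }
            if (!st->overlap && seq_bottom >= ESN_SCOPE_MID) {
                    st->overlap = 1; /* crossed '*': upper half of the scope */
                    return true;
            }
            return false;            /* same half: no QP command needed */
    }

    int main(void)
    {
            struct esn_state st = { .esn = 1, .overlap = 0 };

            printf("%d\n", update_esn_state(&st, 0x80000001u)); /* 1: set overlap */
            printf("%d\n", update_esn_state(&st, 0x00000005u)); /* 1: esn -> 2 */
            printf("%d\n", update_esn_state(&st, 0x00000006u)); /* 0: no change */
            return 0;
    }

A hardware update is issued only when the window bottom crosses a '|' or '*' mark; in steady state the function returns false and no QP command is sent, which is why the driver defers the update to a workqueue only when needed.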
Signed-off-by: Aviad Yehezkel Signed-off-by: Yossef Efraim Signed-off-by: Saeed Mahameed --- .../ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 118 +++++++++++++++++++-- .../ethernet/mellanox/mlx5/core/en_accel/ipsec.h | 23 ++++ .../mellanox/mlx5/core/en_accel/ipsec_rxtx.c | 29 ++++- .../mellanox/mlx5/core/en_accel/ipsec_rxtx.h | 5 + .../net/ethernet/mellanox/mlx5/core/fpga/ipsec.c | 22 ++++ include/linux/mlx5/accel.h | 2 + include/linux/mlx5/mlx5_ifc_fpga.h | 2 + 7 files changed, 189 insertions(+), 12 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index f5b1d60f96f5..cf58c9637904 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -38,18 +38,9 @@ #include #include "en.h" -#include "accel/ipsec.h" #include "en_accel/ipsec.h" #include "en_accel/ipsec_rxtx.h" -struct mlx5e_ipsec_sa_entry { - struct hlist_node hlist; /* Item in SADB_RX hashtable */ - unsigned int handle; /* Handle in SADB_RX */ - struct xfrm_state *x; - struct mlx5e_ipsec *ipsec; - struct mlx5_accel_esp_xfrm *xfrm; - void *hw_context; -}; static struct mlx5e_ipsec_sa_entry *to_ipsec_sa_entry(struct xfrm_state *x) { @@ -121,6 +112,40 @@ static void mlx5e_ipsec_sadb_rx_free(struct mlx5e_ipsec_sa_entry *sa_entry) ida_simple_remove(&ipsec->halloc, sa_entry->handle); } +static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry) +{ + struct xfrm_replay_state_esn *replay_esn; + u32 seq_bottom; + u8 overlap; + u32 *esn; + + if (!(sa_entry->x->props.flags & XFRM_STATE_ESN)) { + sa_entry->esn_state.trigger = 0; + return false; + } + + replay_esn = sa_entry->x->replay_esn; + seq_bottom = replay_esn->seq - replay_esn->replay_window + 1; + overlap = sa_entry->esn_state.overlap; + + sa_entry->esn_state.esn = xfrm_replay_seqhi(sa_entry->x, + htonl(seq_bottom)); + esn = &sa_entry->esn_state.esn; + + sa_entry->esn_state.trigger = 1; + if (unlikely(overlap && seq_bottom < MLX5E_IPSEC_ESN_SCOPE_MID)) { + ++(*esn); + sa_entry->esn_state.overlap = 0; + return true; + } else if (unlikely(!overlap && + (seq_bottom >= MLX5E_IPSEC_ESN_SCOPE_MID))) { + sa_entry->esn_state.overlap = 1; + return true; + } + + return false; +} + static void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry, struct mlx5_accel_esp_xfrm_attrs *attrs) @@ -152,6 +177,14 @@ mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry, /* iv len */ aes_gcm->icv_len = x->aead->alg_icv_len; + /* esn */ + if (sa_entry->esn_state.trigger) { + attrs->flags |= MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED; + attrs->esn = sa_entry->esn_state.esn; + if (sa_entry->esn_state.overlap) + attrs->flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP; + } + /* rx handle */ attrs->sa_handle = sa_entry->handle; @@ -187,7 +220,9 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x) netdev_info(netdev, "Cannot offload compressed xfrm states\n"); return -EINVAL; } - if (x->props.flags & XFRM_STATE_ESN) { + if (x->props.flags & XFRM_STATE_ESN && + !(mlx5_accel_ipsec_device_caps(priv->mdev) & + MLX5_ACCEL_IPSEC_CAP_ESN)) { netdev_info(netdev, "Cannot offload ESN xfrm states\n"); return -EINVAL; } @@ -277,8 +312,14 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x) netdev_info(netdev, "Failed adding to SADB_RX: %d\n", err); goto err_entry; } + } else { + sa_entry->set_iv_op = (x->props.flags & XFRM_STATE_ESN) ? 
+ mlx5e_ipsec_set_iv_esn : mlx5e_ipsec_set_iv; } + /* check esn */ + mlx5e_ipsec_update_esn_state(sa_entry); + /* create xfrm */ mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &attrs); sa_entry->xfrm = @@ -344,6 +385,7 @@ static void mlx5e_xfrm_free_state(struct xfrm_state *x) return; if (sa_entry->hw_context) { + flush_workqueue(sa_entry->ipsec->wq); mlx5_accel_esp_free_hw_context(sa_entry->hw_context); mlx5_accel_esp_destroy_xfrm(sa_entry->xfrm); } @@ -374,6 +416,12 @@ int mlx5e_ipsec_init(struct mlx5e_priv *priv) ipsec->en_priv->ipsec = ipsec; ipsec->no_trailer = !!(mlx5_accel_ipsec_device_caps(priv->mdev) & MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER); + ipsec->wq = alloc_ordered_workqueue("mlx5e_ipsec: %s", 0, + priv->netdev->name); + if (!ipsec->wq) { + kfree(ipsec); + return -ENOMEM; + } netdev_dbg(priv->netdev, "IPSec attached to netdevice\n"); return 0; } @@ -385,6 +433,9 @@ void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv) if (!ipsec) return; + drain_workqueue(ipsec->wq); + destroy_workqueue(ipsec->wq); + ida_destroy(&ipsec->halloc); kfree(ipsec); priv->ipsec = NULL; @@ -405,11 +456,58 @@ static bool mlx5e_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x) return true; } +struct mlx5e_ipsec_modify_state_work { + struct work_struct work; + struct mlx5_accel_esp_xfrm_attrs attrs; + struct mlx5e_ipsec_sa_entry *sa_entry; +}; + +static void _update_xfrm_state(struct work_struct *work) +{ + int ret; + struct mlx5e_ipsec_modify_state_work *modify_work = + container_of(work, struct mlx5e_ipsec_modify_state_work, work); + struct mlx5e_ipsec_sa_entry *sa_entry = modify_work->sa_entry; + + ret = mlx5_accel_esp_modify_xfrm(sa_entry->xfrm, + &modify_work->attrs); + if (ret) + netdev_warn(sa_entry->ipsec->en_priv->netdev, + "Not an IPSec offload device\n"); + + kfree(modify_work); +} + +static void mlx5e_xfrm_advance_esn_state(struct xfrm_state *x) +{ + struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x); + struct mlx5e_ipsec_modify_state_work *modify_work; + bool need_update; + + if (!sa_entry) + return; + + need_update = mlx5e_ipsec_update_esn_state(sa_entry); + if (!need_update) + return; + + modify_work = kzalloc(sizeof(*modify_work), GFP_ATOMIC); + if (!modify_work) + return; + + mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &modify_work->attrs); + modify_work->sa_entry = sa_entry; + + INIT_WORK(&modify_work->work, _update_xfrm_state); + WARN_ON(!queue_work(sa_entry->ipsec->wq, &modify_work->work)); +} + static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = { .xdo_dev_state_add = mlx5e_xfrm_add_state, .xdo_dev_state_delete = mlx5e_xfrm_del_state, .xdo_dev_state_free = mlx5e_xfrm_free_state, .xdo_dev_offload_ok = mlx5e_ipsec_offload_ok, + .xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state, }; void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h index bffc3ed0574a..1198fc1eba4c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h @@ -40,7 +40,11 @@ #include #include +#include "accel/ipsec.h" + #define MLX5E_IPSEC_SADB_RX_BITS 10 +#define MLX5E_IPSEC_ESN_SCOPE_MID 0x80000000L + #define MLX5E_METADATA_ETHER_TYPE (0x8CE4) #define MLX5E_METADATA_ETHER_LEN 8 @@ -82,6 +86,25 @@ struct mlx5e_ipsec { struct ida halloc; struct mlx5e_ipsec_sw_stats sw_stats; struct mlx5e_ipsec_stats stats; + struct workqueue_struct *wq; +}; + +struct mlx5e_ipsec_esn_state { + u32 esn; + u8 
trigger: 1; + u8 overlap: 1; +}; + +struct mlx5e_ipsec_sa_entry { + struct hlist_node hlist; /* Item in SADB_RX hashtable */ + struct mlx5e_ipsec_esn_state esn_state; + unsigned int handle; /* Handle in SADB_RX */ + struct xfrm_state *x; + struct mlx5e_ipsec *ipsec; + struct mlx5_accel_esp_xfrm *xfrm; + void *hw_context; + void (*set_iv_op)(struct sk_buff *skb, struct xfrm_state *x, + struct xfrm_offload *xo); }; void mlx5e_ipsec_build_inverse_table(void); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c index 64c549a06678..c245d8e78509 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c @@ -176,7 +176,30 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb, } } -static void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_offload *xo) +void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x, + struct xfrm_offload *xo) +{ + struct xfrm_replay_state_esn *replay_esn = x->replay_esn; + __u32 oseq = replay_esn->oseq; + int iv_offset; + __be64 seqno; + u32 seq_hi; + + if (unlikely(skb_is_gso(skb) && oseq < MLX5E_IPSEC_ESN_SCOPE_MID && + MLX5E_IPSEC_ESN_SCOPE_MID < (oseq - skb_shinfo(skb)->gso_segs))) { + seq_hi = xo->seq.hi - 1; + } else { + seq_hi = xo->seq.hi; + } + + /* Place the SN in the IV field */ + seqno = cpu_to_be64(xo->seq.low + ((u64)seq_hi << 32)); + iv_offset = skb_transport_offset(skb) + sizeof(struct ip_esp_hdr); + skb_store_bits(skb, iv_offset, &seqno, 8); +} + +void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x, + struct xfrm_offload *xo) { int iv_offset; __be64 seqno; @@ -228,6 +251,7 @@ struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev, struct mlx5e_priv *priv = netdev_priv(netdev); struct xfrm_offload *xo = xfrm_offload(skb); struct mlx5e_ipsec_metadata *mdata; + struct mlx5e_ipsec_sa_entry *sa_entry; struct xfrm_state *x; if (!xo) @@ -262,7 +286,8 @@ struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev, goto drop; } mlx5e_ipsec_set_swp(skb, &wqe->eth, x->props.mode, xo); - mlx5e_ipsec_set_iv(skb, xo); + sa_entry = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle; + sa_entry->set_iv_op(skb, x, xo); mlx5e_ipsec_set_metadata(skb, mdata, xo); return skb; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h index e37ae2598dbb..2bfbbef1b054 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h @@ -37,6 +37,7 @@ #ifdef CONFIG_MLX5_EN_IPSEC #include +#include #include "en.h" struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev, @@ -46,6 +47,10 @@ void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); void mlx5e_ipsec_inverse_table_init(void); bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev, netdev_features_t features); +void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x, + struct xfrm_offload *xo); +void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x, + struct xfrm_offload *xo); struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev, struct mlx5e_tx_wqe *wqe, struct sk_buff *skb); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c index 7b43fa269117..4f1568528738 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c @@ -347,6 +347,11 @@ u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev) if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, rx_no_trailer)) ret |= MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER; + if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, esn)) { + ret |= MLX5_ACCEL_IPSEC_CAP_ESN; + ret |= MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN; + } + return ret; } @@ -470,6 +475,23 @@ mlx5_fpga_ipsec_build_hw_xfrm(struct mlx5_core_dev *mdev, memcpy(&hw_sa->ipsec_sa_v1.gcm.salt, &aes_gcm->salt, sizeof(aes_gcm->salt)); + /* esn */ + if (xfrm_attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) { + hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_ESN_EN; + hw_sa->ipsec_sa_v1.flags |= + (xfrm_attrs->flags & + MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP) ? + MLX5_FPGA_IPSEC_SA_ESN_OVERLAP : 0; + hw_sa->esn = htonl(xfrm_attrs->esn); + } else { + hw_sa->ipsec_sa_v1.flags &= ~MLX5_FPGA_IPSEC_SA_ESN_EN; + hw_sa->ipsec_sa_v1.flags &= + ~(xfrm_attrs->flags & + MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP) ? + MLX5_FPGA_IPSEC_SA_ESN_OVERLAP : 0; + hw_sa->esn = 0; + } + /* rx handle */ hw_sa->ipsec_sa_v1.sw_sa_handle = htonl(xfrm_attrs->sa_handle); diff --git a/include/linux/mlx5/accel.h b/include/linux/mlx5/accel.h index 6c694709b0a2..70e7e5673ce9 100644 --- a/include/linux/mlx5/accel.h +++ b/include/linux/mlx5/accel.h @@ -110,6 +110,8 @@ enum mlx5_accel_ipsec_cap { MLX5_ACCEL_IPSEC_CAP_IPV6 = 1 << 3, MLX5_ACCEL_IPSEC_CAP_LSO = 1 << 4, MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER = 1 << 5, + MLX5_ACCEL_IPSEC_CAP_ESN = 1 << 6, + MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN = 1 << 7, }; #ifdef CONFIG_MLX5_ACCEL diff --git a/include/linux/mlx5/mlx5_ifc_fpga.h b/include/linux/mlx5/mlx5_ifc_fpga.h index debcc57de43a..ec052491ba3d 100644 --- a/include/linux/mlx5/mlx5_ifc_fpga.h +++ b/include/linux/mlx5/mlx5_ifc_fpga.h @@ -468,6 +468,8 @@ struct mlx5_ifc_fpga_ipsec_cmd_cap { } __packed; enum mlx5_ifc_fpga_ipsec_sa_flags { + MLX5_FPGA_IPSEC_SA_ESN_EN = BIT(0), + MLX5_FPGA_IPSEC_SA_ESN_OVERLAP = BIT(1), MLX5_FPGA_IPSEC_SA_IPV6 = BIT(2), MLX5_FPGA_IPSEC_SA_DIR_SX = BIT(3), MLX5_FPGA_IPSEC_SA_SPI_EN = BIT(4), -- cgit v1.2.3 From 31135eb3887daf2ed3e88fbefc36243357a9008f Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Wed, 28 Feb 2018 11:18:13 +0200 Subject: net/mlx5: Fix wrongly assigned CQ reference counter A kernel compiled with CONFIG_REFCOUNT_FULL produces the following error. The reason is that the initial value of a refcount_t is supposed to be greater than 0; change it accordingly. [ 3.106634] ------------[ cut here ]------------ [ 3.107756] refcount_t: increment on 0; use-after-free.
[ 3.109130] WARNING: CPU: 0 PID: 1 at lib/refcount.c:153 refcount_inc+0x27/0x30 [ 3.110085] CPU: 0 PID: 1 Comm: swapper/0 Not tainted 4.16.0-rc1-00028-gf683e04bdccc #137 [ 3.110085] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.7.5-0-ge51488c-20140602_164612-nilsson.home.kraxel.org 04/01/2014 [ 3.110085] RIP: 0010:refcount_inc+0x27/0x30 [ 3.110085] RSP: 0000:ffffaa620000fba0 EFLAGS: 00010286 [ 3.110085] RAX: 0000000000000000 RBX: ffff9a6d1a1821c8 RCX: ffffffff98a50f48 [ 3.110085] RDX: 0000000000000001 RSI: 0000000000000086 RDI: 0000000000000246 [ 3.110085] RBP: ffff9a6d1ac800a0 R08: 0000000000000289 R09: 000000000000000a [ 3.110085] R10: fffff03bc0682840 R11: ffffffff9949856d R12: ffff9a6d1b4a4000 [ 3.110085] R13: 0000000000000000 R14: ffff9a6d1a0a6c00 R15: ffffaa620000fc5c [ 3.110085] FS: 0000000000000000(0000) GS:ffff9a6d1fc00000(0000) knlGS:0000000000000000 [ 3.110085] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 3.110085] CR2: 0000000000000000 CR3: 000000000ba0a000 CR4: 00000000000006b0 [ 3.110085] Call Trace: [ 3.110085] mlx5_core_create_cq+0xde/0x250 [ 3.110085] ? __kmalloc+0x1ce/0x1e0 [ 3.110085] mlx5e_create_cq+0x15c/0x1e0 [ 3.110085] mlx5e_open_drop_rq+0xea/0x190 [ 3.110085] mlx5e_attach_netdev+0x53/0x140 [ 3.110085] mlx5e_attach+0x3d/0x60 [ 3.110085] mlx5e_add+0x11d/0x2f0 [ 3.110085] mlx5_add_device+0x77/0x170 [ 3.110085] mlx5_register_interface+0x74/0xc0 [ 3.110085] ? set_debug_rodata+0x11/0x11 [ 3.110085] init+0x67/0x72 [ 3.110085] ? mlx4_en_init_ptys2ethtool_map+0x346/0x346 [ 3.110085] do_one_initcall+0x98/0x147 [ 3.110085] ? set_debug_rodata+0x11/0x11 [ 3.110085] kernel_init_freeable+0x164/0x1e0 [ 3.110085] ? rest_init+0xb0/0xb0 [ 3.110085] kernel_init+0xa/0x100 [ 3.110085] ret_from_fork+0x35/0x40 [ 3.110085] Code: 00 00 00 00 e8 ab ff ff ff 84 c0 74 02 f3 c3 80 3d 3b c3 64 01 00 75 f5 48 c7 c7 68 0b 81 98 c6 05 2b c3 64 01 01 e8 79 d7 a3 ff <0f> ff c3 66 0f 1f 44 00 00 8b 06 83 f8 ff 74 39 31 c9 39 f8 89 [ 3.110085] ---[ end trace a0068e1c68438a74 ]--- Fixes: f105b45bf77c ("net/mlx5: CQ hold/put API") Signed-off-by: Leon Romanovsky Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/cq.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c index 669ed16938b3..a4179122a279 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c @@ -109,8 +109,7 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, cq->cons_index = 0; cq->arm_sn = 0; cq->eq = eq; - refcount_set(&cq->refcount, 0); - mlx5_cq_hold(cq); + refcount_set(&cq->refcount, 1); init_completion(&cq->free); if (!cq->comp) cq->comp = mlx5_add_cq_to_tasklet; -- cgit v1.2.3 From c846d8da5640a6c61d57e2c14a1a6d74b9c643b0 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Wed, 14 Mar 2018 19:57:24 -0700 Subject: mlx5: Remove call to ida_pre_get The mlx5 driver calls ida_pre_get() in a loop for no readily apparent reason. The driver uses ida_simple_get() which will call ida_pre_get() by itself and there's no need to use ida_pre_get() unless using ida_get_new(). Signed-off-by: Matthew Wilcox Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 10e16381f20a..3ba07c7096ef 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -1647,7 +1647,6 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft, list_for_each_entry(iter, match_head, list) { nested_down_read_ref_node(&iter->g->node, FS_LOCK_PARENT); - ida_pre_get(&iter->g->fte_allocator, GFP_KERNEL); } search_again_locked: -- cgit v1.2.3 From 594619497f3d6d4b8d8440e6d380e8da9dcc9eeb Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Thu, 22 Mar 2018 13:44:56 -0500 Subject: net/mlx5: Fix use-after-free _rule_ is being freed and then dereferenced by accessing rule->ctx. Fix this by copying the value returned by PTR_ERR(rule->ctx) into a local variable for its safe use after freeing _rule_. Addresses-Coverity-ID: 1466041 ("Read from pointer after free") Fixes: 05564d0ae075 ("net/mlx5: Add flow-steering commands for FPGA IPSec implementation") Reviewed-by: Yuval Shaia Signed-off-by: Gustavo A. R. Silva Acked-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c index 4f1568528738..0f5da499a223 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c @@ -1061,8 +1061,9 @@ static int fpga_ipsec_fs_create_fte(struct mlx5_core_dev *dev, rule->ctx = mlx5_fpga_ipsec_fs_create_sa_ctx(dev, fte, is_egress); if (IS_ERR(rule->ctx)) { + int err = PTR_ERR(rule->ctx); kfree(rule); - return PTR_ERR(rule->ctx); + return err; } rule->fte = fte; -- cgit v1.2.3 From 2fcb12df7d2fa5a004fc3e7f589e58a08f7ed8c9 Mon Sep 17 00:00:00 2001 From: Inbar Karmy Date: Thu, 17 Aug 2017 16:39:47 +0300 Subject: net/mlx5e: Expose PFC stall prevention counters Add the needed capability bit and counters to the device spec description. Expose the following two counters in ethtool: tx_pause_storm_warning_events: when the device is stalled for a period longer than a pre-configured watermark, the counter increases, giving the debug utility insight into the current device status. tx_pause_storm_error_events: when the device is stalled for a period longer than a pre-configured timeout, the pause transmission is disabled, and the counter increases.
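Both counters land in the standard ethtool statistics set, so ethtool -S <dev> shows them alongside the other pport counters. For completeness, a minimal userspace sketch (illustrative only; the interface name is assumed and error handling is trimmed) that fetches them through the classic SIOCETHTOOL strings/stats pair; note the warning counter's format string in the diff above carries a trailing space, so a prefix match is more robust than strcmp():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	const char *ifname = "eth0";	/* assumption: pick your mlx5 netdev */
	struct ethtool_drvinfo drvinfo = { .cmd = ETHTOOL_GDRVINFO };
	struct ethtool_gstrings *strs;
	struct ethtool_stats *stats;
	struct ifreq ifr = {0};
	unsigned int i;
	int fd;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

	ifr.ifr_data = (char *)&drvinfo;	/* how many stats are exposed? */
	if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;

	strs = calloc(1, sizeof(*strs) + drvinfo.n_stats * ETH_GSTRING_LEN);
	strs->cmd = ETHTOOL_GSTRINGS;
	strs->string_set = ETH_SS_STATS;
	strs->len = drvinfo.n_stats;
	ifr.ifr_data = (char *)strs;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)	/* counter names */
		return 1;

	stats = calloc(1, sizeof(*stats) + drvinfo.n_stats * sizeof(__u64));
	stats->cmd = ETHTOOL_GSTATS;
	stats->n_stats = drvinfo.n_stats;
	ifr.ifr_data = (char *)stats;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)	/* counter values */
		return 1;

	for (i = 0; i < drvinfo.n_stats; i++) {
		const char *name = (const char *)strs->data + i * ETH_GSTRING_LEN;

		if (!strncmp(name, "tx_pause_storm_", 15))
			printf("%.32s = %llu\n", name,
			       (unsigned long long)stats->data[i]);
	}
	return 0;
}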
Signed-off-by: Inbar Karmy Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 19 ++++++++++++++- drivers/net/ethernet/mellanox/mlx5/core/fw.c | 3 +++ include/linux/mlx5/device.h | 4 ++++ include/linux/mlx5/mlx5_ifc.h | 28 +++++++++++++++++++--- 4 files changed, 50 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 5f0f3493d747..2553c58dcf1c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -754,7 +754,15 @@ static const struct counter_desc pport_per_prio_pfc_stats_desc[] = { { "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) }, }; +static const struct counter_desc pport_pfc_stall_stats_desc[] = { + { "tx_pause_storm_warning_events ", PPORT_PER_PRIO_OFF(device_stall_minor_watermark_cnt) }, + { "tx_pause_storm_error_events", PPORT_PER_PRIO_OFF(device_stall_critical_watermark_cnt) }, +}; + #define NUM_PPORT_PER_PRIO_PFC_COUNTERS ARRAY_SIZE(pport_per_prio_pfc_stats_desc) +#define NUM_PPORT_PFC_STALL_COUNTERS(priv) (ARRAY_SIZE(pport_pfc_stall_stats_desc) * \ + MLX5_CAP_PCAM_FEATURE((priv)->mdev, pfcc_mask) * \ + MLX5_CAP_DEBUG((priv)->mdev, stall_detect)) static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv) { @@ -790,7 +798,8 @@ static int mlx5e_grp_per_prio_pfc_get_num_stats(struct mlx5e_priv *priv) { return (mlx5e_query_global_pause_combined(priv) + hweight8(mlx5e_query_pfc_combined(priv))) * - NUM_PPORT_PER_PRIO_PFC_COUNTERS; + NUM_PPORT_PER_PRIO_PFC_COUNTERS + + NUM_PPORT_PFC_STALL_COUNTERS(priv); } static int mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv, @@ -818,6 +827,10 @@ static int mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv, } } + for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, + pport_pfc_stall_stats_desc[i].format); + return idx; } @@ -845,6 +858,10 @@ static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv, } } + for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++) + data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0], + pport_pfc_stall_stats_desc, i); + return idx; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index 9d11e92fb541..d7bb10ab2173 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -183,6 +183,9 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev) return err; } + if (MLX5_CAP_GEN(dev, debug)) + mlx5_core_get_caps(dev, MLX5_CAP_DEBUG); + if (MLX5_CAP_GEN(dev, pcam_reg)) mlx5_get_pcam_reg(dev); diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index e5258ee4e38b..4b5939c78cdd 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -1013,6 +1013,7 @@ enum mlx5_cap_type { MLX5_CAP_RESERVED, MLX5_CAP_VECTOR_CALC, MLX5_CAP_QOS, + MLX5_CAP_DEBUG, /* NUM OF CAP Types */ MLX5_CAP_NUM }; @@ -1140,6 +1141,9 @@ enum mlx5_qcam_feature_groups { #define MLX5_CAP_QOS(mdev, cap)\ MLX5_GET(qos_cap, mdev->caps.hca_cur[MLX5_CAP_QOS], cap) +#define MLX5_CAP_DEBUG(mdev, cap)\ + MLX5_GET(debug_cap, mdev->caps.hca_cur[MLX5_CAP_DEBUG], cap) + #define MLX5_CAP_PCAM_FEATURE(mdev, fld) \ MLX5_GET(pcam_reg, (mdev)->caps.pcam, feature_cap_mask.enhanced_features.fld) diff --git a/include/linux/mlx5/mlx5_ifc.h 
b/include/linux/mlx5/mlx5_ifc.h index 14ad84afe8ba..c7d50eccff9e 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -593,6 +593,16 @@ struct mlx5_ifc_qos_cap_bits { u8 reserved_at_100[0x700]; }; +struct mlx5_ifc_debug_cap_bits { + u8 reserved_at_0[0x20]; + + u8 reserved_at_20[0x2]; + u8 stall_detect[0x1]; + u8 reserved_at_23[0x1d]; + + u8 reserved_at_40[0x7c0]; +}; + struct mlx5_ifc_per_protocol_networking_offload_caps_bits { u8 csum_cap[0x1]; u8 vlan_cap[0x1]; @@ -855,7 +865,7 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 out_of_seq_cnt[0x1]; u8 vport_counters[0x1]; u8 retransmission_q_counters[0x1]; - u8 reserved_at_183[0x1]; + u8 debug[0x1]; u8 modify_rq_counter_set_id[0x1]; u8 rq_delay_drop[0x1]; u8 max_qp_cnt[0xa]; @@ -1572,7 +1582,17 @@ struct mlx5_ifc_eth_per_prio_grp_data_layout_bits { u8 rx_pause_transition_low[0x20]; - u8 reserved_at_3c0[0x400]; + u8 reserved_at_3c0[0x40]; + + u8 device_stall_minor_watermark_cnt_high[0x20]; + + u8 device_stall_minor_watermark_cnt_low[0x20]; + + u8 device_stall_critical_watermark_cnt_high[0x20]; + + u8 device_stall_critical_watermark_cnt_low[0x20]; + + u8 reserved_at_480[0x340]; }; struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits { @@ -7874,8 +7894,10 @@ struct mlx5_ifc_peir_reg_bits { }; struct mlx5_ifc_pcam_enhanced_features_bits { - u8 reserved_at_0[0x7b]; + u8 reserved_at_0[0x76]; + u8 pfcc_mask[0x1]; + u8 reserved_at_77[0x4]; u8 rx_buffer_fullness_counters[0x1]; u8 ptys_connector_type[0x1]; u8 reserved_at_7d[0x1]; -- cgit v1.2.3 From 2afa609f5c970185a8cae73f6a4caadf97fbea54 Mon Sep 17 00:00:00 2001 From: Inbar Karmy Date: Mon, 20 Nov 2017 18:06:20 +0200 Subject: net/mlx5e: PFC stall prevention support Implement set/get functions to configure PFC stall prevention timeout by tunables api through ethtool. By default the stall prevention timeout is configured to 8 sec. Timeout range is: 80-8000 msec. Enabling stall prevention with the auto timeout will set the timeout to 100 msec. 
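The new tunable rides on the generic ethtool_tunable plumbing, so userspace configures it exactly like rx-copybreak or tx-copybreak; newer ethtool releases also expose it through --set-tunable. A minimal sketch (illustrative; error handling trimmed) of setting the timeout by hand, where the uapi constants PFC_STORM_PREVENTION_AUTO (0xffff) select the 100 msec default and PFC_STORM_PREVENTION_DISABLE (0) turn the mechanism off:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int set_pfc_prevention_tout(const char *ifname, uint16_t msec)
{
	struct ethtool_tunable *tuna;
	struct ifreq ifr = {0};
	int fd, ret;

	/* one ethtool_tunable header followed by the u16 payload */
	tuna = calloc(1, sizeof(*tuna) + sizeof(uint16_t));
	tuna->cmd = ETHTOOL_STUNABLE;
	tuna->id = ETHTOOL_PFC_PREVENTION_TOUT;
	tuna->type_id = ETHTOOL_TUNABLE_U16;
	tuna->len = sizeof(uint16_t);
	memcpy(tuna->data, &msec, sizeof(uint16_t));

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)tuna;
	ret = ioctl(fd, SIOCETHTOOL, &ifr);	/* rejected outside the 80-8000 msec range */
	free(tuna);
	return ret;
}

Reading the current value back is symmetric, using ETHTOOL_GTUNABLE with the same id and type.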
Signed-off-by: Inbar Karmy Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/en_ethtool.c | 57 +++++++++++++++++++ drivers/net/ethernet/mellanox/mlx5/core/port.c | 64 +++++++++++++++++++--- include/linux/mlx5/mlx5_ifc.h | 17 ++++-- include/linux/mlx5/port.h | 6 ++ 4 files changed, 132 insertions(+), 12 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index cc8048f68f11..62061fd23143 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -1066,6 +1066,57 @@ static int mlx5e_get_rxnfc(struct net_device *netdev, return err; } +#define MLX5E_PFC_PREVEN_AUTO_TOUT_MSEC 100 +#define MLX5E_PFC_PREVEN_TOUT_MAX_MSEC 8000 +#define MLX5E_PFC_PREVEN_MINOR_PRECENT 85 +#define MLX5E_PFC_PREVEN_TOUT_MIN_MSEC 80 +#define MLX5E_DEVICE_STALL_MINOR_WATERMARK(critical_tout) \ + max_t(u16, MLX5E_PFC_PREVEN_TOUT_MIN_MSEC, \ + (critical_tout * MLX5E_PFC_PREVEN_MINOR_PRECENT) / 100) + +static int mlx5e_get_pfc_prevention_tout(struct net_device *netdev, + u16 *pfc_prevention_tout) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + + if (!MLX5_CAP_PCAM_FEATURE((priv)->mdev, pfcc_mask) || + !MLX5_CAP_DEBUG((priv)->mdev, stall_detect)) + return -EOPNOTSUPP; + + return mlx5_query_port_stall_watermark(mdev, pfc_prevention_tout, NULL); +} + +static int mlx5e_set_pfc_prevention_tout(struct net_device *netdev, + u16 pfc_preven) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + u16 critical_tout; + u16 minor; + + if (!MLX5_CAP_PCAM_FEATURE((priv)->mdev, pfcc_mask) || + !MLX5_CAP_DEBUG((priv)->mdev, stall_detect)) + return -EOPNOTSUPP; + + critical_tout = (pfc_preven == PFC_STORM_PREVENTION_AUTO) ? 
+ MLX5E_PFC_PREVEN_AUTO_TOUT_MSEC : + pfc_preven; + + if (critical_tout != PFC_STORM_PREVENTION_DISABLE && + (critical_tout > MLX5E_PFC_PREVEN_TOUT_MAX_MSEC || + critical_tout < MLX5E_PFC_PREVEN_TOUT_MIN_MSEC)) { + netdev_info(netdev, "%s: pfc prevention tout not in range (%d-%d)\n", + __func__, MLX5E_PFC_PREVEN_TOUT_MIN_MSEC, + MLX5E_PFC_PREVEN_TOUT_MAX_MSEC); + return -EINVAL; + } + + minor = MLX5E_DEVICE_STALL_MINOR_WATERMARK(critical_tout); + return mlx5_set_port_stall_watermark(mdev, critical_tout, + minor); +} + static int mlx5e_get_tunable(struct net_device *dev, const struct ethtool_tunable *tuna, void *data) @@ -1077,6 +1128,9 @@ static int mlx5e_get_tunable(struct net_device *dev, case ETHTOOL_TX_COPYBREAK: *(u32 *)data = priv->channels.params.tx_max_inline; break; + case ETHTOOL_PFC_PREVENTION_TOUT: + err = mlx5e_get_pfc_prevention_tout(dev, data); + break; default: err = -EINVAL; break; @@ -1118,6 +1172,9 @@ static int mlx5e_set_tunable(struct net_device *dev, break; mlx5e_switch_priv_channels(priv, &new_channels, NULL); + break; + case ETHTOOL_PFC_PREVENTION_TOUT: + err = mlx5e_set_pfc_prevention_tout(dev, *(u16 *)data); break; default: err = -EINVAL; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index c37d00cd472a..fa9d0760dd36 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -483,6 +483,17 @@ int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev, } EXPORT_SYMBOL_GPL(mlx5_core_query_ib_ppcnt); +static int mlx5_query_pfcc_reg(struct mlx5_core_dev *dev, u32 *out, + u32 out_size) +{ + u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0}; + + MLX5_SET(pfcc_reg, in, local_port, 1); + + return mlx5_core_access_reg(dev, in, sizeof(in), out, + out_size, MLX5_REG_PFCC, 0, 0); +} + int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause) { u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0}; @@ -500,13 +511,10 @@ EXPORT_SYMBOL_GPL(mlx5_set_port_pause); int mlx5_query_port_pause(struct mlx5_core_dev *dev, u32 *rx_pause, u32 *tx_pause) { - u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0}; u32 out[MLX5_ST_SZ_DW(pfcc_reg)]; int err; - MLX5_SET(pfcc_reg, in, local_port, 1); - err = mlx5_core_access_reg(dev, in, sizeof(in), out, - sizeof(out), MLX5_REG_PFCC, 0, 0); + err = mlx5_query_pfcc_reg(dev, out, sizeof(out)); if (err) return err; @@ -520,6 +528,49 @@ int mlx5_query_port_pause(struct mlx5_core_dev *dev, } EXPORT_SYMBOL_GPL(mlx5_query_port_pause); +int mlx5_set_port_stall_watermark(struct mlx5_core_dev *dev, + u16 stall_critical_watermark, + u16 stall_minor_watermark) +{ + u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0}; + u32 out[MLX5_ST_SZ_DW(pfcc_reg)]; + + MLX5_SET(pfcc_reg, in, local_port, 1); + MLX5_SET(pfcc_reg, in, pptx_mask_n, 1); + MLX5_SET(pfcc_reg, in, pprx_mask_n, 1); + MLX5_SET(pfcc_reg, in, ppan_mask_n, 1); + MLX5_SET(pfcc_reg, in, critical_stall_mask, 1); + MLX5_SET(pfcc_reg, in, minor_stall_mask, 1); + MLX5_SET(pfcc_reg, in, device_stall_critical_watermark, + stall_critical_watermark); + MLX5_SET(pfcc_reg, in, device_stall_minor_watermark, stall_minor_watermark); + + return mlx5_core_access_reg(dev, in, sizeof(in), out, + sizeof(out), MLX5_REG_PFCC, 0, 1); +} + +int mlx5_query_port_stall_watermark(struct mlx5_core_dev *dev, + u16 *stall_critical_watermark, + u16 *stall_minor_watermark) +{ + u32 out[MLX5_ST_SZ_DW(pfcc_reg)]; + int err; + + err = mlx5_query_pfcc_reg(dev, out, sizeof(out)); + if (err) + return err; + + if (stall_critical_watermark) + 
*stall_critical_watermark = MLX5_GET(pfcc_reg, out, + device_stall_critical_watermark); + + if (stall_minor_watermark) + *stall_minor_watermark = MLX5_GET(pfcc_reg, out, + device_stall_minor_watermark); + + return 0; +} + int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx) { u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0}; @@ -538,13 +589,10 @@ EXPORT_SYMBOL_GPL(mlx5_set_port_pfc); int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx) { - u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0}; u32 out[MLX5_ST_SZ_DW(pfcc_reg)]; int err; - MLX5_SET(pfcc_reg, in, local_port, 1); - err = mlx5_core_access_reg(dev, in, sizeof(in), out, - sizeof(out), MLX5_REG_PFCC, 0, 0); + err = mlx5_query_pfcc_reg(dev, out, sizeof(out)); if (err) return err; diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index c7d50eccff9e..f3200a9696d6 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -7833,7 +7833,11 @@ struct mlx5_ifc_pifr_reg_bits { struct mlx5_ifc_pfcc_reg_bits { u8 reserved_at_0[0x8]; u8 local_port[0x8]; - u8 reserved_at_10[0x10]; + u8 reserved_at_10[0xb]; + u8 ppan_mask_n[0x1]; + u8 minor_stall_mask[0x1]; + u8 critical_stall_mask[0x1]; + u8 reserved_at_1e[0x2]; u8 ppan[0x4]; u8 reserved_at_24[0x4]; @@ -7843,17 +7847,22 @@ struct mlx5_ifc_pfcc_reg_bits { u8 pptx[0x1]; u8 aptx[0x1]; - u8 reserved_at_42[0x6]; + u8 pptx_mask_n[0x1]; + u8 reserved_at_43[0x5]; u8 pfctx[0x8]; u8 reserved_at_50[0x10]; u8 pprx[0x1]; u8 aprx[0x1]; - u8 reserved_at_62[0x6]; + u8 pprx_mask_n[0x1]; + u8 reserved_at_63[0x5]; u8 pfcrx[0x8]; u8 reserved_at_70[0x10]; - u8 reserved_at_80[0x80]; + u8 device_stall_minor_watermark[0x10]; + u8 device_stall_critical_watermark[0x10]; + + u8 reserved_at_a0[0x60]; }; struct mlx5_ifc_pelc_reg_bits { diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h index 035f0d4dc9fe..34aed6032f86 100644 --- a/include/linux/mlx5/port.h +++ b/include/linux/mlx5/port.h @@ -151,6 +151,12 @@ int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx); int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx); +int mlx5_set_port_stall_watermark(struct mlx5_core_dev *dev, + u16 stall_critical_watermark, + u16 stall_minor_watermark); +int mlx5_query_port_stall_watermark(struct mlx5_core_dev *dev, + u16 *stall_critical_watermark, u16 *stall_minor_watermark); + int mlx5_max_tc(struct mlx5_core_dev *mdev); int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc); -- cgit v1.2.3 From 61c5b5c9178288a4caa3e39095aafb391c5100f6 Mon Sep 17 00:00:00 2001 From: Moshe Shemesh Date: Sun, 7 Jan 2018 16:45:27 +0200 Subject: net/mlx5: Add support for QUERY_VNIC_ENV command Add support for new FW command QUERY_VNIC_ENV. The command is used by the driver to query vnic diagnostic statistics from FW. 
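Issuing the new command follows the usual mailbox pattern; a minimal sketch of a caller (illustrative only; the function name is made up, and later patches in this series add the real users) reading one of the diagnostic counters:

static int query_vnic_env_example(struct mlx5_core_dev *mdev,
				  u64 *rx_steer_discard)
{
	u32 out[MLX5_ST_SZ_DW(query_vnic_env_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {0};
	int err;

	MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV);
	MLX5_SET(query_vnic_env_in, in, op_mod,
		 MLX5_QUERY_VNIC_ENV_IN_OP_MOD_VPORT_DIAG_STATISTICS);

	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	/* all diagnostic counters live under the vport_env sub-struct */
	*rx_steer_discard = MLX5_GET64(query_vnic_env_out, out,
				       vport_env.nic_receive_steering_discard);
	return 0;
}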
Signed-off-by: Moshe Shemesh Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 2 ++ include/linux/mlx5/mlx5_ifc.h | 50 ++++++++++++++++++++++++++- 2 files changed, 51 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index e9a1fbcc4adf..fe5428667ad1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -359,6 +359,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT: case MLX5_CMD_OP_QUERY_HCA_VPORT_GID: case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY: + case MLX5_CMD_OP_QUERY_VNIC_ENV: case MLX5_CMD_OP_QUERY_VPORT_COUNTER: case MLX5_CMD_OP_ALLOC_Q_COUNTER: case MLX5_CMD_OP_QUERY_Q_COUNTER: @@ -501,6 +502,7 @@ const char *mlx5_command_str(int command) MLX5_COMMAND_STR_CASE(MODIFY_HCA_VPORT_CONTEXT); MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_GID); MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_PKEY); + MLX5_COMMAND_STR_CASE(QUERY_VNIC_ENV); MLX5_COMMAND_STR_CASE(QUERY_VPORT_COUNTER); MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER); MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER); diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index f3200a9696d6..52e373dd2679 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -143,6 +143,7 @@ enum { MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT = 0x763, MLX5_CMD_OP_QUERY_HCA_VPORT_GID = 0x764, MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY = 0x765, + MLX5_CMD_OP_QUERY_VNIC_ENV = 0x76f, MLX5_CMD_OP_QUERY_VPORT_COUNTER = 0x770, MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771, MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772, @@ -875,7 +876,7 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 vhca_group_manager[0x1]; u8 ib_virt[0x1]; u8 eth_virt[0x1]; - u8 reserved_at_1a4[0x1]; + u8 vnic_env_queue_counters[0x1]; u8 ets[0x1]; u8 nic_flow_table[0x1]; u8 eswitch_flow_table[0x1]; @@ -2386,6 +2387,24 @@ struct mlx5_ifc_xrc_srqc_bits { u8 reserved_at_180[0x80]; }; +struct mlx5_ifc_vnic_diagnostic_statistics_bits { + u8 counter_error_queues[0x20]; + + u8 total_error_queues[0x20]; + + u8 send_queue_priority_update_flow[0x20]; + + u8 reserved_at_60[0x20]; + + u8 nic_receive_steering_discard[0x40]; + + u8 receive_discard_vport_down[0x40]; + + u8 transmit_discard_vport_down[0x40]; + + u8 reserved_at_140[0xec0]; +}; + struct mlx5_ifc_traffic_counter_bits { u8 packets[0x40]; @@ -3661,6 +3680,35 @@ struct mlx5_ifc_query_vport_state_in_bits { u8 reserved_at_60[0x20]; }; +struct mlx5_ifc_query_vnic_env_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; + + struct mlx5_ifc_vnic_diagnostic_statistics_bits vport_env; +}; + +enum { + MLX5_QUERY_VNIC_ENV_IN_OP_MOD_VPORT_DIAG_STATISTICS = 0x0, +}; + +struct mlx5_ifc_query_vnic_env_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 other_vport[0x1]; + u8 reserved_at_41[0xf]; + u8 vport_number[0x10]; + + u8 reserved_at_60[0x20]; +}; + struct mlx5_ifc_query_vport_counter_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; -- cgit v1.2.3 From 5c298143be17f5100656b9c140af672c644116d9 Mon Sep 17 00:00:00 2001 From: Moshe Shemesh Date: Tue, 26 Dec 2017 16:46:29 +0200 Subject: net/mlx5e: Add vnic steering drop statistics Added the following packets drop counter: Rx steering missed dropped packets - counts packets which were dropped due to miss on NIC rx steering 
rules. This counter will be shown on ethtool as a new counter called rx_steer_missed_packets. Signed-off-by: Moshe Shemesh Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 65 ++++++++++++++++++++++ drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 5 ++ include/linux/mlx5/mlx5_ifc.h | 3 +- 3 files changed, 72 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 2553c58dcf1c..552510c03ef2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -211,6 +211,65 @@ static void mlx5e_grp_q_update_stats(struct mlx5e_priv *priv) qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out, out, out_of_buffer); } +#define VNIC_ENV_OFF(c) MLX5_BYTE_OFF(query_vnic_env_out, c) +static const struct counter_desc vnic_env_stats_desc[] = { + { "rx_steer_missed_packets", + VNIC_ENV_OFF(vport_env.nic_receive_steering_discard) }, +}; + +#define NUM_VNIC_ENV_COUNTERS ARRAY_SIZE(vnic_env_stats_desc) + +static int mlx5e_grp_vnic_env_get_num_stats(struct mlx5e_priv *priv) +{ + return MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard) ? + NUM_VNIC_ENV_COUNTERS : 0; +} + +static int mlx5e_grp_vnic_env_fill_strings(struct mlx5e_priv *priv, u8 *data, + int idx) +{ + int i; + + if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard)) + return idx; + + for (i = 0; i < NUM_VNIC_ENV_COUNTERS; i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, + vnic_env_stats_desc[i].format); + return idx; +} + +static int mlx5e_grp_vnic_env_fill_stats(struct mlx5e_priv *priv, u64 *data, + int idx) +{ + int i; + + if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard)) + return idx; + + for (i = 0; i < NUM_VNIC_ENV_COUNTERS; i++) + data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vnic.query_vnic_env_out, + vnic_env_stats_desc, i); + return idx; +} + +static void mlx5e_grp_vnic_env_update_stats(struct mlx5e_priv *priv) +{ + u32 *out = (u32 *)priv->stats.vnic.query_vnic_env_out; + int outlen = MLX5_ST_SZ_BYTES(query_vnic_env_out); + u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {0}; + struct mlx5_core_dev *mdev = priv->mdev; + + if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard)) + return; + + MLX5_SET(query_vnic_env_in, in, opcode, + MLX5_CMD_OP_QUERY_VNIC_ENV); + MLX5_SET(query_vnic_env_in, in, op_mod, 0); + MLX5_SET(query_vnic_env_in, in, other_vport, 0); + mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen); +} + #define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c) static const struct counter_desc vport_stats_desc[] = { { "rx_vport_unicast_packets", @@ -1111,6 +1170,12 @@ const struct mlx5e_stats_grp mlx5e_stats_grps[] = { .update_stats_mask = MLX5E_NDO_UPDATE_STATS, .update_stats = mlx5e_grp_q_update_stats, }, + { + .get_num_stats = mlx5e_grp_vnic_env_get_num_stats, + .fill_strings = mlx5e_grp_vnic_env_fill_strings, + .fill_stats = mlx5e_grp_vnic_env_fill_stats, + .update_stats = mlx5e_grp_vnic_env_update_stats, + }, { .get_num_stats = mlx5e_grp_vport_get_num_stats, .fill_strings = mlx5e_grp_vport_fill_strings, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 0b3320a2b072..847388ff8ca8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -99,6 +99,10 @@ struct mlx5e_qcounter_stats { u32 rx_out_of_buffer; }; 
+struct mlx5e_vnic_env_stats { + __be64 query_vnic_env_out[MLX5_ST_SZ_QW(query_vnic_env_out)]; +}; + #define VPORT_COUNTER_GET(vstats, c) MLX5_GET64(query_vport_counter_out, \ vstats->query_vport_out, c) @@ -201,6 +205,7 @@ struct mlx5e_ch_stats { struct mlx5e_stats { struct mlx5e_sw_stats sw; struct mlx5e_qcounter_stats qcnt; + struct mlx5e_vnic_env_stats vnic; struct mlx5e_vport_stats vport; struct mlx5e_pport_stats pport; struct rtnl_link_stats64 vf_vport; diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 52e373dd2679..9202113f552c 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -1008,7 +1008,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_330[0xb]; u8 log_max_xrcd[0x5]; - u8 reserved_at_340[0x8]; + u8 nic_receive_steering_discard[0x1]; + u8 reserved_at_341[0x7]; u8 log_max_flow_counter_bulk[0x8]; u8 max_flow_counter_15_0[0x10]; -- cgit v1.2.3 From aaabd0783b1cf46569f5fc1c60d79709815497fc Mon Sep 17 00:00:00 2001 From: Moshe Shemesh Date: Sun, 14 Jan 2018 00:56:25 +0200 Subject: net/mlx5: Add packet dropped while vport down statistics Added the following packets dropped while vport down statistics: Rx dropped while vport down - counts packets which were steered by e-switch to a vport, but dropped since the vport was down. This counter will be shown on ip link tool as part of the vport rx_dropped counter. Tx dropped while vport down - counts packets which were transmitted by a vport, but dropped due to vport logical link down. This counter will be shown on ip link tool as part of the vport tx_dropped counter. The counters are read from FW by command QUERY_VNIC_ENV. Signed-off-by: Moshe Shemesh Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 31 +++++++++++++++++++---- drivers/net/ethernet/mellanox/mlx5/core/vport.c | 26 +++++++++++++++++++ include/linux/mlx5/mlx5_ifc.h | 4 ++- include/linux/mlx5/vport.h | 3 +++ 4 files changed, 58 insertions(+), 6 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 77b7272eaaa8..332bc56306bf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -2096,17 +2096,19 @@ unlock: return err; } -static void mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev, - int vport_idx, - struct mlx5_vport_drop_stats *stats) +static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev, + int vport_idx, + struct mlx5_vport_drop_stats *stats) { struct mlx5_eswitch *esw = dev->priv.eswitch; struct mlx5_vport *vport = &esw->vports[vport_idx]; + u64 rx_discard_vport_down, tx_discard_vport_down; u64 bytes = 0; u16 idx = 0; + int err = 0; if (!vport->enabled || esw->mode != SRIOV_LEGACY) - return; + return 0; if (vport->egress.drop_counter) { idx = vport->egress.drop_counter->id; @@ -2117,6 +2119,23 @@ static void mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev, idx = vport->ingress.drop_counter->id; mlx5_fc_query(dev, idx, &stats->tx_dropped, &bytes); } + + if (!MLX5_CAP_GEN(dev, receive_discard_vport_down) && + !MLX5_CAP_GEN(dev, transmit_discard_vport_down)) + return 0; + + err = mlx5_query_vport_down_stats(dev, vport_idx, + &rx_discard_vport_down, + &tx_discard_vport_down); + if (err) + return err; + + if (MLX5_CAP_GEN(dev, receive_discard_vport_down)) + stats->rx_dropped += rx_discard_vport_down; + if (MLX5_CAP_GEN(dev, 
transmit_discard_vport_down)) + stats->tx_dropped += tx_discard_vport_down; + + return 0; } int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, @@ -2180,7 +2199,9 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, vf_stats->broadcast = MLX5_GET_CTR(out, received_eth_broadcast.packets); - mlx5_eswitch_query_vport_drop_stats(esw->dev, vport, &stats); + err = mlx5_eswitch_query_vport_drop_stats(esw->dev, vport, &stats); + if (err) + goto free_out; vf_stats->rx_dropped = stats.rx_dropped; vf_stats->tx_dropped = stats.tx_dropped; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index dfe36cf6fbea..177e076b8d17 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -1070,6 +1070,32 @@ free: } EXPORT_SYMBOL_GPL(mlx5_core_query_vport_counter); +int mlx5_query_vport_down_stats(struct mlx5_core_dev *mdev, u16 vport, + u64 *rx_discard_vport_down, + u64 *tx_discard_vport_down) +{ + u32 out[MLX5_ST_SZ_DW(query_vnic_env_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {0}; + int err; + + MLX5_SET(query_vnic_env_in, in, opcode, + MLX5_CMD_OP_QUERY_VNIC_ENV); + MLX5_SET(query_vnic_env_in, in, op_mod, 0); + MLX5_SET(query_vnic_env_in, in, vport_number, vport); + if (vport) + MLX5_SET(query_vnic_env_in, in, other_vport, 1); + + err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); + if (err) + return err; + + *rx_discard_vport_down = MLX5_GET64(query_vnic_env_out, out, + vport_env.receive_discard_vport_down); + *tx_discard_vport_down = MLX5_GET64(query_vnic_env_out, out, + vport_env.transmit_discard_vport_down); + return 0; +} + int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev, u8 other_vport, u8 port_num, int vf, diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 9202113f552c..1f3483d40055 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -1009,7 +1009,9 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 log_max_xrcd[0x5]; u8 nic_receive_steering_discard[0x1]; - u8 reserved_at_341[0x7]; + u8 receive_discard_vport_down[0x1]; + u8 transmit_discard_vport_down[0x1]; + u8 reserved_at_343[0x5]; u8 log_max_flow_counter_bulk[0x8]; u8 max_flow_counter_15_0[0x10]; diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h index 64e193e87394..9208cb8809ac 100644 --- a/include/linux/mlx5/vport.h +++ b/include/linux/mlx5/vport.h @@ -107,6 +107,9 @@ int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev, int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev); int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev); +int mlx5_query_vport_down_stats(struct mlx5_core_dev *mdev, u16 vport, + u64 *rx_discard_vport_down, + u64 *tx_discard_vport_down); int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport, int vf, u8 port_num, void *out, size_t out_sz); -- cgit v1.2.3 From 7cbaf9a3ea09ea36c7101e7b244f734f9c09c72b Mon Sep 17 00:00:00 2001 From: Moshe Shemesh Date: Thu, 8 Feb 2018 15:09:57 +0200 Subject: net/mlx5e: Add interface down dropped packets statistics Added the following packets drop counter: Rx interface down dropped packets - counts packets which were received while the ETH interface was down. This counter will be shown on ethtool as a new counter called rx_if_down_packets. The implementation allocates a q_counter for drop rq which gets all the received traffic while the interface is down. 
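The q_counter machinery the patch builds on is the generic alloc/query/dealloc triple already used for the existing rx_out_of_buffer counter; schematically (an illustrative function, not part of the patch):

static int drop_rq_counter_example(struct mlx5_core_dev *mdev)
{
	u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {0};
	u16 counter_id;
	int err;

	err = mlx5_core_alloc_q_counter(mdev, &counter_id);
	if (err)
		return err;

	/* the RQC's counter_set_id field is pointed at counter_id when the
	 * drop RQ is created, so everything it receives is accounted here
	 */

	err = mlx5_core_query_q_counter(mdev, counter_id, 0, out, sizeof(out));
	if (!err)
		mlx5_core_dbg(mdev, "drop RQ dropped %u packets\n",
			      MLX5_GET(query_q_counter_out, out, out_of_buffer));

	mlx5_core_dealloc_q_counter(mdev, counter_id);
	return err;
}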
Signed-off-by: Moshe Shemesh Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 3 +- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 46 +++++++++++++-------- drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 48 ++++++++++++++++------ drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 1 + 4 files changed, 69 insertions(+), 29 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 4c9360b25532..48e0b2a747d9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -781,7 +781,8 @@ struct mlx5e_priv { struct net_device *netdev; struct mlx5e_stats stats; struct hwtstamp_config tstamp; - u16 q_counter; + u16 q_counter; + u16 drop_rq_q_counter; #ifdef CONFIG_MLX5_CORE_EN_DCB struct mlx5e_dcbx dcbx; #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index da94c8cba5ee..f8bc3bcdf046 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -615,8 +615,7 @@ static int mlx5e_create_rq(struct mlx5e_rq *rq, static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state) { - struct mlx5e_channel *c = rq->channel; - struct mlx5_core_dev *mdev = c->mdev; + struct mlx5_core_dev *mdev = rq->mdev; void *in; void *rqc; @@ -1768,14 +1767,16 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv, param->wq.linear = 1; } -static void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev, +static void mlx5e_build_drop_rq_param(struct mlx5e_priv *priv, struct mlx5e_rq_param *param) { + struct mlx5_core_dev *mdev = priv->mdev; void *rqc = param->rqc; void *wq = MLX5_ADDR_OF(rqc, rqc, wq); MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST); MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe))); + MLX5_SET(rqc, rqc, counter_set_id, priv->drop_rq_q_counter); param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev); } @@ -2643,15 +2644,16 @@ static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev, return mlx5e_alloc_cq_common(mdev, param, cq); } -static int mlx5e_open_drop_rq(struct mlx5_core_dev *mdev, +static int mlx5e_open_drop_rq(struct mlx5e_priv *priv, struct mlx5e_rq *drop_rq) { + struct mlx5_core_dev *mdev = priv->mdev; struct mlx5e_cq_param cq_param = {}; struct mlx5e_rq_param rq_param = {}; struct mlx5e_cq *cq = &drop_rq->cq; int err; - mlx5e_build_drop_rq_param(mdev, &rq_param); + mlx5e_build_drop_rq_param(priv, &rq_param); err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param); if (err) @@ -2669,6 +2671,10 @@ static int mlx5e_open_drop_rq(struct mlx5_core_dev *mdev, if (err) goto err_free_rq; + err = mlx5e_modify_rq_state(drop_rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY); + if (err) + mlx5_core_warn(priv->mdev, "modify_rq_state failed, rx_if_down_packets won't be counted %d\n", err); + return 0; err_free_rq: @@ -4183,7 +4189,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) mlx5e_ipsec_build_netdev(priv); } -static void mlx5e_create_q_counter(struct mlx5e_priv *priv) +static void mlx5e_create_q_counters(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; int err; @@ -4193,14 +4199,21 @@ static void mlx5e_create_q_counter(struct mlx5e_priv *priv) mlx5_core_warn(mdev, "alloc queue counter failed, %d\n", err); priv->q_counter = 0; } + + err = mlx5_core_alloc_q_counter(mdev, 
&priv->drop_rq_q_counter); + if (err) { + mlx5_core_warn(mdev, "alloc drop RQ counter failed, %d\n", err); + priv->drop_rq_q_counter = 0; + } } -static void mlx5e_destroy_q_counter(struct mlx5e_priv *priv) +static void mlx5e_destroy_q_counters(struct mlx5e_priv *priv) { - if (!priv->q_counter) - return; + if (priv->q_counter) + mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter); - mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter); + if (priv->drop_rq_q_counter) + mlx5_core_dealloc_q_counter(priv->mdev, priv->drop_rq_q_counter); } static void mlx5e_nic_init(struct mlx5_core_dev *mdev, @@ -4439,18 +4452,18 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv) if (err) goto out; - err = mlx5e_open_drop_rq(mdev, &priv->drop_rq); + mlx5e_create_q_counters(priv); + + err = mlx5e_open_drop_rq(priv, &priv->drop_rq); if (err) { mlx5_core_err(mdev, "open drop rq failed, %d\n", err); - goto err_cleanup_tx; + goto err_destroy_q_counters; } err = profile->init_rx(priv); if (err) goto err_close_drop_rq; - mlx5e_create_q_counter(priv); - if (profile->enable) profile->enable(priv); @@ -4459,7 +4472,8 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv) err_close_drop_rq: mlx5e_close_drop_rq(&priv->drop_rq); -err_cleanup_tx: +err_destroy_q_counters: + mlx5e_destroy_q_counters(priv); profile->cleanup_tx(priv); out: @@ -4476,9 +4490,9 @@ void mlx5e_detach_netdev(struct mlx5e_priv *priv) profile->disable(priv); flush_workqueue(priv->wq); - mlx5e_destroy_q_counter(priv); profile->cleanup_rx(priv); mlx5e_close_drop_rq(&priv->drop_rq); + mlx5e_destroy_q_counters(priv); profile->cleanup_tx(priv); cancel_delayed_work_sync(&priv->update_stats_work); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 552510c03ef2..c0dab9a8969e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -170,11 +170,24 @@ static const struct counter_desc q_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) }, }; +static const struct counter_desc drop_rq_stats_desc[] = { + { MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_if_down_packets) }, +}; + #define NUM_Q_COUNTERS ARRAY_SIZE(q_stats_desc) +#define NUM_DROP_RQ_COUNTERS ARRAY_SIZE(drop_rq_stats_desc) static int mlx5e_grp_q_get_num_stats(struct mlx5e_priv *priv) { - return priv->q_counter ? 
NUM_Q_COUNTERS : 0; + int num_stats = 0; + + if (priv->q_counter) + num_stats += NUM_Q_COUNTERS; + + if (priv->drop_rq_q_counter) + num_stats += NUM_DROP_RQ_COUNTERS; + + return num_stats; } static int mlx5e_grp_q_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx) @@ -182,7 +195,13 @@ static int mlx5e_grp_q_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx) int i; for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++) - strcpy(data + (idx++) * ETH_GSTRING_LEN, q_stats_desc[i].format); + strcpy(data + (idx++) * ETH_GSTRING_LEN, + q_stats_desc[i].format); + + for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, + drop_rq_stats_desc[i].format); + return idx; } @@ -191,7 +210,11 @@ static int mlx5e_grp_q_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx) int i; for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++) - data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt, q_stats_desc, i); + data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt, + q_stats_desc, i); + for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++) + data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt, + drop_rq_stats_desc, i); return idx; } @@ -199,16 +222,17 @@ static void mlx5e_grp_q_update_stats(struct mlx5e_priv *priv) { struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt; u32 out[MLX5_ST_SZ_DW(query_q_counter_out)]; - int err; - - if (!priv->q_counter) - return; - - err = mlx5_core_query_q_counter(priv->mdev, priv->q_counter, 0, out, sizeof(out)); - if (err) - return; - qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out, out, out_of_buffer); + if (priv->q_counter && + !mlx5_core_query_q_counter(priv->mdev, priv->q_counter, 0, out, + sizeof(out))) + qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out, + out, out_of_buffer); + if (priv->drop_rq_q_counter && + !mlx5_core_query_q_counter(priv->mdev, priv->drop_rq_q_counter, 0, + out, sizeof(out))) + qcnt->rx_if_down_packets = MLX5_GET(query_q_counter_out, out, + out_of_buffer); } #define VNIC_ENV_OFF(c) MLX5_BYTE_OFF(query_vnic_env_out, c) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 847388ff8ca8..43a72efa28c0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -97,6 +97,7 @@ struct mlx5e_sw_stats { struct mlx5e_qcounter_stats { u32 rx_out_of_buffer; + u32 rx_if_down_packets; }; struct mlx5e_vnic_env_stats { -- cgit v1.2.3 From aa24670ef66cb38e667d7bb039f5ce29d926f2e0 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Tue, 30 Jan 2018 14:13:28 +0000 Subject: net/mlx5: E-Switch, Use same source for offloaded actions check Align the checks for modify header and encap actions with the rest of the code. 
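The underlying rule is that once the action bits have been copied (and possibly masked) into flow_act.action, that copy is the single source of truth; mixing tests against attr->action can silently diverge from what is actually programmed. Schematically:

	struct mlx5_flow_act flow_act = {};

	flow_act.action = attr->action;	/* copy once, mask here if needed */

	/* from this point on, test only the (possibly masked) copy */
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_id = attr->mod_hdr_id;
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
		flow_act.encap_id = attr->encap_id;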
Signed-off-by: Or Gerlitz Reviewed-by: Mark Bloch Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 0a8303c1b52f..21ebe3e80e6e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -88,10 +88,10 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS; - if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) + if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) flow_act.modify_id = attr->mod_hdr_id; - if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) + if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) flow_act.encap_id = attr->encap_id; rule = mlx5_add_flow_rules((struct mlx5_flow_table *)esw->fdb_table.fdb, -- cgit v1.2.3 From 0c06897a9ac7e2db9ad2df15bc6511e8ab88378f Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Sun, 28 Jan 2018 20:14:20 +0200 Subject: net/mlx5: Add core support for vlan push/pop steering action Newer NICs (ConnectX-5 and onward) can apply vlan pop or push as an action taking place during flow steering. Add the core bits for that. Signed-off-by: Or Gerlitz Reviewed-by: Mark Bloch Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h | 2 ++ drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 3 --- drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 10 +++++++++- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 4 +++- include/linux/mlx5/fs.h | 7 +++++++ include/linux/mlx5/mlx5_ifc.h | 16 ++++++++++++++-- 6 files changed, 35 insertions(+), 7 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h index a6ba57fbb414..09f178a3fcab 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h @@ -136,6 +136,8 @@ TRACE_EVENT(mlx5_fs_del_fg, {MLX5_FLOW_CONTEXT_ACTION_ENCAP, "ENCAP"},\ {MLX5_FLOW_CONTEXT_ACTION_DECAP, "DECAP"},\ {MLX5_FLOW_CONTEXT_ACTION_MOD_HDR, "MOD_HDR"},\ + {MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH, "VLAN_PUSH"},\ + {MLX5_FLOW_CONTEXT_ACTION_VLAN_POP, "VLAN_POP"},\ {MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO, "NEXT_PRIO"} TRACE_EVENT(mlx5_fs_set_fte, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 98d2177d0806..a435eb7971c6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -227,9 +227,6 @@ enum { SET_VLAN_INSERT = BIT(1) }; -#define MLX5_FLOW_CONTEXT_ACTION_VLAN_POP 0x4000 -#define MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH 0x8000 - struct mlx5_esw_flow_attr { struct mlx5_eswitch_rep *in_rep; struct mlx5_eswitch_rep *out_rep; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index 645f83cac34d..ef5afd7c9325 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -317,7 +317,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, fte->dests_size * 
MLX5_ST_SZ_BYTES(dest_format_struct); u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0}; struct mlx5_flow_rule *dst; - void *in_flow_context; + void *in_flow_context, *vlan; void *in_match_value; void *in_dests; u32 *in; @@ -340,11 +340,19 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context); MLX5_SET(flow_context, in_flow_context, group_id, group_id); + MLX5_SET(flow_context, in_flow_context, flow_tag, fte->action.flow_tag); MLX5_SET(flow_context, in_flow_context, action, fte->action.action); MLX5_SET(flow_context, in_flow_context, encap_id, fte->action.encap_id); MLX5_SET(flow_context, in_flow_context, modify_header_id, fte->action.modify_id); + + vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan); + + MLX5_SET(vlan, vlan, ethtype, fte->action.vlan.ethtype); + MLX5_SET(vlan, vlan, vid, fte->action.vlan.vid); + MLX5_SET(vlan, vlan, prio, fte->action.vlan.prio); + in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context, match_value); memcpy(in_match_value, &fte->val, sizeof(fte->val)); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 3ba07c7096ef..de51e7c39bc8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -1439,7 +1439,9 @@ static bool check_conflicting_actions(u32 action1, u32 action2) if (xored_actions & (MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_ENCAP | MLX5_FLOW_CONTEXT_ACTION_DECAP | - MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) + MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | + MLX5_FLOW_CONTEXT_ACTION_VLAN_POP | + MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)) return true; return false; diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index b957e52434f8..47aecc4fa8c2 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -142,6 +142,12 @@ struct mlx5_flow_group * mlx5_create_flow_group(struct mlx5_flow_table *ft, u32 *in); void mlx5_destroy_flow_group(struct mlx5_flow_group *fg); +struct mlx5_fs_vlan { + u16 ethtype; + u16 vid; + u8 prio; +}; + struct mlx5_flow_act { u32 action; bool has_flow_tag; @@ -149,6 +155,7 @@ struct mlx5_flow_act { u32 encap_id; u32 modify_id; uintptr_t esp_id; + struct mlx5_fs_vlan vlan; }; #define MLX5_DECLARE_FLOW_ACT(name) \ diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 1f3483d40055..c19e611d2782 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -314,7 +314,10 @@ struct mlx5_ifc_flow_table_prop_layout_bits { u8 flow_table_modify[0x1]; u8 encap[0x1]; u8 decap[0x1]; - u8 reserved_at_9[0x17]; + u8 reserved_at_9[0x1]; + u8 pop_vlan[0x1]; + u8 push_vlan[0x1]; + u8 reserved_at_c[0x14]; u8 reserved_at_20[0x2]; u8 log_max_ft_size[0x6]; @@ -2311,10 +2314,19 @@ enum { MLX5_FLOW_CONTEXT_ACTION_ENCAP = 0x10, MLX5_FLOW_CONTEXT_ACTION_DECAP = 0x20, MLX5_FLOW_CONTEXT_ACTION_MOD_HDR = 0x40, + MLX5_FLOW_CONTEXT_ACTION_VLAN_POP = 0x80, + MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH = 0x100, +}; + +struct mlx5_ifc_vlan_bits { + u8 ethtype[0x10]; + u8 prio[0x3]; + u8 cfi[0x1]; + u8 vid[0xc]; }; struct mlx5_ifc_flow_context_bits { - u8 reserved_at_0[0x20]; + struct mlx5_ifc_vlan_bits push_vlan; u8 group_id[0x20]; -- cgit v1.2.3 From 6acfbf38a98a4c455f5b8c827854e1dc201be745 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Wed, 31 Jan 2018 18:36:03 +0200 Subject: net/mlx5e: Offload tc vlan push/pop using HW action Currently, we are emulating the offload of vlan push/pop actions using global 
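With the new capability bits present, a steering consumer can hand the vlan rewrite to firmware instead of the per-vport emulation: it fills the vlan fields of mlx5_flow_act and adds the rule as usual. A condensed sketch (illustrative; the table, spec and destination setup are elided) that pushes VLAN 100, priority 2, on matching traffic:

static struct mlx5_flow_handle *
add_vlan_push_rule(struct mlx5_flow_table *ft, struct mlx5_flow_spec *spec,
		   struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_act flow_act = {};

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			  MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
	flow_act.vlan.ethtype = ETH_P_8021Q;	/* host order; fs_cmd writes it via MLX5_SET */
	flow_act.vlan.vid = 100;
	flow_act.vlan.prio = 2;

	return mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
}

From the TC side, this is what a filter with "action vlan push id 100 priority 2" now maps to on ConnectX-5 and newer, while older NICs keep taking the esw_add_vlan_action_check() emulation path.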
setup as done by commit f5f82476090f ("net/mlx5: E-Switch, Support VLAN actions in the offloads mode"). With newer NICs, we can apply a flow action for that matter, do that while keeping the emulated path for the older HW brands. Signed-off-by: Or Gerlitz Reviewed-by: Mark Bloch Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 15 ++++++++----- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 10 ++++++++- .../ethernet/mellanox/mlx5/core/eswitch_offloads.c | 26 +++++++++++++++++----- 3 files changed, 40 insertions(+), 11 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 7c33df2034f0..3e4a7e81b67f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -2530,12 +2530,17 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) { attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP; } else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) { - if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q) || - tcf_vlan_push_prio(a)) - return -EOPNOTSUPP; - attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH; - attr->vlan = tcf_vlan_push_vid(a); + attr->vlan_vid = tcf_vlan_push_vid(a); + if (mlx5_eswitch_vlan_actions_supported(priv->mdev)) { + attr->vlan_prio = tcf_vlan_push_prio(a); + attr->vlan_proto = tcf_vlan_push_proto(a); + if (!attr->vlan_proto) + attr->vlan_proto = htons(ETH_P_8021Q); + } else if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q) || + tcf_vlan_push_prio(a)) { + return -EOPNOTSUPP; + } } else { /* action is TCA_VLAN_ACT_MODIFY */ return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index a435eb7971c6..4cd773fa55e3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -232,7 +232,9 @@ struct mlx5_esw_flow_attr { struct mlx5_eswitch_rep *out_rep; int action; - u16 vlan; + __be16 vlan_proto; + u16 vlan_vid; + u8 vlan_prio; bool vlan_handled; u32 encap_id; u32 mod_hdr_id; @@ -255,6 +257,12 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw, int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, int vport, u16 vlan, u8 qos, u8 set_flags); +static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev) +{ + return MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan) && + MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan); +} + #define MLX5_DEBUG_ESWITCH_MASK BIT(3) #define esw_info(dev, format, ...) 
\ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 21ebe3e80e6e..35e256eb2f6e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -58,8 +58,16 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, if (esw->mode != SRIOV_OFFLOADS) return ERR_PTR(-EOPNOTSUPP); - /* per flow vlan pop/push is emulated, don't set that into the firmware */ - flow_act.action = attr->action & ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH | MLX5_FLOW_CONTEXT_ACTION_VLAN_POP); + flow_act.action = attr->action; + /* if per flow vlan pop/push is emulated, don't set that into the firmware */ + if (!mlx5_eswitch_vlan_actions_supported(esw->dev)) + flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH | + MLX5_FLOW_CONTEXT_ACTION_VLAN_POP); + else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) { + flow_act.vlan.ethtype = ntohs(attr->vlan_proto); + flow_act.vlan.vid = attr->vlan_vid; + flow_act.vlan.prio = attr->vlan_prio; + } if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT; @@ -185,7 +193,7 @@ static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr, /* protects against (1) setting rules with different vlans to push and * (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0) */ - if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan)) + if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid)) goto out_notsupp; return 0; @@ -202,6 +210,10 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw, bool push, pop, fwd; int err = 0; + /* nop if we're on the vlan push/pop non emulation mode */ + if (mlx5_eswitch_vlan_actions_supported(esw->dev)) + return 0; + push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH); pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP); fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST); @@ -239,11 +251,11 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw, if (vport->vlan_refcount) goto skip_set_push; - err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan, 0, + err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid, 0, SET_VLAN_INSERT | SET_VLAN_STRIP); if (err) goto out; - vport->vlan = attr->vlan; + vport->vlan = attr->vlan_vid; skip_set_push: vport->vlan_refcount++; } @@ -261,6 +273,10 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw, bool push, pop, fwd; int err = 0; + /* nop if we're on the vlan push/pop non emulation mode */ + if (mlx5_eswitch_vlan_actions_supported(esw->dev)) + return 0; + if (!attr->vlan_handled) return 0; -- cgit v1.2.3 From 957f6ba8adc7be401a74ccff427e4cfd88d3bfcb Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Tue, 2 Jan 2018 16:49:56 +0200 Subject: net/mlx5: Protect from command bit overflow A system with CONFIG_UBSAN enabled produces the following error during driver initialization. The reason is that max_reg_cmds can be large enough for "1 << max_reg_cmds" to overflow: the shifted constant 1 is a signed int, so the computation wraps before the result is ever widened to the unsigned long bitmask.
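The arithmetic is easy to reproduce outside the driver. A minimal userspace sketch of the bug and the fix, assuming max_reg_cmds = 31 (the value implied by the "-2147483648 - 1" in the trace below):

#include <stdio.h>

int main(void)
{
	int max_reg_cmds = 31;

	/* Buggy form: the constant 1 is a signed int, so 1 << 31 is
	 * signed overflow (undefined behaviour) before the result is
	 * ever widened to the unsigned long destination:
	 *
	 *	unsigned long bitmask = (1 << max_reg_cmds) - 1;
	 */

	/* Fixed form, as in the patch: shift an unsigned long. */
	unsigned long bitmask = (1UL << max_reg_cmds) - 1;

	printf("bitmask = 0x%lx\n", bitmask);
	return 0;
}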
================================================================================ UBSAN: Undefined behaviour in drivers/net/ethernet/mellanox/mlx5/core/cmd.c:1805:42 signed integer overflow: -2147483648 - 1 cannot be represented in type 'int' CPU: 0 PID: 1 Comm: swapper/0 Not tainted 4.15.0-rc2-00032-g06cda2358d9b-dirty #724 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.7.5-0-ge51488c-20140602_164612-nilsson.home.kraxel.org 04/01/2014 Call Trace: dump_stack+0xe9/0x18f ? dma_virt_alloc+0x81/0x81 ubsan_epilogue+0xe/0x4e handle_overflow+0x187/0x20c mlx5_cmd_init+0x73a/0x12b0 mlx5_load_one+0x1c3d/0x1d30 init_one+0xd02/0xf10 pci_device_probe+0x26c/0x3b0 driver_probe_device+0x622/0xb40 __driver_attach+0x175/0x1b0 bus_for_each_dev+0xef/0x190 bus_add_driver+0x2db/0x490 driver_register+0x16b/0x1e0 __pci_register_driver+0x177/0x1b0 init+0x6d/0x92 do_one_initcall+0x15b/0x270 kernel_init_freeable+0x2d8/0x3d0 kernel_init+0x14/0x190 ret_from_fork+0x24/0x30 ================================================================================ Signed-off-by: Leon Romanovsky Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index fe5428667ad1..21cd1703a862 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -1804,7 +1804,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev) cmd->checksum_disabled = 1; cmd->max_reg_cmds = (1 << cmd->log_sz) - 1; - cmd->bitmask = (1 << cmd->max_reg_cmds) - 1; + cmd->bitmask = (1UL << cmd->max_reg_cmds) - 1; cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16; if (cmd->cmdif_rev > CMD_IF_REV) { -- cgit v1.2.3 From acac5ec0c2e77b79c7861db43d50640999d5d37b Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Thu, 11 Jan 2018 16:24:06 +0200 Subject: net/mlx5e: Remove redundant check in get ethtool stats ethtool core code makes sure data isn't NULL before calling get_ethtool_stats, testing it again in the driver is redundant. 
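For reference, the core-side guarantee the message relies on: ethtool_get_stats() in net/core/ethtool.c allocates the buffer itself and bails out on failure before invoking the driver op. Roughly (paraphrased, not the exact source):

	n_stats = ops->get_sset_count(dev, ETH_SS_STATS);
	if (n_stats < 0)
		return n_stats;
	data = kmalloc(n_stats * sizeof(u64), GFP_USER);
	if (!data)
		return -ENOMEM;
	ops->get_ethtool_stats(dev, &stats, data);

so get_ethtool_stats() is never entered with a NULL data pointer.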
Signed-off-by: Gal Pressman Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 62061fd23143..d415e67b557b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -203,9 +203,6 @@ void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv, { int i, idx = 0; - if (!data) - return; - mutex_lock(&priv->state_lock); mlx5e_update_stats(priv); mutex_unlock(&priv->state_lock); -- cgit v1.2.3 From 707129dceaf4b8c4721425d75e855aed4fd7b832 Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Wed, 31 Jan 2018 14:45:40 +0200 Subject: net/mlx5e: Make choose LRO timeout function static The function is used in en_main.c only, we can make it static and remove its declaration from en.h Signed-off-by: Gal Pressman Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 1 - drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 48e0b2a747d9..294bc9f175a5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -1062,7 +1062,6 @@ void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv); int mlx5e_close(struct net_device *netdev); int mlx5e_open(struct net_device *netdev); void mlx5e_update_stats_work(struct work_struct *work); -u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout); int mlx5e_bits_invert(unsigned long a, int size); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index f8bc3bcdf046..eb38a35564c7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -3967,7 +3967,7 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) MLX5_CQ_PERIOD_MODE_START_FROM_CQE); } -u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout) +static u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout) { int i; -- cgit v1.2.3 From be0f780bc50a00216ca9c7dc63485e2fc31bf8c8 Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Thu, 11 Jan 2018 18:46:20 +0200 Subject: net/mlx5e: Add a helper macro in set features ndo Add a new macro to prevent copy-pasting the same code for each new feature. 
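The pattern generalizes beyond this driver. A toy userspace reduction of the idea (every name here is a stand-in; only the macro shape mirrors the real MLX5E_HANDLE_FEATURE()):

#include <stdio.h>

typedef unsigned long features_t;

static int handle_feature(const char *dev, features_t *oper, features_t wanted,
			  features_t feature, int (*handler)(const char *, int))
{
	int enable = !!(wanted & feature);

	if (!((*oper ^ wanted) & feature))
		return 0;		/* no change requested for this bit */
	if (handler(dev, enable))
		return -1;		/* handler failed; leave *oper as is */
	*oper ^= feature;
	return 0;
}

static int set_lro(const char *dev, int enable)
{
	printf("%s: LRO -> %s\n", dev, enable ? "on" : "off");
	return 0;
}

/* One local macro pins the invariant arguments, so each feature line
 * names only what actually varies: the bit and its handler. */
#define HANDLE_FEATURE(feature, handler) \
	handle_feature(dev, &oper, wanted, feature, handler)

int main(void)
{
	const char *dev = "eth0";
	features_t oper = 0, wanted = 0x1;	/* bit 0 stands in for NETIF_F_LRO */
	int err = 0;

	err |= HANDLE_FEATURE(0x1, set_lro);
	return err ? 1 : 0;
}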
Signed-off-by: Gal Pressman Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 26 ++++++++++------------- 1 file changed, 11 insertions(+), 15 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index eb38a35564c7..fb936a978f20 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -3242,24 +3242,20 @@ static int mlx5e_set_features(struct net_device *netdev, netdev_features_t features) { netdev_features_t oper_features = netdev->features; - int err; + int err = 0; + +#define MLX5E_HANDLE_FEATURE(feature, handler) \ + mlx5e_handle_feature(netdev, &oper_features, features, feature, handler) - err = mlx5e_handle_feature(netdev, &oper_features, features, - NETIF_F_LRO, set_feature_lro); - err |= mlx5e_handle_feature(netdev, &oper_features, features, - NETIF_F_HW_VLAN_CTAG_FILTER, + err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro); + err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER, set_feature_cvlan_filter); - err |= mlx5e_handle_feature(netdev, &oper_features, features, - NETIF_F_HW_TC, set_feature_tc_num_filters); - err |= mlx5e_handle_feature(netdev, &oper_features, features, - NETIF_F_RXALL, set_feature_rx_all); - err |= mlx5e_handle_feature(netdev, &oper_features, features, - NETIF_F_RXFCS, set_feature_rx_fcs); - err |= mlx5e_handle_feature(netdev, &oper_features, features, - NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan); + err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_tc_num_filters); + err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all); + err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs); + err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan); #ifdef CONFIG_RFS_ACCEL - err |= mlx5e_handle_feature(netdev, &oper_features, features, - NETIF_F_NTUPLE, set_feature_arfs); + err |= MLX5E_HANDLE_FEATURE(NETIF_F_NTUPLE, set_feature_arfs); #endif if (err) { -- cgit v1.2.3 From 71186172b71fe7266eaa768f028c20725202dfd5 Mon Sep 17 00:00:00 2001 From: Aviv Heller Date: Thu, 17 Aug 2017 16:44:16 +0300 Subject: net/mlx5e: Add VLAN offload features to hw_enc_features We support outer VLAN offload in driver and HW regardless of whether an encapsulation is present in the next headers. Exposing this in hw_enc_features will allow us to offload outer VLANs in cases where encapsulation protocols like VXLAN and IPsec are used. 
Signed-off-by: Aviv Heller Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index fb936a978f20..1d36d7569f44 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -4106,6 +4106,9 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) netdev->vlan_features |= NETIF_F_RXCSUM; netdev->vlan_features |= NETIF_F_RXHASH; + netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_TX; + netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_RX; + if (!!MLX5_CAP_ETH(mdev, lro_cap)) netdev->vlan_features |= NETIF_F_LRO; -- cgit v1.2.3 From 0608d4dbaf4ee30a33def4c31b78bc664c80ff79 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Wed, 17 Jan 2018 17:39:07 +0200 Subject: net/mlx5e: Unify slow PCI heuristic Get the link/pci speed query and logic into a single function. Unify the heuristics and use a single PCI threshold (16G) for all. Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 31 ++++++++++------------ .../net/ethernet/mellanox/mlx5/core/mlx5_core.h | 5 ++++ 2 files changed, 19 insertions(+), 17 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 1d36d7569f44..46707826f27e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -3902,16 +3902,20 @@ static int mlx5e_get_pci_bw(struct mlx5_core_dev *mdev, u32 *pci_bw) return 0; } -static bool cqe_compress_heuristic(u32 link_speed, u32 pci_bw) +static bool slow_pci_heuristic(struct mlx5_core_dev *mdev) { - return (link_speed && pci_bw && - (pci_bw < 40000) && (pci_bw < link_speed)); -} + u32 link_speed = 0; + u32 pci_bw = 0; -static bool hw_lro_heuristic(u32 link_speed, u32 pci_bw) -{ - return !(link_speed && pci_bw && - (pci_bw <= 16000) && (pci_bw < link_speed)); + mlx5e_get_max_linkspeed(mdev, &link_speed); + mlx5e_get_pci_bw(mdev, &pci_bw); + mlx5_core_dbg_once(mdev, "Max link speed = %d, PCI BW = %d\n", + link_speed, pci_bw); + +#define MLX5E_SLOW_PCI_RATIO (2) + + return link_speed && pci_bw && + link_speed > MLX5E_SLOW_PCI_RATIO * pci_bw; } void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) @@ -3980,17 +3984,10 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev, u16 max_channels) { u8 cq_period_mode = 0; - u32 link_speed = 0; - u32 pci_bw = 0; params->num_channels = max_channels; params->num_tc = 1; - mlx5e_get_max_linkspeed(mdev, &link_speed); - mlx5e_get_pci_bw(mdev, &pci_bw); - mlx5_core_dbg(mdev, "Max link speed = %d, PCI BW = %d\n", - link_speed, pci_bw); - /* SQ */ params->log_sq_size = is_kdump_kernel() ? 
MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE : @@ -4000,7 +3997,7 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev, params->rx_cqe_compress_def = false; if (MLX5_CAP_GEN(mdev, cqe_compression) && MLX5_CAP_GEN(mdev, vport_group_manager)) - params->rx_cqe_compress_def = cqe_compress_heuristic(link_speed, pci_bw); + params->rx_cqe_compress_def = slow_pci_heuristic(mdev); MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def); @@ -4011,7 +4008,7 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev, /* TODO: && MLX5_CAP_ETH(mdev, lro_cap) */ if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) - params->lro_en = hw_lro_heuristic(link_speed, pci_bw); + params->lro_en = !slow_pci_heuristic(mdev); params->lro_timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT); /* CQ moderation params */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 4e25f2b2e0bc..7d001fe6e631 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -50,6 +50,11 @@ extern uint mlx5_core_debug_mask; __func__, __LINE__, current->pid, \ ##__VA_ARGS__) +#define mlx5_core_dbg_once(__dev, format, ...) \ + dev_dbg_once(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \ + __func__, __LINE__, current->pid, \ + ##__VA_ARGS__) + #define mlx5_core_dbg_mask(__dev, mask, format, ...) \ do { \ if ((mask) & mlx5_core_debug_mask) \ -- cgit v1.2.3 From 291f445eab9b2b11c8dd05ae1a0943c328cb9b6b Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Sun, 11 Feb 2018 11:58:30 +0200 Subject: net/mlx5e: Disable Striding RQ when PCI is slower than link We turn the feature off for servers with PCI BW bounded by a threshold (16G) and lower than MAX LINK BW. This improves the effectiveness of CQE compression feature, that is defaulted to ON for the same case. Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 46707826f27e..d4dd00089eb1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -113,13 +113,16 @@ void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)); } +static bool slow_pci_heuristic(struct mlx5_core_dev *mdev); + static void mlx5e_set_rq_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params) { u8 rq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) && - !params->xdp_prog && !MLX5_IPSEC_DEV(mdev) ? - MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ : - MLX5_WQ_TYPE_LINKED_LIST; + !slow_pci_heuristic(mdev) && + !params->xdp_prog && !MLX5_IPSEC_DEV(mdev) ? + MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ : + MLX5_WQ_TYPE_LINKED_LIST; mlx5e_init_rq_type_params(mdev, params, rq_type); } -- cgit v1.2.3 From 73faf36c5ae6b300490f0a5f2835fa702adef0e1 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Tue, 21 Nov 2017 17:43:50 +0200 Subject: net/mlx5e: Remove unused define MLX5_MPWRQ_STRIDES_PER_PAGE Clean it up as it's not in use. 
Fixes: d9d9f156f380 ("net/mlx5e: Expand WQE stride when CQE compression is enabled") Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 294bc9f175a5..85767f0869d8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -93,8 +93,6 @@ #define MLX5_MPWRQ_WQE_PAGE_ORDER (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \ MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0) #define MLX5_MPWRQ_PAGES_PER_WQE BIT(MLX5_MPWRQ_WQE_PAGE_ORDER) -#define MLX5_MPWRQ_STRIDES_PER_PAGE (MLX5_MPWRQ_NUM_STRIDES >> \ - MLX5_MPWRQ_WQE_PAGE_ORDER) #define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2) #define MLX5E_REQUIRED_MTTS(wqes) \ -- cgit v1.2.3 From bd658dda423781dbe775a2d87d662a3145ec05a6 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Mon, 27 Nov 2017 10:35:12 +0200 Subject: net/mlx5e: Separate dma base address and offset in dma_sync call Pass the base dma address and offset to dma_sync_single_range_for_cpu(), instead of doing the pre-calculation. Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 8cce90dc461d..ffcbe5c3818a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -870,10 +870,8 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, data = va + rx_headroom; frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt); - dma_sync_single_range_for_cpu(rq->pdev, - di->addr + wi->offset, - 0, frag_size, - DMA_FROM_DEVICE); + dma_sync_single_range_for_cpu(rq->pdev, di->addr, wi->offset, + frag_size, DMA_FROM_DEVICE); prefetch(data); wi->offset += frag_size; -- cgit v1.2.3 From 24fd07abfa3584b91f65002956d3d5a5db169185 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Thu, 15 Feb 2018 18:27:06 +0200 Subject: net/mlx5e: Use no-offset function in skb header copy In copying skb header to skb->data, replace the call to skb_copy_to_linear_data_offset() with a zero offset with the call to the no-offset function skb_copy_to_linear_data(). 
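Both helpers are thin memcpy wrappers; paraphrased from include/linux/skbuff.h:

	skb_copy_to_linear_data(skb, from, len)             /* memcpy(skb->data, from, len) */
	skb_copy_to_linear_data_offset(skb, off, from, len) /* memcpy(skb->data + off, from, len) */

With off always zero the two are equivalent, and the no-offset name states the intent directly.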
Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index ffcbe5c3818a..781b8f21d6d1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -333,9 +333,8 @@ mlx5e_copy_skb_header_mpwqe(struct device *pdev, len = ALIGN(headlen_pg, sizeof(long)); dma_sync_single_for_cpu(pdev, dma_info->addr + offset, len, DMA_FROM_DEVICE); - skb_copy_to_linear_data_offset(skb, 0, - page_address(dma_info->page) + offset, - len); + skb_copy_to_linear_data(skb, page_address(dma_info->page) + offset, len); + if (unlikely(offset + headlen > PAGE_SIZE)) { dma_info++; headlen_pg = len; -- cgit v1.2.3 From f1e4fc9b4b02ec728b06ad09d6cdf5a9bb7ec372 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Wed, 7 Feb 2018 13:21:30 +0200 Subject: net/mlx5e: Remove RQ MPWQE fields from params Introduce functions to calculate them when needed. They can be derived from other params. This will simplify transition between RQ configurations. In general, any parameter that is not explicitly set or controlled, but derived from other parameters, should not have a control-path field itself, but a getter function. Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 7 ++-- .../net/ethernet/mellanox/mlx5/core/en_ethtool.c | 13 +++----- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 38 +++++++++++++++------- 3 files changed, 35 insertions(+), 23 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 85767f0869d8..ba7f1ceb6dcd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -231,8 +231,6 @@ struct mlx5e_params { u8 log_sq_size; u8 rq_wq_type; u16 rq_headroom; - u8 mpwqe_log_stride_sz; - u8 mpwqe_log_num_strides; u8 log_rq_size; u16 num_channels; u8 num_tc; @@ -840,6 +838,11 @@ void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix); void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix); void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi); +u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev, + struct mlx5e_params *params); +u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev, + struct mlx5e_params *params); + void mlx5e_update_stats(struct mlx5e_priv *priv); int mlx5e_create_flow_steering(struct mlx5e_priv *priv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index d415e67b557b..234b5b2ebf0f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -231,8 +231,8 @@ static u32 mlx5e_rx_wqes_to_packets(struct mlx5e_priv *priv, int rq_wq_type, if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) return num_wqe; - stride_size = 1 << priv->channels.params.mpwqe_log_stride_sz; - num_strides = 1 << priv->channels.params.mpwqe_log_num_strides; + stride_size = 1 << mlx5e_mpwqe_get_log_stride_size(priv->mdev, &priv->channels.params); + num_strides = 1 << mlx5e_mpwqe_get_log_num_strides(priv->mdev, &priv->channels.params); wqe_size = stride_size * num_strides; packets_per_wqe = 
wqe_size / @@ -252,8 +252,8 @@ static u32 mlx5e_packets_to_rx_wqes(struct mlx5e_priv *priv, int rq_wq_type, if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) return num_packets; - stride_size = 1 << priv->channels.params.mpwqe_log_stride_sz; - num_strides = 1 << priv->channels.params.mpwqe_log_num_strides; + stride_size = 1 << mlx5e_mpwqe_get_log_stride_size(priv->mdev, &priv->channels.params); + num_strides = 1 << mlx5e_mpwqe_get_log_num_strides(priv->mdev, &priv->channels.params); wqe_size = stride_size * num_strides; num_packets = (1 << order_base_2(num_packets)); @@ -1561,11 +1561,6 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val new_channels.params = priv->channels.params; MLX5E_SET_PFLAG(&new_channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val); - new_channels.params.mpwqe_log_stride_sz = - MLX5E_MPWQE_STRIDE_SZ(priv->mdev, new_val); - new_channels.params.mpwqe_log_num_strides = - MLX5_MPWRQ_LOG_WQE_SZ - new_channels.params.mpwqe_log_stride_sz; - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { priv->channels.params = new_channels.params; return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index d4dd00089eb1..65e6955713e7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -78,6 +78,20 @@ static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev) MLX5_CAP_ETH(mdev, reg_umr_sq); } +u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev, + struct mlx5e_params *params) +{ + return MLX5E_MPWQE_STRIDE_SZ(mdev, + MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)); +} + +u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev, + struct mlx5e_params *params) +{ + return MLX5_MPWRQ_LOG_WQE_SZ - + mlx5e_mpwqe_get_log_stride_size(mdev, params); +} + void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params, u8 rq_type) { @@ -88,10 +102,6 @@ void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, params->log_rq_size = is_kdump_kernel() ? MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW : MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW; - params->mpwqe_log_stride_sz = MLX5E_MPWQE_STRIDE_SZ(mdev, - MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)); - params->mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - - params->mpwqe_log_stride_sz; break; default: /* MLX5_WQ_TYPE_LINKED_LIST */ params->log_rq_size = is_kdump_kernel() ? 
@@ -109,7 +119,7 @@ void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n", params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ, BIT(params->log_rq_size), - BIT(params->mpwqe_log_stride_sz), + BIT(mlx5e_mpwqe_get_log_stride_size(mdev, params)), MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)); } @@ -453,8 +463,8 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, goto err_rq_wq_destroy; } - rq->mpwqe.log_stride_sz = params->mpwqe_log_stride_sz; - rq->mpwqe.num_strides = BIT(params->mpwqe_log_num_strides); + rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params); + rq->mpwqe.num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params)); byte_count = rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz; @@ -1745,13 +1755,16 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv, struct mlx5e_params *params, struct mlx5e_rq_param *param) { + struct mlx5_core_dev *mdev = priv->mdev; void *rqc = param->rqc; void *wq = MLX5_ADDR_OF(rqc, rqc, wq); switch (params->rq_wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: - MLX5_SET(wq, wq, log_wqe_num_of_strides, params->mpwqe_log_num_strides - 9); - MLX5_SET(wq, wq, log_wqe_stride_size, params->mpwqe_log_stride_sz - 6); + MLX5_SET(wq, wq, log_wqe_num_of_strides, + mlx5e_mpwqe_get_log_num_strides(mdev, params) - 9); + MLX5_SET(wq, wq, log_wqe_stride_size, + mlx5e_mpwqe_get_log_stride_size(mdev, params) - 6); MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ); break; default: /* MLX5_WQ_TYPE_LINKED_LIST */ @@ -1761,12 +1774,12 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv, MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN); MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe))); MLX5_SET(wq, wq, log_wq_sz, params->log_rq_size); - MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn); + MLX5_SET(wq, wq, pd, mdev->mlx5e_res.pdn); MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter); MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable); MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en); - param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev); + param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev); param->wq.linear = 1; } @@ -1825,7 +1838,8 @@ static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv, switch (params->rq_wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: - log_cq_size = params->log_rq_size + params->mpwqe_log_num_strides; + log_cq_size = params->log_rq_size + + mlx5e_mpwqe_get_log_num_strides(priv->mdev, params); break; default: /* MLX5_WQ_TYPE_LINKED_LIST */ log_cq_size = params->log_rq_size; -- cgit v1.2.3 From b0cedc844c00991d323c13b82d651d372dcbbb12 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Wed, 7 Feb 2018 13:28:35 +0200 Subject: net/mlx5e: Remove rq_headroom field from params It can be derived from other params, calculate it via the dedicated function when needed. 
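This patch and the previous one apply the same rule. A self-contained sketch of it using the headroom case; XDP_PACKET_HEADROOM really is 256 in the kernel, while the other two constants are illustrative stand-ins for MLX5_RX_HEADROOM and NET_IP_ALIGN:

#include <stdio.h>

#define XDP_PACKET_HEADROOM	256
#define RX_HEADROOM		64	/* stand-in value */
#define IP_ALIGN		2	/* stand-in value */

enum { WQ_LINKED_LIST, WQ_STRIDING_RQ };

struct params {
	int rq_wq_type;
	int xdp_prog;	/* boolean: XDP program attached */
};

/* No cached rq_headroom field: derive it on demand, so it can never
 * disagree with the knobs it depends on. */
static int rq_headroom(const struct params *p)
{
	int room = (p->xdp_prog ? XDP_PACKET_HEADROOM : RX_HEADROOM) + IP_ALIGN;

	return p->rq_wq_type == WQ_LINKED_LIST ? room : 0;
}

int main(void)
{
	struct params p = { .rq_wq_type = WQ_LINKED_LIST, .xdp_prog = 0 };

	printf("headroom without XDP: %d\n", rq_headroom(&p));
	p.xdp_prog = 1;		/* attach XDP: the derived value follows */
	printf("headroom with XDP: %d\n", rq_headroom(&p));
	return 0;
}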
Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 1 - drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 20 +++++++++++++++----- 2 files changed, 15 insertions(+), 6 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index ba7f1ceb6dcd..ff9aeda186a1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -230,7 +230,6 @@ enum mlx5e_priv_flag { struct mlx5e_params { u8 log_sq_size; u8 rq_wq_type; - u16 rq_headroom; u8 log_rq_size; u16 num_channels; u8 num_tc; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 65e6955713e7..4907b7bb08e0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -92,6 +92,19 @@ u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev, mlx5e_mpwqe_get_log_stride_size(mdev, params); } +static u16 mlx5e_get_rq_headroom(struct mlx5e_params *params) +{ + u16 linear_rq_headroom = params->xdp_prog ? + XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM; + + linear_rq_headroom += NET_IP_ALIGN; + + if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST) + return linear_rq_headroom; + + return 0; +} + void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params, u8 rq_type) { @@ -107,12 +120,9 @@ void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, params->log_rq_size = is_kdump_kernel() ? MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE : MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE; - params->rq_headroom = params->xdp_prog ? - XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM; - params->rq_headroom += NET_IP_ALIGN; /* Extra room needed for build_skb */ - params->lro_wqe_sz -= params->rq_headroom + + params->lro_wqe_sz -= mlx5e_get_rq_headroom(params) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); } @@ -441,7 +451,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, goto err_rq_wq_destroy; rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; - rq->buff.headroom = params->rq_headroom; + rq->buff.headroom = mlx5e_get_rq_headroom(params); switch (rq->wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: -- cgit v1.2.3 From 2a0f561bf81c13d36910c7312ac8c67f83320a07 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Sun, 18 Feb 2018 11:37:06 +0200 Subject: net/mlx5e: Do not reset Receive Queue params on every type change Do not make an implicit call to mlx5e_init_rq_type_params() upon every change in RQ type. It should be called only on channel creation.
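Why the implicit reset hurts, in miniature (a sketch with stand-in names and values, not the driver code):

struct params {
	int rq_wq_type;
	int log_rq_size;	/* has a per-type default, user-tunable after */
};

static void init_rq_type_params(struct params *p)
{
	p->log_rq_size = p->rq_wq_type ? 7 : 10;	/* illustrative defaults */
}

static void set_rq_type(struct params *p, int striding)
{
	p->rq_wq_type = striding;
	/* No init_rq_type_params() call here: re-running it on every
	 * type recomputation (an XDP attach, say) would silently wipe
	 * user tuning such as log_rq_size. */
}

int main(void)
{
	struct params p = { .rq_wq_type = 1 };

	init_rq_type_params(&p);	/* once, at channel creation */
	p.log_rq_size = 12;		/* user enlarges the ring */
	set_rq_type(&p, 0);		/* type flips; the tuning survives */
	return p.log_rq_size == 12 ? 0 : 1;
}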
Fixes: 2fc4bfb7250d ("net/mlx5e: Dynamic RQ type infrastructure") Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 3 +-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 15 +++++++-------- drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 3 ++- 3 files changed, 10 insertions(+), 11 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index ff9aeda186a1..45d0c64e77e5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -918,8 +918,7 @@ void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode); void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, - struct mlx5e_params *params, - u8 rq_type); + struct mlx5e_params *params); static inline bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 4907b7bb08e0..ffe3b2469032 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -106,9 +106,8 @@ static u16 mlx5e_get_rq_headroom(struct mlx5e_params *params) } void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, - struct mlx5e_params *params, u8 rq_type) + struct mlx5e_params *params) { - params->rq_wq_type = rq_type; params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; switch (params->rq_wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: @@ -135,15 +134,14 @@ void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, static bool slow_pci_heuristic(struct mlx5_core_dev *mdev); -static void mlx5e_set_rq_params(struct mlx5_core_dev *mdev, - struct mlx5e_params *params) +static void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, + struct mlx5e_params *params) { - u8 rq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) && + params->rq_wq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) && !slow_pci_heuristic(mdev) && !params->xdp_prog && !MLX5_IPSEC_DEV(mdev) ? 
MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ : MLX5_WQ_TYPE_LINKED_LIST; - mlx5e_init_rq_type_params(mdev, params, rq_type); } static void mlx5e_update_carrier(struct mlx5e_priv *priv) @@ -3736,7 +3734,7 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog) bpf_prog_put(old_prog); if (reset) /* change RQ type according to priv->xdp_prog */ - mlx5e_set_rq_params(priv->mdev, &priv->channels.params); + mlx5e_set_rq_type(priv->mdev, &priv->channels.params); if (was_opened && reset) mlx5e_open_locked(netdev); @@ -4029,7 +4027,8 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev, MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def); /* RQ */ - mlx5e_set_rq_params(mdev, params); + mlx5e_set_rq_type(mdev, params); + mlx5e_init_rq_type_params(mdev, params); /* HW LRO */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index f953378bd13d..870584a07c48 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -56,7 +56,8 @@ static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params) { /* Override RQ params as IPoIB supports only LINKED LIST RQ for now */ - mlx5e_init_rq_type_params(mdev, params, MLX5_WQ_TYPE_LINKED_LIST); + params->rq_wq_type = MLX5_WQ_TYPE_LINKED_LIST; + mlx5e_init_rq_type_params(mdev, params); /* RQ size in ipoib by default is 512 */ params->log_rq_size = is_kdump_kernel() ? -- cgit v1.2.3 From 2ccb0a79018c9fafa913654163adc9dbac1280c5 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Wed, 7 Feb 2018 14:51:45 +0200 Subject: net/mlx5e: Add ethtool priv-flag for Striding RQ Add a control private flag in ethtool to enable/disable Striding RQ feature. 
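Once wired up, the flag is reachable with stock ethtool (interface name illustrative):

	ethtool --show-priv-flags eth0
	ethtool --set-priv-flags eth0 rx_striding_rq off

The string matches the "rx_striding_rq" entry added to mlx5e_priv_flags below.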
Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 7 ++++ .../net/ethernet/mellanox/mlx5/core/en_ethtool.c | 38 ++++++++++++++++++++++ drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 20 ++++++++---- .../net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 3 +- 4 files changed, 60 insertions(+), 8 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 45d0c64e77e5..13dd7a97ae04 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -205,12 +205,14 @@ static const char mlx5e_priv_flags[][ETH_GSTRING_LEN] = { "rx_cqe_moder", "tx_cqe_moder", "rx_cqe_compress", + "rx_striding_rq", }; enum mlx5e_priv_flag { MLX5E_PFLAG_RX_CQE_BASED_MODER = (1 << 0), MLX5E_PFLAG_TX_CQE_BASED_MODER = (1 << 1), MLX5E_PFLAG_RX_CQE_COMPRESS = (1 << 2), + MLX5E_PFLAG_RX_STRIDING_RQ = (1 << 3), }; #define MLX5E_SET_PFLAG(params, pflag, enable) \ @@ -827,6 +829,10 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq); void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq); void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq); +bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev); +bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev, + struct mlx5e_params *params); + void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info, bool recycle); void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); @@ -917,6 +923,7 @@ void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode); void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode); +void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params); void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 234b5b2ebf0f..7bfe17b7c279 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -1598,6 +1598,38 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev, return 0; } +static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5e_channels new_channels = {}; + int err; + + if (enable) { + if (!mlx5e_check_fragmented_striding_rq_cap(mdev)) + return -EOPNOTSUPP; + if (!mlx5e_striding_rq_possible(mdev, &priv->channels.params)) + return -EINVAL; + } + + new_channels.params = priv->channels.params; + + MLX5E_SET_PFLAG(&new_channels.params, MLX5E_PFLAG_RX_STRIDING_RQ, enable); + mlx5e_set_rq_type(mdev, &new_channels.params); + + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { + priv->channels.params = new_channels.params; + return 0; + } + + err = mlx5e_open_channels(priv, &new_channels); + if (err) + return err; + + mlx5e_switch_priv_channels(priv, &new_channels, NULL); + return 0; +} + static int mlx5e_handle_pflag(struct net_device *netdev, u32 wanted_flags, enum mlx5e_priv_flag flag, @@ -1643,6 +1675,12 @@ static int mlx5e_set_priv_flags(struct net_device *netdev, u32 pflags) err = mlx5e_handle_pflag(netdev, pflags, MLX5E_PFLAG_RX_CQE_COMPRESS, set_pflag_rx_cqe_compress); + if (err) + goto out; + + err = mlx5e_handle_pflag(netdev, pflags, + MLX5E_PFLAG_RX_STRIDING_RQ, + 
set_pflag_rx_striding_rq); out: mutex_unlock(&priv->state_lock); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index ffe3b2469032..7610a7916e96 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -71,7 +71,7 @@ struct mlx5e_channel_param { struct mlx5e_cq_param icosq_cq; }; -static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev) +bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev) { return MLX5_CAP_GEN(mdev, striding_rq) && MLX5_CAP_GEN(mdev, umr_ptr_rlky) && @@ -132,14 +132,17 @@ void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)); } -static bool slow_pci_heuristic(struct mlx5_core_dev *mdev); +bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev, + struct mlx5e_params *params) +{ + return mlx5e_check_fragmented_striding_rq_cap(mdev) && + !params->xdp_prog && !MLX5_IPSEC_DEV(mdev); +} -static void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, - struct mlx5e_params *params) +void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params) { - params->rq_wq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) && - !slow_pci_heuristic(mdev) && - !params->xdp_prog && !MLX5_IPSEC_DEV(mdev) ? + params->rq_wq_type = mlx5e_striding_rq_possible(mdev, params) && + MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ? MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ : MLX5_WQ_TYPE_LINKED_LIST; } @@ -4027,6 +4030,9 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev, MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def); /* RQ */ + if (mlx5e_striding_rq_possible(mdev, params)) + MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, + !slow_pci_heuristic(mdev)); mlx5e_set_rq_type(mdev, params); mlx5e_init_rq_type_params(mdev, params); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index 870584a07c48..a35608faf8d2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -56,7 +56,8 @@ static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params) { /* Override RQ params as IPoIB supports only LINKED LIST RQ for now */ - params->rq_wq_type = MLX5_WQ_TYPE_LINKED_LIST; + MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, false); + mlx5e_set_rq_type(mdev, params); mlx5e_init_rq_type_params(mdev, params); /* RQ size in ipoib by default is 512 */ -- cgit v1.2.3 From c4554fbccaa3306f65954ed0f1dab7abce7889f8 Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Sun, 21 Jan 2018 10:52:17 +0200 Subject: net/mlx5e: Remove unused max inline related code Commit 58d522912ac7 ("net/mlx5e: Support TX packet copy into WQE") introduced the max inline WQE as an ethtool tunable. One commit later, that functionality was made dependent on BlueFlame. Commit 6982ab609768 ("net/mlx5e: Xmit, no write combining") removed BlueFlame support, and with it the max inline WQE. This patch cleans up the leftovers from the removed feature. 
Signed-off-by: Gal Pressman Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 3 -- .../net/ethernet/mellanox/mlx5/core/en_ethtool.c | 32 ++-------------------- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 11 -------- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 1 - 4 files changed, 2 insertions(+), 45 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 13dd7a97ae04..6898f5e26006 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -240,7 +240,6 @@ struct mlx5e_params { struct net_dim_cq_moder tx_cq_moderation; bool lro_en; u32 lro_wqe_sz; - u16 tx_max_inline; u8 tx_min_inline_mode; u8 rss_hfunc; u8 toeplitz_hash_key[40]; @@ -366,7 +365,6 @@ struct mlx5e_txqsq { void __iomem *uar_map; struct netdev_queue *txq; u32 sqn; - u16 max_inline; u8 min_inline_mode; u16 edge; struct device *pdev; @@ -1017,7 +1015,6 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, u16 rxq_index, u32 flow_id); #endif -u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev); int mlx5e_create_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir, u32 *in, int inlen); void mlx5e_destroy_tir(struct mlx5_core_dev *mdev, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 7bfe17b7c279..c57c929d7973 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -1118,13 +1118,9 @@ static int mlx5e_get_tunable(struct net_device *dev, const struct ethtool_tunable *tuna, void *data) { - const struct mlx5e_priv *priv = netdev_priv(dev); - int err = 0; + int err; switch (tuna->id) { - case ETHTOOL_TX_COPYBREAK: - *(u32 *)data = priv->channels.params.tx_max_inline; - break; case ETHTOOL_PFC_PREVENTION_TOUT: err = mlx5e_get_pfc_prevention_tout(dev, data); break; @@ -1141,35 +1137,11 @@ static int mlx5e_set_tunable(struct net_device *dev, const void *data) { struct mlx5e_priv *priv = netdev_priv(dev); - struct mlx5_core_dev *mdev = priv->mdev; - struct mlx5e_channels new_channels = {}; - int err = 0; - u32 val; + int err; mutex_lock(&priv->state_lock); switch (tuna->id) { - case ETHTOOL_TX_COPYBREAK: - val = *(u32 *)data; - if (val > mlx5e_get_max_inline_cap(mdev)) { - err = -EINVAL; - break; - } - - new_channels.params = priv->channels.params; - new_channels.params.tx_max_inline = val; - - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { - priv->channels.params = new_channels.params; - break; - } - - err = mlx5e_open_channels(priv, &new_channels); - if (err) - break; - mlx5e_switch_priv_channels(priv, &new_channels, NULL); - - break; case ETHTOOL_PFC_PREVENTION_TOUT: err = mlx5e_set_pfc_prevention_tout(dev, *(u16 *)data); break; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 7610a7916e96..5d8eb0a9c0f0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -993,7 +993,6 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, sq->channel = c; sq->txq_ix = txq_ix; sq->uar_map = mdev->mlx5e_res.bfreg.map; - sq->max_inline = params->tx_max_inline; sq->min_inline_mode = params->tx_min_inline_mode; if (MLX5_IPSEC_DEV(c->priv->mdev)) set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state); @@ -3882,15 +3881,6 @@ static 
int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev) return 0; } -u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev) -{ - int bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2; - - return bf_buf_size - - sizeof(struct mlx5e_tx_wqe) + - 2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/; -} - void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len, int num_channels) { @@ -4052,7 +4042,6 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev, mlx5e_set_tx_cq_mode_params(params, cq_period_mode); /* TX inline */ - params->tx_max_inline = mlx5e_get_max_inline_cap(mdev); params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(mdev); /* RSS */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index ea4b255380a2..dd32f3e390ff 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -884,7 +884,6 @@ static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev, params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation); mlx5e_set_rx_cq_mode_params(params, cq_period_mode); - params->tx_max_inline = mlx5e_get_max_inline_cap(mdev); params->num_tc = 1; params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; -- cgit v1.2.3 From bfc647d52e67dc756c605e9a50d45b71054c2533 Mon Sep 17 00:00:00 2001 From: Eran Ben Elisha Date: Tue, 16 Jan 2018 17:25:06 +0200 Subject: net/mlx5e: Move all TX timeout logic to be under state lock The driver callback for handling a TX timeout needs to access some internal resources (SQ, CQ) in order to decide if the tx timeout work should be scheduled. These resources might be unavailable if channels are closed in parallel (ifdown for example). The state lock is the mechanism that protects against such races. Move all TX timeout logic into the work, under the state lock. In addition, move the work from the global WQ to the mlx5e WQ to make sure it is flushed when the device is detached. Also, move the mlx5e_tx_timeout_work code next to the TX timeout NDO for better code locality.
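Reduced to its skeleton, the resulting split looks like this (a sketch with stand-in names, not the exact driver code):

static void tx_timeout_work(struct work_struct *work)
{
	struct priv *priv = container_of(work, struct priv, tx_timeout_work);

	rtnl_lock();
	mutex_lock(&priv->state_lock);
	if (test_bit(STATE_OPENED, &priv->state)) {
		/* Safe here: channels cannot close under us. Inspect the
		 * SQs/CQs and reopen the channels if one is truly stuck. */
	}
	mutex_unlock(&priv->state_lock);
	rtnl_unlock();
}

static void tx_timeout(struct net_device *dev)	/* the NDO */
{
	struct priv *priv = netdev_priv(dev);

	/* priv->wq is flushed when the device is detached, so the work
	 * cannot race with teardown the way a global-workqueue item
	 * could. */
	queue_work(priv->wq, &priv->tx_timeout_work);
}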
Fixes: 3947ca185999 ("net/mlx5e: Implement ndo_tx_timeout callback") Signed-off-by: Eran Ben Elisha Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 61 +++++++++++++---------- 1 file changed, 34 insertions(+), 27 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 5d8eb0a9c0f0..e0b75f52d556 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -177,26 +177,6 @@ static void mlx5e_update_carrier_work(struct work_struct *work) mutex_unlock(&priv->state_lock); } -static void mlx5e_tx_timeout_work(struct work_struct *work) -{ - struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv, - tx_timeout_work); - int err; - - rtnl_lock(); - mutex_lock(&priv->state_lock); - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) - goto unlock; - mlx5e_close_locked(priv->netdev); - err = mlx5e_open_locked(priv->netdev); - if (err) - netdev_err(priv->netdev, "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n", - err); -unlock: - mutex_unlock(&priv->state_lock); - rtnl_unlock(); -} - void mlx5e_update_stats(struct mlx5e_priv *priv) { int i; @@ -3658,13 +3638,19 @@ static bool mlx5e_tx_timeout_eq_recover(struct net_device *dev, return true; } -static void mlx5e_tx_timeout(struct net_device *dev) +static void mlx5e_tx_timeout_work(struct work_struct *work) { - struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv, + tx_timeout_work); + struct net_device *dev = priv->netdev; bool reopen_channels = false; - int i; + int i, err; - netdev_err(dev, "TX timeout detected\n"); + rtnl_lock(); + mutex_lock(&priv->state_lock); + + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) + goto unlock; for (i = 0; i < priv->channels.num * priv->channels.params.num_tc; i++) { struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, i); @@ -3672,7 +3658,9 @@ static void mlx5e_tx_timeout(struct net_device *dev) if (!netif_xmit_stopped(dev_queue)) continue; - netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u\n", + + netdev_err(dev, + "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u\n", i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc, jiffies_to_usecs(jiffies - dev_queue->trans_start)); @@ -3685,8 +3673,27 @@ static void mlx5e_tx_timeout(struct net_device *dev) } } - if (reopen_channels && test_bit(MLX5E_STATE_OPENED, &priv->state)) - schedule_work(&priv->tx_timeout_work); + if (!reopen_channels) + goto unlock; + + mlx5e_close_locked(dev); + err = mlx5e_open_locked(dev); + if (err) + netdev_err(priv->netdev, + "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n", + err); + +unlock: + mutex_unlock(&priv->state_lock); + rtnl_unlock(); +} + +static void mlx5e_tx_timeout(struct net_device *dev) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + + netdev_err(dev, "TX timeout detected\n"); + queue_work(priv->wq, &priv->tx_timeout_work); } static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog) -- cgit v1.2.3 From 2816077127230ef52cc7497903e71def45747611 Mon Sep 17 00:00:00 2001 From: Eran Ben Elisha Date: Tue, 26 Dec 2017 15:17:05 +0200 Subject: mlx5_{ib,core}: Add query SQ state helper function Move query SQ state function from mlx5_ib to mlx5_core in order to have it in shared 
code. It will be used in a downstream patch from mlx5e. Signed-off-by: Eran Ben Elisha Signed-off-by: Saeed Mahameed --- drivers/infiniband/hw/mlx5/qp.c | 14 +----------- drivers/net/ethernet/mellanox/mlx5/core/transobj.c | 25 ++++++++++++++++++++++ include/linux/mlx5/transobj.h | 1 + 3 files changed, 27 insertions(+), 13 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 85c612ac547a..0d0b0b8dad98 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -4739,26 +4739,14 @@ static int query_raw_packet_qp_sq_state(struct mlx5_ib_dev *dev, struct mlx5_ib_sq *sq, u8 *sq_state) { - void *out; - void *sqc; - int inlen; int err; - inlen = MLX5_ST_SZ_BYTES(query_sq_out); - out = kvzalloc(inlen, GFP_KERNEL); - if (!out) - return -ENOMEM; - - err = mlx5_core_query_sq(dev->mdev, sq->base.mqp.qpn, out); + err = mlx5_core_query_sq_state(dev->mdev, sq->base.mqp.qpn, sq_state); if (err) goto out; - - sqc = MLX5_ADDR_OF(query_sq_out, out, sq_context); - *sq_state = MLX5_GET(sqc, sqc, state); sq->state = *sq_state; out: - kvfree(out); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c index 9e38343a951f..c64957b5ef47 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c @@ -157,6 +157,31 @@ int mlx5_core_query_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *out) } EXPORT_SYMBOL(mlx5_core_query_sq); +int mlx5_core_query_sq_state(struct mlx5_core_dev *dev, u32 sqn, u8 *state) +{ + void *out; + void *sqc; + int inlen; + int err; + + inlen = MLX5_ST_SZ_BYTES(query_sq_out); + out = kvzalloc(inlen, GFP_KERNEL); + if (!out) + return -ENOMEM; + + err = mlx5_core_query_sq(dev, sqn, out); + if (err) + goto out; + + sqc = MLX5_ADDR_OF(query_sq_out, out, sq_context); + *state = MLX5_GET(sqc, sqc, state); + +out: + kvfree(out); + return err; +} +EXPORT_SYMBOL_GPL(mlx5_core_query_sq_state); + int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *tirn) { diff --git a/include/linux/mlx5/transobj.h b/include/linux/mlx5/transobj.h index 7e8f281f8c00..80d7aa8b2831 100644 --- a/include/linux/mlx5/transobj.h +++ b/include/linux/mlx5/transobj.h @@ -47,6 +47,7 @@ int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen); void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn); int mlx5_core_query_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *out); +int mlx5_core_query_sq_state(struct mlx5_core_dev *dev, u32 sqn, u8 *state); int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *tirn); int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in, -- cgit v1.2.3 From 16cc14d817338fc297970d2d9d146c88ec87474d Mon Sep 17 00:00:00 2001 From: Eran Ben Elisha Date: Tue, 9 Jan 2018 16:21:16 +0200 Subject: net/mlx5e: Dump xmit error completions Monitor and dump xmit error completions. In addition, add err_cqe counter to track the number of error completion per send queue. 
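The detection relies on the CQE opcode living in the high nibble of op_own. A userspace sketch of the decode; the MLX5_CQE_REQ_ERR value is taken from the mlx5 device header of this era, the rest is illustrative:

#include <stdio.h>
#include <stdint.h>

#define MLX5_CQE_REQ_ERR 13

static unsigned int cqe_opcode(uint8_t op_own)
{
	return op_own >> 4;	/* high nibble: opcode; low bit: HW ownership */
}

int main(void)
{
	uint8_t op_own = (MLX5_CQE_REQ_ERR << 4) | 1;

	if (cqe_opcode(op_own) == MLX5_CQE_REQ_ERR)
		printf("error completion: dump the CQE once, bump cqe_err\n");
	return 0;
}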
Signed-off-by: Eran Ben Elisha Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 3 +++ drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 2 ++ drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 19 +++++++++++++++++++ 3 files changed, 24 insertions(+) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index c0dab9a8969e..ad91d9de0240 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -60,6 +60,7 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) }, @@ -153,6 +154,7 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) s->tx_queue_stopped += sq_stats->stopped; s->tx_queue_wake += sq_stats->wake; s->tx_queue_dropped += sq_stats->dropped; + s->tx_cqe_err += sq_stats->cqe_err; s->tx_xmit_more += sq_stats->xmit_more; s->tx_csum_partial_inner += sq_stats->csum_partial_inner; s->tx_csum_none += sq_stats->csum_none; @@ -1103,6 +1105,7 @@ static const struct counter_desc sq_stats_desc[] = { { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) }, + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) }, }; static const struct counter_desc ch_stats_desc[] = { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 43a72efa28c0..43dc808684c9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -78,6 +78,7 @@ struct mlx5e_sw_stats { u64 tx_queue_wake; u64 tx_queue_dropped; u64 tx_xmit_more; + u64 tx_cqe_err; u64 rx_wqe_err; u64 rx_mpwqe_filler; u64 rx_buff_alloc_err; @@ -197,6 +198,7 @@ struct mlx5e_sq_stats { u64 stopped; u64 wake; u64 dropped; + u64 cqe_err; }; struct mlx5e_ch_stats { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 11b4f1089d1c..88b5b7bfc9a9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -417,6 +417,18 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev) return mlx5e_sq_xmit(sq, skb, wqe, pi); } +static void mlx5e_dump_error_cqe(struct mlx5e_txqsq *sq, + struct mlx5_err_cqe *err_cqe) +{ + u32 ci = mlx5_cqwq_get_ci(&sq->cq.wq); + + netdev_err(sq->channel->netdev, + "Error cqe on cqn 0x%x, ci 0x%x, sqn 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n", + sq->cq.mcq.cqn, ci, sq->sqn, err_cqe->syndrome, + err_cqe->vendor_err_synd); + mlx5_dump_err_cqe(sq->cq.mdev, err_cqe); +} + bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) { struct mlx5e_txqsq *sq; @@ -456,6 +468,13 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) wqe_counter = be16_to_cpu(cqe->wqe_counter); + if (unlikely(cqe->op_own >> 4 == MLX5_CQE_REQ_ERR)) { + if (!sq->stats.cqe_err) + mlx5e_dump_error_cqe(sq, + (struct 
mlx5_err_cqe *)cqe); + sq->stats.cqe_err++; + } + do { struct mlx5e_tx_wqe_info *wi; struct sk_buff *skb; -- cgit v1.2.3 From db75373c91b0cfb6a68ad6ae88721e4e21ae6261 Mon Sep 17 00:00:00 2001 From: Eran Ben Elisha Date: Tue, 26 Dec 2017 16:02:24 +0200 Subject: net/mlx5e: Recover Send Queue (SQ) from error state An error TX completion (CQE) that arrives on a specific SQ indicates that this SQ got moved by the hardware to an error state, which means all pending and incoming TX requests are dropped or will be dropped and no further "Good" CQEs will be generated for that SQ. Before this patch, error TX completions (CQEs) were not monitored and were handled like regular CQEs. This left the SQ stuck in the error state, making it useless for transmitting new packets. Mitigation plan: In case of an error completion, schedule a recovery work item which does the following: - Mark the TXQ as DRV_XOFF so that no new packets arrive from the stack. - Let NAPI flush all pending SQ WQEs (via the flush_in_error_en bit) to release SW and HW resources (SKB, DMA, etc.) and get the SQ and CQ consumer/producer indices synced. - Modify the SQ state ERR -> RST -> RDY (restart the SQ). - Reactivate the SQ and reset the SQ cc and pc. If we identify two consecutive SQ recovery requests less than 500 msecs apart, drop the recovery request to avoid CPU overload, as this scenario most likely happened due to a severe, repeated bug. In addition, add an SQ recover SW counter to monitor successful recoveries. Signed-off-by: Eran Ben Elisha Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 6 ++ drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 115 +++++++++++++++++++++ drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 3 + drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 2 + drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 10 +- 5 files changed, 134 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 6898f5e26006..353ac6daa3dc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -122,6 +122,7 @@ #define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC) #define MLX5E_TX_CQ_POLL_BUDGET 128 #define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */ +#define MLX5E_SQ_RECOVER_MIN_INTERVAL 500 /* msecs */ #define MLX5E_ICOSQ_MAX_WQEBBS \ (DIV_ROUND_UP(sizeof(struct mlx5e_umr_wqe), MLX5_SEND_WQE_BB)) @@ -332,6 +333,7 @@ struct mlx5e_sq_dma { enum { MLX5E_SQ_STATE_ENABLED, + MLX5E_SQ_STATE_RECOVERING, MLX5E_SQ_STATE_IPSEC, }; @@ -378,6 +380,10 @@ struct mlx5e_txqsq { struct mlx5e_channel *channel; int txq_ix; u32 rate_limit; + struct mlx5e_txqsq_recover { + struct work_struct recover_work; + u64 last_recover; + } recover; } ____cacheline_aligned_in_smp; struct mlx5e_xdpsq { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index e0b75f52d556..1b48dec67abf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -956,6 +956,7 @@ static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa) return 0; } +static void mlx5e_sq_recover(struct work_struct *work); static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, int txq_ix, struct mlx5e_params *params, @@ -974,6 +975,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, sq->txq_ix = txq_ix; sq->uar_map = mdev->mlx5e_res.bfreg.map;
sq->min_inline_mode = params->tx_min_inline_mode; + INIT_WORK(&sq->recover.recover_work, mlx5e_sq_recover); if (MLX5_IPSEC_DEV(c->priv->mdev)) set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state); @@ -1040,6 +1042,7 @@ static int mlx5e_create_sq(struct mlx5_core_dev *mdev, MLX5_SET(sqc, sqc, min_wqe_inline_mode, csp->min_inline_mode); MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST); + MLX5_SET(sqc, sqc, flush_in_error_en, 1); MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC); MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.bfreg.index); @@ -1158,9 +1161,20 @@ err_free_txqsq: return err; } +static void mlx5e_reset_txqsq_cc_pc(struct mlx5e_txqsq *sq) +{ + WARN_ONCE(sq->cc != sq->pc, + "SQ 0x%x: cc (0x%x) != pc (0x%x)\n", + sq->sqn, sq->cc, sq->pc); + sq->cc = 0; + sq->dma_fifo_cc = 0; + sq->pc = 0; +} + static void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq) { sq->txq = netdev_get_tx_queue(sq->channel->netdev, sq->txq_ix); + clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state); set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); netdev_tx_reset_queue(sq->txq); netif_tx_start_queue(sq->txq); @@ -1205,6 +1219,107 @@ static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq) mlx5e_free_txqsq(sq); } +static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq) +{ + unsigned long exp_time = jiffies + msecs_to_jiffies(2000); + + while (time_before(jiffies, exp_time)) { + if (sq->cc == sq->pc) + return 0; + + msleep(20); + } + + netdev_err(sq->channel->netdev, + "Wait for SQ 0x%x flush timeout (sq cc = 0x%x, sq pc = 0x%x)\n", + sq->sqn, sq->cc, sq->pc); + + return -ETIMEDOUT; +} + +static int mlx5e_sq_to_ready(struct mlx5e_txqsq *sq, int curr_state) +{ + struct mlx5_core_dev *mdev = sq->channel->mdev; + struct net_device *dev = sq->channel->netdev; + struct mlx5e_modify_sq_param msp = {0}; + int err; + + msp.curr_state = curr_state; + msp.next_state = MLX5_SQC_STATE_RST; + + err = mlx5e_modify_sq(mdev, sq->sqn, &msp); + if (err) { + netdev_err(dev, "Failed to move sq 0x%x to reset\n", sq->sqn); + return err; + } + + memset(&msp, 0, sizeof(msp)); + msp.curr_state = MLX5_SQC_STATE_RST; + msp.next_state = MLX5_SQC_STATE_RDY; + + err = mlx5e_modify_sq(mdev, sq->sqn, &msp); + if (err) { + netdev_err(dev, "Failed to move sq 0x%x to ready\n", sq->sqn); + return err; + } + + return 0; +} + +static void mlx5e_sq_recover(struct work_struct *work) +{ + struct mlx5e_txqsq_recover *recover = + container_of(work, struct mlx5e_txqsq_recover, + recover_work); + struct mlx5e_txqsq *sq = container_of(recover, struct mlx5e_txqsq, + recover); + struct mlx5_core_dev *mdev = sq->channel->mdev; + struct net_device *dev = sq->channel->netdev; + u8 state; + int err; + + err = mlx5_core_query_sq_state(mdev, sq->sqn, &state); + if (err) { + netdev_err(dev, "Failed to query SQ 0x%x state. err = %d\n", + sq->sqn, err); + return; + } + + if (state != MLX5_RQC_STATE_ERR) { + netdev_err(dev, "SQ 0x%x not in ERROR state\n", sq->sqn); + return; + } + + netif_tx_disable_queue(sq->txq); + + if (mlx5e_wait_for_sq_flush(sq)) + return; + + /* If the interval between two consecutive recovers per SQ is too + * short, don't recover to avoid infinite loop of ERR_CQE -> recover. + * If we reached this state, there is probably a bug that needs to be + * fixed. let's keep the queue close and let tx timeout cleanup. 
+ */ + if (jiffies_to_msecs(jiffies - recover->last_recover) < + MLX5E_SQ_RECOVER_MIN_INTERVAL) { + netdev_err(dev, "Recover SQ 0x%x canceled, too many error CQEs\n", + sq->sqn); + return; + } + + /* At this point, no new packets will arrive from the stack as TXQ is + * marked with QUEUE_STATE_DRV_XOFF. In addition, NAPI cleared all + * pending WQEs. SQ can safely reset the SQ. + */ + if (mlx5e_sq_to_ready(sq, state)) + return; + + mlx5e_reset_txqsq_cc_pc(sq); + sq->stats.recover++; + recover->last_recover = jiffies; + mlx5e_activate_txqsq(sq); +} + static int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params, struct mlx5e_sq_param *param, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index ad91d9de0240..b08c94422907 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -61,6 +61,7 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) }, @@ -155,6 +156,7 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) s->tx_queue_wake += sq_stats->wake; s->tx_queue_dropped += sq_stats->dropped; s->tx_cqe_err += sq_stats->cqe_err; + s->tx_recover += sq_stats->recover; s->tx_xmit_more += sq_stats->xmit_more; s->tx_csum_partial_inner += sq_stats->csum_partial_inner; s->tx_csum_none += sq_stats->csum_none; @@ -1106,6 +1108,7 @@ static const struct counter_desc sq_stats_desc[] = { { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) }, + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, recover) }, }; static const struct counter_desc ch_stats_desc[] = { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 43dc808684c9..53111a2df587 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -79,6 +79,7 @@ struct mlx5e_sw_stats { u64 tx_queue_dropped; u64 tx_xmit_more; u64 tx_cqe_err; + u64 tx_recover; u64 rx_wqe_err; u64 rx_mpwqe_filler; u64 rx_buff_alloc_err; @@ -199,6 +200,7 @@ struct mlx5e_sq_stats { u64 wake; u64 dropped; u64 cqe_err; + u64 recover; }; struct mlx5e_ch_stats { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 88b5b7bfc9a9..20297108528a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -469,9 +469,13 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) wqe_counter = be16_to_cpu(cqe->wqe_counter); if (unlikely(cqe->op_own >> 4 == MLX5_CQE_REQ_ERR)) { - if (!sq->stats.cqe_err) + if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, + &sq->state)) { mlx5e_dump_error_cqe(sq, (struct mlx5_err_cqe *)cqe); + queue_work(cq->channel->priv->wq, + &sq->recover.recover_work); + } sq->stats.cqe_err++; } @@ -528,7 +532,9 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) netdev_tx_completed_queue(sq->txq, 
npkts, nbytes); if (netif_tx_queue_stopped(sq->txq) && - mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, MLX5E_SQ_STOP_ROOM)) { + mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, + MLX5E_SQ_STOP_ROOM) && + !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) { netif_tx_wake_queue(sq->txq); sq->stats.wake++; } -- cgit v1.2.3 From 7b2117bb8ff98b7b82543f77768dfe7aca7e3746 Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Thu, 1 Feb 2018 05:37:44 -0800 Subject: net/mlx5e: Use eq ptr from cq Instead of looking for the EQ of the CQ, remove that redundant code and use the eq pointer stored in the cq struct. Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 1b48dec67abf..2aff4db9bdaa 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -3728,21 +3728,11 @@ static netdev_features_t mlx5e_features_check(struct sk_buff *skb, static bool mlx5e_tx_timeout_eq_recover(struct net_device *dev, struct mlx5e_txqsq *sq) { - struct mlx5e_priv *priv = netdev_priv(dev); - struct mlx5_core_dev *mdev = priv->mdev; - int irqn_not_used, eqn; - struct mlx5_eq *eq; + struct mlx5_eq *eq = sq->cq.mcq.eq; u32 eqe_count; - if (mlx5_vector2eqn(mdev, sq->cq.mcq.vector, &eqn, &irqn_not_used)) - return false; - - eq = mlx5_eqn2eq(mdev, eqn); - if (IS_ERR(eq)) - return false; - netdev_err(dev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n", - eqn, eq->cons_index, eq->irqn); + eq->eqn, eq->cons_index, eq->irqn); eqe_count = mlx5_eq_poll_irq_disabled(eq); if (!eqe_count) -- cgit v1.2.3 From b2d3907c234618c20239127f2c234b4e92adf5ef Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Tue, 13 Feb 2018 17:07:02 -0800 Subject: net/mlx5: Eliminate query xsrq dead code 1. This function is not used anywhere in mlx5 driver 2. 
It has a memcpy statement that makes no sense and produces a build warning with gcc8: drivers/net/ethernet/mellanox/mlx5/core/transobj.c: In function 'mlx5_core_query_xsrq': drivers/net/ethernet/mellanox/mlx5/core/transobj.c:347:3: error: 'memcpy' source argument is the same as destination [-Werror=restrict] Fixes: 01949d0109ee ("net/mlx5_core: Enable XRCs and SRQs when using ISSI > 0") Reported-by: Arnd Bergmann Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/transobj.c | 21 --------------------- include/linux/mlx5/transobj.h | 1 - 2 files changed, 22 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c index c64957b5ef47..dae1c5c5d27c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c @@ -354,27 +354,6 @@ int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 xsrqn) return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } -int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u32 *out) -{ - u32 in[MLX5_ST_SZ_DW(query_xrc_srq_in)] = {0}; - void *srqc; - void *xrc_srqc; - int err; - - MLX5_SET(query_xrc_srq_in, in, opcode, MLX5_CMD_OP_QUERY_XRC_SRQ); - MLX5_SET(query_xrc_srq_in, in, xrc_srqn, xsrqn); - err = mlx5_cmd_exec(dev, in, sizeof(in), out, - MLX5_ST_SZ_BYTES(query_xrc_srq_out)); - if (!err) { - xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, out, - xrc_srq_context_entry); - srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry); - memcpy(srqc, xrc_srqc, MLX5_ST_SZ_BYTES(srqc)); - } - - return err; -} - int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u16 lwm) { u32 in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0}; diff --git a/include/linux/mlx5/transobj.h b/include/linux/mlx5/transobj.h index 80d7aa8b2831..83a33a1873a6 100644 --- a/include/linux/mlx5/transobj.h +++ b/include/linux/mlx5/transobj.h @@ -67,7 +67,6 @@ int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm); int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rmpn); int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 rmpn); -int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u32 *out); int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm); int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen, -- cgit v1.2.3 From 6c75062823d8aa340615962bf261187bafa8d795 Mon Sep 17 00:00:00 2001 From: Alaa Hleihel Date: Thu, 1 Feb 2018 15:34:35 +0200 Subject: net/mlx5: Change teardown with force mode failure message to warning With ConnectX-4, we expect the force teardown to fail in case DC was enabled; therefore, change the message from an error to a warning.
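The policy behind the reworded message boils down to a warn-and-fall-back pattern, sketched below with hypothetical helpers (force_teardown/normal_teardown are stand-ins, not the mlx5 API): a failed force teardown is an expected outcome on some configurations, so the caller logs a warning and takes the regular path instead of treating the failure as fatal.

#include <stdio.h>

/* Hypothetical stand-ins for the fast and the regular teardown paths. */
static int force_teardown(void)  { return -1; } /* may fail, e.g. with DC enabled */
static int normal_teardown(void) { return 0;  } /* always supported */

static int teardown_hca(void)
{
	if (force_teardown() == 0)
		return 0;
	/* Expected on some setups: warn and fall back, do not error out. */
	fprintf(stderr, "warn: teardown with force mode failed, doing normal teardown\n");
	return normal_teardown();
}

int main(void)
{
	return teardown_hca();
}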
Signed-off-by: Alaa Hleihel Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/fw.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index d7bb10ab2173..70066975f1b5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -245,7 +245,7 @@ int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev) force_state = MLX5_GET(teardown_hca_out, out, force_state); if (force_state == MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL) { - mlx5_core_err(dev, "teardown with force mode failed\n"); + mlx5_core_warn(dev, "teardown with force mode failed, doing normal teardown\n"); return -EIO; } -- cgit v1.2.3 From 533788988cb62cf8c9a065612860fb89e50662d3 Mon Sep 17 00:00:00 2001 From: Talat Batheesh Date: Tue, 6 Mar 2018 14:58:46 +0200 Subject: net/mlx5e: IPoIB, Fix spelling mistake Fix spelling mistake in debug message text. "dettaching" -> "detaching" Signed-off-by: Talat Batheesh Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index a35608faf8d2..4899de74e252 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -540,7 +540,7 @@ static int mlx5i_detach_mcast(struct net_device *netdev, struct ib_device *hca, err = mlx5_core_detach_mcg(mdev, gid, ipriv->qp.qpn); if (err) - mlx5_core_dbg(mdev, "failed dettaching QPN 0x%x, MGID %pI6\n", + mlx5_core_dbg(mdev, "failed detaching QPN 0x%x, MGID %pI6\n", ipriv->qp.qpn, gid->raw); return err; -- cgit v1.2.3 From 472a1e44b3495df01c83e048667ef93dd2ea1ca0 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Mon, 12 Mar 2018 14:24:41 +0200 Subject: net/mlx5e: Save MTU in channels params Knowing the MTU is required for the RQ creation flow. By our design, the channel creation flow is totally isolated from priv/netdev, and can be completed with access only to the channels params and mdev. Adding the MTU to the channels params helps preserve that. In addition, we save it in the RQ to make its access faster in datapath checks.
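The SW/HW MTU relation that now travels with the channel params can be sketched in isolation (a standalone model: the 22-byte hard_mtu below restates MLX5E_ETH_HARD_MTU = ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN as an assumption). With sw_mtu and hard_mtu both inside the params struct, channel creation can compute buffer sizes without touching priv or the netdev:

#include <stdio.h>

struct channel_params {
	unsigned int sw_mtu; /* netdev-visible MTU */
	int hard_mtu;        /* L2 overhead included in the HW MTU */
};

static unsigned int sw2hw_mtu(const struct channel_params *p, unsigned int swmtu)
{
	return swmtu + p->hard_mtu;
}

static unsigned int hw2sw_mtu(const struct channel_params *p, unsigned int hwmtu)
{
	return hwmtu - p->hard_mtu;
}

int main(void)
{
	/* 14 (ethernet header) + 4 (VLAN tag) + 4 (FCS) = 22 bytes of overhead */
	struct channel_params p = { .sw_mtu = 1500, .hard_mtu = 14 + 4 + 4 };
	unsigned int hw = sw2hw_mtu(&p, p.sw_mtu);

	printf("sw_mtu %u -> hw_mtu %u -> sw_mtu %u\n",
	       p.sw_mtu, hw, hw2sw_mtu(&p, hw));
	return 0;
}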
Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 10 ++-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 58 ++++++++++++---------- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 3 +- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 3 +- .../net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 23 +++++---- 5 files changed, 52 insertions(+), 45 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 353ac6daa3dc..823876bfd6ab 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -57,8 +57,8 @@ #define MLX5E_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN) -#define MLX5E_HW2SW_MTU(priv, hwmtu) ((hwmtu) - ((priv)->hard_mtu)) -#define MLX5E_SW2HW_MTU(priv, swmtu) ((swmtu) + ((priv)->hard_mtu)) +#define MLX5E_HW2SW_MTU(params, hwmtu) ((hwmtu) - ((params)->hard_mtu)) +#define MLX5E_SW2HW_MTU(params, swmtu) ((swmtu) + ((params)->hard_mtu)) #define MLX5E_MAX_DSCP 64 #define MLX5E_MAX_NUM_TC 8 @@ -251,6 +251,8 @@ struct mlx5e_params { u32 lro_timeout; u32 pflags; struct bpf_prog *xdp_prog; + unsigned int sw_mtu; + int hard_mtu; }; #ifdef CONFIG_MLX5_CORE_EN_DCB @@ -534,6 +536,7 @@ struct mlx5e_rq { /* XDP */ struct bpf_prog *xdp_prog; + unsigned int hw_mtu; struct mlx5e_xdpsq xdpsq; /* control */ @@ -767,7 +770,6 @@ struct mlx5e_priv { struct mlx5e_tir inner_indir_tir[MLX5E_NUM_INDIR_TIRS]; struct mlx5e_tir direct_tir[MLX5E_MAX_NUM_CHANNELS]; u32 tx_rates[MLX5E_MAX_NUM_SQS]; - int hard_mtu; struct mlx5e_flow_steering fs; struct mlx5e_vxlan_db vxlan; @@ -1111,7 +1113,7 @@ void mlx5e_detach_netdev(struct mlx5e_priv *priv); void mlx5e_destroy_netdev(struct mlx5e_priv *priv); void mlx5e_build_nic_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params, - u16 max_channels); + u16 max_channels, u16 mtu); u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev); void mlx5e_rx_dim_work(struct work_struct *work); #endif /* __MLX5_EN_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 2aff4db9bdaa..af345323b2ce 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -419,6 +419,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, rq->channel = c; rq->ix = c->ix; rq->mdev = mdev; + rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL; if (IS_ERR(rq->xdp_prog)) { @@ -494,7 +495,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, byte_count = params->lro_en ? 
params->lro_wqe_sz : - MLX5E_SW2HW_MTU(c->priv, c->netdev->mtu); + MLX5E_SW2HW_MTU(params, params->sw_mtu); #ifdef CONFIG_MLX5_EN_IPSEC if (MLX5_IPSEC_DEV(mdev)) byte_count += MLX5E_METADATA_ETHER_LEN; @@ -2498,10 +2499,10 @@ static void mlx5e_build_inner_indir_tir_ctx(struct mlx5e_priv *priv, mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, true); } -static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu) +static int mlx5e_set_mtu(struct mlx5_core_dev *mdev, + struct mlx5e_params *params, u16 mtu) { - struct mlx5_core_dev *mdev = priv->mdev; - u16 hw_mtu = MLX5E_SW2HW_MTU(priv, mtu); + u16 hw_mtu = MLX5E_SW2HW_MTU(params, mtu); int err; err = mlx5_set_port_mtu(mdev, hw_mtu, 1); @@ -2513,9 +2514,9 @@ static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu) return 0; } -static void mlx5e_query_mtu(struct mlx5e_priv *priv, u16 *mtu) +static void mlx5e_query_mtu(struct mlx5_core_dev *mdev, + struct mlx5e_params *params, u16 *mtu) { - struct mlx5_core_dev *mdev = priv->mdev; u16 hw_mtu = 0; int err; @@ -2523,25 +2524,27 @@ static void mlx5e_query_mtu(struct mlx5e_priv *priv, u16 *mtu) if (err || !hw_mtu) /* fallback to port oper mtu */ mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1); - *mtu = MLX5E_HW2SW_MTU(priv, hw_mtu); + *mtu = MLX5E_HW2SW_MTU(params, hw_mtu); } static int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv) { + struct mlx5e_params *params = &priv->channels.params; struct net_device *netdev = priv->netdev; + struct mlx5_core_dev *mdev = priv->mdev; u16 mtu; int err; - err = mlx5e_set_mtu(priv, netdev->mtu); + err = mlx5e_set_mtu(mdev, params, params->sw_mtu); if (err) return err; - mlx5e_query_mtu(priv, &mtu); - if (mtu != netdev->mtu) + mlx5e_query_mtu(mdev, params, &mtu); + if (mtu != params->sw_mtu) netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n", - __func__, mtu, netdev->mtu); + __func__, mtu, params->sw_mtu); - netdev->mtu = mtu; + params->sw_mtu = mtu; return 0; } @@ -3411,34 +3414,33 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu) { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5e_channels new_channels = {}; - int curr_mtu; + struct mlx5e_params *params; int err = 0; bool reset; mutex_lock(&priv->state_lock); - reset = !priv->channels.params.lro_en && - (priv->channels.params.rq_wq_type != - MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ); + params = &priv->channels.params; + reset = !params->lro_en && + (params->rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ); reset = reset && test_bit(MLX5E_STATE_OPENED, &priv->state); - curr_mtu = netdev->mtu; - netdev->mtu = new_mtu; - if (!reset) { + params->sw_mtu = new_mtu; mlx5e_set_dev_port_mtu(priv); + netdev->mtu = params->sw_mtu; goto out; } - new_channels.params = priv->channels.params; + new_channels.params = *params; + new_channels.params.sw_mtu = new_mtu; err = mlx5e_open_channels(priv, &new_channels); - if (err) { - netdev->mtu = curr_mtu; + if (err) goto out; - } mlx5e_switch_priv_channels(priv, &new_channels, mlx5e_set_dev_port_mtu); + netdev->mtu = new_channels.params.sw_mtu; out: mutex_unlock(&priv->state_lock); @@ -4111,10 +4113,12 @@ static u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeo void mlx5e_build_nic_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params, - u16 max_channels) + u16 max_channels, u16 mtu) { u8 cq_period_mode = 0; + params->sw_mtu = mtu; + params->hard_mtu = MLX5E_ETH_HARD_MTU; params->num_channels = max_channels; params->num_tc = 1; @@ -4175,9 +4179,9 @@ static void 
mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev, priv->profile = profile; priv->ppriv = ppriv; priv->msglevel = MLX5E_MSG_LEVEL; - priv->hard_mtu = MLX5E_ETH_HARD_MTU; - mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev)); + mlx5e_build_nic_params(mdev, &priv->channels.params, + profile->max_nch(mdev), netdev->mtu); mutex_init(&priv->state_lock); @@ -4454,7 +4458,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv) /* MTU range: 68 - hw-specific max */ netdev->min_mtu = ETH_MIN_MTU; mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1); - netdev->max_mtu = MLX5E_HW2SW_MTU(priv, max_mtu); + netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu); mlx5e_set_dev_port_mtu(priv); mlx5_lag_add(mdev, netdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index dd32f3e390ff..cd884829cb30 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -877,6 +877,7 @@ static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev, MLX5_CQ_PERIOD_MODE_START_FROM_CQE : MLX5_CQ_PERIOD_MODE_START_FROM_EQE; + params->hard_mtu = MLX5E_ETH_HARD_MTU; params->log_sq_size = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE; params->rq_wq_type = MLX5_WQ_TYPE_LINKED_LIST; params->log_rq_size = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE; @@ -926,8 +927,6 @@ static void mlx5e_init_rep(struct mlx5_core_dev *mdev, priv->channels.params.num_channels = profile->max_nch(mdev); - priv->hard_mtu = MLX5E_ETH_HARD_MTU; - mlx5e_build_rep_params(mdev, &priv->channels.params); mlx5e_build_rep_netdev(netdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 781b8f21d6d1..c0d528f2131b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -766,8 +766,7 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, prefetchw(wqe); - if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || - MLX5E_SW2HW_MTU(rq->channel->priv, rq->netdev->mtu) < dma_len)) { + if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || rq->hw_mtu < dma_len)) { rq->stats.xdp_drop++; return false; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index 4899de74e252..dc5e9e706362 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -66,6 +66,7 @@ static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev, MLX5I_PARAMS_DEFAULT_LOG_RQ_SIZE; params->lro_en = false; + params->hard_mtu = MLX5_IB_GRH_BYTES + MLX5_IPOIB_HARD_LEN; } /* Called directly after IPoIB netdevice was created to initialize SW structs */ @@ -81,10 +82,10 @@ void mlx5i_init(struct mlx5_core_dev *mdev, priv->netdev = netdev; priv->profile = profile; priv->ppriv = ppriv; - priv->hard_mtu = MLX5_IB_GRH_BYTES + MLX5_IPOIB_HARD_LEN; mutex_init(&priv->state_lock); - mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev)); + mlx5e_build_nic_params(mdev, &priv->channels.params, + profile->max_nch(mdev), netdev->mtu); mlx5i_build_nic_params(mdev, &priv->channels.params); mlx5e_timestamp_init(priv); @@ -368,25 +369,27 @@ static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu) { struct mlx5e_priv *priv = mlx5i_epriv(netdev); struct mlx5e_channels new_channels = {}; - int curr_mtu; + struct mlx5e_params *params; int err = 0; mutex_lock(&priv->state_lock); - curr_mtu = 
netdev->mtu; - netdev->mtu = new_mtu; + params = &priv->channels.params; - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { + params->sw_mtu = new_mtu; + netdev->mtu = params->sw_mtu; goto out; + } - new_channels.params = priv->channels.params; + new_channels.params = *params; + new_channels.params.sw_mtu = new_mtu; err = mlx5e_open_channels(priv, &new_channels); - if (err) { - netdev->mtu = curr_mtu; + if (err) goto out; - } mlx5e_switch_priv_channels(priv, &new_channels, NULL); + netdev->mtu = new_channels.params.sw_mtu; out: mutex_unlock(&priv->state_lock); -- cgit v1.2.3 From 73281b78a37a1a3f392fd5b6116d04e597484529 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Sun, 11 Feb 2018 15:21:33 +0200 Subject: net/mlx5e: Derive Striding RQ size from MTU In Striding RQ, each WQE serves multiple packets (hence called Multi-Packet WQE, MPWQE). The size of a MPWQE is constant (currently 256KB). Upon a ringparam set operation, we calculate the number of MPWQEs per RQ. For this, first it is needed to determine the number of packets that can reside within a single MPWQE. In this patch we use the actual MTU size instead of ETH_DATA_LEN for this calculation. This implies that a change in MTU might require a change in Striding RQ ring size. In addition, this obsoletes some WQEs-to-packets translation functions and helps delete ~60 LOC. Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 63 ++++++----------- .../net/ethernet/mellanox/mlx5/core/en_ethtool.c | 79 ++-------------------- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 71 ++++++++++++------- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 2 +- .../net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 2 +- .../ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c | 2 +- 6 files changed, 80 insertions(+), 139 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 823876bfd6ab..1f89e2194b61 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -63,18 +63,6 @@ #define MLX5E_MAX_DSCP 64 #define MLX5E_MAX_NUM_TC 8 -#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x6 -#define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE 0xa -#define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE 0xd - -#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE 0x1 -#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa -#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE 0xd - -#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x2 -#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW 0x3 -#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW 0x6 - #define MLX5_RX_HEADROOM NET_SKB_PAD #define MLX5_SKB_FRAG_SZ(len) (SKB_DATA_ALIGN(len) + \ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) @@ -95,9 +83,27 @@ #define MLX5_MPWRQ_PAGES_PER_WQE BIT(MLX5_MPWRQ_WQE_PAGE_ORDER) #define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2) -#define MLX5E_REQUIRED_MTTS(wqes) \ - (wqes * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8)) -#define MLX5E_VALID_NUM_MTTS(num_mtts) (MLX5_MTT_OCTW(num_mtts) - 1 <= U16_MAX) +#define MLX5E_REQUIRED_WQE_MTTS (ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8)) +#define MLX5E_REQUIRED_MTTS(wqes) (wqes * MLX5E_REQUIRED_WQE_MTTS) +#define MLX5E_MAX_RQ_NUM_MTTS \ + ((1 << 16) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */ +#define MLX5E_ORDER2_MAX_PACKET_MTU (order_base_2(10 * 1024)) +#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW \ + (ilog2(MLX5E_MAX_RQ_NUM_MTTS / 
MLX5E_REQUIRED_WQE_MTTS)) +#define MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW \ + (MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW + \ + (MLX5_MPWRQ_LOG_WQE_SZ - MLX5E_ORDER2_MAX_PACKET_MTU)) + +#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x6 +#define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE 0xa +#define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE 0xd + +#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE 0x1 +#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa +#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE min_t(u8, 0xd, \ + MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW) + +#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x2 #define MLX5_UMR_ALIGN (2048) #define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (256) @@ -155,26 +161,6 @@ static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size) } } -static inline int mlx5_min_log_rq_size(int wq_type) -{ - switch (wq_type) { - case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: - return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW; - default: - return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE; - } -} - -static inline int mlx5_max_log_rq_size(int wq_type) -{ - switch (wq_type) { - case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: - return MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW; - default: - return MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE; - } -} - static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev) { return is_kdump_kernel() ? @@ -233,7 +219,7 @@ enum mlx5e_priv_flag { struct mlx5e_params { u8 log_sq_size; u8 rq_wq_type; - u8 log_rq_size; + u8 log_rq_mtu_frames; u16 num_channels; u8 num_tc; bool rx_cqe_compress_def; @@ -849,11 +835,6 @@ void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix); void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix); void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi); -u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev, - struct mlx5e_params *params); -u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev, - struct mlx5e_params *params); - void mlx5e_update_stats(struct mlx5e_priv *priv); int mlx5e_create_flow_steering(struct mlx5e_priv *priv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index c57c929d7973..a87d46bc2299 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -220,60 +220,12 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev, mlx5e_ethtool_get_ethtool_stats(priv, stats, data); } -static u32 mlx5e_rx_wqes_to_packets(struct mlx5e_priv *priv, int rq_wq_type, - int num_wqe) -{ - int packets_per_wqe; - int stride_size; - int num_strides; - int wqe_size; - - if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) - return num_wqe; - - stride_size = 1 << mlx5e_mpwqe_get_log_stride_size(priv->mdev, &priv->channels.params); - num_strides = 1 << mlx5e_mpwqe_get_log_num_strides(priv->mdev, &priv->channels.params); - wqe_size = stride_size * num_strides; - - packets_per_wqe = wqe_size / - ALIGN(ETH_DATA_LEN, stride_size); - return (1 << (order_base_2(num_wqe * packets_per_wqe) - 1)); -} - -static u32 mlx5e_packets_to_rx_wqes(struct mlx5e_priv *priv, int rq_wq_type, - int num_packets) -{ - int packets_per_wqe; - int stride_size; - int num_strides; - int wqe_size; - int num_wqes; - - if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) - return num_packets; - - stride_size = 1 << mlx5e_mpwqe_get_log_stride_size(priv->mdev, &priv->channels.params); - num_strides = 1 << mlx5e_mpwqe_get_log_num_strides(priv->mdev, &priv->channels.params); - wqe_size = stride_size * num_strides; - - num_packets = (1 << 
order_base_2(num_packets)); - - packets_per_wqe = wqe_size / - ALIGN(ETH_DATA_LEN, stride_size); - num_wqes = DIV_ROUND_UP(num_packets, packets_per_wqe); - return 1 << (order_base_2(num_wqes)); -} - void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv, struct ethtool_ringparam *param) { - int rq_wq_type = priv->channels.params.rq_wq_type; - - param->rx_max_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type, - 1 << mlx5_max_log_rq_size(rq_wq_type)); + param->rx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE; param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE; - param->rx_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type, - 1 << priv->channels.params.log_rq_size); + param->rx_pending = 1 << priv->channels.params.log_rq_mtu_frames; param->tx_pending = 1 << priv->channels.params.log_sq_size; } @@ -288,13 +240,9 @@ static void mlx5e_get_ringparam(struct net_device *dev, int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv, struct ethtool_ringparam *param) { - int rq_wq_type = priv->channels.params.rq_wq_type; struct mlx5e_channels new_channels = {}; - u32 rx_pending_wqes; - u32 min_rq_size; u8 log_rq_size; u8 log_sq_size; - u32 num_mtts; int err = 0; if (param->rx_jumbo_pending) { @@ -308,23 +256,10 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv, return -EINVAL; } - min_rq_size = mlx5e_rx_wqes_to_packets(priv, rq_wq_type, - 1 << mlx5_min_log_rq_size(rq_wq_type)); - rx_pending_wqes = mlx5e_packets_to_rx_wqes(priv, rq_wq_type, - param->rx_pending); - - if (param->rx_pending < min_rq_size) { + if (param->rx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE)) { netdev_info(priv->netdev, "%s: rx_pending (%d) < min (%d)\n", __func__, param->rx_pending, - min_rq_size); - return -EINVAL; - } - - num_mtts = MLX5E_REQUIRED_MTTS(rx_pending_wqes); - if (priv->channels.params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ && - !MLX5E_VALID_NUM_MTTS(num_mtts)) { - netdev_info(priv->netdev, "%s: rx_pending (%d) request can't be satisfied, try to reduce.\n", - __func__, param->rx_pending); + 1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE); return -EINVAL; } @@ -335,17 +270,17 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv, return -EINVAL; } - log_rq_size = order_base_2(rx_pending_wqes); + log_rq_size = order_base_2(param->rx_pending); log_sq_size = order_base_2(param->tx_pending); - if (log_rq_size == priv->channels.params.log_rq_size && + if (log_rq_size == priv->channels.params.log_rq_mtu_frames && log_sq_size == priv->channels.params.log_sq_size) return 0; mutex_lock(&priv->state_lock); new_channels.params = priv->channels.params; - new_channels.params.log_rq_size = log_rq_size; + new_channels.params.log_rq_mtu_frames = log_rq_size; new_channels.params.log_sq_size = log_sq_size; if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index af345323b2ce..e627b81cebe9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -78,15 +78,38 @@ bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev) MLX5_CAP_ETH(mdev, reg_umr_sq); } -u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev, - struct mlx5e_params *params) +static u32 mlx5e_mpwqe_get_linear_frag_sz(struct mlx5e_params *params) +{ + u16 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); + + return hw_mtu; +} + +static u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params) +{ + u32 linear_frag_sz = 
mlx5e_mpwqe_get_linear_frag_sz(params); + + return MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(linear_frag_sz); +} + +static u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params) +{ + if (params->log_rq_mtu_frames < + mlx5e_mpwqe_log_pkts_per_wqe(params) + MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW) + return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW; + + return params->log_rq_mtu_frames - mlx5e_mpwqe_log_pkts_per_wqe(params); +} + +static u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev, + struct mlx5e_params *params) { return MLX5E_MPWQE_STRIDE_SZ(mdev, MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)); } -u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev, - struct mlx5e_params *params) +static u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev, + struct mlx5e_params *params) { return MLX5_MPWRQ_LOG_WQE_SZ - mlx5e_mpwqe_get_log_stride_size(mdev, params); @@ -109,17 +132,13 @@ void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params) { params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; + params->log_rq_mtu_frames = is_kdump_kernel() ? + MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE : + MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE; switch (params->rq_wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: - params->log_rq_size = is_kdump_kernel() ? - MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW : - MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW; break; default: /* MLX5_WQ_TYPE_LINKED_LIST */ - params->log_rq_size = is_kdump_kernel() ? - MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE : - MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE; - /* Extra room needed for build_skb */ params->lro_wqe_sz -= mlx5e_get_rq_headroom(params) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); @@ -127,7 +146,7 @@ void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n", params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ, - BIT(params->log_rq_size), + BIT(params->log_rq_mtu_frames), BIT(mlx5e_mpwqe_get_log_stride_size(mdev, params)), MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)); } @@ -351,9 +370,6 @@ static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev, u32 *in; int err; - if (!MLX5E_VALID_NUM_MTTS(npages)) - return -EINVAL; - in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; @@ -1872,14 +1888,15 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv, MLX5_SET(wq, wq, log_wqe_stride_size, mlx5e_mpwqe_get_log_stride_size(mdev, params) - 6); MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ); + MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params)); break; default: /* MLX5_WQ_TYPE_LINKED_LIST */ MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST); + MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames); } MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN); MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe))); - MLX5_SET(wq, wq, log_wq_sz, params->log_rq_size); MLX5_SET(wq, wq, pd, mdev->mlx5e_res.pdn); MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter); MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable); @@ -1939,16 +1956,17 @@ static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv, struct mlx5e_params *params, struct mlx5e_cq_param *param) { + struct mlx5_core_dev *mdev = priv->mdev; void *cqc = param->cqc; u8 log_cq_size; switch (params->rq_wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: - log_cq_size = params->log_rq_size + - mlx5e_mpwqe_get_log_num_strides(priv->mdev, params); + log_cq_size = mlx5e_mpwqe_get_log_rq_size(params) + + 
mlx5e_mpwqe_get_log_num_strides(mdev, params); break; default: /* MLX5_WQ_TYPE_LINKED_LIST */ - log_cq_size = params->log_rq_size; + log_cq_size = params->log_rq_mtu_frames; } MLX5_SET(cqc, cqc, log_cq_size, log_cq_size); @@ -3421,11 +3439,20 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu) mutex_lock(&priv->state_lock); params = &priv->channels.params; - reset = !params->lro_en && - (params->rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ); + reset = !params->lro_en; reset = reset && test_bit(MLX5E_STATE_OPENED, &priv->state); + new_channels.params = *params; + new_channels.params.sw_mtu = new_mtu; + + if (params->rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST) { + u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params); + u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params); + + reset = reset && (ppw_old != ppw_new); + } + if (!reset) { params->sw_mtu = new_mtu; mlx5e_set_dev_port_mtu(priv); @@ -3433,8 +3460,6 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu) goto out; } - new_channels.params = *params; - new_channels.params.sw_mtu = new_mtu; err = mlx5e_open_channels(priv, &new_channels); if (err) goto out; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index cd884829cb30..8e70fa9ef39a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -880,7 +880,7 @@ static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev, params->hard_mtu = MLX5E_ETH_HARD_MTU; params->log_sq_size = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE; params->rq_wq_type = MLX5_WQ_TYPE_LINKED_LIST; - params->log_rq_size = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE; + params->log_rq_mtu_frames = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE; params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation); mlx5e_set_rx_cq_mode_params(params, cq_period_mode); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index dc5e9e706362..af3bb2f7a504 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -61,7 +61,7 @@ static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev, mlx5e_init_rq_type_params(mdev, params); /* RQ size in ipoib by default is 512 */ - params->log_rq_size = is_kdump_kernel() ? + params->log_rq_mtu_frames = is_kdump_kernel() ? MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE : MLX5I_PARAMS_DEFAULT_LOG_RQ_SIZE; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c index b69e9d847a6b..54a188f41f90 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c @@ -290,7 +290,7 @@ static void mlx5i_pkey_init(struct mlx5_core_dev *mdev, netdev->ethtool_ops = &mlx5i_pkey_ethtool_ops; /* Use dummy rqs */ - priv->channels.params.log_rq_size = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE; + priv->channels.params.log_rq_mtu_frames = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE; } /* Called directly before IPoIB netdevice is destroyed to cleanup SW structs */ -- cgit v1.2.3 From 18187fb2c3c138630f6b7c7b7ba7ab41ccd95129 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Tue, 18 Jul 2017 12:07:06 +0300 Subject: net/mlx5e: Code movements in RX UMR WQE post Gets the process of a UMR WQE post in one function, in preparation for a downstream patch that inlines the WQE data. No functional change here. 
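One idiom the consolidated function makes easier to read is the edge padding that precedes the UMR post: a multi-WQEBB WQE must occupy contiguous slots of the cyclic work queue, so the producer first fills the ring tail with NOPs. A standalone model of that logic follows (ring size and slot count are illustrative; the driver expresses the boundary via a precomputed sq->edge rather than computing it inline):

#include <stdint.h>
#include <stdio.h>

#define WQ_SZ 16 /* ring slots, power of two */

static uint16_t pc;                      /* producer counter, free-running */
static const uint16_t sz_m1 = WQ_SZ - 1; /* index mask */

static uint16_t reserve_contig(int nslots)
{
	/* pad with NOPs while an nslots-wide WQE would wrap past the end */
	while ((pc & sz_m1) > (uint16_t)(WQ_SZ - nslots)) {
		printf("nop at slot %u\n", pc & sz_m1);
		pc++; /* a real ring would post a NOP WQE here */
	}
	return pc & sz_m1; /* start slot of the contiguous WQE */
}

int main(void)
{
	pc = 14;
	printf("wqe starts at slot %u\n", reserve_contig(4)); /* NOPs at 14,15; start 0 */
	pc += 4; /* consume the reserved slots */
	return 0;
}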
Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 107 ++++++++++-------------- 1 file changed, 45 insertions(+), 62 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index c0d528f2131b..8aa94d3cff59 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -347,39 +347,44 @@ mlx5e_copy_skb_header_mpwqe(struct device *pdev, } } -static inline void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix) +void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi) { - struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix]; - struct mlx5e_icosq *sq = &rq->channel->icosq; - struct mlx5_wq_cyc *wq = &sq->wq; - struct mlx5e_umr_wqe *wqe; - u8 num_wqebbs = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_BB); - u16 pi; + int pg_strides = mlx5e_mpwqe_strides_per_page(rq); + struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[0]; + int i; - /* fill sq edge with nops to avoid wqe wrap around */ - while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) { - sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP; - mlx5e_post_nop(wq, sq->sqn, &sq->pc); + for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++, dma_info++) { + page_ref_sub(dma_info->page, pg_strides - wi->skbs_frags[i]); + mlx5e_page_release(rq, dma_info, true); } +} - wqe = mlx5_wq_cyc_get_wqe(wq, pi); - memcpy(wqe, &wi->umr.wqe, sizeof(*wqe)); - wqe->ctrl.opmod_idx_opcode = - cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) | - MLX5_OPCODE_UMR); +static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq) +{ + struct mlx5_wq_ll *wq = &rq->wq; + struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head); - sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR; - sq->pc += num_wqebbs; - mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &wqe->ctrl); + rq->mpwqe.umr_in_progress = false; + + mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index)); + + /* ensure wqes are visible to device before updating doorbell record */ + dma_wmb(); + + mlx5_wq_ll_update_db_record(wq); } -static int mlx5e_alloc_rx_umr_mpwqe(struct mlx5e_rq *rq, - u16 ix) +static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) { struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix]; int pg_strides = mlx5e_mpwqe_strides_per_page(rq); struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[0]; + struct mlx5e_icosq *sq = &rq->channel->icosq; + struct mlx5_wq_cyc *wq = &sq->wq; + struct mlx5e_umr_wqe *wqe; + u8 num_wqebbs = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_BB); int err; + u16 pi; int i; for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++, dma_info++) { @@ -393,6 +398,24 @@ static int mlx5e_alloc_rx_umr_mpwqe(struct mlx5e_rq *rq, memset(wi->skbs_frags, 0, sizeof(*wi->skbs_frags) * MLX5_MPWRQ_PAGES_PER_WQE); wi->consumed_strides = 0; + rq->mpwqe.umr_in_progress = true; + + /* fill sq edge with nops to avoid wqe wrap around */ + while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) { + sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP; + mlx5e_post_nop(wq, sq->sqn, &sq->pc); + } + + wqe = mlx5_wq_cyc_get_wqe(wq, pi); + memcpy(wqe, &wi->umr.wqe, sizeof(*wqe)); + wqe->ctrl.opmod_idx_opcode = + cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) | + MLX5_OPCODE_UMR); + + sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR; + sq->pc += num_wqebbs; + mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &wqe->ctrl); + return 0; err_unmap: @@ -401,51 +424,11 @@ err_unmap: page_ref_sub(dma_info->page, pg_strides); 
mlx5e_page_release(rq, dma_info, true); } + rq->stats.buff_alloc_err++; return err; } -void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi) -{ - int pg_strides = mlx5e_mpwqe_strides_per_page(rq); - struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[0]; - int i; - - for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++, dma_info++) { - page_ref_sub(dma_info->page, pg_strides - wi->skbs_frags[i]); - mlx5e_page_release(rq, dma_info, true); - } -} - -static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq) -{ - struct mlx5_wq_ll *wq = &rq->wq; - struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head); - - rq->mpwqe.umr_in_progress = false; - - mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index)); - - /* ensure wqes are visible to device before updating doorbell record */ - dma_wmb(); - - mlx5_wq_ll_update_db_record(wq); -} - -static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) -{ - int err; - - err = mlx5e_alloc_rx_umr_mpwqe(rq, ix); - if (unlikely(err)) { - rq->stats.buff_alloc_err++; - return err; - } - rq->mpwqe.umr_in_progress = true; - mlx5e_post_umr_wqe(rq, ix); - return 0; -} - void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) { struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix]; -- cgit v1.2.3 From e4d86a4a58b436fca0ce084ea453cc0ec8c39bb4 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Sun, 7 Jan 2018 14:15:02 +0200 Subject: net/mlx5e: Do not busy-wait for UMR completion in Striding RQ Do not busy-wait a pending UMR completion. Under high HW load, busy-waiting a delayed completion would fully utilize the CPU core and mistakenly indicate a SW bottleneck. Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 8aa94d3cff59..8eb9e7e89b09 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -527,7 +527,7 @@ bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq) if (!rq->mpwqe.umr_in_progress) mlx5e_alloc_rx_mpwqe(rq, wq->head); - return true; + return false; } static void mlx5e_lro_update_tcp_hdr(struct mlx5_cqe64 *cqe, struct tcphdr *tcp) -- cgit v1.2.3 From ea3886cab76f1026a5db988fa6fad997e98f3a32 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Mon, 10 Jul 2017 12:52:36 +0300 Subject: net/mlx5e: Use inline MTTs in UMR WQEs When modifying the page mapping of a HW memory region (via a UMR post), post the new values inlined in WQE, instead of using a data pointer. This is a micro-optimization, inline UMR WQEs of different rings scale better in HW. In addition, this obsoletes a few control flows and helps delete ~50 LOC. 
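A quick sizing check explains why inlining is feasible: under stated assumptions (64 pages per MPWQE; 16-, 48- and 64-byte control, UMR-control and mkey segments; 64-byte MTT copy alignment and WQE basic blocks, all restated here rather than taken from the headers), the inline MTT array adds 512 bytes, for a 640-byte WQE of 10 WQEBBs. This is also why the patch gates Striding RQ on the device's max_wqe_sz_sq capability:

#include <stdint.h>
#include <stdio.h>

#define PAGES_PER_WQE     64  /* assumed MLX5_MPWRQ_PAGES_PER_WQE */
#define UMR_MTT_ALIGNMENT 64  /* assumed MTT copy granularity */
#define SEND_WQE_BB       64  /* assumed basic-block size in bytes */
#define ALIGN_UP(x, a)    (((x) + (a) - 1) & ~((size_t)(a) - 1))

struct umr_wqe {
	uint8_t ctrl[16];       /* stand-in for mlx5_wqe_ctrl_seg */
	uint8_t uctrl[48];      /* stand-in for mlx5_wqe_umr_ctrl_seg */
	uint8_t mkc[64];        /* stand-in for mlx5_mkey_seg */
	uint64_t inline_mtts[]; /* MTTs carried inline, replacing the data pointer */
};

int main(void)
{
	size_t inline_sz = sizeof(struct umr_wqe) +
		ALIGN_UP(PAGES_PER_WQE * sizeof(uint64_t), UMR_MTT_ALIGNMENT);
	size_t wqebbs = (inline_sz + SEND_WQE_BB - 1) / SEND_WQE_BB;

	/* 128 + 512 = 640 bytes -> 10 WQEBBs under these assumptions */
	printf("inline UMR WQE: %zu bytes, %zu WQEBBs\n", inline_sz, wqebbs);
	return 0;
}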
Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 16 ++--- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 82 +++++------------------ drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 28 ++++---- 3 files changed, 38 insertions(+), 88 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 1f89e2194b61..c1d3a29388bd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -105,7 +105,6 @@ #define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x2 -#define MLX5_UMR_ALIGN (2048) #define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (256) #define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024) @@ -130,8 +129,13 @@ #define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */ #define MLX5E_SQ_RECOVER_MIN_INTERVAL 500 /* msecs */ -#define MLX5E_ICOSQ_MAX_WQEBBS \ - (DIV_ROUND_UP(sizeof(struct mlx5e_umr_wqe), MLX5_SEND_WQE_BB)) +#define MLX5E_UMR_WQE_INLINE_SZ \ + (sizeof(struct mlx5e_umr_wqe) + \ + ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(struct mlx5_mtt), \ + MLX5_UMR_MTT_ALIGNMENT)) +#define MLX5E_UMR_WQEBBS \ + (DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_BB)) +#define MLX5E_ICOSQ_MAX_WQEBBS MLX5E_UMR_WQEBBS #define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN) #define MLX5E_XDP_TX_DS_COUNT \ @@ -183,7 +187,7 @@ struct mlx5e_umr_wqe { struct mlx5_wqe_ctrl_seg ctrl; struct mlx5_wqe_umr_ctrl_seg uctrl; struct mlx5_mkey_seg mkc; - struct mlx5_wqe_data_seg data; + struct mlx5_mtt inline_mtts[0]; }; extern const char mlx5e_self_tests[][ETH_GSTRING_LEN]; @@ -421,7 +425,6 @@ struct mlx5e_icosq { void __iomem *uar_map; u32 sqn; u16 edge; - __be32 mkey_be; unsigned long state; /* control path */ @@ -446,8 +449,6 @@ struct mlx5e_wqe_frag_info { }; struct mlx5e_umr_dma_info { - __be64 *mtt; - dma_addr_t mtt_addr; struct mlx5e_dma_info dma_info[MLX5_MPWRQ_PAGES_PER_WQE]; struct mlx5e_umr_wqe wqe; }; @@ -490,7 +491,6 @@ struct mlx5e_rq { } wqe; struct { struct mlx5e_mpw_info *info; - void *mtt_no_align; u16 num_strides; u8 log_stride_sz; bool umr_in_progress; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index e627b81cebe9..42dc350c5ab1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -73,9 +73,20 @@ struct mlx5e_channel_param { bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev) { - return MLX5_CAP_GEN(mdev, striding_rq) && + bool striding_rq_umr = MLX5_CAP_GEN(mdev, striding_rq) && MLX5_CAP_GEN(mdev, umr_ptr_rlky) && MLX5_CAP_ETH(mdev, reg_umr_sq); + u16 max_wqe_sz_cap = MLX5_CAP_GEN(mdev, max_wqe_sz_sq); + bool inline_umr = MLX5E_UMR_WQE_INLINE_SZ <= max_wqe_sz_cap; + + if (!striding_rq_umr) + return false; + if (!inline_umr) { + mlx5_core_warn(mdev, "Cannot support Striding RQ: UMR WQE size (%d) exceeds maximum supported (%d).\n", + (int)MLX5E_UMR_WQE_INLINE_SZ, max_wqe_sz_cap); + return false; + } + return true; } static u32 mlx5e_mpwqe_get_linear_frag_sz(struct mlx5e_params *params) @@ -258,16 +269,6 @@ static void mlx5e_disable_async_events(struct mlx5e_priv *priv) synchronize_irq(pci_irq_vector(priv->mdev->pdev, MLX5_EQ_VEC_ASYNC)); } -static inline int mlx5e_get_wqe_mtt_sz(void) -{ - /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes. - * To avoid copying garbage after the mtt array, we allocate - * a little more. 
- */ - return ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(__be64), - MLX5_UMR_MTT_ALIGNMENT); -} - static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq, struct mlx5e_icosq *sq, struct mlx5e_umr_wqe *wqe, @@ -275,9 +276,7 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq, { struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl; - struct mlx5_wqe_data_seg *dseg = &wqe->data; - struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix]; - u8 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS); + u8 ds_cnt = DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_DS); u32 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq, ix); cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) | @@ -285,80 +284,32 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq, cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE; cseg->imm = rq->mkey_be; - ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN; + ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE; ucseg->xlt_octowords = cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE)); ucseg->bsf_octowords = cpu_to_be16(MLX5_MTT_OCTW(umr_wqe_mtt_offset)); ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE); - - dseg->lkey = sq->mkey_be; - dseg->addr = cpu_to_be64(wi->umr.mtt_addr); } static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, struct mlx5e_channel *c) { int wq_sz = mlx5_wq_ll_get_size(&rq->wq); - int mtt_sz = mlx5e_get_wqe_mtt_sz(); - int mtt_alloc = mtt_sz + MLX5_UMR_ALIGN - 1; int i; rq->mpwqe.info = kzalloc_node(wq_sz * sizeof(*rq->mpwqe.info), GFP_KERNEL, cpu_to_node(c->cpu)); if (!rq->mpwqe.info) - goto err_out; - - /* We allocate more than mtt_sz as we will align the pointer */ - rq->mpwqe.mtt_no_align = kzalloc_node(mtt_alloc * wq_sz, GFP_KERNEL, - cpu_to_node(c->cpu)); - if (unlikely(!rq->mpwqe.mtt_no_align)) - goto err_free_wqe_info; + return -ENOMEM; for (i = 0; i < wq_sz; i++) { struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i]; - wi->umr.mtt = PTR_ALIGN(rq->mpwqe.mtt_no_align + i * mtt_alloc, - MLX5_UMR_ALIGN); - wi->umr.mtt_addr = dma_map_single(c->pdev, wi->umr.mtt, mtt_sz, - PCI_DMA_TODEVICE); - if (unlikely(dma_mapping_error(c->pdev, wi->umr.mtt_addr))) - goto err_unmap_mtts; - mlx5e_build_umr_wqe(rq, &c->icosq, &wi->umr.wqe, i); } return 0; - -err_unmap_mtts: - while (--i >= 0) { - struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i]; - - dma_unmap_single(c->pdev, wi->umr.mtt_addr, mtt_sz, - PCI_DMA_TODEVICE); - } - kfree(rq->mpwqe.mtt_no_align); -err_free_wqe_info: - kfree(rq->mpwqe.info); - -err_out: - return -ENOMEM; -} - -static void mlx5e_rq_free_mpwqe_info(struct mlx5e_rq *rq) -{ - int wq_sz = mlx5_wq_ll_get_size(&rq->wq); - int mtt_sz = mlx5e_get_wqe_mtt_sz(); - int i; - - for (i = 0; i < wq_sz; i++) { - struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i]; - - dma_unmap_single(rq->pdev, wi->umr.mtt_addr, mtt_sz, - PCI_DMA_TODEVICE); - } - kfree(rq->mpwqe.mtt_no_align); - kfree(rq->mpwqe.info); } static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev, @@ -579,7 +530,7 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq) switch (rq->wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: - mlx5e_rq_free_mpwqe_info(rq); + kfree(rq->mpwqe.info); mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey); break; default: /* MLX5_WQ_TYPE_LINKED_LIST */ @@ -918,7 +869,6 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c, struct mlx5_core_dev *mdev = c->mdev; int err; - sq->mkey_be = c->mkey_be; sq->channel = c; sq->uar_map = mdev->mlx5e_res.bfreg.map; diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 8eb9e7e89b09..539dbe9382ee 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -381,17 +381,25 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[0]; struct mlx5e_icosq *sq = &rq->channel->icosq; struct mlx5_wq_cyc *wq = &sq->wq; - struct mlx5e_umr_wqe *wqe; - u8 num_wqebbs = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_BB); + struct mlx5e_umr_wqe *umr_wqe; + int cpy = offsetof(struct mlx5e_umr_wqe, inline_mtts); int err; u16 pi; int i; + /* fill sq edge with nops to avoid wqe wrap around */ + while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) { + sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP; + mlx5e_post_nop(wq, sq->sqn, &sq->pc); + } + + umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi); + memcpy(umr_wqe, &wi->umr.wqe, cpy); for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++, dma_info++) { err = mlx5e_page_alloc_mapped(rq, dma_info); if (unlikely(err)) goto err_unmap; - wi->umr.mtt[i] = cpu_to_be64(dma_info->addr | MLX5_EN_WR); + umr_wqe->inline_mtts[i].ptag = cpu_to_be64(dma_info->addr | MLX5_EN_WR); page_ref_add(dma_info->page, pg_strides); } @@ -400,21 +408,13 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) rq->mpwqe.umr_in_progress = true; - /* fill sq edge with nops to avoid wqe wrap around */ - while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) { - sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP; - mlx5e_post_nop(wq, sq->sqn, &sq->pc); - } - - wqe = mlx5_wq_cyc_get_wqe(wq, pi); - memcpy(wqe, &wi->umr.wqe, sizeof(*wqe)); - wqe->ctrl.opmod_idx_opcode = + umr_wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) | MLX5_OPCODE_UMR); sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR; - sq->pc += num_wqebbs; - mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &wqe->ctrl); + sq->pc += MLX5E_UMR_WQEBBS; + mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &umr_wqe->ctrl); return 0; -- cgit v1.2.3 From 619a8f2a42f1031cdbd74435b6a9191eb4913139 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Wed, 7 Feb 2018 14:41:25 +0200 Subject: net/mlx5e: Use linear SKB in Striding RQ The current Striding RQ HW feature utilizes the RX buffers so that there is no wasted room between the strides. This maximises the memory utilization. This prevents the use of build_skb() (which requires headroom and tailroom), and forces a memcpy of the packet headers into the skb linear part. In this patch, whenever a set of conditions holds, we apply an RQ configuration that allows using a linear SKB on top of a Striding RQ. To use build_skb() with Striding RQ, the following must hold: 1. packet does not cross a page boundary. 2. there is enough headroom and tailroom surrounding the packet. We can satisfy 1 and 2 by configuring: stride size = MTU + headroom + tailroom. This is possible only when: a. (MTU + headroom + tailroom) does not exceed PAGE_SIZE. b. HW LRO is turned off. (A worked example of condition (a) follows the list of advantages below.) Using linear SKB has many advantages: - Saves a memcpy of the headers. - No page-boundary checks in datapath. - No filler CQEs. - Significantly smaller CQ. - SKB data resides contiguously in the linear part, and is not split into a small linear part and a large fragment. This saves datapath cycles in driver and improves utilization of SKB fragments in GRO. - The fragments of a resulting GRO SKB follow the IP forwarding assumption of equal-size fragments.
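A worked example of condition (a) for the default configuration. This is a hypothetical sketch, not driver code; it assumes 4K pages, MTU 1500, 22B of L2 overhead (hard_mtu), 64B NET_SKB_PAD headroom plus a NET_IP_ALIGN of 2, 64B cache lines, and ~320B of skb_shared_info tailroom; all of these vary with arch and config.

#include <stdio.h>

#define SKB_DATA_ALIGN(x) (((x) + 63) & ~63) /* assumes SMP_CACHE_BYTES = 64 */

int main(void)
{
	int hw_mtu = 1500 + 22; /* sw MTU + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN */
	int headroom = 64 + 2;  /* NET_SKB_PAD + NET_IP_ALIGN */
	int shinfo = 320;       /* approx sizeof(struct skb_shared_info) */

	int frag_sz = SKB_DATA_ALIGN(headroom + hw_mtu) + SKB_DATA_ALIGN(shinfo);

	/* prints "frag_sz = 1920": 1920 <= PAGE_SIZE, so the linear path
	 * applies and the stride size becomes roundup_pow_of_two(1920) = 2048.
	 */
	printf("frag_sz = %d\n", frag_sz);
	return 0;
}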
Some implementation details: HW writes the packets to the beginning of a stride, i.e. does not keep headroom. To overcome this we make sure we can extend backwards and use the last bytes of stride i-1. Extra care is needed for stride 0 as it has no preceding stride. We make sure headroom bytes are available by shifting the buffer pointer passed to HW by headroom bytes. This configuration now becomes default, whenever capable. Of course, this implies turning LRO off. Performance testing: ConnectX-5, single core, single RX ring, default MTU. UDP packet rate, early drop in TC layer: -------------------------------------------- | pkt size | before | after | ratio | -------------------------------------------- | 1500byte | 4.65 Mpps | 5.96 Mpps | 1.28x | | 500byte | 5.23 Mpps | 5.97 Mpps | 1.14x | | 64byte | 5.94 Mpps | 5.96 Mpps | 1.00x | -------------------------------------------- TCP streams: ~20% gain Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 10 +++ drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 76 +++++++++++++--- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 102 ++++++++++++++++------ include/linux/mlx5/device.h | 3 + include/linux/mlx5/mlx5_ifc.h | 7 +- 5 files changed, 153 insertions(+), 45 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index c1d3a29388bd..d26dd4bc89f4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -473,6 +473,9 @@ struct mlx5e_page_cache { struct mlx5e_rq; typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq*, struct mlx5_cqe64*); +typedef struct sk_buff * +(*mlx5e_fp_skb_from_cqe_mpwrq)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, + u16 cqe_bcnt, u32 head_offset, u32 page_idx); typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq); typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16); @@ -491,6 +494,7 @@ struct mlx5e_rq { } wqe; struct { struct mlx5e_mpw_info *info; + mlx5e_fp_skb_from_cqe_mpwrq skb_from_cqe_mpwrq; u16 num_strides; u8 log_stride_sz; bool umr_in_progress; @@ -834,6 +838,12 @@ bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq); void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix); void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix); void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi); +struct sk_buff * +mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, + u16 cqe_bcnt, u32 head_offset, u32 page_idx); +struct sk_buff * +mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, + u16 cqe_bcnt, u32 head_offset, u32 page_idx); void mlx5e_update_stats(struct mlx5e_priv *priv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 42dc350c5ab1..bba2fa0aa15f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -91,9 +91,14 @@ bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev) static u32 mlx5e_mpwqe_get_linear_frag_sz(struct mlx5e_params *params) { - u16 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); + if (!params->xdp_prog) { + u16 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); + u16 rq_headroom = MLX5_RX_HEADROOM + NET_IP_ALIGN; - return hw_mtu; + return MLX5_SKB_FRAG_SZ(rq_headroom + hw_mtu); + } + + return PAGE_SIZE; } static u8 
mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params) @@ -103,6 +108,26 @@ static u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params) return MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(linear_frag_sz); } +static bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev, + struct mlx5e_params *params) +{ + u32 frag_sz = mlx5e_mpwqe_get_linear_frag_sz(params); + s8 signed_log_num_strides_param; + u8 log_num_strides; + + if (params->lro_en || frag_sz > PAGE_SIZE) + return false; + + if (MLX5_CAP_GEN(mdev, ext_stride_num_range)) + return true; + + log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(frag_sz); + signed_log_num_strides_param = + (s8)log_num_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE; + + return signed_log_num_strides_param >= 0; +} + static u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params) { if (params->log_rq_mtu_frames < @@ -115,6 +140,9 @@ static u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params) static u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev, struct mlx5e_params *params) { + if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params)) + return order_base_2(mlx5e_mpwqe_get_linear_frag_sz(params)); + return MLX5E_MPWQE_STRIDE_SZ(mdev, MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)); } @@ -126,7 +154,8 @@ static u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev, mlx5e_mpwqe_get_log_stride_size(mdev, params); } -static u16 mlx5e_get_rq_headroom(struct mlx5e_params *params) +static u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev, + struct mlx5e_params *params) { u16 linear_rq_headroom = params->xdp_prog ? XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM; @@ -136,6 +165,9 @@ static u16 mlx5e_get_rq_headroom(struct mlx5e_params *params) if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST) return linear_rq_headroom; + if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params)) + return linear_rq_headroom; + return 0; } @@ -151,12 +183,14 @@ void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, break; default: /* MLX5_WQ_TYPE_LINKED_LIST */ /* Extra room needed for build_skb */ - params->lro_wqe_sz -= mlx5e_get_rq_headroom(params) + + params->lro_wqe_sz -= mlx5e_get_rq_headroom(mdev, params) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); } mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n", params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ, + params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ? + BIT(mlx5e_mpwqe_get_log_rq_size(params)) : BIT(params->log_rq_mtu_frames), BIT(mlx5e_mpwqe_get_log_stride_size(mdev, params)), MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)); @@ -400,11 +434,10 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, goto err_rq_wq_destroy; rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; - rq->buff.headroom = mlx5e_get_rq_headroom(params); + rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params); switch (rq->wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: - rq->post_wqes = mlx5e_post_rx_mpwqes; rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe; @@ -422,6 +455,10 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, goto err_rq_wq_destroy; } + rq->mpwqe.skb_from_cqe_mpwrq = + mlx5e_rx_mpwqe_is_linear_skb(mdev, params) ? 
+ mlx5e_skb_from_cqe_mpwrq_linear : + mlx5e_skb_from_cqe_mpwrq_nonlinear; rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params); rq->mpwqe.num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params)); @@ -484,7 +521,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { u64 dma_offset = (u64)mlx5e_get_wqe_mtt_offset(rq, i) << PAGE_SHIFT; - wqe->data.addr = cpu_to_be64(dma_offset); + wqe->data.addr = cpu_to_be64(dma_offset + rq->buff.headroom); } wqe->data.byte_count = cpu_to_be32(byte_count); @@ -1834,9 +1871,11 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv, switch (params->rq_wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: MLX5_SET(wq, wq, log_wqe_num_of_strides, - mlx5e_mpwqe_get_log_num_strides(mdev, params) - 9); + mlx5e_mpwqe_get_log_num_strides(mdev, params) - + MLX5_MPWQE_LOG_NUM_STRIDES_BASE); MLX5_SET(wq, wq, log_wqe_stride_size, - mlx5e_mpwqe_get_log_stride_size(mdev, params) - 6); + mlx5e_mpwqe_get_log_stride_size(mdev, params) - + MLX5_MPWQE_LOG_STRIDE_SZ_BASE); MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ); MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params)); break; @@ -3193,20 +3232,28 @@ typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable); static int set_feature_lro(struct net_device *netdev, bool enable) { struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; struct mlx5e_channels new_channels = {}; + struct mlx5e_params *old_params; int err = 0; bool reset; mutex_lock(&priv->state_lock); - reset = (priv->channels.params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST); - reset = reset && test_bit(MLX5E_STATE_OPENED, &priv->state); + old_params = &priv->channels.params; + reset = test_bit(MLX5E_STATE_OPENED, &priv->state); - new_channels.params = priv->channels.params; + new_channels.params = *old_params; new_channels.params.lro_en = enable; + if (old_params->rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST) { + if (mlx5e_rx_mpwqe_is_linear_skb(mdev, old_params) == + mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_channels.params)) + reset = false; + } + if (!reset) { - priv->channels.params = new_channels.params; + *old_params = new_channels.params; err = mlx5e_modify_tirs_lro(priv); goto out; } @@ -4121,7 +4168,8 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev, /* TODO: && MLX5_CAP_ETH(mdev, lro_cap) */ if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) - params->lro_en = !slow_pci_heuristic(mdev); + if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params)) + params->lro_en = !slow_pci_heuristic(mdev); params->lro_timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT); /* CQ moderation params */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 539dbe9382ee..07db8a58d0a2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -836,6 +836,24 @@ static inline int mlx5e_xdp_handle(struct mlx5e_rq *rq, } } +static inline +struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va, + u32 frag_size, u16 headroom, + u32 cqe_bcnt) +{ + struct sk_buff *skb = build_skb(va, frag_size); + + if (unlikely(!skb)) { + rq->stats.buff_alloc_err++; + return NULL; + } + + skb_reserve(skb, headroom); + skb_put(skb, cqe_bcnt); + + return skb; +} + static inline struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, struct mlx5e_wqe_frag_info *wi, 
u32 cqe_bcnt) @@ -867,18 +885,13 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, if (consumed) return NULL; /* page/packet was consumed by XDP */ - skb = build_skb(va, frag_size); - if (unlikely(!skb)) { - rq->stats.buff_alloc_err++; + skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt); + if (unlikely(!skb)) return NULL; - } /* queue up for recycling/reuse */ page_ref_inc(di->page); - skb_reserve(skb, rx_headroom); - skb_put(skb, cqe_bcnt); - return skb; } @@ -967,20 +980,24 @@ wq_ll_pop: } #endif -static inline void mlx5e_mpwqe_fill_rx_skb(struct mlx5e_rq *rq, - struct mlx5_cqe64 *cqe, - struct mlx5e_mpw_info *wi, - u32 cqe_bcnt, - struct sk_buff *skb) +struct sk_buff * +mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, + u16 cqe_bcnt, u32 head_offset, u32 page_idx) { - u16 stride_ix = mpwrq_get_cqe_stride_index(cqe); - u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz; - u32 head_offset = wqe_offset & (PAGE_SIZE - 1); - u32 page_idx = wqe_offset >> PAGE_SHIFT; - u32 head_page_idx = page_idx; u16 headlen = min_t(u16, MLX5_MPWRQ_SMALL_PACKET_THRESHOLD, cqe_bcnt); u32 frag_offset = head_offset + headlen; u16 byte_cnt = cqe_bcnt - headlen; + u32 head_page_idx = page_idx; + struct sk_buff *skb; + + skb = napi_alloc_skb(rq->cq.napi, + ALIGN(MLX5_MPWRQ_SMALL_PACKET_THRESHOLD, sizeof(long))); + if (unlikely(!skb)) { + rq->stats.buff_alloc_err++; + return NULL; + } + + prefetchw(skb->data); if (unlikely(frag_offset >= PAGE_SIZE)) { page_idx++; @@ -1003,6 +1020,35 @@ static inline void mlx5e_mpwqe_fill_rx_skb(struct mlx5e_rq *rq, /* skb linear part was allocated with headlen and aligned to long */ skb->tail += headlen; skb->len += headlen; + + return skb; +} + +struct sk_buff * +mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, + u16 cqe_bcnt, u32 head_offset, u32 page_idx) +{ + struct mlx5e_dma_info *di = &wi->umr.dma_info[page_idx]; + u16 rx_headroom = rq->buff.headroom; + struct sk_buff *skb; + void *va, *data; + u32 frag_size; + + va = page_address(di->page) + head_offset; + data = va + rx_headroom; + frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt); + + dma_sync_single_range_for_cpu(rq->pdev, di->addr, head_offset, + frag_size, DMA_FROM_DEVICE); + prefetch(data); + skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt); + if (unlikely(!skb)) + return NULL; + + /* queue up for recycling/reuse */ + wi->skbs_frags[page_idx]++; + + return skb; } void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) @@ -1010,7 +1056,11 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe); u16 wqe_id = be16_to_cpu(cqe->wqe_id); struct mlx5e_mpw_info *wi = &rq->mpwqe.info[wqe_id]; - struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_id); + u16 stride_ix = mpwrq_get_cqe_stride_index(cqe); + u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz; + u32 head_offset = wqe_offset & (PAGE_SIZE - 1); + u32 page_idx = wqe_offset >> PAGE_SHIFT; + struct mlx5e_rx_wqe *wqe; struct sk_buff *skb; u16 cqe_bcnt; @@ -1026,18 +1076,13 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) goto mpwrq_cqe_out; } - skb = napi_alloc_skb(rq->cq.napi, - ALIGN(MLX5_MPWRQ_SMALL_PACKET_THRESHOLD, - sizeof(long))); - if (unlikely(!skb)) { - rq->stats.buff_alloc_err++; - goto mpwrq_cqe_out; - } - - prefetchw(skb->data); cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe); - 
mlx5e_mpwqe_fill_rx_skb(rq, cqe, wi, cqe_bcnt, skb); + skb = rq->mpwqe.skb_from_cqe_mpwrq(rq, wi, cqe_bcnt, head_offset, + page_idx); + if (unlikely(!skb)) + goto mpwrq_cqe_out; + mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); napi_gro_receive(rq->cq.napi, skb); @@ -1045,6 +1090,7 @@ mpwrq_cqe_out: if (likely(wi->consumed_strides < rq->mpwqe.num_strides)) return; + wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_id); mlx5e_free_rx_mpwqe(rq, wi); mlx5_wq_ll_pop(&rq->wq, cqe->wqe_id, &wqe->next.next_wqe_index); } diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 4b5939c78cdd..12758595459b 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -782,6 +782,9 @@ static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe) return (u64)lo | ((u64)hi << 32); } +#define MLX5_MPWQE_LOG_NUM_STRIDES_BASE (9) +#define MLX5_MPWQE_LOG_STRIDE_SZ_BASE (6) + struct mpwrq_cqe_bc { __be16 filler_consumed_strides; __be16 byte_cnt; diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index c19e611d2782..d25011f84815 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -1038,7 +1038,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_398[0x3]; u8 log_max_tis_per_sq[0x5]; - u8 reserved_at_3a0[0x3]; + u8 ext_stride_num_range[0x1]; + u8 reserved_at_3a1[0x2]; u8 log_max_stride_sz_rq[0x5]; u8 reserved_at_3a8[0x3]; u8 log_min_stride_sz_rq[0x5]; @@ -1205,9 +1206,9 @@ struct mlx5_ifc_wq_bits { u8 log_hairpin_num_packets[0x5]; u8 reserved_at_128[0x3]; u8 log_hairpin_data_sz[0x5]; - u8 reserved_at_130[0x5]; - u8 log_wqe_num_of_strides[0x3]; + u8 reserved_at_130[0x4]; + u8 log_wqe_num_of_strides[0x4]; u8 two_byte_shift_en[0x1]; u8 reserved_at_139[0x4]; u8 log_wqe_stride_size[0x3]; -- cgit v1.2.3 From 121e89275471dccbd5b252e40ad6a823fa0527f7 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Tue, 12 Dec 2017 15:46:49 +0200 Subject: net/mlx5e: Refactor RQ XDP_TX indication Make the xdp_xmit indication available for Striding RQ by taking it out of the type-specific union. This refactor is a preparation for a downstream patch that adds XDP support over Striding RQ. In addition, use a bitmap instead of a boolean for possible future flags. 
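The flag pattern this refactor adopts, shown in isolation. A minimal userspace sketch, not driver code (the example_* names are hypothetical): the non-atomic __set_bit()/__test_and_clear_bit() variants the driver uses are sufficient because each RQ's flags are only touched from its own NAPI poll context.

#include <stdbool.h>
#include <stdio.h>

enum { EXAMPLE_RQ_FLAG_XDP_XMIT }; /* bit number, akin to MLX5E_RQ_FLAG_XDP_XMIT */

struct example_rq {
	unsigned long flags; /* stands in for DECLARE_BITMAP(flags, 8) */
};

/* non-atomic set, like __set_bit(): no locked instruction needed */
static void example_set_xdp_xmit(struct example_rq *rq)
{
	rq->flags |= 1UL << EXAMPLE_RQ_FLAG_XDP_XMIT;
}

/* non-atomic test-and-clear, like __test_and_clear_bit() */
static bool example_test_and_clear_xdp_xmit(struct example_rq *rq)
{
	bool was_set = rq->flags & (1UL << EXAMPLE_RQ_FLAG_XDP_XMIT);

	rq->flags &= ~(1UL << EXAMPLE_RQ_FLAG_XDP_XMIT);
	return was_set;
}

int main(void)
{
	struct example_rq rq = { 0 };

	example_set_xdp_xmit(&rq);
	printf("%d %d\n", example_test_and_clear_xdp_xmit(&rq),
	       example_test_and_clear_xdp_xmit(&rq)); /* prints "1 0" */
	return 0;
}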
Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 6 +++++- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 8 +++----- 2 files changed, 8 insertions(+), 6 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index d26dd4bc89f4..a6ca54393bb6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -479,6 +479,10 @@ typedef struct sk_buff * typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq); typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16); +enum mlx5e_rq_flag { + MLX5E_RQ_FLAG_XDP_XMIT = BIT(0), +}; + struct mlx5e_rq { /* data path */ struct mlx5_wq_ll wq; @@ -489,7 +493,6 @@ struct mlx5e_rq { u32 frag_sz; /* max possible skb frag_sz */ union { bool page_reuse; - bool xdp_xmit; }; } wqe; struct { @@ -528,6 +531,7 @@ struct mlx5e_rq { struct bpf_prog *xdp_prog; unsigned int hw_mtu; struct mlx5e_xdpsq xdpsq; + DECLARE_BITMAP(flags, 8); /* control */ struct mlx5_wq_ctrl wq_ctrl; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 07db8a58d0a2..a827571deb85 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -788,7 +788,7 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, /* move page to reference to sq responsibility, * and mark so it's not put back in page-cache. */ - rq->wqe.xdp_xmit = true; + __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */ sq->db.di[pi] = *di; sq->pc++; @@ -913,9 +913,8 @@ void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) skb = skb_from_cqe(rq, cqe, wi, cqe_bcnt); if (!skb) { /* probably for XDP */ - if (rq->wqe.xdp_xmit) { + if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) { wi->di.page = NULL; - rq->wqe.xdp_xmit = false; /* do not return page to cache, it will be returned on XDP_TX completion */ goto wq_ll_pop; } @@ -955,9 +954,8 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) skb = skb_from_cqe(rq, cqe, wi, cqe_bcnt); if (!skb) { - if (rq->wqe.xdp_xmit) { + if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) { wi->di.page = NULL; - rq->wqe.xdp_xmit = false; /* do not return page to cache, it will be returned on XDP_TX completion */ goto wq_ll_pop; } -- cgit v1.2.3 From 22f4539881944a8acc9c6f5afa7aa7f028898002 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Wed, 7 Feb 2018 14:46:36 +0200 Subject: net/mlx5e: Support XDP over Striding RQ Add XDP support over Striding RQ. Now that linear SKB is supported over Striding RQ, we can support XDP by setting stride size to PAGE_SIZE and headroom to XDP_PACKET_HEADROOM. Upon an MPWQE free, do not release pages that are in flight as XDP transmits; they will be released upon XDP_TX completion. Striding RQ is capable of a higher packet-rate than the conventional RQ. A performance gain is expected for all cases that had a HW packet-rate bottleneck. This is the case whenever using many flows that distribute to many cores. Performance testing: ConnectX-5, 24 rings, default MTU. CQE compression ON (to reduce completions BW in PCI).
XDP_DROP packet rate: -------------------------------------------------- | pkt size | XDP rate | 100GbE linerate | pct% | -------------------------------------------------- | 64byte | 126.2 Mpps | 148.0 Mpps | 85% | | 128byte | 80.0 Mpps | 84.8 Mpps | 94% | | 256byte | 42.7 Mpps | 42.7 Mpps | 100% | | 512byte | 23.4 Mpps | 23.4 Mpps | 100% | -------------------------------------------------- Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 1 + drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 3 ++- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 30 +++++++++++++++++------ 3 files changed, 26 insertions(+), 8 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index a6ca54393bb6..7997d7c159db 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -457,6 +457,7 @@ struct mlx5e_mpw_info { struct mlx5e_umr_dma_info umr; u16 consumed_strides; u16 skbs_frags[MLX5_MPWRQ_PAGES_PER_WQE]; + DECLARE_BITMAP(xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE); }; /* a single cache unit is capable to serve one napi call (for non-striding rq) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index bba2fa0aa15f..b03a2327356a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -200,7 +200,8 @@ bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev, struct mlx5e_params *params) { return mlx5e_check_fragmented_striding_rq_cap(mdev) && - !params->xdp_prog && !MLX5_IPSEC_DEV(mdev); + !MLX5_IPSEC_DEV(mdev) && + !(params->xdp_prog && !mlx5e_rx_mpwqe_is_linear_skb(mdev, params)); } void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index a827571deb85..1da79cab1838 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -349,13 +349,16 @@ mlx5e_copy_skb_header_mpwqe(struct device *pdev, void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi) { + const bool no_xdp_xmit = + bitmap_empty(wi->xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE); int pg_strides = mlx5e_mpwqe_strides_per_page(rq); - struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[0]; + struct mlx5e_dma_info *dma_info = wi->umr.dma_info; int i; - for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++, dma_info++) { - page_ref_sub(dma_info->page, pg_strides - wi->skbs_frags[i]); - mlx5e_page_release(rq, dma_info, true); + for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) { + page_ref_sub(dma_info[i].page, pg_strides - wi->skbs_frags[i]); + if (no_xdp_xmit || !test_bit(i, wi->xdp_xmit_bitmap)) + mlx5e_page_release(rq, &dma_info[i], true); } } @@ -404,6 +407,7 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) } memset(wi->skbs_frags, 0, sizeof(*wi->skbs_frags) * MLX5_MPWRQ_PAGES_PER_WQE); + bitmap_zero(wi->xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE); wi->consumed_strides = 0; rq->mpwqe.umr_in_progress = true; @@ -1028,18 +1032,30 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, { struct mlx5e_dma_info *di = &wi->umr.dma_info[page_idx]; u16 rx_headroom = rq->buff.headroom; + u32 cqe_bcnt32 = cqe_bcnt; struct sk_buff *skb; void *va, *data; u32 
frag_size; + bool consumed; va = page_address(di->page) + head_offset; data = va + rx_headroom; - frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt); + frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32); dma_sync_single_range_for_cpu(rq->pdev, di->addr, head_offset, frag_size, DMA_FROM_DEVICE); prefetch(data); - skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt); + + rcu_read_lock(); + consumed = mlx5e_xdp_handle(rq, di, va, &rx_headroom, &cqe_bcnt32); + rcu_read_unlock(); + if (consumed) { + if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) + __set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */ + return NULL; /* page/packet was consumed by XDP */ + } + + skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt32); if (unlikely(!skb)) return NULL; @@ -1078,7 +1094,7 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) skb = rq->mpwqe.skb_from_cqe_mpwrq(rq, wi, cqe_bcnt, head_offset, page_idx); - if (unlikely(!skb)) + if (!skb) goto mpwrq_cqe_out; mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); -- cgit v1.2.3 From 9f9e9cd50eac6ad09cb053509f2e764bddc05f18 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Tue, 28 Nov 2017 11:03:37 +0200 Subject: net/mlx5e: Remove page_ref bulking in Striding RQ When many packets reside on the same page, the bulking of page_ref modifications reduces the total number of atomic operations executed. Besides the necessary 2 operations on page alloc/free, we have the following extra ops per page: - one on WQE allocation (bump refcnt to maximum possible), - zero ops for SKBs, - one on WQE free. That is a constant of two operations in total, no matter how many packets/SKBs actually populate the page. Without this bulking, we have: - no ops on WQE allocation or free, - one op per SKB. Comparing the two methods when PAGE_SIZE is 4K: - As mentioned above, the bulking method always executes exactly 2 operations, not more, but not less. - In the default MTU configuration (1500, stride size is 2K), the non-bulking method executes 2 ops as well. - For larger MTUs with stride size of 4K, the non-bulking method executes only a single op. - For XDP (stride size of 4K, no SKBs), the non-bulking method executes no ops at all! Hence, to optimize the flows with linear SKB and XDP over Striding RQ, we here remove the page_ref bulking method. Performance testing: ConnectX-5, Intel(R) Xeon(R) CPU E5-2680 v3 @ 2.50GHz. Single core packet rate (64 bytes). Early drop in TC: no degradation. XDP_DROP: before: 14,270,188 pps after: 20,503,603 pps, 43% improvement.
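The comparison above, condensed into code. A hypothetical sketch, not driver code: it counts the extra atomic page_ref operations per page (beyond the base alloc/free pair) as a function of how many SKBs are built from that page.

#include <stdio.h>

/* bulking: page_ref_add() to the max on WQE alloc, page_ref_sub() of the
 * unused share on WQE free; always exactly two atomics per page.
 */
static int ops_bulking(int skbs_per_page)
{
	(void)skbs_per_page;
	return 2;
}

/* non-bulking: nothing on WQE alloc/free, one page_ref_inc() per SKB */
static int ops_non_bulking(int skbs_per_page)
{
	return skbs_per_page;
}

int main(void)
{
	/* 2K strides (default MTU): 2 SKBs/page -> 2 vs 2, a tie
	 * 4K strides, linear SKB:   1 SKB/page  -> 2 vs 1
	 * XDP, 4K strides:          0 SKBs/page -> 2 vs 0
	 */
	int skbs[] = { 2, 1, 0 };

	for (int i = 0; i < 3; i++)
		printf("bulking %d vs non-bulking %d\n",
		       ops_bulking(skbs[i]), ops_non_bulking(skbs[i]));
	return 0;
}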
Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 1 - drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 47 +++++++++---------------- 2 files changed, 16 insertions(+), 32 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 7997d7c159db..5853de4e4fc7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -456,7 +456,6 @@ struct mlx5e_umr_dma_info { struct mlx5e_mpw_info { struct mlx5e_umr_dma_info umr; u16 consumed_strides; - u16 skbs_frags[MLX5_MPWRQ_PAGES_PER_WQE]; DECLARE_BITMAP(xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE); }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 1da79cab1838..9bb47a6d40f1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -296,37 +296,28 @@ void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix) mlx5e_free_rx_wqe(rq, wi); } -static inline int mlx5e_mpwqe_strides_per_page(struct mlx5e_rq *rq) -{ - return rq->mpwqe.num_strides >> MLX5_MPWRQ_WQE_PAGE_ORDER; -} - static inline void mlx5e_add_skb_frag_mpwqe(struct mlx5e_rq *rq, struct sk_buff *skb, - struct mlx5e_mpw_info *wi, - u32 page_idx, u32 frag_offset, - u32 len) + struct mlx5e_dma_info *di, + u32 frag_offset, u32 len) { unsigned int truesize = ALIGN(len, BIT(rq->mpwqe.log_stride_sz)); dma_sync_single_for_cpu(rq->pdev, - wi->umr.dma_info[page_idx].addr + frag_offset, + di->addr + frag_offset, len, DMA_FROM_DEVICE); - wi->skbs_frags[page_idx]++; + page_ref_inc(di->page); skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, - wi->umr.dma_info[page_idx].page, frag_offset, - len, truesize); + di->page, frag_offset, len, truesize); } static inline void mlx5e_copy_skb_header_mpwqe(struct device *pdev, struct sk_buff *skb, - struct mlx5e_mpw_info *wi, - u32 page_idx, u32 offset, - u32 headlen) + struct mlx5e_dma_info *dma_info, + u32 offset, u32 headlen) { u16 headlen_pg = min_t(u32, headlen, PAGE_SIZE - offset); - struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[page_idx]; unsigned int len; /* Aligning len to sizeof(long) optimizes memcpy performance */ @@ -351,15 +342,12 @@ void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi) { const bool no_xdp_xmit = bitmap_empty(wi->xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE); - int pg_strides = mlx5e_mpwqe_strides_per_page(rq); struct mlx5e_dma_info *dma_info = wi->umr.dma_info; int i; - for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) { - page_ref_sub(dma_info[i].page, pg_strides - wi->skbs_frags[i]); + for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) if (no_xdp_xmit || !test_bit(i, wi->xdp_xmit_bitmap)) mlx5e_page_release(rq, &dma_info[i], true); - } } static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq) @@ -380,7 +368,6 @@ static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq) static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) { struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix]; - int pg_strides = mlx5e_mpwqe_strides_per_page(rq); struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[0]; struct mlx5e_icosq *sq = &rq->channel->icosq; struct mlx5_wq_cyc *wq = &sq->wq; @@ -403,10 +390,8 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) if (unlikely(err)) goto err_unmap; umr_wqe->inline_mtts[i].ptag = cpu_to_be64(dma_info->addr | MLX5_EN_WR); - page_ref_add(dma_info->page, 
pg_strides); } - memset(wi->skbs_frags, 0, sizeof(*wi->skbs_frags) * MLX5_MPWRQ_PAGES_PER_WQE); bitmap_zero(wi->xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE); wi->consumed_strides = 0; @@ -425,7 +410,6 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) err_unmap: while (--i >= 0) { dma_info--; - page_ref_sub(dma_info->page, pg_strides); mlx5e_page_release(rq, dma_info, true); } rq->stats.buff_alloc_err++; @@ -987,9 +971,10 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w u16 cqe_bcnt, u32 head_offset, u32 page_idx) { u16 headlen = min_t(u16, MLX5_MPWRQ_SMALL_PACKET_THRESHOLD, cqe_bcnt); + struct mlx5e_dma_info *di = &wi->umr.dma_info[page_idx]; u32 frag_offset = head_offset + headlen; - u16 byte_cnt = cqe_bcnt - headlen; - u32 head_page_idx = page_idx; + u32 byte_cnt = cqe_bcnt - headlen; + struct mlx5e_dma_info *head_di = di; struct sk_buff *skb; skb = napi_alloc_skb(rq->cq.napi, @@ -1002,7 +987,7 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w prefetchw(skb->data); if (unlikely(frag_offset >= PAGE_SIZE)) { - page_idx++; + di++; frag_offset -= PAGE_SIZE; } @@ -1010,14 +995,14 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w u32 pg_consumed_bytes = min_t(u32, PAGE_SIZE - frag_offset, byte_cnt); - mlx5e_add_skb_frag_mpwqe(rq, skb, wi, page_idx, frag_offset, + mlx5e_add_skb_frag_mpwqe(rq, skb, di, frag_offset, pg_consumed_bytes); byte_cnt -= pg_consumed_bytes; frag_offset = 0; - page_idx++; + di++; } /* copy header */ - mlx5e_copy_skb_header_mpwqe(rq->pdev, skb, wi, head_page_idx, + mlx5e_copy_skb_header_mpwqe(rq->pdev, skb, head_di, head_offset, headlen); /* skb linear part was allocated with headlen and aligned to long */ skb->tail += headlen; skb->len += headlen; @@ -1060,7 +1045,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, return NULL; /* queue up for recycling/reuse */ - wi->skbs_frags[page_idx]++; + page_ref_inc(di->page); return skb; } -- cgit v1.2.3 From b8a98a4cf3221d8140969e3f5bde09206a6cb623 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Wed, 20 Dec 2017 11:56:35 +0200 Subject: net/mlx5e: Keep single pre-initialized UMR WQE per RQ All UMR WQEs of an RQ share many common fields. We use pre-initialized structures to save calculations in the datapath. One field (xlt_offset) was the only reason we saved a pre-initialized copy per WQE index. Here we remove its initialization (move its calculation to the datapath), and reduce the number of copies to one-per-RQ. A very small datapath calculation is added; it occurs once per MPWQE (i.e. once every 256KB), but it reduces memory consumption and gives better cache utilization. Performance testing: Tested packet rate, no degradation sensed.
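The "- 1" in the new datapath shift deserves a note. A hypothetical sketch, not driver code, assuming 4K pages (64 MTTs per MPWQE, so MLX5E_LOG_ALIGNED_MPWQE_PPW = 6): xlt_offset is expressed in 16-byte octwords, each holding two 8-byte MTTs, which halves the MTT-entry offset.

#include <stdio.h>

int main(void)
{
	unsigned int log_aligned_mpwqe_ppw = 6; /* log2(64 MTTs per MPWQE) */
	unsigned int ix = 5;                    /* example WQE index */

	unsigned int mtt_offset = ix << log_aligned_mpwqe_ppw;       /* MTT entries */
	unsigned int xlt_offset = ix << (log_aligned_mpwqe_ppw - 1); /* octwords */

	/* prints "320 MTTs == 160 octwords": the octword offset is half the
	 * MTT-entry offset, hence ix << (MLX5E_LOG_ALIGNED_MPWQE_PPW - 1).
	 */
	printf("%u MTTs == %u octwords\n", mtt_offset, xlt_offset);
	return 0;
}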
Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 8 ++------ drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 20 ++++++++------------ drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 4 +++- 3 files changed, 13 insertions(+), 19 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 5853de4e4fc7..30cad07be2b5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -84,6 +84,7 @@ #define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2) #define MLX5E_REQUIRED_WQE_MTTS (ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8)) +#define MLX5E_LOG_ALIGNED_MPWQE_PPW (ilog2(MLX5E_REQUIRED_WQE_MTTS)) #define MLX5E_REQUIRED_MTTS(wqes) (wqes * MLX5E_REQUIRED_WQE_MTTS) #define MLX5E_MAX_RQ_NUM_MTTS \ ((1 << 16) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */ @@ -450,7 +451,6 @@ struct mlx5e_wqe_frag_info { struct mlx5e_umr_dma_info { struct mlx5e_dma_info dma_info[MLX5_MPWRQ_PAGES_PER_WQE]; - struct mlx5e_umr_wqe wqe; }; struct mlx5e_mpw_info { @@ -496,6 +496,7 @@ struct mlx5e_rq { }; } wqe; struct { + struct mlx5e_umr_wqe umr_wqe; struct mlx5e_mpw_info *info; mlx5e_fp_skb_from_cqe_mpwrq skb_from_cqe_mpwrq; u16 num_strides; @@ -978,11 +979,6 @@ static inline void mlx5e_cq_arm(struct mlx5e_cq *cq) mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, cq->wq.cc); } -static inline u32 mlx5e_get_wqe_mtt_offset(struct mlx5e_rq *rq, u16 wqe_ix) -{ - return wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8); -} - extern const struct ethtool_ops mlx5e_ethtool_ops; #ifdef CONFIG_MLX5_CORE_EN_DCB extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index b03a2327356a..0339609cfa56 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -306,13 +306,11 @@ static void mlx5e_disable_async_events(struct mlx5e_priv *priv) static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq, struct mlx5e_icosq *sq, - struct mlx5e_umr_wqe *wqe, - u16 ix) + struct mlx5e_umr_wqe *wqe) { struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl; u8 ds_cnt = DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_DS); - u32 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq, ix); cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) | ds_cnt); @@ -322,8 +320,6 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq, ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE; ucseg->xlt_octowords = cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE)); - ucseg->bsf_octowords = - cpu_to_be16(MLX5_MTT_OCTW(umr_wqe_mtt_offset)); ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE); } @@ -331,18 +327,13 @@ static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, struct mlx5e_channel *c) { int wq_sz = mlx5_wq_ll_get_size(&rq->wq); - int i; rq->mpwqe.info = kzalloc_node(wq_sz * sizeof(*rq->mpwqe.info), GFP_KERNEL, cpu_to_node(c->cpu)); if (!rq->mpwqe.info) return -ENOMEM; - for (i = 0; i < wq_sz; i++) { - struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i]; - - mlx5e_build_umr_wqe(rq, &c->icosq, &wi->umr.wqe, i); - } + mlx5e_build_umr_wqe(rq, &c->icosq, &rq->mpwqe.umr_wqe); return 0; } @@ -388,6 +379,11 @@ static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq return 
mlx5e_create_umr_mkey(mdev, num_mtts, PAGE_SHIFT, &rq->umr_mkey); } +static inline u64 mlx5e_get_mpwqe_offset(struct mlx5e_rq *rq, u16 wqe_ix) +{ + return (wqe_ix << MLX5E_LOG_ALIGNED_MPWQE_PPW) << PAGE_SHIFT; +} + static int mlx5e_alloc_rq(struct mlx5e_channel *c, struct mlx5e_params *params, struct mlx5e_rq_param *rqp, @@ -520,7 +516,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i); if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { - u64 dma_offset = (u64)mlx5e_get_wqe_mtt_offset(rq, i) << PAGE_SHIFT; + u64 dma_offset = mlx5e_get_mpwqe_offset(rq, i); wqe->data.addr = cpu_to_be64(dma_offset + rq->buff.headroom); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 9bb47a6d40f1..682f9ff9da34 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -373,6 +373,7 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) struct mlx5_wq_cyc *wq = &sq->wq; struct mlx5e_umr_wqe *umr_wqe; int cpy = offsetof(struct mlx5e_umr_wqe, inline_mtts); + u16 xlt_offset = ix << (MLX5E_LOG_ALIGNED_MPWQE_PPW - 1); int err; u16 pi; int i; @@ -384,7 +385,7 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi); - memcpy(umr_wqe, &wi->umr.wqe, cpy); + memcpy(umr_wqe, &rq->mpwqe.umr_wqe, cpy); for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++, dma_info++) { err = mlx5e_page_alloc_mapped(rq, dma_info); if (unlikely(err)) @@ -400,6 +401,7 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) umr_wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) | MLX5_OPCODE_UMR); + umr_wqe->uctrl.xlt_offset = cpu_to_be16(xlt_offset); sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR; sq->pc += MLX5E_UMR_WQEBBS; -- cgit v1.2.3 From ab966d7e4ff988a48b3ad72e7abf903aa840afd1 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Thu, 4 Jan 2018 13:09:15 +0200 Subject: net/mlx5e: RX, Recycle buffer of UMR WQEs Upon a new UMR post, check if the WQE buffer contains a previous UMR WQE. If so, modify only the dynamic fields instead of overwriting the whole WQE. This saves a memcpy. In the current setting, after 2 WQ cycles (12 UMR posts), this will always be the case. No degradation sensed.
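Where the "2 WQ cycles (12 UMR posts)" figure comes from. A hypothetical sketch, not driver code; it assumes 4K pages (10-WQEBB inline UMR WQEs) and a 64-WQEBB ICOSQ (2^MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE), the sizes the shipped defaults produce.

#include <stdio.h>

int main(void)
{
	int icosq_wqebbs = 64; /* 1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE */
	int umr_wqebbs = 10;   /* MLX5E_UMR_WQEBBS with 4K pages */

	/* posts land at pi = 0, 10, ..., 50; the 4-WQEBB tail is NOP-filled,
	 * so one cycle holds 6 UMR posts and two cycles hold 12. Once the
	 * wrap count (sq->pc >> 6) reaches 2, every slot a post can land on
	 * already carries the static UMR segments, and only the dynamic
	 * fields (opcode/index, xlt_offset, MTTs) must be rewritten.
	 */
	printf("%d posts per cycle\n", icosq_wqebbs / umr_wqebbs);
	return 0;
}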
Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 682f9ff9da34..176645762e49 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -365,6 +365,11 @@ static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq) mlx5_wq_ll_update_db_record(wq); } +static inline u16 mlx5e_icosq_wrap_cnt(struct mlx5e_icosq *sq) +{ + return sq->pc >> MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE; +} + static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) { struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix]; @@ -372,7 +377,6 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) struct mlx5e_icosq *sq = &rq->channel->icosq; struct mlx5_wq_cyc *wq = &sq->wq; struct mlx5e_umr_wqe *umr_wqe; - int cpy = offsetof(struct mlx5e_umr_wqe, inline_mtts); u16 xlt_offset = ix << (MLX5E_LOG_ALIGNED_MPWQE_PPW - 1); int err; u16 pi; @@ -385,7 +389,10 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) } umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi); - memcpy(umr_wqe, &rq->mpwqe.umr_wqe, cpy); + if (unlikely(mlx5e_icosq_wrap_cnt(sq) < 2)) + memcpy(umr_wqe, &rq->mpwqe.umr_wqe, + offsetof(struct mlx5e_umr_wqe, inline_mtts)); + for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++, dma_info++) { err = mlx5e_page_alloc_mapped(rq, dma_info); if (unlikely(err)) -- cgit v1.2.3 From 48bfc39791b8b4a25f165e711f18b9c1617cefbc Mon Sep 17 00:00:00 2001 From: Tal Gilboa Date: Fri, 30 Mar 2018 15:50:08 -0700 Subject: net/mlx5e: Set EQE based as default TX interrupt moderation mode The default TX moderation mode was mistakenly set to CQE based. The intention was to add a control ability in order to improve some specific use-cases. In general, we prefer to use EQE based moderation as it gives much better numbers for the common cases. CQE based causes a degradation in the common case since it resets the moderation timer on CQE generation. This causes an issue when TSO is well utilized (large TSO sessions). The timer is set to 16us, so traffic of large (~64KB) TSO sessions means the timer is reset on every session's CQE (one CQE per TSO session -> long time between CQEs). In this case we quickly reach the tcp_limit_output_bytes (256KB by default) and cause a halt in TX traffic. By setting EQE based moderation we make sure the timer expires 16us after the event regardless of the packet rate. This fixes packet rate degradations of up to 40% and bandwidth degradations of up to 23%. Fixes: 0088cbbc4b66 ("net/mlx5e: Enable CQE based moderation on TX CQ") Signed-off-by: Tal Gilboa Signed-off-by: Saeed Mahameed Signed-off-by: David S.
Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index c71f4f10283b..0aab3afc6885 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -4137,7 +4137,7 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params, u16 max_channels, u16 mtu) { - u8 cq_period_mode = 0; + u8 rx_cq_period_mode; params->sw_mtu = mtu; params->hard_mtu = MLX5E_ETH_HARD_MTU; @@ -4173,12 +4173,12 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev, params->lro_timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT); /* CQ moderation params */ - cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? + rx_cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? MLX5_CQ_PERIOD_MODE_START_FROM_CQE : MLX5_CQ_PERIOD_MODE_START_FROM_EQE; params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation); - mlx5e_set_rx_cq_mode_params(params, cq_period_mode); - mlx5e_set_tx_cq_mode_params(params, cq_period_mode); + mlx5e_set_rx_cq_mode_params(params, rx_cq_period_mode); + mlx5e_set_tx_cq_mode_params(params, MLX5_CQ_PERIOD_MODE_START_FROM_EQE); /* TX inline */ params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(mdev); -- cgit v1.2.3
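The arithmetic behind the TSO halt is worth spelling out. A hypothetical sketch, not driver code; it assumes 64KB TSO sessions (one CQE each), the 16us moderation timer, and the default 256KB tcp_limit_output_bytes.

#include <stdio.h>

int main(void)
{
	int tso_session_bytes = 64 * 1024;
	int tcp_limit_output_bytes = 256 * 1024; /* net.ipv4 default */

	/* prints "4 sessions in flight": a socket may only have about 4 such
	 * TSO sessions outstanding. With CQE-based moderation, each sparse
	 * CQE restarts the 16us timer and postpones the completion
	 * interrupt, so the socket stalls waiting on completions;
	 * EQE-based moderation fires 16us after the event regardless of
	 * CQE cadence.
	 */
	printf("%d sessions in flight\n",
	       tcp_limit_output_bytes / tso_session_bytes);
	return 0;
}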