author      Maxim Mikityanskiy <maximmi@nvidia.com>  2022-10-02 07:56:23 +0300
committer   Jakub Kicinski <kuba@kernel.org>  2022-10-04 02:55:28 +0300
commit      9f123f740428e96ef2eae8b5e2876b675b6a4605 (patch)
tree        8ceeb449d2110256447840e894b5cb286a5b42fa /drivers/net/ethernet/mellanox/mlx5/core/en/params.c
parent      168723c1f8d6e1e823d4c6ad3cf64478cf58330a (diff)
net/mlx5e: Improve MTT/KSM alignment
Make mlx5e_mpwrq_mtts_per_wqe take into account that KSM requires smaller alignment than MTT.

Ensure that there is always an even number of MTTs in a UMR WQE, so that complete octwords are formed and no garbage is mapped.

Drop the extra alignment in MLX5_MTT_OCTW that may cause ucseg->xlt_octowords to be set too big, also leading to mapping garbage.

Generalize some calculations by introducing the MLX5_OCTWORD constant.

Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
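To illustrate the alignment arithmetic, here is a minimal, self-contained sketch (not driver code). It assumes, as in the mlx5 headers, that an MTT entry is 8 bytes, a KSM entry is 16 bytes, and a WQE basic block (MLX5_SEND_WQE_BB) is 64 bytes, so KSM only needs 4-entry alignment where MTT needs 8. umr_entry_size() below is a hypothetical stand-in for mlx5e_mpwrq_umr_entry_size().

#include <stdio.h>

/* Values assumed from the mlx5 driver headers. */
#define MLX5_SEND_WQE_BB 64 /* WQE basic block, bytes */

enum umr_mode { UMR_MODE_ALIGNED /* MTT */, UMR_MODE_UNALIGNED /* KSM */ };

/* Hypothetical stand-in for mlx5e_mpwrq_umr_entry_size():
 * 8 bytes per MTT entry, 16 bytes per KSM entry (assumed sizes). */
static unsigned int umr_entry_size(enum umr_mode mode)
{
	return mode == UMR_MODE_ALIGNED ? 8 : 16;
}

/* Round x up to a multiple of a, like the kernel's ALIGN(). */
#define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))

int main(void)
{
	unsigned int pages_per_wqe = 8; /* example; +1 extra entry as in the driver */

	/* MTT: 64 / 8 = 8-entry alignment; KSM: 64 / 16 = 4-entry alignment. */
	printf("MTT: %u\n", ALIGN(pages_per_wqe + 1,
				  MLX5_SEND_WQE_BB / umr_entry_size(UMR_MODE_ALIGNED)));   /* 16 */
	printf("KSM: %u\n", ALIGN(pages_per_wqe + 1,
				  MLX5_SEND_WQE_BB / umr_entry_size(UMR_MODE_UNALIGNED))); /* 12 */
	return 0;
}

For pages_per_wqe = 8, the MTT path rounds 9 entries up to 16, while the KSM path only rounds up to 12: the smaller KSM alignment this patch starts taking into account.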
Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/en/params.c')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/params.c | 10
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index b57855bf7629..e8c3b8abf941 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -91,6 +91,13 @@ u8 mlx5e_mpwrq_pages_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift,
pages_per_wqe = log_wqe_sz > page_shift ? (1 << (log_wqe_sz - page_shift)) : 1;
+ /* Two MTTs are needed to form an octword. The number of MTTs is encoded
+ * in octwords in a UMR WQE, so we need at least two to avoid mapping
+ * garbage addresses.
+ */
+ if (WARN_ON_ONCE(pages_per_wqe < 2 && umr_mode == MLX5E_MPWRQ_UMR_MODE_ALIGNED))
+ pages_per_wqe = 2;
+
/* Sanity check for further calculations to succeed. */
BUILD_BUG_ON(MLX5_MPWRQ_MAX_PAGES_PER_WQE > 64);
if (WARN_ON_ONCE(pages_per_wqe > MLX5_MPWRQ_MAX_PAGES_PER_WQE))
@@ -131,7 +138,8 @@ u8 mlx5e_mpwrq_mtts_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift,
* MTU. These oversize packets are dropped by the driver at a later
* stage.
*/
- return MLX5_ALIGN_MTTS(pages_per_wqe + 1);
+ return ALIGN(pages_per_wqe + 1,
+ MLX5_SEND_WQE_BB / mlx5e_mpwrq_umr_entry_size(umr_mode));
}
u32 mlx5e_mpwrq_max_num_entries(struct mlx5_core_dev *mdev,
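Why an even MTT count matters: the number of MTTs is passed to hardware in octwords via ucseg->xlt_octowords, and one 16-byte octword holds two 8-byte MTT entries (sizes assumed from the mlx5 headers). Below is a sketch of the post-patch computation; mtt_octw() is a hypothetical stand-in for the MLX5_MTT_OCTW macro with its extra round-up dropped.

#include <assert.h>
#include <stdio.h>

#define MLX5_OCTWORD 16 /* bytes; assumed from the mlx5 headers */
#define MTT_ENTRY_SZ  8 /* bytes per MTT entry; assumed */

/* Hypothetical equivalent of MLX5_MTT_OCTW after this patch: a plain
 * division with no extra round-up that could overstate xlt_octowords. */
static unsigned int mtt_octw(unsigned int nentries)
{
	assert(nentries % 2 == 0); /* only complete octwords are valid */
	return nentries * MTT_ENTRY_SZ / MLX5_OCTWORD;
}

int main(void)
{
	/* 14 MTTs map to exactly 7 octwords. An odd count (e.g. 15) would
	 * either truncate to 7 octwords, losing an entry, or round up to 8,
	 * mapping one garbage MTT; hence the even-count guarantee above. */
	printf("%u\n", mtt_octw(14)); /* 7 */
	return 0;
}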