Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/en_main.c')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 41
1 file changed, 24 insertions(+), 17 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index b3466c721ad8..26f1557843a9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -71,17 +71,20 @@
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
{
bool striding_rq_umr, inline_umr;
- u16 max_wqe_sz_cap;
+ u16 max_wqebbs;
+ u16 umr_wqebbs;

striding_rq_umr = MLX5_CAP_GEN(mdev, striding_rq) && MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
MLX5_CAP_ETH(mdev, reg_umr_sq);
- max_wqe_sz_cap = mlx5e_get_max_sq_aligned_wqebbs(mdev) * MLX5_SEND_WQE_BB;
- inline_umr = max_wqe_sz_cap >= MLX5E_UMR_WQE_INLINE_SZ;
+ max_wqebbs = mlx5e_get_max_sq_aligned_wqebbs(mdev);
+ umr_wqebbs = mlx5e_mpwrq_umr_wqebbs(MLX5_MPWRQ_PAGES_PER_WQE);
+ inline_umr = umr_wqebbs <= max_wqebbs;
if (!striding_rq_umr)
return false;
if (!inline_umr) {
- mlx5_core_warn(mdev, "Cannot support Striding RQ: UMR WQE size (%d) exceeds maximum supported (%d).\n",
- (int)MLX5E_UMR_WQE_INLINE_SZ, max_wqe_sz_cap);
+ mlx5_core_warn(mdev, "Cannot support Striding RQ: UMR WQE size (%u) exceeds maximum supported (%u).\n",
+ umr_wqebbs * MLX5_SEND_WQE_BB,
+ max_wqebbs * MLX5_SEND_WQE_BB);
return false;
}
return true;
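
Note that the rewritten check compares sizes in WQEBB units (64-byte send-queue basic blocks) instead of bytes, which keeps it meaningful once the UMR WQE size starts to vary at runtime rather than being the compile-time MLX5E_UMR_WQE_INLINE_SZ. mlx5e_mpwrq_umr_wqebbs() is introduced elsewhere in this series; below is a standalone sketch of the arithmetic it plausibly performs, where the 128-byte header size and the device cap are illustrative assumptions, not values taken from this diff:

#include <stdint.h>
#include <stdio.h>

#define MLX5_SEND_WQE_BB   64U   /* send WQE basic block: 64 bytes */
#define MTT_ENTRY_SZ        8U   /* one 8-byte translation entry per page */
#define UMR_WQE_HDR_SZ    128U   /* assumed ctrl + UMR ctrl + mkey segments */

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Hypothetical stand-in for mlx5e_mpwrq_umr_wqebbs(). */
static uint16_t umr_wqebbs(uint16_t pages_per_wqe)
{
	return DIV_ROUND_UP(UMR_WQE_HDR_SZ + pages_per_wqe * MTT_ENTRY_SZ,
			    MLX5_SEND_WQE_BB);
}

int main(void)
{
	uint16_t max_wqebbs = 16;         /* assumed device capability */
	uint16_t wqebbs = umr_wqebbs(64); /* e.g. MLX5_MPWRQ_PAGES_PER_WQE */

	/* Same shape as the inline_umr check above. */
	printf("umr_wqebbs=%u inline_umr=%d\n", wqebbs, wqebbs <= max_wqebbs);
	return 0;
}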
@@ -206,7 +209,10 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
{
struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
- u8 ds_cnt = DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_DS);
+ u8 ds_cnt;
+
+ ds_cnt = DIV_ROUND_UP(mlx5e_mpwrq_umr_wqe_sz(rq->mpwqe.pages_per_wqe),
+ MLX5_SEND_WQE_DS);

cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
ds_cnt);
@@ -214,7 +220,7 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,

ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
ucseg->xlt_octowords =
- cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
+ cpu_to_be16(MLX5_MTT_OCTW(rq->mpwqe.pages_per_wqe));
ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
}
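
Here ds_cnt is the WQE size expressed in 16-byte data segments (MLX5_SEND_WQE_DS), the unit encoded in the ctrl segment's qpn_ds field, and xlt_octowords similarly counts translation entries in 16-byte octowords. A worked example of the arithmetic, where the 128-byte header and 64 pages per WQE are assumed values:

#include <assert.h>

#define MLX5_SEND_WQE_DS   16U  /* data segment: 16 bytes */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* Assumed inline UMR WQE: 128-byte header + 64 pages * 8-byte MTTs. */
	unsigned int wqe_sz = 128 + 64 * 8;

	/* ds_cnt as computed in mlx5e_build_umr_wqe(). */
	assert(DIV_ROUND_UP(wqe_sz, MLX5_SEND_WQE_DS) == 40);

	/* 64 MTT entries of 8 bytes fit in 32 octowords of 16 bytes. */
	assert(64 * 8 / 16 == 32);
	return 0;
}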
@@ -263,7 +269,7 @@ static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int node)
size_t alloc_size;

alloc_size = array_size(wq_sz, struct_size(rq->mpwqe.info, dma_info,
- MLX5_MPWRQ_PAGES_PER_WQE));
+ rq->mpwqe.pages_per_wqe));

rq->mpwqe.info = kvzalloc_node(alloc_size, GFP_KERNEL, node);
if (!rq->mpwqe.info)
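
array_size() and struct_size() come from include/linux/overflow.h: struct_size() sizes a struct with a trailing flexible array, array_size() multiplies two counts, and both saturate to SIZE_MAX on overflow, so kvzalloc_node() fails cleanly instead of returning an undersized buffer now that the per-WQE count is a runtime value. A minimal sketch of the pattern, with demo_info as a hypothetical stand-in for the mpwqe info struct:

#include <linux/overflow.h>
#include <linux/slab.h>

struct demo_info {
	u16 consumed;            /* placeholder fixed fields */
	struct page *dma_info[]; /* flexible array, one slot per page */
};

static struct demo_info *demo_alloc(size_t wq_sz, size_t pages_per_wqe, int node)
{
	struct demo_info *info;
	size_t alloc_size;

	/* Both helpers saturate rather than wrap on overflow; sizeof-only
	 * use of 'info' inside struct_size() never evaluates the pointer.
	 */
	alloc_size = array_size(wq_sz,
				struct_size(info, dma_info, pages_per_wqe));

	info = kvzalloc_node(alloc_size, GFP_KERNEL, node);
	return info; /* NULL on failure, as in mlx5e_rq_alloc_mpwqe_info() */
}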
@@ -359,9 +365,9 @@ static int mlx5e_create_umr_klm_mkey(struct mlx5_core_dev *mdev,
static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq)
{
- u64 num_mtts = MLX5E_REQUIRED_MTTS(mlx5_wq_ll_get_size(&rq->mpwqe.wq));
+ u64 num_mtts = mlx5_wq_ll_get_size(&rq->mpwqe.wq) * rq->mpwqe.mtts_per_wqe;
- return mlx5e_create_umr_mtt_mkey(mdev, num_mtts, PAGE_SHIFT,
+ return mlx5e_create_umr_mtt_mkey(mdev, num_mtts, rq->mpwqe.page_shift,
&rq->umr_mkey, rq->wqe_overflow.addr);
}
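
The mkey now covers mtts_per_wqe translation entries per ring entry at a runtime page_shift, replacing the compile-time MLX5E_REQUIRED_MTTS()/PAGE_SHIFT pair. A worked example with assumed ring dimensions:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t wq_sz          = 8;  /* WQEs in the ring (assumed) */
	uint64_t mtts_per_wqe   = 64; /* one MTT per page (assumed) */
	unsigned int page_shift = 12; /* 4 KiB pages */

	uint64_t num_mtts = wq_sz * mtts_per_wqe;
	/* 512 MTTs at 4 KiB each map 2 MiB of receive buffer. */
	printf("num_mtts=%llu bytes_mapped=%llu\n",
	       (unsigned long long)num_mtts,
	       (unsigned long long)(num_mtts << page_shift));
	return 0;
}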
@@ -379,11 +385,6 @@ static int mlx5e_create_rq_hd_umr_mkey(struct mlx5_core_dev *mdev,
&rq->mpwqe.shampo->mkey);
}

-static u64 mlx5e_get_mpwqe_offset(u16 wqe_ix)
-{
- return MLX5E_REQUIRED_MTTS(wqe_ix) << PAGE_SHIFT;
-}
-
static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
{
struct mlx5e_wqe_frag_info next_frag = {};
@@ -590,7 +591,12 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);

- pool_size = MLX5_MPWRQ_PAGES_PER_WQE <<
+ rq->mpwqe.page_shift = PAGE_SHIFT;
+ rq->mpwqe.pages_per_wqe = MLX5_MPWRQ_PAGES_PER_WQE;
+ rq->mpwqe.umr_wqebbs = mlx5e_mpwrq_umr_wqebbs(rq->mpwqe.pages_per_wqe);
+ rq->mpwqe.mtts_per_wqe = MLX5E_REQUIRED_WQE_MTTS;
+
+ pool_size = rq->mpwqe.pages_per_wqe <<
mlx5e_mpwqe_get_log_rq_size(params, xsk);
rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
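
All four fields are filled in from the same compile-time defaults the code used before (PAGE_SHIFT, MLX5_MPWRQ_PAGES_PER_WQE, MLX5E_REQUIRED_WQE_MTTS), so behavior is unchanged; the point is to route later computations through per-RQ state so the values can diverge per queue further along in the series. A hedged sketch of roughly what this adds to the mpwqe sub-struct of struct mlx5e_rq (the real layout and field widths are defined elsewhere in this series, not in this diff):

/* Illustrative only; names match the fields assigned above. */
struct mpwqe_runtime_params {
	u8  page_shift;    /* log2 of the page size backing this RQ */
	u8  pages_per_wqe; /* pages mapped by one multi-packet WQE */
	u8  umr_wqebbs;    /* send-queue basic blocks per UMR WQE */
	u16 mtts_per_wqe;  /* translation entries consumed per WQE */
};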
@@ -680,7 +686,8 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
struct mlx5e_rx_wqe_ll *wqe =
mlx5_wq_ll_get_wqe(&rq->mpwqe.wq, i);
u32 byte_count =
rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz;
- u64 dma_offset = mlx5e_get_mpwqe_offset(i);
+ u64 dma_offset = mul_u32_u32(i, rq->mpwqe.mtts_per_wqe) <<
+ rq->mpwqe.page_shift;
u16 headroom = test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state) ?
0 : rq->buff.headroom;
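
With the mlx5e_get_mpwqe_offset() helper gone (removed above), the DMA offset is computed inline: WQE index times MTTs per WQE gives the page index, shifted by the runtime page_shift to get bytes. mul_u32_u32() (include/linux/math64.h) produces a full 64-bit product of two 32-bit values, so the multiply cannot wrap before the shift. A userspace illustration with deliberately oversized, assumed values:

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's mul_u32_u32(): widen, then multiply. */
static uint64_t mul_u32_u32(uint32_t a, uint32_t b)
{
	return (uint64_t)a * b;
}

int main(void)
{
	/* Exaggerated values whose 32-bit product wraps: 70000 * 65536 > 2^32. */
	uint32_t i = 70000, mtts_per_wqe = 65536;
	uint8_t page_shift = 12;

	uint64_t good = mul_u32_u32(i, mtts_per_wqe) << page_shift;
	uint64_t bad  = (uint64_t)(i * mtts_per_wqe) << page_shift; /* truncated */

	printf("good=%llu bad=%llu\n",
	       (unsigned long long)good, (unsigned long long)bad);
	return 0;
}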