From 388a2b56e5b042e0d1ad4d309f875a944b1d1c11 Mon Sep 17 00:00:00 2001 From: Maxim Mikityanskiy Date: Thu, 30 Jul 2020 16:14:58 +0300 Subject: net/mlx5e: Small improvements for XDP TX MPWQE logic Use MLX5E_XDP_MPW_MAX_WQEBBS to reserve space for a MPWQE, because it's actually the maximal size a MPWQE can take. Reorganize the logic that checks when to close the MPWQE session: 1. Put all checks into a single function. 2. When inline is on, make only one comparison - if it's false, the less strict one will also be false. The compiler probably optimized it out anyway, but it's clearer to also reflect it in the code. The MLX5E_XDP_INLINE_WQE_* defines are also changed to make the calculations more correct from the logical point of view. Though MLX5E_XDP_INLINE_WQE_MAX_DS_CNT used to be 16 and didn't change its value, the calculation used to be DIV_ROUND_UP(max inline packet size, MLX5_SEND_WQE_DS), and the numerator should have included sizeof(struct mlx5_wqe_inline_seg). Signed-off-by: Maxim Mikityanskiy Reviewed-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h index e806c13d491f..615bf04f4a54 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h @@ -42,9 +42,10 @@ (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) #define MLX5E_XDP_TX_DS_COUNT (MLX5E_XDP_TX_EMPTY_DS_COUNT + 1 /* SG DS */) -#define MLX5E_XDP_INLINE_WQE_SZ_THRSD (256 - sizeof(struct mlx5_wqe_inline_seg)) -#define MLX5E_XDP_INLINE_WQE_MAX_DS_CNT \ - DIV_ROUND_UP(MLX5E_XDP_INLINE_WQE_SZ_THRSD, MLX5_SEND_WQE_DS) +#define MLX5E_XDP_INLINE_WQE_MAX_DS_CNT 16 +#define MLX5E_XDP_INLINE_WQE_SZ_THRSD \ + (MLX5E_XDP_INLINE_WQE_MAX_DS_CNT * MLX5_SEND_WQE_DS - \ + sizeof(struct mlx5_wqe_inline_seg)) /* The mult of MLX5_SEND_WQE_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS * (16 * 4 == 64) does not fit in the 6-bit DS field of Ctrl Segment. @@ -141,11 +142,12 @@ static inline void mlx5e_xdp_update_inline_state(struct mlx5e_xdpsq *sq) session->inline_on = 1; } -static inline bool -mlx5e_xdp_no_room_for_inline_pkt(struct mlx5e_xdp_mpwqe *session) +static inline bool mlx5e_xdp_mpqwe_is_full(struct mlx5e_xdp_mpwqe *session) { - return session->inline_on && - session->ds_count + MLX5E_XDP_INLINE_WQE_MAX_DS_CNT > MLX5E_XDP_MPW_MAX_NUM_DS; + if (session->inline_on) + return session->ds_count + MLX5E_XDP_INLINE_WQE_MAX_DS_CNT > + MLX5E_XDP_MPW_MAX_NUM_DS; + return session->ds_count == MLX5E_XDP_MPW_MAX_NUM_DS; } struct mlx5e_xdp_wqe_info { -- cgit v1.2.3 From 97e3afd64dc2918f79026986c575f3343197e7c8 Mon Sep 17 00:00:00 2001 From: Maxim Mikityanskiy Date: Thu, 9 Jul 2020 14:05:31 +0300 Subject: net/mlx5e: Unify constants for WQE_EMPTY_DS_COUNT A constant for the number of DS in an empty WQE (i.e. a WQE without data segments) is needed in multiple places (normal TX data path, MPWQE in XDP), but currently we have a constant for XDP and an inline formula in normal TX. This patch introduces a common constant. Additionally, mlx5e_xdp_mpwqe_session_start is converted to use struct assignment, because the code nearby is touched. 
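To make the arithmetic behind these defines concrete, it can be checked with a standalone snippet. The struct sizes below are assumptions chosen for the sketch (in the driver they come from the mlx5 headers: a 16-byte ctrl segment plus a 16-byte eth segment form an empty TX WQE, and the inline segment header is a 4-byte byte count), but the relationships mirror the defines in these two patches:

    #include <stdio.h>

    #define MLX5_SEND_WQE_DS 16u  /* bytes per data segment (DS) */

    /* Assumed sizes for the sketch; the driver takes them from the
     * mlx5 headers. */
    #define SIZEOF_MLX5E_TX_WQE          32u
    #define SIZEOF_MLX5_WQE_INLINE_SEG    4u

    #define MLX5E_TX_WQE_EMPTY_DS_COUNT \
            (SIZEOF_MLX5E_TX_WQE / MLX5_SEND_WQE_DS)

    /* Corrected threshold: 16 DS minus the inline segment header, so
     * that an inline packet of exactly threshold size still fits in
     * 16 DS. */
    #define MLX5E_XDP_INLINE_WQE_MAX_DS_CNT 16u
    #define MLX5E_XDP_INLINE_WQE_SZ_THRSD \
            (MLX5E_XDP_INLINE_WQE_MAX_DS_CNT * MLX5_SEND_WQE_DS - \
             SIZEOF_MLX5_WQE_INLINE_SEG)

    int main(void)
    {
            printf("empty WQE DS count:    %u\n",
                   MLX5E_TX_WQE_EMPTY_DS_COUNT);          /* 2 */
            printf("inline size threshold: %u bytes\n",
                   MLX5E_XDP_INLINE_WQE_SZ_THRSD);        /* 252 */
            return 0;
    }
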
Signed-off-by: Maxim Mikityanskiy Reviewed-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h | 2 ++ drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c | 17 ++++++++++------- drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h | 21 ++++++++------------- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 2 +- 4 files changed, 21 insertions(+), 21 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h index 9931a605eed9..277725c05de4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h @@ -7,6 +7,8 @@ #include "en.h" #include +#define MLX5E_TX_WQE_EMPTY_DS_COUNT (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) + #define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start)) enum mlx5e_icosq_wqe_type { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c index 7fccd2ea7dc9..737e88d49e89 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c @@ -196,16 +196,19 @@ static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq) { struct mlx5e_xdp_mpwqe *session = &sq->mpwqe; struct mlx5e_xdpsq_stats *stats = sq->stats; + struct mlx5e_tx_wqe *wqe; u16 pi; pi = mlx5e_xdpsq_get_next_pi(sq, MLX5E_XDP_MPW_MAX_WQEBBS); - session->wqe = MLX5E_TX_FETCH_WQE(sq, pi); - - net_prefetchw(session->wqe->data); - session->ds_count = MLX5E_XDP_TX_EMPTY_DS_COUNT; - session->pkt_count = 0; - - mlx5e_xdp_update_inline_state(sq); + wqe = MLX5E_TX_FETCH_WQE(sq, pi); + net_prefetchw(wqe->data); + + *session = (struct mlx5e_xdp_mpwqe) { + .wqe = wqe, + .ds_count = MLX5E_TX_WQE_EMPTY_DS_COUNT, + .pkt_count = 0, + .inline_on = mlx5e_xdp_get_inline_state(sq, session->inline_on), + }; stats->mpwqe++; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h index 615bf04f4a54..96d6b1553bab 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h @@ -38,9 +38,7 @@ #include "en/txrx.h" #define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN) -#define MLX5E_XDP_TX_EMPTY_DS_COUNT \ - (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) -#define MLX5E_XDP_TX_DS_COUNT (MLX5E_XDP_TX_EMPTY_DS_COUNT + 1 /* SG DS */) +#define MLX5E_XDP_TX_DS_COUNT (MLX5E_TX_WQE_EMPTY_DS_COUNT + 1 /* SG DS */) #define MLX5E_XDP_INLINE_WQE_MAX_DS_CNT 16 #define MLX5E_XDP_INLINE_WQE_SZ_THRSD \ @@ -123,23 +121,20 @@ static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq) /* Enable inline WQEs to shift some load from a congested HCA (HW) to * a less congested cpu (SW). 
*/ -static inline void mlx5e_xdp_update_inline_state(struct mlx5e_xdpsq *sq) +static inline bool mlx5e_xdp_get_inline_state(struct mlx5e_xdpsq *sq, bool cur) { u16 outstanding = sq->xdpi_fifo_pc - sq->xdpi_fifo_cc; - struct mlx5e_xdp_mpwqe *session = &sq->mpwqe; #define MLX5E_XDP_INLINE_WATERMARK_LOW 10 #define MLX5E_XDP_INLINE_WATERMARK_HIGH 128 - if (session->inline_on) { - if (outstanding <= MLX5E_XDP_INLINE_WATERMARK_LOW) - session->inline_on = 0; - return; - } + if (cur && outstanding <= MLX5E_XDP_INLINE_WATERMARK_LOW) + return false; + + if (!cur && outstanding >= MLX5E_XDP_INLINE_WATERMARK_HIGH) + return true; - /* inline is false */ - if (outstanding >= MLX5E_XDP_INLINE_WATERMARK_HIGH) - session->inline_on = 1; + return cur; } static inline bool mlx5e_xdp_mpqwe_is_full(struct mlx5e_xdp_mpwqe *session) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 939bbf0aa2c3..e458a0ab8740 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -305,7 +305,7 @@ static void mlx5e_sq_xmit_prepare(struct mlx5e_txqsq *sq, struct sk_buff *skb, static void mlx5e_sq_calc_wqe_attr(struct sk_buff *skb, const struct mlx5e_tx_attr *attr, struct mlx5e_tx_wqe_attr *wqe_attr) { - u16 ds_cnt = sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS; + u16 ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT; u16 ds_cnt_inl = 0; ds_cnt += !!attr->headlen + skb_shinfo(skb)->nr_frags; -- cgit v1.2.3 From 530d5ce22ca2d7f54b756580c02d0772c053d221 Mon Sep 17 00:00:00 2001 From: Maxim Mikityanskiy Date: Thu, 4 Jun 2020 16:43:27 +0300 Subject: net/mlx5e: Generalize TX MPWQE checks for full session As preparation for the upcoming TX MPWQE for SKBs, create a function (mlx5e_tx_mpwqe_is_full) to check whether an MPWQE session is full. This function will be shared by MPWQE code for XDP and for SKBs. Defines are renamed and moved to make them not XDP-specific. Signed-off-by: Maxim Mikityanskiy Reviewed-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h | 18 ++++++++++++++++++ drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h | 18 ++---------------- 3 files changed, 21 insertions(+), 17 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h index 03fe92323f48..b4ee1f2f1746 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h @@ -9,6 +9,19 @@ #define MLX5E_TX_WQE_EMPTY_DS_COUNT (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) +/* The mult of MLX5_SEND_WQE_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS + * (16 * 4 == 64) does not fit in the 6-bit DS field of Ctrl Segment. + * We use a bound lower that MLX5_SEND_WQE_MAX_WQEBBS to let a + * full-session WQE be cache-aligned. 
+ */ +#if L1_CACHE_BYTES < 128 +#define MLX5E_TX_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 1) +#else +#define MLX5E_TX_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 2) +#endif + +#define MLX5E_TX_MPW_MAX_NUM_DS (MLX5E_TX_MPW_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS) + #define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start)) enum mlx5e_icosq_wqe_type { @@ -266,6 +279,11 @@ mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma) void mlx5e_sq_xmit_simple(struct mlx5e_txqsq *sq, struct sk_buff *skb, bool xmit_more); +static inline bool mlx5e_tx_mpwqe_is_full(struct mlx5e_xdp_mpwqe *session) +{ + return session->ds_count == MLX5E_TX_MPW_MAX_NUM_DS; +} + static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq) { if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c index 737e88d49e89..2a72496ceda9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c @@ -199,7 +199,7 @@ static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq) struct mlx5e_tx_wqe *wqe; u16 pi; - pi = mlx5e_xdpsq_get_next_pi(sq, MLX5E_XDP_MPW_MAX_WQEBBS); + pi = mlx5e_xdpsq_get_next_pi(sq, MLX5E_TX_MPW_MAX_WQEBBS); wqe = MLX5E_TX_FETCH_WQE(sq, pi); net_prefetchw(wqe->data); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h index 96d6b1553bab..0dc38acab5a8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h @@ -45,20 +45,6 @@ (MLX5E_XDP_INLINE_WQE_MAX_DS_CNT * MLX5_SEND_WQE_DS - \ sizeof(struct mlx5_wqe_inline_seg)) -/* The mult of MLX5_SEND_WQE_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS - * (16 * 4 == 64) does not fit in the 6-bit DS field of Ctrl Segment. - * We use a bound lower that MLX5_SEND_WQE_MAX_WQEBBS to let a - * full-session WQE be cache-aligned. - */ -#if L1_CACHE_BYTES < 128 -#define MLX5E_XDP_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 1) -#else -#define MLX5E_XDP_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 2) -#endif - -#define MLX5E_XDP_MPW_MAX_NUM_DS \ - (MLX5E_XDP_MPW_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS) - struct mlx5e_xsk_param; int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk); bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di, @@ -141,8 +127,8 @@ static inline bool mlx5e_xdp_mpqwe_is_full(struct mlx5e_xdp_mpwqe *session) { if (session->inline_on) return session->ds_count + MLX5E_XDP_INLINE_WQE_MAX_DS_CNT > - MLX5E_XDP_MPW_MAX_NUM_DS; - return session->ds_count == MLX5E_XDP_MPW_MAX_NUM_DS; + MLX5E_TX_MPW_MAX_NUM_DS; + return mlx5e_tx_mpwqe_is_full(session); } struct mlx5e_xdp_wqe_info { -- cgit v1.2.3 From b39fe61edc50aff03a7188c1ad925c61af5ea882 Mon Sep 17 00:00:00 2001 From: Maxim Mikityanskiy Date: Thu, 16 Apr 2020 11:30:14 +0300 Subject: net/mlx5e: Rename xmit-related structs to generalize them As preparation for the upcoming TX MPWQE support for SKBs, rename struct mlx5e_xdp_mpwqe to mlx5e_tx_mpwqe and move it above struct mlx5e_txqsq. This structure will be reused in the regular SQ and in the regular TX data path. Also rename mlx5e_xdp_xmit_data to mlx5e_xmit_data - it will be used in the upcoming TX MPWQE flow. 
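Taken together with the previous patch, the resulting "session is full" checks can be modeled in a minimal standalone sketch using the renamed struct. The constants assume L1_CACHE_BYTES < 128 (64-byte cachelines, matching the #if above), so a session is capped at 15 WQEBBs, i.e. 60 DS; the XDP wrapper is spelled mlx5e_xdp_mpqwe_is_full in the driver:

    #include <stdbool.h>
    #include <stdint.h>

    #define MLX5_SEND_WQE_MAX_WQEBBS        16
    #define MLX5_SEND_WQEBB_NUM_DS           4
    #define MLX5E_TX_MPW_MAX_WQEBBS         (MLX5_SEND_WQE_MAX_WQEBBS - 1)
    #define MLX5E_TX_MPW_MAX_NUM_DS \
            (MLX5E_TX_MPW_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS)
    #define MLX5E_XDP_INLINE_WQE_MAX_DS_CNT 16

    struct mlx5e_tx_mpwqe {
            void *wqe;              /* current session WQE, opaque here */
            uint8_t ds_count;
            uint8_t pkt_count;
            uint8_t inline_on;
    };

    /* Shared check: every non-inline packet adds exactly one DS, so
     * testing for equality is sufficient. */
    static bool mlx5e_tx_mpwqe_is_full(struct mlx5e_tx_mpwqe *session)
    {
            return session->ds_count == MLX5E_TX_MPW_MAX_NUM_DS;
    }

    /* XDP variant: an inline packet may consume up to 16 DS, so with
     * inline on the session must close while that worst case still
     * fits. */
    static bool mlx5e_xdp_mpwqe_is_full(struct mlx5e_tx_mpwqe *session)
    {
            if (session->inline_on)
                    return session->ds_count +
                           MLX5E_XDP_INLINE_WQE_MAX_DS_CNT >
                           MLX5E_TX_MPW_MAX_NUM_DS;
            return mlx5e_tx_mpwqe_is_full(session);
    }

    int main(void)
    {
            struct mlx5e_tx_mpwqe s = { .ds_count = 46, .inline_on = 1 };

            return mlx5e_xdp_mpwqe_is_full(&s); /* 46 + 16 > 60: full */
    }
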
Signed-off-by: Maxim Mikityanskiy Reviewed-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 22 +++++++++++----------- drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h | 2 +- drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c | 16 ++++++++-------- drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h | 10 +++++----- .../net/ethernet/mellanox/mlx5/core/en/xsk/tx.c | 2 +- 5 files changed, 26 insertions(+), 26 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 04c6ff2386bf..252cc0277475 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -313,6 +313,14 @@ enum { MLX5E_SQ_STATE_PENDING_XSK_TX, }; +struct mlx5e_tx_mpwqe { + /* Current MPWQE session */ + struct mlx5e_tx_wqe *wqe; + u8 ds_count; + u8 pkt_count; + u8 inline_on; +}; + struct mlx5e_txqsq { /* data path */ @@ -403,7 +411,7 @@ struct mlx5e_xdp_info { }; }; -struct mlx5e_xdp_xmit_data { +struct mlx5e_xmit_data { dma_addr_t dma_addr; void *data; u32 len; @@ -416,18 +424,10 @@ struct mlx5e_xdp_info_fifo { u32 mask; }; -struct mlx5e_xdp_mpwqe { - /* Current MPWQE session */ - struct mlx5e_tx_wqe *wqe; - u8 ds_count; - u8 pkt_count; - u8 inline_on; -}; - struct mlx5e_xdpsq; typedef int (*mlx5e_fp_xmit_xdp_frame_check)(struct mlx5e_xdpsq *); typedef bool (*mlx5e_fp_xmit_xdp_frame)(struct mlx5e_xdpsq *, - struct mlx5e_xdp_xmit_data *, + struct mlx5e_xmit_data *, struct mlx5e_xdp_info *, int); @@ -442,7 +442,7 @@ struct mlx5e_xdpsq { u32 xdpi_fifo_pc ____cacheline_aligned_in_smp; u16 pc; struct mlx5_wqe_ctrl_seg *doorbell_cseg; - struct mlx5e_xdp_mpwqe mpwqe; + struct mlx5e_tx_mpwqe mpwqe; struct mlx5e_cq cq; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h index b4ee1f2f1746..06dbfd6cd82a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h @@ -279,7 +279,7 @@ mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma) void mlx5e_sq_xmit_simple(struct mlx5e_txqsq *sq, struct sk_buff *skb, bool xmit_more); -static inline bool mlx5e_tx_mpwqe_is_full(struct mlx5e_xdp_mpwqe *session) +static inline bool mlx5e_tx_mpwqe_is_full(struct mlx5e_tx_mpwqe *session) { return session->ds_count == MLX5E_TX_MPW_MAX_NUM_DS; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c index 2a72496ceda9..f0a102763de6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c @@ -59,7 +59,7 @@ static inline bool mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq, struct mlx5e_dma_info *di, struct xdp_buff *xdp) { - struct mlx5e_xdp_xmit_data xdptxd; + struct mlx5e_xmit_data xdptxd; struct mlx5e_xdp_info xdpi; struct xdp_frame *xdpf; dma_addr_t dma_addr; @@ -194,7 +194,7 @@ static u16 mlx5e_xdpsq_get_next_pi(struct mlx5e_xdpsq *sq, u16 size) static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq) { - struct mlx5e_xdp_mpwqe *session = &sq->mpwqe; + struct mlx5e_tx_mpwqe *session = &sq->mpwqe; struct mlx5e_xdpsq_stats *stats = sq->stats; struct mlx5e_tx_wqe *wqe; u16 pi; @@ -203,7 +203,7 @@ static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq) wqe = MLX5E_TX_FETCH_WQE(sq, pi); net_prefetchw(wqe->data); - *session = (struct mlx5e_xdp_mpwqe) { + 
*session = (struct mlx5e_tx_mpwqe) { .wqe = wqe, .ds_count = MLX5E_TX_WQE_EMPTY_DS_COUNT, .pkt_count = 0, @@ -216,7 +216,7 @@ static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq) void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq) { struct mlx5_wq_cyc *wq = &sq->wq; - struct mlx5e_xdp_mpwqe *session = &sq->mpwqe; + struct mlx5e_tx_mpwqe *session = &sq->mpwqe; struct mlx5_wqe_ctrl_seg *cseg = &session->wqe->ctrl; u16 ds_count = session->ds_count; u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); @@ -261,10 +261,10 @@ INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq } INDIRECT_CALLABLE_SCOPE bool -mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_xmit_data *xdptxd, +mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd, struct mlx5e_xdp_info *xdpi, int check_result) { - struct mlx5e_xdp_mpwqe *session = &sq->mpwqe; + struct mlx5e_tx_mpwqe *session = &sq->mpwqe; struct mlx5e_xdpsq_stats *stats = sq->stats; if (unlikely(xdptxd->len > sq->hw_mtu)) { @@ -308,7 +308,7 @@ INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq) } INDIRECT_CALLABLE_SCOPE bool -mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_xmit_data *xdptxd, +mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd, struct mlx5e_xdp_info *xdpi, int check_result) { struct mlx5_wq_cyc *wq = &sq->wq; @@ -505,7 +505,7 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, for (i = 0; i < n; i++) { struct xdp_frame *xdpf = frames[i]; - struct mlx5e_xdp_xmit_data xdptxd; + struct mlx5e_xmit_data xdptxd; struct mlx5e_xdp_info xdpi; bool ret; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h index 0dc38acab5a8..4bd8af478a4a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h @@ -58,11 +58,11 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, u32 flags); INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, - struct mlx5e_xdp_xmit_data *xdptxd, + struct mlx5e_xmit_data *xdptxd, struct mlx5e_xdp_info *xdpi, int check_result)); INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, - struct mlx5e_xdp_xmit_data *xdptxd, + struct mlx5e_xmit_data *xdptxd, struct mlx5e_xdp_info *xdpi, int check_result)); INDIRECT_CALLABLE_DECLARE(int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq)); @@ -123,7 +123,7 @@ static inline bool mlx5e_xdp_get_inline_state(struct mlx5e_xdpsq *sq, bool cur) return cur; } -static inline bool mlx5e_xdp_mpqwe_is_full(struct mlx5e_xdp_mpwqe *session) +static inline bool mlx5e_xdp_mpqwe_is_full(struct mlx5e_tx_mpwqe *session) { if (session->inline_on) return session->ds_count + MLX5E_XDP_INLINE_WQE_MAX_DS_CNT > @@ -138,10 +138,10 @@ struct mlx5e_xdp_wqe_info { static inline void mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq, - struct mlx5e_xdp_xmit_data *xdptxd, + struct mlx5e_xmit_data *xdptxd, struct mlx5e_xdpsq_stats *stats) { - struct mlx5e_xdp_mpwqe *session = &sq->mpwqe; + struct mlx5e_tx_mpwqe *session = &sq->mpwqe; struct mlx5_wqe_data_seg *dseg = (struct mlx5_wqe_data_seg *)session->wqe + session->ds_count; u32 dma_len = xdptxd->len; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c index aa91cbdfe969..fb671a457129 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c @@ -67,8 +67,8 @@ static void mlx5e_xsk_tx_post_err(struct mlx5e_xdpsq *sq, bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget) { struct xsk_buff_pool *pool = sq->xsk_pool; + struct mlx5e_xmit_data xdptxd; struct mlx5e_xdp_info xdpi; - struct mlx5e_xdp_xmit_data xdptxd; bool work_done = true; bool flush = false; -- cgit v1.2.3 From 5af75c747e2a868abbf8611494b50ed5e076fca7 Mon Sep 17 00:00:00 2001 From: Maxim Mikityanskiy Date: Thu, 2 Jul 2020 12:37:29 +0300 Subject: net/mlx5e: Enhanced TX MPWQE for SKBs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This commit adds support for Enhanced TX MPWQE feature in the regular (SKB) data path. A MPWQE (multi-packet work queue element) can serve multiple packets, reducing the PCI bandwidth on control traffic. Two new stats (tx*_mpwqe_blks and tx*_mpwqe_pkts) are added. The feature is on by default and controlled by the skb_tx_mpwqe private flag. In a MPWQE, eseg is shared among all packets, so eseg-based offloads (IPSEC, GENEVE, checksum) run on a separate eseg that is compared to the eseg of the current MPWQE session to decide if the new packet can be added to the same session. MPWQE is not compatible with certain offloads and features, such as TLS offload, TSO, nonlinear SKBs. If such incompatible features are in use, the driver gracefully falls back to non-MPWQE. This change has no performance impact in TCP single stream test and XDP_TX single stream test. UDP pktgen, 64-byte packets, single stream, MPWQE off: Packet rate: 16.96 Mpps (±0.12 Mpps) -> 17.01 Mpps (±0.20 Mpps) Instructions per packet: 421 -> 429 Cycles per packet: 156 -> 161 Instructions per cycle: 2.70 -> 2.67 UDP pktgen, 64-byte packets, single stream, MPWQE on: Packet rate: 16.96 Mpps (±0.12 Mpps) -> 20.94 Mpps (±0.33 Mpps) Instructions per packet: 421 -> 329 Cycles per packet: 156 -> 123 Instructions per cycle: 2.70 -> 2.67 Enabling MPWQE can reduce PCI bandwidth: PCI Gen2, pktgen at fixed rate of 36864000 pps on 24 CPU cores: Inbound PCI utilization with MPWQE off: 80.3% Inbound PCI utilization with MPWQE on: 59.0% PCI Gen3, pktgen at fixed rate of 56064000 pps on 24 CPU cores: Inbound PCI utilization with MPWQE off: 65.4% Inbound PCI utilization with MPWQE on: 49.3% Enabling MPWQE can also reduce CPU load, increasing the packet rate in case of CPU bottleneck: PCI Gen2, pktgen at full rate on 24 CPU cores: Packet rate with MPWQE off: 37.5 Mpps Packet rate with MPWQE on: 49.0 Mpps PCI Gen3, pktgen at full rate on 24 CPU cores: Packet rate with MPWQE off: 57.0 Mpps Packet rate with MPWQE on: 66.8 Mpps Burst size in all pktgen tests is 32. 
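The PCI savings follow directly from descriptor arithmetic: with MPWQE off, every packet posts its own 64-byte WQE basic block, while a full MPWQE session shares one ctrl+eth header across up to 58 packets (60 DS budget minus the empty-WQE DS count), each packet adding only a 16-byte data segment. A rough, illustrative model of that ratio (sizes assumed as in the sketches above, not measured; doorbells and inline data ignored):

    #include <stdio.h>

    int main(void)
    {
            unsigned wqebb = 64;       /* one basic block per regular TX WQE */
            unsigned dseg = 16;        /* one data segment per MPWQE packet */
            unsigned session_hdr = 32; /* shared ctrl + eth segments */
            unsigned pkts = 60 - 2;    /* DS budget minus empty-WQE DS count */

            printf("descriptor bytes per packet, MPWQE off: %u\n", wqebb);
            printf("descriptor bytes per packet, MPWQE on:  ~%u\n",
                   (session_hdr + pkts * dseg + pkts - 1) / pkts); /* ~17 */
            return 0;
    }
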
CPU: Intel(R) Xeon(R) CPU E5-2680 v3 @ 2.50GHz (x86_64) NIC: Mellanox ConnectX-6 Dx GCC 10.2.0 Signed-off-by: Maxim Mikityanskiy Reviewed-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 4 + drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h | 1 + drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c | 1 + drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h | 1 + .../mellanox/mlx5/core/en_accel/en_accel.h | 29 ++-- .../mellanox/mlx5/core/en_accel/tls_rxtx.c | 2 + .../net/ethernet/mellanox/mlx5/core/en_ethtool.c | 15 +- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 11 ++ drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 6 + drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 4 + drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 184 ++++++++++++++++++++- 11 files changed, 240 insertions(+), 18 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 252cc0277475..0df40d24acb0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -221,6 +221,7 @@ enum mlx5e_priv_flag { MLX5E_PFLAG_RX_STRIDING_RQ, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE, MLX5E_PFLAG_XDP_TX_MPWQE, + MLX5E_PFLAG_SKB_TX_MPWQE, MLX5E_NUM_PFLAGS, /* Keep last */ }; @@ -305,6 +306,7 @@ struct mlx5e_sq_dma { enum { MLX5E_SQ_STATE_ENABLED, + MLX5E_SQ_STATE_MPWQE, MLX5E_SQ_STATE_RECOVERING, MLX5E_SQ_STATE_IPSEC, MLX5E_SQ_STATE_AM, @@ -316,6 +318,7 @@ enum { struct mlx5e_tx_mpwqe { /* Current MPWQE session */ struct mlx5e_tx_wqe *wqe; + u32 bytes_count; u8 ds_count; u8 pkt_count; u8 inline_on; @@ -334,6 +337,7 @@ struct mlx5e_txqsq { u16 pc ____cacheline_aligned_in_smp; u16 skb_fifo_pc; u32 dma_fifo_pc; + struct mlx5e_tx_mpwqe mpwqe; struct mlx5e_cq cq; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h index 06dbfd6cd82a..8ccd0b661a7f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h @@ -278,6 +278,7 @@ mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma) } void mlx5e_sq_xmit_simple(struct mlx5e_txqsq *sq, struct sk_buff *skb, bool xmit_more); +void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq); static inline bool mlx5e_tx_mpwqe_is_full(struct mlx5e_tx_mpwqe *session) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c index f0a102763de6..0b201e66f191 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c @@ -205,6 +205,7 @@ static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq) *session = (struct mlx5e_tx_mpwqe) { .wqe = wqe, + .bytes_count = 0, .ds_count = MLX5E_TX_WQE_EMPTY_DS_COUNT, .pkt_count = 0, .inline_on = mlx5e_xdp_get_inline_state(sq, session->inline_on), diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h index 4bd8af478a4a..d487e5e37162 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h @@ -147,6 +147,7 @@ mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq, u32 dma_len = xdptxd->len; session->pkt_count++; + session->bytes_count += dma_len; if (session->inline_on && dma_len <= MLX5E_XDP_INLINE_WQE_SZ_THRSD) { struct mlx5_wqe_inline_seg *inline_dseg = diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h index 23d4ef5ab9c5..2ea1cdc1ca54 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h @@ -128,31 +128,38 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev, return true; } -static inline bool mlx5e_accel_tx_finish(struct mlx5e_priv *priv, - struct mlx5e_txqsq *sq, - struct sk_buff *skb, - struct mlx5e_tx_wqe *wqe, - struct mlx5e_accel_tx_state *state) -{ -#ifdef CONFIG_MLX5_EN_TLS - mlx5e_tls_handle_tx_wqe(sq, &wqe->ctrl, &state->tls); -#endif +/* Part of the eseg touched by TX offloads */ +#define MLX5E_ACCEL_ESEG_LEN offsetof(struct mlx5_wqe_eth_seg, mss) +static inline bool mlx5e_accel_tx_eseg(struct mlx5e_priv *priv, + struct mlx5e_txqsq *sq, + struct sk_buff *skb, + struct mlx5_wqe_eth_seg *eseg) +{ #ifdef CONFIG_MLX5_EN_IPSEC if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state)) { - if (unlikely(!mlx5e_ipsec_handle_tx_skb(priv, &wqe->eth, skb))) + if (unlikely(!mlx5e_ipsec_handle_tx_skb(priv, eseg, skb))) return false; } #endif #if IS_ENABLED(CONFIG_GENEVE) if (skb->encapsulation) - mlx5e_tx_tunnel_accel(skb, &wqe->eth); + mlx5e_tx_tunnel_accel(skb, eseg); #endif return true; } +static inline void mlx5e_accel_tx_finish(struct mlx5e_txqsq *sq, + struct mlx5e_tx_wqe *wqe, + struct mlx5e_accel_tx_state *state) +{ +#ifdef CONFIG_MLX5_EN_TLS + mlx5e_tls_handle_tx_wqe(sq, &wqe->ctrl, &state->tls); +#endif +} + static inline int mlx5e_accel_init_rx(struct mlx5e_priv *priv) { return mlx5e_ktls_init_rx(priv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c index c36560b3e93d..6982b193ee8a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c @@ -270,6 +270,8 @@ bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq, if (!datalen) return true; + mlx5e_tx_mpwqe_ensure_complete(sq); + tls_ctx = tls_get_ctx(skb->sk); if (WARN_ON_ONCE(tls_ctx->netdev != netdev)) goto err_out; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 0dda80d8bdca..d25a56ec6876 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -1908,7 +1908,7 @@ static int set_pflag_rx_no_csum_complete(struct net_device *netdev, bool enable) return 0; } -static int set_pflag_xdp_tx_mpwqe(struct net_device *netdev, bool enable) +static int set_pflag_tx_mpwqe_common(struct net_device *netdev, u32 flag, bool enable) { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; @@ -1920,7 +1920,7 @@ static int set_pflag_xdp_tx_mpwqe(struct net_device *netdev, bool enable) new_channels.params = priv->channels.params; - MLX5E_SET_PFLAG(&new_channels.params, MLX5E_PFLAG_XDP_TX_MPWQE, enable); + MLX5E_SET_PFLAG(&new_channels.params, flag, enable); if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { priv->channels.params = new_channels.params; @@ -1931,6 +1931,16 @@ static int set_pflag_xdp_tx_mpwqe(struct net_device *netdev, bool enable) return err; } +static int set_pflag_xdp_tx_mpwqe(struct net_device *netdev, bool enable) +{ + return set_pflag_tx_mpwqe_common(netdev, MLX5E_PFLAG_XDP_TX_MPWQE, enable); +} + +static int set_pflag_skb_tx_mpwqe(struct net_device 
*netdev, bool enable) +{ + return set_pflag_tx_mpwqe_common(netdev, MLX5E_PFLAG_SKB_TX_MPWQE, enable); +} + static const struct pflag_desc mlx5e_priv_flags[MLX5E_NUM_PFLAGS] = { { "rx_cqe_moder", set_pflag_rx_cqe_based_moder }, { "tx_cqe_moder", set_pflag_tx_cqe_based_moder }, @@ -1938,6 +1948,7 @@ static const struct pflag_desc mlx5e_priv_flags[MLX5E_NUM_PFLAGS] = { { "rx_striding_rq", set_pflag_rx_striding_rq }, { "rx_no_csum_complete", set_pflag_rx_no_csum_complete }, { "xdp_tx_mpwqe", set_pflag_xdp_tx_mpwqe }, + { "skb_tx_mpwqe", set_pflag_skb_tx_mpwqe }, }; static int mlx5e_handle_pflag(struct net_device *netdev, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index c331aa9714f8..472252ea67a1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -1082,6 +1082,12 @@ static int mlx5e_calc_sq_stop_room(struct mlx5e_txqsq *sq, u8 log_sq_size) sq->stop_room = mlx5e_tls_get_stop_room(sq); sq->stop_room += mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS); + if (test_bit(MLX5E_SQ_STATE_MPWQE, &sq->state)) + /* A MPWQE can take up to the maximum-sized WQE + all the normal + * stop room can be taken if a new packet breaks the active + * MPWQE session and allocates its WQEs right away. + */ + sq->stop_room += mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS); if (WARN_ON(sq->stop_room >= sq_size)) { netdev_err(sq->channel->netdev, "Stop room %hu is bigger than the SQ size %d\n", @@ -1123,6 +1129,8 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state); if (mlx5_accel_is_tls_device(c->priv->mdev)) set_bit(MLX5E_SQ_STATE_TLS, &sq->state); + if (param->is_mpw) + set_bit(MLX5E_SQ_STATE_MPWQE, &sq->state); err = mlx5e_calc_sq_stop_room(sq, params->log_sq_size); if (err) return err; @@ -2175,6 +2183,7 @@ static void mlx5e_build_sq_param(struct mlx5e_priv *priv, mlx5e_build_sq_param_common(priv, param); MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size); MLX5_SET(sqc, sqc, allow_swp, allow_swp); + param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE); mlx5e_build_tx_cq_param(priv, params, ¶m->cqp); } @@ -4731,6 +4740,8 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, params->log_sq_size = is_kdump_kernel() ? 
MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE : MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; + MLX5E_SET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE, + MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe)); /* XDP SQ */ MLX5E_SET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 6d5e54b964c0..c580f8b8c242 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -98,6 +98,8 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nop) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_mpwqe_blks) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_mpwqe_pkts) }, #ifdef CONFIG_MLX5_EN_TLS { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_packets) }, @@ -353,6 +355,8 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw) s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes; s->tx_added_vlan_packets += sq_stats->added_vlan_packets; s->tx_nop += sq_stats->nop; + s->tx_mpwqe_blks += sq_stats->mpwqe_blks; + s->tx_mpwqe_pkts += sq_stats->mpwqe_pkts; s->tx_queue_stopped += sq_stats->stopped; s->tx_queue_wake += sq_stats->wake; s->tx_queue_dropped += sq_stats->dropped; @@ -1556,6 +1560,8 @@ static const struct counter_desc sq_stats_desc[] = { { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) }, + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) }, + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) }, #ifdef CONFIG_MLX5_EN_TLS { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) }, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 9d9ee269a041..1e46e0ed04aa 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -120,6 +120,8 @@ struct mlx5e_sw_stats { u64 tx_tso_inner_bytes; u64 tx_added_vlan_packets; u64 tx_nop; + u64 tx_mpwqe_blks; + u64 tx_mpwqe_pkts; u64 rx_lro_packets; u64 rx_lro_bytes; u64 rx_ecn_mark; @@ -348,6 +350,8 @@ struct mlx5e_sq_stats { u64 csum_partial_inner; u64 added_vlan_packets; u64 nop; + u64 mpwqe_blks; + u64 mpwqe_pkts; #ifdef CONFIG_MLX5_EN_TLS u64 tls_encrypted_packets; u64 tls_encrypted_bytes; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index f5af35c5ecc8..13bd4f254ed7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -428,6 +428,166 @@ err_drop: dev_kfree_skb_any(skb); } +static bool mlx5e_tx_skb_supports_mpwqe(struct sk_buff *skb, struct mlx5e_tx_attr *attr) +{ + return !skb_is_nonlinear(skb) && !skb_vlan_tag_present(skb) && !attr->ihs; +} + +static bool mlx5e_tx_mpwqe_same_eseg(struct mlx5e_txqsq *sq, struct mlx5_wqe_eth_seg *eseg) +{ + struct mlx5e_tx_mpwqe *session = &sq->mpwqe; + + /* Assumes the session is already running and has at least one packet. 
*/ + return !memcmp(&session->wqe->eth, eseg, MLX5E_ACCEL_ESEG_LEN); +} + +static void mlx5e_tx_mpwqe_session_start(struct mlx5e_txqsq *sq, + struct mlx5_wqe_eth_seg *eseg) +{ + struct mlx5e_tx_mpwqe *session = &sq->mpwqe; + struct mlx5e_tx_wqe *wqe; + u16 pi; + + pi = mlx5e_txqsq_get_next_pi(sq, MLX5E_TX_MPW_MAX_WQEBBS); + wqe = MLX5E_TX_FETCH_WQE(sq, pi); + prefetchw(wqe->data); + + *session = (struct mlx5e_tx_mpwqe) { + .wqe = wqe, + .bytes_count = 0, + .ds_count = MLX5E_TX_WQE_EMPTY_DS_COUNT, + .pkt_count = 0, + .inline_on = 0, + }; + + memcpy(&session->wqe->eth, eseg, MLX5E_ACCEL_ESEG_LEN); + + sq->stats->mpwqe_blks++; +} + +static bool mlx5e_tx_mpwqe_session_is_active(struct mlx5e_txqsq *sq) +{ + return sq->mpwqe.wqe; +} + +static void mlx5e_tx_mpwqe_add_dseg(struct mlx5e_txqsq *sq, struct mlx5e_xmit_data *txd) +{ + struct mlx5e_tx_mpwqe *session = &sq->mpwqe; + struct mlx5_wqe_data_seg *dseg; + + dseg = (struct mlx5_wqe_data_seg *)session->wqe + session->ds_count; + + session->pkt_count++; + session->bytes_count += txd->len; + + dseg->addr = cpu_to_be64(txd->dma_addr); + dseg->byte_count = cpu_to_be32(txd->len); + dseg->lkey = sq->mkey_be; + session->ds_count++; + + sq->stats->mpwqe_pkts++; +} + +static struct mlx5_wqe_ctrl_seg *mlx5e_tx_mpwqe_session_complete(struct mlx5e_txqsq *sq) +{ + struct mlx5e_tx_mpwqe *session = &sq->mpwqe; + u8 ds_count = session->ds_count; + struct mlx5_wqe_ctrl_seg *cseg; + struct mlx5e_tx_wqe_info *wi; + u16 pi; + + cseg = &session->wqe->ctrl; + cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_ENHANCED_MPSW); + cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_count); + + pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc); + wi = &sq->db.wqe_info[pi]; + *wi = (struct mlx5e_tx_wqe_info) { + .skb = NULL, + .num_bytes = session->bytes_count, + .num_wqebbs = DIV_ROUND_UP(ds_count, MLX5_SEND_WQEBB_NUM_DS), + .num_dma = session->pkt_count, + .num_fifo_pkts = session->pkt_count, + }; + + sq->pc += wi->num_wqebbs; + + session->wqe = NULL; + + mlx5e_tx_check_stop(sq); + + return cseg; +} + +static void +mlx5e_sq_xmit_mpwqe(struct mlx5e_txqsq *sq, struct sk_buff *skb, + struct mlx5_wqe_eth_seg *eseg, bool xmit_more) +{ + struct mlx5_wqe_ctrl_seg *cseg; + struct mlx5e_xmit_data txd; + + if (!mlx5e_tx_mpwqe_session_is_active(sq)) { + mlx5e_tx_mpwqe_session_start(sq, eseg); + } else if (!mlx5e_tx_mpwqe_same_eseg(sq, eseg)) { + mlx5e_tx_mpwqe_session_complete(sq); + mlx5e_tx_mpwqe_session_start(sq, eseg); + } + + sq->stats->xmit_more += xmit_more; + + txd.data = skb->data; + txd.len = skb->len; + + txd.dma_addr = dma_map_single(sq->pdev, txd.data, txd.len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(sq->pdev, txd.dma_addr))) + goto err_unmap; + mlx5e_dma_push(sq, txd.dma_addr, txd.len, MLX5E_DMA_MAP_SINGLE); + + mlx5e_skb_fifo_push(sq, skb); + + mlx5e_tx_mpwqe_add_dseg(sq, &txd); + + mlx5e_tx_skb_update_hwts_flags(skb); + + if (unlikely(mlx5e_tx_mpwqe_is_full(&sq->mpwqe))) { + /* Might stop the queue and affect the retval of __netdev_tx_sent_queue. */ + cseg = mlx5e_tx_mpwqe_session_complete(sq); + + if (__netdev_tx_sent_queue(sq->txq, txd.len, xmit_more)) + mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg); + } else if (__netdev_tx_sent_queue(sq->txq, txd.len, xmit_more)) { + /* Might stop the queue, but we were asked to ring the doorbell anyway. 
*/ + cseg = mlx5e_tx_mpwqe_session_complete(sq); + + mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg); + } + + return; + +err_unmap: + mlx5e_dma_unmap_wqe_err(sq, 1); + sq->stats->dropped++; + dev_kfree_skb_any(skb); +} + +void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq) +{ + /* Unlikely in non-MPWQE workloads; not important in MPWQE workloads. */ + if (unlikely(mlx5e_tx_mpwqe_session_is_active(sq))) + mlx5e_tx_mpwqe_session_complete(sq); +} + +static bool mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq, + struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg) +{ + if (unlikely(!mlx5e_accel_tx_eseg(priv, sq, skb, eseg))) + return false; + + mlx5e_txwqe_build_eseg_csum(sq, skb, eseg); + + return true; +} + netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev) { struct mlx5e_priv *priv = netdev_priv(dev); @@ -442,21 +602,35 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev) /* May send SKBs and WQEs. */ if (unlikely(!mlx5e_accel_tx_begin(dev, sq, skb, &accel))) - goto out; + return NETDEV_TX_OK; mlx5e_sq_xmit_prepare(sq, skb, &accel, &attr); + + if (test_bit(MLX5E_SQ_STATE_MPWQE, &sq->state)) { + if (mlx5e_tx_skb_supports_mpwqe(skb, &attr)) { + struct mlx5_wqe_eth_seg eseg = {}; + + if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &eseg))) + return NETDEV_TX_OK; + + mlx5e_sq_xmit_mpwqe(sq, skb, &eseg, netdev_xmit_more()); + return NETDEV_TX_OK; + } + + mlx5e_tx_mpwqe_ensure_complete(sq); + } + mlx5e_sq_calc_wqe_attr(skb, &attr, &wqe_attr); pi = mlx5e_txqsq_get_next_pi(sq, wqe_attr.num_wqebbs); wqe = MLX5E_TX_FETCH_WQE(sq, pi); /* May update the WQE, but may not post other WQEs. */ - if (unlikely(!mlx5e_accel_tx_finish(priv, sq, skb, wqe, &accel))) - goto out; + mlx5e_accel_tx_finish(sq, wqe, &accel); + if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &wqe->eth))) + return NETDEV_TX_OK; - mlx5e_txwqe_build_eseg_csum(sq, skb, &wqe->eth); mlx5e_sq_xmit_wqe(sq, skb, &attr, &wqe_attr, wqe, pi, netdev_xmit_more()); -out: return NETDEV_TX_OK; } -- cgit v1.2.3
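As with the other mlx5e private flags, the new skb_tx_mpwqe flag can be inspected and toggled at runtime with ethtool, e.g. "ethtool --show-priv-flags <ifname>" and "ethtool --set-priv-flags <ifname> skb_tx_mpwqe off" (the interface name is a placeholder). Per the en_main.c hunk above, the flag defaults to on only when the device reports the enhanced_multi_pkt_send_wqe capability, and because is_mpw is baked into the SQ parameters, toggling it while the interface is up triggers a channel restart.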