author    Maxim Mikityanskiy <maximmi@mellanox.com>  2020-07-02 12:37:29 +0300
committer Saeed Mahameed <saeedm@nvidia.com>         2020-09-22 05:41:16 +0300
commit    5af75c747e2a868abbf8611494b50ed5e076fca7 (patch)
tree      fbca47bb5a219577b56cbaf179e94af5b3c55edb /drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
parent    67044a88aa0556b929cd07ba0656b101f3a6a67c (diff)
download  linux-5af75c747e2a868abbf8611494b50ed5e076fca7.tar.xz
net/mlx5e: Enhanced TX MPWQE for SKBs
This commit adds support for the Enhanced TX MPWQE feature in the regular (SKB) data path. An MPWQE (multi-packet work queue element) can serve multiple packets, reducing the PCI bandwidth spent on control traffic. Two new stats (tx*_mpwqe_blks and tx*_mpwqe_pkts) are added. The feature is on by default and controlled by the skb_tx_mpwqe private flag.

In an MPWQE, the eseg is shared among all packets, so eseg-based offloads (IPSEC, GENEVE, checksum) run on a separate eseg that is compared to the eseg of the current MPWQE session to decide whether the new packet can be added to the same session.

MPWQE is not compatible with certain offloads and features, such as TLS offload, TSO and nonlinear SKBs. If such incompatible features are in use, the driver gracefully falls back to non-MPWQE.

This change has no performance impact in TCP single-stream and XDP_TX single-stream tests.

UDP pktgen, 64-byte packets, single stream, MPWQE off:
  Packet rate: 16.96 Mpps (±0.12 Mpps) -> 17.01 Mpps (±0.20 Mpps)
  Instructions per packet: 421 -> 429
  Cycles per packet: 156 -> 161
  Instructions per cycle: 2.70 -> 2.67

UDP pktgen, 64-byte packets, single stream, MPWQE on:
  Packet rate: 16.96 Mpps (±0.12 Mpps) -> 20.94 Mpps (±0.33 Mpps)
  Instructions per packet: 421 -> 329
  Cycles per packet: 156 -> 123
  Instructions per cycle: 2.70 -> 2.67

Enabling MPWQE can reduce PCI bandwidth:

PCI Gen2, pktgen at fixed rate of 36864000 pps on 24 CPU cores:
  Inbound PCI utilization with MPWQE off: 80.3%
  Inbound PCI utilization with MPWQE on:  59.0%
PCI Gen3, pktgen at fixed rate of 56064000 pps on 24 CPU cores:
  Inbound PCI utilization with MPWQE off: 65.4%
  Inbound PCI utilization with MPWQE on:  49.3%

Enabling MPWQE can also reduce CPU load, increasing the packet rate in case of a CPU bottleneck:

PCI Gen2, pktgen at full rate on 24 CPU cores:
  Packet rate with MPWQE off: 37.5 Mpps
  Packet rate with MPWQE on:  49.0 Mpps
PCI Gen3, pktgen at full rate on 24 CPU cores:
  Packet rate with MPWQE off: 57.0 Mpps
  Packet rate with MPWQE on:  66.8 Mpps

Burst size in all pktgen tests is 32.

CPU: Intel(R) Xeon(R) CPU E5-2680 v3 @ 2.50GHz (x86_64)
NIC: Mellanox ConnectX-6 Dx
GCC 10.2.0

Signed-off-by: Maxim Mikityanskiy <maximmi@mellanox.com>
Reviewed-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
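To make the eseg-reuse decision concrete, here is a minimal sketch of the comparison described above. The helper name is hypothetical; only struct mlx5_wqe_eth_seg and the MLX5E_ACCEL_ESEG_LEN define introduced by this patch are taken from the source.

/* Sketch (not the driver's actual function): a packet may join the open
 * MPWQE session only if the eseg produced for it by the TX offloads matches
 * the session's eseg in the part that offloads touch, i.e. the first
 * MLX5E_ACCEL_ESEG_LEN bytes (everything before the mss field).
 */
static inline bool mpwqe_eseg_matches(const struct mlx5_wqe_eth_seg *session_eseg,
				      const struct mlx5_wqe_eth_seg *pkt_eseg)
{
	return !memcmp(session_eseg, pkt_eseg, MLX5E_ACCEL_ESEG_LEN);
}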
Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h')
-rw-r--r--   drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h   29
1 file changed, 18 insertions(+), 11 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index 23d4ef5ab9c5..2ea1cdc1ca54 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -128,31 +128,38 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
return true;
}
-static inline bool mlx5e_accel_tx_finish(struct mlx5e_priv *priv,
- struct mlx5e_txqsq *sq,
- struct sk_buff *skb,
- struct mlx5e_tx_wqe *wqe,
- struct mlx5e_accel_tx_state *state)
-{
-#ifdef CONFIG_MLX5_EN_TLS
- mlx5e_tls_handle_tx_wqe(sq, &wqe->ctrl, &state->tls);
-#endif
+/* Part of the eseg touched by TX offloads */
+#define MLX5E_ACCEL_ESEG_LEN offsetof(struct mlx5_wqe_eth_seg, mss)
+static inline bool mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
+ struct mlx5e_txqsq *sq,
+ struct sk_buff *skb,
+ struct mlx5_wqe_eth_seg *eseg)
+{
#ifdef CONFIG_MLX5_EN_IPSEC
if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state)) {
- if (unlikely(!mlx5e_ipsec_handle_tx_skb(priv, &wqe->eth, skb)))
+ if (unlikely(!mlx5e_ipsec_handle_tx_skb(priv, eseg, skb)))
return false;
}
#endif
#if IS_ENABLED(CONFIG_GENEVE)
if (skb->encapsulation)
- mlx5e_tx_tunnel_accel(skb, &wqe->eth);
+ mlx5e_tx_tunnel_accel(skb, eseg);
#endif
return true;
}
+static inline void mlx5e_accel_tx_finish(struct mlx5e_txqsq *sq,
+ struct mlx5e_tx_wqe *wqe,
+ struct mlx5e_accel_tx_state *state)
+{
+#ifdef CONFIG_MLX5_EN_TLS
+ mlx5e_tls_handle_tx_wqe(sq, &wqe->ctrl, &state->tls);
+#endif
+}
+
static inline int mlx5e_accel_init_rx(struct mlx5e_priv *priv)
{
return mlx5e_ktls_init_rx(priv);
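The hunk above splits the old combined mlx5e_accel_tx_finish() into two helpers: mlx5e_accel_tx_eseg(), which applies the eseg-based offloads (IPSEC, GENEVE) to a given struct mlx5_wqe_eth_seg, and a slimmer mlx5e_accel_tx_finish(), which only lets TLS adjust the control segment. A rough caller-side sketch follows; the function name and the drop handling are assumptions for illustration, not code from this patch (the real TX path is in en_tx.c).

/* Hypothetical caller shape, illustrative only. */
static void mlx5e_accel_tx_example(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,
				   struct sk_buff *skb, struct mlx5e_tx_wqe *wqe,
				   struct mlx5e_accel_tx_state *state)
{
	/* Offloads that modify the Ethernet segment run first and may fail. */
	if (unlikely(!mlx5e_accel_tx_eseg(priv, sq, skb, &wqe->eth))) {
		dev_kfree_skb_any(skb);	/* assumed drop policy on failure */
		return;
	}

	/* ... data segments are filled in here ... */

	/* TLS touches the control segment once the WQE is otherwise ready. */
	mlx5e_accel_tx_finish(sq, wqe, state);
}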