author     Tariq Toukan <tariqt@mellanox.com>     2020-01-12 17:22:14 +0300
committer  Saeed Mahameed <saeedm@mellanox.com>   2020-01-24 23:04:35 +0300
commit     ffbd9ca94e2ebbfe802d4b28bab5ba19818de853 (patch)
tree       e98dcb5fadb78f29039f556e41e0bee6d986b0aa /drivers
parent     3b83b6c2e024d85b770ddb1e19a513b5d7587f6f (diff)
download   linux-ffbd9ca94e2ebbfe802d4b28bab5ba19818de853.tar.xz
net/mlx5e: kTLS, Fix corner-case checks in TX resync flow
There are the following cases:

1. Packet ends before start marker: bypass offload.
2. Packet starts before start marker and ends after it: drop,
   not supported, breaks contract with kernel.
3. Packet ends before tls record info starts: drop, this packet
   was already acknowledged and its record info was released.

Add the above as comment in code.

Mind possible wraparounds of the TCP seq, replace the simple
comparison with a call to the TCP before() method.

In addition, remove logic that handles negative sync_len values,
as it became impossible.

Fixes: d2ead1f360e8 ("net/mlx5e: Add kTLS TX HW offload support")
Fixes: 46a3ea98074e ("net/mlx5e: kTLS, Enhance TX resync flow")
Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: Boris Pismenny <borisp@mellanox.com>
Reviewed-by: Boris Pismenny <borisp@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
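Background on the wraparound point above: TCP sequence numbers live in a 32-bit space that wraps, so the kernel's before() helper in include/net/tcp.h compares them via a signed 32-bit difference rather than a plain '<'. The standalone sketch below is illustrative only and is not part of the patch; the seq_before() name and the demo values are made up here, but it shows why a plain comparison can misclassify a packet whose sequence range crosses the wrap point.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Wraparound-safe "seq1 precedes seq2", mirroring before() in
 * include/net/tcp.h: the signed 32-bit difference is negative iff
 * seq1 comes earlier modulo 2^32.
 */
static inline bool seq_before(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) < 0;
}

int main(void)
{
	uint32_t record_start = 0xffffff00u; /* record starts just before the wrap */
	uint32_t packet_end   = 0x00000010u; /* packet ends just after the wrap    */

	/* A plain '<' claims the packet ends before the record start,
	 * although logically it ends after it.
	 */
	printf("plain <        : %d\n", packet_end < record_start);            /* prints 1 */
	printf("wraparound-safe: %d\n", seq_before(packet_end, record_start)); /* prints 0 */
	return 0;
}

With before(tcp_seq + datalen, tls_record_start_seq(record)), as used in the patch, the driver gets the correct "ends before" answer even when the sequence range crosses a wrap.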
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c   33
1 file changed, 19 insertions, 14 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index 778dab1af8fc..8dbb92176bd7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -180,7 +180,7 @@ mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq,
struct tx_sync_info {
u64 rcd_sn;
- s32 sync_len;
+ u32 sync_len;
int nr_frags;
skb_frag_t frags[MAX_SKB_FRAGS];
};
@@ -193,13 +193,14 @@ enum mlx5e_ktls_sync_retval {
static enum mlx5e_ktls_sync_retval
tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
- u32 tcp_seq, struct tx_sync_info *info)
+ u32 tcp_seq, int datalen, struct tx_sync_info *info)
{
struct tls_offload_context_tx *tx_ctx = priv_tx->tx_ctx;
enum mlx5e_ktls_sync_retval ret = MLX5E_KTLS_SYNC_DONE;
struct tls_record_info *record;
int remaining, i = 0;
unsigned long flags;
+ bool ends_before;
spin_lock_irqsave(&tx_ctx->lock, flags);
record = tls_get_record(tx_ctx, tcp_seq, &info->rcd_sn);
@@ -209,9 +210,21 @@ tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
goto out;
}
- if (unlikely(tcp_seq < tls_record_start_seq(record))) {
- ret = tls_record_is_start_marker(record) ?
- MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
+ /* There are the following cases:
+ * 1. packet ends before start marker: bypass offload.
+ * 2. packet starts before start marker and ends after it: drop,
+ * not supported, breaks contract with kernel.
+ * 3. packet ends before tls record info starts: drop,
+ * this packet was already acknowledged and its record info
+ * was released.
+ */
+ ends_before = before(tcp_seq + datalen, tls_record_start_seq(record));
+
+ if (unlikely(tls_record_is_start_marker(record))) {
+ ret = ends_before ? MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
+ goto out;
+ } else if (ends_before) {
+ ret = MLX5E_KTLS_SYNC_FAIL;
goto out;
}
@@ -337,7 +350,7 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
u8 num_wqebbs;
int i = 0;
- ret = tx_sync_info_get(priv_tx, seq, &info);
+ ret = tx_sync_info_get(priv_tx, seq, datalen, &info);
if (unlikely(ret != MLX5E_KTLS_SYNC_DONE)) {
if (ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA) {
stats->tls_skip_no_sync_data++;
@@ -351,14 +364,6 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
goto err_out;
}
- if (unlikely(info.sync_len < 0)) {
- if (likely(datalen <= -info.sync_len))
- return MLX5E_KTLS_SYNC_DONE;
-
- stats->tls_drop_bypass_req++;
- goto err_out;
- }
-
stats->tls_ooo++;
tx_post_resync_params(sq, priv_tx, info.rcd_sn);
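
Taken together, the new checks in tx_sync_info_get() implement the three-case classification described in the commit message. The following is a self-contained restatement for readability, not the driver code: the return values and the ends_before logic mirror the patch, while the classify_resync() signature, seq_before() helper, and stub enum names are hypothetical.

#include <stdbool.h>
#include <stdint.h>

/* Outcomes of the TX resync classification (mirrors the driver's
 * enum mlx5e_ktls_sync_retval).
 */
enum ktls_sync_retval {
	KTLS_SYNC_DONE,         /* proceed with the resync flow        */
	KTLS_SYNC_FAIL,         /* drop the packet                     */
	KTLS_SYNC_SKIP_NO_DATA, /* bypass offload, no sync data needed */
};

/* Wraparound-safe "seq1 precedes seq2", as in include/net/tcp.h. */
static inline bool seq_before(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) < 0;
}

/* Classify the packet [tcp_seq, tcp_seq + datalen) against the record
 * returned by tls_get_record(), whose payload starts at record_start.
 */
enum ktls_sync_retval
classify_resync(uint32_t tcp_seq, int datalen, uint32_t record_start,
		bool record_is_start_marker)
{
	bool ends_before = seq_before(tcp_seq + datalen, record_start);

	if (record_is_start_marker)
		/* Case 1: ends before the start marker -> bypass offload.
		 * Case 2: crosses the start marker -> unsupported, drop.
		 */
		return ends_before ? KTLS_SYNC_SKIP_NO_DATA : KTLS_SYNC_FAIL;

	if (ends_before)
		/* Case 3: ends before an already-released record -> drop. */
		return KTLS_SYNC_FAIL;

	return KTLS_SYNC_DONE;
}

Because the drop and skip cases are now resolved here, sync_len can no longer be negative, which is why the patch changes it to u32 and removes the negative-length branch from mlx5e_ktls_tx_handle_ooo().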