summaryrefslogtreecommitdiff
path: root/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
diff options
context:
space:
mode:
authorIlya Lesokhin <ilyal@mellanox.com>2018-04-30 10:16:21 +0300
committerDavid S. Miller <davem@davemloft.net>2018-05-01 16:42:48 +0300
commit43585a41bd894925abe5015edbce475beb3d8c10 (patch)
treef7c92461df4748fc03454ce74962de33ae7cffdb /drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
parentbf23974104fa7aa2f3cbd2f2295f866a71875efd (diff)
downloadlinux-43585a41bd894925abe5015edbce475beb3d8c10.tar.xz
net/mlx5e: TLS, Add error statistics
Add statistics for rare TLS related errors. Since the errors are rare we have a counter per netdev rather than per SQ. Signed-off-by: Ilya Lesokhin <ilyal@mellanox.com> Signed-off-by: Boris Pismenny <borisp@mellanox.com> Acked-by: Saeed Mahameed <saeedm@mellanox.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c')
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c24
1 file changed, 15 insertions, 9 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
index 49e8d455ebc3..ad2790fb5966 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
@@ -164,7 +164,8 @@ static struct sk_buff *
mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context *context,
struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_tx_wqe **wqe,
- u16 *pi)
+ u16 *pi,
+ struct mlx5e_tls *tls)
{
u32 tcp_seq = ntohl(tcp_hdr(skb)->seq);
struct sync_info info;
@@ -175,12 +176,14 @@ mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context *context,
sq->stats.tls_ooo++;
- if (mlx5e_tls_get_sync_data(context, tcp_seq, &info))
+ if (mlx5e_tls_get_sync_data(context, tcp_seq, &info)) {
/* We might get here if a retransmission reaches the driver
* after the relevant record is acked.
* It should be safe to drop the packet in this case
*/
+ atomic64_inc(&tls->sw_stats.tx_tls_drop_no_sync_data);
goto err_out;
+ }
if (unlikely(info.sync_len < 0)) {
u32 payload;
@@ -192,21 +195,22 @@ mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context *context,
*/
return skb;
- netdev_err(skb->dev,
- "Can't offload from the middle of an SKB [seq: %X, offload_seq: %X, end_seq: %X]\n",
- tcp_seq, tcp_seq + payload + info.sync_len,
- tcp_seq + payload);
+ atomic64_inc(&tls->sw_stats.tx_tls_drop_bypass_required);
goto err_out;
}
- if (unlikely(mlx5e_tls_add_metadata(skb, context->swid)))
+ if (unlikely(mlx5e_tls_add_metadata(skb, context->swid))) {
+ atomic64_inc(&tls->sw_stats.tx_tls_drop_metadata);
goto err_out;
+ }
headln = skb_transport_offset(skb) + tcp_hdrlen(skb);
linear_len += headln + sizeof(info.rcd_sn);
nskb = alloc_skb(linear_len, GFP_ATOMIC);
- if (unlikely(!nskb))
+ if (unlikely(!nskb)) {
+ atomic64_inc(&tls->sw_stats.tx_tls_drop_resync_alloc);
goto err_out;
+ }
context->expected_seq = tcp_seq + skb->len - headln;
skb_put(nskb, linear_len);
@@ -234,6 +238,7 @@ struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
struct mlx5e_tx_wqe **wqe,
u16 *pi)
{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_tls_offload_context *context;
struct tls_context *tls_ctx;
u32 expected_seq;
@@ -256,11 +261,12 @@ struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
expected_seq = context->expected_seq;
if (unlikely(expected_seq != skb_seq)) {
- skb = mlx5e_tls_handle_ooo(context, sq, skb, wqe, pi);
+ skb = mlx5e_tls_handle_ooo(context, sq, skb, wqe, pi, priv->tls);
goto out;
}
if (unlikely(mlx5e_tls_add_metadata(skb, context->swid))) {
+ atomic64_inc(&priv->tls->sw_stats.tx_tls_drop_metadata);
dev_kfree_skb_any(skb);
skb = NULL;
goto out;