author:    Guvenc Gulce <guvenc@linux.ibm.com>  2021-06-16 17:52:55 +0300
committer: David S. Miller <davem@davemloft.net>  2021-06-16 22:54:02 +0300
commit:    e0e4b8fa533858532f1b9ea9c6a4660d09beb37a (patch)
tree:      dc04b676e1642f2333f69ad513152c0b999fecaa /net/smc/af_smc.c
parent:    fb0a1dacf2bef929bf047c5434bfb976ac6a93e6 (diff)
download:  linux-e0e4b8fa533858532f1b9ea9c6a4660d09beb37a.tar.xz
net/smc: Add SMC statistics support
Add the ability to collect SMC statistics information. Per-cpu variables
are used to collect the statistics for better performance and to reduce
concurrency pitfalls. The code that collects the statistics data is
implemented in macros to increase code reuse and readability.

Signed-off-by: Guvenc Gulce <guvenc@linux.ibm.com>
Signed-off-by: Karsten Graul <kgraul@linux.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
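As background for the per-cpu approach described above, here is a minimal,
self-contained sketch of lock-free per-cpu counting behind a helper macro.
The names (struct demo_stats, demo_stats, DEMO_STAT_INC, demo_stats_init,
demo_stat_clnt_hshake_err_sum) are illustrative assumptions, not the
identifiers defined in smc_stats.h, which is not part of this diff.

/* Minimal sketch, not the actual smc_stats.h: each CPU increments its own
 * copy of the counters, so no lock is needed on the fast path; a reader
 * sums the per-cpu copies when the numbers are reported.
 */
#include <linux/percpu.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/types.h>

struct demo_stats {
	u64	clnt_hshake_err_cnt;
	u64	srv_hshake_err_cnt;
};

static struct demo_stats __percpu *demo_stats;	/* allocated once at init */

/* hypothetical counting macro, analogous in spirit to SMC_STAT_INC below */
#define DEMO_STAT_INC(field) \
	this_cpu_inc(demo_stats->field)

static int __init demo_stats_init(void)
{
	demo_stats = alloc_percpu(struct demo_stats);
	return demo_stats ? 0 : -ENOMEM;
}

/* reader side: fold the per-cpu copies into one sum */
static u64 demo_stat_clnt_hshake_err_sum(void)
{
	u64 sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += per_cpu_ptr(demo_stats, cpu)->clnt_hshake_err_cnt;
	return sum;
}

The patch below follows the same fast-path pattern: it calls SMC_STAT_INC(),
SMC_STAT_TX_PAYLOAD()/SMC_STAT_RX_PAYLOAD() and this_cpu_inc() at the
counting sites, and wires smc_stats_init()/smc_stats_exit() into module
init and exit.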
Diffstat (limited to 'net/smc/af_smc.c')
-rw-r--r--  net/smc/af_smc.c  89
1 file changed, 71 insertions(+), 18 deletions(-)
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 5eff7cccceff..efeaed384769 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -49,6 +49,7 @@
#include "smc_tx.h"
#include "smc_rx.h"
#include "smc_close.h"
+#include "smc_stats.h"
static DEFINE_MUTEX(smc_server_lgr_pending); /* serialize link group
* creation on server
@@ -508,9 +509,42 @@ static void smc_link_save_peer_info(struct smc_link *link,
link->peer_mtu = clc->r0.qp_mtu;
}
-static void smc_switch_to_fallback(struct smc_sock *smc)
+static void smc_stat_inc_fback_rsn_cnt(struct smc_sock *smc,
+ struct smc_stats_fback *fback_arr)
+{
+ int cnt;
+
+ for (cnt = 0; cnt < SMC_MAX_FBACK_RSN_CNT; cnt++) {
+ if (fback_arr[cnt].fback_code == smc->fallback_rsn) {
+ fback_arr[cnt].count++;
+ break;
+ }
+ if (!fback_arr[cnt].fback_code) {
+ fback_arr[cnt].fback_code = smc->fallback_rsn;
+ fback_arr[cnt].count++;
+ break;
+ }
+ }
+}
+
+static void smc_stat_fallback(struct smc_sock *smc)
+{
+ mutex_lock(&smc_stat_fback_rsn);
+ if (smc->listen_smc) {
+ smc_stat_inc_fback_rsn_cnt(smc, fback_rsn.srv);
+ fback_rsn.srv_fback_cnt++;
+ } else {
+ smc_stat_inc_fback_rsn_cnt(smc, fback_rsn.clnt);
+ fback_rsn.clnt_fback_cnt++;
+ }
+ mutex_unlock(&smc_stat_fback_rsn);
+}
+
+static void smc_switch_to_fallback(struct smc_sock *smc, int reason_code)
{
smc->use_fallback = true;
+ smc->fallback_rsn = reason_code;
+ smc_stat_fallback(smc);
if (smc->sk.sk_socket && smc->sk.sk_socket->file) {
smc->clcsock->file = smc->sk.sk_socket->file;
smc->clcsock->file->private_data = smc->clcsock;
@@ -522,8 +556,7 @@ static void smc_switch_to_fallback(struct smc_sock *smc)
/* fall back during connect */
static int smc_connect_fallback(struct smc_sock *smc, int reason_code)
{
- smc_switch_to_fallback(smc);
- smc->fallback_rsn = reason_code;
+ smc_switch_to_fallback(smc, reason_code);
smc_copy_sock_settings_to_clc(smc);
smc->connect_nonblock = 0;
if (smc->sk.sk_state == SMC_INIT)
@@ -538,6 +571,7 @@ static int smc_connect_decline_fallback(struct smc_sock *smc, int reason_code,
int rc;
if (reason_code < 0) { /* error, fallback is not possible */
+ this_cpu_inc(smc_stats->clnt_hshake_err_cnt);
if (smc->sk.sk_state == SMC_INIT)
sock_put(&smc->sk); /* passive closing */
return reason_code;
@@ -545,6 +579,7 @@ static int smc_connect_decline_fallback(struct smc_sock *smc, int reason_code,
if (reason_code != SMC_CLC_DECL_PEERDECL) {
rc = smc_clc_send_decline(smc, reason_code, version);
if (rc < 0) {
+ this_cpu_inc(smc_stats->clnt_hshake_err_cnt);
if (smc->sk.sk_state == SMC_INIT)
sock_put(&smc->sk); /* passive closing */
return rc;
@@ -992,6 +1027,7 @@ static int __smc_connect(struct smc_sock *smc)
if (rc)
goto vlan_cleanup;
+ SMC_STAT_CLNT_SUCC_INC(aclc);
smc_connect_ism_vlan_cleanup(smc, ini);
kfree(buf);
kfree(ini);
@@ -1308,6 +1344,7 @@ static void smc_listen_out_err(struct smc_sock *new_smc)
{
struct sock *newsmcsk = &new_smc->sk;
+ this_cpu_inc(smc_stats->srv_hshake_err_cnt);
if (newsmcsk->sk_state == SMC_INIT)
sock_put(&new_smc->sk); /* passive closing */
newsmcsk->sk_state = SMC_CLOSED;
@@ -1325,8 +1362,7 @@ static void smc_listen_decline(struct smc_sock *new_smc, int reason_code,
smc_listen_out_err(new_smc);
return;
}
- smc_switch_to_fallback(new_smc);
- new_smc->fallback_rsn = reason_code;
+ smc_switch_to_fallback(new_smc, reason_code);
if (reason_code && reason_code != SMC_CLC_DECL_PEERDECL) {
if (smc_clc_send_decline(new_smc, reason_code, version) < 0) {
smc_listen_out_err(new_smc);
@@ -1699,8 +1735,7 @@ static void smc_listen_work(struct work_struct *work)
/* check if peer is smc capable */
if (!tcp_sk(newclcsock->sk)->syn_smc) {
- smc_switch_to_fallback(new_smc);
- new_smc->fallback_rsn = SMC_CLC_DECL_PEERNOSMC;
+ smc_switch_to_fallback(new_smc, SMC_CLC_DECL_PEERNOSMC);
smc_listen_out_connected(new_smc);
return;
}
@@ -1778,6 +1813,7 @@ static void smc_listen_work(struct work_struct *work)
}
smc_conn_save_peer_info(new_smc, cclc);
smc_listen_out_connected(new_smc);
+ SMC_STAT_SERV_SUCC_INC(ini);
goto out_free;
out_unlock:
@@ -1984,18 +2020,19 @@ static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
if (msg->msg_flags & MSG_FASTOPEN) {
if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
- smc_switch_to_fallback(smc);
- smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP;
+ smc_switch_to_fallback(smc, SMC_CLC_DECL_OPTUNSUPP);
} else {
rc = -EINVAL;
goto out;
}
}
- if (smc->use_fallback)
+ if (smc->use_fallback) {
rc = smc->clcsock->ops->sendmsg(smc->clcsock, msg, len);
- else
+ } else {
rc = smc_tx_sendmsg(smc, msg, len);
+ SMC_STAT_TX_PAYLOAD(smc, len, rc);
+ }
out:
release_sock(sk);
return rc;
@@ -2030,6 +2067,7 @@ static int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
} else {
msg->msg_namelen = 0;
rc = smc_rx_recvmsg(smc, msg, NULL, len, flags);
+ SMC_STAT_RX_PAYLOAD(smc, rc, rc);
}
out:
@@ -2194,8 +2232,7 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
case TCP_FASTOPEN_NO_COOKIE:
/* option not supported by SMC */
if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
- smc_switch_to_fallback(smc);
- smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP;
+ smc_switch_to_fallback(smc, SMC_CLC_DECL_OPTUNSUPP);
} else {
rc = -EINVAL;
}
@@ -2204,18 +2241,22 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
if (sk->sk_state != SMC_INIT &&
sk->sk_state != SMC_LISTEN &&
sk->sk_state != SMC_CLOSED) {
- if (val)
+ if (val) {
+ SMC_STAT_INC(!smc->conn.lnk, ndly_cnt);
mod_delayed_work(smc->conn.lgr->tx_wq,
&smc->conn.tx_work, 0);
+ }
}
break;
case TCP_CORK:
if (sk->sk_state != SMC_INIT &&
sk->sk_state != SMC_LISTEN &&
sk->sk_state != SMC_CLOSED) {
- if (!val)
+ if (!val) {
+ SMC_STAT_INC(!smc->conn.lnk, cork_cnt);
mod_delayed_work(smc->conn.lgr->tx_wq,
&smc->conn.tx_work, 0);
+ }
}
break;
case TCP_DEFER_ACCEPT:
@@ -2338,11 +2379,13 @@ static ssize_t smc_sendpage(struct socket *sock, struct page *page,
goto out;
}
release_sock(sk);
- if (smc->use_fallback)
+ if (smc->use_fallback) {
rc = kernel_sendpage(smc->clcsock, page, offset,
size, flags);
- else
+ } else {
+ SMC_STAT_INC(!smc->conn.lnk, sendpage_cnt);
rc = sock_no_sendpage(sock, page, offset, size, flags);
+ }
out:
return rc;
@@ -2391,6 +2434,7 @@ static ssize_t smc_splice_read(struct socket *sock, loff_t *ppos,
flags = MSG_DONTWAIT;
else
flags = 0;
+ SMC_STAT_INC(!smc->conn.lnk, splice_cnt);
rc = smc_rx_recvmsg(smc, NULL, pipe, len, flags);
}
out:
@@ -2514,10 +2558,16 @@ static int __init smc_init(void)
if (!smc_close_wq)
goto out_alloc_hs_wq;
+ rc = smc_stats_init();
+ if (rc) {
+ pr_err("%s: smc_stats_init fails with %d\n", __func__, rc);
+ goto out_alloc_wqs;
+ }
+
rc = smc_core_init();
if (rc) {
pr_err("%s: smc_core_init fails with %d\n", __func__, rc);
- goto out_alloc_wqs;
+ goto out_smc_stat;
}
rc = smc_llc_init();
@@ -2569,6 +2619,8 @@ out_proto:
proto_unregister(&smc_proto);
out_core:
smc_core_exit();
+out_smc_stat:
+ smc_stats_exit();
out_alloc_wqs:
destroy_workqueue(smc_close_wq);
out_alloc_hs_wq:
@@ -2591,6 +2643,7 @@ static void __exit smc_exit(void)
smc_ib_unregister_client();
destroy_workqueue(smc_close_wq);
destroy_workqueue(smc_hs_wq);
+ smc_stats_exit();
proto_unregister(&smc_proto6);
proto_unregister(&smc_proto);
smc_pnet_exit();