Diffstat (limited to 'drivers/infiniband/ulp/rtrs/rtrs-clt.c')
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs-clt.c  254
1 file changed, 151 insertions(+), 103 deletions(-)
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
index 0a794d748a7a..f2c40e50f25e 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -32,6 +32,8 @@
#define RTRS_RECONNECT_SEED 8
#define FIRST_CONN 0x01
+/* limit to 128 * 4k = 512k max IO */
+#define RTRS_MAX_SEGMENTS 128
MODULE_DESCRIPTION("RDMA Transport Client");
MODULE_LICENSE("GPL");
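
For context, the new cap's arithmetic matches its comment: 128 segments x 4 KiB per segment = 524,288 bytes = 512 KiB of payload per IO.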
@@ -412,6 +414,7 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
req->inv_errno = errno;
}
+ refcount_inc(&req->ref);
err = rtrs_inv_rkey(req);
if (unlikely(err)) {
rtrs_err(con->c.sess, "Send INV WR key=%#x: %d\n",
@@ -427,10 +430,14 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
return;
}
+ if (!refcount_dec_and_test(&req->ref))
+ return;
}
ib_dma_unmap_sg(sess->s.dev->ib_dev, req->sglist,
req->sg_cnt, req->dir);
}
+ if (!refcount_dec_and_test(&req->ref))
+ return;
if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
atomic_dec(&sess->stats->inflight);
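
For context: the refcount_inc()/refcount_dec_and_test() pairs added in the two hunks above let complete_rdma_req() race safely against the LOCAL_INV completion — whichever path drops the last reference finishes the request exactly once. A minimal sketch of the pattern, using hypothetical demo_* names rather than the driver's real types:

#include <linux/refcount.h>

struct demo_req {
	refcount_t ref;
	void (*done)(struct demo_req *req, int errno);
	int errno;
};

/* One reference for the request's own completion path. */
static void demo_req_start(struct demo_req *req)
{
	refcount_set(&req->ref, 1);
}

/* Posting a LOCAL_INV WR gives its completion handler co-ownership. */
static void demo_post_inv(struct demo_req *req)
{
	refcount_inc(&req->ref);
	/* ... post IB_WR_LOCAL_INV for req here ... */
}

/* Called from every completion path; only the last owner notifies. */
static void demo_req_put(struct demo_req *req)
{
	if (refcount_dec_and_test(&req->ref))
		req->done(req, req->errno);
}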
@@ -438,10 +445,9 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
req->con = NULL;
if (errno) {
- rtrs_err_rl(con->c.sess,
- "IO request failed: error=%d path=%s [%s:%u]\n",
+ rtrs_err_rl(con->c.sess, "IO request failed: error=%d path=%s [%s:%u] notify=%d\n",
errno, kobject_name(&sess->kobj), sess->hca_name,
- sess->hca_port);
+ sess->hca_port, notify);
}
if (notify)
@@ -480,7 +486,7 @@ static int rtrs_post_send_rdma(struct rtrs_clt_con *con,
return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, &sge, 1,
rbuf->rkey, rbuf->addr + off,
- imm, flags, wr);
+ imm, flags, wr, NULL);
}
static void process_io_rsp(struct rtrs_clt_sess *sess, u32 msg_id,
@@ -655,7 +661,6 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
rtrs_err(con->c.sess, "rtrs_post_recv_empty(): %d\n",
err);
rtrs_rdma_error_recovery(con);
- break;
}
break;
case IB_WC_RECV:
@@ -814,6 +819,9 @@ static struct rtrs_clt_sess *get_next_path_min_inflight(struct path_it *it)
int inflight;
list_for_each_entry_rcu(sess, &clt->paths_list, s.entry) {
+ if (unlikely(READ_ONCE(sess->state) != RTRS_CLT_CONNECTED))
+ continue;
+
if (unlikely(!list_empty(raw_cpu_ptr(sess->mp_skip_entry))))
continue;
@@ -913,7 +921,7 @@ static inline void path_it_deinit(struct path_it *it)
}
/**
- * rtrs_clt_init_req() Initialize an rtrs_clt_io_req holding information
+ * rtrs_clt_init_req() - Initialize an rtrs_clt_io_req holding information
* about an inflight IO.
* The user buffer holding the user control message (not data) is copied into
* the corresponding buffer of rtrs_iu (req->iu->buf), which later on will
@@ -954,6 +962,7 @@ static void rtrs_clt_init_req(struct rtrs_clt_io_req *req,
req->need_inv = false;
req->need_inv_comp = false;
req->inv_errno = 0;
+ refcount_set(&req->ref, 1);
iov_iter_kvec(&iter, READ, vec, 1, usr_len);
len = _copy_from_iter(req->iu->buf, usr_len, &iter);
@@ -997,9 +1006,10 @@ rtrs_clt_get_copy_req(struct rtrs_clt_sess *alive_sess,
}
static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con,
- struct rtrs_clt_io_req *req,
- struct rtrs_rbuf *rbuf,
- u32 size, u32 imm)
+ struct rtrs_clt_io_req *req,
+ struct rtrs_rbuf *rbuf, bool fr_en,
+ u32 size, u32 imm, struct ib_send_wr *wr,
+ struct ib_send_wr *tail)
{
struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
struct ib_sge *sge = req->sge;
@@ -1007,18 +1017,28 @@ static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con,
struct scatterlist *sg;
size_t num_sge;
int i;
-
- for_each_sg(req->sglist, sg, req->sg_cnt, i) {
- sge[i].addr = sg_dma_address(sg);
- sge[i].length = sg_dma_len(sg);
- sge[i].lkey = sess->s.dev->ib_pd->local_dma_lkey;
+ struct ib_send_wr *ptail = NULL;
+
+ if (fr_en) {
+ i = 0;
+ sge[i].addr = req->mr->iova;
+ sge[i].length = req->mr->length;
+ sge[i].lkey = req->mr->lkey;
+ i++;
+ num_sge = 2;
+ ptail = tail;
+ } else {
+ for_each_sg(req->sglist, sg, req->sg_cnt, i) {
+ sge[i].addr = sg_dma_address(sg);
+ sge[i].length = sg_dma_len(sg);
+ sge[i].lkey = sess->s.dev->ib_pd->local_dma_lkey;
+ }
+ num_sge = 1 + req->sg_cnt;
}
sge[i].addr = req->iu->dma_addr;
sge[i].length = size;
sge[i].lkey = sess->s.dev->ib_pd->local_dma_lkey;
- num_sge = 1 + req->sg_cnt;
-
/*
* From time to time we have to post signalled sends,
* or send queue will fill up and only QP reset can help.
@@ -1031,7 +1051,22 @@ static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con,
return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, sge, num_sge,
rbuf->rkey, rbuf->addr, imm,
- flags, NULL);
+ flags, wr, ptail);
+}
+
+static int rtrs_map_sg_fr(struct rtrs_clt_io_req *req, size_t count)
+{
+ int nr;
+
+ /* Align the MR to a 4K page size to match the block virt boundary */
+ nr = ib_map_mr_sg(req->mr, req->sglist, count, NULL, SZ_4K);
+ if (nr < 0)
+ return nr;
+ if (unlikely(nr < req->sg_cnt))
+ return -EINVAL;
+ ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey));
+
+ return nr;
}
static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
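
rtrs_map_sg_fr() (moved above its new caller by this patch) pairs ib_map_mr_sg() with an rkey refresh, so a stale key from the MR's previous registration can no longer match. The same idiom in isolation — a sketch assuming a pre-allocated IB_MR_TYPE_MEM_REG MR, not the driver's exact flow:

#include <linux/errno.h>
#include <linux/scatterlist.h>
#include <linux/sizes.h>
#include <rdma/ib_verbs.h>

static int demo_map_fr(struct ib_mr *mr, struct scatterlist *sgl, int count)
{
	/* 4K MR pages to match the block layer's virt boundary */
	int nr = ib_map_mr_sg(mr, sgl, count, NULL, SZ_4K);

	if (nr < count)
		return nr < 0 ? nr : -EINVAL;	/* MR cannot cover the IO */
	/* bump the 8-bit key portion so old rkeys stop matching */
	ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
	return nr;
}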
@@ -1044,6 +1079,10 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
struct rtrs_rbuf *rbuf;
int ret, count = 0;
u32 imm, buf_id;
+ struct ib_reg_wr rwr;
+ struct ib_send_wr inv_wr;
+ struct ib_send_wr *wr = NULL;
+ bool fr_en = false;
const size_t tsize = sizeof(*msg) + req->data_len + req->usr_len;
@@ -1072,15 +1111,43 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
req->sg_size = tsize;
rbuf = &sess->rbufs[buf_id];
+ if (count) {
+ ret = rtrs_map_sg_fr(req, count);
+ if (ret < 0) {
+ rtrs_err_rl(s,
+ "Write request failed, failed to map fast reg. data, err: %d\n",
+ ret);
+ ib_dma_unmap_sg(sess->s.dev->ib_dev, req->sglist,
+ req->sg_cnt, req->dir);
+ return ret;
+ }
+ inv_wr = (struct ib_send_wr) {
+ .opcode = IB_WR_LOCAL_INV,
+ .wr_cqe = &req->inv_cqe,
+ .send_flags = IB_SEND_SIGNALED,
+ .ex.invalidate_rkey = req->mr->rkey,
+ };
+ req->inv_cqe.done = rtrs_clt_inv_rkey_done;
+ rwr = (struct ib_reg_wr) {
+ .wr.opcode = IB_WR_REG_MR,
+ .wr.wr_cqe = &fast_reg_cqe,
+ .mr = req->mr,
+ .key = req->mr->rkey,
+ .access = (IB_ACCESS_LOCAL_WRITE),
+ };
+ wr = &rwr.wr;
+ fr_en = true;
+ refcount_inc(&req->ref);
+ }
/*
* Update stats now, after request is successfully sent it is not
* safe anymore to touch it.
*/
rtrs_clt_update_all_stats(req, WRITE);
- ret = rtrs_post_rdma_write_sg(req->con, req, rbuf,
- req->usr_len + sizeof(*msg),
- imm);
+ ret = rtrs_post_rdma_write_sg(req->con, req, rbuf, fr_en,
+ req->usr_len + sizeof(*msg),
+ imm, wr, &inv_wr);
if (unlikely(ret)) {
rtrs_err_rl(s,
"Write request failed: error=%d path=%s [%s:%u]\n",
@@ -1096,21 +1163,6 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
return ret;
}
-static int rtrs_map_sg_fr(struct rtrs_clt_io_req *req, size_t count)
-{
- int nr;
-
- /* Align the MR to a 4K page size to match the block virt boundary */
- nr = ib_map_mr_sg(req->mr, req->sglist, count, NULL, SZ_4K);
- if (nr < 0)
- return nr;
- if (unlikely(nr < req->sg_cnt))
- return -EINVAL;
- ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey));
-
- return nr;
-}
-
static int rtrs_clt_read_req(struct rtrs_clt_io_req *req)
{
struct rtrs_clt_con *con = req->con;
@@ -1219,7 +1271,7 @@ static int rtrs_clt_read_req(struct rtrs_clt_io_req *req)
}
/**
- * rtrs_clt_failover_req() Try to find an active path for a failed request
+ * rtrs_clt_failover_req() - Try to find an active path for a failed request
* @clt: clt context
* @fail_req: a failed io request.
*/
@@ -1305,7 +1357,6 @@ static void free_sess_reqs(struct rtrs_clt_sess *sess)
static int alloc_sess_reqs(struct rtrs_clt_sess *sess)
{
struct rtrs_clt_io_req *req;
- struct rtrs_clt *clt = sess->clt;
int i, err = -ENOMEM;
sess->reqs = kcalloc(sess->queue_depth, sizeof(*sess->reqs),
@@ -1322,8 +1373,7 @@ static int alloc_sess_reqs(struct rtrs_clt_sess *sess)
if (!req->iu)
goto out;
- req->sge = kmalloc_array(clt->max_segments + 1,
- sizeof(*req->sge), GFP_KERNEL);
+ req->sge = kcalloc(2, sizeof(*req->sge), GFP_KERNEL);
if (!req->sge)
goto out;
@@ -1415,7 +1465,8 @@ static void query_fast_reg_mode(struct rtrs_clt_sess *sess)
sess->max_pages_per_mr =
min3(sess->max_pages_per_mr, (u32)max_pages_per_mr,
ib_dev->attrs.max_fast_reg_page_list_len);
- sess->max_send_sge = ib_dev->attrs.max_send_sge;
+ sess->clt->max_segments =
+ min(sess->max_pages_per_mr, sess->clt->max_segments);
}
static bool rtrs_clt_change_state_get_old(struct rtrs_clt_sess *sess,
@@ -1449,23 +1500,12 @@ static void rtrs_clt_init_hb(struct rtrs_clt_sess *sess)
rtrs_wq);
}
-static void rtrs_clt_start_hb(struct rtrs_clt_sess *sess)
-{
- rtrs_start_hb(&sess->s);
-}
-
-static void rtrs_clt_stop_hb(struct rtrs_clt_sess *sess)
-{
- rtrs_stop_hb(&sess->s);
-}
-
static void rtrs_clt_reconnect_work(struct work_struct *work);
static void rtrs_clt_close_work(struct work_struct *work);
static struct rtrs_clt_sess *alloc_sess(struct rtrs_clt *clt,
- const struct rtrs_addr *path,
- size_t con_num, u16 max_segments,
- u32 nr_poll_queues)
+ const struct rtrs_addr *path,
+ size_t con_num, u32 nr_poll_queues)
{
struct rtrs_clt_sess *sess;
int err = -ENOMEM;
@@ -1505,9 +1545,9 @@ static struct rtrs_clt_sess *alloc_sess(struct rtrs_clt *clt,
if (path->src)
memcpy(&sess->s.src_addr, path->src,
rdma_addr_size((struct sockaddr *)path->src));
- strlcpy(sess->s.sessname, clt->sessname, sizeof(sess->s.sessname));
+ strscpy(sess->s.sessname, clt->sessname, sizeof(sess->s.sessname));
sess->clt = clt;
- sess->max_pages_per_mr = max_segments;
+ sess->max_pages_per_mr = RTRS_MAX_SEGMENTS;
init_waitqueue_head(&sess->state_wq);
sess->state = RTRS_CLT_CONNECTING;
atomic_set(&sess->connected_cnt, 0);
@@ -1581,20 +1621,13 @@ static void destroy_con(struct rtrs_clt_con *con)
static int create_con_cq_qp(struct rtrs_clt_con *con)
{
struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
- u32 max_send_wr, max_recv_wr, cq_size;
+ u32 max_send_wr, max_recv_wr, cq_num, max_send_sge, wr_limit;
int err, cq_vector;
struct rtrs_msg_rkey_rsp *rsp;
lockdep_assert_held(&con->con_mutex);
if (con->c.cid == 0) {
- /*
- * One completion for each receive and two for each send
- * (send request + registration)
- * + 2 for drain and heartbeat
- * in case qp gets into error state
- */
- max_send_wr = SERVICE_CON_QUEUE_DEPTH * 2 + 2;
- max_recv_wr = SERVICE_CON_QUEUE_DEPTH * 2 + 2;
+ max_send_sge = 1;
/* We must be the first here */
if (WARN_ON(sess->s.dev))
return -EINVAL;
@@ -1613,6 +1646,17 @@ static int create_con_cq_qp(struct rtrs_clt_con *con)
}
sess->s.dev_ref = 1;
query_fast_reg_mode(sess);
+ wr_limit = sess->s.dev->ib_dev->attrs.max_qp_wr;
+ /*
+ * Two completions for each send (request + registration),
+ * two for each recv if always_invalidate is set on the server,
+ * or one for each recv otherwise,
+ * + 2 for drain and heartbeat,
+ * in case the qp gets into the error state.
+ */
+ max_send_wr =
+ min_t(int, wr_limit, SERVICE_CON_QUEUE_DEPTH * 2 + 2);
+ max_recv_wr = max_send_wr;
} else {
/*
* Here we assume that session members are correctly set.
@@ -1624,35 +1668,36 @@ static int create_con_cq_qp(struct rtrs_clt_con *con)
if (WARN_ON(!sess->queue_depth))
return -EINVAL;
+ wr_limit = sess->s.dev->ib_dev->attrs.max_qp_wr;
/* Shared between connections */
sess->s.dev_ref++;
- max_send_wr =
- min_t(int, sess->s.dev->ib_dev->attrs.max_qp_wr,
+ max_send_wr = min_t(int, wr_limit,
/* QD * (REQ + RSP + FR REGS or INVS) + drain */
sess->queue_depth * 3 + 1);
- max_recv_wr =
- min_t(int, sess->s.dev->ib_dev->attrs.max_qp_wr,
+ max_recv_wr = min_t(int, wr_limit,
sess->queue_depth * 3 + 1);
+ max_send_sge = 2;
}
+ cq_num = max_send_wr + max_recv_wr;
/* alloc iu to recv new rkey reply when server reports flags set */
if (sess->flags & RTRS_MSG_NEW_RKEY_F || con->c.cid == 0) {
- con->rsp_ius = rtrs_iu_alloc(max_recv_wr, sizeof(*rsp),
+ con->rsp_ius = rtrs_iu_alloc(cq_num, sizeof(*rsp),
GFP_KERNEL, sess->s.dev->ib_dev,
DMA_FROM_DEVICE,
rtrs_clt_rdma_done);
if (!con->rsp_ius)
return -ENOMEM;
- con->queue_size = max_recv_wr;
+ con->queue_num = cq_num;
}
- cq_size = max_send_wr + max_recv_wr;
+ cq_num = max_send_wr + max_recv_wr;
cq_vector = con->cpu % sess->s.dev->ib_dev->num_comp_vectors;
if (con->c.cid >= sess->s.irq_con_num)
- err = rtrs_cq_qp_create(&sess->s, &con->c, sess->max_send_sge,
- cq_vector, cq_size, max_send_wr,
+ err = rtrs_cq_qp_create(&sess->s, &con->c, max_send_sge,
+ cq_vector, cq_num, max_send_wr,
max_recv_wr, IB_POLL_DIRECT);
else
- err = rtrs_cq_qp_create(&sess->s, &con->c, sess->max_send_sge,
- cq_vector, cq_size, max_send_wr,
+ err = rtrs_cq_qp_create(&sess->s, &con->c, max_send_sge,
+ cq_vector, cq_num, max_send_wr,
max_recv_wr, IB_POLL_SOFTIRQ);
/*
* In case of error we do not bother to clean previous allocations,
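
A worked example of the clamping above, with assumed numbers: a session with queue_depth = 1024 asks for 1024 * 3 + 1 = 3073 WRs, but an HCA reporting max_qp_wr = 2048 caps both budgets there, and the CQ is sized for send plus recv completions together:

#include <linux/minmax.h>
#include <linux/printk.h>
#include <linux/types.h>

static void demo_wr_budget(void)
{
	u32 wr_limit = 2048;	/* hypothetical attrs.max_qp_wr   */
	u32 qd = 1024;		/* hypothetical sess->queue_depth */
	u32 max_send_wr = min_t(int, wr_limit, qd * 3 + 1); /* -> 2048 */
	u32 max_recv_wr = min_t(int, wr_limit, qd * 3 + 1); /* -> 2048 */

	pr_info("cq_num = %u\n", max_send_wr + max_recv_wr); /* -> 4096 */
}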
@@ -1672,9 +1717,9 @@ static void destroy_con_cq_qp(struct rtrs_clt_con *con)
lockdep_assert_held(&con->con_mutex);
rtrs_cq_qp_destroy(&con->c);
if (con->rsp_ius) {
- rtrs_iu_free(con->rsp_ius, sess->s.dev->ib_dev, con->queue_size);
+ rtrs_iu_free(con->rsp_ius, sess->s.dev->ib_dev, con->queue_num);
con->rsp_ius = NULL;
- con->queue_size = 0;
+ con->queue_num = 0;
}
if (sess->s.dev_ref && !--sess->s.dev_ref) {
rtrs_ib_dev_put(sess->s.dev);
@@ -1783,12 +1828,19 @@ static int rtrs_rdma_conn_established(struct rtrs_clt_con *con,
if (con->c.cid == 0) {
queue_depth = le16_to_cpu(msg->queue_depth);
- if (queue_depth > MAX_SESS_QUEUE_DEPTH) {
- rtrs_err(clt, "Invalid RTRS message: queue=%d\n",
- queue_depth);
+ if (sess->queue_depth > 0 && queue_depth != sess->queue_depth) {
+ rtrs_err(clt, "Error: queue depth changed\n");
+
+ /*
+ * Stop any more reconnection attempts
+ */
+ sess->reconnect_attempts = -1;
+ rtrs_err(clt,
+ "Disabling auto-reconnect. Trigger a manual reconnect after issue is resolved\n");
return -ECONNRESET;
}
- if (!sess->rbufs || sess->queue_depth < queue_depth) {
+
+ if (!sess->rbufs) {
kfree(sess->rbufs);
sess->rbufs = kcalloc(queue_depth, sizeof(*sess->rbufs),
GFP_KERNEL);
@@ -1802,7 +1854,7 @@ static int rtrs_rdma_conn_established(struct rtrs_clt_con *con,
sess->chunk_size = sess->max_io_size + sess->max_hdr_size;
/*
- * Global queue depth and IO size is always a minimum.
+ * Global IO size is always a minimum.
* If during a reconnection the server sends us a slightly higher
* value, the client does not care and uses the cached minimum.
*
@@ -1810,8 +1862,7 @@ static int rtrs_rdma_conn_established(struct rtrs_clt_con *con,
* connections in parallel, use lock.
*/
mutex_lock(&clt->paths_mutex);
- clt->queue_depth = min_not_zero(sess->queue_depth,
- clt->queue_depth);
+ clt->queue_depth = sess->queue_depth;
clt->max_io_size = min_not_zero(sess->max_io_size,
clt->max_io_size);
mutex_unlock(&clt->paths_mutex);
@@ -1869,7 +1920,7 @@ static int rtrs_rdma_conn_rejected(struct rtrs_clt_con *con,
return -ECONNRESET;
}
-static void rtrs_clt_close_conns(struct rtrs_clt_sess *sess, bool wait)
+void rtrs_clt_close_conns(struct rtrs_clt_sess *sess, bool wait)
{
if (rtrs_clt_change_state_get_old(sess, RTRS_CLT_CLOSING, NULL))
queue_work(rtrs_wq, &sess->close_work);
@@ -2098,7 +2149,7 @@ static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_sess *sess)
*/
synchronize_rcu();
- rtrs_clt_stop_hb(sess);
+ rtrs_stop_hb(&sess->s);
/*
* The order is utterly crucial: first disconnect and complete all
@@ -2291,7 +2342,7 @@ static int init_conns(struct rtrs_clt_sess *sess)
if (err)
goto destroy;
- rtrs_clt_start_hb(sess);
+ rtrs_start_hb(&sess->s);
return 0;
@@ -2465,7 +2516,7 @@ static int rtrs_send_sess_info(struct rtrs_clt_sess *sess)
int err;
rx_sz = sizeof(struct rtrs_msg_info_rsp);
- rx_sz += sizeof(u64) * MAX_SESS_QUEUE_DEPTH;
+ rx_sz += sizeof(struct rtrs_sg_desc) * sess->queue_depth;
tx_iu = rtrs_iu_alloc(1, sizeof(struct rtrs_msg_info_req), GFP_KERNEL,
sess->s.dev->ib_dev, DMA_TO_DEVICE,
@@ -2617,7 +2668,6 @@ static struct rtrs_clt *alloc_clt(const char *sessname, size_t paths_num,
u16 port, size_t pdu_sz, void *priv,
void (*link_ev)(void *priv,
enum rtrs_clt_link_ev ev),
- unsigned int max_segments,
unsigned int reconnect_delay_sec,
unsigned int max_reconnect_attempts)
{
@@ -2646,13 +2696,13 @@ static struct rtrs_clt *alloc_clt(const char *sessname, size_t paths_num,
clt->paths_up = MAX_PATHS_NUM;
clt->port = port;
clt->pdu_sz = pdu_sz;
- clt->max_segments = max_segments;
+ clt->max_segments = RTRS_MAX_SEGMENTS;
clt->reconnect_delay_sec = reconnect_delay_sec;
clt->max_reconnect_attempts = max_reconnect_attempts;
clt->priv = priv;
clt->link_ev = link_ev;
clt->mp_policy = MP_POLICY_MIN_INFLIGHT;
- strlcpy(clt->sessname, sessname, sizeof(clt->sessname));
+ strscpy(clt->sessname, sessname, sizeof(clt->sessname));
init_waitqueue_head(&clt->permits_wait);
mutex_init(&clt->paths_ev_mutex);
mutex_init(&clt->paths_mutex);
@@ -2715,7 +2765,6 @@ static void free_clt(struct rtrs_clt *clt)
* @port: port to be used by the RTRS session
* @pdu_sz: Size of extra payload which can be accessed after permit allocation.
* @reconnect_delay_sec: time between reconnect tries
- * @max_segments: Max. number of segments per IO request
* @max_reconnect_attempts: Number of times to reconnect on error before giving
* up, 0 for disabled, -1 for forever
* @nr_poll_queues: number of polling mode connection using IB_POLL_DIRECT flag
@@ -2730,7 +2779,6 @@ struct rtrs_clt *rtrs_clt_open(struct rtrs_clt_ops *ops,
const struct rtrs_addr *paths,
size_t paths_num, u16 port,
size_t pdu_sz, u8 reconnect_delay_sec,
- u16 max_segments,
s16 max_reconnect_attempts, u32 nr_poll_queues)
{
struct rtrs_clt_sess *sess, *tmp;
@@ -2739,7 +2787,7 @@ struct rtrs_clt *rtrs_clt_open(struct rtrs_clt_ops *ops,
clt = alloc_clt(sessname, paths_num, port, pdu_sz, ops->priv,
ops->link_ev,
- max_segments, reconnect_delay_sec,
+ reconnect_delay_sec,
max_reconnect_attempts);
if (IS_ERR(clt)) {
err = PTR_ERR(clt);
@@ -2749,7 +2797,7 @@ struct rtrs_clt *rtrs_clt_open(struct rtrs_clt_ops *ops,
struct rtrs_clt_sess *sess;
sess = alloc_sess(clt, &paths[i], nr_cpu_ids,
- max_segments, nr_poll_queues);
+ nr_poll_queues);
if (IS_ERR(sess)) {
err = PTR_ERR(sess);
goto close_all_sess;
@@ -2762,6 +2810,8 @@ struct rtrs_clt *rtrs_clt_open(struct rtrs_clt_ops *ops,
if (err) {
list_del_rcu(&sess->s.entry);
rtrs_clt_close_conns(sess, true);
+ free_percpu(sess->stats->pcpu_stats);
+ kfree(sess->stats);
free_sess(sess);
goto close_all_sess;
}
@@ -2770,6 +2820,8 @@ struct rtrs_clt *rtrs_clt_open(struct rtrs_clt_ops *ops,
if (err) {
list_del_rcu(&sess->s.entry);
rtrs_clt_close_conns(sess, true);
+ free_percpu(sess->stats->pcpu_stats);
+ kfree(sess->stats);
free_sess(sess);
goto close_all_sess;
}
@@ -2841,13 +2893,6 @@ int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_sess *sess)
return err;
}
-int rtrs_clt_disconnect_from_sysfs(struct rtrs_clt_sess *sess)
-{
- rtrs_clt_close_conns(sess, true);
-
- return 0;
-}
-
int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_sess *sess,
const struct attribute *sysfs_self)
{
@@ -3014,6 +3059,7 @@ int rtrs_clt_query(struct rtrs_clt *clt, struct rtrs_attrs *attr)
return -ECOMM;
attr->queue_depth = clt->queue_depth;
+ attr->max_segments = clt->max_segments;
/* Cap max_io_size to min of remote buffer size and the fr pages */
attr->max_io_size = min_t(int, clt->max_io_size,
clt->max_segments * SZ_4K);
@@ -3028,7 +3074,7 @@ int rtrs_clt_create_path_from_sysfs(struct rtrs_clt *clt,
struct rtrs_clt_sess *sess;
int err;
- sess = alloc_sess(clt, addr, nr_cpu_ids, clt->max_segments, 0);
+ sess = alloc_sess(clt, addr, nr_cpu_ids, 0);
if (IS_ERR(sess))
return PTR_ERR(sess);
@@ -3052,6 +3098,8 @@ int rtrs_clt_create_path_from_sysfs(struct rtrs_clt *clt,
close_sess:
rtrs_clt_remove_path_from_arr(sess);
rtrs_clt_close_conns(sess, true);
+ free_percpu(sess->stats->pcpu_stats);
+ kfree(sess->stats);
free_sess(sess);
return err;