Diffstat:
 drivers/infiniband/ulp/rtrs/rtrs-srv.c | 72 ++++++++++--------------
 1 file changed, 29 insertions(+), 43 deletions(-)
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
index 22d7ba05e9fe..d1703e2c0b82 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
@@ -561,9 +561,11 @@ static int map_cont_bufs(struct rtrs_srv_path *srv_path)
{
struct rtrs_srv_sess *srv = srv_path->srv;
struct rtrs_path *ss = &srv_path->s;
- int i, mri, err, mrs_num;
+ int i, err, mrs_num;
unsigned int chunk_bits;
int chunks_per_mr = 1;
+ struct ib_mr *mr;
+ struct sg_table *sgt;
/*
* Here we map queue_depth chunks to MR. Firstly we have to
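[Annotation] This first hunk hoists `mr` and `sgt` out of the per-iteration scope: the consolidated error labels at the end of the function (see the last hunk of map_cont_bufs below) must still be able to reach the failing iteration's resources. A minimal userspace C sketch of that constraint, with a hypothetical stand-in type and failure condition:

    struct res { int live; };

    static int setup_loop(struct res *arr, int n)
    {
        struct res *r = NULL;   /* function scope: the label below uses it */
        int i;

        for (i = 0; i < n; i++) {
            r = &arr[i];
            r->live = 1;
            if (n > 4)          /* hypothetical failure condition */
                goto undo_cur;
        }
        return 0;

    undo_cur:
        r->live = 0;            /* r still points at the iteration that failed */
        return -1;
    }
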
@@ -586,16 +588,14 @@ static int map_cont_bufs(struct rtrs_srv_path *srv_path)
if (!srv_path->mrs)
return -ENOMEM;
- srv_path->mrs_num = mrs_num;
-
- for (mri = 0; mri < mrs_num; mri++) {
- struct rtrs_srv_mr *srv_mr = &srv_path->mrs[mri];
- struct sg_table *sgt = &srv_mr->sgt;
+ for (srv_path->mrs_num = 0; srv_path->mrs_num < mrs_num;
+ srv_path->mrs_num++) {
+ struct rtrs_srv_mr *srv_mr = &srv_path->mrs[srv_path->mrs_num];
struct scatterlist *s;
- struct ib_mr *mr;
int nr, nr_sgt, chunks;
- chunks = chunks_per_mr * mri;
+ sgt = &srv_mr->sgt;
+ chunks = chunks_per_mr * srv_path->mrs_num;
if (!always_invalidate)
chunks_per_mr = min_t(int, chunks_per_mr,
srv->queue_depth - chunks);
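[Annotation] The separate loop index `mri` is gone: the loop now advances `srv_path->mrs_num` itself, so at any failure point the field already records how many MRs were fully initialized, and a shared teardown helper can free exactly that many. A minimal sketch of the pattern, using a hypothetical `struct ctx` and `teardown_done()` helper:

    struct ctx {
        int done;               /* persistent progress counter */
        int item[8];
    };

    static void teardown_done(struct ctx *c)
    {
        while (c->done--)
            c->item[c->done] = 0;   /* undo completed steps only */
    }

    static int setup(struct ctx *c, int n)
    {
        for (c->done = 0; c->done < n; c->done++) {
            if (c->done >= 8) {     /* hypothetical failure (array bound) */
                teardown_done(c);   /* done == number of completed steps */
                return -1;
            }
            c->item[c->done] = 1;   /* commit this step last */
        }
        return 0;
    }
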
@@ -622,7 +622,7 @@ static int map_cont_bufs(struct rtrs_srv_path *srv_path)
}
nr = ib_map_mr_sg(mr, sgt->sgl, nr_sgt,
NULL, max_chunk_size);
- if (nr < 0 || nr < sgt->nents) {
+ if (nr != nr_sgt) {
err = nr < 0 ? nr : -EINVAL;
goto dereg_mr;
}
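[Annotation] ib_map_mr_sg() returns the number of scatterlist entries it consumed, or a negative errno. The call passes `nr_sgt` entries, so success means consuming exactly that many; comparing against `nr_sgt` covers both the negative-errno case and a short mapping, whereas the old code compared against `sgt->nents`, which is not necessarily the count that was handed to the verbs layer. A sketch of the check, with a stand-in `map_entries()` in place of the verbs call:

    #include <errno.h>

    static int map_entries(int requested)   /* stub for ib_map_mr_sg() */
    {
        return requested;
    }

    static int map_or_fail(int nr_sgt)
    {
        int nr = map_entries(nr_sgt);

        if (nr != nr_sgt)       /* covers nr < 0 and short mappings */
            return nr < 0 ? nr : -EINVAL;
        return 0;
    }
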
@@ -644,31 +644,24 @@ static int map_cont_bufs(struct rtrs_srv_path *srv_path)
ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
srv_mr->mr = mr;
-
- continue;
-err:
- while (mri--) {
- srv_mr = &srv_path->mrs[mri];
- sgt = &srv_mr->sgt;
- mr = srv_mr->mr;
- rtrs_iu_free(srv_mr->iu, srv_path->s.dev->ib_dev, 1);
-dereg_mr:
- ib_dereg_mr(mr);
-unmap_sg:
- ib_dma_unmap_sg(srv_path->s.dev->ib_dev, sgt->sgl,
- sgt->nents, DMA_BIDIRECTIONAL);
-free_sg:
- sg_free_table(sgt);
- }
- kfree(srv_path->mrs);
-
- return err;
}
chunk_bits = ilog2(srv->queue_depth - 1) + 1;
srv_path->mem_bits = (MAX_IMM_PAYL_BITS - chunk_bits);
return 0;
+
+dereg_mr:
+ ib_dereg_mr(mr);
+unmap_sg:
+ ib_dma_unmap_sg(srv_path->s.dev->ib_dev, sgt->sgl,
+ sgt->nents, DMA_BIDIRECTIONAL);
+free_sg:
+ sg_free_table(sgt);
+err:
+ unmap_cont_bufs(srv_path);
+
+ return err;
}
static void rtrs_srv_hb_err_handler(struct rtrs_con *c)
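[Annotation] The error handling moves out of the loop body into a single unwind ladder placed after the success return: `dereg_mr`, `unmap_sg`, and `free_sg` undo the failing iteration's own acquisitions in reverse order, and `err` hands everything from earlier, fully initialized iterations to unmap_cont_bufs(). A minimal userspace sketch of the ladder shape, with hypothetical acquire/release helpers:

    #include <errno.h>

    static int acquire_a(void) { return 0; }        /* stubs */
    static int acquire_b(void) { return -ENOMEM; }
    static void release_a(void) { }

    static int do_setup(void)
    {
        int err;

        err = acquire_a();      /* step 1 */
        if (err)
            goto out;
        err = acquire_b();      /* step 2 */
        if (err)
            goto undo_a;        /* unwind step 1 only */
        return 0;               /* success path exits before the labels */

    undo_a:
        release_a();            /* reverse order of acquisition */
    out:
        return err;
    }
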
@@ -1678,12 +1671,6 @@ static int create_con(struct rtrs_srv_path *srv_path,
srv->queue_depth * (1 + 2) + 1);
max_recv_wr = srv->queue_depth + 1;
- /*
- * If we have all receive requests posted and
- * all write requests posted and each read request
- * requires an invalidate request + drain
- * and qp gets into error state.
- */
}
cq_num = max_send_wr + max_recv_wr;
atomic_set(&con->c.sq_wr_avail, max_send_wr);
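[Annotation] The deleted comment was an unfinished sentence; the arithmetic it tried to justify survives in the surrounding context, sizing the CQ to hold a completion for every possible send and receive work request. A sketch of that sizing, with the device-capability clamp visible in the context above elided:

    static int cq_slots(int queue_depth)
    {
        int max_send_wr = queue_depth * (1 + 2) + 1;
        int max_recv_wr = queue_depth + 1;

        return max_send_wr + max_recv_wr;   /* one CQE slot per WR */
    }
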
@@ -1950,22 +1937,21 @@ static int rtrs_srv_rdma_cm_handler(struct rdma_cm_id *cm_id,
{
struct rtrs_srv_path *srv_path = NULL;
struct rtrs_path *s = NULL;
+ struct rtrs_con *c = NULL;
- if (ev->event != RDMA_CM_EVENT_CONNECT_REQUEST) {
- struct rtrs_con *c = cm_id->context;
-
- s = c->path;
- srv_path = to_srv_path(s);
- }
-
- switch (ev->event) {
- case RDMA_CM_EVENT_CONNECT_REQUEST:
+ if (ev->event == RDMA_CM_EVENT_CONNECT_REQUEST)
/*
* In case of error cma.c will destroy cm_id,
* see cma_process_remove()
*/
return rtrs_rdma_connect(cm_id, ev->param.conn.private_data,
ev->param.conn.private_data_len);
+
+ c = cm_id->context;
+ s = c->path;
+ srv_path = to_srv_path(s);
+
+ switch (ev->event) {
case RDMA_CM_EVENT_ESTABLISHED:
/* Nothing here */
break;
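[Annotation] The handler previously dereferenced cm_id->context for every event except CONNECT_REQUEST in an up-front if, then switched on the event anyway. The rewrite peels off CONNECT_REQUEST, the only event that arrives before the context is populated, with an early return; all remaining cases can then derive the connection, path, and session unconditionally. A minimal sketch of the shape, with stand-in event and context types:

    enum ev { EV_CONNECT, EV_ESTABLISHED, EV_DISCONNECT };

    struct conn { int state; };

    static int do_connect(void) { return 0; }   /* stub */

    static int handler(enum ev e, void *context)
    {
        struct conn *c;

        if (e == EV_CONNECT)
            return do_connect();    /* no per-connection context exists yet */

        c = context;                /* every later event has a connection */
        switch (e) {
        case EV_ESTABLISHED:
            break;                  /* nothing to do */
        case EV_DISCONNECT:
            c->state = -1;
            break;
        default:
            break;
        }
        return 0;
    }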