author		Bob Pearson <rpearsonhpe@gmail.com>	2023-03-04 20:45:33 +0300
committer	Jason Gunthorpe <jgg@nvidia.com>	2023-03-24 17:21:36 +0300
commit		f455a1bc972cefc4bf2dbf1bf37a36bb51a5f7e7 (patch)
tree		80bd745e4a7fca21d11bd5271ff9bf6af6ba5b49 /drivers/infiniband/sw/rxe
parent		960ebe97e5238565d15063c8f4d1b2108efe2e65 (diff)
download	linux-f455a1bc972cefc4bf2dbf1bf37a36bb51a5f7e7.tar.xz
RDMA/rxe: Make tasks schedule each other
Replace rxe_run_task() with rxe_sched_task() when tasks call each other.
These calls are not performance critical and mainly involve error paths,
but they run the risk of causing deadlocks.

Link: https://lore.kernel.org/r/20230304174533.11296-8-rpearsonhpe@gmail.com
Signed-off-by: Ian Ziemba <ian.ziemba@hpe.com>
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
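The distinction the commit relies on is that rxe_run_task() executes the task body immediately in the caller's context, while rxe_sched_task() only queues the task and lets it run later from its own deferred context. When the completer and requester tasks invoke each other inline, their bodies nest in one call chain and can deadlock on shared queue-pair state; scheduling breaks that nesting. The following is a minimal sketch of that difference only: the struct layout and the *_sketch helpers are simplified illustrations, not the driver's actual rxe_task.c code.

	/*
	 * Minimal sketch, assuming a simplified task layout; the real
	 * rxe_task implementation lives in rxe_task.c / rxe_task.h.
	 */
	#include <linux/interrupt.h>

	struct rxe_qp;				/* opaque here; defined in rxe_verbs.h */

	struct rxe_task_sketch {
		struct tasklet_struct tasklet;	/* deferred execution context */
		int (*func)(struct rxe_qp *qp);	/* task body, e.g. the requester or completer */
		struct rxe_qp *qp;
	};

	/* runs the task body immediately, in the caller's context */
	static inline void rxe_run_task_sketch(struct rxe_task_sketch *task)
	{
		task->func(task->qp);
	}

	/* only queues the task; the body runs later from tasklet context */
	static inline void rxe_sched_task_sketch(struct rxe_task_sketch *task)
	{
		tasklet_schedule(&task->tasklet);
	}

With the sketch above, a completer body that ends in rxe_run_task_sketch(&qp->req_task) would re-enter the requester before returning; switching to rxe_sched_task_sketch() defers that work, which is the substitution this patch applies on the non-performance-critical paths below.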
Diffstat (limited to 'drivers/infiniband/sw/rxe')
-rw-r--r--	drivers/infiniband/sw/rxe/rxe_comp.c	8
-rw-r--r--	drivers/infiniband/sw/rxe/rxe_req.c	4
2 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index 7aa8e90bdfe4..2c70cdcd55dc 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -322,7 +322,7 @@ static inline enum comp_state check_ack(struct rxe_qp *qp,
qp->comp.psn = pkt->psn;
if (qp->req.wait_psn) {
qp->req.wait_psn = 0;
- rxe_run_task(&qp->req.task);
+ rxe_sched_task(&qp->req.task);
}
}
return COMPST_ERROR_RETRY;
@@ -473,7 +473,7 @@ static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
*/
if (qp->req.wait_fence) {
qp->req.wait_fence = 0;
- rxe_run_task(&qp->req.task);
+ rxe_sched_task(&qp->req.task);
}
}
@@ -487,7 +487,7 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp,
if (qp->req.need_rd_atomic) {
qp->comp.timeout_retry = 0;
qp->req.need_rd_atomic = 0;
- rxe_run_task(&qp->req.task);
+ rxe_sched_task(&qp->req.task);
}
}
@@ -767,7 +767,7 @@ int rxe_completer(struct rxe_qp *qp)
RXE_CNT_COMP_RETRY);
qp->req.need_retry = 1;
qp->comp.started_retry = 1;
- rxe_run_task(&qp->req.task);
+ rxe_sched_task(&qp->req.task);
}
goto done;
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index abc65c54bfd6..745731140a54 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -753,7 +753,7 @@ int rxe_requester(struct rxe_qp *qp)
qp->req.wqe_index);
wqe->state = wqe_state_done;
wqe->status = IB_WC_SUCCESS;
- rxe_run_task(&qp->comp.task);
+ rxe_sched_task(&qp->comp.task);
goto done;
}
payload = mtu;
@@ -837,7 +837,7 @@ err:
qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index);
wqe->state = wqe_state_error;
qp->req.state = QP_STATE_ERROR;
- rxe_run_task(&qp->comp.task);
+ rxe_sched_task(&qp->comp.task);
exit:
ret = -EAGAIN;
out: