author     Bob Pearson <rpearsonhpe@gmail.com>    2023-04-05 07:26:10 +0300
committer  Jason Gunthorpe <jgg@nvidia.com>       2023-04-17 22:01:44 +0300
commit     7b560b89a08d35c23dfc95dc44aee10651c8b9a0
tree       ea3a8b32410bafaa2d2b5d62dad9ad0815fb4d79 /drivers/infiniband/sw/rxe/rxe_req.c
parent     98e891b5e4d94ceb0844de3355c9218027426e72
download   linux-7b560b89a08d35c23dfc95dc44aee10651c8b9a0.tar.xz
RDMA/rxe: Move code to check if drained to subroutine
Move two blocks of code in rxe_comp.c and rxe_req.c to subroutines that
check if draining is complete in the SQD state and, if so, generate an
SQ_DRAINED event.
Link: https://lore.kernel.org/r/20230405042611.6467-4-rpearsonhpe@gmail.com
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Diffstat (limited to 'drivers/infiniband/sw/rxe/rxe_req.c')
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_req.c | 32 ++++++++++++++++++--------------
1 file changed, 18 insertions(+), 14 deletions(-)
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 8a8242512f2a..f329038efbc8 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -108,17 +108,12 @@ void rnr_nak_timer(struct timer_list *t)
 	rxe_sched_task(&qp->req.task);
 }
 
-static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
+static void req_check_sq_drain_done(struct rxe_qp *qp)
 {
-	struct rxe_send_wqe *wqe;
 	struct rxe_queue *q = qp->sq.queue;
 	unsigned int index = qp->req.wqe_index;
-	unsigned int cons;
-	unsigned int prod;
-
-	wqe = queue_head(q, QUEUE_TYPE_FROM_CLIENT);
-	cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT);
-	prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT);
+	unsigned int cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT);
+	struct rxe_send_wqe *wqe = queue_addr_from_index(q, cons);
 
 	if (unlikely(qp_state(qp) == IB_QPS_SQD)) {
 		/* check to see if we are drained;
@@ -126,18 +121,14 @@ static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
 		 */
 		spin_lock_bh(&qp->state_lock);
 		do {
-			if (!qp->attr.sq_draining) {
+			if (!qp->attr.sq_draining)
 				/* comp just finished */
-				spin_unlock_bh(&qp->state_lock);
 				break;
-			}
 
 			if (wqe && ((index != cons) ||
-				    (wqe->state != wqe_state_posted))) {
+				    (wqe->state != wqe_state_posted)))
 				/* comp not done yet */
-				spin_unlock_bh(&qp->state_lock);
 				break;
-			}
 
 			qp->attr.sq_draining = 0;
 			spin_unlock_bh(&qp->state_lock);
@@ -151,9 +142,22 @@ static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
 				qp->ibqp.event_handler(&ev,
 						       qp->ibqp.qp_context);
 			}
+			return;
 		} while (0);
+		spin_unlock_bh(&qp->state_lock);
 	}
+}
 
+static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
+{
+	struct rxe_send_wqe *wqe;
+	struct rxe_queue *q = qp->sq.queue;
+	unsigned int index = qp->req.wqe_index;
+	unsigned int prod;
+
+	req_check_sq_drain_done(qp);
+
+	prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT);
 	if (index == prod)
 		return NULL;
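
For context, the SQ_DRAINED event raised by req_check_sq_drain_done() above
reaches userspace verbs consumers as IBV_EVENT_SQ_DRAINED. The following is a
minimal, hypothetical sketch (not part of this patch) of how a libibverbs
consumer might drain its send queue and wait for that event; it assumes ctx
and qp were created earlier with the usual verbs calls, and error handling is
abbreviated.

#include <stdio.h>
#include <infiniband/verbs.h>

/* Hypothetical helper: move qp to SQD and wait for the drain event. */
static int drain_sq(struct ibv_context *ctx, struct ibv_qp *qp)
{
	struct ibv_qp_attr attr = { .qp_state = IBV_QPS_SQD };
	struct ibv_async_event ev;

	/* Stop the requester from issuing new sends; draining begins. */
	if (ibv_modify_qp(qp, &attr, IBV_QP_STATE))
		return -1;

	/*
	 * Wait for IBV_EVENT_SQ_DRAINED; unrelated async events are
	 * acknowledged and skipped for brevity.
	 */
	do {
		if (ibv_get_async_event(ctx, &ev))
			return -1;
		ibv_ack_async_event(&ev);
	} while (ev.event_type != IBV_EVENT_SQ_DRAINED);

	printf("send queue drained\n");
	return 0;
}

This mirrors the kernel-side logic above: the drain is only reported once the
completer has retired every WQE that was posted before the transition to SQD.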