author     Bob Pearson <rpearsonhpe@gmail.com>   2023-03-04 20:45:30 +0300
committer  Jason Gunthorpe <jgg@nvidia.com>      2023-03-24 17:21:36 +0300
commit     fbdeb828a21ff8de045a27ddcfc6ee66201b9a94 (patch)
tree       cd313b5a63a3b2c4b75831692590ce422fe8650f /drivers/infiniband/sw/rxe
parent     49dc9c1f0c7e396654a31a480328fffd902fa494 (diff)
download   linux-fbdeb828a21ff8de045a27ddcfc6ee66201b9a94.tar.xz
RDMA/rxe: Cleanup error state handling in rxe_comp.c
Clean up the handling of the qp in the error state, the reset state, and during
rxe_qp_do_cleanup. Make it the same as in rxe_resp.c.

Link: https://lore.kernel.org/r/20230304174533.11296-5-rpearsonhpe@gmail.com
Signed-off-by: Ian Ziemba <ian.ziemba@hpe.com>
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
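For orientation, here is a minimal sketch (assembled from the hunks below, not the literal patched file) of what the error/reset path in rxe_completer() looks like after this change: the response packet queue is always drained, and the send queue is flushed, with flush-error completions posted only when the qp is valid and in the error state.

	if (!qp->valid || qp->comp.state == QP_STATE_ERROR ||
	    qp->comp.state == QP_STATE_RESET) {
		bool notify = qp->valid &&
				(qp->comp.state == QP_STATE_ERROR);

		drain_resp_pkts(qp);		/* free queued response skbs */
		flush_send_queue(qp, notify);	/* flush SQ; post CQEs only if notify */
		goto exit;
	}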
Diffstat (limited to 'drivers/infiniband/sw/rxe')
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_comp.c  | 55
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_cq.c    |  1
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_resp.c  | 28
3 files changed, 61 insertions(+), 23 deletions(-)
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index cbfa16b3a490..f7ab0dfe1034 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -542,25 +542,60 @@ static inline enum comp_state complete_wqe(struct rxe_qp *qp,
return COMPST_GET_WQE;
}
-static void rxe_drain_resp_pkts(struct rxe_qp *qp, bool notify)
+/* drain incoming response packet queue */
+static void drain_resp_pkts(struct rxe_qp *qp)
{
struct sk_buff *skb;
- struct rxe_send_wqe *wqe;
- struct rxe_queue *q = qp->sq.queue;
while ((skb = skb_dequeue(&qp->resp_pkts))) {
rxe_put(qp);
kfree_skb(skb);
ib_device_put(qp->ibqp.device);
}
+}
+
+/* complete send wqe with flush error */
+static int flush_send_wqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
+{
+ struct rxe_cqe cqe = {};
+ struct ib_wc *wc = &cqe.ibwc;
+ struct ib_uverbs_wc *uwc = &cqe.uibwc;
+ int err;
+
+ if (qp->is_user) {
+ uwc->wr_id = wqe->wr.wr_id;
+ uwc->status = IB_WC_WR_FLUSH_ERR;
+ uwc->qp_num = qp->ibqp.qp_num;
+ } else {
+ wc->wr_id = wqe->wr.wr_id;
+ wc->status = IB_WC_WR_FLUSH_ERR;
+ wc->qp = &qp->ibqp;
+ }
+
+ err = rxe_cq_post(qp->scq, &cqe, 0);
+ if (err)
+ rxe_dbg_cq(qp->scq, "post cq failed, err = %d", err);
+
+ return err;
+}
+
+/* drain and optionally complete the send queue
+ * if unable to complete a wqe, i.e. cq is full, stop
+ * completing and flush the remaining wqes
+ */
+static void flush_send_queue(struct rxe_qp *qp, bool notify)
+{
+ struct rxe_send_wqe *wqe;
+ struct rxe_queue *q = qp->sq.queue;
+ int err;
while ((wqe = queue_head(q, q->type))) {
if (notify) {
- wqe->status = IB_WC_WR_FLUSH_ERR;
- do_complete(qp, wqe);
- } else {
- queue_advance_consumer(q, q->type);
+ err = flush_send_wqe(qp, wqe);
+ if (err)
+ notify = 0;
}
+ queue_advance_consumer(q, q->type);
}
}
@@ -589,8 +624,10 @@ int rxe_completer(struct rxe_qp *qp)
if (!qp->valid || qp->comp.state == QP_STATE_ERROR ||
qp->comp.state == QP_STATE_RESET) {
- rxe_drain_resp_pkts(qp, qp->valid &&
- qp->comp.state == QP_STATE_ERROR);
+ bool notify = qp->valid &&
+ (qp->comp.state == QP_STATE_ERROR);
+ drain_resp_pkts(qp);
+ flush_send_queue(qp, notify);
goto exit;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c
index 22fbc198e5d1..66a13c935d50 100644
--- a/drivers/infiniband/sw/rxe/rxe_cq.c
+++ b/drivers/infiniband/sw/rxe/rxe_cq.c
@@ -114,6 +114,7 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
full = queue_full(cq->queue, QUEUE_TYPE_TO_CLIENT);
if (unlikely(full)) {
+ rxe_err_cq(cq, "queue full");
spin_unlock_irqrestore(&cq->cq_lock, flags);
if (cq->ibcq.event_handler) {
ev.device = cq->ibcq.device;
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 8f9bbb14fa7a..2f71183449f9 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -1396,7 +1396,7 @@ static enum resp_states do_class_d1e_error(struct rxe_qp *qp)
}
/* drain incoming request packet queue */
-static void rxe_drain_req_pkts(struct rxe_qp *qp)
+static void drain_req_pkts(struct rxe_qp *qp)
{
struct sk_buff *skb;
@@ -1408,33 +1408,35 @@ static void rxe_drain_req_pkts(struct rxe_qp *qp)
}
/* complete receive wqe with flush error */
-static int complete_flush(struct rxe_qp *qp, struct rxe_recv_wqe *wqe)
+static int flush_recv_wqe(struct rxe_qp *qp, struct rxe_recv_wqe *wqe)
{
struct rxe_cqe cqe = {};
struct ib_wc *wc = &cqe.ibwc;
struct ib_uverbs_wc *uwc = &cqe.uibwc;
+ int err;
if (qp->rcq->is_user) {
+ uwc->wr_id = wqe->wr_id;
uwc->status = IB_WC_WR_FLUSH_ERR;
uwc->qp_num = qp_num(qp);
- uwc->wr_id = wqe->wr_id;
} else {
+ wc->wr_id = wqe->wr_id;
wc->status = IB_WC_WR_FLUSH_ERR;
wc->qp = &qp->ibqp;
- wc->wr_id = wqe->wr_id;
}
- if (rxe_cq_post(qp->rcq, &cqe, 0))
- return -ENOMEM;
+ err = rxe_cq_post(qp->rcq, &cqe, 0);
+ if (err)
+ rxe_dbg_cq(qp->rcq, "post cq failed err = %d", err);
- return 0;
+ return err;
}
/* drain and optionally complete the receive queue
* if unable to complete a wqe stop completing and
* just flush the remaining wqes
*/
-static void rxe_drain_recv_queue(struct rxe_qp *qp, bool notify)
+static void flush_recv_queue(struct rxe_qp *qp, bool notify)
{
struct rxe_queue *q = qp->rq.queue;
struct rxe_recv_wqe *wqe;
@@ -1445,11 +1447,9 @@ static void rxe_drain_recv_queue(struct rxe_qp *qp, bool notify)
while ((wqe = queue_head(q, q->type))) {
if (notify) {
- err = complete_flush(qp, wqe);
- if (err) {
- rxe_dbg_qp(qp, "complete failed for recv wqe");
+ err = flush_recv_wqe(qp, wqe);
+ if (err)
notify = 0;
- }
}
queue_advance_consumer(q, q->type);
}
@@ -1471,8 +1471,8 @@ int rxe_responder(struct rxe_qp *qp)
qp->resp.state == QP_STATE_RESET) {
bool notify = qp->valid &&
(qp->resp.state == QP_STATE_ERROR);
- rxe_drain_req_pkts(qp);
- rxe_drain_recv_queue(qp, notify);
+ drain_req_pkts(qp);
+ flush_recv_queue(qp, notify);
goto exit;
}
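Both the completer and responder error paths now share the same flush policy: try to post a flush-error CQE for each outstanding WQE, and if a post fails (for example because the CQ is full, which rxe_cq_post() now reports via rxe_err_cq()), stop posting completions but keep advancing the consumer so the queue still drains. A small standalone illustration of that policy (plain userspace C, not kernel code; post_completion() is a hypothetical stand-in for rxe_cq_post()):

	#include <stdbool.h>
	#include <stdio.h>

	/* stand-in for rxe_cq_post(): pretend the CQ fills up after three entries */
	static int post_completion(int wqe)
	{
		return (wqe < 3) ? 0 : -1;
	}

	int main(void)
	{
		bool notify = true;

		/* drain six outstanding WQEs */
		for (int wqe = 0; wqe < 6; wqe++) {
			if (notify && post_completion(wqe))
				notify = false;	/* CQ full: stop completing */
			printf("wqe %d: %s\n", wqe,
			       notify ? "completed with flush error" : "dropped silently");
			/* the consumer is always advanced, so the queue empties either way */
		}
		return 0;
	}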