Diffstat (limited to 'drivers/infiniband/sw/rxe/rxe_comp.c')
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_comp.c  | 161
1 file changed, 106 insertions(+), 55 deletions(-)
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index 20737fec392b..db18ace74d2b 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -118,10 +118,12 @@ void retransmit_timer(struct timer_list *t)
rxe_dbg_qp(qp, "retransmit timer fired\n");
+ spin_lock_bh(&qp->state_lock);
if (qp->valid) {
qp->comp.timeout = 1;
rxe_sched_task(&qp->comp.task);
}
+ spin_unlock_bh(&qp->state_lock);
}
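
The new spin_lock_bh(&qp->state_lock) around the qp->valid test closes a race with QP teardown, which clears qp->valid under the same lock. A minimal sketch of the two sides of that race; the teardown side is written from memory as an illustration, not the exact driver code:

/* timer side: only arm more work while the QP is still alive */
static void timer_side(struct rxe_qp *qp)
{
        spin_lock_bh(&qp->state_lock);
        if (qp->valid) {
                qp->comp.timeout = 1;
                rxe_sched_task(&qp->comp.task);
        }
        spin_unlock_bh(&qp->state_lock);
}

/* teardown side (illustrative): once qp->valid is cleared under the
 * same lock, the timer handler above can no longer schedule the
 * completer task, so timers and tasks can be drained and freed safely.
 */
static void teardown_side(struct rxe_qp *qp)
{
        spin_lock_bh(&qp->state_lock);
        qp->valid = 0;
        spin_unlock_bh(&qp->state_lock);
        /* ... del_timer_sync(), flush tasks, release resources ... */
}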
void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
@@ -322,7 +324,7 @@ static inline enum comp_state check_ack(struct rxe_qp *qp,
qp->comp.psn = pkt->psn;
if (qp->req.wait_psn) {
qp->req.wait_psn = 0;
- rxe_run_task(&qp->req.task);
+ rxe_sched_task(&qp->req.task);
}
}
return COMPST_ERROR_RETRY;
@@ -428,6 +430,10 @@ static void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
uwc->wc_flags = IB_WC_WITH_IMM;
uwc->byte_len = wqe->dma.length;
}
+ } else {
+ if (wqe->status != IB_WC_WR_FLUSH_ERR)
+ rxe_err_qp(qp, "non-flush error status = %d",
+ wqe->status);
}
}
@@ -469,30 +475,16 @@ static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
*/
if (qp->req.wait_fence) {
qp->req.wait_fence = 0;
- rxe_run_task(&qp->req.task);
+ rxe_sched_task(&qp->req.task);
}
}
-static inline enum comp_state complete_ack(struct rxe_qp *qp,
- struct rxe_pkt_info *pkt,
- struct rxe_send_wqe *wqe)
+static void comp_check_sq_drain_done(struct rxe_qp *qp)
{
- if (wqe->has_rd_atomic) {
- wqe->has_rd_atomic = 0;
- atomic_inc(&qp->req.rd_atomic);
- if (qp->req.need_rd_atomic) {
- qp->comp.timeout_retry = 0;
- qp->req.need_rd_atomic = 0;
- rxe_run_task(&qp->req.task);
- }
- }
-
- if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
- /* state_lock used by requester & completer */
- spin_lock_bh(&qp->state_lock);
- if ((qp->req.state == QP_STATE_DRAIN) &&
- (qp->comp.psn == qp->req.psn)) {
- qp->req.state = QP_STATE_DRAINED;
+ spin_lock_bh(&qp->state_lock);
+ if (unlikely(qp_state(qp) == IB_QPS_SQD)) {
+ if (qp->attr.sq_draining && qp->comp.psn == qp->req.psn) {
+ qp->attr.sq_draining = 0;
spin_unlock_bh(&qp->state_lock);
if (qp->ibqp.event_handler) {
@@ -504,10 +496,27 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp,
qp->ibqp.event_handler(&ev,
qp->ibqp.qp_context);
}
- } else {
- spin_unlock_bh(&qp->state_lock);
+ return;
}
}
+ spin_unlock_bh(&qp->state_lock);
+}
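
comp_check_sq_drain_done() is the driver half of a consumer-initiated SQ drain: once comp.psn catches up with req.psn, sq_draining is cleared and IB_EVENT_SQ_DRAINED is raised. A hedged sketch of the consumer half in a kernel ULP, using standard verbs calls; the my_* names are illustrative:

#include <rdma/ib_verbs.h>
#include <linux/completion.h>

struct my_ctx {
        struct completion sq_drained;
};

/* async event handler registered via ib_qp_init_attr.event_handler */
static void my_qp_event(struct ib_event *ev, void *ctx)
{
        struct my_ctx *my = ctx;

        if (ev->event == IB_EVENT_SQ_DRAINED)
                complete(&my->sq_drained);      /* all prior sends finished */
}

static int my_drain_sq(struct ib_qp *qp, struct my_ctx *my)
{
        struct ib_qp_attr attr = { .qp_state = IB_QPS_SQD };
        int err;

        /* moving to SQD starts the drain; the completer signals
         * IB_EVENT_SQ_DRAINED once every posted send is acknowledged
         */
        err = ib_modify_qp(qp, &attr, IB_QP_STATE);
        if (err)
                return err;

        wait_for_completion(&my->sq_drained);
        return 0;
}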
+
+static inline enum comp_state complete_ack(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt,
+ struct rxe_send_wqe *wqe)
+{
+ if (wqe->has_rd_atomic) {
+ wqe->has_rd_atomic = 0;
+ atomic_inc(&qp->req.rd_atomic);
+ if (qp->req.need_rd_atomic) {
+ qp->comp.timeout_retry = 0;
+ qp->req.need_rd_atomic = 0;
+ rxe_sched_task(&qp->req.task);
+ }
+ }
+
+ comp_check_sq_drain_done(qp);
do_complete(qp, wqe);
@@ -538,25 +547,60 @@ static inline enum comp_state complete_wqe(struct rxe_qp *qp,
return COMPST_GET_WQE;
}
-static void rxe_drain_resp_pkts(struct rxe_qp *qp, bool notify)
+/* drain incoming response packet queue */
+static void drain_resp_pkts(struct rxe_qp *qp)
{
struct sk_buff *skb;
- struct rxe_send_wqe *wqe;
- struct rxe_queue *q = qp->sq.queue;
while ((skb = skb_dequeue(&qp->resp_pkts))) {
rxe_put(qp);
kfree_skb(skb);
ib_device_put(qp->ibqp.device);
}
+}
+
+/* complete send wqe with flush error */
+static int flush_send_wqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
+{
+ struct rxe_cqe cqe = {};
+ struct ib_wc *wc = &cqe.ibwc;
+ struct ib_uverbs_wc *uwc = &cqe.uibwc;
+ int err;
+
+ if (qp->is_user) {
+ uwc->wr_id = wqe->wr.wr_id;
+ uwc->status = IB_WC_WR_FLUSH_ERR;
+ uwc->qp_num = qp->ibqp.qp_num;
+ } else {
+ wc->wr_id = wqe->wr.wr_id;
+ wc->status = IB_WC_WR_FLUSH_ERR;
+ wc->qp = &qp->ibqp;
+ }
+
+ err = rxe_cq_post(qp->scq, &cqe, 0);
+ if (err)
+ rxe_dbg_cq(qp->scq, "post cq failed, err = %d", err);
+
+ return err;
+}
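
flush_send_wqe() fills either the kernel or the userspace completion format depending on qp->is_user because both share the same posted entry. A sketch of the assumed rxe_cqe layout that makes this work (the real definition lives in rxe_verbs.h; this is a from-memory illustration):

/* one CQ entry, viewed either as a kernel ib_wc or as the uverbs
 * format, so rxe_cq_post() can hand it to whichever kind of consumer
 * owns the CQ without a conversion step
 */
struct rxe_cqe {
        union {
                struct ib_wc ibwc;              /* kernel consumers */
                struct ib_uverbs_wc uibwc;      /* userspace (uverbs) consumers */
        };
};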
+
+/* drain and optionally complete the send queue
+ * if unable to complete a wqe, i.e. cq is full, stop
+ * completing and flush the remaining wqes
+ */
+static void flush_send_queue(struct rxe_qp *qp, bool notify)
+{
+ struct rxe_send_wqe *wqe;
+ struct rxe_queue *q = qp->sq.queue;
+ int err;
while ((wqe = queue_head(q, q->type))) {
if (notify) {
- wqe->status = IB_WC_WR_FLUSH_ERR;
- do_complete(qp, wqe);
- } else {
- queue_advance_consumer(q, q->type);
+ err = flush_send_wqe(qp, wqe);
+ if (err)
+ notify = 0;
}
+ queue_advance_consumer(q, q->type);
}
}
@@ -571,9 +615,28 @@ static void free_pkt(struct rxe_pkt_info *pkt)
ib_device_put(dev);
}
-int rxe_completer(void *arg)
+/* reset the retry timer if
+ * - QP is type RC
+ * - there is a packet sent by the requester that
+ * might be acked (we still might get spurious
+ * timeouts but try to keep them as few as possible)
+ * - the timeout parameter is set
+ * - the QP is alive
+ */
+static void reset_retry_timer(struct rxe_qp *qp)
+{
+ if (qp_type(qp) == IB_QPT_RC && qp->qp_timeout_jiffies) {
+ spin_lock_bh(&qp->state_lock);
+ if (qp_state(qp) >= IB_QPS_RTS &&
+ psn_compare(qp->req.psn, qp->comp.psn) > 0)
+ mod_timer(&qp->retrans_timer,
+ jiffies + qp->qp_timeout_jiffies);
+ spin_unlock_bh(&qp->state_lock);
+ }
+}
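
reset_retry_timer() relies on psn_compare() to test whether any request packets are still unacknowledged (req.psn ahead of comp.psn). PSNs are 24-bit sequence numbers that wrap, so the comparison must be done in modular arithmetic; a sketch of the usual technique, not necessarily the exact rxe helper:

/* shift the 24-bit difference into the top of a signed 32-bit value so
 * that values slightly ahead compare positive and values slightly
 * behind compare negative, even across a wrap
 */
static int psn_compare_sketch(u32 psn_a, u32 psn_b)
{
        s32 diff = (psn_a - psn_b) << 8;

        return diff;    /* > 0: a ahead of b, < 0: behind, 0: equal */
}

/* e.g. psn_compare_sketch(0x000001, 0xffffff) > 0: PSN 0x000001 is one
 * packet ahead of 0xffffff after the 24-bit counter wraps
 */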
+
+int rxe_completer(struct rxe_qp *qp)
{
- struct rxe_qp *qp = (struct rxe_qp *)arg;
struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
struct rxe_send_wqe *wqe = NULL;
struct sk_buff *skb = NULL;
@@ -581,15 +644,17 @@ int rxe_completer(void *arg)
enum comp_state state;
int ret;
- if (!rxe_get(qp))
- return -EAGAIN;
+ spin_lock_bh(&qp->state_lock);
+ if (!qp->valid || qp_state(qp) == IB_QPS_ERR ||
+ qp_state(qp) == IB_QPS_RESET) {
+ bool notify = qp->valid && (qp_state(qp) == IB_QPS_ERR);
- if (!qp->valid || qp->comp.state == QP_STATE_ERROR ||
- qp->comp.state == QP_STATE_RESET) {
- rxe_drain_resp_pkts(qp, qp->valid &&
- qp->comp.state == QP_STATE_ERROR);
+ drain_resp_pkts(qp);
+ flush_send_queue(qp, notify);
+ spin_unlock_bh(&qp->state_lock);
goto exit;
}
+ spin_unlock_bh(&qp->state_lock);
if (qp->comp.timeout) {
qp->comp.timeout_retry = 1;
@@ -677,20 +742,7 @@ int rxe_completer(void *arg)
break;
}
- /* re reset the timeout counter if
- * (1) QP is type RC
- * (2) the QP is alive
- * (3) there is a packet sent by the requester that
- * might be acked (we still might get spurious
- * timeouts but try to keep them as few as possible)
- * (4) the timeout parameter is set
- */
- if ((qp_type(qp) == IB_QPT_RC) &&
- (qp->req.state == QP_STATE_READY) &&
- (psn_compare(qp->req.psn, qp->comp.psn) > 0) &&
- qp->qp_timeout_jiffies)
- mod_timer(&qp->retrans_timer,
- jiffies + qp->qp_timeout_jiffies);
+ reset_retry_timer(qp);
goto exit;
case COMPST_ERROR_RETRY:
@@ -730,7 +782,7 @@ int rxe_completer(void *arg)
RXE_CNT_COMP_RETRY);
qp->req.need_retry = 1;
qp->comp.started_retry = 1;
- rxe_run_task(&qp->req.task);
+ rxe_sched_task(&qp->req.task);
}
goto done;
@@ -752,6 +804,7 @@ int rxe_completer(void *arg)
*/
qp->req.wait_for_rnr_timer = 1;
rxe_dbg_qp(qp, "set rnr nak timer\n");
+ // TODO who protects from destroy_qp??
mod_timer(&qp->rnr_nak_timer,
jiffies + rnrnak_jiffies(aeth_syn(pkt)
& ~AETH_TYPE_MASK));
@@ -784,7 +837,5 @@ exit:
out:
if (pkt)
free_pkt(pkt);
- rxe_put(qp);
-
return ret;
}
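
With the prototype changed from rxe_completer(void *arg) to rxe_completer(struct rxe_qp *qp), and the per-run rxe_get()/rxe_put() pair removed, the QP reference is expected to be held by the task layer that passes the QP in directly. A sketch of how such a caller plausibly drives the completer, assuming the usual rxe convention that a return of 0 means "made progress, run again" and nonzero (-EAGAIN here) means stop until rescheduled:

/* illustrative task loop; field and function names are assumptions,
 * not the exact rxe_task.c code
 */
struct rxe_task_sketch {
        struct rxe_qp *qp;
        int (*func)(struct rxe_qp *qp);
};

static void task_run(struct rxe_task_sketch *task)
{
        int ret;

        /* keep calling while the work function reports progress (0);
         * stop on -EAGAIN and wait for the next rxe_sched_task()
         */
        do {
                ret = task->func(task->qp);
        } while (ret == 0);
}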