Diffstat (limited to 'drivers/infiniband/sw/rxe/rxe_req.c')
-rw-r--r--	drivers/infiniband/sw/rxe/rxe_req.c | 104
1 file changed, 62 insertions(+), 42 deletions(-)
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 899c8779f800..65134a9aefe7 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -102,44 +102,44 @@ void rnr_nak_timer(struct timer_list *t)
rxe_dbg_qp(qp, "nak timer fired\n");
- /* request a send queue retry */
- qp->req.need_retry = 1;
- qp->req.wait_for_rnr_timer = 0;
- rxe_sched_task(&qp->req.task);
+ spin_lock_bh(&qp->state_lock);
+ if (qp->valid) {
+ /* request a send queue retry */
+ qp->req.need_retry = 1;
+ qp->req.wait_for_rnr_timer = 0;
+ rxe_sched_task(&qp->req.task);
+ }
+ spin_unlock_bh(&qp->state_lock);
}
-static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
+static void req_check_sq_drain_done(struct rxe_qp *qp)
{
- struct rxe_send_wqe *wqe;
- struct rxe_queue *q = qp->sq.queue;
- unsigned int index = qp->req.wqe_index;
+ struct rxe_queue *q;
+ unsigned int index;
unsigned int cons;
- unsigned int prod;
+ struct rxe_send_wqe *wqe;
- wqe = queue_head(q, QUEUE_TYPE_FROM_CLIENT);
- cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT);
- prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT);
+ spin_lock_bh(&qp->state_lock);
+ if (qp_state(qp) == IB_QPS_SQD) {
+ q = qp->sq.queue;
+ index = qp->req.wqe_index;
+ cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT);
+ wqe = queue_addr_from_index(q, cons);
- if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
/* check to see if we are drained;
* state_lock used by requester and completer
*/
- spin_lock_bh(&qp->state_lock);
do {
- if (qp->req.state != QP_STATE_DRAIN) {
+ if (!qp->attr.sq_draining)
/* comp just finished */
- spin_unlock_bh(&qp->state_lock);
break;
- }
if (wqe && ((index != cons) ||
- (wqe->state != wqe_state_posted))) {
+ (wqe->state != wqe_state_posted)))
/* comp not done yet */
- spin_unlock_bh(&qp->state_lock);
break;
- }
- qp->req.state = QP_STATE_DRAINED;
+ qp->attr.sq_draining = 0;
spin_unlock_bh(&qp->state_lock);
if (qp->ibqp.event_handler) {
@@ -151,18 +151,42 @@ static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
qp->ibqp.event_handler(&ev,
qp->ibqp.qp_context);
}
+ return;
} while (0);
}
+ spin_unlock_bh(&qp->state_lock);
+}
+
+static struct rxe_send_wqe *__req_next_wqe(struct rxe_qp *qp)
+{
+ struct rxe_queue *q = qp->sq.queue;
+ unsigned int index = qp->req.wqe_index;
+ unsigned int prod;
+ prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT);
if (index == prod)
return NULL;
+ else
+ return queue_addr_from_index(q, index);
+}
+
+static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
+{
+ struct rxe_send_wqe *wqe;
+
+ req_check_sq_drain_done(qp);
- wqe = queue_addr_from_index(q, index);
+ wqe = __req_next_wqe(qp);
+ if (wqe == NULL)
+ return NULL;
- if (unlikely((qp->req.state == QP_STATE_DRAIN ||
- qp->req.state == QP_STATE_DRAINED) &&
- (wqe->state != wqe_state_processing)))
+ spin_lock_bh(&qp->state_lock);
+ if (unlikely((qp_state(qp) == IB_QPS_SQD) &&
+ (wqe->state != wqe_state_processing))) {
+ spin_unlock_bh(&qp->state_lock);
return NULL;
+ }
+ spin_unlock_bh(&qp->state_lock);
wqe->mask = wr_opcode_mask(wqe->wr.opcode, qp);
return wqe;
@@ -635,9 +659,8 @@ static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
return 0;
}
-int rxe_requester(void *arg)
+int rxe_requester(struct rxe_qp *qp)
{
- struct rxe_qp *qp = (struct rxe_qp *)arg;
struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
struct rxe_pkt_info pkt;
struct sk_buff *skb;
@@ -654,24 +677,22 @@ int rxe_requester(void *arg)
struct rxe_ah *ah;
struct rxe_av *av;
- if (!rxe_get(qp))
- return -EAGAIN;
-
- if (unlikely(!qp->valid))
+ spin_lock_bh(&qp->state_lock);
+ if (unlikely(!qp->valid)) {
+ spin_unlock_bh(&qp->state_lock);
goto exit;
+ }
- if (unlikely(qp->req.state == QP_STATE_ERROR)) {
- wqe = req_next_wqe(qp);
+ if (unlikely(qp_state(qp) == IB_QPS_ERR)) {
+ wqe = __req_next_wqe(qp);
+ spin_unlock_bh(&qp->state_lock);
if (wqe)
- /*
- * Generate an error completion for error qp state
- */
goto err;
else
goto exit;
}
- if (unlikely(qp->req.state == QP_STATE_RESET)) {
+ if (unlikely(qp_state(qp) == IB_QPS_RESET)) {
qp->req.wqe_index = queue_get_consumer(q,
QUEUE_TYPE_FROM_CLIENT);
qp->req.opcode = -1;
@@ -679,8 +700,10 @@ int rxe_requester(void *arg)
qp->req.wait_psn = 0;
qp->req.need_retry = 0;
qp->req.wait_for_rnr_timer = 0;
+ spin_unlock_bh(&qp->state_lock);
goto exit;
}
+ spin_unlock_bh(&qp->state_lock);
/* we come here if the retransmit timer has fired
* or if the rnr timer has fired. If the retransmit
@@ -757,7 +780,7 @@ int rxe_requester(void *arg)
qp->req.wqe_index);
wqe->state = wqe_state_done;
wqe->status = IB_WC_SUCCESS;
- rxe_run_task(&qp->comp.task);
+ rxe_sched_task(&qp->comp.task);
goto done;
}
payload = mtu;
@@ -840,12 +863,9 @@ err:
/* update wqe_index for each wqe completion */
qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index);
wqe->state = wqe_state_error;
- qp->req.state = QP_STATE_ERROR;
- rxe_run_task(&qp->comp.task);
+ rxe_qp_error(qp);
exit:
ret = -EAGAIN;
out:
- rxe_put(qp);
-
return ret;
}
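
For readers skimming the patch, the recurring pattern in the new rnr_nak_timer() and in the rxe_requester() hunks is: take qp->state_lock, test the QP state (qp->valid, qp_state()), update the request-machine flags, then drop the lock before any work is scheduled or any callback runs. Below is a minimal userspace sketch of that pattern, using a pthread mutex in place of spin_lock_bh(). The type and function names (qp_stub, rnr_timer_fired) are invented for illustration and are not part of the rxe driver; this is an analogue of the locking shape, not the kernel code itself.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for struct rxe_qp, holding only the fields
 * the pattern needs.  In the kernel the state is protected by
 * qp->state_lock taken with spin_lock_bh(); a pthread mutex plays
 * that role here. */
struct qp_stub {
	pthread_mutex_t state_lock;
	bool valid;			/* mirrors qp->valid */
	bool need_retry;		/* mirrors qp->req.need_retry */
	bool wait_for_rnr_timer;	/* mirrors qp->req.wait_for_rnr_timer */
};

/* Analogue of the new rnr_nak_timer(): request a send-queue retry
 * only if the QP is still valid, and only decide that while holding
 * state_lock so the check cannot race with QP teardown.  The lock is
 * held just for the state update; scheduling the work (the kernel's
 * rxe_sched_task(&qp->req.task)) happens without further locking. */
static void rnr_timer_fired(struct qp_stub *qp)
{
	pthread_mutex_lock(&qp->state_lock);
	if (qp->valid) {
		qp->need_retry = true;
		qp->wait_for_rnr_timer = false;
		/* kick the requester task here */
	}
	pthread_mutex_unlock(&qp->state_lock);
}

int main(void)
{
	struct qp_stub qp = {
		.state_lock = PTHREAD_MUTEX_INITIALIZER,
		.valid = true,
	};

	rnr_timer_fired(&qp);
	printf("need_retry=%d wait_for_rnr_timer=%d\n",
	       qp.need_retry, qp.wait_for_rnr_timer);
	return 0;
}

The same shape shows up in the rxe_requester() hunks above: the IB_QPS_ERR and IB_QPS_RESET checks read the state under state_lock, and the lock is released before the code jumps to the err/exit paths or generates completions.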