Diffstat (limited to 'drivers/infiniband/sw/rxe/rxe_req.c')
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_req.c  |  50
1 file changed, 37 insertions(+), 13 deletions(-)
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index f63771207970..899c8779f800 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -100,12 +100,12 @@ void rnr_nak_timer(struct timer_list *t)
{
struct rxe_qp *qp = from_timer(qp, t, rnr_nak_timer);
- pr_debug("%s: fired for qp#%d\n", __func__, qp_num(qp));
+ rxe_dbg_qp(qp, "nak timer fired\n");
/* request a send queue retry */
qp->req.need_retry = 1;
qp->req.wait_for_rnr_timer = 0;
- rxe_run_task(&qp->req.task, 1);
+ rxe_sched_task(&qp->req.task);
}
static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
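
Note on the task API change used throughout this patch: rxe_run_task() no longer takes a scheduling flag; call sites that want the work deferred rather than run inline (such as this timer callback, which fires in softirq context) switch to rxe_sched_task(). A minimal sketch of the split, assuming the tasklet-backed rxe_task helpers of this series (the real definitions live in rxe_task.c):

        void rxe_run_task(struct rxe_task *task)
        {
                if (task->destroyed)
                        return;

                /* execute the task body synchronously in the caller's context */
                do_task(&task->tasklet);
        }

        void rxe_sched_task(struct rxe_task *task)
        {
                if (task->destroyed)
                        return;

                /* defer the work to the tasklet instead of running it inline */
                tasklet_schedule(&task->tasklet);
        }
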
@@ -241,6 +241,9 @@ static int next_opcode_rc(struct rxe_qp *qp, u32 opcode, int fits)
IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE :
IB_OPCODE_RC_SEND_FIRST;
+ case IB_WR_FLUSH:
+ return IB_OPCODE_RC_FLUSH;
+
case IB_WR_RDMA_READ:
return IB_OPCODE_RC_RDMA_READ_REQUEST;
@@ -258,6 +261,10 @@ static int next_opcode_rc(struct rxe_qp *qp, u32 opcode, int fits)
else
return fits ? IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE :
IB_OPCODE_RC_SEND_FIRST;
+
+ case IB_WR_ATOMIC_WRITE:
+ return IB_OPCODE_RC_ATOMIC_WRITE;
+
case IB_WR_REG_MR:
case IB_WR_LOCAL_INV:
return opcode;
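
Neither new verb is ever segmented: a FLUSH request carries only headers and an ATOMIC WRITE carries a single fixed 8-byte payload, so next_opcode_rc() maps each directly to one wire opcode with no FIRST/MIDDLE/LAST variants. A purely illustrative restatement of that mapping (the function name below is made up; the authoritative table is next_opcode_rc() plus rxe_opcode.c):

        /* Illustrative only: both new verbs always emit exactly one request packet. */
        static int rc_single_packet_opcode(int wr_opcode)
        {
                switch (wr_opcode) {
                case IB_WR_FLUSH:
                        return IB_OPCODE_RC_FLUSH;              /* headers only, no payload */
                case IB_WR_ATOMIC_WRITE:
                        return IB_OPCODE_RC_ATOMIC_WRITE;       /* fixed 8-byte payload */
                default:
                        return -EINVAL;
                }
        }
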
@@ -421,11 +428,18 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp,
/* init optional headers */
if (pkt->mask & RXE_RETH_MASK) {
- reth_set_rkey(pkt, ibwr->wr.rdma.rkey);
+ if (pkt->mask & RXE_FETH_MASK)
+ reth_set_rkey(pkt, ibwr->wr.flush.rkey);
+ else
+ reth_set_rkey(pkt, ibwr->wr.rdma.rkey);
reth_set_va(pkt, wqe->iova);
reth_set_len(pkt, wqe->dma.resid);
}
+ /* Fill Flush Extension Transport Header */
+ if (pkt->mask & RXE_FETH_MASK)
+ feth_init(pkt, ibwr->wr.flush.type, ibwr->wr.flush.level);
+
if (pkt->mask & RXE_IMMDT_MASK)
immdt_set_imm(pkt, ibwr->ex.imm_data);
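
The FETH built here is a 4-byte extension header that encodes the flush attributes taken from the work request: the placement type (persistence vs. global visibility) in the low bits and the selectivity level above it. A rough sketch of the helper this hunk relies on, written from memory and assuming the FETH accessors added to rxe_hdr.h elsewhere in this series:

        #define FETH_PLT_MASK   GENMASK(3, 0)   /* placement type requested by the flush */
        #define FETH_SEL_MASK   GENMASK(5, 4)   /* selectivity level (range vs. whole MR) */

        static inline void feth_init(struct rxe_pkt_info *pkt, u8 type, u8 level)
        {
                __be32 *feth = (__be32 *)(pkt->hdr +
                                rxe_opcode[pkt->opcode].offset[RXE_FETH]);

                /* pack level and type into the single 32-bit FETH word */
                *feth = cpu_to_be32(FIELD_PREP(FETH_SEL_MASK, level) |
                                    FIELD_PREP(FETH_PLT_MASK, type));
        }
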
@@ -484,6 +498,14 @@ static int finish_packet(struct rxe_qp *qp, struct rxe_av *av,
memset(pad, 0, bth_pad(pkt));
}
+ } else if (pkt->mask & RXE_FLUSH_MASK) {
+ /* oA19-2: shall have no payload. */
+ wqe->dma.resid = 0;
+ }
+
+ if (pkt->mask & RXE_ATOMIC_WRITE_MASK) {
+ memcpy(payload_addr(pkt), wqe->dma.atomic_wr, payload);
+ wqe->dma.resid -= payload;
}
return 0;
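
Two details worth noting in this hunk: a FLUSH must not carry a payload (oA19-2), so the residual DMA length is cleared once the RETH has been built from it; an ATOMIC WRITE copies its 8-byte operand, held inline in the WQE, straight behind the transport headers. payload_addr() is the existing helper that locates that spot, approximately (sketch from memory of rxe_hdr.h):

        static inline void *payload_addr(struct rxe_pkt_info *pkt)
        {
                /* first byte past the transport headers defined for this opcode */
                return pkt->hdr + rxe_opcode[pkt->opcode].offset[RXE_PAYLOAD];
        }
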
@@ -595,7 +617,7 @@ static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
}
break;
default:
- pr_err("Unexpected send wqe opcode %d\n", opcode);
+ rxe_dbg_qp(qp, "Unexpected send wqe opcode %d\n", opcode);
wqe->status = IB_WC_LOC_QP_OP_ERR;
return -EINVAL;
}
@@ -608,7 +630,7 @@ static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
* which can lead to a deadlock. So go ahead and complete
* it now.
*/
- rxe_run_task(&qp->comp.task, 1);
+ rxe_sched_task(&qp->comp.task);
return 0;
}
@@ -709,13 +731,15 @@ int rxe_requester(void *arg)
}
mask = rxe_opcode[opcode].mask;
- if (unlikely(mask & RXE_READ_OR_ATOMIC_MASK)) {
+ if (unlikely(mask & (RXE_READ_OR_ATOMIC_MASK |
+ RXE_ATOMIC_WRITE_MASK))) {
if (check_init_depth(qp, wqe))
goto exit;
}
mtu = get_mtu(qp);
- payload = (mask & RXE_WRITE_OR_SEND_MASK) ? wqe->dma.resid : 0;
+ payload = (mask & (RXE_WRITE_OR_SEND_MASK | RXE_ATOMIC_WRITE_MASK)) ?
+ wqe->dma.resid : 0;
if (payload > mtu) {
if (qp_type(qp) == IB_QPT_UD) {
/* C10-93.1.1: If the total sum of all the buffer lengths specified for a
@@ -733,7 +757,7 @@ int rxe_requester(void *arg)
qp->req.wqe_index);
wqe->state = wqe_state_done;
wqe->status = IB_WC_SUCCESS;
- rxe_run_task(&qp->comp.task, 0);
+ rxe_run_task(&qp->comp.task);
goto done;
}
payload = mtu;
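
Folding RXE_ATOMIC_WRITE_MASK into the depth check above means an atomic write now consumes an initiator-depth credit exactly like a read or an atomic, since the responder must queue a response for it. For reference, a sketch (from memory, not verbatim) of the check_init_depth() helper earlier in rxe_req.c that does the accounting against max_rd_atomic:

        static inline int check_init_depth(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
        {
                int depth;

                /* this wqe already holds a credit from an earlier attempt */
                if (wqe->has_rd_atomic)
                        return 0;

                qp->req.need_rd_atomic = 1;
                depth = atomic_dec_return(&qp->req.rd_atomic);

                if (depth >= 0) {
                        qp->req.need_rd_atomic = 0;
                        wqe->has_rd_atomic = 1;
                        return 0;
                }

                /* no credit available: back off until a response frees one */
                atomic_inc(&qp->req.rd_atomic);
                return -EAGAIN;
        }
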
@@ -748,14 +772,14 @@ int rxe_requester(void *arg)
av = rxe_get_av(&pkt, &ah);
if (unlikely(!av)) {
- pr_err("qp#%d Failed no address vector\n", qp_num(qp));
+ rxe_dbg_qp(qp, "Failed no address vector\n");
wqe->status = IB_WC_LOC_QP_OP_ERR;
goto err;
}
skb = init_req_packet(qp, av, wqe, opcode, payload, &pkt);
if (unlikely(!skb)) {
- pr_err("qp#%d Failed allocating skb\n", qp_num(qp));
+ rxe_dbg_qp(qp, "Failed allocating skb\n");
wqe->status = IB_WC_LOC_QP_OP_ERR;
if (ah)
rxe_put(ah);
@@ -764,7 +788,7 @@ int rxe_requester(void *arg)
err = finish_packet(qp, av, wqe, &pkt, skb, payload);
if (unlikely(err)) {
- pr_debug("qp#%d Error during finish packet\n", qp_num(qp));
+ rxe_dbg_qp(qp, "Error during finish packet\n");
if (err == -EFAULT)
wqe->status = IB_WC_LOC_PROT_ERR;
else
@@ -795,7 +819,7 @@ int rxe_requester(void *arg)
rollback_state(wqe, qp, &rollback_wqe, rollback_psn);
if (err == -EAGAIN) {
- rxe_run_task(&qp->req.task, 1);
+ rxe_sched_task(&qp->req.task);
goto exit;
}
@@ -817,7 +841,7 @@ err:
qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index);
wqe->state = wqe_state_error;
qp->req.state = QP_STATE_ERROR;
- rxe_run_task(&qp->comp.task, 0);
+ rxe_run_task(&qp->comp.task);
exit:
ret = -EAGAIN;
out: