author     Linus Torvalds <torvalds@linux-foundation.org>   2011-05-26 23:13:57 +0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2011-05-26 23:13:57 +0400
commit     4c171acc20794af16a27da25e11ec4e9cad5d9fa (patch)
tree       fb097384d709b7bda982902d999f658bb4f07b2c /drivers/infiniband/hw/cxgb3/iwch_cm.c
parent     20e0ec119b2c6cc412addefbe169f4f5e38701e8 (diff)
parent     8dc4abdf4c82d0e1c47f14b6615406d31975ea66 (diff)
download   linux-4c171acc20794af16a27da25e11ec4e9cad5d9fa.tar.xz
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  RDMA/cma: Save PID of ID's owner
  RDMA/cma: Add support for netlink statistics export
  RDMA/cma: Pass QP type into rdma_create_id()
  RDMA: Update exported headers list
  RDMA/cma: Export enum cma_state in <rdma/rdma_cm.h>
  RDMA/nes: Add a check for strict_strtoul()
  RDMA/cxgb3: Don't post zero-byte read if endpoint is going away
  RDMA/cxgb4: Use completion objects for event blocking
  IB/srp: Fix integer -> pointer cast warnings
  IB: Add devnode methods to cm_class and umad_class
  IB/mad: Return EPROTONOSUPPORT when an RDMA device lacks the QP required
  IB/uverbs: Add devnode method to set path/mode
  RDMA/ucma: Add .nodename/.mode to tell userspace where to create device node
  RDMA: Add netlink infrastructure
  RDMA: Add error handling to ib_core_init()
Diffstat (limited to 'drivers/infiniband/hw/cxgb3/iwch_cm.c')
-rw-r--r--   drivers/infiniband/hw/cxgb3/iwch_cm.c   26
1 file changed, 17 insertions(+), 9 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 239184138994..0a5008fbebac 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -914,7 +914,7 @@ static void process_mpa_reply(struct iwch_ep *ep, struct sk_buff *skb)
                 goto err;
 
         if (peer2peer && iwch_rqes_posted(ep->com.qp) == 0) {
-                iwch_post_zb_read(ep->com.qp);
+                iwch_post_zb_read(ep);
         }
 
         goto out;
@@ -1078,6 +1078,8 @@ static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
         struct iwch_ep *ep = ctx;
         struct cpl_wr_ack *hdr = cplhdr(skb);
         unsigned int credits = ntohs(hdr->credits);
+        unsigned long flags;
+        int post_zb = 0;
 
         PDBG("%s ep %p credits %u\n", __func__, ep, credits);
 
@@ -1087,28 +1089,34 @@ static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
                 return CPL_RET_BUF_DONE;
         }
 
+        spin_lock_irqsave(&ep->com.lock, flags);
         BUG_ON(credits != 1);
         dst_confirm(ep->dst);
 
         if (!ep->mpa_skb) {
                 PDBG("%s rdma_init wr_ack ep %p state %u\n",
-                     __func__, ep, state_read(&ep->com));
+                     __func__, ep, ep->com.state);
                 if (ep->mpa_attr.initiator) {
                         PDBG("%s initiator ep %p state %u\n",
-                             __func__, ep, state_read(&ep->com));
-                        if (peer2peer)
-                                iwch_post_zb_read(ep->com.qp);
+                             __func__, ep, ep->com.state);
+                        if (peer2peer && ep->com.state == FPDU_MODE)
+                                post_zb = 1;
                 } else {
                         PDBG("%s responder ep %p state %u\n",
-                             __func__, ep, state_read(&ep->com));
-                        ep->com.rpl_done = 1;
-                        wake_up(&ep->com.waitq);
+                             __func__, ep, ep->com.state);
+                        if (ep->com.state == MPA_REQ_RCVD) {
+                                ep->com.rpl_done = 1;
+                                wake_up(&ep->com.waitq);
+                        }
                 }
         } else {
                 PDBG("%s lsm ack ep %p state %u freeing skb\n",
-                     __func__, ep, state_read(&ep->com));
+                     __func__, ep, ep->com.state);
                 kfree_skb(ep->mpa_skb);
                 ep->mpa_skb = NULL;
         }
+        spin_unlock_irqrestore(&ep->com.lock, flags);
+        if (post_zb)
+                iwch_post_zb_read(ep);
         return CPL_RET_BUF_DONE;
 }
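
The tx_ack() hunk above follows a common shape: the endpoint state is examined and the decision recorded (post_zb) while ep->com.lock is held, but the actual iwch_post_zb_read() call is deferred until after spin_unlock_irqrestore(), presumably so that posting the work request does not happen inside the IRQ-disabled critical section. Below is a minimal user-space sketch of that "decide under the lock, act after dropping it" pattern; the endpoint struct, the state names, and post_zero_byte_read() are illustrative stand-ins, not the cxgb3 driver's API.

/*
 * Sketch only: decide under the lock, perform the side effect after
 * dropping it.  Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

enum ep_state { FPDU_MODE, CLOSING };

struct endpoint {
        pthread_mutex_t lock;
        enum ep_state state;
        int peer2peer;
};

/* Stand-in for iwch_post_zb_read(); assumed unsafe to call under the lock. */
static void post_zero_byte_read(struct endpoint *ep)
{
        printf("posting zero-byte read for ep %p\n", (void *)ep);
}

static void handle_tx_ack(struct endpoint *ep)
{
        int post_zb = 0;

        pthread_mutex_lock(&ep->lock);
        /* Only queue the work if the endpoint is still in a usable state. */
        if (ep->peer2peer && ep->state == FPDU_MODE)
                post_zb = 1;
        pthread_mutex_unlock(&ep->lock);

        /* The side effect happens outside the critical section. */
        if (post_zb)
                post_zero_byte_read(ep);
}

int main(void)
{
        struct endpoint ep = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .state = FPDU_MODE,
                .peer2peer = 1,
        };

        handle_tx_ack(&ep);
        return 0;
}

The driver change has the same structure: the state test added under the spinlock ("ep->com.state == FPDU_MODE") is what keeps the zero-byte read from being posted on an endpoint that is already going away, which is the point of the cxgb3 commit in this merge.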