author	Chuck Lever <chuck.lever@oracle.com>	2021-03-01 18:44:49 +0300
committer	Chuck Lever <chuck.lever@oracle.com>	2021-03-31 22:58:48 +0300
commit	e3eded5e81c4df60006e94614ec645da089e35e7 (patch)
tree	ee5cc39c1c0ab30bbe830436b25b1c039c80d969 /net/sunrpc
parent	5533c4f4b996b7fc36d16b5e0807ebbc08c93af4 (diff)
svcrdma: Clean up dto_q critical section in svc_rdma_recvfrom()
This, to me, seems less cluttered and less redundant. I was hoping it
could help reduce lock contention on the dto_q lock by reducing the
size of the critical section, but alas, the only improvement is
readability.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Diffstat (limited to 'net/sunrpc')
-rw-r--r--	net/sunrpc/xprtrdma/svc_rdma_recvfrom.c	14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index 232860ea683b..6be23ce7a93d 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -794,22 +794,22 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
 	rqstp->rq_xprt_ctxt = NULL;
 
+	ctxt = NULL;
 	spin_lock(&rdma_xprt->sc_rq_dto_lock);
 	ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_rq_dto_q);
-	if (!ctxt) {
+	if (ctxt)
+		list_del(&ctxt->rc_list);
+	else
 		/* No new incoming requests, terminate the loop */
 		clear_bit(XPT_DATA, &xprt->xpt_flags);
-		spin_unlock(&rdma_xprt->sc_rq_dto_lock);
-		svc_xprt_received(xprt);
-		return 0;
-	}
-	list_del(&ctxt->rc_list);
 	spin_unlock(&rdma_xprt->sc_rq_dto_lock);
-	percpu_counter_inc(&svcrdma_stat_recv);
 
 	/* Unblock the transport for the next receive */
 	svc_xprt_received(xprt);
+	if (!ctxt)
+		return 0;
 
+	percpu_counter_inc(&svcrdma_stat_recv);
 	ib_dma_sync_single_for_cpu(rdma_xprt->sc_pd->device,
 				   ctxt->rc_recv_sge.addr, ctxt->rc_byte_len,
 				   DMA_FROM_DEVICE);
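
For readers following the control flow, this is the dequeue path as it stands
once the hunk above is applied (a sketch assembled from the + and context
lines only; the enclosing declarations and the rest of svc_rdma_recvfrom()
are elided):

	ctxt = NULL;
	spin_lock(&rdma_xprt->sc_rq_dto_lock);
	ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_rq_dto_q);
	if (ctxt)
		list_del(&ctxt->rc_list);
	else
		/* No new incoming requests, terminate the loop */
		clear_bit(XPT_DATA, &xprt->xpt_flags);
	spin_unlock(&rdma_xprt->sc_rq_dto_lock);

	/* Unblock the transport for the next receive */
	svc_xprt_received(xprt);
	if (!ctxt)
		return 0;

	percpu_counter_inc(&svcrdma_stat_recv);
	ib_dma_sync_single_for_cpu(rdma_xprt->sc_pd->device,
				   ctxt->rc_recv_sge.addr, ctxt->rc_byte_len,
				   DMA_FROM_DEVICE);

Note that sc_rq_dto_lock now covers only the list peek/delete and the
XPT_DATA update, with a single unlock site instead of two;
svc_xprt_received() and the statistics bump run outside the lock. That is
the smaller critical section the log message describes, even though the
author reports the practical win was readability rather than contention.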