author		NeilBrown <neilb@suse.de>			2023-09-11 17:38:45 +0300
committer	Chuck Lever <chuck.lever@oracle.com>		2023-10-16 19:44:03 +0300
commit		7b31f4daebad296e3164602b8303c265ec4ac7dc (patch)
tree		1e655f32b6c098c5fd96b66e4ee09a04fc3fd00a /net/sunrpc/svc_xprt.c
parent		e3274026e2ec69eec6ab51bc548e14bb548548d0 (diff)
download	linux-7b31f4daebad296e3164602b8303c265ec4ac7dc.tar.xz
SUNRPC: rename and refactor svc_get_next_xprt()
svc_get_next_xprt() does a lot more than just get an xprt. It also
decides whether it needs to sleep, depending not only on the
availability of xprts but also on the need to exit or handle external
work.

So rename it to svc_rqst_wait_for_work() and have it do only the
testing and waiting. Move all the waiting-related code out of
svc_recv() into the new svc_rqst_wait_for_work().

Move the dequeueing code out of svc_get_next_xprt() into svc_recv().

Previously svc_xprt_dequeue() would be called twice: once before
waiting and possibly once after. Now rqst_should_sleep() is called
twice instead: once to decide whether waiting is needed, and once
again after setting the task state, to see whether we might have
missed a wakeup.

Signed-off-by: NeilBrown <neilb@suse.de>
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
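[Editor's note] The reason for the second rqst_should_sleep() call is the classic
lost-wakeup race: work can arrive after the first test but before the thread has
published that it is about to sleep, in which case the waker sees a busy thread and
issues no wake-up. The sketch below shows the shape of that pattern in isolation; it
is a minimal kernel-style illustration, not the SUNRPC code itself, and struct
demo_worker, DEMO_BUSY, work_pending() and demo_new_work() are hypothetical stand-ins
for struct svc_rqst, RQ_BUSY, the pool checks and svc_pool_wake_idle_thread().

#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/freezer.h>

/* Hypothetical per-worker state, for illustration only. */
struct demo_worker {
	unsigned long	flags;		/* bit 0: DEMO_BUSY, cleared while idle */
	bool		(*work_pending)(struct demo_worker *w);	/* stand-in condition */
};
#define DEMO_BUSY	0

static void demo_wait_for_work(struct demo_worker *w)
{
	if (!w->work_pending(w)) {
		/* Publish the intent to sleep: set the task state first,
		 * then clear the busy bit that a waker tests.
		 */
		set_current_state(TASK_IDLE);
		smp_mb__before_atomic();
		clear_bit(DEMO_BUSY, &w->flags);
		smp_mb__after_atomic();

		/* Re-check: work queued between the first test and the
		 * clear_bit() above produced no wake-up, so sleeping now
		 * would miss it.
		 */
		if (!w->work_pending(w))
			schedule();
		else
			__set_current_state(TASK_RUNNING);

		set_bit(DEMO_BUSY, &w->flags);
		smp_mb__after_atomic();
	}
	try_to_freeze();
}

/* Waker side: make the condition true first, then wake the thread only
 * if it had advertised itself as idle (DEMO_BUSY clear).
 */
static void demo_new_work(struct demo_worker *w, struct task_struct *task)
{
	/* ... queue the work so that w->work_pending(w) becomes true ... */
	if (!test_and_set_bit(DEMO_BUSY, &w->flags))
		wake_up_process(task);
}

Any wake_up_process() that races with the window between set_current_state() and
schedule() simply flips the task back to TASK_RUNNING, so schedule() returns promptly;
the explicit re-check only has to cover work that was queued before the busy bit was
cleared.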
Diffstat (limited to 'net/sunrpc/svc_xprt.c')
-rw-r--r--	net/sunrpc/svc_xprt.c	92
1 files changed, 44 insertions, 48 deletions
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 60759647fee4..835160da3ad4 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -722,51 +722,34 @@ rqst_should_sleep(struct svc_rqst *rqstp)
 	return true;
 }
 
-static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp)
+static void svc_rqst_wait_for_work(struct svc_rqst *rqstp)
 {
-	struct svc_pool		*pool = rqstp->rq_pool;
-
-	/* rq_xprt should be clear on entry */
-	WARN_ON_ONCE(rqstp->rq_xprt);
+	struct svc_pool *pool = rqstp->rq_pool;
 
-	rqstp->rq_xprt = svc_xprt_dequeue(pool);
-	if (rqstp->rq_xprt)
-		goto out_found;
-
-	set_current_state(TASK_IDLE);
-	smp_mb__before_atomic();
-	clear_bit(SP_CONGESTED, &pool->sp_flags);
-	clear_bit(RQ_BUSY, &rqstp->rq_flags);
-	smp_mb__after_atomic();
-
-	if (likely(rqst_should_sleep(rqstp)))
-		schedule();
-	else
-		__set_current_state(TASK_RUNNING);
+	if (rqst_should_sleep(rqstp)) {
+		set_current_state(TASK_IDLE);
+		smp_mb__before_atomic();
+		clear_bit(SP_CONGESTED, &pool->sp_flags);
+		clear_bit(RQ_BUSY, &rqstp->rq_flags);
+		smp_mb__after_atomic();
+
+		/* Need to check should_sleep() again after
+		 * setting task state in case a wakeup happened
+		 * between testing and setting.
+		 */
+		if (rqst_should_sleep(rqstp)) {
+			schedule();
+		} else {
+			__set_current_state(TASK_RUNNING);
+			cond_resched();
+		}
+		set_bit(RQ_BUSY, &rqstp->rq_flags);
+		smp_mb__after_atomic();
+	} else {
+		cond_resched();
+	}
 
 	try_to_freeze();
-
-	set_bit(RQ_BUSY, &rqstp->rq_flags);
-	smp_mb__after_atomic();
-	clear_bit(SP_TASK_PENDING, &pool->sp_flags);
-	rqstp->rq_xprt = svc_xprt_dequeue(pool);
-	if (rqstp->rq_xprt)
-		goto out_found;
-
-	if (kthread_should_stop())
-		return NULL;
-	return NULL;
-out_found:
-	clear_bit(SP_TASK_PENDING, &pool->sp_flags);
-	/* Normally we will wait up to 5 seconds for any required
-	 * cache information to be provided.
-	 */
-	if (!test_bit(SP_CONGESTED, &pool->sp_flags))
-		rqstp->rq_chandle.thread_wait = 5*HZ;
-	else
-		rqstp->rq_chandle.thread_wait = 1*HZ;
-	trace_svc_xprt_dequeue(rqstp);
-	return rqstp->rq_xprt;
 }
 
 static void svc_add_new_temp_xprt(struct svc_serv *serv, struct svc_xprt *newxpt)
@@ -858,20 +841,33 @@ out:
  */
 void svc_recv(struct svc_rqst *rqstp)
 {
-	struct svc_xprt *xprt = NULL;
+	struct svc_pool *pool = rqstp->rq_pool;
 
 	if (!svc_alloc_arg(rqstp))
 		return;
 
-	try_to_freeze();
-	cond_resched();
+	svc_rqst_wait_for_work(rqstp);
+
+	clear_bit(SP_TASK_PENDING, &pool->sp_flags);
+
 	if (kthread_should_stop())
-		goto out;
+		return;
+
+	rqstp->rq_xprt = svc_xprt_dequeue(pool);
+	if (rqstp->rq_xprt) {
+		struct svc_xprt *xprt = rqstp->rq_xprt;
+
+		/* Normally we will wait up to 5 seconds for any required
+		 * cache information to be provided.
+		 */
+		if (!test_bit(SP_CONGESTED, &pool->sp_flags))
+			rqstp->rq_chandle.thread_wait = 5 * HZ;
+		else
+			rqstp->rq_chandle.thread_wait = 1 * HZ;
 
-	xprt = svc_get_next_xprt(rqstp);
-	if (xprt)
+		trace_svc_xprt_dequeue(rqstp);
 		svc_handle_xprt(rqstp, xprt);
-out:
+	}
 }
 EXPORT_SYMBOL_GPL(svc_recv);