author    NeilBrown <neilb@suse.de>  2023-09-11 17:39:30 +0300
committer Chuck Lever <chuck.lever@oracle.com>  2023-10-16 19:44:05 +0300
commit    9bd4161c591710f152a8cd3ed85ea928c61e26ca (patch)
tree      28f92ebc977aff7156a9470489c60672c5142df9 /net/sunrpc/svc.c
parent    d6b3358a2813bb14791259a2227d9af1e7019ca0 (diff)
download  linux-9bd4161c591710f152a8cd3ed85ea928c61e26ca.tar.xz
SUNRPC: change service idle list to be an llist
With an llist we don't need to take a lock to add a thread to the list, though we still need a lock to remove it. That will go in the next patch.

Unlike with a doubly linked list, a thread cannot reliably remove itself from an llist. Only the first thread can be removed, and that can change asynchronously. So some care is needed.

We already check whether there is pending work to do, so we are unlikely to add ourselves to the idle list and then want to remove ourselves again.

If we DO find that something needs to be done after adding ourselves to the list, we simply wake up the first thread on the list. If that was us, we have successfully removed ourselves and can continue. If it was some other thread, it will do the work that needs doing, and we can safely sleep until woken.

We also remove the test on freezing() from rqst_should_sleep(). Instead we set TASK_FREEZABLE before scheduling. This makes it safe to schedule() while a freeze is pending. As we now loop waiting to be removed from the idle queue, this is a cleaner way to handle freezing.

Signed-off-by: NeilBrown <neilb@suse.de>
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
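[Editor's illustration, not part of the patch] A minimal sketch of the pattern the commit message describes, using the kernel's llist API. The demo_pool and demo_thread types and the function names below are hypothetical stand-ins for svc_pool and svc_rqst; the real waking code is svc_pool_wake_idle_thread() in the diff.

#include <linux/llist.h>
#include <linux/spinlock.h>
#include <linux/sched.h>

/* Hypothetical stand-ins for svc_pool and svc_rqst. */
struct demo_pool {
	struct llist_head idle_threads;	/* lock-free push, locked pop */
	spinlock_t lock;
};

struct demo_thread {
	struct llist_node idle;		/* linkage on idle_threads */
	struct task_struct *task;
};

/*
 * A worker parks itself: llist_add() needs no lock.  If it then notices
 * pending work, it cannot simply unlink itself (only the first node of
 * an llist can be removed), so it wakes the first idle thread instead;
 * that thread is either itself or another thread that will do the work.
 */
static void demo_thread_go_idle(struct demo_pool *p, struct demo_thread *t)
{
	llist_add(&t->idle, &p->idle_threads);
}

/* Waker side: removing the first entry still happens under the lock. */
static void demo_pool_wake_one(struct demo_pool *p)
{
	struct llist_node *ln;

	spin_lock_bh(&p->lock);
	ln = llist_del_first(&p->idle_threads);
	spin_unlock_bh(&p->lock);
	if (ln) {
		struct demo_thread *t = llist_entry(ln, struct demo_thread, idle);

		wake_up_process(t->task);
	}
}

The patch itself uses llist_del_first_init() rather than plain llist_del_first(), so the detached node is reinitialised and the thread can later tell whether it is still on the idle list.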
Diffstat (limited to 'net/sunrpc/svc.c')
-rw-r--r--  net/sunrpc/svc.c | 14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index db4674211f36..54ae6a569f6a 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -510,7 +510,7 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
pool->sp_id = i;
INIT_LIST_HEAD(&pool->sp_sockets);
INIT_LIST_HEAD(&pool->sp_all_threads);
- INIT_LIST_HEAD(&pool->sp_idle_threads);
+ init_llist_head(&pool->sp_idle_threads);
spin_lock_init(&pool->sp_lock);
percpu_counter_init(&pool->sp_messages_arrived, 0, GFP_KERNEL);
@@ -642,7 +642,7 @@ svc_rqst_alloc(struct svc_serv *serv, struct svc_pool *pool, int node)
folio_batch_init(&rqstp->rq_fbatch);
- svc_thread_set_busy(rqstp);
+ init_llist_node(&rqstp->rq_idle);
rqstp->rq_server = serv;
rqstp->rq_pool = pool;
@@ -701,15 +701,15 @@ svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
void svc_pool_wake_idle_thread(struct svc_pool *pool)
{
struct svc_rqst *rqstp;
+ struct llist_node *ln;
rcu_read_lock();
spin_lock_bh(&pool->sp_lock);
- rqstp = list_first_entry_or_null(&pool->sp_idle_threads,
- struct svc_rqst, rq_idle);
- if (rqstp)
- list_del_init(&rqstp->rq_idle);
+ ln = llist_del_first_init(&pool->sp_idle_threads);
spin_unlock_bh(&pool->sp_lock);
- if (rqstp) {
+ if (ln) {
+ rqstp = llist_entry(ln, struct svc_rqst, rq_idle);
+
WRITE_ONCE(rqstp->rq_qtime, ktime_get());
wake_up_process(rqstp->rq_task);
rcu_read_unlock();