From f78116d3bf4fd7a84451e1a2adc35df7a63fbbf4 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 18 Jul 2023 16:38:08 +1000 Subject: SUNRPC: call svc_process() from svc_recv(). All callers of svc_recv() go on to call svc_process() on success. Simplify callers by having svc_recv() do that for them. This loses one call to validate_process_creds() in nfsd. That was debugging code added 14 years ago. I don't think we need to keep it. Signed-off-by: NeilBrown Reviewed-by: Jeff Layton Signed-off-by: Chuck Lever --- net/sunrpc/svc.c | 1 - 1 file changed, 1 deletion(-) (limited to 'net/sunrpc/svc.c') diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index 587811a002c9..c69896c124a4 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c @@ -1516,7 +1516,6 @@ out_baddir: out_drop: svc_drop(rqstp); } -EXPORT_SYMBOL_GPL(svc_process); #if defined(CONFIG_SUNRPC_BACKCHANNEL) /* -- cgit v1.2.3 From c743b4259c3af2c0637c307f08a062d25fa3c99f Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 18 Jul 2023 16:38:08 +1000 Subject: SUNRPC: remove timeout arg from svc_recv() Most svc threads have no interest in a timeout. nfsd sets it to 1 hour, but this is a wart of no significance. lockd uses the timeout so that it can call nlmsvc_retry_blocked(). It also sometimes calls svc_wake_up() to ensure this is called. So change lockd to be consistent and always use svc_wake_up() to trigger nlmsvc_retry_blocked() - using a timer instead of a timeout to svc_recv(). And change svc_recv() to not take a timeout arg. This makes the sp_threads_timedout counter always zero. Signed-off-by: NeilBrown Signed-off-by: Chuck Lever --- fs/lockd/svc.c | 14 +++++++++----- fs/lockd/svclock.c | 5 +++-- fs/nfs/callback.c | 2 +- fs/nfsd/nfssvc.c | 2 +- include/linux/lockd/lockd.h | 4 +++- include/linux/sunrpc/svc.h | 1 - include/linux/sunrpc/svcsock.h | 2 +- net/sunrpc/svc.c | 2 -- net/sunrpc/svc_xprt.c | 34 ++++++++++++++++------------------ 9 files changed, 34 insertions(+), 32 deletions(-) (limited to 'net/sunrpc/svc.c') diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c index cf4ff7d3564c..ef3f77a59556 100644 --- a/fs/lockd/svc.c +++ b/fs/lockd/svc.c @@ -56,6 +56,12 @@ static unsigned int nlmsvc_users; static struct svc_serv *nlmsvc_serv; unsigned long nlmsvc_timeout; +static void nlmsvc_request_retry(struct timer_list *tl) +{ + svc_wake_up(nlmsvc_serv); +} +DEFINE_TIMER(nlmsvc_retry, nlmsvc_request_retry); + unsigned int lockd_net_id; /* @@ -130,14 +136,11 @@ lockd(void *vrqstp) * NFS mount or NFS daemon has gone away. */ while (!kthread_should_stop()) { - long timeout = MAX_SCHEDULE_TIMEOUT; - /* update sv_maxconn if it has changed */ rqstp->rq_server->sv_maxconn = nlm_max_connections; - timeout = nlmsvc_retry_blocked(); - - svc_recv(rqstp, timeout); + nlmsvc_retry_blocked(); + svc_recv(rqstp); } if (nlmsvc_ops) nlmsvc_invalidate_all(); @@ -371,6 +374,7 @@ static void lockd_put(void) #endif svc_set_num_threads(nlmsvc_serv, NULL, 0); + timer_delete_sync(&nlmsvc_retry); nlmsvc_serv = NULL; dprintk("lockd_down: service destroyed\n"); } diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c index 28abec5c451d..43aeba9de55c 100644 --- a/fs/lockd/svclock.c +++ b/fs/lockd/svclock.c @@ -1019,7 +1019,7 @@ retry_deferred_block(struct nlm_block *block) * picks up locks that can be granted, or grant notifications that must * be retransmitted. 
*/ -unsigned long +void nlmsvc_retry_blocked(void) { unsigned long timeout = MAX_SCHEDULE_TIMEOUT; @@ -1049,5 +1049,6 @@ nlmsvc_retry_blocked(void) } spin_unlock(&nlm_blocked_lock); - return timeout; + if (timeout < MAX_SCHEDULE_TIMEOUT) + mod_timer(&nlmsvc_retry, jiffies + timeout); } diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c index 54155b484f7b..39a0ba746267 100644 --- a/fs/nfs/callback.c +++ b/fs/nfs/callback.c @@ -79,7 +79,7 @@ nfs4_callback_svc(void *vrqstp) set_freezable(); while (!kthread_freezable_should_stop(NULL)) - svc_recv(rqstp, MAX_SCHEDULE_TIMEOUT); + svc_recv(rqstp); svc_exit_thread(rqstp); return 0; diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c index 4c0ab101d90b..1582af33e204 100644 --- a/fs/nfsd/nfssvc.c +++ b/fs/nfsd/nfssvc.c @@ -961,7 +961,7 @@ nfsd(void *vrqstp) /* Update sv_maxconn if it has changed */ rqstp->rq_server->sv_maxconn = nn->max_connections; - svc_recv(rqstp, 60*60*HZ); + svc_recv(rqstp); validate_process_creds(); } diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h index f42594a9efe0..0f016d69c996 100644 --- a/include/linux/lockd/lockd.h +++ b/include/linux/lockd/lockd.h @@ -204,6 +204,8 @@ extern unsigned long nlmsvc_timeout; extern bool nsm_use_hostnames; extern u32 nsm_local_state; +extern struct timer_list nlmsvc_retry; + /* * Lockd client functions */ @@ -280,7 +282,7 @@ __be32 nlmsvc_testlock(struct svc_rqst *, struct nlm_file *, struct nlm_host *, struct nlm_lock *, struct nlm_lock *, struct nlm_cookie *); __be32 nlmsvc_cancel_blocked(struct net *net, struct nlm_file *, struct nlm_lock *); -unsigned long nlmsvc_retry_blocked(void); +void nlmsvc_retry_blocked(void); void nlmsvc_traverse_blocks(struct nlm_host *, struct nlm_file *, nlm_host_match_fn_t match); void nlmsvc_grant_reply(struct nlm_cookie *, __be32); diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index 2230148d9d68..b206fdde8e97 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -41,7 +41,6 @@ struct svc_pool { /* statistics on pool operation */ struct percpu_counter sp_sockets_queued; struct percpu_counter sp_threads_woken; - struct percpu_counter sp_threads_timedout; #define SP_TASK_PENDING (0) /* still work to do even if no * xprt is queued. */ diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h index c0ddd331d82f..d4a173c5b3be 100644 --- a/include/linux/sunrpc/svcsock.h +++ b/include/linux/sunrpc/svcsock.h @@ -57,7 +57,7 @@ static inline u32 svc_sock_final_rec(struct svc_sock *svsk) * Function prototypes. 
*/ void svc_close_net(struct svc_serv *, struct net *); -void svc_recv(struct svc_rqst *, long); +void svc_recv(struct svc_rqst *rqstp); void svc_send(struct svc_rqst *rqstp); void svc_drop(struct svc_rqst *); void svc_sock_update_bufs(struct svc_serv *serv); diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index c69896c124a4..030f8c759ee6 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c @@ -515,7 +515,6 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools, percpu_counter_init(&pool->sp_sockets_queued, 0, GFP_KERNEL); percpu_counter_init(&pool->sp_threads_woken, 0, GFP_KERNEL); - percpu_counter_init(&pool->sp_threads_timedout, 0, GFP_KERNEL); } return serv; @@ -590,7 +589,6 @@ svc_destroy(struct kref *ref) percpu_counter_destroy(&pool->sp_sockets_queued); percpu_counter_destroy(&pool->sp_threads_woken); - percpu_counter_destroy(&pool->sp_threads_timedout); } kfree(serv->sv_pools); kfree(serv); diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index d7d69143011c..9bdcdd8401b8 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c @@ -750,10 +750,9 @@ rqst_should_sleep(struct svc_rqst *rqstp) return true; } -static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout) +static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp) { struct svc_pool *pool = rqstp->rq_pool; - long time_left = 0; /* rq_xprt should be clear on entry */ WARN_ON_ONCE(rqstp->rq_xprt); @@ -769,7 +768,7 @@ static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout) smp_mb__after_atomic(); if (likely(rqst_should_sleep(rqstp))) - time_left = schedule_timeout(timeout); + schedule(); else __set_current_state(TASK_RUNNING); @@ -781,9 +780,6 @@ static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout) if (rqstp->rq_xprt) goto out_found; - if (!time_left) - percpu_counter_inc(&pool->sp_threads_timedout); - if (kthread_should_stop()) return NULL; return NULL; @@ -863,12 +859,15 @@ out: return len; } -/* - * Receive the next request on any transport. This code is carefully - * organised not to touch any cachelines in the shared svc_serv - * structure, only cachelines in the local svc_pool. +/** + * svc_recv - Receive and process the next request on any transport + * @rqstp: an idle RPC service thread + * + * This code is carefully organised not to touch any cachelines in + * the shared svc_serv structure, only cachelines in the local + * svc_pool. 
*/ -void svc_recv(struct svc_rqst *rqstp, long timeout) +void svc_recv(struct svc_rqst *rqstp) { struct svc_xprt *xprt = NULL; struct svc_serv *serv = rqstp->rq_server; @@ -882,7 +881,7 @@ void svc_recv(struct svc_rqst *rqstp, long timeout) if (kthread_should_stop()) goto out; - xprt = svc_get_next_xprt(rqstp, timeout); + xprt = svc_get_next_xprt(rqstp); if (!xprt) goto out; @@ -1447,12 +1446,11 @@ static int svc_pool_stats_show(struct seq_file *m, void *p) return 0; } - seq_printf(m, "%u %llu %llu %llu %llu\n", - pool->sp_id, - percpu_counter_sum_positive(&pool->sp_sockets_queued), - percpu_counter_sum_positive(&pool->sp_sockets_queued), - percpu_counter_sum_positive(&pool->sp_threads_woken), - percpu_counter_sum_positive(&pool->sp_threads_timedout)); + seq_printf(m, "%u %llu %llu %llu 0\n", + pool->sp_id, + percpu_counter_sum_positive(&pool->sp_sockets_queued), + percpu_counter_sum_positive(&pool->sp_sockets_queued), + percpu_counter_sum_positive(&pool->sp_threads_woken)); return 0; } -- cgit v1.2.3 From 78c542f916bccafffef4f3bec9bc60d7cda548f5 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Sat, 29 Jul 2023 20:58:54 -0400 Subject: SUNRPC: Add enum svc_auth_status In addition to the benefits of using an enum rather than a set of macros, we now have a named type that can improve static type checking of function return values. As part of this change, I removed a stale comment from svcauth.h; the return values from current implementations of the auth_ops::release method are all zero/negative errno, not the SVC_OK enum values as the old comment suggested. Suggested-by: NeilBrown Signed-off-by: Chuck Lever --- fs/lockd/svc.c | 2 +- fs/nfs/callback.c | 2 +- include/linux/sunrpc/svc.h | 2 +- include/linux/sunrpc/svcauth.h | 50 +++++++++++++++++++-------------------- include/trace/events/sunrpc.h | 9 ++++--- net/sunrpc/auth_gss/svcauth_gss.c | 7 ++---- net/sunrpc/svc.c | 6 ++++- net/sunrpc/svcauth.c | 35 ++++++++++++++++++++++----- net/sunrpc/svcauth_unix.c | 9 ++++--- 9 files changed, 73 insertions(+), 49 deletions(-) (limited to 'net/sunrpc/svc.c') diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c index ef3f77a59556..6579948070a4 100644 --- a/fs/lockd/svc.c +++ b/fs/lockd/svc.c @@ -506,7 +506,7 @@ static inline int is_callback(u32 proc) } -static int lockd_authenticate(struct svc_rqst *rqstp) +static enum svc_auth_status lockd_authenticate(struct svc_rqst *rqstp) { rqstp->rq_client = NULL; switch (rqstp->rq_authop->flavour) { diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c index 39a0ba746267..466ebf1d41b2 100644 --- a/fs/nfs/callback.c +++ b/fs/nfs/callback.c @@ -372,7 +372,7 @@ check_gss_callback_principal(struct nfs_client *clp, struct svc_rqst *rqstp) * All other checking done after NFS decoding where the nfs_client can be * found in nfs4_callback_compound */ -static int nfs_callback_authenticate(struct svc_rqst *rqstp) +static enum svc_auth_status nfs_callback_authenticate(struct svc_rqst *rqstp) { rqstp->rq_auth_stat = rpc_autherr_badcred; diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index 9b429253dbd9..1c491f02efc8 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -336,7 +336,7 @@ struct svc_program { char * pg_name; /* service name */ char * pg_class; /* class name: services sharing authentication */ struct svc_stat * pg_stats; /* rpc statistics */ - int (*pg_authenticate)(struct svc_rqst *); + enum svc_auth_status (*pg_authenticate)(struct svc_rqst *rqstp); __be32 (*pg_init_request)(struct svc_rqst *, const struct svc_program *, struct 
svc_process_info *); diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h index 27582d3b538f..6f90203edbf8 100644 --- a/include/linux/sunrpc/svcauth.h +++ b/include/linux/sunrpc/svcauth.h @@ -83,6 +83,19 @@ struct auth_domain { struct rcu_head rcu_head; }; +enum svc_auth_status { + SVC_GARBAGE = 1, + SVC_SYSERR, + SVC_VALID, + SVC_NEGATIVE, + SVC_OK, + SVC_DROP, + SVC_CLOSE, + SVC_DENIED, + SVC_PENDING, + SVC_COMPLETE, +}; + /* * Each authentication flavour registers an auth_ops * structure. @@ -98,6 +111,8 @@ struct auth_domain { * is (probably) already in place. Certainly space is * reserved for it. * DROP - simply drop the request. It may have been deferred + * CLOSE - like SVC_DROP, but request is definitely lost. + * If there is a tcp connection, it should be closed. * GARBAGE - rpc garbage_args error * SYSERR - rpc system_err error * DENIED - authp holds reason for denial. @@ -111,14 +126,10 @@ struct auth_domain { * * release() is given a request after the procedure has been run. * It should sign/encrypt the results if needed - * It should return: - * OK - the resbuf is ready to be sent - * DROP - the reply should be quitely dropped - * DENIED - authp holds a reason for MSG_DENIED - * SYSERR - rpc system_err * * domain_release() * This call releases a domain. + * * set_client() * Givens a pending request (struct svc_rqst), finds and assigns * an appropriate 'auth_domain' as the client. @@ -127,31 +138,18 @@ struct auth_ops { char * name; struct module *owner; int flavour; - int (*accept)(struct svc_rqst *rq); - int (*release)(struct svc_rqst *rq); - void (*domain_release)(struct auth_domain *); - int (*set_client)(struct svc_rqst *rq); -}; -#define SVC_GARBAGE 1 -#define SVC_SYSERR 2 -#define SVC_VALID 3 -#define SVC_NEGATIVE 4 -#define SVC_OK 5 -#define SVC_DROP 6 -#define SVC_CLOSE 7 /* Like SVC_DROP, but request is definitely - * lost so if there is a tcp connection, it - * should be closed - */ -#define SVC_DENIED 8 -#define SVC_PENDING 9 -#define SVC_COMPLETE 10 + enum svc_auth_status (*accept)(struct svc_rqst *rqstp); + int (*release)(struct svc_rqst *rqstp); + void (*domain_release)(struct auth_domain *dom); + enum svc_auth_status (*set_client)(struct svc_rqst *rqstp); +}; struct svc_xprt; -extern int svc_authenticate(struct svc_rqst *rqstp); +extern enum svc_auth_status svc_authenticate(struct svc_rqst *rqstp); extern int svc_authorise(struct svc_rqst *rqstp); -extern int svc_set_client(struct svc_rqst *rqstp); +extern enum svc_auth_status svc_set_client(struct svc_rqst *rqstp); extern int svc_auth_register(rpc_authflavor_t flavor, struct auth_ops *aops); extern void svc_auth_unregister(rpc_authflavor_t flavor); @@ -161,7 +159,7 @@ extern struct auth_domain *auth_domain_lookup(char *name, struct auth_domain *ne extern struct auth_domain *auth_domain_find(char *name); extern void svcauth_unix_purge(struct net *net); extern void svcauth_unix_info_release(struct svc_xprt *xpt); -extern int svcauth_unix_set_client(struct svc_rqst *rqstp); +extern enum svc_auth_status svcauth_unix_set_client(struct svc_rqst *rqstp); extern int unix_gid_cache_create(struct net *net); extern void unix_gid_cache_destroy(struct net *net); diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h index 00db9e1fb711..55716b62ce91 100644 --- a/include/trace/events/sunrpc.h +++ b/include/trace/events/sunrpc.h @@ -1706,7 +1706,7 @@ TRACE_DEFINE_ENUM(SVC_DENIED); TRACE_DEFINE_ENUM(SVC_PENDING); TRACE_DEFINE_ENUM(SVC_COMPLETE); -#define svc_show_status(status) \ 
+#define show_svc_auth_status(status) \ __print_symbolic(status, \ { SVC_GARBAGE, "SVC_GARBAGE" }, \ { SVC_SYSERR, "SVC_SYSERR" }, \ @@ -1743,7 +1743,10 @@ TRACE_DEFINE_ENUM(SVC_COMPLETE); __entry->xid, __get_sockaddr(server), __get_sockaddr(client) TRACE_EVENT_CONDITION(svc_authenticate, - TP_PROTO(const struct svc_rqst *rqst, int auth_res), + TP_PROTO( + const struct svc_rqst *rqst, + enum svc_auth_status auth_res + ), TP_ARGS(rqst, auth_res), @@ -1766,7 +1769,7 @@ TRACE_EVENT_CONDITION(svc_authenticate, TP_printk(SVC_RQST_ENDPOINT_FORMAT " auth_res=%s auth_stat=%s", SVC_RQST_ENDPOINT_VARARGS, - svc_show_status(__entry->svc_status), + show_svc_auth_status(__entry->svc_status), rpc_show_auth_stat(__entry->auth_stat)) ); diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index c4a566737085..18734e70c5dd 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c @@ -986,7 +986,7 @@ bad_unwrap: return -EINVAL; } -static int +static enum svc_auth_status svcauth_gss_set_client(struct svc_rqst *rqstp) { struct gss_svc_data *svcdata = rqstp->rq_auth_data; @@ -1634,7 +1634,7 @@ svcauth_gss_decode_credbody(struct xdr_stream *xdr, * * The rqstp->rq_auth_stat field is also set (see RFCs 2203 and 5531). */ -static int +static enum svc_auth_status svcauth_gss_accept(struct svc_rqst *rqstp) { struct gss_svc_data *svcdata = rqstp->rq_auth_data; @@ -1945,9 +1945,6 @@ bad_wrap: * %0: the Reply is ready to be sent * %-ENOMEM: failed to allocate memory * %-EINVAL: encoding error - * - * XXX: These return values do not match the return values documented - * for the auth_ops ->release method in linux/sunrpc/svcauth.h. */ static int svcauth_gss_release(struct svc_rqst *rqstp) diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index 030f8c759ee6..7873a3ff24a8 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c @@ -1275,8 +1275,9 @@ svc_process_common(struct svc_rqst *rqstp) const struct svc_procedure *procp = NULL; struct svc_serv *serv = rqstp->rq_server; struct svc_process_info process; - int auth_res, rc; + enum svc_auth_status auth_res; unsigned int aoffset; + int rc; __be32 *p; /* Will be turned off by GSS integrity and privacy services */ @@ -1331,6 +1332,9 @@ svc_process_common(struct svc_rqst *rqstp) goto dropit; case SVC_COMPLETE: goto sendit; + default: + pr_warn_once("Unexpected svc_auth_status (%d)\n", auth_res); + goto err_system_err; } if (progp == NULL) diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c index 67d8245a08af..aa4429d0b810 100644 --- a/net/sunrpc/svcauth.c +++ b/net/sunrpc/svcauth.c @@ -60,8 +60,19 @@ svc_put_auth_ops(struct auth_ops *aops) module_put(aops->owner); } -int -svc_authenticate(struct svc_rqst *rqstp) +/** + * svc_authenticate - Initialize an outgoing credential + * @rqstp: RPC execution context + * + * Return values: + * %SVC_OK: XDR encoding of the result can begin + * %SVC_DENIED: Credential or verifier is not valid + * %SVC_GARBAGE: Failed to decode credential or verifier + * %SVC_COMPLETE: GSS context lifetime event; no further action + * %SVC_DROP: Drop this request; no further action + * %SVC_CLOSE: Like drop, but also close transport connection + */ +enum svc_auth_status svc_authenticate(struct svc_rqst *rqstp) { struct auth_ops *aops; u32 flavor; @@ -89,16 +100,28 @@ svc_authenticate(struct svc_rqst *rqstp) } EXPORT_SYMBOL_GPL(svc_authenticate); -int svc_set_client(struct svc_rqst *rqstp) +/** + * svc_set_client - Assign an appropriate 'auth_domain' as the client + * @rqstp: RPC execution context + 
* + * Return values: + * %SVC_OK: Client was found and assigned + * %SVC_DENY: Client was explicitly denied + * %SVC_DROP: Ignore this request + * %SVC_CLOSE: Ignore this request and close the connection + */ +enum svc_auth_status svc_set_client(struct svc_rqst *rqstp) { rqstp->rq_client = NULL; return rqstp->rq_authop->set_client(rqstp); } EXPORT_SYMBOL_GPL(svc_set_client); -/* A request, which was authenticated, has now executed. - * Time to finalise the credentials and verifier - * and release and resources +/** + * svc_authorise - Finalize credentials/verifier and release resources + * @rqstp: RPC execution context + * + * Returns zero on success, or a negative errno. */ int svc_authorise(struct svc_rqst *rqstp) { diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c index 174783f804fa..04b45588ae6f 100644 --- a/net/sunrpc/svcauth_unix.c +++ b/net/sunrpc/svcauth_unix.c @@ -665,7 +665,7 @@ static struct group_info *unix_gid_find(kuid_t uid, struct svc_rqst *rqstp) } } -int +enum svc_auth_status svcauth_unix_set_client(struct svc_rqst *rqstp) { struct sockaddr_in *sin; @@ -736,7 +736,6 @@ out: rqstp->rq_auth_stat = rpc_auth_ok; return SVC_OK; } - EXPORT_SYMBOL_GPL(svcauth_unix_set_client); /** @@ -751,7 +750,7 @@ EXPORT_SYMBOL_GPL(svcauth_unix_set_client); * * rqstp->rq_auth_stat is set as mandated by RFC 5531. */ -static int +static enum svc_auth_status svcauth_null_accept(struct svc_rqst *rqstp) { struct xdr_stream *xdr = &rqstp->rq_arg_stream; @@ -828,7 +827,7 @@ struct auth_ops svcauth_null = { * * rqstp->rq_auth_stat is set as mandated by RFC 5531. */ -static int +static enum svc_auth_status svcauth_tls_accept(struct svc_rqst *rqstp) { struct xdr_stream *xdr = &rqstp->rq_arg_stream; @@ -913,7 +912,7 @@ struct auth_ops svcauth_tls = { * * rqstp->rq_auth_stat is set as mandated by RFC 5531. */ -static int +static enum svc_auth_status svcauth_unix_accept(struct svc_rqst *rqstp) { struct xdr_stream *xdr = &rqstp->rq_arg_stream; -- cgit v1.2.3 From 850bac3ae4a636e9e6bb8de62fe697ac171cb221 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 10 Jul 2023 12:42:00 -0400 Subject: SUNRPC: Deduplicate thread wake-up code Refactor: Extract the loop that finds an idle service thread from svc_xprt_enqueue() and svc_wake_up(). Both functions do just about the same thing. Note that svc_wake_up() currently does not hold the RCU read lock while waking the target thread. It indeed should hold the lock, just as svc_xprt_enqueue() does, to ensure the rqstp does not vanish during the wake-up. This patch adds the RCU lock for svc_wake_up(). Note that shrinking the pool thread count is rare, and calls to svc_wake_up() are also quite infrequent. In practice, this race is very unlikely to be hit, so we are not marking the lock fix for stable backport at this time. 
Reviewed-by: Jeff Layton Reviewed-by: NeilBrown Signed-off-by: Chuck Lever --- include/linux/sunrpc/svc.h | 1 + net/sunrpc/svc.c | 34 ++++++++++++++++++++++++++++++++++ net/sunrpc/svc_xprt.c | 44 ++++++++------------------------------------ 3 files changed, 43 insertions(+), 36 deletions(-) (limited to 'net/sunrpc/svc.c') diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index 1c491f02efc8..2b9c6df23078 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -419,6 +419,7 @@ int svc_register(const struct svc_serv *, struct net *, const int, void svc_wake_up(struct svc_serv *); void svc_reserve(struct svc_rqst *rqstp, int space); +bool svc_pool_wake_idle_thread(struct svc_pool *pool); struct svc_pool *svc_pool_for_cpu(struct svc_serv *serv); char * svc_print_addr(struct svc_rqst *, char *, size_t); const char * svc_proc_name(const struct svc_rqst *rqstp); diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index 7873a3ff24a8..ee9d55b2f275 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c @@ -687,6 +687,40 @@ svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node) return rqstp; } +/** + * svc_pool_wake_idle_thread - Awaken an idle thread in @pool + * @pool: service thread pool + * + * Can be called from soft IRQ or process context. Finding an idle + * service thread and marking it BUSY is atomic with respect to + * other calls to svc_pool_wake_idle_thread(). + * + * Return value: + * %true: An idle thread was awoken + * %false: No idle thread was found + */ +bool svc_pool_wake_idle_thread(struct svc_pool *pool) +{ + struct svc_rqst *rqstp; + + rcu_read_lock(); + list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) { + if (test_and_set_bit(RQ_BUSY, &rqstp->rq_flags)) + continue; + + WRITE_ONCE(rqstp->rq_qtime, ktime_get()); + wake_up_process(rqstp->rq_task); + rcu_read_unlock(); + percpu_counter_inc(&pool->sp_threads_woken); + trace_svc_wake_up(rqstp->rq_task->pid); + return true; + } + rcu_read_unlock(); + + set_bit(SP_CONGESTED, &pool->sp_flags); + return false; +} + /* * Choose a pool in which to create a new thread, for svc_set_num_threads */ diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index cdea4b49cbc5..20c66f659113 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c @@ -457,7 +457,6 @@ static bool svc_xprt_ready(struct svc_xprt *xprt) void svc_xprt_enqueue(struct svc_xprt *xprt) { struct svc_pool *pool; - struct svc_rqst *rqstp = NULL; if (!svc_xprt_ready(xprt)) return; @@ -477,20 +476,7 @@ void svc_xprt_enqueue(struct svc_xprt *xprt) list_add_tail(&xprt->xpt_ready, &pool->sp_sockets); spin_unlock_bh(&pool->sp_lock); - /* find a thread for this xprt */ - rcu_read_lock(); - list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) { - if (test_and_set_bit(RQ_BUSY, &rqstp->rq_flags)) - continue; - percpu_counter_inc(&pool->sp_threads_woken); - rqstp->rq_qtime = ktime_get(); - wake_up_process(rqstp->rq_task); - goto out_unlock; - } - set_bit(SP_CONGESTED, &pool->sp_flags); - rqstp = NULL; -out_unlock: - rcu_read_unlock(); + svc_pool_wake_idle_thread(pool); } EXPORT_SYMBOL_GPL(svc_xprt_enqueue); @@ -581,7 +567,10 @@ static void svc_xprt_release(struct svc_rqst *rqstp) svc_xprt_put(xprt); } -/* +/** + * svc_wake_up - Wake up a service thread for non-transport work + * @serv: RPC service + * * Some svc_serv's will have occasional work to do, even when a xprt is not * waiting to be serviced. This function is there to "kick" a task in one of * those services so that it can wake up and do that work. 
Note that we only @@ -590,27 +579,10 @@ static void svc_xprt_release(struct svc_rqst *rqstp) */ void svc_wake_up(struct svc_serv *serv) { - struct svc_rqst *rqstp; - struct svc_pool *pool; - - pool = &serv->sv_pools[0]; - - rcu_read_lock(); - list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) { - /* skip any that aren't queued */ - if (test_bit(RQ_BUSY, &rqstp->rq_flags)) - continue; - rcu_read_unlock(); - wake_up_process(rqstp->rq_task); - trace_svc_wake_up(rqstp->rq_task->pid); - return; - } - rcu_read_unlock(); + struct svc_pool *pool = &serv->sv_pools[0]; - /* No free entries available */ - set_bit(SP_TASK_PENDING, &pool->sp_flags); - smp_wmb(); - trace_svc_wake_up(0); + if (!svc_pool_wake_idle_thread(pool)) + set_bit(SP_TASK_PENDING, &pool->sp_flags); } EXPORT_SYMBOL_GPL(svc_wake_up); -- cgit v1.2.3 From f208e9508ace01864f2b37a45d07cda0641ff3ea Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 10 Jul 2023 12:42:20 -0400 Subject: SUNRPC: Count ingress RPC messages per svc_pool svc_xprt_enqueue() can be costly, since it involves selecting and waking up a process. More than one enqueue is done per incoming RPC. For example, svc_data_ready() enqueues, and so does svc_xprt_receive(). Also, if an RPC message requires more than one call to ->recvfrom() to receive it fully, each one of those calls does an enqueue. To get a sense of the average number of transport enqueue operations needed to process an incoming RPC message, re-use the "packets" pool stat. Track the number of complete RPC messages processed by each thread pool. Reviewed-by: Jeff Layton Reviewed-by: NeilBrown Signed-off-by: Chuck Lever --- include/linux/sunrpc/svc.h | 1 + net/sunrpc/svc.c | 2 ++ net/sunrpc/svc_xprt.c | 3 ++- 3 files changed, 5 insertions(+), 1 deletion(-) (limited to 'net/sunrpc/svc.c') diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index 2b9c6df23078..7838b37bcfa8 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -39,6 +39,7 @@ struct svc_pool { struct list_head sp_all_threads; /* all server threads */ /* statistics on pool operation */ + struct percpu_counter sp_messages_arrived; struct percpu_counter sp_sockets_queued; struct percpu_counter sp_threads_woken; diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index ee9d55b2f275..98dff8dfbac8 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c @@ -513,6 +513,7 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools, INIT_LIST_HEAD(&pool->sp_all_threads); spin_lock_init(&pool->sp_lock); + percpu_counter_init(&pool->sp_messages_arrived, 0, GFP_KERNEL); percpu_counter_init(&pool->sp_sockets_queued, 0, GFP_KERNEL); percpu_counter_init(&pool->sp_threads_woken, 0, GFP_KERNEL); } @@ -587,6 +588,7 @@ svc_destroy(struct kref *ref) for (i = 0; i < serv->sv_nrpools; i++) { struct svc_pool *pool = &serv->sv_pools[i]; + percpu_counter_destroy(&pool->sp_messages_arrived); percpu_counter_destroy(&pool->sp_sockets_queued); percpu_counter_destroy(&pool->sp_threads_woken); } diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index 20c66f659113..d3280ae70e36 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c @@ -871,6 +871,7 @@ void svc_recv(struct svc_rqst *rqstp) if (serv->sv_stats) serv->sv_stats->netcnt++; + percpu_counter_inc(&rqstp->rq_pool->sp_messages_arrived); rqstp->rq_stime = ktime_get(); svc_process(rqstp); out: @@ -1420,7 +1421,7 @@ static int svc_pool_stats_show(struct seq_file *m, void *p) seq_printf(m, "%u %llu %llu %llu 0\n", pool->sp_id, - 
percpu_counter_sum_positive(&pool->sp_sockets_queued), + percpu_counter_sum_positive(&pool->sp_messages_arrived), percpu_counter_sum_positive(&pool->sp_sockets_queued), percpu_counter_sum_positive(&pool->sp_threads_woken)); -- cgit v1.2.3 From d2f0ef1cbf37e396ef9c57a30c004ebe65cdbca9 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 10 Jul 2023 12:42:33 -0400 Subject: SUNRPC: Clean up svc_set_num_threads Document the API contract and remove stale or obvious comments. Reviewed-by: Jeff Layton Reviewed-by: NeilBrown Signed-off-by: Chuck Lever --- net/sunrpc/svc.c | 60 +++++++++++++++++++++++--------------------------------- 1 file changed, 25 insertions(+), 35 deletions(-) (limited to 'net/sunrpc/svc.c') diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index 98dff8dfbac8..af692bff44ab 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c @@ -723,23 +723,14 @@ bool svc_pool_wake_idle_thread(struct svc_pool *pool) return false; } -/* - * Choose a pool in which to create a new thread, for svc_set_num_threads - */ -static inline struct svc_pool * -choose_pool(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state) +static struct svc_pool * +svc_pool_next(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state) { - if (pool != NULL) - return pool; - - return &serv->sv_pools[(*state)++ % serv->sv_nrpools]; + return pool ? pool : &serv->sv_pools[(*state)++ % serv->sv_nrpools]; } -/* - * Choose a thread to kill, for svc_set_num_threads - */ -static inline struct task_struct * -choose_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state) +static struct task_struct * +svc_pool_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state) { unsigned int i; struct task_struct *task = NULL; @@ -747,7 +738,6 @@ choose_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state) if (pool != NULL) { spin_lock_bh(&pool->sp_lock); } else { - /* choose a pool in round-robin fashion */ for (i = 0; i < serv->sv_nrpools; i++) { pool = &serv->sv_pools[--(*state) % serv->sv_nrpools]; spin_lock_bh(&pool->sp_lock); @@ -762,21 +752,15 @@ found_pool: if (!list_empty(&pool->sp_all_threads)) { struct svc_rqst *rqstp; - /* - * Remove from the pool->sp_all_threads list - * so we don't try to kill it again. - */ rqstp = list_entry(pool->sp_all_threads.next, struct svc_rqst, rq_all); set_bit(RQ_VICTIM, &rqstp->rq_flags); list_del_rcu(&rqstp->rq_all); task = rqstp->rq_task; } spin_unlock_bh(&pool->sp_lock); - return task; } -/* create new threads */ static int svc_start_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs) { @@ -788,13 +772,12 @@ svc_start_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs) do { nrservs--; - chosen_pool = choose_pool(serv, pool, &state); - + chosen_pool = svc_pool_next(serv, pool, &state); node = svc_pool_map_get_node(chosen_pool->sp_id); + rqstp = svc_prepare_thread(serv, chosen_pool, node); if (IS_ERR(rqstp)) return PTR_ERR(rqstp); - task = kthread_create_on_node(serv->sv_threadfn, rqstp, node, "%s", serv->sv_name); if (IS_ERR(task)) { @@ -813,15 +796,6 @@ svc_start_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs) return 0; } -/* - * Create or destroy enough new threads to make the number - * of threads the given number. If `pool' is non-NULL, applies - * only to threads in that pool, otherwise round-robins between - * all pools. Caller must ensure that mutual exclusion between this and - * server startup or shutdown. 
- */ - -/* destroy old threads */ static int svc_stop_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs) { @@ -829,9 +803,8 @@ svc_stop_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs) struct task_struct *task; unsigned int state = serv->sv_nrthreads-1; - /* destroy old threads */ do { - task = choose_victim(serv, pool, &state); + task = svc_pool_victim(serv, pool, &state); if (task == NULL) break; rqstp = kthread_data(task); @@ -843,6 +816,23 @@ svc_stop_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs) return 0; } +/** + * svc_set_num_threads - adjust number of threads per RPC service + * @serv: RPC service to adjust + * @pool: Specific pool from which to choose threads, or NULL + * @nrservs: New number of threads for @serv (0 or less means kill all threads) + * + * Create or destroy threads to make the number of threads for @serv the + * given number. If @pool is non-NULL, change only threads in that pool; + * otherwise, round-robin between all pools for @serv. @serv's + * sv_nrthreads is adjusted for each thread created or destroyed. + * + * Caller must ensure mutual exclusion between this and server startup or + * shutdown. + * + * Returns zero on success or a negative errno if an error occurred while + * starting a thread. + */ int svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs) { -- cgit v1.2.3 From 2a4557452aacf9e7168cb83bc102467094ff9391 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Mon, 31 Jul 2023 16:48:29 +1000 Subject: SUNRPC: Remove return value of svc_pool_wake_idle_thread() The returned value is not used (any more), so don't return it. Signed-off-by: NeilBrown Signed-off-by: Chuck Lever --- include/linux/sunrpc/svc.h | 2 +- net/sunrpc/svc.c | 8 ++------ 2 files changed, 3 insertions(+), 7 deletions(-) (limited to 'net/sunrpc/svc.c') diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index 7838b37bcfa8..dbf5b21feafe 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -420,7 +420,7 @@ int svc_register(const struct svc_serv *, struct net *, const int, void svc_wake_up(struct svc_serv *); void svc_reserve(struct svc_rqst *rqstp, int space); -bool svc_pool_wake_idle_thread(struct svc_pool *pool); +void svc_pool_wake_idle_thread(struct svc_pool *pool); struct svc_pool *svc_pool_for_cpu(struct svc_serv *serv); char * svc_print_addr(struct svc_rqst *, char *, size_t); const char * svc_proc_name(const struct svc_rqst *rqstp); diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index af692bff44ab..dc21e6c732db 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c @@ -697,11 +697,8 @@ svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node) * service thread and marking it BUSY is atomic with respect to * other calls to svc_pool_wake_idle_thread(). * - * Return value: - * %true: An idle thread was awoken - * %false: No idle thread was found */ -bool svc_pool_wake_idle_thread(struct svc_pool *pool) +void svc_pool_wake_idle_thread(struct svc_pool *pool) { struct svc_rqst *rqstp; @@ -715,12 +712,11 @@ bool svc_pool_wake_idle_thread(struct svc_pool *pool) rcu_read_unlock(); percpu_counter_inc(&pool->sp_threads_woken); trace_svc_wake_up(rqstp->rq_task->pid); - return true; + return; } rcu_read_unlock(); set_bit(SP_CONGESTED, &pool->sp_flags); - return false; } static struct svc_pool * -- cgit v1.2.3
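
Taken together, the series leaves every RPC service thread with the same minimal main loop: svc_recv() now waits for work, receives the next request, and dispatches it through svc_process() internally, so callers pass no timeout and do no per-request processing of their own. The sketch below is illustrative only and is not part of the series; it is modeled on the nfs4_callback_svc() and lockd() hunks above, and the function name example_svc_thread() is hypothetical.

	/* Illustrative sketch (not from the series): a service thread main
	 * loop after these patches.  svc_recv() blocks until the thread is
	 * woken (e.g. by svc_pool_wake_idle_thread()), receives one request,
	 * and processes it, so the caller-side loop collapses to one call.
	 */
	#include <linux/kthread.h>
	#include <linux/freezer.h>
	#include <linux/sunrpc/svc.h>
	#include <linux/sunrpc/svcsock.h>

	static int example_svc_thread(void *vrqstp)
	{
		struct svc_rqst *rqstp = vrqstp;

		set_freezable();
		while (!kthread_freezable_should_stop(NULL))
			svc_recv(rqstp);	/* wait, receive, and process one request */

		svc_exit_thread(rqstp);
		return 0;
	}

A service that needs periodic work outside the request path (as lockd does for nlmsvc_retry_blocked()) arranges its own timer and calls svc_wake_up(), rather than relying on a receive timeout, exactly as the lockd hunks above show.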