path: root/fs/nfs/nfs4client.c
author     Trond Myklebust <trond.myklebust@hammerspace.com>  2019-08-03 17:11:27 +0300
committer  Trond Myklebust <trond.myklebust@hammerspace.com>  2019-08-05 05:35:40 +0300
commit     c77e22834ae9a11891cb613bd9a551be1b94f2bc (patch)
tree       dfa88210f8f4660ce07e597ba5dda83e6aee73af /fs/nfs/nfs4client.c
parent     e3c8dc761ead061da2220ee8f8132f729ac3ddfe (diff)
download   linux-c77e22834ae9a11891cb613bd9a551be1b94f2bc.tar.xz
NFSv4: Fix a potential sleep while atomic in nfs4_do_reclaim()
John Hubbard reports seeing the following stack trace:

    nfs4_do_reclaim
      rcu_read_lock /* we are now in_atomic() and must not sleep */
        nfs4_purge_state_owners
          nfs4_free_state_owner
            nfs4_destroy_seqid_counter
              rpc_destroy_wait_queue
                cancel_delayed_work_sync
                  __cancel_work_timer
                    __flush_work
                      start_flush_work
                        might_sleep:
                          (kernel/workqueue.c:2975: BUG)

The solution is to separate out the freeing of the state owners from
nfs4_purge_state_owners(), and perform that outside the atomic context.

Reported-by: John Hubbard <jhubbard@nvidia.com>
Fixes: 0aaaf5c424c7f ("NFS: Cache state owners after files are closed")
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
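The pattern used by the fix is the usual "unlink under the lock, free after dropping it" idiom: while still atomic, entries are only moved onto a caller-supplied list, and the potentially sleeping teardown happens once the lock is released. The following is a minimal, self-contained sketch of that idiom; the names demo_owner, demo_purge() and demo_free() are illustrative placeholders, not the actual NFS client code.

    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct demo_owner {
            struct list_head entry;
    };

    static LIST_HEAD(demo_owner_list);
    static DEFINE_SPINLOCK(demo_lock);

    /* Called while atomic: only unlink entries, never free them here. */
    static void demo_purge(struct list_head *freeme)
    {
            spin_lock(&demo_lock);
            list_splice_init(&demo_owner_list, freeme);
            spin_unlock(&demo_lock);
    }

    /* Called after the lock is dropped: freeing may sleep safely here. */
    static void demo_free(struct list_head *freeme)
    {
            struct demo_owner *owner, *tmp;

            list_for_each_entry_safe(owner, tmp, freeme, entry) {
                    list_del(&owner->entry);
                    kfree(owner);
            }
    }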
Diffstat (limited to 'fs/nfs/nfs4client.c')
-rw-r--r--  fs/nfs/nfs4client.c  5
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 616393a01c06..da6204025a2d 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -758,9 +758,12 @@ out:
 static void nfs4_destroy_server(struct nfs_server *server)
 {
+	LIST_HEAD(freeme);
+
 	nfs_server_return_all_delegations(server);
 	unset_pnfs_layoutdriver(server);
-	nfs4_purge_state_owners(server);
+	nfs4_purge_state_owners(server, &freeme);
+	nfs4_free_state_owners(&freeme);
 }
 /*
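This page only shows the nfs4client.c side of the change; the reclaim-side caller that produced the reported trace lives in fs/nfs/nfs4state.c and is not part of this diff. A rough sketch of how such a caller would use the two-step interface, assuming the same freeme-list convention (the function name example_reclaim and the simplified loop body are placeholders, not the upstream code):

    /* Sketch only: a caller that must not sleep inside rcu_read_lock(),
     * based on the pattern described in the commit message. */
    static void example_reclaim(struct nfs_client *clp)
    {
            struct nfs_server *server;
            LIST_HEAD(freeme);

            rcu_read_lock();
            list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
                    /* While atomic, only unlink the cached state owners... */
                    nfs4_purge_state_owners(server, &freeme);
            }
            rcu_read_unlock();
            /* ...and free them (which may sleep) only after leaving the
             * RCU read-side critical section. */
            nfs4_free_state_owners(&freeme);
    }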