author    Stanislav Kinsbursky <skinsbursky@parallels.com>  2011-03-17 18:54:23 +0300
committer Trond Myklebust <Trond.Myklebust@netapp.com>      2011-03-17 19:39:00 +0300
commit    8e26de238fd794c8ea56a5c98bf67c40cfeb051d (patch)
tree      3c8d0e6b1e6e0782b13221ee6c39e5be09552dd9 /net/sunrpc/clnt.c
parent    ba3c578de274a5438bafbce03f9225936698051c (diff)
download  linux-8e26de238fd794c8ea56a5c98bf67c40cfeb051d.tar.xz
RPC: killing RPC tasks races fixed
The task's RPC_TASK_QUEUED bit must be checked before rpc_killall_tasks() tries to wake the task up, because task->tk_waitqueue may not have been set yet (i.e. it may still be NULL). Also, as Trond Myklebust mentioned, this approach (instead of checking tk_waitqueue against NULL) allows us to "optimise away the call to rpc_wake_up_queued_task() altogether for those tasks that aren't queued".

Here is an example of dereferencing tk_waitqueue while it is NULL:

CPU 0                      CPU 1                      CPU 2
-------------------------  -------------------------  --------------------------
nfs4_run_open_task
 rpc_run_task
  rpc_execute
   rpc_set_active
    rpc_make_runnable
    (waiting)              rpc_async_schedule
                            nfs4_open_prepare
                             nfs_wait_on_sequence
                                                      nfs_umount_begin
                                                       rpc_killall_tasks
                                                        rpc_wake_up_task
                                                         rpc_wake_up_queued_task
                                                          spin_lock(tk_waitqueue == NULL)
                                                          BUG()
                             rpc_sleep_on
                              spin_lock(&q->lock)
                               __rpc_sleep_on
                                task->tk_waitqueue = q

Signed-off-by: Stanislav Kinsbursky <skinsbursky@openvz.org>
Cc: stable@kernel.org
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
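To make the ordering the fix relies on concrete, below is a minimal userspace model of the race, not the kernel code: all names (struct waitq, struct task, sleeper, killer) are hypothetical. The sleeper publishes the wait-queue pointer before setting the queued flag, so a waker that tests the flag first never dereferences a NULL pointer; a waker that dereferences the pointer unconditionally can hit it while it is still NULL, which is the BUG() shown above.

/*
 * Minimal userspace sketch of the race (hypothetical names, not kernel code).
 * Build with: cc -pthread race_sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct waitq {
	pthread_mutex_t lock;
};

struct task {
	struct waitq *tk_waitqueue;	/* NULL until the task sleeps */
	atomic_int queued;		/* models the RPC_TASK_QUEUED bit */
};

static struct waitq q = { .lock = PTHREAD_MUTEX_INITIALIZER };
static struct task t = { .tk_waitqueue = NULL, .queued = 0 };

/* Sleeper side (models __rpc_sleep_on): publish the pointer, then the flag. */
static void *sleeper(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&q.lock);
	t.tk_waitqueue = &q;
	atomic_store_explicit(&t.queued, 1, memory_order_release);
	pthread_mutex_unlock(&q.lock);
	return NULL;
}

/* Killer side (models rpc_killall_tasks after the fix). */
static void *killer(void *arg)
{
	(void)arg;
	/* Without this check, t.tk_waitqueue could still be NULL here. */
	if (atomic_load_explicit(&t.queued, memory_order_acquire)) {
		pthread_mutex_lock(&t.tk_waitqueue->lock);
		printf("woke queued task\n");
		pthread_mutex_unlock(&t.tk_waitqueue->lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, sleeper, NULL);
	pthread_create(&b, NULL, killer, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}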
Diffstat (limited to 'net/sunrpc/clnt.c')
-rw-r--r--  net/sunrpc/clnt.c  4
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index edaf56e2ed29..e7a96e478f63 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -436,7 +436,9 @@ void rpc_killall_tasks(struct rpc_clnt *clnt)
 		if (!(rovr->tk_flags & RPC_TASK_KILLED)) {
 			rovr->tk_flags |= RPC_TASK_KILLED;
 			rpc_exit(rovr, -EIO);
-			rpc_wake_up_queued_task(rovr->tk_waitqueue, rovr);
+			if (RPC_IS_QUEUED(rovr))
+				rpc_wake_up_queued_task(rovr->tk_waitqueue,
+						rovr);
 		}
 	}
 	spin_unlock(&clnt->cl_lock);
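For reference, after this patch the killing loop reads roughly as follows. The lines outside the hunk above are approximated from kernels of this vintage, not quoted from the tree; only the hunk itself is authoritative. The key point is that RPC_IS_QUEUED() is set only after tk_waitqueue has been assigned, so a task that passes the test has a valid wait queue.

void rpc_killall_tasks(struct rpc_clnt *clnt)
{
	struct rpc_task *rovr;

	if (list_empty(&clnt->cl_tasks))
		return;

	/* Hold cl_lock so the task list cannot change under us. */
	spin_lock(&clnt->cl_lock);
	list_for_each_entry(rovr, &clnt->cl_tasks, tk_task) {
		if (!(rovr->tk_flags & RPC_TASK_KILLED)) {
			rovr->tk_flags |= RPC_TASK_KILLED;
			rpc_exit(rovr, -EIO);
			/* Only queued tasks have a valid tk_waitqueue. */
			if (RPC_IS_QUEUED(rovr))
				rpc_wake_up_queued_task(rovr->tk_waitqueue,
							rovr);
		}
	}
	spin_unlock(&clnt->cl_lock);
}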