path: root/kernel/task_work.c
author    Oleg Nesterov <oleg@redhat.com>  2017-06-30 23:13:59 +0300
committer Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2017-07-25 20:08:58 +0300
commit    f274f1e72d7171c80c8c790040e47a23a74796b6 (patch)
tree      1e69960fc748abd6fd3454a9a782851f200ff92f /kernel/task_work.c
parent    3ef0c7a730de0bae03d86c19570af764fa3c4445 (diff)
download  linux-f274f1e72d7171c80c8c790040e47a23a74796b6.tar.xz
task_work: Replace spin_unlock_wait() with lock/unlock pair
There is no agreed-upon definition of spin_unlock_wait()'s semantics, and it appears that all callers could do just as well with a lock/unlock pair. This commit therefore replaces the spin_unlock_wait() call in task_work_run() with a spin_lock_irq() and a spin_unlock_irq() around the cmpxchg() dequeue loop. This should be safe from a performance perspective because ->pi_lock is local to the task and because calls to the other side of the race, task_work_cancel(), should be rare.

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
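To make the idiom concrete, here is a small userspace sketch of why an acquire/release pair can stand in for spin_unlock_wait(). This is not the kernel code: the pthread mutex and the names cancel_lock, cancel_side() and run_side() are illustrative stand-ins for ->pi_lock, task_work_cancel() and task_work_run(). The point is that taking and immediately dropping the lock guarantees that any critical section which held it earlier has finished.

/*
 * Illustrative userspace analogue only -- not the kernel implementation.
 * Acquiring and immediately releasing the lock that the cancel path holds
 * gives the same guarantee the old spin_unlock_wait() was relied on for:
 * any critical section that started before us has completed.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cancel_lock = PTHREAD_MUTEX_INITIALIZER; /* stand-in for ->pi_lock */

/* Stand-in for task_work_cancel(): edits the work list under the lock. */
static void cancel_side(void)
{
	pthread_mutex_lock(&cancel_lock);
	/* ... unlink an entry from the pending-work list ... */
	pthread_mutex_unlock(&cancel_lock);
}

/* Stand-in for task_work_run(): instead of spinning until the lock is
 * free, take it and drop it, which synchronizes with any in-progress
 * cancel_side() critical section. */
static void run_side(void)
{
	pthread_mutex_lock(&cancel_lock);
	pthread_mutex_unlock(&cancel_lock);
	/* From here on, no earlier cancel_side() call can still be
	 * touching the entries we are about to process. */
	printf("safe to run queued work\n");
}

int main(void)
{
	cancel_side();
	run_side();
	return 0;
}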
Diffstat (limited to 'kernel/task_work.c')
-rw-r--r--  kernel/task_work.c  8
1 file changed, 2 insertions(+), 6 deletions(-)
diff --git a/kernel/task_work.c b/kernel/task_work.c
index d513051fcca2..836a72a66fba 100644
--- a/kernel/task_work.c
+++ b/kernel/task_work.c
@@ -96,20 +96,16 @@ void task_work_run(void)
 		 * work->func() can do task_work_add(), do not set
 		 * work_exited unless the list is empty.
 		 */
+		raw_spin_lock_irq(&task->pi_lock);
 		do {
 			work = READ_ONCE(task->task_works);
 			head = !work && (task->flags & PF_EXITING) ?
 				&work_exited : NULL;
 		} while (cmpxchg(&task->task_works, work, head) != work);
+		raw_spin_unlock_irq(&task->pi_lock);
 
 		if (!work)
 			break;
-		/*
-		 * Synchronize with task_work_cancel(). It can't remove
-		 * the first entry == work, cmpxchg(task_works) should
-		 * fail, but it can play with *work and other entries.
-		 */
-		raw_spin_unlock_wait(&task->pi_lock);
 
 		do {
 			next = work->next;