path: root/include/linux/kthread.h
author	Julia Cartwright <julia@ni.com>	2019-02-12 19:25:53 +0300
committer	Thomas Gleixner <tglx@linutronix.de>	2019-02-28 13:18:38 +0300
commit	fe99a4f4d6022ec92f9b52a5528cb9b77513e7d1 (patch)
tree	87e1bdb98c37e47aab67bf52170fe4a61ca3bde1 /include/linux/kthread.h
parent	c89d92eddfad11e912fb506f85e1796064a9f9d2 (diff)
download	linux-fe99a4f4d6022ec92f9b52a5528cb9b77513e7d1.tar.xz
kthread: Convert worker lock to raw spinlock
In order to enable the queuing of kthread work items from hardirq
context even when PREEMPT_RT_FULL is enabled, convert the worker
spin_lock to a raw_spin_lock.

This is only acceptable to do because the work performed under the lock
is well-bounded and minimal.

Reported-by: Steffen Trumtrar <s.trumtrar@pengutronix.de>
Reported-by: Tim Sander <tim@krieglstein.org>
Signed-off-by: Julia Cartwright <julia@ni.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Steffen Trumtrar <s.trumtrar@pengutronix.de>
Reviewed-by: Petr Mladek <pmladek@suse.com>
Cc: Guenter Roeck <linux@roeck-us.net>
Link: https://lkml.kernel.org/r/20190212162554.19779-1-bigeasy@linutronix.de
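As an illustration (not part of the commit message), the sketch below shows the kind of caller this change enables: a driver queuing kthread work directly from its hardirq handler. kthread_queue_work() takes worker->lock internally, and with the lock converted to a raw_spinlock_t that call is valid from hardirq context even under PREEMPT_RT_FULL, where ordinary spinlocks are sleeping locks. The device structure, handler, and worker names here are hypothetical.

#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>

/* Hypothetical driver state. */
struct my_dev {
	struct kthread_worker *worker;
	struct kthread_work work;
};

/* Runs in the worker thread's context and may sleep. */
static void my_work_fn(struct kthread_work *work)
{
}

/* Setup path: create the worker thread and initialize the work item. */
static int my_dev_setup(struct my_dev *dev)
{
	dev->worker = kthread_create_worker(0, "my_worker");
	if (IS_ERR(dev->worker))
		return PTR_ERR(dev->worker);
	kthread_init_work(&dev->work, my_work_fn);
	return 0;
}

static irqreturn_t my_irq_handler(int irq, void *data)
{
	struct my_dev *dev = data;

	/*
	 * kthread_queue_work() acquires worker->lock; since it is now a
	 * raw_spinlock_t, this does not sleep in hardirq context on RT.
	 */
	kthread_queue_work(dev->worker, &dev->work);
	return IRQ_HANDLED;
}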
Diffstat (limited to 'include/linux/kthread.h')
-rw-r--r--	include/linux/kthread.h	4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index c1961761311d..6b8c064f0cbc 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -85,7 +85,7 @@ enum {
 
 struct kthread_worker {
 	unsigned int flags;
-	spinlock_t lock;
+	raw_spinlock_t lock;
 	struct list_head work_list;
 	struct list_head delayed_work_list;
 	struct task_struct *task;
@@ -106,7 +106,7 @@ struct kthread_delayed_work {
 };
 
 #define KTHREAD_WORKER_INIT(worker) {					\
-	.lock = __SPIN_LOCK_UNLOCKED((worker).lock),			\
+	.lock = __RAW_SPIN_LOCK_UNLOCKED((worker).lock),		\
 	.work_list = LIST_HEAD_INIT((worker).work_list),		\
 	.delayed_work_list = LIST_HEAD_INIT((worker).delayed_work_list),\
 	}
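For reference (again not from the commit itself), KTHREAD_WORKER_INIT() is also what DEFINE_KTHREAD_WORKER() expands to, so statically defined workers pick up the raw spinlock initializer as well. A minimal sketch with a hypothetical worker name:

#include <linux/kthread.h>

/*
 * Statically defined worker; its .lock is now initialized with
 * __RAW_SPIN_LOCK_UNLOCKED() via the updated KTHREAD_WORKER_INIT().
 */
static DEFINE_KTHREAD_WORKER(my_static_worker);

/*
 * The backing thread is started separately, e.g.:
 *   kthread_run(kthread_worker_fn, &my_static_worker, "my_static_worker");
 */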