-rw-r--r--  include/linux/workqueue.h          66
-rw-r--r--  include/trace/events/workqueue.h    8
-rw-r--r--  kernel/workqueue.c                 16
3 files changed, 73 insertions, 17 deletions
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 7fee9b6cfede..62e75dd40d9a 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -406,7 +406,7 @@ alloc_workqueue(const char *fmt, unsigned int flags, int max_active, ...);
* alloc_ordered_workqueue - allocate an ordered workqueue
* @fmt: printf format for the name of the workqueue
* @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
- * @args...: args for @fmt
+ * @args: args for @fmt
*
* Allocate an ordered workqueue. An ordered workqueue executes at
* most one work item at any given time in the queued order. They are
@@ -445,7 +445,7 @@ extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
struct delayed_work *dwork, unsigned long delay);
extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork);
-extern void flush_workqueue(struct workqueue_struct *wq);
+extern void __flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);
extern int schedule_on_each_cpu(work_func_t func);
@@ -563,15 +563,23 @@ static inline bool schedule_work(struct work_struct *work)
return queue_work(system_wq, work);
}
+/*
+ * Detect attempts to flush system-wide workqueues at compile time when possible.
+ *
+ * See https://lkml.kernel.org/r/49925af7-78a8-a3dd-bce6-cfc02e1a9236@I-love.SAKURA.ne.jp
+ * for reasons and steps for converting system-wide workqueues into local workqueues.
+ */
+extern void __warn_flushing_systemwide_wq(void)
+ __compiletime_warning("Please avoid flushing system-wide workqueues.");
+
/**
* flush_scheduled_work - ensure that any scheduled work has run to completion.
*
* Forces execution of the kernel-global workqueue and blocks until its
* completion.
*
- * Think twice before calling this function! It's very easy to get into
- * trouble if you don't take great care. Either of the following situations
- * will lead to deadlock:
+ * It's very easy to get into trouble if you don't take great care.
+ * Either of the following situations will lead to deadlock:
*
* One of the work items currently on the workqueue needs to acquire
* a lock held by your code or its caller.
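
Note that __compiletime_warning() only fires when a call to the annotated
function survives optimization, which is what both wrappers in the next hunk
exploit. Below is a minimal userspace sketch of the mechanism, assuming GCC
(where the kernel's __compiletime_warning() expands to the "warning" function
attribute); all names are illustrative, and it is built with "gcc -O2 -c"
because, as in the kernel, the real definition would live in another
translation unit:

	/* Stand-in for the kernel's __compiletime_warning(). */
	#define compiletime_warning(msg) __attribute__((__warning__(msg)))

	/* Declared but deliberately not defined here, mirroring how the
	 * kernel keeps the empty definition in kernel/workqueue.c. */
	extern void warn_flush(void)
		compiletime_warning("Please avoid flushing system-wide workqueues.");

	void bad_flush(void)
	{
		warn_flush();	/* call remains in the object code: warning fires */
	}

	void good_flush(void)
	{
		if (0)		/* branch folded away at -O2: no call, no warning */
			warn_flush();
	}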
@@ -586,11 +594,51 @@ static inline bool schedule_work(struct work_struct *work)
* need to know that a particular work item isn't queued and isn't running.
* In such cases you should use cancel_delayed_work_sync() or
* cancel_work_sync() instead.
+ *
+ * Please stop calling this function! A conversion to stop flushing system-wide
+ * workqueues is in progress. This function will be removed once all in-tree
+ * users have stopped calling it.
*/
-static inline void flush_scheduled_work(void)
-{
- flush_workqueue(system_wq);
-}
+/*
+ * The background of commit 771c035372a036f8 ("deprecate the
+ * '__deprecated' attribute warnings entirely and for good") is that,
+ * since Linus builds all modules between every single pull he does,
+ * the standard kernel build needs to be _clean_ in order to be able to
+ * notice when new problems happen. Therefore, don't emit a warning
+ * while there are in-tree users.
+ */
+#define flush_scheduled_work() \
+({ \
+ if (0) \
+ __warn_flushing_systemwide_wq(); \
+ __flush_workqueue(system_wq); \
+})
+
+/*
+ * Although there are no longer any in-tree callers, for now just emit a
+ * warning in order to give out-of-tree callers time to update.
+ */
+#define flush_workqueue(wq) \
+({ \
+ struct workqueue_struct *_wq = (wq); \
+ \
+ if ((__builtin_constant_p(_wq == system_wq) && \
+ _wq == system_wq) || \
+ (__builtin_constant_p(_wq == system_highpri_wq) && \
+ _wq == system_highpri_wq) || \
+ (__builtin_constant_p(_wq == system_long_wq) && \
+ _wq == system_long_wq) || \
+ (__builtin_constant_p(_wq == system_unbound_wq) && \
+ _wq == system_unbound_wq) || \
+ (__builtin_constant_p(_wq == system_freezable_wq) && \
+ _wq == system_freezable_wq) || \
+ (__builtin_constant_p(_wq == system_power_efficient_wq) && \
+ _wq == system_power_efficient_wq) || \
+ (__builtin_constant_p(_wq == system_freezable_power_efficient_wq) && \
+ _wq == system_freezable_power_efficient_wq)) \
+ __warn_flushing_systemwide_wq(); \
+ __flush_workqueue(_wq); \
+})
/**
* schedule_delayed_work_on - queue work in global workqueue on CPU after delay
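
The flush_workqueue() wrapper above relies on __builtin_constant_p() folding
to 1 only when the optimizer can prove that the argument is one of the
system-wide workqueues, so a workqueue pointer that is only known at run time
compiles without any warning; the "when possible" in the comment concedes that
the check is best-effort. A condensed userspace sketch of the same pattern,
assuming GCC at -O2 and using illustrative names:

	#define compiletime_warning(msg) __attribute__((__warning__(msg)))

	struct queue;

	extern struct queue *system_q;			/* stand-in for system_wq */
	extern void real_flush(struct queue *q);	/* stand-in for __flush_workqueue() */
	extern void warn_systemwide_flush(void)
		compiletime_warning("avoid flushing the system-wide queue");

	/*
	 * Warn only when the compiler can prove at compile time that the
	 * caller passed the system-wide queue; otherwise
	 * __builtin_constant_p() folds to 0 and the dead warning call is
	 * eliminated.
	 */
	#define flush(q)						\
	({								\
		struct queue *_q = (q);					\
									\
		if (__builtin_constant_p(_q == system_q) && _q == system_q) \
			warn_systemwide_flush();			\
		real_flush(_q);						\
	})

	void flush_system(void)          { flush(system_q); }	/* warns */
	void flush_mine(struct queue *q) { flush(q); }		/* compiles cleanly */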
diff --git a/include/trace/events/workqueue.h b/include/trace/events/workqueue.h
index 6154a2e72bce..262d52021c23 100644
--- a/include/trace/events/workqueue.h
+++ b/include/trace/events/workqueue.h
@@ -22,7 +22,7 @@ struct pool_workqueue;
*/
TRACE_EVENT(workqueue_queue_work,
- TP_PROTO(unsigned int req_cpu, struct pool_workqueue *pwq,
+ TP_PROTO(int req_cpu, struct pool_workqueue *pwq,
struct work_struct *work),
TP_ARGS(req_cpu, pwq, work),
@@ -31,8 +31,8 @@ TRACE_EVENT(workqueue_queue_work,
__field( void *, work )
__field( void *, function)
__string( workqueue, pwq->wq->name)
- __field( unsigned int, req_cpu )
- __field( unsigned int, cpu )
+ __field( int, req_cpu )
+ __field( int, cpu )
),
TP_fast_assign(
@@ -43,7 +43,7 @@ TRACE_EVENT(workqueue_queue_work,
__entry->cpu = pwq->pool->cpu;
),
- TP_printk("work struct=%p function=%ps workqueue=%s req_cpu=%u cpu=%u",
+ TP_printk("work struct=%p function=%ps workqueue=%s req_cpu=%d cpu=%d",
__entry->work, __entry->function, __get_str(workqueue),
__entry->req_cpu, __entry->cpu)
);
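
For context on the signedness change above: both req_cpu and the pool's cpu
can hold negative sentinel values (in current kernels an unbound pool stores
-1 in pool->cpu), and recording them as unsigned int made such values print as
huge numbers. A tiny standalone illustration of the printing difference:

	#include <stdio.h>

	int main(void)
	{
		int cpu = -1;		/* e.g. the cpu field of an unbound pool */
		unsigned int ucpu = (unsigned int)cpu;

		printf("as %%d: %d\n", cpu);	/* prints -1 */
		printf("as %%u: %u\n", ucpu);	/* prints 4294967295 */
		return 0;
	}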
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 4056f2a3f9d5..1ea50f6be843 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2788,13 +2788,13 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
}
/**
- * flush_workqueue - ensure that any scheduled work has run to completion.
+ * __flush_workqueue - ensure that any scheduled work has run to completion.
* @wq: workqueue to flush
*
* This function sleeps until all work items which were queued on entry
* have finished execution, but it is not livelocked by new incoming ones.
*/
-void flush_workqueue(struct workqueue_struct *wq)
+void __flush_workqueue(struct workqueue_struct *wq)
{
struct wq_flusher this_flusher = {
.list = LIST_HEAD_INIT(this_flusher.list),
@@ -2943,7 +2943,7 @@ void flush_workqueue(struct workqueue_struct *wq)
out_unlock:
mutex_unlock(&wq->mutex);
}
-EXPORT_SYMBOL(flush_workqueue);
+EXPORT_SYMBOL(__flush_workqueue);
/**
* drain_workqueue - drain a workqueue
@@ -2971,7 +2971,7 @@ void drain_workqueue(struct workqueue_struct *wq)
wq->flags |= __WQ_DRAINING;
mutex_unlock(&wq->mutex);
reflush:
- flush_workqueue(wq);
+ __flush_workqueue(wq);
mutex_lock(&wq->mutex);
@@ -6111,3 +6111,11 @@ void __init workqueue_init(void)
wq_online = true;
wq_watchdog_init();
}
+
+/*
+ * Despite its name, this is a no-op function which is here only to avoid a
+ * link error. Since the compile-time warning may fail to catch every caller,
+ * we will need to emit a run-time warning from __flush_workqueue().
+ */
+void __warn_flushing_systemwide_wq(void) { }
+EXPORT_SYMBOL(__warn_flushing_systemwide_wq);
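
For reference, the conversion described in the LKML thread linked above boils
down to giving the caller its own workqueue, so that a flush only waits for
its own work items and never trips the new compile-time check. A hedged sketch
of such a conversion (the mydrv_* names are hypothetical and not part of this
patch):

	#include <linux/module.h>
	#include <linux/workqueue.h>

	static struct workqueue_struct *mydrv_wq;
	static struct work_struct mydrv_work;

	static void mydrv_work_fn(struct work_struct *work)
	{
		/* deferred work that previously ran on system_wq */
	}

	static int __init mydrv_init(void)
	{
		mydrv_wq = alloc_workqueue("mydrv", 0, 0);
		if (!mydrv_wq)
			return -ENOMEM;
		INIT_WORK(&mydrv_work, mydrv_work_fn);
		queue_work(mydrv_wq, &mydrv_work);	/* was: schedule_work() */
		return 0;
	}

	static void __exit mydrv_exit(void)
	{
		/* was: flush_scheduled_work(); a private workqueue is not a
		 * compile-time system_wq, so no warning is emitted. */
		flush_workqueue(mydrv_wq);
		destroy_workqueue(mydrv_wq);	/* also drains remaining work */
	}

	module_init(mydrv_init);
	module_exit(mydrv_exit);
	MODULE_LICENSE("GPL");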