path: root/include/linux/workqueue.h
Diffstat (limited to 'include/linux/workqueue.h')
-rw-r--r--  include/linux/workqueue.h | 220
1 file changed, 122 insertions(+), 98 deletions(-)
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index af155450cabb..2b58905d3504 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -16,6 +16,7 @@ struct workqueue_struct;
struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);
+void delayed_work_timer_fn(unsigned long __data);
/*
* The first word is the work queue pointer and the flags rolled into
@@ -67,9 +68,18 @@ enum {
WORK_STRUCT_FLAG_BITS = WORK_STRUCT_COLOR_SHIFT +
WORK_STRUCT_COLOR_BITS,
+ /* data contains off-queue information when !WORK_STRUCT_CWQ */
+ WORK_OFFQ_FLAG_BASE = WORK_STRUCT_FLAG_BITS,
+
+ WORK_OFFQ_CANCELING = (1 << WORK_OFFQ_FLAG_BASE),
+
+ WORK_OFFQ_FLAG_BITS = 1,
+ WORK_OFFQ_CPU_SHIFT = WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,
+
+ /* convenience constants */
WORK_STRUCT_FLAG_MASK = (1UL << WORK_STRUCT_FLAG_BITS) - 1,
WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
- WORK_STRUCT_NO_CPU = WORK_CPU_NONE << WORK_STRUCT_FLAG_BITS,
+ WORK_STRUCT_NO_CPU = (unsigned long)WORK_CPU_NONE << WORK_OFFQ_CPU_SHIFT,
/* bit mask for work_busy() return values */
WORK_BUSY_PENDING = 1 << 0,
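
[Annotation: with WORK_STRUCT_CWQ clear, the work's data word now carries the last CPU plus off-queue flags instead of a cwq pointer. A minimal sketch of the encoding implied by the shifts above; the helper names here are illustrative, not part of this header:

/* hypothetical helpers showing the off-queue data layout */
static inline unsigned long offq_pack(unsigned int cpu, unsigned long flags)
{
	return ((unsigned long)cpu << WORK_OFFQ_CPU_SHIFT) | flags;
}

static inline unsigned int offq_cpu(unsigned long data)
{
	return data >> WORK_OFFQ_CPU_SHIFT;
}
]
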
@@ -92,6 +102,7 @@ struct work_struct {
struct delayed_work {
struct work_struct work;
struct timer_list timer;
+ int cpu;
};
static inline struct delayed_work *to_delayed_work(struct work_struct *work)
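
[Annotation: the new cpu member records the CPU requested at queue_delayed_work_on() time so the now-exported delayed_work_timer_fn() can requeue there when the timer fires. A simplified sketch of the data flow; __queue_work() and get_work_cwq() are internal to kernel/workqueue.c, and the real callback also runs with IRQs disabled since the timer is TIMER_IRQSAFE:

void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;

	/* requeue on the CPU remembered at queue time */
	__queue_work(dwork->cpu, get_work_cwq(&dwork->work)->wq, &dwork->work);
}
]
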
@@ -115,41 +126,38 @@ struct execute_work {
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif
-#define __WORK_INITIALIZER(n, f) { \
- .data = WORK_DATA_STATIC_INIT(), \
- .entry = { &(n).entry, &(n).entry }, \
- .func = (f), \
- __WORK_INIT_LOCKDEP_MAP(#n, &(n)) \
+#define __WORK_INITIALIZER(n, f) { \
+ .data = WORK_DATA_STATIC_INIT(), \
+ .entry = { &(n).entry, &(n).entry }, \
+ .func = (f), \
+ __WORK_INIT_LOCKDEP_MAP(#n, &(n)) \
}
-#define __DELAYED_WORK_INITIALIZER(n, f) { \
- .work = __WORK_INITIALIZER((n).work, (f)), \
- .timer = TIMER_INITIALIZER(NULL, 0, 0), \
+#define __DELAYED_WORK_INITIALIZER(n, f, tflags) { \
+ .work = __WORK_INITIALIZER((n).work, (f)), \
+ .timer = __TIMER_INITIALIZER(delayed_work_timer_fn, \
+ 0, (unsigned long)&(n), \
+ (tflags) | TIMER_IRQSAFE), \
}
-#define __DEFERRED_WORK_INITIALIZER(n, f) { \
- .work = __WORK_INITIALIZER((n).work, (f)), \
- .timer = TIMER_DEFERRED_INITIALIZER(NULL, 0, 0), \
- }
-
-#define DECLARE_WORK(n, f) \
+#define DECLARE_WORK(n, f) \
struct work_struct n = __WORK_INITIALIZER(n, f)
-#define DECLARE_DELAYED_WORK(n, f) \
- struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f)
+#define DECLARE_DELAYED_WORK(n, f) \
+ struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)
-#define DECLARE_DEFERRED_WORK(n, f) \
- struct delayed_work n = __DEFERRED_WORK_INITIALIZER(n, f)
+#define DECLARE_DEFERRABLE_WORK(n, f) \
+ struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)
/*
* initialize a work item's function pointer
*/
-#define PREPARE_WORK(_work, _func) \
- do { \
- (_work)->func = (_func); \
+#define PREPARE_WORK(_work, _func) \
+ do { \
+ (_work)->func = (_func); \
} while (0)
-#define PREPARE_DELAYED_WORK(_work, _func) \
+#define PREPARE_DELAYED_WORK(_work, _func) \
PREPARE_WORK(&(_work)->work, (_func))
#ifdef CONFIG_DEBUG_OBJECTS_WORK
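
[Annotation: DECLARE_DEFERRED_WORK is renamed to DECLARE_DEFERRABLE_WORK, and the initializer now wires the timer to delayed_work_timer_fn() with TIMER_IRQSAFE set, so the timer no longer needs manual setup. A static definition now looks like this (my_poll_fn is a placeholder):

static void my_poll_fn(struct work_struct *work);	/* hypothetical */
static DECLARE_DEFERRABLE_WORK(my_poll_work, my_poll_fn);
]
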
@@ -179,7 +187,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
\
__init_work((_work), _onstack); \
(_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
- lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0);\
+ lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0); \
INIT_LIST_HEAD(&(_work)->entry); \
PREPARE_WORK((_work), (_func)); \
} while (0)
@@ -193,33 +201,44 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
} while (0)
#endif
-#define INIT_WORK(_work, _func) \
- do { \
- __INIT_WORK((_work), (_func), 0); \
+#define INIT_WORK(_work, _func) \
+ do { \
+ __INIT_WORK((_work), (_func), 0); \
} while (0)
-#define INIT_WORK_ONSTACK(_work, _func) \
- do { \
- __INIT_WORK((_work), (_func), 1); \
+#define INIT_WORK_ONSTACK(_work, _func) \
+ do { \
+ __INIT_WORK((_work), (_func), 1); \
} while (0)
-#define INIT_DELAYED_WORK(_work, _func) \
- do { \
- INIT_WORK(&(_work)->work, (_func)); \
- init_timer(&(_work)->timer); \
+#define __INIT_DELAYED_WORK(_work, _func, _tflags) \
+ do { \
+ INIT_WORK(&(_work)->work, (_func)); \
+ __setup_timer(&(_work)->timer, delayed_work_timer_fn, \
+ (unsigned long)(_work), \
+ (_tflags) | TIMER_IRQSAFE); \
} while (0)
-#define INIT_DELAYED_WORK_ONSTACK(_work, _func) \
- do { \
- INIT_WORK_ONSTACK(&(_work)->work, (_func)); \
- init_timer_on_stack(&(_work)->timer); \
+#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags) \
+ do { \
+ INIT_WORK_ONSTACK(&(_work)->work, (_func)); \
+ __setup_timer_on_stack(&(_work)->timer, \
+ delayed_work_timer_fn, \
+ (unsigned long)(_work), \
+ (_tflags) | TIMER_IRQSAFE); \
} while (0)
-#define INIT_DELAYED_WORK_DEFERRABLE(_work, _func) \
- do { \
- INIT_WORK(&(_work)->work, (_func)); \
- init_timer_deferrable(&(_work)->timer); \
- } while (0)
+#define INIT_DELAYED_WORK(_work, _func) \
+ __INIT_DELAYED_WORK(_work, _func, 0)
+
+#define INIT_DELAYED_WORK_ONSTACK(_work, _func) \
+ __INIT_DELAYED_WORK_ONSTACK(_work, _func, 0)
+
+#define INIT_DEFERRABLE_WORK(_work, _func) \
+ __INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE)
+
+#define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func) \
+ __INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE)
/**
* work_pending - Find out whether a work item is currently pending
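
[Annotation: runtime initialization follows the same rename, INIT_DELAYED_WORK_DEFERRABLE() becoming INIT_DEFERRABLE_WORK(); the separate init_timer_deferrable() call disappears because __setup_timer() is done inside the macro. A hypothetical driver-side use, with all names illustrative:

struct my_dev {				/* hypothetical driver state */
	struct delayed_work poll_work;
};

static void my_poll_fn(struct work_struct *work)
{
	struct my_dev *dev = container_of(to_delayed_work(work),
					  struct my_dev, poll_work);
	/* ... poll hardware, then re-arm ... */
	schedule_delayed_work(&dev->poll_work, HZ);
}

static void my_dev_start(struct my_dev *dev)
{
	INIT_DEFERRABLE_WORK(&dev->poll_work, my_poll_fn);
	schedule_delayed_work(&dev->poll_work, HZ);
}
]
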
@@ -278,10 +297,6 @@ enum {
* system_long_wq is similar to system_wq but may host long running
* works. Queue flushing might take relatively long.
*
- * system_nrt_wq is non-reentrant and guarantees that any given work
- * item is never executed in parallel by multiple CPUs. Queue
- * flushing might take relatively long.
- *
* system_unbound_wq is an unbound workqueue. Workers are not bound to
* any specific CPU, not concurrency managed, and all queued works are
* executed immediately as long as max_active limit is not reached and
@@ -289,16 +304,25 @@ enum {
*
* system_freezable_wq is equivalent to system_wq except that it's
* freezable.
- *
- * system_nrt_freezable_wq is equivalent to system_nrt_wq except that
- * it's freezable.
*/
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_long_wq;
-extern struct workqueue_struct *system_nrt_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;
-extern struct workqueue_struct *system_nrt_freezable_wq;
+
+static inline struct workqueue_struct * __deprecated __system_nrt_wq(void)
+{
+ return system_wq;
+}
+
+static inline struct workqueue_struct * __deprecated __system_nrt_freezable_wq(void)
+{
+ return system_freezable_wq;
+}
+
+/* equivalent to system_wq and system_freezable_wq, deprecated */
+#define system_nrt_wq __system_nrt_wq()
+#define system_nrt_freezable_wq __system_nrt_freezable_wq()
extern struct workqueue_struct *
__alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
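
[Annotation: since workqueues are now non-reentrant by default, system_nrt_wq degenerates into system_wq and system_nrt_freezable_wq into system_freezable_wq; the macro shims keep old users compiling while the __deprecated wrappers emit a warning. Migration is a one-line change, e.g.:

/* before: queue_work(system_nrt_wq, &work);  (now warns) */
queue_work(system_wq, &work);	/* same non-reentrancy guarantee */
]
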
@@ -321,22 +345,22 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
* Pointer to the allocated workqueue on success, %NULL on failure.
*/
#ifdef CONFIG_LOCKDEP
-#define alloc_workqueue(fmt, flags, max_active, args...) \
-({ \
- static struct lock_class_key __key; \
- const char *__lock_name; \
- \
- if (__builtin_constant_p(fmt)) \
- __lock_name = (fmt); \
- else \
- __lock_name = #fmt; \
- \
- __alloc_workqueue_key((fmt), (flags), (max_active), \
- &__key, __lock_name, ##args); \
+#define alloc_workqueue(fmt, flags, max_active, args...) \
+({ \
+ static struct lock_class_key __key; \
+ const char *__lock_name; \
+ \
+ if (__builtin_constant_p(fmt)) \
+ __lock_name = (fmt); \
+ else \
+ __lock_name = #fmt; \
+ \
+ __alloc_workqueue_key((fmt), (flags), (max_active), \
+ &__key, __lock_name, ##args); \
})
#else
-#define alloc_workqueue(fmt, flags, max_active, args...) \
- __alloc_workqueue_key((fmt), (flags), (max_active), \
+#define alloc_workqueue(fmt, flags, max_active, args...) \
+ __alloc_workqueue_key((fmt), (flags), (max_active), \
NULL, NULL, ##args)
#endif
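
[Annotation: this hunk only realigns backslash continuations; alloc_workqueue() usage is unchanged. A typical call, names illustrative:

struct workqueue_struct *my_wq;

my_wq = alloc_workqueue("my_wq", WQ_MEM_RECLAIM, 1);
if (!my_wq)
	return -ENOMEM;
]
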
@@ -353,46 +377,50 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
* RETURNS:
* Pointer to the allocated workqueue on success, %NULL on failure.
*/
-#define alloc_ordered_workqueue(fmt, flags, args...) \
+#define alloc_ordered_workqueue(fmt, flags, args...) \
alloc_workqueue(fmt, WQ_UNBOUND | (flags), 1, ##args)
-#define create_workqueue(name) \
+#define create_workqueue(name) \
alloc_workqueue((name), WQ_MEM_RECLAIM, 1)
-#define create_freezable_workqueue(name) \
+#define create_freezable_workqueue(name) \
alloc_workqueue((name), WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
-#define create_singlethread_workqueue(name) \
+#define create_singlethread_workqueue(name) \
alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
extern void destroy_workqueue(struct workqueue_struct *wq);
-extern int queue_work(struct workqueue_struct *wq, struct work_struct *work);
-extern int queue_work_on(int cpu, struct workqueue_struct *wq,
+extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
struct work_struct *work);
-extern int queue_delayed_work(struct workqueue_struct *wq,
+extern bool queue_work(struct workqueue_struct *wq, struct work_struct *work);
+extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
struct delayed_work *work, unsigned long delay);
-extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
+extern bool queue_delayed_work(struct workqueue_struct *wq,
struct delayed_work *work, unsigned long delay);
+extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
+ struct delayed_work *dwork, unsigned long delay);
+extern bool mod_delayed_work(struct workqueue_struct *wq,
+ struct delayed_work *dwork, unsigned long delay);
extern void flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);
extern void flush_scheduled_work(void);
-extern int schedule_work(struct work_struct *work);
-extern int schedule_work_on(int cpu, struct work_struct *work);
-extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay);
-extern int schedule_delayed_work_on(int cpu, struct delayed_work *work,
- unsigned long delay);
+extern bool schedule_work_on(int cpu, struct work_struct *work);
+extern bool schedule_work(struct work_struct *work);
+extern bool schedule_delayed_work_on(int cpu, struct delayed_work *work,
+ unsigned long delay);
+extern bool schedule_delayed_work(struct delayed_work *work,
+ unsigned long delay);
extern int schedule_on_each_cpu(work_func_t func);
extern int keventd_up(void);
int execute_in_process_context(work_func_t fn, struct execute_work *);
extern bool flush_work(struct work_struct *work);
-extern bool flush_work_sync(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);
extern bool flush_delayed_work(struct delayed_work *dwork);
-extern bool flush_delayed_work_sync(struct delayed_work *work);
+extern bool cancel_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
extern void workqueue_set_max_active(struct workqueue_struct *wq,
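
[Annotation: the new mod_delayed_work()/mod_delayed_work_on() pair behaves like mod_timer() for delayed work: it (re)schedules the item to run after the given delay whether or not it is already pending, replacing the racy cancel-then-queue idiom. Roughly:

/* before (racy between the cancel and the queue): */
cancel_delayed_work(&dwork);
queue_delayed_work(wq, &dwork, new_delay);

/* after: */
mod_delayed_work(wq, &dwork, new_delay);
]
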
@@ -402,27 +430,11 @@ extern unsigned int work_cpu(struct work_struct *work);
extern unsigned int work_busy(struct work_struct *work);
/*
- * Kill off a pending schedule_delayed_work(). Note that the work callback
- * function may still be running on return from cancel_delayed_work(), unless
- * it returns 1 and the work doesn't re-arm itself. Run flush_workqueue() or
- * cancel_work_sync() to wait on it.
- */
-static inline bool cancel_delayed_work(struct delayed_work *work)
-{
- bool ret;
-
- ret = del_timer_sync(&work->timer);
- if (ret)
- work_clear_pending(&work->work);
- return ret;
-}
-
-/*
* Like above, but uses del_timer() instead of del_timer_sync(). This means,
* if it returns 0 the timer function may be running and the queueing is in
* progress.
*/
-static inline bool __cancel_delayed_work(struct delayed_work *work)
+static inline bool __deprecated __cancel_delayed_work(struct delayed_work *work)
{
bool ret;
@@ -432,6 +444,18 @@ static inline bool __cancel_delayed_work(struct delayed_work *work)
return ret;
}
+/* used to be different but now identical to flush_work(), deprecated */
+static inline bool __deprecated flush_work_sync(struct work_struct *work)
+{
+ return flush_work(work);
+}
+
+/* used to be different but now identical to flush_delayed_work(), deprecated */
+static inline bool __deprecated flush_delayed_work_sync(struct delayed_work *dwork)
+{
+ return flush_delayed_work(dwork);
+}
+
#ifndef CONFIG_SMP
static inline long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
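
[Annotation: flush_work() now waits for the last queued instance regardless of which CPU it ran on, which is the guarantee flush_work_sync() used to add; the inline shims above simply forward and warn via __deprecated. Old callers can drop the _sync suffix:

/* before: flush_work_sync(&work);  flush_delayed_work_sync(&dwork); */
flush_work(&work);
flush_delayed_work(&dwork);
]
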