path: root/include/linux/workqueue.h
author	Tejun Heo <tj@kernel.org>	2010-09-16 12:36:00 +0400
committer	Tejun Heo <tj@kernel.org>	2010-09-19 19:51:05 +0400
commit	401a8d048eadfbe1b1c1bf53d3b614fcc894c61a (patch)
tree	7e1761149643e395a33619deb67ec99d8949a7a4 /include/linux/workqueue.h
parent	81dcaf6516d8bbd75b894862c8ae7bba04380cfe (diff)
download	linux-401a8d048eadfbe1b1c1bf53d3b614fcc894c61a.tar.xz
workqueue: cleanup flush/cancel functions
Make the following cleanup changes.

* Relocate flush/cancel function prototypes and definitions.

* Relocate wait_on_cpu_work() and wait_on_work() before
  try_to_grab_pending().  These will be used to implement
  flush_work_sync().

* Make all flush/cancel functions return bool instead of int.

* Update wait_on_cpu_work() and wait_on_work() to return %true if they
  actually waited.

* Add / update comments.

This patch doesn't cause any functional changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
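For illustration only (not part of the patch): a minimal sketch of how a caller
might use the bool return values this change introduces, e.g. to learn whether a
cancel actually had anything to cancel or wait for. The my_work / my_dwork items
and the my_driver_shutdown() function are hypothetical names, not from the patch.

	#include <linux/workqueue.h>
	#include <linux/printk.h>

	/* Hypothetical work items owned by some driver. */
	static struct work_struct my_work;
	static struct delayed_work my_dwork;

	static void my_driver_shutdown(void)
	{
		/*
		 * cancel_work_sync() now returns bool: %true if the work was
		 * pending (or running and had to be waited for), %false if it
		 * was already idle.
		 */
		if (cancel_work_sync(&my_work))
			pr_debug("my_work was still outstanding\n");

		/* Same convention for the delayed-work variant. */
		if (cancel_delayed_work_sync(&my_dwork))
			pr_debug("my_dwork was still outstanding\n");
	}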
Diffstat (limited to 'include/linux/workqueue.h')
-rw-r--r--	include/linux/workqueue.h	18
1 files changed, 9 insertions, 9 deletions
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 07c48925a8fc..bb9b683ea6fa 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -343,7 +343,6 @@ extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
extern void flush_workqueue(struct workqueue_struct *wq);
extern void flush_scheduled_work(void);
-extern void flush_delayed_work(struct delayed_work *work);
extern int schedule_work(struct work_struct *work);
extern int schedule_work_on(int cpu, struct work_struct *work);
@@ -355,8 +354,11 @@ extern int keventd_up(void);
int execute_in_process_context(work_func_t fn, struct execute_work *);
-extern int flush_work(struct work_struct *work);
-extern int cancel_work_sync(struct work_struct *work);
+extern bool flush_work(struct work_struct *work);
+extern bool cancel_work_sync(struct work_struct *work);
+
+extern bool flush_delayed_work(struct delayed_work *dwork);
+extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
extern void workqueue_set_max_active(struct workqueue_struct *wq,
int max_active);
@@ -370,9 +372,9 @@ extern unsigned int work_busy(struct work_struct *work);
* it returns 1 and the work doesn't re-arm itself. Run flush_workqueue() or
* cancel_work_sync() to wait on it.
*/
-static inline int cancel_delayed_work(struct delayed_work *work)
+static inline bool cancel_delayed_work(struct delayed_work *work)
{
- int ret;
+ bool ret;
ret = del_timer_sync(&work->timer);
if (ret)
@@ -385,9 +387,9 @@ static inline int cancel_delayed_work(struct delayed_work *work)
* if it returns 0 the timer function may be running and the queueing is in
* progress.
*/
-static inline int __cancel_delayed_work(struct delayed_work *work)
+static inline bool __cancel_delayed_work(struct delayed_work *work)
{
- int ret;
+ bool ret;
ret = del_timer(&work->timer);
if (ret)
@@ -395,8 +397,6 @@ static inline int __cancel_delayed_work(struct delayed_work *work)
return ret;
}
-extern int cancel_delayed_work_sync(struct delayed_work *work);
-
/* Obsolete. use cancel_delayed_work_sync() */
static inline
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,