author    Qu Wenruo <quwenruo@cn.fujitsu.com>  2014-02-28 06:46:04 +0400
committer Josef Bacik <jbacik@fb.com>  2014-03-10 23:17:03 +0400
commit    1ca08976ae94f3594dd7303584581cf8099ce47e
tree      36266c4bd75ea9807f8936c40d1e13803b3fd83a /fs/btrfs/async-thread.c
parent    08a9ff3264181986d1d692a4e6fce3669700c9f8
btrfs: Add high priority workqueue support for btrfs_workqueue_struct
Add a high priority function to btrfs_workqueue. This is implemented by embedding a __btrfs_workqueue_struct for each priority level into btrfs_workqueue_struct, with helper functions to distinguish the normal priority wq from the high priority wq. The high priority wq is therefore completely independent from the normal workqueue.

Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
Tested-by: David Sterba <dsterba@suse.cz>
Signed-off-by: Josef Bacik <jbacik@fb.com>
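Condensed from the hunks below: the public btrfs_workqueue_struct becomes a thin wrapper around two independent __btrfs_workqueue_structs, and btrfs_queue_work() dispatches on a per-work flag. A minimal sketch of that layout and dispatch, restating what the diff adds (kernel context assumed):

struct btrfs_workqueue_struct {
	struct __btrfs_workqueue_struct *normal;	/* always allocated */
	struct __btrfs_workqueue_struct *high;	/* only when WQ_HIGHPRI is requested */
};

void btrfs_queue_work(struct btrfs_workqueue_struct *wq,
		      struct btrfs_work_struct *work)
{
	struct __btrfs_workqueue_struct *dest_wq;

	/* Route flagged work to the high priority wq when it exists,
	 * otherwise fall back to the normal one. */
	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags) && wq->high)
		dest_wq = wq->high;
	else
		dest_wq = wq->normal;
	__btrfs_queue_work(dest_wq, work);
}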
Diffstat (limited to 'fs/btrfs/async-thread.c')
 -rw-r--r--  fs/btrfs/async-thread.c  91
 1 file changed, 79 insertions(+), 12 deletions(-)
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 905de02e4386..193c84964db9 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -730,7 +730,7 @@ void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
spin_unlock_irqrestore(&worker->lock, flags);
}
-struct btrfs_workqueue_struct {
+struct __btrfs_workqueue_struct {
struct workqueue_struct *normal_wq;
/* List head pointing to ordered work list */
struct list_head ordered_list;
@@ -739,6 +739,38 @@ struct btrfs_workqueue_struct {
spinlock_t list_lock;
};
+struct btrfs_workqueue_struct {
+ struct __btrfs_workqueue_struct *normal;
+ struct __btrfs_workqueue_struct *high;
+};
+
+static inline struct __btrfs_workqueue_struct
+*__btrfs_alloc_workqueue(char *name, int flags, int max_active)
+{
+ struct __btrfs_workqueue_struct *ret = kzalloc(sizeof(*ret), GFP_NOFS);
+
+ if (unlikely(!ret))
+ return NULL;
+
+ if (flags & WQ_HIGHPRI)
+ ret->normal_wq = alloc_workqueue("%s-%s-high", flags,
+ max_active, "btrfs", name);
+ else
+ ret->normal_wq = alloc_workqueue("%s-%s", flags,
+ max_active, "btrfs", name);
+ if (unlikely(!ret->normal_wq)) {
+ kfree(ret);
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&ret->ordered_list);
+ spin_lock_init(&ret->list_lock);
+ return ret;
+}
+
+static inline void
+__btrfs_destroy_workqueue(struct __btrfs_workqueue_struct *wq);
+
struct btrfs_workqueue_struct *btrfs_alloc_workqueue(char *name,
int flags,
int max_active)
@@ -748,19 +780,25 @@ struct btrfs_workqueue_struct *btrfs_alloc_workqueue(char *name,
if (unlikely(!ret))
return NULL;
- ret->normal_wq = alloc_workqueue("%s-%s", flags, max_active,
- "btrfs", name);
- if (unlikely(!ret->normal_wq)) {
+ ret->normal = __btrfs_alloc_workqueue(name, flags & ~WQ_HIGHPRI,
+ max_active);
+ if (unlikely(!ret->normal)) {
kfree(ret);
return NULL;
}
- INIT_LIST_HEAD(&ret->ordered_list);
- spin_lock_init(&ret->list_lock);
+ if (flags & WQ_HIGHPRI) {
+ ret->high = __btrfs_alloc_workqueue(name, flags, max_active);
+ if (unlikely(!ret->high)) {
+ __btrfs_destroy_workqueue(ret->normal);
+ kfree(ret);
+ return NULL;
+ }
+ }
return ret;
}
-static void run_ordered_work(struct btrfs_workqueue_struct *wq)
+static void run_ordered_work(struct __btrfs_workqueue_struct *wq)
{
struct list_head *list = &wq->ordered_list;
struct btrfs_work_struct *work;
@@ -804,7 +842,7 @@ static void run_ordered_work(struct btrfs_workqueue_struct *wq)
static void normal_work_helper(struct work_struct *arg)
{
struct btrfs_work_struct *work;
- struct btrfs_workqueue_struct *wq;
+ struct __btrfs_workqueue_struct *wq;
int need_order = 0;
work = container_of(arg, struct btrfs_work_struct, normal_work);
@@ -840,8 +878,8 @@ void btrfs_init_work(struct btrfs_work_struct *work,
work->flags = 0;
}
-void btrfs_queue_work(struct btrfs_workqueue_struct *wq,
- struct btrfs_work_struct *work)
+static inline void __btrfs_queue_work(struct __btrfs_workqueue_struct *wq,
+ struct btrfs_work_struct *work)
{
unsigned long flags;
@@ -854,13 +892,42 @@ void btrfs_queue_work(struct btrfs_workqueue_struct *wq,
queue_work(wq->normal_wq, &work->normal_work);
}
-void btrfs_destroy_workqueue(struct btrfs_workqueue_struct *wq)
+void btrfs_queue_work(struct btrfs_workqueue_struct *wq,
+ struct btrfs_work_struct *work)
+{
+ struct __btrfs_workqueue_struct *dest_wq;
+
+ if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags) && wq->high)
+ dest_wq = wq->high;
+ else
+ dest_wq = wq->normal;
+ __btrfs_queue_work(dest_wq, work);
+}
+
+static inline void
+__btrfs_destroy_workqueue(struct __btrfs_workqueue_struct *wq)
{
destroy_workqueue(wq->normal_wq);
kfree(wq);
}
+void btrfs_destroy_workqueue(struct btrfs_workqueue_struct *wq)
+{
+ if (!wq)
+ return;
+ if (wq->high)
+ __btrfs_destroy_workqueue(wq->high);
+ __btrfs_destroy_workqueue(wq->normal);
+}
+
void btrfs_workqueue_set_max(struct btrfs_workqueue_struct *wq, int max)
{
- workqueue_set_max_active(wq->normal_wq, max);
+ workqueue_set_max_active(wq->normal->normal_wq, max);
+ if (wq->high)
+ workqueue_set_max_active(wq->high->normal_wq, max);
+}
+
+void btrfs_set_work_high_priority(struct btrfs_work_struct *work)
+{
+ set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
}
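For reference, a caller-side sketch of the resulting API. The queue name "endio", the work item, and its callbacks (my_func, my_ordered_func, my_ordered_free) are hypothetical, and btrfs_init_work() is assumed to keep the four-argument signature introduced earlier in this series:

struct btrfs_workqueue_struct *wq;
struct btrfs_work_struct work;	/* usually embedded in a larger struct */

/* Passing WQ_HIGHPRI makes btrfs_alloc_workqueue() create the extra
 * high priority wq alongside the normal one. */
wq = btrfs_alloc_workqueue("endio", WQ_HIGHPRI, 8);
if (!wq)
	return -ENOMEM;

btrfs_init_work(&work, my_func, my_ordered_func, my_ordered_free);

/* Set WORK_HIGH_PRIO_BIT so btrfs_queue_work() routes this item
 * to wq->high instead of wq->normal. */
btrfs_set_work_high_priority(&work);
btrfs_queue_work(wq, &work);

/* Adjusts max_active on both underlying kernel workqueues. */
btrfs_workqueue_set_max(wq, 16);

/* Destroys the high wq (if any), then the normal wq. */
btrfs_destroy_workqueue(wq);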