author    Tejun Heo <tj@kernel.org>  2010-07-21 00:09:01 +0400
committer Tejun Heo <tj@kernel.org>  2010-07-23 00:58:47 +0400
commit    8af7c12436803291c90295259db23d371a7ad9cc (patch)
tree      5e75360876ac5783a3e64bd35a1715847d90e9ce /fs/fscache
parent    8b8edefa2fffbff97f9eec8b70e78ae23abad1a0 (diff)
download  linux-8af7c12436803291c90295259db23d371a7ad9cc.tar.xz
fscache: convert operation to use workqueue instead of slow-work
Make fscache operations use only the workqueue mechanism instead of a combination of workqueue and slow-work. FSCACHE_OP_SLOW is dropped and FSCACHE_OP_FAST is renamed to FSCACHE_OP_ASYNC, which uses the newly added fscache_op_wq workqueue to execute op->processor(). fscache_operation_init_slow() is dropped and fscache_operation_init() now takes the @processor argument directly.

* An unbound workqueue is used.

* fscache_retrieval_work() is no longer necessary, as OP_ASYNC now does the equivalent thing.

* The sysctl fscache.operation_max_active is added to control concurrency. The default value is nr_cpus clamped between 2 and WQ_UNBOUND_MAX_ACTIVE.

* debugfs support is dropped for now. A tracing-API-based debug facility is planned to be added.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: David Howells <dhowells@redhat.com>
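To make the API change concrete, here is a minimal sketch of the new caller pattern, modeled on the __fscache_attr_changed() hunk in this patch; my_attr_processor is a hypothetical callback named for illustration only:

        struct fscache_operation *op;

        op = kzalloc(sizeof(*op), GFP_KERNEL);
        if (!op)
                return -ENOMEM;

        /* the processor callback is now passed to fscache_operation_init()
         * directly; the separate fscache_operation_init_slow() step is gone */
        fscache_operation_init(op, my_attr_processor, NULL);

        /* FSCACHE_OP_ASYNC replaces FSCACHE_OP_FAST/FSCACHE_OP_SLOW; on
         * fscache_enqueue_operation(), op->work is queued on fscache_op_wq */
        op->flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_EXCLUSIVE);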
Diffstat (limited to 'fs/fscache')
-rw-r--r--  fs/fscache/internal.h    1
-rw-r--r--  fs/fscache/main.c       23
-rw-r--r--  fs/fscache/operation.c  67
-rw-r--r--  fs/fscache/page.c       36
4 files changed, 40 insertions(+), 87 deletions(-)
diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
index 6e0b5fb25231..6a026441c5a6 100644
--- a/fs/fscache/internal.h
+++ b/fs/fscache/internal.h
@@ -83,6 +83,7 @@ extern unsigned fscache_defer_create;
extern unsigned fscache_debug;
extern struct kobject *fscache_root;
extern struct workqueue_struct *fscache_object_wq;
+extern struct workqueue_struct *fscache_op_wq;
DECLARE_PER_CPU(wait_queue_head_t, fscache_object_cong_wait);
static inline bool fscache_object_congested(void)
diff --git a/fs/fscache/main.c b/fs/fscache/main.c
index bb8d4c35c7a2..44d13ddab2cc 100644
--- a/fs/fscache/main.c
+++ b/fs/fscache/main.c
@@ -42,11 +42,13 @@ MODULE_PARM_DESC(fscache_debug,
struct kobject *fscache_root;
struct workqueue_struct *fscache_object_wq;
+struct workqueue_struct *fscache_op_wq;
DEFINE_PER_CPU(wait_queue_head_t, fscache_object_cong_wait);
/* these values serve as lower bounds, will be adjusted in fscache_init() */
static unsigned fscache_object_max_active = 4;
+static unsigned fscache_op_max_active = 2;
#ifdef CONFIG_SYSCTL
static struct ctl_table_header *fscache_sysctl_header;
@@ -74,6 +76,14 @@ ctl_table fscache_sysctls[] = {
.proc_handler = fscache_max_active_sysctl,
.extra1 = &fscache_object_wq,
},
+ {
+ .procname = "operation_max_active",
+ .data = &fscache_op_max_active,
+ .maxlen = sizeof(unsigned),
+ .mode = 0644,
+ .proc_handler = fscache_max_active_sysctl,
+ .extra1 = &fscache_op_wq,
+ },
{}
};
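This entry exposes the new limit as /proc/sys/fscache/operation_max_active, sharing fscache_max_active_sysctl with the existing object_max_active knob (note .extra1 pointing at the workqueue to adjust), so the concurrency of fscache_op_wq can evidently be retuned at runtime, e.g. with: echo 8 > /proc/sys/fscache/operation_max_active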
@@ -110,6 +120,16 @@ static int __init fscache_init(void)
if (!fscache_object_wq)
goto error_object_wq;
+ fscache_op_max_active =
+ clamp_val(fscache_object_max_active / 2,
+ fscache_op_max_active, WQ_UNBOUND_MAX_ACTIVE);
+
+ ret = -ENOMEM;
+ fscache_op_wq = alloc_workqueue("fscache_operation", WQ_UNBOUND,
+ fscache_op_max_active);
+ if (!fscache_op_wq)
+ goto error_op_wq;
+
for_each_possible_cpu(cpu)
init_waitqueue_head(&per_cpu(fscache_object_cong_wait, cpu));
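A quick worked example of the clamp above, assuming the earlier (not shown) part of fscache_init() has already raised fscache_object_max_active to the CPU count: a 16-CPU machine gets clamp_val(16 / 2, 2, WQ_UNBOUND_MAX_ACTIVE) = 8 concurrent operations, while small machines bottom out at the floor of 2, matching the "nr_cpus clamped between 2 and WQ_UNBOUND_MAX_ACTIVE" default described in the commit message.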
@@ -152,6 +172,8 @@ error_sysctl:
#endif
fscache_proc_cleanup();
error_proc:
+ destroy_workqueue(fscache_op_wq);
+error_op_wq:
destroy_workqueue(fscache_object_wq);
error_object_wq:
slow_work_unregister_user(THIS_MODULE);
@@ -172,6 +194,7 @@ static void __exit fscache_exit(void)
kmem_cache_destroy(fscache_cookie_jar);
unregister_sysctl_table(fscache_sysctl_header);
fscache_proc_cleanup();
+ destroy_workqueue(fscache_op_wq);
destroy_workqueue(fscache_object_wq);
slow_work_unregister_user(THIS_MODULE);
printk(KERN_NOTICE "FS-Cache: Unloaded\n");
diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
index f17cecafae44..b9f34eaede09 100644
--- a/fs/fscache/operation.c
+++ b/fs/fscache/operation.c
@@ -42,16 +42,12 @@ void fscache_enqueue_operation(struct fscache_operation *op)
fscache_stat(&fscache_n_op_enqueue);
switch (op->flags & FSCACHE_OP_TYPE) {
- case FSCACHE_OP_FAST:
- _debug("queue fast");
+ case FSCACHE_OP_ASYNC:
+ _debug("queue async");
atomic_inc(&op->usage);
- if (!schedule_work(&op->fast_work))
+ if (!queue_work(fscache_op_wq, &op->work))
fscache_put_operation(op);
break;
- case FSCACHE_OP_SLOW:
- _debug("queue slow");
- slow_work_enqueue(&op->slow_work);
- break;
case FSCACHE_OP_MYTHREAD:
_debug("queue for caller's attention");
break;
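Note the reference discipline carried over from the old FAST path: a usage reference is taken before queue_work(), and if queue_work() returns false because the work item is already pending, the surplus reference is dropped straight away, so a queued operation always carries exactly one reference that the work function must put when it runs.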
@@ -455,36 +451,13 @@ void fscache_operation_gc(struct work_struct *work)
}
/*
- * allow the slow work item processor to get a ref on an operation
- */
-static int fscache_op_get_ref(struct slow_work *work)
-{
- struct fscache_operation *op =
- container_of(work, struct fscache_operation, slow_work);
-
- atomic_inc(&op->usage);
- return 0;
-}
-
-/*
- * allow the slow work item processor to discard a ref on an operation
- */
-static void fscache_op_put_ref(struct slow_work *work)
-{
- struct fscache_operation *op =
- container_of(work, struct fscache_operation, slow_work);
-
- fscache_put_operation(op);
-}
-
-/*
- * execute an operation using the slow thread pool to provide processing context
- * - the caller holds a ref to this object, so we don't need to hold one
+ * execute an operation using fs_op_wq to provide processing context -
+ * the caller holds a ref to this object, so we don't need to hold one
*/
-static void fscache_op_execute(struct slow_work *work)
+void fscache_op_work_func(struct work_struct *work)
{
struct fscache_operation *op =
- container_of(work, struct fscache_operation, slow_work);
+ container_of(work, struct fscache_operation, work);
unsigned long start;
_enter("{OBJ%x OP%x,%d}",
@@ -494,31 +467,7 @@ static void fscache_op_execute(struct slow_work *work)
start = jiffies;
op->processor(op);
fscache_hist(fscache_ops_histogram, start);
+ fscache_put_operation(op);
_leave("");
}
-
-/*
- * describe an operation for slow-work debugging
- */
-#ifdef CONFIG_SLOW_WORK_DEBUG
-static void fscache_op_desc(struct slow_work *work, struct seq_file *m)
-{
- struct fscache_operation *op =
- container_of(work, struct fscache_operation, slow_work);
-
- seq_printf(m, "FSC: OBJ%x OP%x: %s/%s fl=%lx",
- op->object->debug_id, op->debug_id,
- op->name, op->state, op->flags);
-}
-#endif
-
-const struct slow_work_ops fscache_op_slow_work_ops = {
- .owner = THIS_MODULE,
- .get_ref = fscache_op_get_ref,
- .put_ref = fscache_op_put_ref,
- .execute = fscache_op_execute,
-#ifdef CONFIG_SLOW_WORK_DEBUG
- .desc = fscache_op_desc,
-#endif
-};
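The fscache_put_operation() added at the end of fscache_op_work_func() drops the reference taken in fscache_enqueue_operation(); with the work item carrying its own reference that way, slow-work's get_ref/put_ref hooks and the fscache_op_slow_work_ops table removed above need no workqueue counterpart.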
diff --git a/fs/fscache/page.c b/fs/fscache/page.c
index 723b889fd219..41c441c2058d 100644
--- a/fs/fscache/page.c
+++ b/fs/fscache/page.c
@@ -105,7 +105,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
page_busy:
/* we might want to wait here, but that could deadlock the allocator as
- * the slow-work threads writing to the cache may all end up sleeping
+ * the work threads writing to the cache may all end up sleeping
* on memory allocation */
fscache_stat(&fscache_n_store_vmscan_busy);
return false;
@@ -188,9 +188,8 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
return -ENOMEM;
}
- fscache_operation_init(op, NULL);
- fscache_operation_init_slow(op, fscache_attr_changed_op);
- op->flags = FSCACHE_OP_SLOW | (1 << FSCACHE_OP_EXCLUSIVE);
+ fscache_operation_init(op, fscache_attr_changed_op, NULL);
+ op->flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_EXCLUSIVE);
fscache_set_op_name(op, "Attr");
spin_lock(&cookie->lock);
@@ -218,24 +217,6 @@ nobufs:
EXPORT_SYMBOL(__fscache_attr_changed);
/*
- * handle secondary execution given to a retrieval op on behalf of the
- * cache
- */
-static void fscache_retrieval_work(struct work_struct *work)
-{
- struct fscache_retrieval *op =
- container_of(work, struct fscache_retrieval, op.fast_work);
- unsigned long start;
-
- _enter("{OP%x}", op->op.debug_id);
-
- start = jiffies;
- op->op.processor(&op->op);
- fscache_hist(fscache_ops_histogram, start);
- fscache_put_operation(&op->op);
-}
-
-/*
* release a retrieval op reference
*/
static void fscache_release_retrieval_op(struct fscache_operation *_op)
@@ -269,13 +250,12 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
return NULL;
}
- fscache_operation_init(&op->op, fscache_release_retrieval_op);
+ fscache_operation_init(&op->op, NULL, fscache_release_retrieval_op);
op->op.flags = FSCACHE_OP_MYTHREAD | (1 << FSCACHE_OP_WAITING);
op->mapping = mapping;
op->end_io_func = end_io_func;
op->context = context;
op->start_time = jiffies;
- INIT_WORK(&op->op.fast_work, fscache_retrieval_work);
INIT_LIST_HEAD(&op->to_do);
fscache_set_op_name(&op->op, "Retr");
return op;
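Retrieval operations themselves remain FSCACHE_OP_MYTHREAD, driven from the caller's context; dropping the INIT_WORK() of fast_work here is safe because, per the commit message, OP_ASYNC now provides the equivalent of the secondary-execution path that fscache_retrieval_work() used to implement.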
@@ -795,9 +775,9 @@ int __fscache_write_page(struct fscache_cookie *cookie,
if (!op)
goto nomem;
- fscache_operation_init(&op->op, fscache_release_write_op);
- fscache_operation_init_slow(&op->op, fscache_write_op);
- op->op.flags = FSCACHE_OP_SLOW | (1 << FSCACHE_OP_WAITING);
+ fscache_operation_init(&op->op, fscache_write_op,
+ fscache_release_write_op);
+ op->op.flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_WAITING);
fscache_set_op_name(&op->op, "Write1");
ret = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
@@ -852,7 +832,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
fscache_stat(&fscache_n_store_ops);
fscache_stat(&fscache_n_stores_ok);
- /* the slow work queue now carries its own ref on the object */
+ /* the work queue now carries its own ref on the object */
fscache_put_operation(&op->op);
_leave(" = 0");
return 0;