author     Jens Axboe <axboe@kernel.dk>  2017-09-30 11:09:06 +0300
committer  Jens Axboe <axboe@kernel.dk>  2017-10-04 20:24:12 +0300
commit     85009b4f5f0399669a44f07cb9a5622c0e71d419 (patch)
tree       9aa95947a827cf53f7d48cf187b99fa4f03411d2 /include/linux/backing-dev-defs.h
parent     fc13457f74dcf054b0d17efb7b94b46fdf17f412 (diff)
download   linux-85009b4f5f0399669a44f07cb9a5622c0e71d419.tar.xz
writeback: eliminate work item allocation in wb_start_writeback()
Handle start-all writeback like we do periodic or kupdate style writeback - by marking the bdi_writeback as needing a full flush, and simply waking the thread. This eliminates the need to allocate and queue a specific work item just for this purpose.

After this change, we truly only ever have one of them running at any point in time. We mark the need to start all flushes, and the writeback thread will clear it once it has processed the request.

Reviewed-by: Jan Kara <jack@suse.cz>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
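To illustrate the mechanism the message describes, here is a sketch of what wb_start_writeback() reduces to after this patch: set a state bit, record the reason, and wake the flusher thread, with no work item allocation. This is an approximation rather than the applied hunk (that hunk lives in fs/fs-writeback.c, outside this diffstat-limited view), and it assumes the WB_start_all state bit introduced earlier in this series plus the existing wb_has_dirty_io() and wb_wakeup() helpers:

	void wb_start_writeback(struct bdi_writeback *wb, enum wb_reason reason)
	{
		if (!wb_has_dirty_io(wb))
			return;

		/*
		 * Only allow one start-all request to be pending or in
		 * flight at a time: the first caller sets WB_start_all,
		 * later callers return early until the flusher clears it.
		 */
		if (test_bit(WB_start_all, &wb->state) ||
		    test_and_set_bit(WB_start_all, &wb->state))
			return;

		wb->start_all_reason = reason;
		wb_wakeup(wb);
	}

The plain test_bit() ahead of test_and_set_bit() avoids dirtying the cacheline when the bit is already set, which matters for high-frequency callers such as vmscan.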
Diffstat (limited to 'include/linux/backing-dev-defs.h')
-rw-r--r--  include/linux/backing-dev-defs.h | 23
1 file changed, 23 insertions, 0 deletions
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index 420de5c7c7f9..b7c7be6f5986 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -45,6 +45,28 @@ enum wb_stat_item {
#define WB_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
/*
+ * why some writeback work was initiated
+ */
+enum wb_reason {
+ WB_REASON_BACKGROUND,
+ WB_REASON_VMSCAN,
+ WB_REASON_SYNC,
+ WB_REASON_PERIODIC,
+ WB_REASON_LAPTOP_TIMER,
+ WB_REASON_FREE_MORE_MEM,
+ WB_REASON_FS_FREE_SPACE,
+ /*
+ * There is no bdi forker thread any more, and this work is now
+ * done by an emergency worker. However, these tracepoints are
+ * visible to userland and we'll be exposing exactly the same
+ * information, so the name is kept despite the mismatch.
+ */
+ WB_REASON_FORKER_THREAD,
+
+ WB_REASON_MAX,
+};
+
+/*
* For cgroup writeback, multiple wb's may map to the same blkcg. Those
* wb's can operate mostly independently but should share the congested
* state. To facilitate such sharing, the congested state is tracked using
@@ -116,6 +138,7 @@ struct bdi_writeback {
struct fprop_local_percpu completions;
int dirty_exceeded;
+ enum wb_reason start_all_reason;
spinlock_t work_lock; /* protects work_list & dwork scheduling */
struct list_head work_list;
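For completeness, the clearing side described in the message ("the writeback thread will clear it once it has processed the request") would look roughly like the sketch below, run from the flusher thread's work loop in fs/fs-writeback.c. The helper name wb_check_start_all() and the get_nr_dirty_pages()/wb_split_bdi_pages() calls are assumptions for illustration, modeled on the neighboring kupdate/background checks:

	static long wb_check_start_all(struct bdi_writeback *wb)
	{
		long nr_pages;

		if (!test_bit(WB_start_all, &wb->state))
			return 0;

		nr_pages = get_nr_dirty_pages();
		if (nr_pages) {
			/* An on-stack work item, using the recorded reason */
			struct wb_writeback_work work = {
				.nr_pages	= wb_split_bdi_pages(wb, nr_pages),
				.sync_mode	= WB_SYNC_NONE,
				.range_cyclic	= 1,
				.reason		= wb->start_all_reason,
			};

			nr_pages = wb_writeback(wb, &work);
		}

		/*
		 * Clear the bit only after the request has been handled,
		 * so at most one start-all request is ever in flight.
		 */
		clear_bit(WB_start_all, &wb->state);
		return nr_pages;
	}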