path: root/mm/page-writeback.c
author		Jens Axboe <jens.axboe@oracle.com>	2009-09-17 21:59:14 +0400
committer	Jens Axboe <jens.axboe@oracle.com>	2009-09-21 17:40:33 +0400
commit		87c6a9b253520b66e7f5e8f67a37a701eaa51cee (patch)
tree		a3778632eabc961ce3b8c32ecfeeb48383cf78e0 /mm/page-writeback.c
parent		3542a5c0de3d5b33227214a692bf9b12e249078e (diff)
download	linux-87c6a9b253520b66e7f5e8f67a37a701eaa51cee.tar.xz
writeback: make balance_dirty_pages() gradually back more off
Currently it just sleeps for a very short time, only 1 jiffy. If we keep looping in there, delay for a little longer each time around, up to a maximum of 100 msec. That was the old limit for congestion wait.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
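The shape of that backoff is easy to demonstrate outside the kernel. Below is a minimal userspace sketch of the same progression, not kernel code: the HZ value, the loop count and the printout are illustrative assumptions, and only the pause arithmetic mirrors the patch.

#include <stdio.h>

#define HZ 1000				/* illustrative tick rate; not from the patch */

int main(void)
{
	unsigned long pause = 1;	/* start at 1 jiffy, as in the patch */
	unsigned long total = 0;
	int i;

	for (i = 0; i < 12; i++) {	/* pretend the dirty limits stay exceeded */
		total += pause;
		printf("pass %2d: sleep %3lu jiffies (%3lu ms), %4lu ms slept so far\n",
		       i, pause, pause * 1000 / HZ, total * 1000 / HZ);

		/* the patch's progression: double the pause, cap it at 100 ms */
		pause <<= 1;
		if (pause > HZ / 10)
			pause = HZ / 10;
	}
	return 0;
}

With HZ = 1000 the sleeps run 1, 2, 4, ..., 64 jiffies, after which every further pass is clamped to 100 jiffies (100 ms).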
Diffstat (limited to 'mm/page-writeback.c')
-rw-r--r--	mm/page-writeback.c	11
1 file changed, 10 insertions(+), 1 deletion(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 2585349469e0..d1ba46441053 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -485,6 +485,7 @@ static void balance_dirty_pages(struct address_space *mapping)
 	unsigned long bdi_thresh;
 	unsigned long pages_written = 0;
 	unsigned long write_chunk = sync_writeback_pages();
+	unsigned long pause = 1;
 
 	struct backing_dev_info *bdi = mapping->backing_dev_info;
 
@@ -561,7 +562,15 @@ static void balance_dirty_pages(struct address_space *mapping)
 		if (pages_written >= write_chunk)
 			break;		/* We've done our duty */
 
-		schedule_timeout_interruptible(1);
+		schedule_timeout_interruptible(pause);
+
+		/*
+		 * Increase the delay for each loop, up to our previous
+		 * default of taking a 100ms nap.
+		 */
+		pause <<= 1;
+		if (pause > HZ / 10)
+			pause = HZ / 10;
 	}
 
 	if (bdi_nr_reclaimable + bdi_nr_writeback < bdi_thresh &&
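The net effect of the hunks above: a task that keeps failing to make progress in balance_dirty_pages() no longer wakes up every jiffy. Each pass through the loop doubles the pause, and the HZ / 10 clamp bounds any single sleep at 100 ms regardless of the tick rate (100 jiffies at HZ = 1000, 25 at HZ = 250), which is the same ceiling the old congestion-wait path used.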