author:    Dave Chinner <dchinner@redhat.com>    2013-08-28 04:18:11 +0400
committer: Al Viro <viro@zeniv.linux.org.uk>     2013-09-11 02:56:32 +0400
commit:    7dc19d5affd71370754a2c3d36b485810eaee7a1
tree:      56b84fb2f7462e81c8340dea6341c30ce247f798
parent:    1ab6c4997e04a00c50c6d786c2f046adc0d1f5de
download:  linux-7dc19d5affd71370754a2c3d36b485810eaee7a1.tar.xz
drivers: convert shrinkers to new count/scan API
Convert the driver shrinkers to the new API. Most changes are compile tested only because I either don't have the hardware or it's staging stuff.

FWIW, the md and android code is pretty good, but the rest of it makes me want to claw my eyes out. The amount of broken code I just encountered is mind boggling. I've added comments explaining what is broken, but I fear that some of the code would be best dealt with by being dragged behind the bike shed, buried in mud up to its neck and then run over repeatedly with a blunt lawn mower.

Special mention goes to the zcache/zcache2 drivers. They can't co-exist in the build at the same time, they are under different menu options in menuconfig, they only show up when you've got the right set of mm subsystem options configured, and so even compile testing is an exercise in pulling teeth. And that doesn't even take into account the horrible, broken code...

[glommer@openvz.org: fixes for i915, android lowmem, zcache, bcache]
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Glauber Costa <glommer@openvz.org>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Kent Overstreet <koverstreet@google.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Cc: Arve Hjønnevåg <arve@android.com>
Cc: Carlos Maiolino <cmaiolino@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Rientjes <rientjes@google.com>
Cc: Gleb Natapov <gleb@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: J. Bruce Fields <bfields@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Kent Overstreet <koverstreet@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
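For reference, the shape of the count/scan API that this series converts drivers to looks roughly like the sketch below (illustrative only, not part of this patch; the example_* names are hypothetical stand-ins for a driver's own bookkeeping). The old single ->shrink callback is split in two: ->count_objects reports how many objects could plausibly be freed, and ->scan_objects tries to free up to sc->nr_to_scan of them, returning the number actually freed or SHRINK_STOP when it cannot make progress right now.

#include <linux/shrinker.h>

/* Hypothetical cache hooks, standing in for a driver's own cache code. */
static unsigned long example_cache_count(void)
{
	return 0;	/* a real driver would report its current cache size */
}

static unsigned long example_cache_trim(unsigned long nr, gfp_t gfp)
{
	return 0;	/* a real driver would walk its LRU and free up to nr entries */
}

static unsigned long example_count_objects(struct shrinker *shrink,
					   struct shrink_control *sc)
{
	/* Best-effort count of freeable objects; 0 means "nothing to do". */
	return example_cache_count();
}

static unsigned long example_scan_objects(struct shrinker *shrink,
					  struct shrink_control *sc)
{
	/* Free up to sc->nr_to_scan objects and report how many went away... */
	unsigned long freed = example_cache_trim(sc->nr_to_scan, sc->gfp_mask);

	/* ...or ask the VM to back off for now. */
	return freed ? freed : SHRINK_STOP;
}

static struct shrinker example_shrinker = {
	.count_objects	= example_count_objects,
	.scan_objects	= example_scan_objects,
	.seeks		= DEFAULT_SEEKS,
};

/* Registered with register_shrinker(&example_shrinker) at init time. */

Returning SHRINK_STOP from the scan callback (rather than 0 or -1 from the old combined callback) is exactly what the bcache and dm-bufio hunks below do when the shrinker is disabled or the necessary locks cannot be taken.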
Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/bcache/btree.c   43
-rw-r--r--  drivers/md/bcache/sysfs.c    2
-rw-r--r--  drivers/md/dm-bufio.c       64
3 files changed, 66 insertions(+), 43 deletions(-)
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index ee372884c405..f9764e61978b 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -597,24 +597,19 @@ static int mca_reap(struct btree *b, struct closure *cl, unsigned min_order)
return 0;
}
-static int bch_mca_shrink(struct shrinker *shrink, struct shrink_control *sc)
+static unsigned long bch_mca_scan(struct shrinker *shrink,
+ struct shrink_control *sc)
{
struct cache_set *c = container_of(shrink, struct cache_set, shrink);
struct btree *b, *t;
unsigned long i, nr = sc->nr_to_scan;
+ unsigned long freed = 0;
if (c->shrinker_disabled)
- return 0;
+ return SHRINK_STOP;
if (c->try_harder)
- return 0;
-
- /*
- * If nr == 0, we're supposed to return the number of items we have
- * cached. Not allowed to return -1.
- */
- if (!nr)
- return mca_can_free(c) * c->btree_pages;
+ return SHRINK_STOP;
/* Return -1 if we can't do anything right now */
if (sc->gfp_mask & __GFP_WAIT)
@@ -634,14 +629,14 @@ static int bch_mca_shrink(struct shrinker *shrink, struct shrink_control *sc)
i = 0;
list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) {
- if (!nr)
+ if (freed >= nr)
break;
if (++i > 3 &&
!mca_reap(b, NULL, 0)) {
mca_data_free(b);
rw_unlock(true, b);
- --nr;
+ freed++;
}
}
@@ -652,7 +647,7 @@ static int bch_mca_shrink(struct shrinker *shrink, struct shrink_control *sc)
if (list_empty(&c->btree_cache))
goto out;
- for (i = 0; nr && i < c->bucket_cache_used; i++) {
+ for (i = 0; (nr--) && i < c->bucket_cache_used; i++) {
b = list_first_entry(&c->btree_cache, struct btree, list);
list_rotate_left(&c->btree_cache);
@@ -661,14 +656,27 @@ static int bch_mca_shrink(struct shrinker *shrink, struct shrink_control *sc)
mca_bucket_free(b);
mca_data_free(b);
rw_unlock(true, b);
- --nr;
+ freed++;
} else
b->accessed = 0;
}
out:
- nr = mca_can_free(c) * c->btree_pages;
mutex_unlock(&c->bucket_lock);
- return nr;
+ return freed;
+}
+
+static unsigned long bch_mca_count(struct shrinker *shrink,
+ struct shrink_control *sc)
+{
+ struct cache_set *c = container_of(shrink, struct cache_set, shrink);
+
+ if (c->shrinker_disabled)
+ return 0;
+
+ if (c->try_harder)
+ return 0;
+
+ return mca_can_free(c) * c->btree_pages;
}
void bch_btree_cache_free(struct cache_set *c)
@@ -737,7 +745,8 @@ int bch_btree_cache_alloc(struct cache_set *c)
c->verify_data = NULL;
#endif
- c->shrink.shrink = bch_mca_shrink;
+ c->shrink.count_objects = bch_mca_count;
+ c->shrink.scan_objects = bch_mca_scan;
c->shrink.seeks = 4;
c->shrink.batch = c->btree_pages * 2;
register_shrinker(&c->shrink);
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 12a2c2846f99..4fe6ab2fbe2e 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -556,7 +556,7 @@ STORE(__bch_cache_set)
struct shrink_control sc;
sc.gfp_mask = GFP_KERNEL;
sc.nr_to_scan = strtoul_or_return(buf);
- c->shrink.shrink(&c->shrink, &sc);
+ c->shrink.scan_objects(&c->shrink, &sc);
}
sysfs_strtoul(congested_read_threshold_us,
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 5227e079a6e3..173cbb20d104 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1425,62 +1425,75 @@ static int __cleanup_old_buffer(struct dm_buffer *b, gfp_t gfp,
unsigned long max_jiffies)
{
if (jiffies - b->last_accessed < max_jiffies)
- return 1;
+ return 0;
if (!(gfp & __GFP_IO)) {
if (test_bit(B_READING, &b->state) ||
test_bit(B_WRITING, &b->state) ||
test_bit(B_DIRTY, &b->state))
- return 1;
+ return 0;
}
if (b->hold_count)
- return 1;
+ return 0;
__make_buffer_clean(b);
__unlink_buffer(b);
__free_buffer_wake(b);
- return 0;
+ return 1;
}
-static void __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
- struct shrink_control *sc)
+static long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
+ gfp_t gfp_mask)
{
int l;
struct dm_buffer *b, *tmp;
+ long freed = 0;
for (l = 0; l < LIST_SIZE; l++) {
- list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list)
- if (!__cleanup_old_buffer(b, sc->gfp_mask, 0) &&
- !--nr_to_scan)
- return;
+ list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
+ freed += __cleanup_old_buffer(b, gfp_mask, 0);
+ if (!--nr_to_scan)
+ break;
+ }
dm_bufio_cond_resched();
}
+ return freed;
}
-static int shrink(struct shrinker *shrinker, struct shrink_control *sc)
+static unsigned long
+dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
- struct dm_bufio_client *c =
- container_of(shrinker, struct dm_bufio_client, shrinker);
- unsigned long r;
- unsigned long nr_to_scan = sc->nr_to_scan;
+ struct dm_bufio_client *c;
+ unsigned long freed;
+ c = container_of(shrink, struct dm_bufio_client, shrinker);
if (sc->gfp_mask & __GFP_IO)
dm_bufio_lock(c);
else if (!dm_bufio_trylock(c))
- return !nr_to_scan ? 0 : -1;
+ return SHRINK_STOP;
- if (nr_to_scan)
- __scan(c, nr_to_scan, sc);
+ freed = __scan(c, sc->nr_to_scan, sc->gfp_mask);
+ dm_bufio_unlock(c);
+ return freed;
+}
- r = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
- if (r > INT_MAX)
- r = INT_MAX;
+static unsigned long
+dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+ struct dm_bufio_client *c;
+ unsigned long count;
- dm_bufio_unlock(c);
+ c = container_of(shrink, struct dm_bufio_client, shrinker);
+ if (sc->gfp_mask & __GFP_IO)
+ dm_bufio_lock(c);
+ else if (!dm_bufio_trylock(c))
+ return 0;
- return r;
+ count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
+ dm_bufio_unlock(c);
+ return count;
}
/*
@@ -1582,7 +1595,8 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
__cache_size_refresh();
mutex_unlock(&dm_bufio_clients_lock);
- c->shrinker.shrink = shrink;
+ c->shrinker.count_objects = dm_bufio_shrink_count;
+ c->shrinker.scan_objects = dm_bufio_shrink_scan;
c->shrinker.seeks = 1;
c->shrinker.batch = 0;
register_shrinker(&c->shrinker);
@@ -1669,7 +1683,7 @@ static void cleanup_old_buffers(void)
struct dm_buffer *b;
b = list_entry(c->lru[LIST_CLEAN].prev,
struct dm_buffer, lru_list);
- if (__cleanup_old_buffer(b, 0, max_age * HZ))
+ if (!__cleanup_old_buffer(b, 0, max_age * HZ))
break;
dm_bufio_cond_resched();
}