Diffstat (limited to 'drivers/md/bcache/alloc.c')
-rw-r--r--   drivers/md/bcache/alloc.c   60
1 file changed, 27 insertions, 33 deletions
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 52035a78d836..8c371d5eef8e 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -49,7 +49,7 @@
*
* bch_bucket_alloc() allocates a single bucket from a specific cache.
*
- * bch_bucket_alloc_set() allocates one or more buckets from different caches
+ * bch_bucket_alloc_set() allocates one bucket from the only cache
* out of a cache set.
*
* free_some_buckets() drives all the processes described above. It's called
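As a rough illustration of the comment above, a caller of the reworked bch_bucket_alloc_set() no longer passes a bucket count; it asks for exactly one bucket and gets back a key carrying a single pointer. A minimal sketch, assuming the RESERVE_NONE reserve and an illustrative helper name (neither is part of this patch):

static int example_alloc_one_bucket(struct cache_set *c, struct bkey *k)
{
        /* one bucket from the set's single cache; true == wait until one is free */
        if (bch_bucket_alloc_set(c, RESERVE_NONE, k, true))
                return -1;      /* allocation failed or CACHE_SET_IO_DISABLE is set */

        /* on success the key carries exactly one pointer: KEY_PTRS(k) == 1 */
        return 0;
}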
@@ -87,8 +87,7 @@ void bch_rescale_priorities(struct cache_set *c, int sectors)
{
struct cache *ca;
struct bucket *b;
- unsigned long next = c->nbuckets * c->sb.bucket_size / 1024;
- unsigned int i;
+ unsigned long next = c->nbuckets * c->cache->sb.bucket_size / 1024;
int r;
atomic_sub(sectors, &c->rescale);
@@ -104,14 +103,14 @@ void bch_rescale_priorities(struct cache_set *c, int sectors)
c->min_prio = USHRT_MAX;
-        for_each_cache(ca, c, i)
-                for_each_bucket(b, ca)
-                        if (b->prio &&
-                            b->prio != BTREE_PRIO &&
-                            !atomic_read(&b->pin)) {
-                                b->prio--;
-                                c->min_prio = min(c->min_prio, b->prio);
-                        }
+        ca = c->cache;
+        for_each_bucket(b, ca)
+                if (b->prio &&
+                    b->prio != BTREE_PRIO &&
+                    !atomic_read(&b->pin)) {
+                        b->prio--;
+                        c->min_prio = min(c->min_prio, b->prio);
+                }
mutex_unlock(&c->bucket_lock);
}
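For a sense of scale, the threshold computed in the new line above reads as "age bucket priorities once per 1/1024 of the cache's capacity written": with sb.bucket_size counted in 512-byte sectors, a cache of 2,097,152 buckets of 1024 sectors each (512 KiB buckets, 1 TiB total) gives next = 2,097,152 * 1024 / 1024 = 2,097,152 sectors, i.e. one rescale pass per ~1 GiB of writes. (The figures are illustrative; the code that re-arms c->rescale with "next" lies outside this hunk.)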
@@ -362,7 +361,7 @@ retry_invalidate:
* new stuff to them:
*/
allocator_wait(ca, !atomic_read(&ca->set->prio_blocked));
- if (CACHE_SYNC(&ca->set->sb)) {
+ if (CACHE_SYNC(&ca->sb)) {
/*
* This could deadlock if an allocation with a btree
* node locked ever blocked - having the btree node
@@ -488,34 +487,29 @@ void bch_bucket_free(struct cache_set *c, struct bkey *k)
}
int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
- struct bkey *k, int n, bool wait)
+ struct bkey *k, bool wait)
{
- int i;
+ struct cache *ca;
+ long b;
/* No allocation if CACHE_SET_IO_DISABLE bit is set */
if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags)))
return -1;
lockdep_assert_held(&c->bucket_lock);
- BUG_ON(!n || n > c->caches_loaded || n > MAX_CACHES_PER_SET);
bkey_init(k);
-        /* sort by free space/prio of oldest data in caches */
-
-        for (i = 0; i < n; i++) {
-                struct cache *ca = c->cache_by_alloc[i];
-                long b = bch_bucket_alloc(ca, reserve, wait);
+        ca = c->cache;
+        b = bch_bucket_alloc(ca, reserve, wait);
+        if (b == -1)
+                goto err;
-                if (b == -1)
-                        goto err;
+        k->ptr[0] = MAKE_PTR(ca->buckets[b].gen,
+                             bucket_to_sector(c, b),
+                             ca->sb.nr_this_dev);
-                k->ptr[i] = MAKE_PTR(ca->buckets[b].gen,
-                                     bucket_to_sector(c, b),
-                                     ca->sb.nr_this_dev);
-
-                SET_KEY_PTRS(k, i + 1);
-        }
+        SET_KEY_PTRS(k, 1);
return 0;
err:
@@ -525,12 +519,12 @@ err:
}
int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
- struct bkey *k, int n, bool wait)
+ struct bkey *k, bool wait)
{
int ret;
mutex_lock(&c->bucket_lock);
- ret = __bch_bucket_alloc_set(c, reserve, k, n, wait);
+ ret = __bch_bucket_alloc_set(c, reserve, k, wait);
mutex_unlock(&c->bucket_lock);
return ret;
}
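With SET_KEY_PTRS(k, 1) now unconditional, a key returned by this path always carries a single pointer, and the gen/sector/device triple packed by MAKE_PTR() above can be read back through the PTR_GEN()/PTR_OFFSET()/PTR_DEV() accessors. A small debugging sketch (the helper name and pr_info() message are illustrative only):

static void example_dump_alloc_key(struct bkey *k)
{
        /* slot 0 is the only pointer a freshly allocated key can carry now */
        pr_info("bcache: gen %llu sector %llu dev %llu (ptrs %llu)\n",
                (unsigned long long)PTR_GEN(k, 0),
                (unsigned long long)PTR_OFFSET(k, 0),
                (unsigned long long)PTR_DEV(k, 0),
                (unsigned long long)KEY_PTRS(k));
}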
@@ -589,7 +583,7 @@ static struct open_bucket *pick_data_bucket(struct cache_set *c,
struct open_bucket, list);
found:
if (!ret->sectors_free && KEY_PTRS(alloc)) {
- ret->sectors_free = c->sb.bucket_size;
+ ret->sectors_free = c->cache->sb.bucket_size;
bkey_copy(&ret->key, alloc);
bkey_init(alloc);
}
@@ -638,7 +632,7 @@ bool bch_alloc_sectors(struct cache_set *c,
spin_unlock(&c->data_bucket_lock);
- if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, wait))
+ if (bch_bucket_alloc_set(c, watermark, &alloc.key, wait))
return false;
spin_lock(&c->data_bucket_lock);
@@ -683,7 +677,7 @@ bool bch_alloc_sectors(struct cache_set *c,
&PTR_CACHE(c, &b->key, i)->sectors_written);
}
- if (b->sectors_free < c->sb.block_size)
+ if (b->sectors_free < c->cache->sb.block_size)
b->sectors_free = 0;
/*