path: root/drivers/md/bcache/journal.c
author	Coly Li <colyli@suse.de>	2020-10-01 09:50:56 +0300
committer	Jens Axboe <axboe@kernel.dk>	2020-10-02 23:25:30 +0300
commit	4a784266c6a75f375e08915b35e909df19eff17f (patch)
tree	8af35322a0c328779bc46da3e29b87aa7bdf818e /drivers/md/bcache/journal.c
parent	6f9414e0f6f35c7669dad5ac1a838ce323302f03 (diff)
download	linux-4a784266c6a75f375e08915b35e909df19eff17f.tar.xz
bcache: remove embedded struct cache_sb from struct cache_set
Since the bcache code was merged into the mainline kernel, each cache set has only ever had one single cache in it. The framework for multiple caches is there, but the code is far from complete. Considering that multiple copies of cached data can also be stored on e.g. md raid1 devices, there is no real need to support multiple caches in one cache set.

The previous preparation patches fixed the dependencies so that a cache set explicitly has only a single cache. Now there is no need to maintain an embedded partial super block in struct cache_set; the in-memory super block can be referenced directly from struct cache.

This patch removes the embedded struct cache_sb from struct cache_set, and fixes all locations that referenced the removed super block so that they reference the in-memory super block of struct cache instead.

Signed-off-by: Coly Li <colyli@suse.de>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
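For readers without the bcache headers at hand, here is a minimal, simplified sketch of the structural change the diff below relies on. Field names and layout are illustrative only, not the exact bcache definitions: struct cache_set loses its embedded struct cache_sb, and the super block is reached through the set's single struct cache instead.

/* Simplified sketch, not the exact bcache definitions. */
struct cache_sb {
	unsigned int	block_size;	/* in sectors */
	unsigned int	bucket_size;	/* in sectors */
	unsigned int	nr_this_dev;
	/* ... */
};

struct cache {
	struct cache_sb	sb;		/* authoritative in-memory super block */
	/* ... */
};

struct cache_set {
	struct cache	*cache;		/* the single cache in this set */
	/* struct cache_sb sb;		   removed by this patch */
	/* ... */
};

/*
 * Call sites change from reading the (removed) embedded copy to
 * reading the super block of the only cache, e.g.:
 *
 *	before:	c->sb.block_size
 *	after:	ca->sb.block_size	(with ca = c->cache)
 */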
Diffstat (limited to 'drivers/md/bcache/journal.c')
-rw-r--r--	drivers/md/bcache/journal.c	11
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index cd46f33db507..aefbdb7e003b 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -666,7 +666,7 @@ static void journal_reclaim(struct cache_set *c)
bkey_init(k);
SET_KEY_PTRS(k, 1);
- c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
+ c->journal.blocks_free = ca->sb.bucket_size >> c->block_bits;
out:
if (!journal_full(&c->journal))
@@ -735,7 +735,7 @@ static void journal_write_unlocked(struct closure *cl)
struct journal_write *w = c->journal.cur;
struct bkey *k = &c->journal.key;
unsigned int i, sectors = set_blocks(w->data, block_bytes(ca)) *
- c->sb.block_size;
+ ca->sb.block_size;
struct bio *bio;
struct bio_list list;
@@ -762,7 +762,7 @@ static void journal_write_unlocked(struct closure *cl)
bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);
w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];
- w->data->magic = jset_magic(&c->sb);
+ w->data->magic = jset_magic(&ca->sb);
w->data->version = BCACHE_JSET_VERSION;
w->data->last_seq = last_seq(&c->journal);
w->data->csum = csum_set(w->data);
@@ -838,6 +838,7 @@ static struct journal_write *journal_wait_for_write(struct cache_set *c,
size_t sectors;
struct closure cl;
bool wait = false;
+ struct cache *ca = c->cache;
closure_init_stack(&cl);
@@ -847,10 +848,10 @@ static struct journal_write *journal_wait_for_write(struct cache_set *c,
struct journal_write *w = c->journal.cur;
sectors = __set_blocks(w->data, w->data->keys + nkeys,
- block_bytes(c->cache)) * c->sb.block_size;
+ block_bytes(ca)) * ca->sb.block_size;
if (sectors <= min_t(size_t,
- c->journal.blocks_free * c->sb.block_size,
+ c->journal.blocks_free * ca->sb.block_size,
PAGE_SECTORS << JSET_BITS))
return w;