author     Kent Overstreet <kent.overstreet@gmail.com>    2022-03-15 09:41:21 +0300
committer  Kent Overstreet <kent.overstreet@linux.dev>    2023-10-23 00:09:28 +0300
commit     062afcbae3b269a7d01cf5087df92d5bd8732012 (patch)
tree       a9bc997e3c2ba9f9e9267a50a3f29717ff9d576c /fs/bcachefs/journal_io.c
parent     91d961badfd123b6759488bc4aa7a4d014b739f1 (diff)
download   linux-062afcbae3b269a7d01cf5087df92d5bd8732012.tar.xz
bcachefs: Restore journal write point at startup
This patch tweaks the journal recovery path so that we start writing right after where we left off, instead of at the next empty bucket. This is partly prep work for supporting zoned devices, but it's also good to do in general, to avoid the journal completely filling up and getting stuck.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
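The restored write point is computed from the entries the read path has already collected: the current bucket's free space starts at the full bucket size and is shrunk by every journal entry found inside that bucket, so writing resumes immediately after the last entry rather than at the next empty bucket. Below is a minimal, standalone C sketch of just that calculation, not the kernel code: bucket_size and the found[] table are made-up example values, and the kernel's sector_to_bucket()/vstruct_sectors() bookkeeping is elided.

#include <stdio.h>

/* One journal entry's position on the device, in sectors (illustrative). */
struct entry {
	unsigned sector;	/* start sector of the entry */
	unsigned len;		/* entry size in sectors */
};

int main(void)
{
	const unsigned bucket_size = 512;	/* sectors per bucket (example value) */
	const struct entry found[] = {		/* entries read back from the resume bucket */
		{ .sector =  0, .len =  64 },	/* ends at sector 64 within the bucket */
		{ .sector = 64, .len = 128 },	/* ends at sector 192 within the bucket */
	};
	unsigned sectors_free = bucket_size;	/* start by assuming the whole bucket is free */

	for (unsigned i = 0; i < sizeof(found) / sizeof(found[0]); i++) {
		/* offset of the end of this entry within its bucket */
		unsigned wrote = found[i].sector % bucket_size + found[i].len;

		/* shrink free space to whatever follows the furthest entry */
		if (bucket_size - wrote < sectors_free)
			sectors_free = bucket_size - wrote;
	}

	/* Prints 320: the next journal write resumes at sector 192 of 512. */
	printf("sectors_free = %u\n", sectors_free);
	return 0;
}

With these example values the loop mirrors the min(ja->sectors_free, ca->mi.bucket_size - wrote) update in the patch below, and the BUG_ON there is the matching sanity check: a bucket that contains journal entries must never end up reporting a completely free bucket.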
Diffstat (limited to 'fs/bcachefs/journal_io.c')
-rw-r--r--  fs/bcachefs/journal_io.c  23
1 file changed, 21 insertions(+), 2 deletions(-)
diff --git a/fs/bcachefs/journal_io.c b/fs/bcachefs/journal_io.c
index 54587ff29771..e3b3d0b72232 100644
--- a/fs/bcachefs/journal_io.c
+++ b/fs/bcachefs/journal_io.c
@@ -907,6 +907,7 @@ static void bch2_journal_read_device(struct closure *cl)
 	struct bch_fs *c = ca->fs;
 	struct journal_list *jlist =
 		container_of(cl->parent, struct journal_list, cl);
+	struct journal_replay *r;
 	struct journal_read_buf buf = { NULL, 0 };
 	u64 min_seq = U64_MAX;
 	unsigned i;
@@ -942,11 +943,29 @@ static void bch2_journal_read_device(struct closure *cl)
 	 * allocate
 	 */
 	while (ja->bucket_seq[ja->cur_idx] > min_seq &&
-	       ja->bucket_seq[ja->cur_idx] >
+	       ja->bucket_seq[ja->cur_idx] ==
 	       ja->bucket_seq[(ja->cur_idx + 1) % ja->nr])
 		ja->cur_idx = (ja->cur_idx + 1) % ja->nr;
 
-	ja->sectors_free = 0;
+	ja->sectors_free = ca->mi.bucket_size;
+
+	mutex_lock(&jlist->lock);
+	list_for_each_entry(r, jlist->head, list) {
+		for (i = 0; i < r->nr_ptrs; i++) {
+			if (r->ptrs[i].dev == ca->dev_idx &&
+			    sector_to_bucket(ca, r->ptrs[i].sector) == ja->buckets[ja->cur_idx]) {
+				unsigned wrote = (r->ptrs[i].sector % ca->mi.bucket_size) +
+					vstruct_sectors(&r->j, c->block_bits);
+
+				ja->sectors_free = min(ja->sectors_free,
+						       ca->mi.bucket_size - wrote);
+			}
+		}
+	}
+	mutex_unlock(&jlist->lock);
+
+	BUG_ON(ja->bucket_seq[ja->cur_idx] &&
+	       ja->sectors_free == ca->mi.bucket_size);
 
 	/*
 	 * Set dirty_idx to indicate the entire journal is full and needs to be