author     Kent Overstreet <kent.overstreet@gmail.com>   2020-11-14 02:36:33 +0300
committer  Kent Overstreet <kent.overstreet@linux.dev>   2023-10-23 00:08:49 +0300
commit     ebb84d094141eac9ee3e22d95abc9792a1c79eca (patch)
tree       4d5e66377dd2a124a626bad434c46c8d7f8e67b8 /fs/bcachefs/journal_reclaim.c
parent     5db43418d5097b8aca5c725eb301186dee04c70a (diff)
download   linux-ebb84d094141eac9ee3e22d95abc9792a1c79eca.tar.xz
bcachefs: Increase journal pipelining
This patch increases the maximum journal buffers in flight from 2 to 4 - this will be particularly helpful when in the future we stop requiring flush+fua for every journal write.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
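For illustration, here is a minimal user-space sketch of the power-of-two buffer ring this change enlarges. JOURNAL_BUF_NR and JOURNAL_BUF_MASK mirror the kernel macros the patch relies on, but the struct, field names and values below are simplified stand-ins rather than the real bcachefs types:

/* Sketch: a ring of 4 in-flight journal buffers, indexed with a mask
 * so the index wraps cheaply.  Simplified stand-in types, not the
 * kernel's struct journal. */
#include <stdio.h>

#define JOURNAL_BUF_NR		4		/* raised from 2 by this patch */
#define JOURNAL_BUF_MASK	(JOURNAL_BUF_NR - 1)

struct buf_sketch {
	unsigned	sectors;	/* pending write size; 0 = slot free */
};

int main(void)
{
	struct buf_sketch buf[JOURNAL_BUF_NR] = { { 8 }, { 16 }, { 24 }, { 0 } };
	unsigned unwritten_idx = 0;	/* oldest buffer not yet on disk */
	unsigned cur_idx       = 3;	/* buffer currently open for reservations */

	for (unsigned i = unwritten_idx; i != cur_idx; i = (i + 1) & JOURNAL_BUF_MASK)
		printf("buf %u: %u sectors in flight\n", i, buf[i].sectors);

	return 0;
}

With the old cap of 2 buffers the journal could have at most one write in flight behind the buffer currently accepting reservations; with 4, up to three pending writes can queue up behind it.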
Diffstat (limited to 'fs/bcachefs/journal_reclaim.c')
-rw-r--r--  fs/bcachefs/journal_reclaim.c | 46
1 file changed, 35 insertions(+), 11 deletions(-)
diff --git a/fs/bcachefs/journal_reclaim.c b/fs/bcachefs/journal_reclaim.c
index 4fd2b272e04e..c50352385a47 100644
--- a/fs/bcachefs/journal_reclaim.c
+++ b/fs/bcachefs/journal_reclaim.c
@@ -58,6 +58,19 @@ static void journal_set_remaining(struct journal *j, unsigned u64s_remaining)
old.v, new.v)) != old.v);
}
+static inline unsigned get_unwritten_sectors(struct journal *j, unsigned *idx)
+{
+ unsigned sectors = 0;
+
+ while (!sectors && *idx != j->reservations.idx) {
+ sectors = j->buf[*idx].sectors;
+
+ *idx = (*idx + 1) & JOURNAL_BUF_MASK;
+ }
+
+ return sectors;
+}
+
static struct journal_space {
unsigned next_entry;
unsigned remaining;
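As a rough user-space model of the walk get_unwritten_sectors() performs: starting from the oldest unwritten buffer, it skips empty slots, returns the first non-zero sector count, and leaves *idx pointing just past that buffer so the next call resumes where it left off. The types and values here are hypothetical stand-ins for the kernel's struct journal, not the real definitions:

/* Model of the get_unwritten_sectors() walk; simplified stand-in types. */
#include <stdio.h>

#define JOURNAL_BUF_NR		4
#define JOURNAL_BUF_MASK	(JOURNAL_BUF_NR - 1)

struct journal_sketch {
	unsigned	res_idx;			/* stands in for j->reservations.idx */
	unsigned	buf_sectors[JOURNAL_BUF_NR];	/* stands in for j->buf[i].sectors */
};

static unsigned next_unwritten_sectors(struct journal_sketch *j, unsigned *idx)
{
	unsigned sectors = 0;

	/* Stop once we reach the buffer still open for reservations: */
	while (!sectors && *idx != j->res_idx) {
		sectors = j->buf_sectors[*idx];
		*idx = (*idx + 1) & JOURNAL_BUF_MASK;
	}

	return sectors;
}

int main(void)
{
	struct journal_sketch j = { .res_idx = 3, .buf_sectors = { 8, 0, 16, 0 } };
	unsigned idx = 0, sectors;	/* idx starts at the oldest unwritten buffer */

	while ((sectors = next_unwritten_sectors(&j, &idx)))
		printf("%u sectors still unwritten\n", sectors);

	return 0;
}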
@@ -69,15 +82,14 @@ static struct journal_space {
unsigned sectors_next_entry = UINT_MAX;
unsigned sectors_total = UINT_MAX;
unsigned i, nr_devs = 0;
- unsigned unwritten_sectors = j->reservations.prev_buf_unwritten
- ? journal_prev_buf(j)->sectors
- : 0;
+ unsigned unwritten_sectors;
rcu_read_lock();
for_each_member_device_rcu(ca, c, i,
&c->rw_devs[BCH_DATA_journal]) {
struct journal_device *ja = &ca->journal;
unsigned buckets_this_device, sectors_this_device;
+ unsigned idx = j->reservations.unwritten_idx;
if (!ja->nr)
continue;
@@ -89,16 +101,20 @@ static struct journal_space {
* Note that we don't allocate the space for a journal entry
* until we write it out - thus, account for it here:
*/
- if (unwritten_sectors >= sectors_this_device) {
- if (!buckets_this_device)
- continue;
-
- buckets_this_device--;
- sectors_this_device = ca->mi.bucket_size;
+ while ((unwritten_sectors = get_unwritten_sectors(j, &idx))) {
+ if (unwritten_sectors >= sectors_this_device) {
+ if (!buckets_this_device) {
+ sectors_this_device = 0;
+ break;
+ }
+
+ buckets_this_device--;
+ sectors_this_device = ca->mi.bucket_size;
+ }
+
+ sectors_this_device -= unwritten_sectors;
}
- sectors_this_device -= unwritten_sectors;
-
if (sectors_this_device < ca->mi.bucket_size &&
buckets_this_device) {
buckets_this_device--;
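Read in isolation, the new accounting loop above amounts to the following sketch: for each pending journal write, if it does not fit in the space left in the current bucket, spill to a fresh bucket, or report zero usable space once the device runs out of buckets. The function signature and inputs are hypothetical, lifted out of the kernel context purely for illustration:

/* Sketch of the per-device accounting loop; the parameters are
 * hypothetical stand-ins for the journal_device fields used in the
 * real function. */
#include <stdio.h>

static unsigned sectors_after_unwritten(unsigned bucket_size,
					unsigned buckets_this_device,
					unsigned sectors_this_device,
					const unsigned *unwritten, unsigned nr)
{
	for (unsigned i = 0; i < nr; i++) {
		unsigned unwritten_sectors = unwritten[i];

		if (!unwritten_sectors)
			continue;

		if (unwritten_sectors >= sectors_this_device) {
			/* Pending write spills into a fresh bucket: */
			if (!buckets_this_device)
				return 0;	/* no space left on this device */

			buckets_this_device--;
			sectors_this_device = bucket_size;
		}

		sectors_this_device -= unwritten_sectors;
	}

	return sectors_this_device;
}

int main(void)
{
	unsigned unwritten[] = { 8, 16, 24 };	/* three pending journal writes */

	printf("%u sectors usable in the current bucket\n",
	       sectors_after_unwritten(32, 2, 12, unwritten, 3));
	return 0;
}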
@@ -277,6 +293,14 @@ static void bch2_journal_reclaim_fast(struct journal *j)
bch2_journal_space_available(j);
}
+void __bch2_journal_pin_put(struct journal *j, u64 seq)
+{
+ struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);
+
+ if (atomic_dec_and_test(&pin_list->count))
+ bch2_journal_reclaim_fast(j);
+}
+
void bch2_journal_pin_put(struct journal *j, u64 seq)
{
struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);