author     Kent Overstreet <kent.overstreet@linux.dev>  2023-03-02 10:12:18 +0300
committer  Kent Overstreet <kent.overstreet@linux.dev>  2023-10-23 00:09:55 +0300
commit     1306f87de399a0c791f03d68b50e03bdb3f409ae (patch)
tree       1dc9be35833714cb8310f4286876cae9f3139c0f /fs/bcachefs/btree_cache.c
parent     b1cfe5ed2b5d5dbd2d8bcb2a4c1131513a1b3e1c (diff)
bcachefs: Plumb btree_trans through btree cache code
Soon, __bch2_btree_node_write() is going to require a btree_trans: zoned
device support is going to require a new allocation for every btree node
write. This is a bit of prep work.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
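The change follows one mechanical pattern: functions that used to take a
struct bch_fs * now take a struct btree_trans * and recover the filesystem
pointer from trans->c. A minimal standalone sketch of the idea, with
simplified stand-in types rather than the real bcachefs definitions:

	/* Stand-in types for illustration only, not the real bcachefs ones */
	struct bch_fs;				/* filesystem instance */
	struct btree_trans {
		struct bch_fs *c;		/* transaction remembers its fs */
	};

	/* Before: callee took the fs directly, no transaction in scope */
	static void node_op_old(struct bch_fs *c)
	{
		(void) c;
	}

	/* After: callee takes the transaction and derives the fs from it */
	static void node_op_new(struct btree_trans *trans)
	{
		struct bch_fs *c = trans->c;	/* same fs as before */
		(void) c;
	}

With every callee holding a transaction, a later change can make
__bch2_btree_node_write() allocate within that transaction without another
round of signature churn.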
Diffstat (limited to 'fs/bcachefs/btree_cache.c')
-rw-r--r--  fs/bcachefs/btree_cache.c  27
1 file changed, 14 insertions(+), 13 deletions(-)
diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c
index 769f17b67fcf..76cad6109297 100644
--- a/fs/bcachefs/btree_cache.c
+++ b/fs/bcachefs/btree_cache.c
@@ -561,8 +561,9 @@ static struct btree *btree_node_cannibalize(struct bch_fs *c)
}
}
-struct btree *bch2_btree_node_mem_alloc(struct bch_fs *c, bool pcpu_read_locks)
+struct btree *bch2_btree_node_mem_alloc(struct btree_trans *trans, bool pcpu_read_locks)
{
+ struct bch_fs *c = trans->c;
struct btree_cache *bc = &c->btree_cache;
struct list_head *freed = pcpu_read_locks
? &bc->freed_pcpu
: &bc->freed;
@@ -673,8 +674,7 @@ err:
}
/* Slowpath, don't want it inlined into btree_iter_traverse() */
-static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
- struct btree_trans *trans,
+static noinline struct btree *bch2_btree_node_fill(struct btree_trans *trans,
struct btree_path *path,
const struct bkey_i *k,
enum btree_id btree_id,
@@ -682,6 +682,7 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
enum six_lock_type lock_type,
bool sync)
{
+ struct bch_fs *c = trans->c;
struct btree_cache *bc = &c->btree_cache;
struct btree *b;
u32 seq;
@@ -691,14 +692,14 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
* Parent node must be locked, else we could read in a btree node that's
* been freed:
*/
- if (trans && !bch2_btree_node_relock(trans, path, level + 1)) {
+ if (path && !bch2_btree_node_relock(trans, path, level + 1)) {
trace_and_count(c, trans_restart_relock_parent_for_fill, trans, _THIS_IP_, path);
return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_fill_relock));
}
- b = bch2_btree_node_mem_alloc(c, level != 0);
+ b = bch2_btree_node_mem_alloc(trans, level != 0);
- if (trans && b == ERR_PTR(-ENOMEM)) {
+ if (b == ERR_PTR(-ENOMEM)) {
trans->memory_allocation_failure = true;
trace_and_count(c, trans_restart_memory_allocation_failure, trans, _THIS_IP_, path);
return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_fill_mem_alloc_fail));
@@ -744,7 +745,7 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
if (!sync)
return NULL;
- if (trans) {
+ if (path) {
int ret = bch2_trans_relock(trans) ?:
bch2_btree_path_relock_intent(trans, path);
if (ret) {
@@ -754,7 +755,7 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
}
if (!six_relock_type(&b->c.lock, lock_type, seq)) {
- if (trans)
+ if (path)
trace_and_count(c, trans_restart_relock_after_fill, trans, _THIS_IP_, path);
return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_after_fill));
}
@@ -820,7 +821,7 @@ retry:
* else we could read in a btree node from disk that's been
* freed:
*/
- b = bch2_btree_node_fill(c, trans, path, k, path->btree_id,
+ b = bch2_btree_node_fill(trans, path, k, path->btree_id,
level, lock_type, true);
/* We raced and found the btree node in the cache */
@@ -1029,7 +1030,7 @@ retry:
if (nofill)
goto out;
- b = bch2_btree_node_fill(c, NULL, NULL, k, btree_id,
+ b = bch2_btree_node_fill(trans, NULL, k, btree_id,
level, SIX_LOCK_read, true);
/* We raced and found the btree node in the cache */
@@ -1089,12 +1090,12 @@ out:
return b;
}
-int bch2_btree_node_prefetch(struct bch_fs *c,
- struct btree_trans *trans,
+int bch2_btree_node_prefetch(struct btree_trans *trans,
struct btree_path *path,
const struct bkey_i *k,
enum btree_id btree_id, unsigned level)
{
+ struct bch_fs *c = trans->c;
struct btree_cache *bc = &c->btree_cache;
struct btree *b;
@@ -1105,7 +1106,7 @@ int bch2_btree_node_prefetch(struct bch_fs *c,
if (b)
return 0;
- b = bch2_btree_node_fill(c, trans, path, k, btree_id,
+ b = bch2_btree_node_fill(trans, path, k, btree_id,
level, SIX_LOCK_read, false);
return PTR_ERR_OR_ZERO(b);
}
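Two caller-visible effects fall out of the new signature. First, since
bch2_btree_node_fill() now always has a non-NULL trans, the "is there a
btree_path to relock?" checks switch their sentinel from trans to path
(the cached-read path above passes path == NULL). Second, call sites drop
the now-redundant bch_fs argument. An illustrative before/after (the
callers live outside this file; variable names here are hypothetical):

	/* before: caller had to pass both c and trans */
	ret = bch2_btree_node_prefetch(c, trans, path, k, btree_id, level);

	/* after: c is gone, the callee reads trans->c itself */
	ret = bch2_btree_node_prefetch(trans, path, k, btree_id, level);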