author    Kent Overstreet <kent.overstreet@linux.dev>  2022-09-04 04:09:54 +0300
committer Kent Overstreet <kent.overstreet@linux.dev>  2023-10-23 00:09:40 +0300
commit    da4474f20961f995a1d54f82b4c462c94ea03552
tree      5f0d4340d43b67b85494ba861774eec8ded2e19a
parent    4e6defd106b69c3a78da380d694fd43275125dda
bcachefs: Convert more locking code to btree_bkey_cached_common
Ideally, all the code in btree_locking.c should be converted, but then we'd
want to convert btree_path to point to btree_bkey_cached_common too, and then
we'd be in for a much bigger cleanup - but a bit of incremental cleanup will
still be helpful for the next patches.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Diffstat (limited to 'fs/bcachefs'):
 fs/bcachefs/btree_key_cache.c       |  2 +-
 fs/bcachefs/btree_locking.c         | 11 ++++++-----
 fs/bcachefs/btree_locking.h         | 16 ++++++++--------
 fs/bcachefs/btree_update_interior.c |  4 ++--
 fs/bcachefs/btree_update_leaf.c     |  2 +-
 5 files changed, 18 insertions(+), 17 deletions(-)
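For readers new to the type: btree_bkey_cached_common is the small header
embedded at the start of both full btree nodes and key cache entries, which is
what lets one pointer type name either. A simplified sketch of that
relationship (field set abridged; the real definitions live in
fs/bcachefs/btree_types.h and carry more fields):

/* Simplified sketch, not the full definitions from btree_types.h: */
struct btree_bkey_cached_common {
	struct six_lock		lock;		/* what the lock_write helpers operate on */
	u8			level;
	u8			btree_id;
};

struct btree {
	struct btree_bkey_cached_common c;	/* callers now pass &b->c */
	/* ... fields specific to full btree nodes ... */
};

struct bkey_cached {
	struct btree_bkey_cached_common c;	/* key cache entries share the header */
	/* ... fields specific to cached keys ... */
};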
diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c
index 517b9861c01c..2de9a0cc17b6 100644
--- a/fs/bcachefs/btree_key_cache.c
+++ b/fs/bcachefs/btree_key_cache.c
@@ -343,7 +343,7 @@ static int btree_key_cache_fill(struct btree_trans *trans,
}
}
- ret = bch2_btree_node_lock_write(trans, ck_path, ck_path->l[0].b);
+ ret = bch2_btree_node_lock_write(trans, ck_path, &ck_path->l[0].b->c);
if (ret) {
kfree(new_k);
goto err;
diff --git a/fs/bcachefs/btree_locking.c b/fs/bcachefs/btree_locking.c
index c73902c170d4..bfe9780aea3a 100644
--- a/fs/bcachefs/btree_locking.c
+++ b/fs/bcachefs/btree_locking.c
@@ -52,9 +52,10 @@ void bch2_btree_node_unlock_write(struct btree_trans *trans,
/* lock */
-void __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree *b)
+void __bch2_btree_node_lock_write(struct btree_trans *trans,
+ struct btree_bkey_cached_common *b)
{
- int readers = bch2_btree_node_lock_counts(trans, NULL, &b->c, b->c.level).n[SIX_LOCK_read];
+ int readers = bch2_btree_node_lock_counts(trans, NULL, b, b->level).n[SIX_LOCK_read];
/*
* Must drop our read locks before calling six_lock_write() -
@@ -62,9 +63,9 @@ void __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree *b)
* goes to 0, and it's safe because we have the node intent
* locked:
*/
- six_lock_readers_add(&b->c.lock, -readers);
- btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_write);
- six_lock_readers_add(&b->c.lock, readers);
+ six_lock_readers_add(&b->lock, -readers);
+ btree_node_lock_nopath_nofail(trans, b, SIX_LOCK_write);
+ six_lock_readers_add(&b->lock, readers);
}
static inline bool path_has_read_locks(struct btree_path *path)
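The comment spanning this hunk is the heart of the helper: within one
transaction, other paths may already hold read locks on this node, and taking
the write lock would block on them forever. The readers-add dance temporarily
hides our own readers, which is safe because we hold the node intent locked so
no new readers can race in. As a user-space analogue (plain POSIX rwlocks, not
the six-lock API), this is the self-deadlock being avoided:

/* User-space analogue, NOT bcachefs code: taking the write side of an rwlock
 * we already read-hold typically self-deadlocks (POSIX leaves it undefined),
 * so the read side must be dropped before taking the write side.
 * Build with: cc demo.c -o demo -lpthread */
#include <pthread.h>
#include <stdio.h>

int main(void)
{
	pthread_rwlock_t l = PTHREAD_RWLOCK_INITIALIZER;

	pthread_rwlock_rdlock(&l);	/* we hold a read lock... */
	/* pthread_rwlock_wrlock(&l);	   ...so this line would hang */
	pthread_rwlock_unlock(&l);	/* drop the read side first, */
	pthread_rwlock_wrlock(&l);	/* then the write lock succeeds */
	pthread_rwlock_unlock(&l);
	puts("write lock acquired after dropping the read lock");
	return 0;
}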
diff --git a/fs/bcachefs/btree_locking.h b/fs/bcachefs/btree_locking.h
index 6eaf44fd3f37..9758a0c05d25 100644
--- a/fs/bcachefs/btree_locking.h
+++ b/fs/bcachefs/btree_locking.h
@@ -279,31 +279,31 @@ static inline int btree_node_lock(struct btree_trans *trans,
return ret;
}
-void __bch2_btree_node_lock_write(struct btree_trans *, struct btree *);
+void __bch2_btree_node_lock_write(struct btree_trans *, struct btree_bkey_cached_common *);
static inline void bch2_btree_node_lock_write_nofail(struct btree_trans *trans,
struct btree_path *path,
- struct btree *b)
+ struct btree_bkey_cached_common *b)
{
- EBUG_ON(path->l[b->c.level].b != b);
- EBUG_ON(path->l[b->c.level].lock_seq != b->c.lock.state.seq);
- EBUG_ON(!btree_node_intent_locked(path, b->c.level));
+ EBUG_ON(&path->l[b->level].b->c != b);
+ EBUG_ON(path->l[b->level].lock_seq != b->lock.state.seq);
+ EBUG_ON(!btree_node_intent_locked(path, b->level));
/*
* six locks are unfair, and read locks block while a thread wants a
* write lock: thus, we need to tell the cycle detector we have a write
* lock _before_ taking the lock:
*/
- mark_btree_node_locked_noreset(path, b->c.level, SIX_LOCK_write);
+ mark_btree_node_locked_noreset(path, b->level, SIX_LOCK_write);
- if (unlikely(!six_trylock_write(&b->c.lock)))
+ if (unlikely(!six_trylock_write(&b->lock)))
__bch2_btree_node_lock_write(trans, b);
}
static inline int __must_check
bch2_btree_node_lock_write(struct btree_trans *trans,
struct btree_path *path,
- struct btree *b)
+ struct btree_bkey_cached_common *b)
{
bch2_btree_node_lock_write_nofail(trans, path, b);
return 0;
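With the widened signature above, a caller holding a full struct btree * now
passes the embedded header rather than the node itself, exactly as the
call-site hunks below do. A minimal sketch of the pattern (hypothetical
wrapper name, not part of the patch):

/* Hypothetical wrapper: write-lock a full btree node by handing the helper
 * its embedded btree_bkey_cached_common header. */
static int example_write_lock_node(struct btree_trans *trans,
				   struct btree_path *path,
				   struct btree *b)
{
	return bch2_btree_node_lock_write(trans, path, &b->c);
}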
diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c
index db45883d27ce..d4e2ebe263a3 100644
--- a/fs/bcachefs/btree_update_interior.c
+++ b/fs/bcachefs/btree_update_interior.c
@@ -1163,7 +1163,7 @@ static void bch2_btree_set_root(struct btree_update *as,
* Ensure no one is using the old root while we switch to the
* new root:
*/
- bch2_btree_node_lock_write_nofail(trans, path, old);
+ bch2_btree_node_lock_write_nofail(trans, path, &old->c);
bch2_btree_set_root_inmem(c, b);
@@ -2002,7 +2002,7 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans,
if (ret)
goto err;
- bch2_btree_node_lock_write_nofail(trans, iter->path, b);
+ bch2_btree_node_lock_write_nofail(trans, iter->path, &b->c);
if (new_hash) {
mutex_lock(&c->btree_cache.lock);
diff --git a/fs/bcachefs/btree_update_leaf.c b/fs/bcachefs/btree_update_leaf.c
index 3efec0b30466..7f60f9f81f42 100644
--- a/fs/bcachefs/btree_update_leaf.c
+++ b/fs/bcachefs/btree_update_leaf.c
@@ -81,7 +81,7 @@ void bch2_btree_node_lock_for_insert(struct btree_trans *trans,
struct btree_path *path,
struct btree *b)
{
- bch2_btree_node_lock_write_nofail(trans, path, b);
+ bch2_btree_node_lock_write_nofail(trans, path, &b->c);
bch2_btree_node_prep_for_write(trans, path, b);
}