author    Kent Overstreet <kent.overstreet@linux.dev>    2022-09-04 04:09:54 +0300
committer Kent Overstreet <kent.overstreet@linux.dev>    2023-10-23 00:09:40 +0300
commit    da4474f20961f995a1d54f82b4c462c94ea03552 (patch)
tree      5f0d4340d43b67b85494ba861774eec8ded2e19a /fs/bcachefs/btree_locking.h
parent    4e6defd106b69c3a78da380d694fd43275125dda (diff)
download  linux-da4474f20961f995a1d54f82b4c462c94ea03552.tar.xz
bcachefs: Convert more locking code to btree_bkey_cached_common
Ideally, all the code in btree_locking.c should be converted, but then we'd want to convert btree_path to point to btree_bkey_cached_common too, and then we'd be in for a much bigger cleanup - but a bit of incremental cleanup will still be helpful for the next patches.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
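For orientation, a minimal sketch (not part of this patch; field layout abridged, see fs/bcachefs/btree_types.h for the real definitions) of how the common struct relates to struct btree and struct bkey_cached, which is why the assertions in the diff below compare against &path->l[...].b->c:

/*
 * Illustrative sketch only, not from this patch: the header shared by
 * btree nodes and key cache entries.  Fields are abridged.
 */
struct btree_bkey_cached_common {
        struct six_lock         lock;           /* b->lock in the diff below */
        u8                      level;          /* b->level in the diff below */
        u8                      btree_id;
};

struct btree {
        struct btree_bkey_cached_common c;      /* so &b->c yields the common part */
        /* btree-node-only fields ... */
};

struct bkey_cached {
        struct btree_bkey_cached_common c;      /* key cache entries share the same header */
        /* key-cache-only fields ... */
};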
Diffstat (limited to 'fs/bcachefs/btree_locking.h')
-rw-r--r--  fs/bcachefs/btree_locking.h | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/fs/bcachefs/btree_locking.h b/fs/bcachefs/btree_locking.h
index 6eaf44fd3f37..9758a0c05d25 100644
--- a/fs/bcachefs/btree_locking.h
+++ b/fs/bcachefs/btree_locking.h
@@ -279,31 +279,31 @@ static inline int btree_node_lock(struct btree_trans *trans,
 	return ret;
 }
 
-void __bch2_btree_node_lock_write(struct btree_trans *, struct btree *);
+void __bch2_btree_node_lock_write(struct btree_trans *, struct btree_bkey_cached_common *);
 
 static inline void bch2_btree_node_lock_write_nofail(struct btree_trans *trans,
 					struct btree_path *path,
-					struct btree *b)
+					struct btree_bkey_cached_common *b)
 {
-	EBUG_ON(path->l[b->c.level].b != b);
-	EBUG_ON(path->l[b->c.level].lock_seq != b->c.lock.state.seq);
-	EBUG_ON(!btree_node_intent_locked(path, b->c.level));
+	EBUG_ON(&path->l[b->level].b->c != b);
+	EBUG_ON(path->l[b->level].lock_seq != b->lock.state.seq);
+	EBUG_ON(!btree_node_intent_locked(path, b->level));
 
 	/*
 	 * six locks are unfair, and read locks block while a thread wants a
 	 * write lock: thus, we need to tell the cycle detector we have a write
 	 * lock _before_ taking the lock:
 	 */
-	mark_btree_node_locked_noreset(path, b->c.level, SIX_LOCK_write);
+	mark_btree_node_locked_noreset(path, b->level, SIX_LOCK_write);
 
-	if (unlikely(!six_trylock_write(&b->c.lock)))
+	if (unlikely(!six_trylock_write(&b->lock)))
 		__bch2_btree_node_lock_write(trans, b);
 }
 
 static inline int __must_check
 bch2_btree_node_lock_write(struct btree_trans *trans,
 			   struct btree_path *path,
-			   struct btree *b)
+			   struct btree_bkey_cached_common *b)
 {
 	bch2_btree_node_lock_write_nofail(trans, path, b);
 	return 0;
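With the signatures now taking struct btree_bkey_cached_common *, call sites that hold a struct btree *b would pass the embedded common struct instead; roughly (illustrative, not part of this diff):

	/* before this patch: bch2_btree_node_lock_write_nofail(trans, path, b); */
	bch2_btree_node_lock_write_nofail(trans, path, &b->c);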