From e3738c6909d69e980d8b56d33df2e438a2c1c798 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Sun, 21 Aug 2022 23:08:53 -0400 Subject: six locks: Improve six_lock_count six_lock_count now counts up whether a write lock held, and this patch now also correctly counts six_lock->intent_lock_recurse. Signed-off-by: Kent Overstreet --- fs/bcachefs/btree_locking.c | 12 ++++++++---- fs/bcachefs/six.c | 10 +++++++--- fs/bcachefs/six.h | 3 +-- fs/bcachefs/trace.h | 8 ++++---- 4 files changed, 20 insertions(+), 13 deletions(-) (limited to 'fs') diff --git a/fs/bcachefs/btree_locking.c b/fs/bcachefs/btree_locking.c index aac07e5e6854..d46109320957 100644 --- a/fs/bcachefs/btree_locking.c +++ b/fs/bcachefs/btree_locking.c @@ -22,15 +22,19 @@ struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *trans, unsigned level) { struct btree_path *path; - struct six_lock_count ret = { 0, 0 }; + struct six_lock_count ret; + + memset(&ret, 0, sizeof(ret)); if (IS_ERR_OR_NULL(b)) return ret; trans_for_each_path(trans, path) if (path != skip && path->l[level].b == b) { - ret.read += btree_node_read_locked(path, level); - ret.intent += btree_node_intent_locked(path, level); + int t = btree_node_locked_type(path, level); + + if (t != BTREE_NODE_UNLOCKED) + ret.n[t]++; } return ret; @@ -48,7 +52,7 @@ void bch2_btree_node_unlock_write(struct btree_trans *trans, void __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree *b) { - int readers = bch2_btree_node_lock_counts(trans, NULL, b, b->c.level).read; + int readers = bch2_btree_node_lock_counts(trans, NULL, b, b->c.level).n[SIX_LOCK_read]; /* * Must drop our read locks before calling six_lock_write() - diff --git a/fs/bcachefs/six.c b/fs/bcachefs/six.c index 9dd4b71e63ab..464b1313d358 100644 --- a/fs/bcachefs/six.c +++ b/fs/bcachefs/six.c @@ -764,15 +764,19 @@ EXPORT_SYMBOL_GPL(six_lock_pcpu_alloc); */ struct six_lock_count six_lock_counts(struct six_lock *lock) { - struct six_lock_count ret = { 0, 
lock->state.intent_lock }; + struct six_lock_count ret; + + ret.n[SIX_LOCK_read] = 0; + ret.n[SIX_LOCK_intent] = lock->state.intent_lock + lock->intent_lock_recurse; + ret.n[SIX_LOCK_write] = lock->state.seq & 1; if (!lock->readers) - ret.read += lock->state.read_lock; + ret.n[SIX_LOCK_read] += lock->state.read_lock; else { int cpu; for_each_possible_cpu(cpu) - ret.read += *per_cpu_ptr(lock->readers, cpu); + ret.n[SIX_LOCK_read] += *per_cpu_ptr(lock->readers, cpu); } return ret; diff --git a/fs/bcachefs/six.h b/fs/bcachefs/six.h index 08d0e0c7f2b4..59d796cfde43 100644 --- a/fs/bcachefs/six.h +++ b/fs/bcachefs/six.h @@ -206,8 +206,7 @@ void six_lock_pcpu_free(struct six_lock *); void six_lock_pcpu_alloc(struct six_lock *); struct six_lock_count { - unsigned read; - unsigned intent; + unsigned n[3]; }; struct six_lock_count six_lock_counts(struct six_lock *); diff --git a/fs/bcachefs/trace.h b/fs/bcachefs/trace.h index 9353191c4fc8..db05be59fa35 100644 --- a/fs/bcachefs/trace.h +++ b/fs/bcachefs/trace.h @@ -453,11 +453,11 @@ TRACE_EVENT(btree_node_upgrade_fail, __entry->locked = btree_node_locked(path, level); c = bch2_btree_node_lock_counts(trans, NULL, path->l[level].b, level), - __entry->self_read_count = c.read; - __entry->self_intent_count = c.intent; + __entry->self_read_count = c.n[SIX_LOCK_read]; + __entry->self_intent_count = c.n[SIX_LOCK_intent]; c = six_lock_counts(&path->l[level].b->c.lock); - __entry->read_count = c.read; - __entry->intent_count = c.intent; + __entry->read_count = c.n[SIX_LOCK_read]; + __entry->intent_count = c.n[SIX_LOCK_intent]; ), TP_printk("%s %pS btree %s pos %llu:%llu:%u, locked %u held %u:%u lock count %u:%u", -- cgit v1.2.3