author    Kent Overstreet <kent.overstreet@linux.dev>  2023-05-21 03:57:55 +0300
committer Kent Overstreet <kent.overstreet@linux.dev>  2023-10-23 00:10:01 +0300
commit    0d2234a79e877b1bfa71b2c8c712a155be419827 (patch)
tree      3ddf278178dc2b05ce81e20b0ed79dde52b8890d /fs
parent    01bf56a9771466147d94a013bc5678d0ed1b1382 (diff)
download  linux-0d2234a79e877b1bfa71b2c8c712a155be419827.tar.xz
six locks: Kill six_lock_pcpu_(alloc|free)
six_lock_pcpu_alloc() is an unsafe interface: it's not safe to allocate or
free the percpu reader count on an existing lock that's in use; the only
safe time to allocate percpu readers is when the lock is first being
initialized.

This patch adds a flags parameter to six_lock_init(), and instead of
six_lock_pcpu_free() we now expose six_lock_exit(), which does the same
thing but is less likely to be misused.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
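To illustrate the change for callers: a minimal before/after sketch of the
lock lifecycle, assuming a hypothetical struct `foo` that embeds a
`struct six_lock lock` member (only the six-lock calls themselves come from
this patch):

    /* Before: percpu readers were allocated after the fact */
    six_lock_init(&foo->lock);
    six_lock_pcpu_alloc(&foo->lock);   /* unsafe once the lock is in use */
    /* ... use the lock ... */
    six_lock_pcpu_free(&foo->lock);

    /* After: percpu readers are requested at initialization */
    six_lock_init(&foo->lock, SIX_LOCK_INIT_PCPU);  /* or 0 for plain mode */
    /* ... use the lock ... */
    six_lock_exit(&foo->lock);         /* frees the percpu reader count */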
Diffstat (limited to 'fs')
-rw-r--r--  fs/bcachefs/btree_cache.c      |  8
-rw-r--r--  fs/bcachefs/btree_key_cache.c  | 13
-rw-r--r--  fs/bcachefs/btree_locking.c    |  5
-rw-r--r--  fs/bcachefs/btree_locking.h    |  2
-rw-r--r--  fs/bcachefs/six.c              | 53
-rw-r--r--  fs/bcachefs/six.h              | 27
6 files changed, 56 insertions(+), 52 deletions(-)
diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c
index 76e08f2f6689..5801f4ff9097 100644
--- a/fs/bcachefs/btree_cache.c
+++ b/fs/bcachefs/btree_cache.c
@@ -121,7 +121,6 @@ static struct btree *__btree_node_mem_alloc(struct bch_fs *c, gfp_t gfp)
 		return NULL;
 
 	bkey_btree_ptr_init(&b->key);
-	bch2_btree_lock_init(&b->c);
 	INIT_LIST_HEAD(&b->list);
 	INIT_LIST_HEAD(&b->write_blocked);
 	b->byte_order	= ilog2(btree_bytes(c));
@@ -142,6 +141,8 @@ struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *c)
 		return NULL;
 	}
 
+	bch2_btree_lock_init(&b->c, 0);
+
 	bc->used++;
 	list_add(&b->list, &bc->freeable);
 	return b;
@@ -435,7 +436,7 @@ void bch2_fs_btree_cache_exit(struct bch_fs *c)
 	while (!list_empty(&bc->freed_nonpcpu)) {
 		b = list_first_entry(&bc->freed_nonpcpu, struct btree, list);
 		list_del(&b->list);
-		six_lock_pcpu_free(&b->c.lock);
+		six_lock_exit(&b->c.lock);
 		kfree(b);
 	}
 
@@ -595,8 +596,7 @@ struct btree *bch2_btree_node_mem_alloc(struct btree_trans *trans, bool pcpu_read_locks)
 		mutex_lock(&bc->lock);
 	}
 
-	if (pcpu_read_locks)
-		six_lock_pcpu_alloc(&b->c.lock);
+	bch2_btree_lock_init(&b->c, pcpu_read_locks ? SIX_LOCK_INIT_PCPU : 0);
 
 	BUG_ON(!six_trylock_intent(&b->c.lock));
 	BUG_ON(!six_trylock_write(&b->c.lock));
diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c
index 727ea2d0e58d..9725d85b99b3 100644
--- a/fs/bcachefs/btree_key_cache.c
+++ b/fs/bcachefs/btree_key_cache.c
@@ -282,9 +282,7 @@ bkey_cached_alloc(struct btree_trans *trans, struct btree_path *path,
 		return NULL;
 init:
 	INIT_LIST_HEAD(&ck->list);
-	bch2_btree_lock_init(&ck->c);
-	if (pcpu_readers)
-		six_lock_pcpu_alloc(&ck->c.lock);
+	bch2_btree_lock_init(&ck->c, pcpu_readers ? SIX_LOCK_INIT_PCPU : 0);
 
 	ck->c.cached = true;
 	BUG_ON(!six_trylock_intent(&ck->c.lock));
@@ -340,9 +338,6 @@ btree_key_cache_create(struct btree_trans *trans, struct btree_path *path)
 		}
 
 		mark_btree_node_locked(trans, path, 0, SIX_LOCK_intent);
-	} else {
-		if (path->btree_id == BTREE_ID_subvolumes)
-			six_lock_pcpu_alloc(&ck->c.lock);
 	}
 
 	ck->c.level		= 0;
@@ -871,7 +866,7 @@ static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
 			break;
 
 		list_del(&ck->list);
-		six_lock_pcpu_free(&ck->c.lock);
+		six_lock_exit(&ck->c.lock);
 		kmem_cache_free(bch2_key_cache, ck);
 		atomic_long_dec(&bc->nr_freed);
 		scanned++;
@@ -887,7 +882,7 @@ static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
 			break;
 
 		list_del(&ck->list);
-		six_lock_pcpu_free(&ck->c.lock);
+		six_lock_exit(&ck->c.lock);
 		kmem_cache_free(bch2_key_cache, ck);
 		atomic_long_dec(&bc->nr_freed);
 		scanned++;
@@ -1012,7 +1007,7 @@ void bch2_fs_btree_key_cache_exit(struct btree_key_cache *bc)
 
 		list_del(&ck->list);
 		kfree(ck->k);
-		six_lock_pcpu_free(&ck->c.lock);
+		six_lock_exit(&ck->c.lock);
 		kmem_cache_free(bch2_key_cache, ck);
 	}
 
diff --git a/fs/bcachefs/btree_locking.c b/fs/bcachefs/btree_locking.c
index d7b0c4436caf..6e1306add443 100644
--- a/fs/bcachefs/btree_locking.c
+++ b/fs/bcachefs/btree_locking.c
@@ -6,9 +6,10 @@
 
 static struct lock_class_key bch2_btree_node_lock_key;
 
-void bch2_btree_lock_init(struct btree_bkey_cached_common *b)
+void bch2_btree_lock_init(struct btree_bkey_cached_common *b,
+			  enum six_lock_init_flags flags)
 {
-	__six_lock_init(&b->lock, "b->c.lock", &bch2_btree_node_lock_key);
+	__six_lock_init(&b->lock, "b->c.lock", &bch2_btree_node_lock_key, flags);
 	lockdep_set_novalidate_class(&b->lock);
 }
 
diff --git a/fs/bcachefs/btree_locking.h b/fs/bcachefs/btree_locking.h
index 76aac49966fe..660975839c89 100644
--- a/fs/bcachefs/btree_locking.h
+++ b/fs/bcachefs/btree_locking.h
@@ -13,7 +13,7 @@
 #include "btree_iter.h"
 #include "six.h"
 
-void bch2_btree_lock_init(struct btree_bkey_cached_common *);
+void bch2_btree_lock_init(struct btree_bkey_cached_common *, enum six_lock_init_flags);
 
 #ifdef CONFIG_LOCKDEP
 void bch2_assert_btree_nodes_not_locked(void);
diff --git a/fs/bcachefs/six.c b/fs/bcachefs/six.c
index 0f9e1bf31008..f75387b9da88 100644
--- a/fs/bcachefs/six.c
+++ b/fs/bcachefs/six.c
@@ -814,25 +814,6 @@ void six_lock_wakeup_all(struct six_lock *lock)
 }
 EXPORT_SYMBOL_GPL(six_lock_wakeup_all);
 
-void six_lock_pcpu_free(struct six_lock *lock)
-{
-	BUG_ON(lock->readers && pcpu_read_count(lock));
-	BUG_ON(lock->state.read_lock);
-
-	free_percpu(lock->readers);
-	lock->readers = NULL;
-}
-EXPORT_SYMBOL_GPL(six_lock_pcpu_free);
-
-void six_lock_pcpu_alloc(struct six_lock *lock)
-{
-#ifdef __KERNEL__
-	if (!lock->readers)
-		lock->readers = alloc_percpu(unsigned);
-#endif
-}
-EXPORT_SYMBOL_GPL(six_lock_pcpu_alloc);
-
 /*
  * Returns lock held counts, for both read and intent
  */
@@ -860,3 +841,37 @@ void six_lock_readers_add(struct six_lock *lock, int nr)
 	atomic64_sub(__SIX_VAL(read_lock, -nr), &lock->state.counter);
 }
 EXPORT_SYMBOL_GPL(six_lock_readers_add);
+
+void six_lock_exit(struct six_lock *lock)
+{
+	WARN_ON(lock->readers && pcpu_read_count(lock));
+	WARN_ON(lock->state.read_lock);
+
+	free_percpu(lock->readers);
+	lock->readers = NULL;
+}
+EXPORT_SYMBOL_GPL(six_lock_exit);
+
+void __six_lock_init(struct six_lock *lock, const char *name,
+		     struct lock_class_key *key, enum six_lock_init_flags flags)
+{
+	atomic64_set(&lock->state.counter, 0);
+	raw_spin_lock_init(&lock->wait_lock);
+	INIT_LIST_HEAD(&lock->wait_list);
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	debug_check_no_locks_freed((void *) lock, sizeof(*lock));
+	lockdep_init_map(&lock->dep_map, name, key, 0);
+#endif
+
+	if (flags & SIX_LOCK_INIT_PCPU) {
+		/*
+		 * We don't return an error here on memory allocation failure
+		 * since percpu is an optimization, and locks will work with the
+		 * same semantics in non-percpu mode: callers can check for
+		 * failure if they wish by checking lock->readers, but generally
+		 * will not want to treat it as an error.
+		 */
+		lock->readers = alloc_percpu(unsigned);
+	}
+}
+EXPORT_SYMBOL_GPL(__six_lock_init);
diff --git a/fs/bcachefs/six.h b/fs/bcachefs/six.h
index 6b53818ae97a..2c8424bd7d2f 100644
--- a/fs/bcachefs/six.h
+++ b/fs/bcachefs/six.h
@@ -132,24 +132,20 @@ struct six_lock_waiter {
 
 typedef int (*six_lock_should_sleep_fn)(struct six_lock *lock, void *);
 
-static __always_inline void __six_lock_init(struct six_lock *lock,
-					    const char *name,
-					    struct lock_class_key *key)
-{
-	atomic64_set(&lock->state.counter, 0);
-	raw_spin_lock_init(&lock->wait_lock);
-	INIT_LIST_HEAD(&lock->wait_list);
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	debug_check_no_locks_freed((void *) lock, sizeof(*lock));
-	lockdep_init_map(&lock->dep_map, name, key, 0);
-#endif
-}
+void six_lock_exit(struct six_lock *lock);
 
-#define six_lock_init(lock)						\
+enum six_lock_init_flags {
+	SIX_LOCK_INIT_PCPU	= 1U << 0,
+};
+
+void __six_lock_init(struct six_lock *lock, const char *name,
+		     struct lock_class_key *key, enum six_lock_init_flags flags);
+
+#define six_lock_init(lock, flags)					\
 do {									\
 	static struct lock_class_key __key;				\
 									\
-	__six_lock_init((lock), #lock, &__key);				\
+	__six_lock_init((lock), #lock, &__key, flags);			\
 } while (0)
 
 #define __SIX_LOCK(type)						\
@@ -248,9 +244,6 @@ void six_lock_increment(struct six_lock *, enum six_lock_type);
 
 void six_lock_wakeup_all(struct six_lock *);
 
-void six_lock_pcpu_free(struct six_lock *);
-void six_lock_pcpu_alloc(struct six_lock *);
-
 struct six_lock_count {
 	unsigned n[3];
 };
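
As the comment added in __six_lock_init() notes, alloc_percpu() failure is
deliberately not treated as an error: the lock simply falls back to
non-percpu semantics. A caller that wants to know whether it actually got
percpu reader counts can check lock->readers itself; a hypothetical sketch
(not part of this patch), where `lock` is a `struct six_lock *`:

    six_lock_init(lock, SIX_LOCK_INIT_PCPU);
    if (!lock->readers)
        pr_debug("six_lock: percpu readers unavailable, using atomic fallback\n");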