author    Kent Overstreet <kent.overstreet@linux.dev>    2023-05-21 06:57:48 +0300
committer Kent Overstreet <kent.overstreet@linux.dev>    2023-10-23 00:10:02 +0300
commit    1fb4fe63178881a0ac043a5c05288d9fff85d6b8 (patch)
tree      0d0d67219ffab8adfe1c1ba51eee301fa2f938e3 /fs/bcachefs/six.h
parent    c4bd3491b1c0b335f63599ec96d1d4ab0d37a3c1 (diff)
download  linux-1fb4fe63178881a0ac043a5c05288d9fff85d6b8.tar.xz
six locks: Kill six_lock_state union
As suggested by Linus, this drops the six_lock_state union in favor of raw bitmasks.

On the one hand, bitfields give more type-level structure to the code. However, a significant amount of the code was working with six_lock_state as a u64/atomic64_t, and the conversions from the bitfields to the u64 were deemed a bit too out-there.

More significantly, because bitfield order is poorly defined (#ifdef __LITTLE_ENDIAN_BITFIELD can be used, but is gross), incrementing the sequence number would overflow into the rest of the bitfield if the compiler didn't put the sequence number at the high end of the word.

The new code is a bit saner when we're on an architecture without real atomic64_t support - all accesses to lock->state now go through atomic64_*() operations.

On architectures with real atomic64_t support, we additionally use atomic bit ops for setting/clearing individual bits.

Text size: 7467 bytes -> 4649 bytes - compilers still suck at bitfields.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
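For context, here is a minimal sketch of what the raw-bitmask encoding replacing the union could look like. The mask names and exact bit positions are illustrative assumptions (the real definitions live in six.c, which is not part of this diff), but they mirror the field widths of the old bitfield struct:

	#include <linux/atomic.h>

	/* Hypothetical masks mirroring the old bitfield layout (illustrative only): */
	#define SIX_STATE_READ_LOCK	(~(~0ULL << 26))	/* bits  0..25: reader count */
	#define SIX_STATE_WRITE_LOCKING	(1ULL << 26)		/* bit  26 */
	#define SIX_STATE_INTENT_HELD	(1ULL << 27)		/* bit  27 */
	#define SIX_STATE_NOSPIN	(1ULL << 28)		/* bit  28 */
	#define SIX_STATE_WAITERS	(7ULL << 29)		/* bits 29..31 */
	#define SIX_STATE_SEQ		(~0ULL << 32)		/* bits 32..63: sequence number */

	/*
	 * With seq pinned to the top 32 bits by construction, incrementing it
	 * can only carry off the top of the word - it can never corrupt the
	 * lower fields, which is exactly the guarantee bitfields couldn't
	 * provide portably.
	 */
	static inline void six_state_seq_inc(atomic64_t *state)
	{
		atomic64_add(1ULL << 32, state);
	}

Pinning the layout with explicit masks also means every access goes through one atomic64_t, so the non-atomic64 fallback path and the atomic-bit-op fast path operate on the same word.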
Diffstat (limited to 'fs/bcachefs/six.h')
-rw-r--r--	fs/bcachefs/six.h	40
1 file changed, 6 insertions(+), 34 deletions(-)
diff --git a/fs/bcachefs/six.h b/fs/bcachefs/six.h
index 5ddabbfb8aba..449589f76628 100644
--- a/fs/bcachefs/six.h
+++ b/fs/bcachefs/six.h
@@ -68,39 +68,6 @@
#define SIX_LOCK_SEPARATE_LOCKFNS
-union six_lock_state {
- struct {
- atomic64_t counter;
- };
-
- struct {
- u64 v;
- };
-
- struct {
- /* for waitlist_bitnr() */
- unsigned long l;
- };
-
- struct {
- unsigned read_lock:26;
- unsigned write_locking:1;
- unsigned intent_lock:1;
- unsigned nospin:1;
- unsigned waiters:3;
- /*
- * seq works much like in seqlocks: it's incremented every time
- * we lock and unlock for write.
- *
- * If it's odd write lock is held, even unlocked.
- *
- * Thus readers can unlock, and then lock again later iff it
- * hasn't been modified in the meantime.
- */
- u32 seq;
- };
-};
-
enum six_lock_type {
SIX_LOCK_read,
SIX_LOCK_intent,
@@ -108,7 +75,7 @@ enum six_lock_type {
};
struct six_lock {
- union six_lock_state state;
+ atomic64_t state;
unsigned intent_lock_recurse;
struct task_struct *owner;
unsigned __percpu *readers;
@@ -148,6 +115,11 @@ do { \
__six_lock_init((lock), #lock, &__key, flags); \
} while (0)
+static inline u32 six_lock_seq(const struct six_lock *lock)
+{
+ return atomic64_read(&lock->state) >> 32;
+}
+
bool six_trylock_ip_type(struct six_lock *lock, enum six_lock_type type,
unsigned long ip);
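
The seqlock-style pattern the new six_lock_seq() helper enables looks roughly like the following. This is an illustrative caller sketch, not part of this patch; it assumes the six_relock_read() and six_unlock_read() helpers that six.h declares elsewhere:

	/*
	 * Record the sequence number, drop the read lock, and later
	 * reacquire it only if the lock wasn't written to in the
	 * meantime - much like a seqlock read section.
	 */
	u32 seq = six_lock_seq(lock);

	six_unlock_read(lock);

	/* ... do work that doesn't need the lock held ... */

	if (six_relock_read(lock, seq)) {
		/* relocked: nothing was modified while we were away */
	} else {
		/* a write happened; caller must retry from scratch */
	}

Since seq lives in the top 32 bits of lock->state, the helper is a single atomic64_read() plus a shift - no separate sequence counter field is needed.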