summaryrefslogtreecommitdiff
path: root/fs/bcachefs/six.c
diff options
context:
space:
mode:
authorKent Overstreet <kent.overstreet@linux.dev>2023-05-21 03:37:53 +0300
committerKent Overstreet <kent.overstreet@linux.dev>2023-10-23 00:10:02 +0300
commitd2c86b77de5894bbe26ecbf5214227f61855aed7 (patch)
tree0a879c04b004fd61bf510344c27ddaa0c862e9c6 /fs/bcachefs/six.c
parent0157f9c5a7c77b1cb89756351929dba4b28d5f75 (diff)
downloadlinux-d2c86b77de5894bbe26ecbf5214227f61855aed7.tar.xz
six locks: Centralize setting of waiting bit
Originally, the waiting bit was always set by trylock() on failure; however, it's now set by __six_lock_type_slowpath(), with wait_lock held - which is the more correct place to do it. That made setting the waiting bit in trylock redundant, so this patch deletes that. Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Diffstat (limited to 'fs/bcachefs/six.c')
-rw-r--r--fs/bcachefs/six.c15
1 file changed, 3 insertions, 12 deletions
diff --git a/fs/bcachefs/six.c b/fs/bcachefs/six.c
index 32ad545ba570..d8f1d20f5ece 100644
--- a/fs/bcachefs/six.c
+++ b/fs/bcachefs/six.c
@@ -151,14 +151,6 @@ static int __do_six_trylock_type(struct six_lock *lock,
atomic64_add(__SIX_VAL(write_locking, 1),
&lock->state.counter);
smp_mb__after_atomic();
- } else if (!(lock->state.waiters & (1 << SIX_LOCK_write))) {
- atomic64_add(__SIX_VAL(waiters, 1 << SIX_LOCK_write),
- &lock->state.counter);
- /*
- * pairs with barrier after unlock and before checking
- * for readers in unlock path
- */
- smp_mb__after_atomic();
}
ret = !pcpu_read_count(lock);
@@ -190,10 +182,9 @@ static int __do_six_trylock_type(struct six_lock *lock,
if (type == SIX_LOCK_write)
new.write_locking = 0;
- } else if (!try && !(new.waiters & (1 << type)))
- new.waiters |= 1 << type;
- else
- break; /* waiting bit already set */
+ } else {
+ break;
+ }
} while ((v = atomic64_cmpxchg_acquire(&lock->state.counter,
old.v, new.v)) != old.v);