summaryrefslogtreecommitdiff
path: root/fs/bcachefs/six.h
diff options
context:
space:
mode:
authorKent Overstreet <kent.overstreet@linux.dev>2022-08-27 02:22:24 +0300
committerKent Overstreet <kent.overstreet@linux.dev>2023-10-23 00:09:41 +0300
commit84a37cbf62e04480607ddd1940e3d8ce65b3828d (patch)
tree1a06e51f22ac15df4105a65d20836aadeab8507d /fs/bcachefs/six.h
parente4b7254c754b676a6f4d607fd92cd71d221ff130 (diff)
downloadlinux-84a37cbf62e04480607ddd1940e3d8ce65b3828d.tar.xz
six locks: Wakeup now takes lock on behalf of waiter
This brings back an important optimization, to avoid touching the wait lists an extra time, while preserving the property that a thread is on a lock waitlist iff it is waiting - it is never removed from the waitlist until it has the lock. Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Diffstat (limited to 'fs/bcachefs/six.h')
-rw-r--r--  fs/bcachefs/six.h  4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/fs/bcachefs/six.h b/fs/bcachefs/six.h
index 757f8aa4d339..9ebbf8095573 100644
--- a/fs/bcachefs/six.h
+++ b/fs/bcachefs/six.h
@@ -110,11 +110,10 @@ struct six_lock {
union six_lock_state state;
unsigned intent_lock_recurse;
struct task_struct *owner;
+ unsigned __percpu *readers;
#ifdef CONFIG_SIX_LOCK_SPIN_ON_OWNER
struct optimistic_spin_queue osq;
#endif
- unsigned __percpu *readers;
-
raw_spinlock_t wait_lock;
struct list_head wait_list;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -126,6 +125,7 @@ struct six_lock_waiter {
struct list_head list;
struct task_struct *task;
enum six_lock_type lock_want;
+ bool lock_acquired;
u64 start_time;
};