From 7a34bcb8b272b1300f0125c93a54f0c98812acdd Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Tue, 1 Aug 2017 17:24:05 +0200 Subject: jump_label: Do not use unserialized static_key_enabled() Any use of key->enabled (that is static_key_enabled and static_key_count) outside jump_label_lock should handle its own serialization. The only two that are not doing so are the UDP encapsulation static keys. Change them to use static_key_enable, which now correctly tests key->enabled under the jump label lock. Signed-off-by: Paolo Bonzini Signed-off-by: Peter Zijlstra (Intel) Cc: Eric Dumazet Cc: Jason Baron Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1501601046-35683-3-git-send-email-pbonzini@redhat.com Signed-off-by: Ingo Molnar --- Documentation/static-keys.txt | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'Documentation') diff --git a/Documentation/static-keys.txt b/Documentation/static-keys.txt index b83dfa1c0602..870b4be3cb11 100644 --- a/Documentation/static-keys.txt +++ b/Documentation/static-keys.txt @@ -149,6 +149,11 @@ static_branch_inc(), will change the branch back to true. Likewise, if the key is initialized false, a 'static_branch_inc()', will change the branch to true. And then a 'static_branch_dec()', will again make the branch false. +The state and the reference count can be retrieved with 'static_key_enabled()' +and 'static_key_count()'. In general, if you use these functions, they +should be protected with the same mutex used around the enable/disable +or increment/decrement function. + Where an array of keys is required, it can be defined as:: DEFINE_STATIC_KEY_ARRAY_TRUE(keys, count); -- cgit v1.2.3 From 5a40527f8f0798553764fc8db4111d7d9c33ea51 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 1 Aug 2017 09:02:56 +0100 Subject: jump_label: Provide hotplug context variants As using the normal static key API under the hotplug lock is pretty much impossible, let's provide a variant of some of them that require the hotplug lock to have already been taken. These function are only meant to be used in CPU hotplug callbacks. Signed-off-by: Marc Zyngier Signed-off-by: Peter Zijlstra (Intel) Cc: Leo Yan Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-arm-kernel@lists.infradead.org Link: http://lkml.kernel.org/r/20170801080257.5056-4-marc.zyngier@arm.com Signed-off-by: Ingo Molnar --- Documentation/static-keys.txt | 15 +++++++++++++++ include/linux/jump_label.h | 11 +++++++++-- kernel/jump_label.c | 22 ++++++++++++++++++---- 3 files changed, 42 insertions(+), 6 deletions(-) (limited to 'Documentation') diff --git a/Documentation/static-keys.txt b/Documentation/static-keys.txt index 870b4be3cb11..ab16efe0c79d 100644 --- a/Documentation/static-keys.txt +++ b/Documentation/static-keys.txt @@ -154,6 +154,21 @@ and 'static_key_count()'. In general, if you use these functions, they should be protected with the same mutex used around the enable/disable or increment/decrement function. +Note that switching branches results in some locks being taken, +particularly the CPU hotplug lock (in order to avoid races against +CPUs being brought in the kernel whilst the kernel is getting +patched). Calling the static key API from within a hotplug notifier is +thus a sure deadlock recipe. 
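+
+For instance, a CPU hotplug callback along these lines would deadlock on
+the hotplug lock it already holds (a sketch only; the key and callback
+names are illustrative):
+
+	static DEFINE_STATIC_KEY_FALSE(myfeature_key);
+
+	static int myfeature_cpu_online(unsigned int cpu)
+	{
+		/* Takes the CPU hotplug lock again: deadlock. */
+		static_branch_enable(&myfeature_key);
+		return 0;
+	}
+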
In order to still allow use of the +functionnality, the following functions are provided: + + static_key_enable_cpuslocked() + static_key_disable_cpuslocked() + static_branch_enable_cpuslocked() + static_branch_disable_cpuslocked() + +These functions are *not* general purpose, and must only be used when +you really know that you're in the above context, and no other. + Where an array of keys is required, it can be defined as:: DEFINE_STATIC_KEY_ARRAY_TRUE(keys, count); diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index 740a42ea7f7f..cd5861651b17 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -163,6 +163,8 @@ extern void jump_label_apply_nops(struct module *mod); extern int static_key_count(struct static_key *key); extern void static_key_enable(struct static_key *key); extern void static_key_disable(struct static_key *key); +extern void static_key_enable_cpuslocked(struct static_key *key); +extern void static_key_disable_cpuslocked(struct static_key *key); /* * We should be using ATOMIC_INIT() for initializing .enabled, but @@ -254,6 +256,9 @@ static inline void static_key_disable(struct static_key *key) atomic_set(&key->enabled, 0); } +#define static_key_enable_cpuslocked(k) static_key_enable((k)) +#define static_key_disable_cpuslocked(k) static_key_disable((k)) + #define STATIC_KEY_INIT_TRUE { .enabled = ATOMIC_INIT(1) } #define STATIC_KEY_INIT_FALSE { .enabled = ATOMIC_INIT(0) } @@ -415,8 +420,10 @@ extern bool ____wrong_branch_error(void); * Normal usage; boolean enable/disable. */ -#define static_branch_enable(x) static_key_enable(&(x)->key) -#define static_branch_disable(x) static_key_disable(&(x)->key) +#define static_branch_enable(x) static_key_enable(&(x)->key) +#define static_branch_disable(x) static_key_disable(&(x)->key) +#define static_branch_enable_cpuslocked(x) static_key_enable_cpuslocked(&(x)->key) +#define static_branch_disable_cpuslocked(x) static_key_disable_cpuslocked(&(x)->key) #endif /* __ASSEMBLY__ */ diff --git a/kernel/jump_label.c b/kernel/jump_label.c index cc6d815c75ed..0bf2e8f5244a 100644 --- a/kernel/jump_label.c +++ b/kernel/jump_label.c @@ -126,15 +126,15 @@ void static_key_slow_inc(struct static_key *key) } EXPORT_SYMBOL_GPL(static_key_slow_inc); -void static_key_enable(struct static_key *key) +void static_key_enable_cpuslocked(struct static_key *key) { STATIC_KEY_CHECK_USE(); + if (atomic_read(&key->enabled) > 0) { WARN_ON_ONCE(atomic_read(&key->enabled) != 1); return; } - cpus_read_lock(); jump_label_lock(); if (atomic_read(&key->enabled) == 0) { atomic_set(&key->enabled, -1); @@ -145,23 +145,37 @@ void static_key_enable(struct static_key *key) atomic_set_release(&key->enabled, 1); } jump_label_unlock(); +} +EXPORT_SYMBOL_GPL(static_key_enable_cpuslocked); + +void static_key_enable(struct static_key *key) +{ + cpus_read_lock(); + static_key_enable_cpuslocked(key); cpus_read_unlock(); } EXPORT_SYMBOL_GPL(static_key_enable); -void static_key_disable(struct static_key *key) +void static_key_disable_cpuslocked(struct static_key *key) { STATIC_KEY_CHECK_USE(); + if (atomic_read(&key->enabled) != 1) { WARN_ON_ONCE(atomic_read(&key->enabled) != 0); return; } - cpus_read_lock(); jump_label_lock(); if (atomic_cmpxchg(&key->enabled, 1, 0)) jump_label_update(key); jump_label_unlock(); +} +EXPORT_SYMBOL_GPL(static_key_disable_cpuslocked); + +void static_key_disable(struct static_key *key) +{ + cpus_read_lock(); + static_key_disable_cpuslocked(key); cpus_read_unlock(); } EXPORT_SYMBOL_GPL(static_key_disable); -- 
cgit v1.2.3 From 706eeb3e9c6f032f2d22a1c658624cfb6ace61d4 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 12 Jun 2017 14:50:27 +0200 Subject: Documentation/locking/atomic: Add documents for new atomic_t APIs Since we've vastly expanded the atomic_t interface in recent years the existing documentation is woefully out of date and people seem to get confused a bit. Start a new document to hopefully better explain the current state of affairs. The old atomic_ops.txt also covers bitmaps and a few more details so this is not a full replacement and we'll therefore keep that document around until such a time that we've managed to write more text to cover its entire. Also please, ReST people, go away. Signed-off-by: Peter Zijlstra (Intel) Cc: Boqun Feng Cc: Linus Torvalds Cc: Paul McKenney Cc: Peter Zijlstra Cc: Randy Dunlap Cc: Thomas Gleixner Cc: Will Deacon Signed-off-by: Ingo Molnar --- Documentation/atomic_bitops.txt | 66 +++++++++++++ Documentation/atomic_t.txt | 200 ++++++++++++++++++++++++++++++++++++++ Documentation/memory-barriers.txt | 96 ++---------------- 3 files changed, 273 insertions(+), 89 deletions(-) create mode 100644 Documentation/atomic_bitops.txt create mode 100644 Documentation/atomic_t.txt (limited to 'Documentation') diff --git a/Documentation/atomic_bitops.txt b/Documentation/atomic_bitops.txt new file mode 100644 index 000000000000..5550bfdcce5f --- /dev/null +++ b/Documentation/atomic_bitops.txt @@ -0,0 +1,66 @@ + +On atomic bitops. + + +While our bitmap_{}() functions are non-atomic, we have a number of operations +operating on single bits in a bitmap that are atomic. + + +API +--- + +The single bit operations are: + +Non-RMW ops: + + test_bit() + +RMW atomic operations without return value: + + {set,clear,change}_bit() + clear_bit_unlock() + +RMW atomic operations with return value: + + test_and_{set,clear,change}_bit() + test_and_set_bit_lock() + +Barriers: + + smp_mb__{before,after}_atomic() + + +All RMW atomic operations have a '__' prefixed variant which is non-atomic. + + +SEMANTICS +--------- + +Non-atomic ops: + +In particular __clear_bit_unlock() suffers the same issue as atomic_set(), +which is why the generic version maps to clear_bit_unlock(), see atomic_t.txt. + + +RMW ops: + +The test_and_{}_bit() operations return the original value of the bit. + + +ORDERING +-------- + +Like with atomic_t, the rule of thumb is: + + - non-RMW operations are unordered; + + - RMW operations that have no return value are unordered; + + - RMW operations that have a return value are fully ordered. + +Except for test_and_set_bit_lock() which has ACQUIRE semantics and +clear_bit_unlock() which has RELEASE semantics. + +Since a platform only has a single means of achieving atomic operations +the same barriers as for atomic_t are used, see atomic_t.txt. + diff --git a/Documentation/atomic_t.txt b/Documentation/atomic_t.txt new file mode 100644 index 000000000000..eee127115277 --- /dev/null +++ b/Documentation/atomic_t.txt @@ -0,0 +1,200 @@ + +On atomic types (atomic_t atomic64_t and atomic_long_t). + +The atomic type provides an interface to the architecture's means of atomic +RMW operations between CPUs (atomic operations on MMIO are not supported and +can lead to fatal traps on some platforms). 
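+
+A minimal usage sketch (the variable and function names are only
+illustrative):
+
+	static atomic_t nr_active = ATOMIC_INIT(0);
+
+	void enter(void)
+	{
+		atomic_inc(&nr_active);		/* RMW op, no return value */
+	}
+
+	bool leave(void)
+	{
+		/* RMW op with return value; fully ordered (see ORDERING) */
+		return atomic_dec_and_test(&nr_active);
+	}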
+ +API +--- + +The 'full' API consists of (atomic64_ and atomic_long_ prefixes omitted for +brevity): + +Non-RMW ops: + + atomic_read(), atomic_set() + atomic_read_acquire(), atomic_set_release() + + +RMW atomic operations: + +Arithmetic: + + atomic_{add,sub,inc,dec}() + atomic_{add,sub,inc,dec}_return{,_relaxed,_acquire,_release}() + atomic_fetch_{add,sub,inc,dec}{,_relaxed,_acquire,_release}() + + +Bitwise: + + atomic_{and,or,xor,andnot}() + atomic_fetch_{and,or,xor,andnot}{,_relaxed,_acquire,_release}() + + +Swap: + + atomic_xchg{,_relaxed,_acquire,_release}() + atomic_cmpxchg{,_relaxed,_acquire,_release}() + atomic_try_cmpxchg{,_relaxed,_acquire,_release}() + + +Reference count (but please see refcount_t): + + atomic_add_unless(), atomic_inc_not_zero() + atomic_sub_and_test(), atomic_dec_and_test() + + +Misc: + + atomic_inc_and_test(), atomic_add_negative() + atomic_dec_unless_positive(), atomic_inc_unless_negative() + + +Barriers: + + smp_mb__{before,after}_atomic() + + + +SEMANTICS +--------- + +Non-RMW ops: + +The non-RMW ops are (typically) regular LOADs and STOREs and are canonically +implemented using READ_ONCE(), WRITE_ONCE(), smp_load_acquire() and +smp_store_release() respectively. + +The one detail to this is that atomic_set{}() should be observable to the RMW +ops. That is: + + C atomic-set + + { + atomic_set(v, 1); + } + + P1(atomic_t *v) + { + atomic_add_unless(v, 1, 0); + } + + P2(atomic_t *v) + { + atomic_set(v, 0); + } + + exists + (v=2) + +In this case we would expect the atomic_set() from CPU1 to either happen +before the atomic_add_unless(), in which case that latter one would no-op, or +_after_ in which case we'd overwrite its result. In no case is "2" a valid +outcome. + +This is typically true on 'normal' platforms, where a regular competing STORE +will invalidate a LL/SC or fail a CMPXCHG. + +The obvious case where this is not so is when we need to implement atomic ops +with a lock: + + CPU0 CPU1 + + atomic_add_unless(v, 1, 0); + lock(); + ret = READ_ONCE(v->counter); // == 1 + atomic_set(v, 0); + if (ret != u) WRITE_ONCE(v->counter, 0); + WRITE_ONCE(v->counter, ret + 1); + unlock(); + +the typical solution is to then implement atomic_set{}() with atomic_xchg(). + + +RMW ops: + +These come in various forms: + + - plain operations without return value: atomic_{}() + + - operations which return the modified value: atomic_{}_return() + + these are limited to the arithmetic operations because those are + reversible. Bitops are irreversible and therefore the modified value + is of dubious utility. + + - operations which return the original value: atomic_fetch_{}() + + - swap operations: xchg(), cmpxchg() and try_cmpxchg() + + - misc; the special purpose operations that are commonly used and would, + given the interface, normally be implemented using (try_)cmpxchg loops but + are time critical and can, (typically) on LL/SC architectures, be more + efficiently implemented. + +All these operations are SMP atomic; that is, the operations (for a single +atomic variable) can be fully ordered and no intermediate state is lost or +visible. + + +ORDERING (go read memory-barriers.txt first) +-------- + +The rule of thumb: + + - non-RMW operations are unordered; + + - RMW operations that have no return value are unordered; + + - RMW operations that have a return value are fully ordered; + + - RMW operations that are conditional are unordered on FAILURE, + otherwise the above rules apply. 
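+
+For instance (a sketch applying the rules above):
+
+	atomic_set(&v, 1);		/* non-RMW: unordered */
+	atomic_inc(&v);			/* RMW, no return value: unordered */
+	old = atomic_fetch_inc(&v);	/* RMW, returns a value: fully ordered */
+	atomic_add_unless(&v, 1, 0);	/* conditional: unordered on failure,
+					   fully ordered on success */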
+ +Except of course when an operation has an explicit ordering like: + + {}_relaxed: unordered + {}_acquire: the R of the RMW (or atomic_read) is an ACQUIRE + {}_release: the W of the RMW (or atomic_set) is a RELEASE + +Where 'unordered' is against other memory locations. Address dependencies are +not defeated. + +Fully ordered primitives are ordered against everything prior and everything +subsequent. Therefore a fully ordered primitive is like having an smp_mb() +before and an smp_mb() after the primitive. + + +The barriers: + + smp_mb__{before,after}_atomic() + +only apply to the RMW ops and can be used to augment/upgrade the ordering +inherent to the used atomic op. These barriers provide a full smp_mb(). + +These helper barriers exist because architectures have varying implicit +ordering on their SMP atomic primitives. For example our TSO architectures +provide full ordered atomics and these barriers are no-ops. + +Thus: + + atomic_fetch_add(); + +is equivalent to: + + smp_mb__before_atomic(); + atomic_fetch_add_relaxed(); + smp_mb__after_atomic(); + +However the atomic_fetch_add() might be implemented more efficiently. + +Further, while something like: + + smp_mb__before_atomic(); + atomic_dec(&X); + +is a 'typical' RELEASE pattern, the barrier is strictly stronger than +a RELEASE. Similarly for something like: + + diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt index c4ddfcd5ee32..9f34364922c8 100644 --- a/Documentation/memory-barriers.txt +++ b/Documentation/memory-barriers.txt @@ -498,11 +498,11 @@ And a couple of implicit varieties: This means that ACQUIRE acts as a minimal "acquire" operation and RELEASE acts as a minimal "release" operation. -A subset of the atomic operations described in core-api/atomic_ops.rst have -ACQUIRE and RELEASE variants in addition to fully-ordered and relaxed (no -barrier semantics) definitions. For compound atomics performing both a load -and a store, ACQUIRE semantics apply only to the load and RELEASE semantics -apply only to the store portion of the operation. +A subset of the atomic operations described in atomic_t.txt have ACQUIRE and +RELEASE variants in addition to fully-ordered and relaxed (no barrier +semantics) definitions. For compound atomics performing both a load and a +store, ACQUIRE semantics apply only to the load and RELEASE semantics apply +only to the store portion of the operation. Memory barriers are only required where there's a possibility of interaction between two CPUs or between a CPU and a device. If it can be guaranteed that @@ -1876,8 +1876,7 @@ There are some more advanced barrier functions: This makes sure that the death mark on the object is perceived to be set *before* the reference counter is decremented. - See Documentation/core-api/atomic_ops.rst for more information. See the - "Atomic operations" subsection for information on where to use these. + See Documentation/atomic_{t,bitops}.txt for more information. (*) lockless_dereference(); @@ -2503,88 +2502,7 @@ operations are noted specially as some of them imply full memory barriers and some don't, but they're very heavily relied on as a group throughout the kernel. -Any atomic operation that modifies some state in memory and returns information -about the state (old or new) implies an SMP-conditional general memory barrier -(smp_mb()) on each side of the actual operation (with the exception of -explicit lock operations, described later). 
These include: - - xchg(); - atomic_xchg(); atomic_long_xchg(); - atomic_inc_return(); atomic_long_inc_return(); - atomic_dec_return(); atomic_long_dec_return(); - atomic_add_return(); atomic_long_add_return(); - atomic_sub_return(); atomic_long_sub_return(); - atomic_inc_and_test(); atomic_long_inc_and_test(); - atomic_dec_and_test(); atomic_long_dec_and_test(); - atomic_sub_and_test(); atomic_long_sub_and_test(); - atomic_add_negative(); atomic_long_add_negative(); - test_and_set_bit(); - test_and_clear_bit(); - test_and_change_bit(); - - /* when succeeds */ - cmpxchg(); - atomic_cmpxchg(); atomic_long_cmpxchg(); - atomic_add_unless(); atomic_long_add_unless(); - -These are used for such things as implementing ACQUIRE-class and RELEASE-class -operations and adjusting reference counters towards object destruction, and as -such the implicit memory barrier effects are necessary. - - -The following operations are potential problems as they do _not_ imply memory -barriers, but might be used for implementing such things as RELEASE-class -operations: - - atomic_set(); - set_bit(); - clear_bit(); - change_bit(); - -With these the appropriate explicit memory barrier should be used if necessary -(smp_mb__before_atomic() for instance). - - -The following also do _not_ imply memory barriers, and so may require explicit -memory barriers under some circumstances (smp_mb__before_atomic() for -instance): - - atomic_add(); - atomic_sub(); - atomic_inc(); - atomic_dec(); - -If they're used for statistics generation, then they probably don't need memory -barriers, unless there's a coupling between statistical data. - -If they're used for reference counting on an object to control its lifetime, -they probably don't need memory barriers because either the reference count -will be adjusted inside a locked section, or the caller will already hold -sufficient references to make the lock, and thus a memory barrier unnecessary. - -If they're used for constructing a lock of some description, then they probably -do need memory barriers as a lock primitive generally has to do things in a -specific order. - -Basically, each usage case has to be carefully considered as to whether memory -barriers are needed or not. - -The following operations are special locking primitives: - - test_and_set_bit_lock(); - clear_bit_unlock(); - __clear_bit_unlock(); - -These implement ACQUIRE-class and RELEASE-class operations. These should be -used in preference to other operations when implementing locking primitives, -because their implementations can be optimised on many architectures. - -[!] Note that special memory barrier primitives are available for these -situations because on some CPUs the atomic instructions used imply full memory -barriers, and so barrier instructions are superfluous in conjunction with them, -and in such cases the special barrier primitives will be no-ops. - -See Documentation/core-api/atomic_ops.rst for more information. +See Documentation/atomic_t.txt for more information. ACCESSING DEVICES -- cgit v1.2.3 From a9668cd6ee288c4838bc668880ac085be551cac2 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 7 Jun 2017 17:51:27 +0200 Subject: locking: Remove smp_mb__before_spinlock() Now that there are no users of smp_mb__before_spinlock() left, remove it entirely. 
Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Signed-off-by: Ingo Molnar --- Documentation/memory-barriers.txt | 5 +---- .../translations/ko_KR/memory-barriers.txt | 5 +---- arch/arm64/include/asm/spinlock.h | 9 -------- arch/powerpc/include/asm/barrier.h | 7 ------ fs/userfaultfd.c | 25 ++++++++++------------ include/linux/spinlock.h | 13 ----------- 6 files changed, 13 insertions(+), 51 deletions(-) (limited to 'Documentation') diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt index 9f34364922c8..d1d1716f904b 100644 --- a/Documentation/memory-barriers.txt +++ b/Documentation/memory-barriers.txt @@ -1981,10 +1981,7 @@ for each construct. These operations all imply certain barriers: ACQUIRE operation has completed. Memory operations issued before the ACQUIRE may be completed after - the ACQUIRE operation has completed. An smp_mb__before_spinlock(), - combined with a following ACQUIRE, orders prior stores against - subsequent loads and stores. Note that this is weaker than smp_mb()! - The smp_mb__before_spinlock() primitive is free on many architectures. + the ACQUIRE operation has completed. (2) RELEASE operation implication: diff --git a/Documentation/translations/ko_KR/memory-barriers.txt b/Documentation/translations/ko_KR/memory-barriers.txt index 38310dcd6620..bc80fc0e210f 100644 --- a/Documentation/translations/ko_KR/memory-barriers.txt +++ b/Documentation/translations/ko_KR/memory-barriers.txt @@ -1956,10 +1956,7 @@ MMIO 쓰기 배리어 뒤에 완료됩니다. ACQUIRE 앞에서 요청된 메모리 오퍼레이션은 ACQUIRE 오퍼레이션이 완료된 후에 - 완료될 수 있습니다. smp_mb__before_spinlock() 뒤에 ACQUIRE 가 실행되는 - 코드 블록은 블록 앞의 스토어를 블록 뒤의 로드와 스토어에 대해 순서 - 맞춥니다. 이건 smp_mb() 보다 완화된 것임을 기억하세요! 많은 아키텍쳐에서 - smp_mb__before_spinlock() 은 사실 아무일도 하지 않습니다. + 완료될 수 있습니다. (2) RELEASE 오퍼레이션의 영향: diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h index b103888b694a..ae4241ab19a8 100644 --- a/arch/arm64/include/asm/spinlock.h +++ b/arch/arm64/include/asm/spinlock.h @@ -358,15 +358,6 @@ static inline int arch_read_trylock(arch_rwlock_t *rw) #define arch_read_relax(lock) cpu_relax() #define arch_write_relax(lock) cpu_relax() -/* - * Accesses appearing in program order before a spin_lock() operation - * can be reordered with accesses inside the critical section, by virtue - * of arch_spin_lock being constructed using acquire semantics. - * - * In cases where this is problematic (e.g. try_to_wake_up), an - * smp_mb__before_spinlock() can restore the required ordering. - */ -#define smp_mb__before_spinlock() smp_mb() /* See include/linux/spinlock.h */ #define smp_mb__after_spinlock() smp_mb() diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h index 25d42bd3f114..9c601adfc500 100644 --- a/arch/powerpc/include/asm/barrier.h +++ b/arch/powerpc/include/asm/barrier.h @@ -74,13 +74,6 @@ do { \ ___p1; \ }) -/* - * This must resolve to hwsync on SMP for the context switch path. - * See _switch, and core scheduler context switch memory ordering - * comments. 
- */ -#define smp_mb__before_spinlock() smp_mb() - #include #endif /* _ASM_POWERPC_BARRIER_H */ diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index 06ea26b8c996..44fcbefd84a2 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c @@ -109,27 +109,24 @@ static int userfaultfd_wake_function(wait_queue_entry_t *wq, unsigned mode, goto out; WRITE_ONCE(uwq->waken, true); /* - * The implicit smp_mb__before_spinlock in try_to_wake_up() - * renders uwq->waken visible to other CPUs before the task is - * waken. + * The Program-Order guarantees provided by the scheduler + * ensure uwq->waken is visible before the task is woken. */ ret = wake_up_state(wq->private, mode); - if (ret) + if (ret) { /* * Wake only once, autoremove behavior. * - * After the effect of list_del_init is visible to the - * other CPUs, the waitqueue may disappear from under - * us, see the !list_empty_careful() in - * handle_userfault(). try_to_wake_up() has an - * implicit smp_mb__before_spinlock, and the - * wq->private is read before calling the extern - * function "wake_up_state" (which in turns calls - * try_to_wake_up). While the spin_lock;spin_unlock; - * wouldn't be enough, the smp_mb__before_spinlock is - * enough to avoid an explicit smp_mb() here. + * After the effect of list_del_init is visible to the other + * CPUs, the waitqueue may disappear from under us, see the + * !list_empty_careful() in handle_userfault(). + * + * try_to_wake_up() has an implicit smp_mb(), and the + * wq->private is read before calling the extern function + * "wake_up_state" (which in turns calls try_to_wake_up). */ list_del_init(&wq->entry); + } out: return ret; } diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 840281095933..4e8cce19b507 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -117,19 +117,6 @@ do { \ #endif /*arch_spin_is_contended*/ #endif -/* - * Despite its name it doesn't necessarily has to be a full barrier. - * It should only guarantee that a STORE before the critical section - * can not be reordered with LOADs and STOREs inside this section. - * spin_lock() is the one-way barrier, this LOAD can not escape out - * of the region. So the default implementation simply ensures that - * a STORE can not move into the critical section, smp_wmb() should - * serialize it with another STORE done by spin_lock(). - */ -#ifndef smp_mb__before_spinlock -#define smp_mb__before_spinlock() smp_wmb() -#endif - /* * This barrier must provide two things: * -- cgit v1.2.3 From ef0758dd0fd70b98b889af26e27f003656952db8 Mon Sep 17 00:00:00 2001 From: Byungchul Park Date: Mon, 7 Aug 2017 16:13:01 +0900 Subject: locking/lockdep: Add 'crossrelease' feature documentation This document describes the concept of crossrelease feature. 
Signed-off-by: Byungchul Park Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: akpm@linux-foundation.org Cc: boqun.feng@gmail.com Cc: kernel-team@lge.com Cc: kirill@shutemov.name Cc: npiggin@gmail.com Cc: walken@google.com Cc: willy@infradead.org Link: http://lkml.kernel.org/r/1502089981-21272-15-git-send-email-byungchul.park@lge.com Signed-off-by: Ingo Molnar --- Documentation/locking/crossrelease.txt | 874 +++++++++++++++++++++++++++++++++ 1 file changed, 874 insertions(+) create mode 100644 Documentation/locking/crossrelease.txt (limited to 'Documentation') diff --git a/Documentation/locking/crossrelease.txt b/Documentation/locking/crossrelease.txt new file mode 100644 index 000000000000..bdf1423d5f99 --- /dev/null +++ b/Documentation/locking/crossrelease.txt @@ -0,0 +1,874 @@ +Crossrelease +============ + +Started by Byungchul Park + +Contents: + + (*) Background + + - What causes deadlock + - How lockdep works + + (*) Limitation + + - Limit lockdep + - Pros from the limitation + - Cons from the limitation + - Relax the limitation + + (*) Crossrelease + + - Introduce crossrelease + - Introduce commit + + (*) Implementation + + - Data structures + - How crossrelease works + + (*) Optimizations + + - Avoid duplication + - Lockless for hot paths + + (*) APPENDIX A: What lockdep does to work aggresively + + (*) APPENDIX B: How to avoid adding false dependencies + + +========== +Background +========== + +What causes deadlock +-------------------- + +A deadlock occurs when a context is waiting for an event to happen, +which is impossible because another (or the) context who can trigger the +event is also waiting for another (or the) event to happen, which is +also impossible due to the same reason. + +For example: + + A context going to trigger event C is waiting for event A to happen. + A context going to trigger event A is waiting for event B to happen. + A context going to trigger event B is waiting for event C to happen. + +A deadlock occurs when these three wait operations run at the same time, +because event C cannot be triggered if event A does not happen, which in +turn cannot be triggered if event B does not happen, which in turn +cannot be triggered if event C does not happen. After all, no event can +be triggered since any of them never meets its condition to wake up. + +A dependency might exist between two waiters and a deadlock might happen +due to an incorrect releationship between dependencies. Thus, we must +define what a dependency is first. A dependency exists between them if: + + 1. There are two waiters waiting for each event at a given time. + 2. The only way to wake up each waiter is to trigger its event. + 3. Whether one can be woken up depends on whether the other can. + +Each wait in the example creates its dependency like: + + Event C depends on event A. + Event A depends on event B. + Event B depends on event C. + + NOTE: Precisely speaking, a dependency is one between whether a + waiter for an event can be woken up and whether another waiter for + another event can be woken up. However from now on, we will describe + a dependency as if it's one between an event and another event for + simplicity. + +And they form circular dependencies like: + + -> C -> A -> B - + / \ + \ / + ---------------- + + where 'A -> B' means that event A depends on event B. + +Such circular dependencies lead to a deadlock since no waiter can meet +its condition to wake up as described. 
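+
+The classic ABBA case is a concrete instance of such a circle, sketched
+here with two mutexes:
+
+	CONTEXT X			CONTEXT Y
+	---------			---------
+	mutex_lock A
+					mutex_lock B
+	mutex_lock B /* wait for Y */
+					mutex_lock A /* wait for X, DEADLOCK */
+
+	where A and B are different lock classes.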
+ +CONCLUSION + +Circular dependencies cause a deadlock. + + +How lockdep works +----------------- + +Lockdep tries to detect a deadlock by checking dependencies created by +lock operations, acquire and release. Waiting for a lock corresponds to +waiting for an event, and releasing a lock corresponds to triggering an +event in the previous section. + +In short, lockdep does: + + 1. Detect a new dependency. + 2. Add the dependency into a global graph. + 3. Check if that makes dependencies circular. + 4. Report a deadlock or its possibility if so. + +For example, consider a graph built by lockdep that looks like: + + A -> B - + \ + -> E + / + C -> D - + + where A, B,..., E are different lock classes. + +Lockdep will add a dependency into the graph on detection of a new +dependency. For example, it will add a dependency 'E -> C' when a new +dependency between lock E and lock C is detected. Then the graph will be: + + A -> B - + \ + -> E - + / \ + -> C -> D - \ + / / + \ / + ------------------ + + where A, B,..., E are different lock classes. + +This graph contains a subgraph which demonstrates circular dependencies: + + -> E - + / \ + -> C -> D - \ + / / + \ / + ------------------ + + where C, D and E are different lock classes. + +This is the condition under which a deadlock might occur. Lockdep +reports it on detection after adding a new dependency. This is the way +how lockdep works. + +CONCLUSION + +Lockdep detects a deadlock or its possibility by checking if circular +dependencies were created after adding each new dependency. + + +========== +Limitation +========== + +Limit lockdep +------------- + +Limiting lockdep to work on only typical locks e.g. spin locks and +mutexes, which are released within the acquire context, the +implementation becomes simple but its capacity for detection becomes +limited. Let's check pros and cons in next section. + + +Pros from the limitation +------------------------ + +Given the limitation, when acquiring a lock, locks in a held_locks +cannot be released if the context cannot acquire it so has to wait to +acquire it, which means all waiters for the locks in the held_locks are +stuck. It's an exact case to create dependencies between each lock in +the held_locks and the lock to acquire. + +For example: + + CONTEXT X + --------- + acquire A + acquire B /* Add a dependency 'A -> B' */ + release B + release A + + where A and B are different lock classes. + +When acquiring lock A, the held_locks of CONTEXT X is empty thus no +dependency is added. But when acquiring lock B, lockdep detects and adds +a new dependency 'A -> B' between lock A in the held_locks and lock B. +They can be simply added whenever acquiring each lock. + +And data required by lockdep exists in a local structure, held_locks +embedded in task_struct. Forcing to access the data within the context, +lockdep can avoid racy problems without explicit locks while handling +the local data. + +Lastly, lockdep only needs to keep locks currently being held, to build +a dependency graph. However, relaxing the limitation, it needs to keep +even locks already released, because a decision whether they created +dependencies might be long-deferred. + +To sum up, we can expect several advantages from the limitation: + + 1. Lockdep can easily identify a dependency when acquiring a lock. + 2. Races are avoidable while accessing local locks in a held_locks. + 3. Lockdep only needs to keep locks currently being held. + +CONCLUSION + +Given the limitation, the implementation becomes simple and efficient. 
+ + +Cons from the limitation +------------------------ + +Given the limitation, lockdep is applicable only to typical locks. For +example, page locks for page access or completions for synchronization +cannot work with lockdep. + +Can we detect deadlocks below, under the limitation? + +Example 1: + + CONTEXT X CONTEXT Y CONTEXT Z + --------- --------- ---------- + mutex_lock A + lock_page B + lock_page B + mutex_lock A /* DEADLOCK */ + unlock_page B held by X + unlock_page B + mutex_unlock A + mutex_unlock A + + where A and B are different lock classes. + +No, we cannot. + +Example 2: + + CONTEXT X CONTEXT Y + --------- --------- + mutex_lock A + mutex_lock A + wait_for_complete B /* DEADLOCK */ + complete B + mutex_unlock A + mutex_unlock A + + where A is a lock class and B is a completion variable. + +No, we cannot. + +CONCLUSION + +Given the limitation, lockdep cannot detect a deadlock or its +possibility caused by page locks or completions. + + +Relax the limitation +-------------------- + +Under the limitation, things to create dependencies are limited to +typical locks. However, synchronization primitives like page locks and +completions, which are allowed to be released in any context, also +create dependencies and can cause a deadlock. So lockdep should track +these locks to do a better job. We have to relax the limitation for +these locks to work with lockdep. + +Detecting dependencies is very important for lockdep to work because +adding a dependency means adding an opportunity to check whether it +causes a deadlock. The more lockdep adds dependencies, the more it +thoroughly works. Thus Lockdep has to do its best to detect and add as +many true dependencies into a graph as possible. + +For example, considering only typical locks, lockdep builds a graph like: + + A -> B - + \ + -> E + / + C -> D - + + where A, B,..., E are different lock classes. + +On the other hand, under the relaxation, additional dependencies might +be created and added. Assuming additional 'FX -> C' and 'E -> GX' are +added thanks to the relaxation, the graph will be: + + A -> B - + \ + -> E -> GX + / + FX -> C -> D - + + where A, B,..., E, FX and GX are different lock classes, and a suffix + 'X' is added on non-typical locks. + +The latter graph gives us more chances to check circular dependencies +than the former. However, it might suffer performance degradation since +relaxing the limitation, with which design and implementation of lockdep +can be efficient, might introduce inefficiency inevitably. So lockdep +should provide two options, strong detection and efficient detection. + +Choosing efficient detection: + + Lockdep works with only locks restricted to be released within the + acquire context. However, lockdep works efficiently. + +Choosing strong detection: + + Lockdep works with all synchronization primitives. However, lockdep + suffers performance degradation. + +CONCLUSION + +Relaxing the limitation, lockdep can add additional dependencies giving +additional opportunities to check circular dependencies. + + +============ +Crossrelease +============ + +Introduce crossrelease +---------------------- + +In order to allow lockdep to handle additional dependencies by what +might be released in any context, namely 'crosslock', we have to be able +to identify those created by crosslocks. The proposed 'crossrelease' +feature provoides a way to do that. + +Crossrelease feature has to do: + + 1. Identify dependencies created by crosslocks. + 2. Add the dependencies into a dependency graph. + +That's all. 
Once a meaningful dependency is added into graph, then +lockdep would work with the graph as it did. The most important thing +crossrelease feature has to do is to correctly identify and add true +dependencies into the global graph. + +A dependency e.g. 'A -> B' can be identified only in the A's release +context because a decision required to identify the dependency can be +made only in the release context. That is to decide whether A can be +released so that a waiter for A can be woken up. It cannot be made in +other than the A's release context. + +It's no matter for typical locks because each acquire context is same as +its release context, thus lockdep can decide whether a lock can be +released in the acquire context. However for crosslocks, lockdep cannot +make the decision in the acquire context but has to wait until the +release context is identified. + +Therefore, deadlocks by crosslocks cannot be detected just when it +happens, because those cannot be identified until the crosslocks are +released. However, deadlock possibilities can be detected and it's very +worth. See 'APPENDIX A' section to check why. + +CONCLUSION + +Using crossrelease feature, lockdep can work with what might be released +in any context, namely crosslock. + + +Introduce commit +---------------- + +Since crossrelease defers the work adding true dependencies of +crosslocks until they are actually released, crossrelease has to queue +all acquisitions which might create dependencies with the crosslocks. +Then it identifies dependencies using the queued data in batches at a +proper time. We call it 'commit'. + +There are four types of dependencies: + +1. TT type: 'typical lock A -> typical lock B' + + Just when acquiring B, lockdep can see it's in the A's release + context. So the dependency between A and B can be identified + immediately. Commit is unnecessary. + +2. TC type: 'typical lock A -> crosslock BX' + + Just when acquiring BX, lockdep can see it's in the A's release + context. So the dependency between A and BX can be identified + immediately. Commit is unnecessary, too. + +3. CT type: 'crosslock AX -> typical lock B' + + When acquiring B, lockdep cannot identify the dependency because + there's no way to know if it's in the AX's release context. It has + to wait until the decision can be made. Commit is necessary. + +4. CC type: 'crosslock AX -> crosslock BX' + + When acquiring BX, lockdep cannot identify the dependency because + there's no way to know if it's in the AX's release context. It has + to wait until the decision can be made. Commit is necessary. + But, handling CC type is not implemented yet. It's a future work. + +Lockdep can work without commit for typical locks, but commit step is +necessary once crosslocks are involved. Introducing commit, lockdep +performs three steps. What lockdep does in each step is: + +1. Acquisition: For typical locks, lockdep does what it originally did + and queues the lock so that CT type dependencies can be checked using + it at the commit step. For crosslocks, it saves data which will be + used at the commit step and increases a reference count for it. + +2. Commit: No action is reauired for typical locks. For crosslocks, + lockdep adds CT type dependencies using the data saved at the + acquisition step. + +3. Release: No changes are required for typical locks. When a crosslock + is released, it decreases a reference count for it. + +CONCLUSION + +Crossrelease introduces commit step to handle dependencies of crosslocks +in batches at a proper time. 
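+
+As a rough illustration of the three steps, consider a completion used
+as crosslock AX together with a typical lock B:
+
+	TASK X				TASK Y
+	------				------
+	wait_for_complete AX
+	/* acquisition: save AX's data */
+					mutex_lock B
+					/* acquisition: queue B */
+					mutex_unlock B
+					complete AX
+					/* commit: add 'AX -> B', then
+					   release AX */
+
+	where AX and B are different lock classes, and a suffix 'X' is
+	added on crosslocks.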
+ + +============== +Implementation +============== + +Data structures +--------------- + +Crossrelease introduces two main data structures. + +1. hist_lock + + This is an array embedded in task_struct, for keeping lock history so + that dependencies can be added using them at the commit step. Since + it's local data, it can be accessed locklessly in the owner context. + The array is filled at the acquisition step and consumed at the + commit step. And it's managed in circular manner. + +2. cross_lock + + One per lockdep_map exists. This is for keeping data of crosslocks + and used at the commit step. + + +How crossrelease works +---------------------- + +It's the key of how crossrelease works, to defer necessary works to an +appropriate point in time and perform in at once at the commit step. +Let's take a look with examples step by step, starting from how lockdep +works without crossrelease for typical locks. + + acquire A /* Push A onto held_locks */ + acquire B /* Push B onto held_locks and add 'A -> B' */ + acquire C /* Push C onto held_locks and add 'B -> C' */ + release C /* Pop C from held_locks */ + release B /* Pop B from held_locks */ + release A /* Pop A from held_locks */ + + where A, B and C are different lock classes. + + NOTE: This document assumes that readers already understand how + lockdep works without crossrelease thus omits details. But there's + one thing to note. Lockdep pretends to pop a lock from held_locks + when releasing it. But it's subtly different from the original pop + operation because lockdep allows other than the top to be poped. + +In this case, lockdep adds 'the top of held_locks -> the lock to acquire' +dependency every time acquiring a lock. + +After adding 'A -> B', a dependency graph will be: + + A -> B + + where A and B are different lock classes. + +And after adding 'B -> C', the graph will be: + + A -> B -> C + + where A, B and C are different lock classes. + +Let's performs commit step even for typical locks to add dependencies. +Of course, commit step is not necessary for them, however, it would work +well because this is a more general way. + + acquire A + /* + * Queue A into hist_locks + * + * In hist_locks: A + * In graph: Empty + */ + + acquire B + /* + * Queue B into hist_locks + * + * In hist_locks: A, B + * In graph: Empty + */ + + acquire C + /* + * Queue C into hist_locks + * + * In hist_locks: A, B, C + * In graph: Empty + */ + + commit C + /* + * Add 'C -> ?' + * Answer the following to decide '?' + * What has been queued since acquire C: Nothing + * + * In hist_locks: A, B, C + * In graph: Empty + */ + + release C + + commit B + /* + * Add 'B -> ?' + * Answer the following to decide '?' + * What has been queued since acquire B: C + * + * In hist_locks: A, B, C + * In graph: 'B -> C' + */ + + release B + + commit A + /* + * Add 'A -> ?' + * Answer the following to decide '?' + * What has been queued since acquire A: B, C + * + * In hist_locks: A, B, C + * In graph: 'B -> C', 'A -> B', 'A -> C' + */ + + release A + + where A, B and C are different lock classes. + +In this case, dependencies are added at the commit step as described. + +After commits for A, B and C, the graph will be: + + A -> B -> C + + where A, B and C are different lock classes. + + NOTE: A dependency 'A -> C' is optimized out. + +We can see the former graph built without commit step is same as the +latter graph built using commit steps. 
Of course the former way leads to +earlier finish for building the graph, which means we can detect a +deadlock or its possibility sooner. So the former way would be prefered +when possible. But we cannot avoid using the latter way for crosslocks. + +Let's look at how commit steps work for crosslocks. In this case, the +commit step is performed only on crosslock AX as real. And it assumes +that the AX release context is different from the AX acquire context. + + BX RELEASE CONTEXT BX ACQUIRE CONTEXT + ------------------ ------------------ + acquire A + /* + * Push A onto held_locks + * Queue A into hist_locks + * + * In held_locks: A + * In hist_locks: A + * In graph: Empty + */ + + acquire BX + /* + * Add 'the top of held_locks -> BX' + * + * In held_locks: A + * In hist_locks: A + * In graph: 'A -> BX' + */ + + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + It must be guaranteed that the following operations are seen after + acquiring BX globally. It can be done by things like barrier. + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + acquire C + /* + * Push C onto held_locks + * Queue C into hist_locks + * + * In held_locks: C + * In hist_locks: C + * In graph: 'A -> BX' + */ + + release C + /* + * Pop C from held_locks + * + * In held_locks: Empty + * In hist_locks: C + * In graph: 'A -> BX' + */ + acquire D + /* + * Push D onto held_locks + * Queue D into hist_locks + * Add 'the top of held_locks -> D' + * + * In held_locks: A, D + * In hist_locks: A, D + * In graph: 'A -> BX', 'A -> D' + */ + acquire E + /* + * Push E onto held_locks + * Queue E into hist_locks + * + * In held_locks: E + * In hist_locks: C, E + * In graph: 'A -> BX', 'A -> D' + */ + + release E + /* + * Pop E from held_locks + * + * In held_locks: Empty + * In hist_locks: D, E + * In graph: 'A -> BX', 'A -> D' + */ + release D + /* + * Pop D from held_locks + * + * In held_locks: A + * In hist_locks: A, D + * In graph: 'A -> BX', 'A -> D' + */ + commit BX + /* + * Add 'BX -> ?' + * What has been queued since acquire BX: C, E + * + * In held_locks: Empty + * In hist_locks: D, E + * In graph: 'A -> BX', 'A -> D', + * 'BX -> C', 'BX -> E' + */ + + release BX + /* + * In held_locks: Empty + * In hist_locks: D, E + * In graph: 'A -> BX', 'A -> D', + * 'BX -> C', 'BX -> E' + */ + release A + /* + * Pop A from held_locks + * + * In held_locks: Empty + * In hist_locks: A, D + * In graph: 'A -> BX', 'A -> D', + * 'BX -> C', 'BX -> E' + */ + + where A, BX, C,..., E are different lock classes, and a suffix 'X' is + added on crosslocks. + +Crossrelease considers all acquisitions after acqiuring BX are +candidates which might create dependencies with BX. True dependencies +will be determined when identifying the release context of BX. Meanwhile, +all typical locks are queued so that they can be used at the commit step. +And then two dependencies 'BX -> C' and 'BX -> E' are added at the +commit step when identifying the release context. + +The final graph will be, with crossrelease: + + -> C + / + -> BX - + / \ + A - -> E + \ + -> D + + where A, BX, C,..., E are different lock classes, and a suffix 'X' is + added on crosslocks. + +However, the final graph will be, without crossrelease: + + A -> D + + where A and D are different lock classes. + +The former graph has three more dependencies, 'A -> BX', 'BX -> C' and +'BX -> E' giving additional opportunities to check if they cause +deadlocks. This way lockdep can detect a deadlock or its possibility +caused by crosslocks. 
+ +CONCLUSION + +We checked how crossrelease works with several examples. + + +============= +Optimizations +============= + +Avoid duplication +----------------- + +Crossrelease feature uses a cache like what lockdep already uses for +dependency chains, but this time it's for caching CT type dependencies. +Once that dependency is cached, the same will never be added again. + + +Lockless for hot paths +---------------------- + +To keep all locks for later use at the commit step, crossrelease adopts +a local array embedded in task_struct, which makes access to the data +lockless by forcing it to happen only within the owner context. It's +like how lockdep handles held_locks. Lockless implmentation is important +since typical locks are very frequently acquired and released. + + +================================================= +APPENDIX A: What lockdep does to work aggresively +================================================= + +A deadlock actually occurs when all wait operations creating circular +dependencies run at the same time. Even though they don't, a potential +deadlock exists if the problematic dependencies exist. Thus it's +meaningful to detect not only an actual deadlock but also its potential +possibility. The latter is rather valuable. When a deadlock occurs +actually, we can identify what happens in the system by some means or +other even without lockdep. However, there's no way to detect possiblity +without lockdep unless the whole code is parsed in head. It's terrible. +Lockdep does the both, and crossrelease only focuses on the latter. + +Whether or not a deadlock actually occurs depends on several factors. +For example, what order contexts are switched in is a factor. Assuming +circular dependencies exist, a deadlock would occur when contexts are +switched so that all wait operations creating the dependencies run +simultaneously. Thus to detect a deadlock possibility even in the case +that it has not occured yet, lockdep should consider all possible +combinations of dependencies, trying to: + +1. Use a global dependency graph. + + Lockdep combines all dependencies into one global graph and uses them, + regardless of which context generates them or what order contexts are + switched in. Aggregated dependencies are only considered so they are + prone to be circular if a problem exists. + +2. Check dependencies between classes instead of instances. + + What actually causes a deadlock are instances of lock. However, + lockdep checks dependencies between classes instead of instances. + This way lockdep can detect a deadlock which has not happened but + might happen in future by others but the same class. + +3. Assume all acquisitions lead to waiting. + + Although locks might be acquired without waiting which is essential + to create dependencies, lockdep assumes all acquisitions lead to + waiting since it might be true some time or another. + +CONCLUSION + +Lockdep detects not only an actual deadlock but also its possibility, +and the latter is more valuable. + + +================================================== +APPENDIX B: How to avoid adding false dependencies +================================================== + +Remind what a dependency is. A dependency exists if: + + 1. There are two waiters waiting for each event at a given time. + 2. The only way to wake up each waiter is to trigger its event. + 3. Whether one can be woken up depends on whether the other can. 
+ +For example: + + acquire A + acquire B /* A dependency 'A -> B' exists */ + release B + release A + + where A and B are different lock classes. + +A depedency 'A -> B' exists since: + + 1. A waiter for A and a waiter for B might exist when acquiring B. + 2. Only way to wake up each is to release what it waits for. + 3. Whether the waiter for A can be woken up depends on whether the + other can. IOW, TASK X cannot release A if it fails to acquire B. + +For another example: + + TASK X TASK Y + ------ ------ + acquire AX + acquire B /* A dependency 'AX -> B' exists */ + release B + release AX held by Y + + where AX and B are different lock classes, and a suffix 'X' is added + on crosslocks. + +Even in this case involving crosslocks, the same rule can be applied. A +depedency 'AX -> B' exists since: + + 1. A waiter for AX and a waiter for B might exist when acquiring B. + 2. Only way to wake up each is to release what it waits for. + 3. Whether the waiter for AX can be woken up depends on whether the + other can. IOW, TASK X cannot release AX if it fails to acquire B. + +Let's take a look at more complicated example: + + TASK X TASK Y + ------ ------ + acquire B + release B + fork Y + acquire AX + acquire C /* A dependency 'AX -> C' exists */ + release C + release AX held by Y + + where AX, B and C are different lock classes, and a suffix 'X' is + added on crosslocks. + +Does a dependency 'AX -> B' exist? Nope. + +Two waiters are essential to create a dependency. However, waiters for +AX and B to create 'AX -> B' cannot exist at the same time in this +example. Thus the dependency 'AX -> B' cannot be created. + +It would be ideal if the full set of true ones can be considered. But +we can ensure nothing but what actually happened. Relying on what +actually happens at runtime, we can anyway add only true ones, though +they might be a subset of true ones. It's similar to how lockdep works +for typical locks. There might be more true dependencies than what +lockdep has detected in runtime. Lockdep has no choice but to rely on +what actually happens. Crossrelease also relies on it. + +CONCLUSION + +Relying on what actually happens, lockdep can avoid adding false +dependencies. -- cgit v1.2.3 From ca110694c6950dfd7bc864138c80fe39ea43da5b Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 23 Aug 2017 18:15:20 +0200 Subject: Documentation/locking/atomic: Finish the document... Julia reported that the document looked unfinished, and it is. I forgot to include the example cooked up by Paul here: https://lkml.kernel.org/r/20170731174345.GL3730@linux.vnet.ibm.com and I added an explicit example showing how, while it is an ACQUIRE pattern, it really does provide an MB. Reported-by: Julia Cartwright Signed-off-by: Peter Zijlstra (Intel) Cc: Boqun Feng Cc: Linus Torvalds Cc: Paul E. McKenney Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Will Deacon Signed-off-by: Ingo Molnar --- Documentation/atomic_t.txt | 42 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) (limited to 'Documentation') diff --git a/Documentation/atomic_t.txt b/Documentation/atomic_t.txt index eee127115277..913396ac5824 100644 --- a/Documentation/atomic_t.txt +++ b/Documentation/atomic_t.txt @@ -197,4 +197,46 @@ Further, while something like: is a 'typical' RELEASE pattern, the barrier is strictly stronger than a RELEASE. Similarly for something like: + atomic_inc(&X); + smp_mb__after_atomic(); + +is an ACQUIRE pattern (though very much not typical), but again the barrier is +strictly stronger than ACQUIRE. 
As illustrated: + + C strong-acquire + + { + } + + P1(int *x, atomic_t *y) + { + r0 = READ_ONCE(*x); + smp_rmb(); + r1 = atomic_read(y); + } + + P2(int *x, atomic_t *y) + { + atomic_inc(y); + smp_mb__after_atomic(); + WRITE_ONCE(*x, 1); + } + + exists + (r0=1 /\ r1=0) + +This should not happen; but a hypothetical atomic_inc_acquire() -- +(void)atomic_fetch_inc_acquire() for instance -- would allow the outcome, +since then: + + P1 P2 + + t = LL.acq *y (0) + t++; + *x = 1; + r0 = *x (1) + RMB + r1 = *y (0) + SC *y, t; +is allowed. -- cgit v1.2.3