author	Mark Rutland <mark.rutland@arm.com>	2021-07-13 13:52:53 +0300
committer	Peter Zijlstra <peterz@infradead.org>	2021-07-16 19:46:45 +0300
commit	cf3ee3c8c29dc349b2cf52e5e72e8cb805ff5e57 (patch)
tree	3a341d32735ffce29fa67bc5bd0f84eb7fe2423d /include/asm-generic/bitops/non-atomic.h
parent	67d1b0de258ad066e1fc85d0ceaa75e107fb45bb (diff)
locking/atomic: add generic arch_*() bitops
Now that all architectures provide arch_atomic_long_*(), we can build
the generic arch_*() bitops atop these, which can be safely used in
noinstr code. The regular bitop wrappers are built atop these.

As the generic non-atomic bitops use plain accesses, these will be
implicitly instrumented unless they are inlined into noinstr functions
(which is similar to arch_atomic*_read() when based on READ_ONCE()).
The wrappers are modified so that where the underlying arch_*() function
uses a plain access, no explicit instrumentation is added, as this is
redundant and could result in confusing reports.

Since function prototypes get excessively long with both an `arch_`
prefix and `__always_inline` attribute, the return type and function
attributes have been split onto a separate line, matching the style of
the generated atomic headers.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20210713105253.7615-6-mark.rutland@arm.com
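To make the instrumentation point concrete: for an architecture that implements its own arch___set_bit() (e.g. in assembly), the wrapper in asm-generic/bitops/instrumented-non-atomic.h adds an explicit instrumentation call, roughly along these lines (a sketch of the pattern, not the exact patched file):

    static inline void __set_bit(long nr, volatile unsigned long *addr)
    {
    	instrument_write(addr + BIT_WORD(nr), sizeof(long));
    	arch___set_bit(nr, addr);
    }

The generic arch___set_bit() below instead performs a plain C access that the sanitizers instrument implicitly at compile time, so its wrapper can be a bare #define with no explicit call, avoiding redundant or confusing reports.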
Diffstat (limited to 'include/asm-generic/bitops/non-atomic.h')
-rw-r--r--	include/asm-generic/bitops/non-atomic.h	39
1 file changed, 26 insertions, 13 deletions
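Every helper in the diff below uses the same two-step addressing: BIT_WORD(nr) picks the unsigned long that holds the bit, and BIT_MASK(nr) builds the mask within that word. A minimal worked sketch (assuming BITS_PER_LONG == 64; set_bit_70() is a hypothetical name for illustration):

    #include <linux/bits.h>

    /* For nr = 70 on a 64-bit machine:
     *   BIT_WORD(70) = 70 / 64 = 1              (second word of the bitmap)
     *   BIT_MASK(70) = 1UL << (70 % 64) = 1UL << 6
     */
    static void set_bit_70(unsigned long *bitmap)
    {
    	bitmap[BIT_WORD(70)] |= BIT_MASK(70);	/* what __set_bit(70, bitmap) does */
    }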
diff --git a/include/asm-generic/bitops/non-atomic.h b/include/asm-generic/bitops/non-atomic.h
index 7e10c4b50c5d..c8149cd52730 100644
--- a/include/asm-generic/bitops/non-atomic.h
+++ b/include/asm-generic/bitops/non-atomic.h
@@ -5,7 +5,7 @@
#include <asm/types.h>
/**
- * __set_bit - Set a bit in memory
+ * arch___set_bit - Set a bit in memory
* @nr: the bit to set
* @addr: the address to start counting from
*
@@ -13,24 +13,28 @@
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
-static inline void __set_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+arch___set_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
*p |= mask;
}
+#define __set_bit arch___set_bit
-static inline void __clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+arch___clear_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
*p &= ~mask;
}
+#define __clear_bit arch___clear_bit
/**
- * __change_bit - Toggle a bit in memory
+ * arch___change_bit - Toggle a bit in memory
* @nr: the bit to change
* @addr: the address to start counting from
*
@@ -38,16 +42,18 @@ static inline void __clear_bit(int nr, volatile unsigned long *addr)
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
-static inline void __change_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+arch___change_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
*p ^= mask;
}
+#define __change_bit arch___change_bit
/**
- * __test_and_set_bit - Set a bit and return its old value
+ * arch___test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
@@ -55,7 +61,8 @@ static inline void __change_bit(int nr, volatile unsigned long *addr)
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
-static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
+static __always_inline int
+arch___test_and_set_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -64,9 +71,10 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
*p = old | mask;
return (old & mask) != 0;
}
+#define __test_and_set_bit arch___test_and_set_bit
/**
- * __test_and_clear_bit - Clear a bit and return its old value
+ * arch___test_and_clear_bit - Clear a bit and return its old value
* @nr: Bit to clear
* @addr: Address to count from
*
@@ -74,7 +82,8 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
-static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline int
+arch___test_and_clear_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -83,10 +92,11 @@ static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
*p = old & ~mask;
return (old & mask) != 0;
}
+#define __test_and_clear_bit arch___test_and_clear_bit
/* WARNING: non atomic and it can be reordered! */
-static inline int __test_and_change_bit(int nr,
- volatile unsigned long *addr)
+static __always_inline int
+arch___test_and_change_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -95,15 +105,18 @@ static inline int __test_and_change_bit(int nr,
*p = old ^ mask;
return (old & mask) != 0;
}
+#define __test_and_change_bit arch___test_and_change_bit
/**
- * test_bit - Determine whether a bit is set
+ * arch_test_bit - Determine whether a bit is set
* @nr: bit number to test
* @addr: Address to start counting from
*/
-static inline int test_bit(int nr, const volatile unsigned long *addr)
+static __always_inline int
+arch_test_bit(int nr, const volatile unsigned long *addr)
{
return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
}
+#define test_bit arch_test_bit
#endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */
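As a usage sketch of why the arch_*() forms matter (the function and flag names here are illustrative, not part of the patch): noinstr code cannot tolerate instrumentable calls, but because the arch_*() bitops are __always_inline and use plain accesses, they compile without instrumentation when inlined into a noinstr function:

    #include <linux/bitops.h>
    #include <linux/compiler_types.h>

    static unsigned long demo_flags;	/* hypothetical flag word */

    /* A noinstr function is compiled with sanitizers disabled, so the
     * plain access inside the inlined arch___set_bit() stays
     * uninstrumented; an instrumented wrapper would instead emit an
     * explicit instrumentation call here. */
    noinstr void demo_entry_mark(void)
    {
    	arch___set_bit(0, &demo_flags);
    }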