author    Ingo Molnar <mingo@kernel.org>  2020-11-07 14:50:48 +0300
committer Ingo Molnar <mingo@kernel.org>  2020-11-07 15:20:17 +0300
commit    666fab4a3ea143315a9c059fad9f3a0f1365d54b (patch)
tree      e9e4be3b0eeac79346d52a86183326617d0c9999 /include/asm-generic
parent    0a986ea81e1aa8ac17e82cda53cc95158217956e (diff)
parent    659caaf65dc9c7150aa3e80225ec6e66b25ab3ce (diff)
download  linux-666fab4a3ea143315a9c059fad9f3a0f1365d54b.tar.xz
Merge branch 'linus' into perf/kprobes
Conflicts:
	include/asm-generic/atomic-instrumented.h
	kernel/kprobes.c

Use the upstream atomic-instrumented.h checksum, and pick the kprobes
version of kernel/kprobes.c, which effectively reverts this upstream
workaround:

  645f224e7ba2: ("kprobes: Tell lockdep about kprobe nesting")

Since the new code *should* be fine without nesting.

Knock on wood ...

Signed-off-by: Ingo Molnar <mingo@kernel.org>
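The bulk of the merged atomic changes below switch the instrumented atomic
wrappers from write-only to read-write instrumentation. For reference, a
minimal sketch of the two helpers involved, modeled on
include/linux/instrumented.h of this period (not part of this diff; exact
bodies may differ):

/*
 * Sketch, assumed from include/linux/instrumented.h of this era.
 * Atomic RMW operations such as atomic_add() both read and write *v,
 * so instrumenting them as plain atomic writes hides the read half
 * from KCSAN; the read-write variant models both accesses.
 */
#include <linux/compiler.h>
#include <linux/kasan-checks.h>
#include <linux/kcsan-checks.h>
#include <linux/types.h>

/* Old call used by the RMW atomics: checks the write side only. */
static __always_inline void instrument_atomic_write(const volatile void *v, size_t size)
{
	kasan_check_write(v, size);
	kcsan_check_atomic_write(v, size);
}

/* New call: marks the access as an atomic read-modify-write. */
static __always_inline void instrument_atomic_read_write(const volatile void *v, size_t size)
{
	kasan_check_write(v, size);
	kcsan_check_atomic_read_write(v, size);
}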
Diffstat (limited to 'include/asm-generic')
-rw-r--r--  include/asm-generic/Kbuild                               2
-rw-r--r--  include/asm-generic/atomic-instrumented.h              330
-rw-r--r--  include/asm-generic/bitops/instrumented-atomic.h         6
-rw-r--r--  include/asm-generic/bitops/instrumented-lock.h           2
-rw-r--r--  include/asm-generic/bitops/instrumented-non-atomic.h    30
-rw-r--r--  include/asm-generic/bug.h                                6
-rw-r--r--  include/asm-generic/checksum.h                          12
-rw-r--r--  include/asm-generic/compat.h                             8
-rw-r--r--  include/asm-generic/dma-contiguous.h                    10
-rw-r--r--  include/asm-generic/error-injection.h                    2
-rw-r--r--  include/asm-generic/io.h                                39
-rw-r--r--  include/asm-generic/kprobes.h                            4
-rw-r--r--  include/asm-generic/module.lds.h                        10
-rw-r--r--  include/asm-generic/mshyperv.h                           4
-rw-r--r--  include/asm-generic/uaccess.h                          111
-rw-r--r--  include/asm-generic/vmlinux.lds.h                       75
16 files changed, 388 insertions, 263 deletions
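For illustration of what the read-write instrumentation buys, a hypothetical
two-thread scenario (names and code are illustrative only, not from this
patch):

#include <linux/atomic.h>

static atomic_t flags;

void writer_a(void)
{
	atomic_or(0x1, &flags);	/* atomic RMW: reads *and* writes flags */
}

void writer_b(void)
{
	flags.counter = 0;	/* plain, unmarked store to the same word */
}

/*
 * Previously the RMW in writer_a() was instrumented as a pure atomic
 * write; after this merge it is modeled as a read followed by a write,
 * so KCSAN can also detect and attribute races involving the RMW's
 * read of the old value, not just its store.
 */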
diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild
index 74b0612601dd..e78bbb9a07e9 100644
--- a/include/asm-generic/Kbuild
+++ b/include/asm-generic/Kbuild
@@ -16,7 +16,6 @@ mandatory-y += current.h
mandatory-y += delay.h
mandatory-y += device.h
mandatory-y += div64.h
-mandatory-y += dma-contiguous.h
mandatory-y += dma-mapping.h
mandatory-y += dma.h
mandatory-y += emergency-restart.h
@@ -40,6 +39,7 @@ mandatory-y += mmiowb.h
mandatory-y += mmu.h
mandatory-y += mmu_context.h
mandatory-y += module.h
+mandatory-y += module.lds.h
mandatory-y += msi.h
mandatory-y += pci.h
mandatory-y += percpu.h
diff --git a/include/asm-generic/atomic-instrumented.h b/include/asm-generic/atomic-instrumented.h
index 2cab3fdaae3b..492cc955557b 100644
--- a/include/asm-generic/atomic-instrumented.h
+++ b/include/asm-generic/atomic-instrumented.h
@@ -60,7 +60,7 @@ atomic_set_release(atomic_t *v, int i)
static __always_inline void
atomic_add(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
arch_atomic_add(i, v);
}
#define atomic_add atomic_add
@@ -69,7 +69,7 @@ atomic_add(int i, atomic_t *v)
static __always_inline int
atomic_add_return(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_add_return(i, v);
}
#define atomic_add_return atomic_add_return
@@ -79,7 +79,7 @@ atomic_add_return(int i, atomic_t *v)
static __always_inline int
atomic_add_return_acquire(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_add_return_acquire(i, v);
}
#define atomic_add_return_acquire atomic_add_return_acquire
@@ -89,7 +89,7 @@ atomic_add_return_acquire(int i, atomic_t *v)
static __always_inline int
atomic_add_return_release(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_add_return_release(i, v);
}
#define atomic_add_return_release atomic_add_return_release
@@ -99,7 +99,7 @@ atomic_add_return_release(int i, atomic_t *v)
static __always_inline int
atomic_add_return_relaxed(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_add_return_relaxed(i, v);
}
#define atomic_add_return_relaxed atomic_add_return_relaxed
@@ -109,7 +109,7 @@ atomic_add_return_relaxed(int i, atomic_t *v)
static __always_inline int
atomic_fetch_add(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_add(i, v);
}
#define atomic_fetch_add atomic_fetch_add
@@ -119,7 +119,7 @@ atomic_fetch_add(int i, atomic_t *v)
static __always_inline int
atomic_fetch_add_acquire(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_add_acquire(i, v);
}
#define atomic_fetch_add_acquire atomic_fetch_add_acquire
@@ -129,7 +129,7 @@ atomic_fetch_add_acquire(int i, atomic_t *v)
static __always_inline int
atomic_fetch_add_release(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_add_release(i, v);
}
#define atomic_fetch_add_release atomic_fetch_add_release
@@ -139,7 +139,7 @@ atomic_fetch_add_release(int i, atomic_t *v)
static __always_inline int
atomic_fetch_add_relaxed(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_add_relaxed(i, v);
}
#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
@@ -148,7 +148,7 @@ atomic_fetch_add_relaxed(int i, atomic_t *v)
static __always_inline void
atomic_sub(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
arch_atomic_sub(i, v);
}
#define atomic_sub atomic_sub
@@ -157,7 +157,7 @@ atomic_sub(int i, atomic_t *v)
static __always_inline int
atomic_sub_return(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_sub_return(i, v);
}
#define atomic_sub_return atomic_sub_return
@@ -167,7 +167,7 @@ atomic_sub_return(int i, atomic_t *v)
static __always_inline int
atomic_sub_return_acquire(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_sub_return_acquire(i, v);
}
#define atomic_sub_return_acquire atomic_sub_return_acquire
@@ -177,7 +177,7 @@ atomic_sub_return_acquire(int i, atomic_t *v)
static __always_inline int
atomic_sub_return_release(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_sub_return_release(i, v);
}
#define atomic_sub_return_release atomic_sub_return_release
@@ -187,7 +187,7 @@ atomic_sub_return_release(int i, atomic_t *v)
static __always_inline int
atomic_sub_return_relaxed(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_sub_return_relaxed(i, v);
}
#define atomic_sub_return_relaxed atomic_sub_return_relaxed
@@ -197,7 +197,7 @@ atomic_sub_return_relaxed(int i, atomic_t *v)
static __always_inline int
atomic_fetch_sub(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_sub(i, v);
}
#define atomic_fetch_sub atomic_fetch_sub
@@ -207,7 +207,7 @@ atomic_fetch_sub(int i, atomic_t *v)
static __always_inline int
atomic_fetch_sub_acquire(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_sub_acquire(i, v);
}
#define atomic_fetch_sub_acquire atomic_fetch_sub_acquire
@@ -217,7 +217,7 @@ atomic_fetch_sub_acquire(int i, atomic_t *v)
static __always_inline int
atomic_fetch_sub_release(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_sub_release(i, v);
}
#define atomic_fetch_sub_release atomic_fetch_sub_release
@@ -227,7 +227,7 @@ atomic_fetch_sub_release(int i, atomic_t *v)
static __always_inline int
atomic_fetch_sub_relaxed(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_sub_relaxed(i, v);
}
#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
@@ -237,7 +237,7 @@ atomic_fetch_sub_relaxed(int i, atomic_t *v)
static __always_inline void
atomic_inc(atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
arch_atomic_inc(v);
}
#define atomic_inc atomic_inc
@@ -247,7 +247,7 @@ atomic_inc(atomic_t *v)
static __always_inline int
atomic_inc_return(atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_inc_return(v);
}
#define atomic_inc_return atomic_inc_return
@@ -257,7 +257,7 @@ atomic_inc_return(atomic_t *v)
static __always_inline int
atomic_inc_return_acquire(atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_inc_return_acquire(v);
}
#define atomic_inc_return_acquire atomic_inc_return_acquire
@@ -267,7 +267,7 @@ atomic_inc_return_acquire(atomic_t *v)
static __always_inline int
atomic_inc_return_release(atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_inc_return_release(v);
}
#define atomic_inc_return_release atomic_inc_return_release
@@ -277,7 +277,7 @@ atomic_inc_return_release(atomic_t *v)
static __always_inline int
atomic_inc_return_relaxed(atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_inc_return_relaxed(v);
}
#define atomic_inc_return_relaxed atomic_inc_return_relaxed
@@ -287,7 +287,7 @@ atomic_inc_return_relaxed(atomic_t *v)
static __always_inline int
atomic_fetch_inc(atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_inc(v);
}
#define atomic_fetch_inc atomic_fetch_inc
@@ -297,7 +297,7 @@ atomic_fetch_inc(atomic_t *v)
static __always_inline int
atomic_fetch_inc_acquire(atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_inc_acquire(v);
}
#define atomic_fetch_inc_acquire atomic_fetch_inc_acquire
@@ -307,7 +307,7 @@ atomic_fetch_inc_acquire(atomic_t *v)
static __always_inline int
atomic_fetch_inc_release(atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_inc_release(v);
}
#define atomic_fetch_inc_release atomic_fetch_inc_release
@@ -317,7 +317,7 @@ atomic_fetch_inc_release(atomic_t *v)
static __always_inline int
atomic_fetch_inc_relaxed(atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_inc_relaxed(v);
}
#define atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed
@@ -327,7 +327,7 @@ atomic_fetch_inc_relaxed(atomic_t *v)
static __always_inline void
atomic_dec(atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
arch_atomic_dec(v);
}
#define atomic_dec atomic_dec
@@ -337,7 +337,7 @@ atomic_dec(atomic_t *v)
static __always_inline int
atomic_dec_return(atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_dec_return(v);
}
#define atomic_dec_return atomic_dec_return
@@ -347,7 +347,7 @@ atomic_dec_return(atomic_t *v)
static __always_inline int
atomic_dec_return_acquire(atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_dec_return_acquire(v);
}
#define atomic_dec_return_acquire atomic_dec_return_acquire
@@ -357,7 +357,7 @@ atomic_dec_return_acquire(atomic_t *v)
static __always_inline int
atomic_dec_return_release(atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_dec_return_release(v);
}
#define atomic_dec_return_release atomic_dec_return_release
@@ -367,7 +367,7 @@ atomic_dec_return_release(atomic_t *v)
static __always_inline int
atomic_dec_return_relaxed(atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_dec_return_relaxed(v);
}
#define atomic_dec_return_relaxed atomic_dec_return_relaxed
@@ -377,7 +377,7 @@ atomic_dec_return_relaxed(atomic_t *v)
static __always_inline int
atomic_fetch_dec(atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_dec(v);
}
#define atomic_fetch_dec atomic_fetch_dec
@@ -387,7 +387,7 @@ atomic_fetch_dec(atomic_t *v)
static __always_inline int
atomic_fetch_dec_acquire(atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_dec_acquire(v);
}
#define atomic_fetch_dec_acquire atomic_fetch_dec_acquire
@@ -397,7 +397,7 @@ atomic_fetch_dec_acquire(atomic_t *v)
static __always_inline int
atomic_fetch_dec_release(atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_dec_release(v);
}
#define atomic_fetch_dec_release atomic_fetch_dec_release
@@ -407,7 +407,7 @@ atomic_fetch_dec_release(atomic_t *v)
static __always_inline int
atomic_fetch_dec_relaxed(atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_dec_relaxed(v);
}
#define atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed
@@ -416,7 +416,7 @@ atomic_fetch_dec_relaxed(atomic_t *v)
static __always_inline void
atomic_and(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
arch_atomic_and(i, v);
}
#define atomic_and atomic_and
@@ -425,7 +425,7 @@ atomic_and(int i, atomic_t *v)
static __always_inline int
atomic_fetch_and(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_and(i, v);
}
#define atomic_fetch_and atomic_fetch_and
@@ -435,7 +435,7 @@ atomic_fetch_and(int i, atomic_t *v)
static __always_inline int
atomic_fetch_and_acquire(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_and_acquire(i, v);
}
#define atomic_fetch_and_acquire atomic_fetch_and_acquire
@@ -445,7 +445,7 @@ atomic_fetch_and_acquire(int i, atomic_t *v)
static __always_inline int
atomic_fetch_and_release(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_and_release(i, v);
}
#define atomic_fetch_and_release atomic_fetch_and_release
@@ -455,7 +455,7 @@ atomic_fetch_and_release(int i, atomic_t *v)
static __always_inline int
atomic_fetch_and_relaxed(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_and_relaxed(i, v);
}
#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
@@ -465,7 +465,7 @@ atomic_fetch_and_relaxed(int i, atomic_t *v)
static __always_inline void
atomic_andnot(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
arch_atomic_andnot(i, v);
}
#define atomic_andnot atomic_andnot
@@ -475,7 +475,7 @@ atomic_andnot(int i, atomic_t *v)
static __always_inline int
atomic_fetch_andnot(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_andnot(i, v);
}
#define atomic_fetch_andnot atomic_fetch_andnot
@@ -485,7 +485,7 @@ atomic_fetch_andnot(int i, atomic_t *v)
static __always_inline int
atomic_fetch_andnot_acquire(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_andnot_acquire(i, v);
}
#define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire
@@ -495,7 +495,7 @@ atomic_fetch_andnot_acquire(int i, atomic_t *v)
static __always_inline int
atomic_fetch_andnot_release(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_andnot_release(i, v);
}
#define atomic_fetch_andnot_release atomic_fetch_andnot_release
@@ -505,7 +505,7 @@ atomic_fetch_andnot_release(int i, atomic_t *v)
static __always_inline int
atomic_fetch_andnot_relaxed(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_andnot_relaxed(i, v);
}
#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
@@ -514,7 +514,7 @@ atomic_fetch_andnot_relaxed(int i, atomic_t *v)
static __always_inline void
atomic_or(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
arch_atomic_or(i, v);
}
#define atomic_or atomic_or
@@ -523,7 +523,7 @@ atomic_or(int i, atomic_t *v)
static __always_inline int
atomic_fetch_or(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_or(i, v);
}
#define atomic_fetch_or atomic_fetch_or
@@ -533,7 +533,7 @@ atomic_fetch_or(int i, atomic_t *v)
static __always_inline int
atomic_fetch_or_acquire(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_or_acquire(i, v);
}
#define atomic_fetch_or_acquire atomic_fetch_or_acquire
@@ -543,7 +543,7 @@ atomic_fetch_or_acquire(int i, atomic_t *v)
static __always_inline int
atomic_fetch_or_release(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_or_release(i, v);
}
#define atomic_fetch_or_release atomic_fetch_or_release
@@ -553,7 +553,7 @@ atomic_fetch_or_release(int i, atomic_t *v)
static __always_inline int
atomic_fetch_or_relaxed(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_or_relaxed(i, v);
}
#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
@@ -562,7 +562,7 @@ atomic_fetch_or_relaxed(int i, atomic_t *v)
static __always_inline void
atomic_xor(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
arch_atomic_xor(i, v);
}
#define atomic_xor atomic_xor
@@ -571,7 +571,7 @@ atomic_xor(int i, atomic_t *v)
static __always_inline int
atomic_fetch_xor(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_xor(i, v);
}
#define atomic_fetch_xor atomic_fetch_xor
@@ -581,7 +581,7 @@ atomic_fetch_xor(int i, atomic_t *v)
static __always_inline int
atomic_fetch_xor_acquire(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_xor_acquire(i, v);
}
#define atomic_fetch_xor_acquire atomic_fetch_xor_acquire
@@ -591,7 +591,7 @@ atomic_fetch_xor_acquire(int i, atomic_t *v)
static __always_inline int
atomic_fetch_xor_release(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_xor_release(i, v);
}
#define atomic_fetch_xor_release atomic_fetch_xor_release
@@ -601,7 +601,7 @@ atomic_fetch_xor_release(int i, atomic_t *v)
static __always_inline int
atomic_fetch_xor_relaxed(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_xor_relaxed(i, v);
}
#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
@@ -611,7 +611,7 @@ atomic_fetch_xor_relaxed(int i, atomic_t *v)
static __always_inline int
atomic_xchg(atomic_t *v, int i)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_xchg(v, i);
}
#define atomic_xchg atomic_xchg
@@ -621,7 +621,7 @@ atomic_xchg(atomic_t *v, int i)
static __always_inline int
atomic_xchg_acquire(atomic_t *v, int i)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_xchg_acquire(v, i);
}
#define atomic_xchg_acquire atomic_xchg_acquire
@@ -631,7 +631,7 @@ atomic_xchg_acquire(atomic_t *v, int i)
static __always_inline int
atomic_xchg_release(atomic_t *v, int i)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_xchg_release(v, i);
}
#define atomic_xchg_release atomic_xchg_release
@@ -641,7 +641,7 @@ atomic_xchg_release(atomic_t *v, int i)
static __always_inline int
atomic_xchg_relaxed(atomic_t *v, int i)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_xchg_relaxed(v, i);
}
#define atomic_xchg_relaxed atomic_xchg_relaxed
@@ -651,7 +651,7 @@ atomic_xchg_relaxed(atomic_t *v, int i)
static __always_inline int
atomic_cmpxchg(atomic_t *v, int old, int new)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_cmpxchg(v, old, new);
}
#define atomic_cmpxchg atomic_cmpxchg
@@ -661,7 +661,7 @@ atomic_cmpxchg(atomic_t *v, int old, int new)
static __always_inline int
atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_cmpxchg_acquire(v, old, new);
}
#define atomic_cmpxchg_acquire atomic_cmpxchg_acquire
@@ -671,7 +671,7 @@ atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
static __always_inline int
atomic_cmpxchg_release(atomic_t *v, int old, int new)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_cmpxchg_release(v, old, new);
}
#define atomic_cmpxchg_release atomic_cmpxchg_release
@@ -681,7 +681,7 @@ atomic_cmpxchg_release(atomic_t *v, int old, int new)
static __always_inline int
atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_cmpxchg_relaxed(v, old, new);
}
#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed
@@ -691,8 +691,8 @@ atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
static __always_inline bool
atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
- instrument_atomic_write(v, sizeof(*v));
- instrument_atomic_write(old, sizeof(*old));
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_atomic_read_write(old, sizeof(*old));
return arch_atomic_try_cmpxchg(v, old, new);
}
#define atomic_try_cmpxchg atomic_try_cmpxchg
@@ -702,8 +702,8 @@ atomic_try_cmpxchg(atomic_t *v, int *old, int new)
static __always_inline bool
atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
{
- instrument_atomic_write(v, sizeof(*v));
- instrument_atomic_write(old, sizeof(*old));
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_atomic_read_write(old, sizeof(*old));
return arch_atomic_try_cmpxchg_acquire(v, old, new);
}
#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire
@@ -713,8 +713,8 @@ atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
static __always_inline bool
atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
{
- instrument_atomic_write(v, sizeof(*v));
- instrument_atomic_write(old, sizeof(*old));
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_atomic_read_write(old, sizeof(*old));
return arch_atomic_try_cmpxchg_release(v, old, new);
}
#define atomic_try_cmpxchg_release atomic_try_cmpxchg_release
@@ -724,8 +724,8 @@ atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
static __always_inline bool
atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
{
- instrument_atomic_write(v, sizeof(*v));
- instrument_atomic_write(old, sizeof(*old));
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_atomic_read_write(old, sizeof(*old));
return arch_atomic_try_cmpxchg_relaxed(v, old, new);
}
#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg_relaxed
@@ -735,7 +735,7 @@ atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
static __always_inline bool
atomic_sub_and_test(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_sub_and_test(i, v);
}
#define atomic_sub_and_test atomic_sub_and_test
@@ -745,7 +745,7 @@ atomic_sub_and_test(int i, atomic_t *v)
static __always_inline bool
atomic_dec_and_test(atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_dec_and_test(v);
}
#define atomic_dec_and_test atomic_dec_and_test
@@ -755,7 +755,7 @@ atomic_dec_and_test(atomic_t *v)
static __always_inline bool
atomic_inc_and_test(atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_inc_and_test(v);
}
#define atomic_inc_and_test atomic_inc_and_test
@@ -765,7 +765,7 @@ atomic_inc_and_test(atomic_t *v)
static __always_inline bool
atomic_add_negative(int i, atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_add_negative(i, v);
}
#define atomic_add_negative atomic_add_negative
@@ -775,7 +775,7 @@ atomic_add_negative(int i, atomic_t *v)
static __always_inline int
atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_fetch_add_unless(v, a, u);
}
#define atomic_fetch_add_unless atomic_fetch_add_unless
@@ -785,7 +785,7 @@ atomic_fetch_add_unless(atomic_t *v, int a, int u)
static __always_inline bool
atomic_add_unless(atomic_t *v, int a, int u)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_add_unless(v, a, u);
}
#define atomic_add_unless atomic_add_unless
@@ -795,7 +795,7 @@ atomic_add_unless(atomic_t *v, int a, int u)
static __always_inline bool
atomic_inc_not_zero(atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_inc_not_zero(v);
}
#define atomic_inc_not_zero atomic_inc_not_zero
@@ -805,7 +805,7 @@ atomic_inc_not_zero(atomic_t *v)
static __always_inline bool
atomic_inc_unless_negative(atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_inc_unless_negative(v);
}
#define atomic_inc_unless_negative atomic_inc_unless_negative
@@ -815,7 +815,7 @@ atomic_inc_unless_negative(atomic_t *v)
static __always_inline bool
atomic_dec_unless_positive(atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_dec_unless_positive(v);
}
#define atomic_dec_unless_positive atomic_dec_unless_positive
@@ -825,7 +825,7 @@ atomic_dec_unless_positive(atomic_t *v)
static __always_inline int
atomic_dec_if_positive(atomic_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic_dec_if_positive(v);
}
#define atomic_dec_if_positive atomic_dec_if_positive
@@ -870,7 +870,7 @@ atomic64_set_release(atomic64_t *v, s64 i)
static __always_inline void
atomic64_add(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
arch_atomic64_add(i, v);
}
#define atomic64_add atomic64_add
@@ -879,7 +879,7 @@ atomic64_add(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_add_return(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_add_return(i, v);
}
#define atomic64_add_return atomic64_add_return
@@ -889,7 +889,7 @@ atomic64_add_return(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_add_return_acquire(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_add_return_acquire(i, v);
}
#define atomic64_add_return_acquire atomic64_add_return_acquire
@@ -899,7 +899,7 @@ atomic64_add_return_acquire(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_add_return_release(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_add_return_release(i, v);
}
#define atomic64_add_return_release atomic64_add_return_release
@@ -909,7 +909,7 @@ atomic64_add_return_release(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_add_return_relaxed(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_add_return_relaxed(i, v);
}
#define atomic64_add_return_relaxed atomic64_add_return_relaxed
@@ -919,7 +919,7 @@ atomic64_add_return_relaxed(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_fetch_add(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_add(i, v);
}
#define atomic64_fetch_add atomic64_fetch_add
@@ -929,7 +929,7 @@ atomic64_fetch_add(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_add_acquire(i, v);
}
#define atomic64_fetch_add_acquire atomic64_fetch_add_acquire
@@ -939,7 +939,7 @@ atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_fetch_add_release(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_add_release(i, v);
}
#define atomic64_fetch_add_release atomic64_fetch_add_release
@@ -949,7 +949,7 @@ atomic64_fetch_add_release(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_fetch_add_relaxed(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_add_relaxed(i, v);
}
#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
@@ -958,7 +958,7 @@ atomic64_fetch_add_relaxed(s64 i, atomic64_t *v)
static __always_inline void
atomic64_sub(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
arch_atomic64_sub(i, v);
}
#define atomic64_sub atomic64_sub
@@ -967,7 +967,7 @@ atomic64_sub(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_sub_return(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_sub_return(i, v);
}
#define atomic64_sub_return atomic64_sub_return
@@ -977,7 +977,7 @@ atomic64_sub_return(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_sub_return_acquire(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_sub_return_acquire(i, v);
}
#define atomic64_sub_return_acquire atomic64_sub_return_acquire
@@ -987,7 +987,7 @@ atomic64_sub_return_acquire(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_sub_return_release(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_sub_return_release(i, v);
}
#define atomic64_sub_return_release atomic64_sub_return_release
@@ -997,7 +997,7 @@ atomic64_sub_return_release(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_sub_return_relaxed(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_sub_return_relaxed(i, v);
}
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
@@ -1007,7 +1007,7 @@ atomic64_sub_return_relaxed(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_fetch_sub(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_sub(i, v);
}
#define atomic64_fetch_sub atomic64_fetch_sub
@@ -1017,7 +1017,7 @@ atomic64_fetch_sub(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_sub_acquire(i, v);
}
#define atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire
@@ -1027,7 +1027,7 @@ atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_fetch_sub_release(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_sub_release(i, v);
}
#define atomic64_fetch_sub_release atomic64_fetch_sub_release
@@ -1037,7 +1037,7 @@ atomic64_fetch_sub_release(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_sub_relaxed(i, v);
}
#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
@@ -1047,7 +1047,7 @@ atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v)
static __always_inline void
atomic64_inc(atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
arch_atomic64_inc(v);
}
#define atomic64_inc atomic64_inc
@@ -1057,7 +1057,7 @@ atomic64_inc(atomic64_t *v)
static __always_inline s64
atomic64_inc_return(atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_inc_return(v);
}
#define atomic64_inc_return atomic64_inc_return
@@ -1067,7 +1067,7 @@ atomic64_inc_return(atomic64_t *v)
static __always_inline s64
atomic64_inc_return_acquire(atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_inc_return_acquire(v);
}
#define atomic64_inc_return_acquire atomic64_inc_return_acquire
@@ -1077,7 +1077,7 @@ atomic64_inc_return_acquire(atomic64_t *v)
static __always_inline s64
atomic64_inc_return_release(atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_inc_return_release(v);
}
#define atomic64_inc_return_release atomic64_inc_return_release
@@ -1087,7 +1087,7 @@ atomic64_inc_return_release(atomic64_t *v)
static __always_inline s64
atomic64_inc_return_relaxed(atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_inc_return_relaxed(v);
}
#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
@@ -1097,7 +1097,7 @@ atomic64_inc_return_relaxed(atomic64_t *v)
static __always_inline s64
atomic64_fetch_inc(atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_inc(v);
}
#define atomic64_fetch_inc atomic64_fetch_inc
@@ -1107,7 +1107,7 @@ atomic64_fetch_inc(atomic64_t *v)
static __always_inline s64
atomic64_fetch_inc_acquire(atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_inc_acquire(v);
}
#define atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire
@@ -1117,7 +1117,7 @@ atomic64_fetch_inc_acquire(atomic64_t *v)
static __always_inline s64
atomic64_fetch_inc_release(atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_inc_release(v);
}
#define atomic64_fetch_inc_release atomic64_fetch_inc_release
@@ -1127,7 +1127,7 @@ atomic64_fetch_inc_release(atomic64_t *v)
static __always_inline s64
atomic64_fetch_inc_relaxed(atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_inc_relaxed(v);
}
#define atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed
@@ -1137,7 +1137,7 @@ atomic64_fetch_inc_relaxed(atomic64_t *v)
static __always_inline void
atomic64_dec(atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
arch_atomic64_dec(v);
}
#define atomic64_dec atomic64_dec
@@ -1147,7 +1147,7 @@ atomic64_dec(atomic64_t *v)
static __always_inline s64
atomic64_dec_return(atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_dec_return(v);
}
#define atomic64_dec_return atomic64_dec_return
@@ -1157,7 +1157,7 @@ atomic64_dec_return(atomic64_t *v)
static __always_inline s64
atomic64_dec_return_acquire(atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_dec_return_acquire(v);
}
#define atomic64_dec_return_acquire atomic64_dec_return_acquire
@@ -1167,7 +1167,7 @@ atomic64_dec_return_acquire(atomic64_t *v)
static __always_inline s64
atomic64_dec_return_release(atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_dec_return_release(v);
}
#define atomic64_dec_return_release atomic64_dec_return_release
@@ -1177,7 +1177,7 @@ atomic64_dec_return_release(atomic64_t *v)
static __always_inline s64
atomic64_dec_return_relaxed(atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_dec_return_relaxed(v);
}
#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed
@@ -1187,7 +1187,7 @@ atomic64_dec_return_relaxed(atomic64_t *v)
static __always_inline s64
atomic64_fetch_dec(atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_dec(v);
}
#define atomic64_fetch_dec atomic64_fetch_dec
@@ -1197,7 +1197,7 @@ atomic64_fetch_dec(atomic64_t *v)
static __always_inline s64
atomic64_fetch_dec_acquire(atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_dec_acquire(v);
}
#define atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire
@@ -1207,7 +1207,7 @@ atomic64_fetch_dec_acquire(atomic64_t *v)
static __always_inline s64
atomic64_fetch_dec_release(atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_dec_release(v);
}
#define atomic64_fetch_dec_release atomic64_fetch_dec_release
@@ -1217,7 +1217,7 @@ atomic64_fetch_dec_release(atomic64_t *v)
static __always_inline s64
atomic64_fetch_dec_relaxed(atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_dec_relaxed(v);
}
#define atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed
@@ -1226,7 +1226,7 @@ atomic64_fetch_dec_relaxed(atomic64_t *v)
static __always_inline void
atomic64_and(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
arch_atomic64_and(i, v);
}
#define atomic64_and atomic64_and
@@ -1235,7 +1235,7 @@ atomic64_and(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_fetch_and(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_and(i, v);
}
#define atomic64_fetch_and atomic64_fetch_and
@@ -1245,7 +1245,7 @@ atomic64_fetch_and(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_and_acquire(i, v);
}
#define atomic64_fetch_and_acquire atomic64_fetch_and_acquire
@@ -1255,7 +1255,7 @@ atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_fetch_and_release(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_and_release(i, v);
}
#define atomic64_fetch_and_release atomic64_fetch_and_release
@@ -1265,7 +1265,7 @@ atomic64_fetch_and_release(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_fetch_and_relaxed(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_and_relaxed(i, v);
}
#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
@@ -1275,7 +1275,7 @@ atomic64_fetch_and_relaxed(s64 i, atomic64_t *v)
static __always_inline void
atomic64_andnot(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
arch_atomic64_andnot(i, v);
}
#define atomic64_andnot atomic64_andnot
@@ -1285,7 +1285,7 @@ atomic64_andnot(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_fetch_andnot(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_andnot(i, v);
}
#define atomic64_fetch_andnot atomic64_fetch_andnot
@@ -1295,7 +1295,7 @@ atomic64_fetch_andnot(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_andnot_acquire(i, v);
}
#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire
@@ -1305,7 +1305,7 @@ atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_andnot_release(i, v);
}
#define atomic64_fetch_andnot_release atomic64_fetch_andnot_release
@@ -1315,7 +1315,7 @@ atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_andnot_relaxed(i, v);
}
#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
@@ -1324,7 +1324,7 @@ atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
static __always_inline void
atomic64_or(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
arch_atomic64_or(i, v);
}
#define atomic64_or atomic64_or
@@ -1333,7 +1333,7 @@ atomic64_or(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_fetch_or(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_or(i, v);
}
#define atomic64_fetch_or atomic64_fetch_or
@@ -1343,7 +1343,7 @@ atomic64_fetch_or(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_or_acquire(i, v);
}
#define atomic64_fetch_or_acquire atomic64_fetch_or_acquire
@@ -1353,7 +1353,7 @@ atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_fetch_or_release(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_or_release(i, v);
}
#define atomic64_fetch_or_release atomic64_fetch_or_release
@@ -1363,7 +1363,7 @@ atomic64_fetch_or_release(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_fetch_or_relaxed(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_or_relaxed(i, v);
}
#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
@@ -1372,7 +1372,7 @@ atomic64_fetch_or_relaxed(s64 i, atomic64_t *v)
static __always_inline void
atomic64_xor(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
arch_atomic64_xor(i, v);
}
#define atomic64_xor atomic64_xor
@@ -1381,7 +1381,7 @@ atomic64_xor(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_fetch_xor(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_xor(i, v);
}
#define atomic64_fetch_xor atomic64_fetch_xor
@@ -1391,7 +1391,7 @@ atomic64_fetch_xor(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_xor_acquire(i, v);
}
#define atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire
@@ -1401,7 +1401,7 @@ atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_fetch_xor_release(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_xor_release(i, v);
}
#define atomic64_fetch_xor_release atomic64_fetch_xor_release
@@ -1411,7 +1411,7 @@ atomic64_fetch_xor_release(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_xor_relaxed(i, v);
}
#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
@@ -1421,7 +1421,7 @@ atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_xchg(atomic64_t *v, s64 i)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_xchg(v, i);
}
#define atomic64_xchg atomic64_xchg
@@ -1431,7 +1431,7 @@ atomic64_xchg(atomic64_t *v, s64 i)
static __always_inline s64
atomic64_xchg_acquire(atomic64_t *v, s64 i)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_xchg_acquire(v, i);
}
#define atomic64_xchg_acquire atomic64_xchg_acquire
@@ -1441,7 +1441,7 @@ atomic64_xchg_acquire(atomic64_t *v, s64 i)
static __always_inline s64
atomic64_xchg_release(atomic64_t *v, s64 i)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_xchg_release(v, i);
}
#define atomic64_xchg_release atomic64_xchg_release
@@ -1451,7 +1451,7 @@ atomic64_xchg_release(atomic64_t *v, s64 i)
static __always_inline s64
atomic64_xchg_relaxed(atomic64_t *v, s64 i)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_xchg_relaxed(v, i);
}
#define atomic64_xchg_relaxed atomic64_xchg_relaxed
@@ -1461,7 +1461,7 @@ atomic64_xchg_relaxed(atomic64_t *v, s64 i)
static __always_inline s64
atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_cmpxchg(v, old, new);
}
#define atomic64_cmpxchg atomic64_cmpxchg
@@ -1471,7 +1471,7 @@ atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
static __always_inline s64
atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_cmpxchg_acquire(v, old, new);
}
#define atomic64_cmpxchg_acquire atomic64_cmpxchg_acquire
@@ -1481,7 +1481,7 @@ atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
static __always_inline s64
atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_cmpxchg_release(v, old, new);
}
#define atomic64_cmpxchg_release atomic64_cmpxchg_release
@@ -1491,7 +1491,7 @@ atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
static __always_inline s64
atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_cmpxchg_relaxed(v, old, new);
}
#define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed
@@ -1501,8 +1501,8 @@ atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
static __always_inline bool
atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
- instrument_atomic_write(v, sizeof(*v));
- instrument_atomic_write(old, sizeof(*old));
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_atomic_read_write(old, sizeof(*old));
return arch_atomic64_try_cmpxchg(v, old, new);
}
#define atomic64_try_cmpxchg atomic64_try_cmpxchg
@@ -1512,8 +1512,8 @@ atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
static __always_inline bool
atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
{
- instrument_atomic_write(v, sizeof(*v));
- instrument_atomic_write(old, sizeof(*old));
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_atomic_read_write(old, sizeof(*old));
return arch_atomic64_try_cmpxchg_acquire(v, old, new);
}
#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire
@@ -1523,8 +1523,8 @@ atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
static __always_inline bool
atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
{
- instrument_atomic_write(v, sizeof(*v));
- instrument_atomic_write(old, sizeof(*old));
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_atomic_read_write(old, sizeof(*old));
return arch_atomic64_try_cmpxchg_release(v, old, new);
}
#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release
@@ -1534,8 +1534,8 @@ atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
static __always_inline bool
atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
{
- instrument_atomic_write(v, sizeof(*v));
- instrument_atomic_write(old, sizeof(*old));
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_atomic_read_write(old, sizeof(*old));
return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
}
#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg_relaxed
@@ -1545,7 +1545,7 @@ atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
static __always_inline bool
atomic64_sub_and_test(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_sub_and_test(i, v);
}
#define atomic64_sub_and_test atomic64_sub_and_test
@@ -1555,7 +1555,7 @@ atomic64_sub_and_test(s64 i, atomic64_t *v)
static __always_inline bool
atomic64_dec_and_test(atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_dec_and_test(v);
}
#define atomic64_dec_and_test atomic64_dec_and_test
@@ -1565,7 +1565,7 @@ atomic64_dec_and_test(atomic64_t *v)
static __always_inline bool
atomic64_inc_and_test(atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_inc_and_test(v);
}
#define atomic64_inc_and_test atomic64_inc_and_test
@@ -1575,7 +1575,7 @@ atomic64_inc_and_test(atomic64_t *v)
static __always_inline bool
atomic64_add_negative(s64 i, atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_add_negative(i, v);
}
#define atomic64_add_negative atomic64_add_negative
@@ -1585,7 +1585,7 @@ atomic64_add_negative(s64 i, atomic64_t *v)
static __always_inline s64
atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_fetch_add_unless(v, a, u);
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless
@@ -1595,7 +1595,7 @@ atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
static __always_inline bool
atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_add_unless(v, a, u);
}
#define atomic64_add_unless atomic64_add_unless
@@ -1605,7 +1605,7 @@ atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
static __always_inline bool
atomic64_inc_not_zero(atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_inc_not_zero(v);
}
#define atomic64_inc_not_zero atomic64_inc_not_zero
@@ -1615,7 +1615,7 @@ atomic64_inc_not_zero(atomic64_t *v)
static __always_inline bool
atomic64_inc_unless_negative(atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_inc_unless_negative(v);
}
#define atomic64_inc_unless_negative atomic64_inc_unless_negative
@@ -1625,7 +1625,7 @@ atomic64_inc_unless_negative(atomic64_t *v)
static __always_inline bool
atomic64_dec_unless_positive(atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_dec_unless_positive(v);
}
#define atomic64_dec_unless_positive atomic64_dec_unless_positive
@@ -1635,7 +1635,7 @@ atomic64_dec_unless_positive(atomic64_t *v)
static __always_inline s64
atomic64_dec_if_positive(atomic64_t *v)
{
- instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_read_write(v, sizeof(*v));
return arch_atomic64_dec_if_positive(v);
}
#define atomic64_dec_if_positive atomic64_dec_if_positive
@@ -1830,4 +1830,4 @@ atomic64_dec_if_positive(atomic64_t *v)
})
#endif /* _ASM_GENERIC_ATOMIC_INSTRUMENTED_H */
-// ff0fe7f81ee97f01f13bb78b0e3ce800bc56d9dd
+// 9d5e6a315fb1335d02f0ccd3655a91c3dafcc63e
diff --git a/include/asm-generic/bitops/instrumented-atomic.h b/include/asm-generic/bitops/instrumented-atomic.h
index fb2cb33a4013..81915dcd4b4e 100644
--- a/include/asm-generic/bitops/instrumented-atomic.h
+++ b/include/asm-generic/bitops/instrumented-atomic.h
@@ -67,7 +67,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
*/
static inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
{
- instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_set_bit(nr, addr);
}
@@ -80,7 +80,7 @@ static inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
*/
static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
{
- instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_clear_bit(nr, addr);
}
@@ -93,7 +93,7 @@ static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
*/
static inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
{
- instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_change_bit(nr, addr);
}
diff --git a/include/asm-generic/bitops/instrumented-lock.h b/include/asm-generic/bitops/instrumented-lock.h
index b9bec468ae03..75ef606f7145 100644
--- a/include/asm-generic/bitops/instrumented-lock.h
+++ b/include/asm-generic/bitops/instrumented-lock.h
@@ -52,7 +52,7 @@ static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
*/
static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr)
{
- instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_set_bit_lock(nr, addr);
}
diff --git a/include/asm-generic/bitops/instrumented-non-atomic.h b/include/asm-generic/bitops/instrumented-non-atomic.h
index 20f788a25ef9..37363d570b9b 100644
--- a/include/asm-generic/bitops/instrumented-non-atomic.h
+++ b/include/asm-generic/bitops/instrumented-non-atomic.h
@@ -58,6 +58,30 @@ static inline void __change_bit(long nr, volatile unsigned long *addr)
arch___change_bit(nr, addr);
}
+static inline void __instrument_read_write_bitop(long nr, volatile unsigned long *addr)
+{
+ if (IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC)) {
+ /*
+ * We treat non-atomic read-write bitops a little more special.
+ * Given the operations here only modify a single bit, assuming
+ * non-atomicity of the writer is sufficient may be reasonable
+ * for certain usage (and follows the permissible nature of the
+ * assume-plain-writes-atomic rule):
+ * 1. report read-modify-write races -> check read;
+ * 2. do not report races with marked readers, but do report
+ * races with unmarked readers -> check "atomic" write.
+ */
+ kcsan_check_read(addr + BIT_WORD(nr), sizeof(long));
+ /*
+ * Use generic write instrumentation, in case other sanitizers
+ * or tools are enabled alongside KCSAN.
+ */
+ instrument_write(addr + BIT_WORD(nr), sizeof(long));
+ } else {
+ instrument_read_write(addr + BIT_WORD(nr), sizeof(long));
+ }
+}
+
/**
* __test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
@@ -68,7 +92,7 @@ static inline void __change_bit(long nr, volatile unsigned long *addr)
*/
static inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
{
- instrument_write(addr + BIT_WORD(nr), sizeof(long));
+ __instrument_read_write_bitop(nr, addr);
return arch___test_and_set_bit(nr, addr);
}
@@ -82,7 +106,7 @@ static inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
*/
static inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
{
- instrument_write(addr + BIT_WORD(nr), sizeof(long));
+ __instrument_read_write_bitop(nr, addr);
return arch___test_and_clear_bit(nr, addr);
}
@@ -96,7 +120,7 @@ static inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
*/
static inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
{
- instrument_write(addr + BIT_WORD(nr), sizeof(long));
+ __instrument_read_write_bitop(nr, addr);
return arch___test_and_change_bit(nr, addr);
}
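
For illustration (not part of this patch): a test-and-modify bitop both reads and writes its word, so instrumenting it as a plain write would miss read-modify-write races. A minimal sketch of a race the read-write instrumentation can now report, with flags and do_something() as hypothetical names:

	#include <linux/bitops.h>

	extern void do_something(void);		/* hypothetical helper */
	static unsigned long flags;

	void writer(void)
	{
		__test_and_set_bit(0, &flags);	/* read-modify-write of flags */
	}

	void reader(void)
	{
		if (flags & 1UL)		/* plain, unmarked read */
			do_something();
	}
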
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 18b0f4eee8cb..76a10e0dca9f 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -141,7 +141,7 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
#ifndef WARN_ON_ONCE
#define WARN_ON_ONCE(condition) ({ \
- static bool __section(.data.once) __warned; \
+ static bool __section(".data.once") __warned; \
int __ret_warn_once = !!(condition); \
\
if (unlikely(__ret_warn_once && !__warned)) { \
@@ -153,7 +153,7 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
#endif
#define WARN_ONCE(condition, format...) ({ \
- static bool __section(.data.once) __warned; \
+ static bool __section(".data.once") __warned; \
int __ret_warn_once = !!(condition); \
\
if (unlikely(__ret_warn_once && !__warned)) { \
@@ -164,7 +164,7 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
})
#define WARN_TAINT_ONCE(condition, taint, format...) ({ \
- static bool __section(.data.once) __warned; \
+ static bool __section(".data.once") __warned; \
int __ret_warn_once = !!(condition); \
\
if (unlikely(__ret_warn_once && !__warned)) { \
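
The quoting of section names throughout this merge tracks a change to the __section() helper itself. A sketch of that underlying macro change (an assumption; compiler_attributes.h is not part of this diff):

	/* old: stringified its argument */
	#define __section(S)		__attribute__((__section__(#S)))

	/* new: takes a string literal directly */
	#define __section(section)	__attribute__((__section__(section)))

Passing a real string literal lets callers use string concatenation and keeps the macro usable in contexts where implicit stringification misbehaves.
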
diff --git a/include/asm-generic/checksum.h b/include/asm-generic/checksum.h
index cd8b75aa770d..43e18db89c14 100644
--- a/include/asm-generic/checksum.h
+++ b/include/asm-generic/checksum.h
@@ -16,18 +16,6 @@
*/
extern __wsum csum_partial(const void *buff, int len, __wsum sum);
-/*
- * the same as csum_partial, but copies from src while it
- * checksums
- *
- * here even more important to align src and dst on a 32-bit (or even
- * better 64-bit) boundary
- */
-#ifndef csum_partial_copy_nocheck
-__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len,
- __wsum sum);
-#endif
-
#ifndef ip_fast_csum
/*
* This is a version of ip_compute_csum() optimized for IP headers,
diff --git a/include/asm-generic/compat.h b/include/asm-generic/compat.h
index a86f65bffab8..30f7b18a36f9 100644
--- a/include/asm-generic/compat.h
+++ b/include/asm-generic/compat.h
@@ -22,4 +22,12 @@ typedef u32 compat_ulong_t;
typedef u32 compat_uptr_t;
typedef u32 compat_aio_context_t;
+#ifdef CONFIG_COMPAT_FOR_U64_ALIGNMENT
+typedef s64 __attribute__((aligned(4))) compat_s64;
+typedef u64 __attribute__((aligned(4))) compat_u64;
+#else
+typedef s64 compat_s64;
+typedef u64 compat_u64;
+#endif
+
#endif
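
For illustration (not part of this patch): the i386 ABI aligns 64-bit types to 4 bytes, while most 64-bit ABIs align them to 8, so compat structures need the reduced alignment to match 32-bit userspace layouts. A sketch with a hypothetical struct, assuming x86-64 with CONFIG_COMPAT_FOR_U64_ALIGNMENT:

	struct compat_example {			/* hypothetical */
		u32		a;
		compat_u64	b;	/* offset 4 here; a native u64 would sit at offset 8 */
	};
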
diff --git a/include/asm-generic/dma-contiguous.h b/include/asm-generic/dma-contiguous.h
deleted file mode 100644
index f24b0f9a4f05..000000000000
--- a/include/asm-generic/dma-contiguous.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_GENERIC_DMA_CONTIGUOUS_H
-#define _ASM_GENERIC_DMA_CONTIGUOUS_H
-
-#include <linux/types.h>
-
-static inline void
-dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) { }
-
-#endif
diff --git a/include/asm-generic/error-injection.h b/include/asm-generic/error-injection.h
index 80ca61058dd2..7ddd9dc10ce9 100644
--- a/include/asm-generic/error-injection.h
+++ b/include/asm-generic/error-injection.h
@@ -25,7 +25,7 @@ struct pt_regs;
*/
#define ALLOW_ERROR_INJECTION(fname, _etype) \
static struct error_injection_entry __used \
- __attribute__((__section__("_error_injection_whitelist"))) \
+ __section("_error_injection_whitelist") \
_eil_addr_##fname = { \
.addr = (unsigned long)fname, \
.etype = EI_ETYPE_##_etype, \
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index dabf8cb7203b..9ea83d80eb6f 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -911,18 +911,6 @@ static inline void iowrite64_rep(volatile void __iomem *addr,
#include <linux/vmalloc.h>
#define __io_virt(x) ((void __force *)(x))
-#ifndef CONFIG_GENERIC_IOMAP
-struct pci_dev;
-extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
-
-#ifndef pci_iounmap
-#define pci_iounmap pci_iounmap
-static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
-{
-}
-#endif
-#endif /* CONFIG_GENERIC_IOMAP */
-
/*
* Change virtual addresses to physical addresses and vv.
* These are pretty trivial
@@ -1016,6 +1004,16 @@ static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
port &= IO_SPACE_LIMIT;
return (port > MMIO_UPPER_LIMIT) ? NULL : PCI_IOBASE + port;
}
+#define __pci_ioport_unmap __pci_ioport_unmap
+static inline void __pci_ioport_unmap(void __iomem *p)
+{
+ uintptr_t start = (uintptr_t) PCI_IOBASE;
+ uintptr_t addr = (uintptr_t) p;
+
+ if (addr >= start && addr < start + IO_SPACE_LIMIT)
+ return;
+ iounmap(p);
+}
#endif
#ifndef ioport_unmap
@@ -1030,6 +1028,23 @@ extern void ioport_unmap(void __iomem *p);
#endif /* CONFIG_GENERIC_IOMAP */
#endif /* CONFIG_HAS_IOPORT_MAP */
+#ifndef CONFIG_GENERIC_IOMAP
+struct pci_dev;
+extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
+
+#ifndef __pci_ioport_unmap
+static inline void __pci_ioport_unmap(void __iomem *p) {}
+#endif
+
+#ifndef pci_iounmap
+#define pci_iounmap pci_iounmap
+static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
+{
+ __pci_ioport_unmap(p);
+}
+#endif
+#endif /* CONFIG_GENERIC_IOMAP */
+
/*
* Convert a virtual cached pointer to an uncached pointer
*/
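
For illustration (not part of this patch): pci_iomap() may return either a genuine MMIO mapping or a cookie inside the fixed PCI_IOBASE window for I/O-port BARs; the new __pci_ioport_unmap() lets pci_iounmap() call iounmap() only for the former. A sketch of a caller, with probe_one() and the register offset as hypothetical names:

	static int probe_one(struct pci_dev *pdev)	/* hypothetical */
	{
		void __iomem *regs = pci_iomap(pdev, 0, 0);	/* map BAR 0 */

		if (!regs)
			return -ENOMEM;
		/* ... ioread32(regs + 0x10) ... */
		pci_iounmap(pdev, regs);	/* no-op for port cookies, iounmap() for MMIO */
		return 0;
	}
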
diff --git a/include/asm-generic/kprobes.h b/include/asm-generic/kprobes.h
index 4a982089c95c..060eab094e5a 100644
--- a/include/asm-generic/kprobes.h
+++ b/include/asm-generic/kprobes.h
@@ -10,11 +10,11 @@
*/
# define __NOKPROBE_SYMBOL(fname) \
static unsigned long __used \
- __attribute__((__section__("_kprobe_blacklist"))) \
+ __section("_kprobe_blacklist") \
_kbl_addr_##fname = (unsigned long)fname;
# define NOKPROBE_SYMBOL(fname) __NOKPROBE_SYMBOL(fname)
/* Use this to forbid a kprobes attach on very low level functions */
-# define __kprobes __attribute__((__section__(".kprobes.text")))
+# define __kprobes __section(".kprobes.text")
# define nokprobe_inline __always_inline
#else
# define NOKPROBE_SYMBOL(fname)
diff --git a/include/asm-generic/module.lds.h b/include/asm-generic/module.lds.h
new file mode 100644
index 000000000000..f210d5c1b78b
--- /dev/null
+++ b/include/asm-generic/module.lds.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ASM_GENERIC_MODULE_LDS_H
+#define __ASM_GENERIC_MODULE_LDS_H
+
+/*
+ * <asm/module.lds.h> can specify arch-specific sections for linking modules.
+ * Empty for the asm-generic header.
+ */
+
+#endif /* __ASM_GENERIC_MODULE_LDS_H */
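
For illustration (not part of this patch): an architecture can provide its own <asm/module.lds.h> to add sections to every module link. A hypothetical sketch, loosely modeled on what PLT-using architectures do:

	/* arch/foo/include/asm/module.lds.h (hypothetical) */
	SECTIONS {
		.plt : { BYTE(0) }	/* reserve a section for PLT entries */
	}
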
diff --git a/include/asm-generic/mshyperv.h b/include/asm-generic/mshyperv.h
index c5edc5e08b94..c57799684170 100644
--- a/include/asm-generic/mshyperv.h
+++ b/include/asm-generic/mshyperv.h
@@ -89,7 +89,7 @@ static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
}
}
-void hv_setup_vmbus_irq(void (*handler)(void));
+int hv_setup_vmbus_irq(int irq, void (*handler)(void));
void hv_remove_vmbus_irq(void);
void hv_enable_vmbus_irq(void);
void hv_disable_vmbus_irq(void);
@@ -99,6 +99,8 @@ void hv_remove_kexec_handler(void);
void hv_setup_crash_handler(void (*handler)(struct pt_regs *regs));
void hv_remove_crash_handler(void);
+extern int vmbus_interrupt;
+
#if IS_ENABLED(CONFIG_HYPERV)
/*
* Hypervisor's notion of virtual processor ID is different from
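
For illustration (not part of this patch): callers of hv_setup_vmbus_irq() now pass the interrupt explicitly and must handle failure. A sketch, assuming vmbus_isr() is the VMBus interrupt handler:

	int ret = hv_setup_vmbus_irq(vmbus_interrupt, vmbus_isr);

	if (ret)
		return ret;
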
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
index ba68ee4dabfa..4973328f3c6e 100644
--- a/include/asm-generic/uaccess.h
+++ b/include/asm-generic/uaccess.h
@@ -10,28 +10,78 @@
#include <linux/string.h>
#ifdef CONFIG_UACCESS_MEMCPY
-static inline __must_check unsigned long
-raw_copy_from_user(void *to, const void __user * from, unsigned long n)
+#include <asm/unaligned.h>
+
+static __always_inline int
+__get_user_fn(size_t size, const void __user *from, void *to)
{
- if (__builtin_constant_p(n)) {
- switch(n) {
- case 1:
- *(u8 *)to = *(u8 __force *)from;
- return 0;
- case 2:
- *(u16 *)to = *(u16 __force *)from;
- return 0;
- case 4:
- *(u32 *)to = *(u32 __force *)from;
- return 0;
-#ifdef CONFIG_64BIT
- case 8:
- *(u64 *)to = *(u64 __force *)from;
- return 0;
-#endif
- }
+ BUILD_BUG_ON(!__builtin_constant_p(size));
+
+ switch (size) {
+ case 1:
+ *(u8 *)to = get_unaligned((u8 __force *)from);
+ return 0;
+ case 2:
+ *(u16 *)to = get_unaligned((u16 __force *)from);
+ return 0;
+ case 4:
+ *(u32 *)to = get_unaligned((u32 __force *)from);
+ return 0;
+ case 8:
+ *(u64 *)to = get_unaligned((u64 __force *)from);
+ return 0;
+ default:
+ BUILD_BUG();
+ return 0;
}
+}
+#define __get_user_fn(sz, u, k) __get_user_fn(sz, u, k)
+
+static __always_inline int
+__put_user_fn(size_t size, void __user *to, void *from)
+{
+ BUILD_BUG_ON(!__builtin_constant_p(size));
+
+ switch (size) {
+ case 1:
+ put_unaligned(*(u8 *)from, (u8 __force *)to);
+ return 0;
+ case 2:
+ put_unaligned(*(u16 *)from, (u16 __force *)to);
+ return 0;
+ case 4:
+ put_unaligned(*(u32 *)from, (u32 __force *)to);
+ return 0;
+ case 8:
+ put_unaligned(*(u64 *)from, (u64 __force *)to);
+ return 0;
+ default:
+ BUILD_BUG();
+ return 0;
+ }
+}
+#define __put_user_fn(sz, u, k) __put_user_fn(sz, u, k)
+
+#define __get_kernel_nofault(dst, src, type, err_label) \
+do { \
+ *((type *)dst) = get_unaligned((type *)(src)); \
+ if (0) /* make sure the compiler sees the label as used */ \
+ goto err_label; \
+} while (0)
+
+#define __put_kernel_nofault(dst, src, type, err_label) \
+do { \
+ put_unaligned(*((type *)src), (type *)(dst)); \
+ if (0) /* make sure the compiler sees the label as used */ \
+ goto err_label; \
+} while (0)
+
+#define HAVE_GET_KERNEL_NOFAULT 1
+
+static inline __must_check unsigned long
+raw_copy_from_user(void *to, const void __user * from, unsigned long n)
+{
memcpy(to, (const void __force *)from, n);
return 0;
}
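
For illustration (not part of this patch): a sketch of how the nofault accessor is used, with read_word() and src as hypothetical names. The `if (0) goto err_label;` in the macro keeps the label "used", so callers can share one error path with implementations that can actually fault:

	static long read_word(const void *src)		/* hypothetical */
	{
		long val;

		__get_kernel_nofault(&val, src, long, efault);
		return val;
	efault:
		return -EFAULT;
	}
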
@@ -39,27 +89,6 @@ raw_copy_from_user(void *to, const void __user * from, unsigned long n)
static inline __must_check unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
- if (__builtin_constant_p(n)) {
- switch(n) {
- case 1:
- *(u8 __force *)to = *(u8 *)from;
- return 0;
- case 2:
- *(u16 __force *)to = *(u16 *)from;
- return 0;
- case 4:
- *(u32 __force *)to = *(u32 *)from;
- return 0;
-#ifdef CONFIG_64BIT
- case 8:
- *(u64 __force *)to = *(u64 *)from;
- return 0;
-#endif
- default:
- break;
- }
- }
-
memcpy((void __force *)to, from, n);
return 0;
}
@@ -67,6 +96,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
#define INLINE_COPY_TO_USER
#endif /* CONFIG_UACCESS_MEMCPY */
+#ifdef CONFIG_SET_FS
#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
#ifndef KERNEL_DS
@@ -89,6 +119,7 @@ static inline void set_fs(mm_segment_t fs)
#ifndef uaccess_kernel
#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)
#endif
+#endif /* CONFIG_SET_FS */
#define access_ok(addr, size) __access_ok((unsigned long)(addr),(size))
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 5430febd34be..b2b3d81b1535 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -34,6 +34,7 @@
*
* STABS_DEBUG
* DWARF_DEBUG
+ * ELF_DETAILS
*
* DISCARDS // must be the last
* }
@@ -388,6 +389,12 @@
KEEP(*(__jump_table)) \
__stop___jump_table = .;
+#define STATIC_CALL_DATA \
+ . = ALIGN(8); \
+ __start_static_call_sites = .; \
+ KEEP(*(.static_call_sites)) \
+ __stop_static_call_sites = .;
+
/*
* Allow architectures to handle ro_after_init data on their
* own by defining an empty RO_AFTER_INIT_DATA.
@@ -398,6 +405,7 @@
__start_ro_after_init = .; \
*(.data..ro_after_init) \
JUMP_TABLE_DATA \
+ STATIC_CALL_DATA \
__end_ro_after_init = .;
#endif
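
For illustration (not part of this patch): .static_call_sites records every static_call() call site so the core can patch them, and STATIC_CALL_DATA places that table in ro_after_init. A sketch with hypothetical names:

	static void default_impl(void) { }
	static void fast_impl(void) { }

	DEFINE_STATIC_CALL(my_hook, default_impl);

	void caller(void)
	{
		static_call(my_hook)();			/* direct call, site recorded */
	}

	void speed_up(void)
	{
		static_call_update(my_hook, fast_impl);	/* repatch all sites */
	}
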
@@ -581,7 +589,10 @@
*/
#define TEXT_TEXT \
ALIGN_FUNCTION(); \
- *(.text.hot TEXT_MAIN .text.fixup .text.unlikely) \
+ *(.text.hot .text.hot.*) \
+ *(TEXT_MAIN .text.fixup) \
+ *(.text.unlikely .text.unlikely.*) \
+ *(.text.unknown .text.unknown.*) \
NOINSTR_TEXT \
*(.text..refcount) \
*(.ref.text) \
@@ -635,6 +646,12 @@
*(.softirqentry.text) \
__softirqentry_text_end = .;
+#define STATIC_CALL_TEXT \
+ ALIGN_FUNCTION(); \
+ __static_call_text_start = .; \
+ *(.static_call.text) \
+ __static_call_text_end = .;
+
/* Section used for early init (in .S files) */
#define HEAD_TEXT KEEP(*(.head.text))
@@ -661,7 +678,7 @@
#define BTF \
.BTF : AT(ADDR(.BTF) - LOAD_OFFSET) { \
__start_BTF = .; \
- *(.BTF) \
+ KEEP(*(.BTF)) \
__stop_BTF = .; \
} \
. = ALIGN(4); \
@@ -684,6 +701,7 @@
#ifdef CONFIG_CONSTRUCTORS
#define KERNEL_CTORS() . = ALIGN(8); \
__ctors_start = .; \
+ KEEP(*(SORT(.ctors.*))) \
KEEP(*(.ctors)) \
KEEP(*(SORT(.init_array.*))) \
KEEP(*(.init_array)) \
@@ -717,7 +735,8 @@
THERMAL_TABLE(governor) \
EARLYCON_TABLE() \
LSM_TABLE() \
- EARLY_LSM_TABLE()
+ EARLY_LSM_TABLE() \
+ KUNIT_TABLE()
#define INIT_TEXT \
*(.init.text .init.text.*) \
@@ -812,15 +831,21 @@
.debug_macro 0 : { *(.debug_macro) } \
.debug_addr 0 : { *(.debug_addr) }
- /* Stabs debugging sections. */
+/* Stabs debugging sections. */
#define STABS_DEBUG \
.stab 0 : { *(.stab) } \
.stabstr 0 : { *(.stabstr) } \
.stab.excl 0 : { *(.stab.excl) } \
.stab.exclstr 0 : { *(.stab.exclstr) } \
.stab.index 0 : { *(.stab.index) } \
- .stab.indexstr 0 : { *(.stab.indexstr) } \
- .comment 0 : { *(.comment) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+
+/* Required sections not related to debugging. */
+#define ELF_DETAILS \
+ .comment 0 : { *(.comment) } \
+ .symtab 0 : { *(.symtab) } \
+ .strtab 0 : { *(.strtab) } \
+ .shstrtab 0 : { *(.shstrtab) }
#ifdef CONFIG_GENERIC_BUG
#define BUG_TABLE \
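
For illustration (not part of this patch): with ELF_DETAILS split out, an architecture linker script is expected to end in the order shown in the header comment near the top of this file, keeping .comment and the symbol tables out of /DISCARD/:

	/* sketch: tail of an arch vmlinux.lds.S */
	STABS_DEBUG
	DWARF_DEBUG
	ELF_DETAILS

	DISCARDS	/* must be last */
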
@@ -909,6 +934,13 @@
KEEP(*(.con_initcall.init)) \
__con_initcall_end = .;
+/* Alignment must be consistent with (kunit_suite *) in include/kunit/test.h */
+#define KUNIT_TABLE() \
+ . = ALIGN(8); \
+ __kunit_suites_start = .; \
+ KEEP(*(.kunit_test_suites)) \
+ __kunit_suites_end = .;
+
#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS \
. = ALIGN(4); \
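
For illustration (not part of this patch): the KUnit executor walks the table this macro delimits. A sketch of such a loop; the exact entry type is an assumption here (the ALIGN(8) comment above only pins the pointer alignment):

	extern struct kunit_suite * const __kunit_suites_start[];
	extern struct kunit_suite * const __kunit_suites_end[];

	static void run_builtin_suites(void)	/* hypothetical */
	{
		struct kunit_suite * const *suite;

		for (suite = __kunit_suites_start; suite < __kunit_suites_end; suite++)
			kunit_run_tests(*suite);
	}
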
@@ -955,13 +987,38 @@
EXIT_DATA
#endif
+/*
+ * Clang's -fsanitize=kernel-address and -fsanitize=thread produce
+ * unwanted sections (.eh_frame and .init_array.*), but
+ * CONFIG_CONSTRUCTORS wants to keep any .init_array.* sections.
+ * https://bugs.llvm.org/show_bug.cgi?id=46478
+ */
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KCSAN)
+# ifdef CONFIG_CONSTRUCTORS
+# define SANITIZER_DISCARDS \
+ *(.eh_frame)
+# else
+# define SANITIZER_DISCARDS \
+ *(.init_array) *(.init_array.*) \
+ *(.eh_frame)
+# endif
+#else
+# define SANITIZER_DISCARDS
+#endif
+
+#define COMMON_DISCARDS \
+ SANITIZER_DISCARDS \
+ *(.discard) \
+ *(.discard.*) \
+ *(.modinfo) \
+ /* ld.bfd warns about .gnu.version* even when not emitted */ \
+ *(.gnu.version*) \
+
#define DISCARDS \
/DISCARD/ : { \
EXIT_DISCARDS \
EXIT_CALL \
- *(.discard) \
- *(.discard.*) \
- *(.modinfo) \
+ COMMON_DISCARDS \
}
/**