path: root/arch/x86/include/asm/cmpxchg_32.h
blob: ed2797f132ce09f39837588141cabde041c01f1f
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_CMPXCHG_32_H
#define _ASM_X86_CMPXCHG_32_H

/*
 * Note: if you use __cmpxchg64() or its variants, you need to test
 *       for the CX8 feature (X86_FEATURE_CX8) in boot_cpu_data.
 */

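/*
 * A 64-bit value overlaid with its low/high 32-bit halves, matching the
 * EDX:EAX (expected/returned) and ECX:EBX (replacement) register pairs
 * used by the cmpxchg8b instruction.
 */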
union __u64_halves {
	u64 full;
	struct {
		u32 low, high;
	};
};

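/*
 * Core 8-byte compare-and-exchange: if *_ptr equals _old, store _new;
 * either way the value found at *_ptr is returned.  _lock is LOCK_PREFIX
 * for the SMP-safe variants and empty for the _local ones.
 */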
#define __arch_cmpxchg64(_ptr, _old, _new, _lock)			\
({									\
	union __u64_halves o = { .full = (_old), },			\
			   n = { .full = (_new), };			\
									\
	asm volatile(_lock "cmpxchg8b %[ptr]"				\
		     : [ptr] "+m" (*(_ptr)),				\
		       "+a" (o.low), "+d" (o.high)			\
		     : "b" (n.low), "c" (n.high)			\
		     : "memory");					\
									\
	o.full;								\
})


static __always_inline u64 __cmpxchg64(volatile u64 *ptr, u64 old, u64 new)
{
	return __arch_cmpxchg64(ptr, old, new, LOCK_PREFIX);
}

static __always_inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
{
	return __arch_cmpxchg64(ptr, old, new,);
}

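/*
 * "try" flavour: evaluates to true if the exchange succeeded.  On failure
 * the current value of *_ptr (returned by cmpxchg8b in EDX:EAX) is written
 * back to *_oldp; success is derived from ZF via CC_SET()/CC_OUT().
 */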
#define __arch_try_cmpxchg64(_ptr, _oldp, _new, _lock)			\
({									\
	union __u64_halves o = { .full = *(_oldp), },			\
			   n = { .full = (_new), };			\
	bool ret;							\
									\
	asm volatile(_lock "cmpxchg8b %[ptr]"				\
		     CC_SET(e)						\
		     : CC_OUT(e) (ret),					\
		       [ptr] "+m" (*(_ptr)),				\
		       "+a" (o.low), "+d" (o.high)			\
		     : "b" (n.low), "c" (n.high)			\
		     : "memory");					\
									\
	if (unlikely(!ret))						\
		*(_oldp) = o.full;					\
									\
	likely(ret);							\
})

static __always_inline bool __try_cmpxchg64(volatile u64 *ptr, u64 *oldp, u64 new)
{
	return __arch_try_cmpxchg64(ptr, oldp, new, LOCK_PREFIX);
}

static __always_inline bool __try_cmpxchg64_local(volatile u64 *ptr, u64 *oldp, u64 new)
{
	return __arch_try_cmpxchg64(ptr, oldp, new,);
}

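/*
 * CONFIG_X86_CMPXCHG64 is set when the minimum supported CPU is known to
 * implement cmpxchg8b, so the plain helpers above can be used directly.
 * Otherwise the emulating variants below are needed.
 */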
#ifdef CONFIG_X86_CMPXCHG64

#define arch_cmpxchg64 __cmpxchg64

#define arch_cmpxchg64_local __cmpxchg64_local

#define arch_try_cmpxchg64 __try_cmpxchg64

#define arch_try_cmpxchg64_local __try_cmpxchg64_local

#else

/*
 * When building a kernel capable of running on the 80386 and 80486, it may
 * be necessary to emulate the cmpxchg8b instruction on those CPUs.
 */

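/*
 * Same as __arch_cmpxchg64(), except that the default ALTERNATIVE() path
 * calls the out-of-line cmpxchg8b_emu helper, which takes the pointer in
 * %esi (hence the "S" input constraint).  On CPUs with X86_FEATURE_CX8 the
 * call is patched to the real, optionally LOCK-prefixed, cmpxchg8b.
 */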
#define __arch_cmpxchg64_emu(_ptr, _old, _new, _lock_loc, _lock)	\
({									\
	union __u64_halves o = { .full = (_old), },			\
			   n = { .full = (_new), };			\
									\
	asm volatile(ALTERNATIVE(_lock_loc				\
				 "call cmpxchg8b_emu",			\
				 _lock "cmpxchg8b %[ptr]", X86_FEATURE_CX8) \
		     : [ptr] "+m" (*(_ptr)),				\
		       "+a" (o.low), "+d" (o.high)			\
		     : "b" (n.low), "c" (n.high), "S" (_ptr)		\
		     : "memory");					\
									\
	o.full;								\
})

static __always_inline u64 arch_cmpxchg64(volatile u64 *ptr, u64 old, u64 new)
{
	return __arch_cmpxchg64_emu(ptr, old, new, LOCK_PREFIX_HERE, "lock; ");
}
#define arch_cmpxchg64 arch_cmpxchg64

static __always_inline u64 arch_cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
{
	return __arch_cmpxchg64_emu(ptr, old, new, ,);
}
#define arch_cmpxchg64_local arch_cmpxchg64_local

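/*
 * Emulating counterpart of __arch_try_cmpxchg64(): the same ALTERNATIVE()/
 * cmpxchg8b_emu scheme as above, combined with the "try" success/failure
 * handling.
 */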
#define __arch_try_cmpxchg64_emu(_ptr, _oldp, _new, _lock_loc, _lock)	\
({									\
	union __u64_halves o = { .full = *(_oldp), },			\
			   n = { .full = (_new), };			\
	bool ret;							\
									\
	asm volatile(ALTERNATIVE(_lock_loc				\
				 "call cmpxchg8b_emu",			\
				 _lock "cmpxchg8b %[ptr]", X86_FEATURE_CX8) \
		     CC_SET(e)						\
		     : CC_OUT(e) (ret),					\
		       [ptr] "+m" (*(_ptr)),				\
		       "+a" (o.low), "+d" (o.high)			\
		     : "b" (n.low), "c" (n.high), "S" (_ptr)		\
		     : "memory");					\
									\
	if (unlikely(!ret))						\
		*(_oldp) = o.full;					\
									\
	likely(ret);							\
})

static __always_inline bool arch_try_cmpxchg64(volatile u64 *ptr, u64 *oldp, u64 new)
{
	return __arch_try_cmpxchg64_emu(ptr, oldp, new, LOCK_PREFIX_HERE, "lock; ");
}
#define arch_try_cmpxchg64 arch_try_cmpxchg64

static __always_inline bool arch_try_cmpxchg64_local(volatile u64 *ptr, u64 *oldp, u64 new)
{
	return __arch_try_cmpxchg64_emu(ptr, oldp, new, ,);
}
#define arch_try_cmpxchg64_local arch_try_cmpxchg64_local

#endif

#define system_has_cmpxchg64()		boot_cpu_has(X86_FEATURE_CX8)
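
/*
 * Illustrative usage sketch (not part of this header): a caller that cannot
 * rely on CONFIG_X86_CMPXCHG64 would typically guard the 64-bit primitive
 * with system_has_cmpxchg64() and supply its own fallback, e.g. a
 * spinlock-protected update.  The names below are hypothetical:
 *
 *	static u64 shared_counter;
 *
 *	static bool bump_counter(u64 old, u64 new)
 *	{
 *		if (!system_has_cmpxchg64())
 *			return false;
 *		return arch_try_cmpxchg64(&shared_counter, &old, new);
 *	}
 *
 * Most code reaches these helpers via the generic cmpxchg64() and
 * try_cmpxchg64() wrappers rather than calling the arch_ variants directly.
 */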

#endif /* _ASM_X86_CMPXCHG_32_H */