path: root/kernel/locking/spinlock_rt.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * PREEMPT_RT substitution for spin/rw_locks
 *
 * spinlocks and rwlocks on RT are based on rtmutexes, with a few twists to
 * resemble the non-RT semantics:
 *
 * - Contrary to plain rtmutexes, spinlocks and rwlocks are state
 *   preserving. The task state is saved before blocking on the underlying
 *   rtmutex, and restored when the lock has been acquired. Regular wakeups
 *   during that time are redirected to the saved state so no wake up is
 *   missed.
 *
 * - Non-RT spin/rwlocks disable preemption and, depending on the lock
 *   variant, interrupts as well. Disabling preemption has the side effects
 *   of disabling migration and of preventing RCU grace periods from
 *   completing.
 *
 *   The RT substitutions explicitly disable migration and take
 *   rcu_read_lock() across the lock held section.
 */
#include <linux/spinlock.h>
#include <linux/export.h>

#define RT_MUTEX_BUILD_SPINLOCKS
#include "rtmutex.c"

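/*
 * Lock fast path: try to transition the owner from NULL to current with a
 * single cmpxchg; on contention fall back to the rtlock slow path, which
 * blocks with the task state preserved as described above.
 */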
static __always_inline void rtlock_lock(struct rt_mutex_base *rtm)
{
	if (unlikely(!rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
		rtlock_slowlock(rtm);
}

static __always_inline void __rt_spin_lock(spinlock_t *lock)
{
	___might_sleep(__FILE__, __LINE__, 0);
	rtlock_lock(&lock->lock);
	rcu_read_lock();
	migrate_disable();
}

void __sched rt_spin_lock(spinlock_t *lock)
{
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	__rt_spin_lock(lock);
}
EXPORT_SYMBOL(rt_spin_lock);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched rt_spin_lock_nested(spinlock_t *lock, int subclass)
{
	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	__rt_spin_lock(lock);
}
EXPORT_SYMBOL(rt_spin_lock_nested);

void __sched rt_spin_lock_nest_lock(spinlock_t *lock,
				    struct lockdep_map *nest_lock)
{
	spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
	__rt_spin_lock(lock);
}
EXPORT_SYMBOL(rt_spin_lock_nest_lock);
#endif

void __sched rt_spin_unlock(spinlock_t *lock)
{
	spin_release(&lock->dep_map, _RET_IP_);
	migrate_enable();
	rcu_read_unlock();

	if (unlikely(!rt_mutex_cmpxchg_release(&lock->lock, current, NULL)))
		rt_mutex_slowunlock(&lock->lock);
}
EXPORT_SYMBOL(rt_spin_unlock);

/*
 * Wait for the lock to get unlocked: instead of polling for an unlock
 * (like raw spinlocks do), lock and unlock, to force the kernel to
 * schedule if there's contention:
 */
void __sched rt_spin_lock_unlock(spinlock_t *lock)
{
	spin_lock(lock);
	spin_unlock(lock);
}
EXPORT_SYMBOL(rt_spin_lock_unlock);
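
/*
 * Hypothetical illustration of the above (example_flush_waiter() is not a
 * real caller): acquiring and immediately releasing @lock blocks, and
 * schedules, until the current owner drops the lock.
 */
static __maybe_unused void example_flush_waiter(spinlock_t *lock)
{
	rt_spin_lock_unlock(lock);
}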

static __always_inline int __rt_spin_trylock(spinlock_t *lock)
{
	int ret = 1;

	if (unlikely(!rt_mutex_cmpxchg_acquire(&lock->lock, NULL, current)))
		ret = rt_mutex_slowtrylock(&lock->lock);

	if (ret) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		rcu_read_lock();
		migrate_disable();
	}
	return ret;
}

int __sched rt_spin_trylock(spinlock_t *lock)
{
	return __rt_spin_trylock(lock);
}
EXPORT_SYMBOL(rt_spin_trylock);

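/*
 * Trylock with bottom halves disabled: BHs stay disabled only when the lock
 * was acquired, otherwise they are re-enabled before returning.
 */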
int __sched rt_spin_trylock_bh(spinlock_t *lock)
{
	int ret;

	local_bh_disable();
	ret = __rt_spin_trylock(lock);
	if (!ret)
		local_bh_enable();
	return ret;
}
EXPORT_SYMBOL(rt_spin_trylock_bh);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __rt_spin_lock_init(spinlock_t *lock, const char *name,
			 struct lock_class_key *key)
{
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_CONFIG);
}
EXPORT_SYMBOL(__rt_spin_lock_init);
#endif