Diffstat (limited to 'kernel/time/tick-sched.c'):
 kernel/time/tick-sched.c | 74 +++++++++++++++++++++++++++++++++++++++++++++++---------------------------
 1 file changed, 47 insertions(+), 27 deletions(-)
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index cc7cba20382e..a9e68936822d 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -57,36 +57,42 @@ static ktime_t last_jiffies_update;
 static void tick_do_update_jiffies64(ktime_t now)
 {
 	unsigned long ticks = 1;
-	ktime_t delta;
+	ktime_t delta, nextp;
 
 	/*
-	 * Do a quick check without holding jiffies_lock. The READ_ONCE()
+	 * 64bit can do a quick check without holding jiffies lock and
+	 * without looking at the sequence count. The smp_load_acquire()
 	 * pairs with the update done later in this function.
 	 *
-	 * This is also an intentional data race which is even safe on
-	 * 32bit in theory. If there is a concurrent update then the check
-	 * might give a random answer. It does not matter because if it
-	 * returns then the concurrent update is already taking care, if it
-	 * falls through then it will pointlessly contend on jiffies_lock.
-	 *
-	 * Though there is one nasty case on 32bit due to store tearing of
-	 * the 64bit value. If the first 32bit store makes the quick check
-	 * return on all other CPUs and the writing CPU context gets
-	 * delayed to complete the second store (scheduled out on virt)
-	 * then jiffies can become stale for up to ~2^32 nanoseconds
-	 * without noticing. After that point all CPUs will wait for
-	 * jiffies lock.
-	 *
-	 * OTOH, this is not any different than the situation with NOHZ=off
-	 * where one CPU is responsible for updating jiffies and
-	 * timekeeping. If that CPU goes out for lunch then all other CPUs
-	 * will operate on stale jiffies until it decides to come back.
+	 * 32bit cannot do that because the store of tick_next_period
+	 * consists of two 32bit stores and the first store could move it
+	 * to a random point in the future.
 	 */
-	if (ktime_before(now, READ_ONCE(tick_next_period)))
-		return;
+	if (IS_ENABLED(CONFIG_64BIT)) {
+		if (ktime_before(now, smp_load_acquire(&tick_next_period)))
+			return;
+	} else {
+		unsigned int seq;
 
-	/* Reevaluate with jiffies_lock held */
+		/*
+		 * Avoid contention on jiffies_lock and protect the quick
+		 * check with the sequence count.
+		 */
+		do {
+			seq = read_seqcount_begin(&jiffies_seq);
+			nextp = tick_next_period;
+		} while (read_seqcount_retry(&jiffies_seq, seq));
+
+		if (ktime_before(now, nextp))
+			return;
+	}
+
+	/* Quick check failed, i.e. update is required. */
 	raw_spin_lock(&jiffies_lock);
+	/*
+	 * Reevaluate with the lock held. Another CPU might have done the
+	 * update already.
+	 */
 	if (ktime_before(now, tick_next_period)) {
 		raw_spin_unlock(&jiffies_lock);
 		return;
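
The hunk above replaces the single READ_ONCE() quick check with a word-size dependent one: on 64bit a tear-free acquire load of tick_next_period is enough, while on 32bit the value has to be re-read under the jiffies_seq sequence count because the 64bit store can tear into two 32bit halves. Below is a minimal userspace C11 sketch of that reader side, meant only to show the shape of the pattern; the names next_period, next_period_torn and seq are invented for the sketch, and in the kernel the retry loop is exactly what read_seqcount_begin()/read_seqcount_retry() provide.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Invented analogues of tick_next_period and jiffies_seq, for illustration only. */
static _Atomic int64_t next_period;       /* 64bit case: a single atomic word        */
static int64_t         next_period_torn;  /* 32bit case: plain 64bit value, may tear */
static atomic_uint     seq;               /* sequence count protecting it            */

/*
 * 64bit flavour of the quick check: one acquire load cannot observe a
 * half-written value, so no lock and no sequence count are needed. It
 * pairs with the writer's release store (see the writer-side sketch
 * after the second hunk).
 */
static bool quick_check_64(int64_t now)
{
	return now < atomic_load_explicit(&next_period, memory_order_acquire);
}

/*
 * 32bit flavour: the 64bit value is written as two 32bit stores, so it
 * is re-read until the sequence count is even and unchanged. The kernel's
 * read_seqcount_begin()/read_seqcount_retry() wrap this loop.
 */
static bool quick_check_32(int64_t now)
{
	unsigned int s;
	int64_t nextp;

	do {
		do {	/* wait until no update is in flight (count is even) */
			s = atomic_load_explicit(&seq, memory_order_acquire);
		} while (s & 1);

		nextp = next_period_torn;

		/* Order the data read before the recheck of the count. */
		atomic_thread_fence(memory_order_acquire);
	} while (atomic_load_explicit(&seq, memory_order_relaxed) != s);

	return now < nextp;
}
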
@@ -112,11 +118,25 @@ static void tick_do_update_jiffies64(ktime_t now)
 	jiffies_64 += ticks;
 
 	/*
-	 * Keep the tick_next_period variable up to date. WRITE_ONCE()
-	 * pairs with the READ_ONCE() in the lockless quick check above.
+	 * Keep the tick_next_period variable up to date.
 	 */
-	WRITE_ONCE(tick_next_period,
-		   ktime_add_ns(last_jiffies_update, TICK_NSEC));
+	nextp = ktime_add_ns(last_jiffies_update, TICK_NSEC);
+
+	if (IS_ENABLED(CONFIG_64BIT)) {
+		/*
+		 * Pairs with smp_load_acquire() in the lockless quick
+		 * check above and ensures that the update to jiffies_64 is
+		 * not reordered vs. the store to tick_next_period, neither
+		 * by the compiler nor by the CPU.
+		 */
+		smp_store_release(&tick_next_period, nextp);
+	} else {
+		/*
+		 * A plain store is good enough on 32bit as the quick check
+		 * above is protected by the sequence count.
+		 */
+		tick_next_period = nextp;
+	}
 
 	/*
 	 * Release the sequence count. calc_global_load() below is not
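
The second hunk is the matching publish side: on 64bit the new tick_next_period is stored with smp_store_release(), pairing with the acquire load of the quick check so that the jiffies_64 update done just before it is visible to any CPU that sees the new period, while on 32bit a plain store suffices because it sits inside the jiffies_seq write section taken under jiffies_lock. Continuing the illustrative C11 sketch with the same invented names (again an analogue, not the kernel code), the writer looks roughly like this:

#include <stdatomic.h>
#include <stdint.h>

/* Same invented globals as in the reader-side sketch. */
static _Atomic int64_t next_period;
static int64_t         next_period_torn;
static atomic_uint     seq;

/*
 * 64bit writer: the release store makes every update done before it
 * (the jiffies_64 increment in the kernel) visible to any reader whose
 * acquire load in quick_check_64() observes the new value.
 */
static void publish_64(int64_t nextp)
{
	atomic_store_explicit(&next_period, nextp, memory_order_release);
}

/*
 * 32bit writer: the plain (possibly tearing) store is bracketed by the
 * sequence count. A reader racing with it sees an odd or changed count
 * and retries in quick_check_32() instead of acting on a half-written
 * value. The kernel gets this bracketing from the jiffies_seq write
 * side held under jiffies_lock rather than from open-coded atomics.
 */
static void publish_32(int64_t nextp)
{
	atomic_fetch_add_explicit(&seq, 1, memory_order_relaxed); /* count odd  */
	atomic_thread_fence(memory_order_release);                /* ~ smp_wmb() */

	next_period_torn = nextp;	/* may be two 32bit stores */

	atomic_fetch_add_explicit(&seq, 1, memory_order_release); /* count even */
}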