From e9088adda13cd23249d4b0abb97ff8a81bf5573a Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Mon, 30 Apr 2018 12:00:09 +0200
Subject: x86/tsc: Always unregister clocksource_tsc_early

Don't leave the tsc-early clocksource registered if it errors out
early.

This was reported by Diego, who on his Core2 era machine got TSC
invalidated while it was running with tsc-early (due to C-states).
This results in keeping tsc-early with very bad effects.

Reported-and-Tested-by: Diego Viola
Fixes: aa83c45762a2 ("x86/tsc: Introduce early tsc clocksource")
Signed-off-by: Peter Zijlstra (Intel)
Signed-off-by: Thomas Gleixner
Reviewed-by: Rafael J. Wysocki
Cc: len.brown@intel.com
Cc: rjw@rjwysocki.net
Cc: diego.viola@gmail.com
Cc: rui.zhang@intel.com
Cc: stable@vger.kernel.org
Link: https://lkml.kernel.org/r/20180430100344.350507853@infradead.org
---
 arch/x86/kernel/tsc.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 91e6da48cbb6..23f36bc236ba 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -1244,7 +1244,7 @@ static void tsc_refine_calibration_work(struct work_struct *work)
 
 	/* Don't bother refining TSC on unstable systems */
 	if (tsc_unstable)
-		return;
+		goto unreg;
 
 	/*
 	 * Since the work is started early in boot, we may be
@@ -1297,11 +1297,12 @@ static void tsc_refine_calibration_work(struct work_struct *work)
 
 out:
 	if (tsc_unstable)
-		return;
+		goto unreg;
 
 	if (boot_cpu_has(X86_FEATURE_ART))
 		art_related_clocksource = &clocksource_tsc;
 	clocksource_register_khz(&clocksource_tsc, tsc_khz);
+unreg:
 	clocksource_unregister(&clocksource_tsc_early);
 }
 
@@ -1311,8 +1312,8 @@ static int __init init_tsc_clocksource(void)
 	if (!boot_cpu_has(X86_FEATURE_TSC) || tsc_disabled > 0 || !tsc_khz)
 		return 0;
 
-	if (check_tsc_unstable())
-		return 0;
+	if (tsc_unstable)
+		goto unreg;
 
 	if (tsc_clocksource_reliable)
 		clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
@@ -1328,6 +1329,7 @@ static int __init init_tsc_clocksource(void)
 	if (boot_cpu_has(X86_FEATURE_ART))
 		art_related_clocksource = &clocksource_tsc;
 	clocksource_register_khz(&clocksource_tsc, tsc_khz);
+unreg:
 	clocksource_unregister(&clocksource_tsc_early);
 	return 0;
 }
--
cgit v1.2.3
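
The fix above is the standard kernel cleanup idiom: every early-exit path
funnels through a label so the temporary resource (here the tsc-early
clocksource) is always released. A minimal, compilable sketch of the
pattern, using hypothetical stand-in names rather than the real kernel API:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the real clocksource calls. */
    static bool tsc_unstable;

    static void register_final(void)   { puts("register tsc"); }
    static void unregister_early(void) { puts("unregister tsc-early"); }

    static void refine_calibration(void)
    {
        if (tsc_unstable)
            goto unreg;        /* was: return, leaking tsc-early */

        /* ... refine the calibration ... */

        register_final();
    unreg:
        unregister_early();    /* now runs on every exit path */
    }

    int main(void)
    {
        tsc_unstable = true;   /* model Diego's C-state invalidation */
        refine_calibration();
        return 0;
    }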
From 2aae7bcfa4104b770e6f612356adb8d66c6144d6 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Mon, 23 Apr 2018 17:28:55 +0200
Subject: clocksource: Allow clocksource_mark_unstable() on unregistered clocksources

Because of how the code flips between tsc-early and tsc clocksources
it might need to mark one or both unstable. The current code in
mark_tsc_unstable() only worked because previously it registered the
tsc clocksource once and then never touched it.

Since it now unregisters the tsc-early clocksource, it needs to know
if a clocksource got unregistered and the current cs->mult test
doesn't work for that. Instead use list_empty(&cs->list) to test for
registration.

Furthermore, since clocksource_mark_unstable() needs to place the cs
on the wd_list, it links the cs->list and cs->wd_list serialization.
It must not see a clocksource registered (!empty cs->list) but already
past dequeue_watchdog(). So place {en,de}queue{,_watchdog}() under the
same lock.

Provided cs->list is initialized to empty, this then allows us to
unconditionally use clocksource_mark_unstable(), regardless of the
registration state.

Fixes: aa83c45762a2 ("x86/tsc: Introduce early tsc clocksource")
Signed-off-by: Peter Zijlstra (Intel)
Signed-off-by: Thomas Gleixner
Reviewed-by: Rafael J. Wysocki
Tested-by: Diego Viola
Cc: len.brown@intel.com
Cc: rjw@rjwysocki.net
Cc: diego.viola@gmail.com
Cc: rui.zhang@intel.com
Cc: stable@vger.kernel.org
Link: https://lkml.kernel.org/r/20180502135312.GS12217@hirez.programming.kicks-ass.net
---
 kernel/time/clocksource.c | 50 ++++++++++++++++++++++++++++++++---------------
 1 file changed, 34 insertions(+), 16 deletions(-)

diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 0e974cface0b..c3d2b94723dc 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -119,6 +119,16 @@ static DEFINE_SPINLOCK(watchdog_lock);
 static int watchdog_running;
 static atomic_t watchdog_reset_pending;
 
+static void inline clocksource_watchdog_lock(unsigned long *flags)
+{
+	spin_lock_irqsave(&watchdog_lock, *flags);
+}
+
+static void inline clocksource_watchdog_unlock(unsigned long *flags)
+{
+	spin_unlock_irqrestore(&watchdog_lock, *flags);
+}
+
 static int clocksource_watchdog_kthread(void *data);
 static void __clocksource_change_rating(struct clocksource *cs, int rating);
 
@@ -142,6 +152,9 @@ static void __clocksource_unstable(struct clocksource *cs)
 	cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
 	cs->flags |= CLOCK_SOURCE_UNSTABLE;
 
+	if (list_empty(&cs->list))
+		return;
+
 	if (cs->mark_unstable)
 		cs->mark_unstable(cs);
 
@@ -164,7 +177,7 @@ void clocksource_mark_unstable(struct clocksource *cs)
 
 	spin_lock_irqsave(&watchdog_lock, flags);
 	if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
-		if (list_empty(&cs->wd_list))
+		if (!list_empty(&cs->list) && list_empty(&cs->wd_list))
 			list_add(&cs->wd_list, &watchdog_list);
 		__clocksource_unstable(cs);
 	}
@@ -319,9 +332,6 @@ static void clocksource_resume_watchdog(void)
 
 static void clocksource_enqueue_watchdog(struct clocksource *cs)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&watchdog_lock, flags);
 	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
 		/* cs is a clocksource to be watched. */
 		list_add(&cs->wd_list, &watchdog_list);
@@ -331,7 +341,6 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs)
 		if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
 			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
 	}
-	spin_unlock_irqrestore(&watchdog_lock, flags);
 }
 
 static void clocksource_select_watchdog(bool fallback)
@@ -373,9 +382,6 @@ static void clocksource_select_watchdog(bool fallback)
 
 static void clocksource_dequeue_watchdog(struct clocksource *cs)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&watchdog_lock, flags);
 	if (cs != watchdog) {
 		if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
 			/* cs is a watched clocksource. */
@@ -384,21 +390,19 @@ static void clocksource_dequeue_watchdog(struct clocksource *cs)
 			clocksource_stop_watchdog();
 		}
 	}
-	spin_unlock_irqrestore(&watchdog_lock, flags);
 }
 
 static int __clocksource_watchdog_kthread(void)
 {
 	struct clocksource *cs, *tmp;
 	unsigned long flags;
-	LIST_HEAD(unstable);
 	int select = 0;
 
 	spin_lock_irqsave(&watchdog_lock, flags);
 	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
 		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
 			list_del_init(&cs->wd_list);
-			list_add(&cs->wd_list, &unstable);
+			__clocksource_change_rating(cs, 0);
 			select = 1;
 		}
 		if (cs->flags & CLOCK_SOURCE_RESELECT) {
@@ -410,11 +414,6 @@ static int __clocksource_watchdog_kthread(void)
 	clocksource_stop_watchdog();
 	spin_unlock_irqrestore(&watchdog_lock, flags);
 
-	/* Needs to be done outside of watchdog lock */
-	list_for_each_entry_safe(cs, tmp, &unstable, wd_list) {
-		list_del_init(&cs->wd_list);
-		__clocksource_change_rating(cs, 0);
-	}
 	return select;
 }
 
@@ -447,6 +446,9 @@ static inline int __clocksource_watchdog_kthread(void) { return 0; }
 static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
 void clocksource_mark_unstable(struct clocksource *cs) { }
 
+static void inline clocksource_watchdog_lock(unsigned long *flags) { }
+static void inline clocksource_watchdog_unlock(unsigned long *flags) { }
+
 #endif /* CONFIG_CLOCKSOURCE_WATCHDOG */
 
 /**
@@ -779,14 +781,19 @@ EXPORT_SYMBOL_GPL(__clocksource_update_freq_scale);
  */
 int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
 {
+	unsigned long flags;
 
 	/* Initialize mult/shift and max_idle_ns */
 	__clocksource_update_freq_scale(cs, scale, freq);
 
 	/* Add clocksource to the clocksource list */
 	mutex_lock(&clocksource_mutex);
+
+	clocksource_watchdog_lock(&flags);
 	clocksource_enqueue(cs);
 	clocksource_enqueue_watchdog(cs);
+	clocksource_watchdog_unlock(&flags);
+
 	clocksource_select();
 	clocksource_select_watchdog(false);
 	mutex_unlock(&clocksource_mutex);
@@ -808,8 +815,13 @@ static void __clocksource_change_rating(struct clocksource *cs, int rating)
  */
 void clocksource_change_rating(struct clocksource *cs, int rating)
 {
+	unsigned long flags;
+
 	mutex_lock(&clocksource_mutex);
+	clocksource_watchdog_lock(&flags);
 	__clocksource_change_rating(cs, rating);
+	clocksource_watchdog_unlock(&flags);
+
 	clocksource_select();
 	clocksource_select_watchdog(false);
 	mutex_unlock(&clocksource_mutex);
@@ -821,6 +833,8 @@ EXPORT_SYMBOL(clocksource_change_rating);
  */
 static int clocksource_unbind(struct clocksource *cs)
 {
+	unsigned long flags;
+
 	if (clocksource_is_watchdog(cs)) {
 		/* Select and try to install a replacement watchdog. */
 		clocksource_select_watchdog(true);
@@ -834,8 +848,12 @@ static int clocksource_unbind(struct clocksource *cs)
 		if (curr_clocksource == cs)
 			return -EBUSY;
 	}
+
+	clocksource_watchdog_lock(&flags);
 	clocksource_dequeue_watchdog(cs);
 	list_del_init(&cs->list);
+	clocksource_watchdog_unlock(&flags);
+
 	return 0;
 }
--
cgit v1.2.3
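
The key mechanism in the patch above is using list_empty(&cs->list) as the
registration test. This works because the kernel's list_head is circular: a
node that points at itself is on no list, and list_del_init() restores that
state at unregistration. A standalone, compilable sketch of the invariant,
using a minimal imitation of list.h (all names below are local stand-ins,
not the kernel implementation):

    #include <stdbool.h>
    #include <stdio.h>

    /* Minimal imitation of the kernel's struct list_head. */
    struct list_head { struct list_head *next, *prev; };

    #define LIST_HEAD_INIT(name) { &(name), &(name) }

    static void list_add(struct list_head *n, struct list_head *head)
    {
        n->next = head->next;
        n->prev = head;
        head->next->prev = n;
        head->next = n;
    }

    static void list_del_init(struct list_head *n)
    {
        n->prev->next = n->next;
        n->next->prev = n->prev;
        n->next = n->prev = n;      /* back to the "on no list" state */
    }

    static bool list_empty(const struct list_head *h) { return h->next == h; }

    /* A clocksource is "registered" iff it sits on the global list. */
    static struct list_head clocksource_list = LIST_HEAD_INIT(clocksource_list);

    struct clocksource { struct list_head list; };

    int main(void)
    {
        /* Self-initialized to empty, as a later patch does for the TSC. */
        struct clocksource cs = { .list = LIST_HEAD_INIT(cs.list) };

        printf("registered: %d\n", !list_empty(&cs.list));  /* 0 */
        list_add(&cs.list, &clocksource_list);
        printf("registered: %d\n", !list_empty(&cs.list));  /* 1 */
        list_del_init(&cs.list);    /* unregister must re-init the node */
        printf("registered: %d\n", !list_empty(&cs.list));  /* 0 */
        return 0;
    }

This is also why enqueue and dequeue must sit under one lock: the test is
only meaningful if nobody can observe cs->list non-empty while the wd_list
state is already being torn down.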
From 5b9e886a4af97574ca3ce1147f35545da0e7afc7 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Mon, 30 Apr 2018 12:00:11 +0200
Subject: clocksource: Initialize cs->wd_list

A number of places rely on list_empty(&cs->wd_list); however, the
list_head does not get initialized. Do so upon registration, such that
thereafter it is possible to rely on list_empty() correctly reflecting
the list membership status.

Signed-off-by: Peter Zijlstra (Intel)
Signed-off-by: Thomas Gleixner
Tested-by: Diego Viola
Reviewed-by: Rafael J. Wysocki
Cc: stable@vger.kernel.org
Cc: len.brown@intel.com
Cc: rjw@rjwysocki.net
Cc: rui.zhang@intel.com
Link: https://lkml.kernel.org/r/20180430100344.472662715@infradead.org
---
 kernel/time/clocksource.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index c3d2b94723dc..935f39eb8aac 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -332,6 +332,8 @@ static void clocksource_resume_watchdog(void)
 
 static void clocksource_enqueue_watchdog(struct clocksource *cs)
 {
+	INIT_LIST_HEAD(&cs->wd_list);
+
 	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
 		/* cs is a clocksource to be watched. */
 		list_add(&cs->wd_list, &watchdog_list);
--
cgit v1.2.3
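
Why the INIT_LIST_HEAD() matters: list_empty() tests next == head, so a
zero-initialized (static or kzalloc'ed) list_head has next == NULL and
reports non-empty, i.e. it looks as if the node were on a list it never
joined. A small sketch of the failure mode (minimal stand-ins again, not
the kernel headers):

    #include <stdbool.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    static bool list_empty(const struct list_head *h) { return h->next == h; }

    static struct list_head wd_list;   /* zeroed, never INIT_LIST_HEAD()ed */

    int main(void)
    {
        /* next == NULL != &wd_list: looks "on a list" although it never was. */
        printf("empty: %d\n", list_empty(&wd_list));   /* prints 0 */

        wd_list.next = wd_list.prev = &wd_list;   /* what INIT_LIST_HEAD() does */
        printf("empty: %d\n", list_empty(&wd_list));   /* prints 1 */
        return 0;
    }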
From e3b4f79025e0a4eb7e2a2c7d24dadfa1e38893b0 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Mon, 30 Apr 2018 12:00:12 +0200
Subject: x86/tsc: Fix mark_tsc_unstable()

mark_tsc_unstable() also needs to affect tsc_early. Now that
clocksource_mark_unstable() can be used on a clocksource irrespective
of its registration state, use it on both tsc_early and tsc.

This does, however, require cs->list to be initialized empty;
otherwise it cannot tell the registration state before registration.

Fixes: aa83c45762a2 ("x86/tsc: Introduce early tsc clocksource")
Signed-off-by: Peter Zijlstra (Intel)
Signed-off-by: Thomas Gleixner
Tested-by: Diego Viola
Reviewed-by: Rafael J. Wysocki
Cc: len.brown@intel.com
Cc: rjw@rjwysocki.net
Cc: rui.zhang@intel.com
Cc: stable@vger.kernel.org
Link: https://lkml.kernel.org/r/20180430100344.533326547@infradead.org
---
 arch/x86/kernel/tsc.c | 12 +++++-------
 1 file changed, 5 insertions(+), 7 deletions(-)

diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 23f36bc236ba..74392d9d51e0 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -1067,6 +1067,7 @@ static struct clocksource clocksource_tsc_early = {
 	.resume			= tsc_resume,
 	.mark_unstable		= tsc_cs_mark_unstable,
 	.tick_stable		= tsc_cs_tick_stable,
+	.list			= LIST_HEAD_INIT(clocksource_tsc_early.list),
 };
 
 /*
@@ -1086,6 +1087,7 @@ static struct clocksource clocksource_tsc = {
 	.resume			= tsc_resume,
 	.mark_unstable		= tsc_cs_mark_unstable,
 	.tick_stable		= tsc_cs_tick_stable,
+	.list			= LIST_HEAD_INIT(clocksource_tsc.list),
 };
 
 void mark_tsc_unstable(char *reason)
@@ -1098,13 +1100,9 @@ void mark_tsc_unstable(char *reason)
 	clear_sched_clock_stable();
 	disable_sched_clock_irqtime();
 	pr_info("Marking TSC unstable due to %s\n", reason);
-	/* Change only the rating, when not registered */
-	if (clocksource_tsc.mult) {
-		clocksource_mark_unstable(&clocksource_tsc);
-	} else {
-		clocksource_tsc.flags |= CLOCK_SOURCE_UNSTABLE;
-		clocksource_tsc.rating = 0;
-	}
+
+	clocksource_mark_unstable(&clocksource_tsc_early);
+	clocksource_mark_unstable(&clocksource_tsc);
 }
 EXPORT_SYMBOL_GPL(mark_tsc_unstable);
--
cgit v1.2.3

From cd2af07d823e5287cd6c91d54337348c2a873462 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Mon, 30 Apr 2018 12:00:13 +0200
Subject: clocksource: Consistent de-rate when marking unstable

When a registered clocksource gets marked unstable, the watchdog
kthread will de-rate and re-select the clocksource. Ensure it also
de-rates when getting called on an unregistered clocksource.

Signed-off-by: Peter Zijlstra (Intel)
Signed-off-by: Thomas Gleixner
Reviewed-by: Rafael J. Wysocki
Cc: len.brown@intel.com
Cc: rjw@rjwysocki.net
Cc: diego.viola@gmail.com
Cc: rui.zhang@intel.com
Cc: stable@vger.kernel.org
Link: https://lkml.kernel.org/r/20180430100344.594904898@infradead.org
---
 kernel/time/clocksource.c | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 935f39eb8aac..605656df16c4 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -152,12 +152,19 @@ static void __clocksource_unstable(struct clocksource *cs)
 	cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
 	cs->flags |= CLOCK_SOURCE_UNSTABLE;
 
-	if (list_empty(&cs->list))
+	/*
+	 * If the clocksource is registered clocksource_watchdog_kthread() will
+	 * re-rate and re-select.
+	 */
+	if (list_empty(&cs->list)) {
+		cs->rating = 0;
 		return;
+	}
 
 	if (cs->mark_unstable)
 		cs->mark_unstable(cs);
 
+	/* kick clocksource_watchdog_kthread() */
 	if (finished_booting)
 		schedule_work(&watchdog_work);
 }
--
cgit v1.2.3
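
The logic after this patch, in sketch form: for a registered clocksource
the watchdog kthread performs the de-rate and re-select, while an
unregistered one must be de-rated on the spot so a later registration
cannot re-select it. A compilable model with hypothetical field names (a
plain bool stands in for the list_empty(&cs->list) test):

    #include <stdbool.h>
    #include <stdio.h>

    struct clocksource {
        int  rating;
        bool registered;   /* stands in for !list_empty(&cs->list) */
        bool unstable;
    };

    /* Sketch of the fixed __clocksource_unstable() logic. */
    static void mark_unstable(struct clocksource *cs)
    {
        cs->unstable = true;
        if (!cs->registered) {
            /* No watchdog kthread will run for us: de-rate now. */
            cs->rating = 0;
            return;
        }
        /* Registered case: the watchdog kthread de-rates and re-selects. */
    }

    int main(void)
    {
        struct clocksource tsc_early = { .rating = 299 };

        mark_unstable(&tsc_early);
        printf("rating after mark_unstable: %d\n", tsc_early.rating);  /* 0 */
        return 0;
    }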
From 7dba33c6346c337aac3f7cd188137d4a6d3d1f3a Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Mon, 30 Apr 2018 12:00:14 +0200
Subject: clocksource: Rework stale comment

AFAICS the hotplug code no longer uses this function.

Signed-off-by: Peter Zijlstra (Intel)
Signed-off-by: Thomas Gleixner
Reviewed-by: Rafael J. Wysocki
Cc: len.brown@intel.com
Cc: rjw@rjwysocki.net
Cc: diego.viola@gmail.com
Cc: rui.zhang@intel.com
Link: https://lkml.kernel.org/r/20180430100344.656525644@infradead.org
---
 kernel/time/clocksource.c | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 605656df16c4..84f37420fcf5 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -173,10 +173,8 @@ static void __clocksource_unstable(struct clocksource *cs)
  * clocksource_mark_unstable - mark clocksource unstable via watchdog
  * @cs:		clocksource to be marked unstable
  *
- * This function is called instead of clocksource_change_rating from
- * cpu hotplug code to avoid a deadlock between the clocksource mutex
- * and the cpu hotplug mutex. It defers the update of the clocksource
- * to the watchdog thread.
+ * This function is called by the x86 TSC code to mark clocksources as unstable;
+ * it defers demotion and re-selection to a kthread.
  */
 void clocksource_mark_unstable(struct clocksource *cs)
 {
--
cgit v1.2.3