path: root/drivers
author     Linus Torvalds <torvalds@linux-foundation.org>  2016-07-29 23:55:30 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-07-29 23:55:30 +0300
commit     a6408f6cb63ac0958fee7dbce7861ffb540d8a49 (patch)
tree       c94a835d343974171951e3b805e6bbbb02852ebc /drivers
parent     1a81a8f2a5918956e214bb718099a89e500e7ec5 (diff)
parent     4fae16dffb812f0e0d98a0b2b0856ca48ca63e6c (diff)
download   linux-a6408f6cb63ac0958fee7dbce7861ffb540d8a49.tar.xz
Merge branch 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull smp hotplug updates from Thomas Gleixner:
 "This is the next part of the hotplug rework.

   - Convert all notifiers with a priority assigned

   - Convert all CPU_STARTING/DYING notifiers

   The final removal of the STARTING/DYING infrastructure will happen
   when the merge window closes.

   Another 700 hundred line of unpenetrable maze gone :)"

* 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (70 commits)
  timers/core: Correct callback order during CPU hot plug
  leds/trigger/cpu: Move from CPU_STARTING to ONLINE level
  powerpc/numa: Convert to hotplug state machine
  arm/perf: Fix hotplug state machine conversion
  irqchip/armada: Avoid unused function warnings
  ARC/time: Convert to hotplug state machine
  clocksource/atlas7: Convert to hotplug state machine
  clocksource/armada-370-xp: Convert to hotplug state machine
  clocksource/exynos_mct: Convert to hotplug state machine
  clocksource/arm_global_timer: Convert to hotplug state machine
  rcu: Convert rcutree to hotplug state machine
  KVM/arm/arm64/vgic-new: Convert to hotplug state machine
  smp/cfd: Convert core to hotplug state machine
  x86/x2apic: Convert to CPU hotplug state machine
  profile: Convert to hotplug state machine
  timers/core: Convert to hotplug state machine
  hrtimer: Convert to hotplug state machine
  x86/tboot: Convert to hotplug state machine
  arm64/armv8 deprecated: Convert to hotplug state machine
  hwtracing/coresight-etm4x: Convert to hotplug state machine
  ...
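Every conversion in the diff below follows the same shape: the old notifier_block with its switch over CPU_STARTING/CPU_DYING (or CPU_ONLINE/CPU_DOWN_PREPARE) actions is replaced by a pair of plain callbacks that take the CPU number and are registered once through cpuhp_setup_state(). What follows is a minimal editorial sketch of that pattern, not code from any driver in this pull; the foo_* names and CPUHP_AP_FOO_STARTING are hypothetical, the latter standing in for a state constant that would have to be added to enum cpuhp_state, as each converted driver does for its own state.

#include <linux/cpu.h>
#include <linux/cpuhotplug.h>
#include <linux/init.h>

/*
 * Hypothetical per-CPU callbacks. The CPU number is passed in directly,
 * so the (long)hcpu cast and the action & ~CPU_TASKS_FROZEN masking of
 * the old notifiers are gone.
 */
static int foo_starting_cpu(unsigned int cpu)
{
	/* bring the per-CPU resource up; runs on the incoming CPU */
	return 0;
}

static int foo_dying_cpu(unsigned int cpu)
{
	/* tear the per-CPU resource down again before the CPU goes away */
	return 0;
}

static int __init foo_init(void)
{
	/*
	 * Installs the callbacks; unlike the _nocalls() variant this also
	 * invokes foo_starting_cpu() on every CPU that is already online,
	 * which replaces the old "immediately configure the timer on the
	 * boot CPU" idiom removed in several hunks below.
	 */
	return cpuhp_setup_state(CPUHP_AP_FOO_STARTING, /* hypothetical state */
				 "AP_FOO_STARTING",
				 foo_starting_cpu, foo_dying_cpu);
}
early_initcall(foo_init);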
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/processor_driver.c                |  11
-rw-r--r--  drivers/bus/arm-cci.c                          |  53
-rw-r--r--  drivers/bus/arm-ccn.c                          |  57
-rw-r--r--  drivers/clocksource/arm_arch_timer.c           |  54
-rw-r--r--  drivers/clocksource/arm_global_timer.c         |  39
-rw-r--r--  drivers/clocksource/dummy_timer.c              |  36
-rw-r--r--  drivers/clocksource/exynos_mct.c               |  46
-rw-r--r--  drivers/clocksource/metag_generic.c            |  33
-rw-r--r--  drivers/clocksource/mips-gic-timer.c           |  38
-rw-r--r--  drivers/clocksource/qcom-timer.c               |  41
-rw-r--r--  drivers/clocksource/time-armada-370-xp.c       |  41
-rw-r--r--  drivers/clocksource/timer-atlas7.c             |  41
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm3x.c  |  90
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm4x.c  |  87
-rw-r--r--  drivers/irqchip/irq-armada-370-xp.c            |  44
-rw-r--r--  drivers/irqchip/irq-bcm2836.c                  |  34
-rw-r--r--  drivers/irqchip/irq-gic-v3.c                   |  22
-rw-r--r--  drivers/irqchip/irq-gic.c                      |  23
-rw-r--r--  drivers/irqchip/irq-hip04.c                    |  25
-rw-r--r--  drivers/leds/trigger/ledtrig-cpu.c             |  32
-rw-r--r--  drivers/perf/arm_pmu.c                         |  59
21 files changed, 357 insertions, 549 deletions
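One recurring detail worth noting before the per-file hunks: drivers that manage several PMU instances (arm-cci, arm-ccn, arm_pmu) can no longer recover their instance with container_of() on a notifier_block, because a hotplug callback only receives the CPU number. They therefore keep their instances on a mutex-protected global list and let a single callback walk it. A condensed sketch of that bookkeeping, using hypothetical foo_pmu names rather than any one driver's code:

#include <linux/cpumask.h>
#include <linux/list.h>
#include <linux/mutex.h>

struct foo_pmu {			/* hypothetical PMU instance */
	struct list_head entry;
	cpumask_t cpus;			/* CPU currently serving this PMU */
};

static DEFINE_MUTEX(foo_pmu_mutex);
static LIST_HEAD(foo_pmu_list);		/* every probed instance adds itself here */

static int foo_pmu_offline_cpu(unsigned int cpu)
{
	struct foo_pmu *pmu;
	unsigned int target;

	mutex_lock(&foo_pmu_mutex);
	list_for_each_entry(pmu, &foo_pmu_list, entry) {
		/* skip instances that were not using the outgoing CPU */
		if (!cpumask_test_and_clear_cpu(cpu, &pmu->cpus))
			continue;
		/* hand the duty to any other online CPU, if one is left */
		target = cpumask_any_but(cpu_online_mask, cpu);
		if (target >= nr_cpu_ids)
			continue;
		cpumask_set_cpu(target, &pmu->cpus);
	}
	mutex_unlock(&foo_pmu_mutex);
	return 0;
}

In the real drivers below this callback is registered once at init time with cpuhp_setup_state_nocalls(), while probe and remove merely add the instance to, or delete it from, the list under the mutex.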
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 0ca14ac7bb28..0553aeebb228 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -118,12 +118,13 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb,
struct acpi_device *device;
action &= ~CPU_TASKS_FROZEN;
- /*
- * CPU_STARTING and CPU_DYING must not sleep. Return here since
- * acpi_bus_get_device() may sleep.
- */
- if (action == CPU_STARTING || action == CPU_DYING)
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_DEAD:
+ break;
+ default:
return NOTIFY_DONE;
+ }
if (!pr || acpi_bus_get_device(pr->handle, &device))
return NOTIFY_DONE;
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
index a49b28378d59..5755907f836f 100644
--- a/drivers/bus/arm-cci.c
+++ b/drivers/bus/arm-cci.c
@@ -144,12 +144,15 @@ struct cci_pmu {
int num_cntrs;
atomic_t active_events;
struct mutex reserve_mutex;
- struct notifier_block cpu_nb;
+ struct list_head entry;
cpumask_t cpus;
};
#define to_cci_pmu(c) (container_of(c, struct cci_pmu, pmu))
+static DEFINE_MUTEX(cci_pmu_mutex);
+static LIST_HEAD(cci_pmu_list);
+
enum cci_models {
#ifdef CONFIG_ARM_CCI400_PMU
CCI400_R0,
@@ -1503,31 +1506,26 @@ static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev)
return perf_pmu_register(&cci_pmu->pmu, name, -1);
}
-static int cci_pmu_cpu_notifier(struct notifier_block *self,
- unsigned long action, void *hcpu)
+static int cci_pmu_offline_cpu(unsigned int cpu)
{
- struct cci_pmu *cci_pmu = container_of(self,
- struct cci_pmu, cpu_nb);
- unsigned int cpu = (long)hcpu;
+ struct cci_pmu *cci_pmu;
unsigned int target;
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_DOWN_PREPARE:
+ mutex_lock(&cci_pmu_mutex);
+ list_for_each_entry(cci_pmu, &cci_pmu_list, entry) {
if (!cpumask_test_and_clear_cpu(cpu, &cci_pmu->cpus))
- break;
+ continue;
target = cpumask_any_but(cpu_online_mask, cpu);
- if (target >= nr_cpu_ids) // UP, last CPU
- break;
+ if (target >= nr_cpu_ids)
+ continue;
/*
* TODO: migrate context once core races on event->ctx have
* been fixed.
*/
cpumask_set_cpu(target, &cci_pmu->cpus);
- default:
- break;
}
-
- return NOTIFY_OK;
+ mutex_unlock(&cci_pmu_mutex);
+ return 0;
}
static struct cci_pmu_model cci_pmu_models[] = {
@@ -1766,24 +1764,13 @@ static int cci_pmu_probe(struct platform_device *pdev)
atomic_set(&cci_pmu->active_events, 0);
cpumask_set_cpu(smp_processor_id(), &cci_pmu->cpus);
- cci_pmu->cpu_nb = (struct notifier_block) {
- .notifier_call = cci_pmu_cpu_notifier,
- /*
- * to migrate uncore events, our notifier should be executed
- * before perf core's notifier.
- */
- .priority = CPU_PRI_PERF + 1,
- };
-
- ret = register_cpu_notifier(&cci_pmu->cpu_nb);
+ ret = cci_pmu_init(cci_pmu, pdev);
if (ret)
return ret;
- ret = cci_pmu_init(cci_pmu, pdev);
- if (ret) {
- unregister_cpu_notifier(&cci_pmu->cpu_nb);
- return ret;
- }
+ mutex_lock(&cci_pmu_mutex);
+ list_add(&cci_pmu->entry, &cci_pmu_list);
+ mutex_unlock(&cci_pmu_mutex);
pr_info("ARM %s PMU driver probed", cci_pmu->model->name);
return 0;
@@ -1817,6 +1804,12 @@ static int __init cci_platform_init(void)
{
int ret;
+ ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE,
+ "AP_PERF_ARM_CCI_ONLINE", NULL,
+ cci_pmu_offline_cpu);
+ if (ret)
+ return ret;
+
ret = platform_driver_register(&cci_pmu_driver);
if (ret)
return ret;
diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c
index acc3eb542c74..97a9185af433 100644
--- a/drivers/bus/arm-ccn.c
+++ b/drivers/bus/arm-ccn.c
@@ -167,7 +167,7 @@ struct arm_ccn_dt {
struct hrtimer hrtimer;
cpumask_t cpu;
- struct notifier_block cpu_nb;
+ struct list_head entry;
struct pmu pmu;
};
@@ -189,6 +189,8 @@ struct arm_ccn {
struct arm_ccn_dt dt;
};
+static DEFINE_MUTEX(arm_ccn_mutex);
+static LIST_HEAD(arm_ccn_list);
static int arm_ccn_node_to_xp(int node)
{
@@ -1171,30 +1173,27 @@ static enum hrtimer_restart arm_ccn_pmu_timer_handler(struct hrtimer *hrtimer)
}
-static int arm_ccn_pmu_cpu_notifier(struct notifier_block *nb,
- unsigned long action, void *hcpu)
+static int arm_ccn_pmu_offline_cpu(unsigned int cpu)
{
- struct arm_ccn_dt *dt = container_of(nb, struct arm_ccn_dt, cpu_nb);
- struct arm_ccn *ccn = container_of(dt, struct arm_ccn, dt);
- unsigned int cpu = (long)hcpu; /* for (long) see kernel/cpu.c */
+ struct arm_ccn_dt *dt;
unsigned int target;
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_DOWN_PREPARE:
+ mutex_lock(&arm_ccn_mutex);
+ list_for_each_entry(dt, &arm_ccn_list, entry) {
+ struct arm_ccn *ccn = container_of(dt, struct arm_ccn, dt);
+
if (!cpumask_test_and_clear_cpu(cpu, &dt->cpu))
- break;
+ continue;
target = cpumask_any_but(cpu_online_mask, cpu);
if (target >= nr_cpu_ids)
- break;
+ continue;
perf_pmu_migrate_context(&dt->pmu, cpu, target);
cpumask_set_cpu(target, &dt->cpu);
if (ccn->irq)
WARN_ON(irq_set_affinity_hint(ccn->irq, &dt->cpu) != 0);
- default:
- break;
}
-
- return NOTIFY_OK;
+ mutex_unlock(&arm_ccn_mutex);
+ return 0;
}
@@ -1266,16 +1265,6 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
/* Pick one CPU which we will use to collect data from CCN... */
cpumask_set_cpu(smp_processor_id(), &ccn->dt.cpu);
- /*
- * ... and change the selection when it goes offline. Priority is
- * picked to have a chance to migrate events before perf is notified.
- */
- ccn->dt.cpu_nb.notifier_call = arm_ccn_pmu_cpu_notifier;
- ccn->dt.cpu_nb.priority = CPU_PRI_PERF + 1,
- err = register_cpu_notifier(&ccn->dt.cpu_nb);
- if (err)
- goto error_cpu_notifier;
-
/* Also make sure that the overflow interrupt is handled by this CPU */
if (ccn->irq) {
err = irq_set_affinity_hint(ccn->irq, &ccn->dt.cpu);
@@ -1289,12 +1278,13 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
if (err)
goto error_pmu_register;
+ mutex_lock(&arm_ccn_mutex);
+ list_add(&ccn->dt.entry, &arm_ccn_list);
+ mutex_unlock(&arm_ccn_mutex);
return 0;
error_pmu_register:
error_set_affinity:
- unregister_cpu_notifier(&ccn->dt.cpu_nb);
-error_cpu_notifier:
ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id);
for (i = 0; i < ccn->num_xps; i++)
writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL);
@@ -1306,9 +1296,12 @@ static void arm_ccn_pmu_cleanup(struct arm_ccn *ccn)
{
int i;
+ mutex_lock(&arm_ccn_mutex);
+ list_del(&ccn->dt.entry);
+ mutex_unlock(&arm_ccn_mutex);
+
if (ccn->irq)
irq_set_affinity_hint(ccn->irq, NULL);
- unregister_cpu_notifier(&ccn->dt.cpu_nb);
for (i = 0; i < ccn->num_xps; i++)
writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL);
writel(0, ccn->dt.base + CCN_DT_PMCR);
@@ -1316,7 +1309,6 @@ static void arm_ccn_pmu_cleanup(struct arm_ccn *ccn)
ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id);
}
-
static int arm_ccn_for_each_valid_region(struct arm_ccn *ccn,
int (*callback)(struct arm_ccn *ccn, int region,
void __iomem *base, u32 type, u32 id))
@@ -1533,7 +1525,13 @@ static struct platform_driver arm_ccn_driver = {
static int __init arm_ccn_init(void)
{
- int i;
+ int i, ret;
+
+ ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE,
+ "AP_PERF_ARM_CCN_ONLINE", NULL,
+ arm_ccn_pmu_offline_cpu);
+ if (ret)
+ return ret;
for (i = 0; i < ARRAY_SIZE(arm_ccn_pmu_events); i++)
arm_ccn_pmu_events_attrs[i] = &arm_ccn_pmu_events[i].attr.attr;
@@ -1543,6 +1541,7 @@ static int __init arm_ccn_init(void)
static void __exit arm_ccn_exit(void)
{
+ cpuhp_remove_state_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE);
platform_driver_unregister(&arm_ccn_driver);
}
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 5effd3027319..28bce3f4f81d 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -370,8 +370,10 @@ static bool arch_timer_has_nonsecure_ppi(void)
arch_timer_ppi[PHYS_NONSECURE_PPI]);
}
-static int arch_timer_setup(struct clock_event_device *clk)
+static int arch_timer_starting_cpu(unsigned int cpu)
{
+ struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
+
__arch_timer_setup(ARCH_CP15_TIMER, clk);
enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], 0);
@@ -527,29 +529,14 @@ static void arch_timer_stop(struct clock_event_device *clk)
clk->set_state_shutdown(clk);
}
-static int arch_timer_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
+static int arch_timer_dying_cpu(unsigned int cpu)
{
- /*
- * Grab cpu pointer in each case to avoid spurious
- * preemptible warnings
- */
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_STARTING:
- arch_timer_setup(this_cpu_ptr(arch_timer_evt));
- break;
- case CPU_DYING:
- arch_timer_stop(this_cpu_ptr(arch_timer_evt));
- break;
- }
+ struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
- return NOTIFY_OK;
+ arch_timer_stop(clk);
+ return 0;
}
-static struct notifier_block arch_timer_cpu_nb = {
- .notifier_call = arch_timer_cpu_notify,
-};
-
#ifdef CONFIG_CPU_PM
static unsigned int saved_cntkctl;
static int arch_timer_cpu_pm_notify(struct notifier_block *self,
@@ -570,11 +557,21 @@ static int __init arch_timer_cpu_pm_init(void)
{
return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier);
}
+
+static void __init arch_timer_cpu_pm_deinit(void)
+{
+ WARN_ON(cpu_pm_unregister_notifier(&arch_timer_cpu_pm_notifier));
+}
+
#else
static int __init arch_timer_cpu_pm_init(void)
{
return 0;
}
+
+static void __init arch_timer_cpu_pm_deinit(void)
+{
+}
#endif
static int __init arch_timer_register(void)
@@ -621,22 +618,23 @@ static int __init arch_timer_register(void)
goto out_free;
}
- err = register_cpu_notifier(&arch_timer_cpu_nb);
- if (err)
- goto out_free_irq;
-
err = arch_timer_cpu_pm_init();
if (err)
goto out_unreg_notify;
- /* Immediately configure the timer on the boot CPU */
- arch_timer_setup(this_cpu_ptr(arch_timer_evt));
+ /* Register and immediately configure the timer on the boot CPU */
+ err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING,
+ "AP_ARM_ARCH_TIMER_STARTING",
+ arch_timer_starting_cpu, arch_timer_dying_cpu);
+ if (err)
+ goto out_unreg_cpupm;
return 0;
+out_unreg_cpupm:
+ arch_timer_cpu_pm_deinit();
+
out_unreg_notify:
- unregister_cpu_notifier(&arch_timer_cpu_nb);
-out_free_irq:
free_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], arch_timer_evt);
if (arch_timer_has_nonsecure_ppi())
free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c
index 2a9ceb6e93f9..8da03298f844 100644
--- a/drivers/clocksource/arm_global_timer.c
+++ b/drivers/clocksource/arm_global_timer.c
@@ -165,9 +165,9 @@ static irqreturn_t gt_clockevent_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int gt_clockevents_init(struct clock_event_device *clk)
+static int gt_starting_cpu(unsigned int cpu)
{
- int cpu = smp_processor_id();
+ struct clock_event_device *clk = this_cpu_ptr(gt_evt);
clk->name = "arm_global_timer";
clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
@@ -186,10 +186,13 @@ static int gt_clockevents_init(struct clock_event_device *clk)
return 0;
}
-static void gt_clockevents_stop(struct clock_event_device *clk)
+static int gt_dying_cpu(unsigned int cpu)
{
+ struct clock_event_device *clk = this_cpu_ptr(gt_evt);
+
gt_clockevent_shutdown(clk);
disable_percpu_irq(clk->irq);
+ return 0;
}
static cycle_t gt_clocksource_read(struct clocksource *cs)
@@ -252,24 +255,6 @@ static int __init gt_clocksource_init(void)
return clocksource_register_hz(&gt_clocksource, gt_clk_rate);
}
-static int gt_cpu_notify(struct notifier_block *self, unsigned long action,
- void *hcpu)
-{
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_STARTING:
- gt_clockevents_init(this_cpu_ptr(gt_evt));
- break;
- case CPU_DYING:
- gt_clockevents_stop(this_cpu_ptr(gt_evt));
- break;
- }
-
- return NOTIFY_OK;
-}
-static struct notifier_block gt_cpu_nb = {
- .notifier_call = gt_cpu_notify,
-};
-
static int __init global_timer_of_register(struct device_node *np)
{
struct clk *gt_clk;
@@ -325,18 +310,14 @@ static int __init global_timer_of_register(struct device_node *np)
goto out_free;
}
- err = register_cpu_notifier(&gt_cpu_nb);
- if (err) {
- pr_warn("global-timer: unable to register cpu notifier.\n");
- goto out_irq;
- }
-
- /* Immediately configure the timer on the boot CPU */
+ /* Register and immediately configure the timer on the boot CPU */
err = gt_clocksource_init();
if (err)
goto out_irq;
- err = gt_clockevents_init(this_cpu_ptr(gt_evt));
+ err = cpuhp_setup_state(CPUHP_AP_ARM_GLOBAL_TIMER_STARTING,
+ "AP_ARM_GLOBAL_TIMER_STARTING",
+ gt_starting_cpu, gt_dying_cpu);
if (err)
goto out_irq;
diff --git a/drivers/clocksource/dummy_timer.c b/drivers/clocksource/dummy_timer.c
index 776b6c86dcd5..89f1c2edbe02 100644
--- a/drivers/clocksource/dummy_timer.c
+++ b/drivers/clocksource/dummy_timer.c
@@ -16,10 +16,9 @@
static DEFINE_PER_CPU(struct clock_event_device, dummy_timer_evt);
-static void dummy_timer_setup(void)
+static int dummy_timer_starting_cpu(unsigned int cpu)
{
- int cpu = smp_processor_id();
- struct clock_event_device *evt = raw_cpu_ptr(&dummy_timer_evt);
+ struct clock_event_device *evt = per_cpu_ptr(&dummy_timer_evt, cpu);
evt->name = "dummy_timer";
evt->features = CLOCK_EVT_FEAT_PERIODIC |
@@ -29,36 +28,13 @@ static void dummy_timer_setup(void)
evt->cpumask = cpumask_of(cpu);
clockevents_register_device(evt);
+ return 0;
}
-static int dummy_timer_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
-{
- if ((action & ~CPU_TASKS_FROZEN) == CPU_STARTING)
- dummy_timer_setup();
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block dummy_timer_cpu_nb = {
- .notifier_call = dummy_timer_cpu_notify,
-};
-
static int __init dummy_timer_register(void)
{
- int err = 0;
-
- cpu_notifier_register_begin();
- err = __register_cpu_notifier(&dummy_timer_cpu_nb);
- if (err)
- goto out;
-
- /* We won't get a call on the boot CPU, so register immediately */
- if (num_possible_cpus() > 1)
- dummy_timer_setup();
-
-out:
- cpu_notifier_register_done();
- return err;
+ return cpuhp_setup_state(CPUHP_AP_DUMMY_TIMER_STARTING,
+ "AP_DUMMY_TIMER_STARTING",
+ dummy_timer_starting_cpu, NULL);
}
early_initcall(dummy_timer_register);
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index 0d18dd4b3bd2..41840d02c331 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -443,10 +443,11 @@ static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int exynos4_local_timer_setup(struct mct_clock_event_device *mevt)
+static int exynos4_mct_starting_cpu(unsigned int cpu)
{
+ struct mct_clock_event_device *mevt =
+ per_cpu_ptr(&percpu_mct_tick, cpu);
struct clock_event_device *evt = &mevt->evt;
- unsigned int cpu = smp_processor_id();
mevt->base = EXYNOS4_MCT_L_BASE(cpu);
snprintf(mevt->name, sizeof(mevt->name), "mct_tick%d", cpu);
@@ -480,8 +481,10 @@ static int exynos4_local_timer_setup(struct mct_clock_event_device *mevt)
return 0;
}
-static void exynos4_local_timer_stop(struct mct_clock_event_device *mevt)
+static int exynos4_mct_dying_cpu(unsigned int cpu)
{
+ struct mct_clock_event_device *mevt =
+ per_cpu_ptr(&percpu_mct_tick, cpu);
struct clock_event_device *evt = &mevt->evt;
evt->set_state_shutdown(evt);
@@ -491,39 +494,12 @@ static void exynos4_local_timer_stop(struct mct_clock_event_device *mevt)
} else {
disable_percpu_irq(mct_irqs[MCT_L0_IRQ]);
}
+ return 0;
}
-static int exynos4_mct_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
-{
- struct mct_clock_event_device *mevt;
-
- /*
- * Grab cpu pointer in each case to avoid spurious
- * preemptible warnings
- */
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_STARTING:
- mevt = this_cpu_ptr(&percpu_mct_tick);
- exynos4_local_timer_setup(mevt);
- break;
- case CPU_DYING:
- mevt = this_cpu_ptr(&percpu_mct_tick);
- exynos4_local_timer_stop(mevt);
- break;
- }
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block exynos4_mct_cpu_nb = {
- .notifier_call = exynos4_mct_cpu_notify,
-};
-
static int __init exynos4_timer_resources(struct device_node *np, void __iomem *base)
{
int err, cpu;
- struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);
struct clk *mct_clk, *tick_clk;
tick_clk = np ? of_clk_get_by_name(np, "fin_pll") :
@@ -570,12 +546,14 @@ static int __init exynos4_timer_resources(struct device_node *np, void __iomem *
}
}
- err = register_cpu_notifier(&exynos4_mct_cpu_nb);
+ /* Install hotplug callbacks which configure the timer on this CPU */
+ err = cpuhp_setup_state(CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
+ "AP_EXYNOS4_MCT_TIMER_STARTING",
+ exynos4_mct_starting_cpu,
+ exynos4_mct_dying_cpu);
if (err)
goto out_irq;
- /* Immediately configure the timer on the boot CPU */
- exynos4_local_timer_setup(mevt);
return 0;
out_irq:
diff --git a/drivers/clocksource/metag_generic.c b/drivers/clocksource/metag_generic.c
index bcd5c0d602a0..a80ab3e446b7 100644
--- a/drivers/clocksource/metag_generic.c
+++ b/drivers/clocksource/metag_generic.c
@@ -90,7 +90,7 @@ unsigned long long sched_clock(void)
return ticks << HARDWARE_TO_NS_SHIFT;
}
-static void arch_timer_setup(unsigned int cpu)
+static int arch_timer_starting_cpu(unsigned int cpu)
{
unsigned int txdivtime;
struct clock_event_device *clk = &per_cpu(local_clockevent, cpu);
@@ -132,27 +132,9 @@ static void arch_timer_setup(unsigned int cpu)
val = core_reg_read(TXUCT_ID, TXTIMER_REGNUM, thread0);
__core_reg_set(TXTIMER, val);
}
+ return 0;
}
-static int arch_timer_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
-{
- int cpu = (long)hcpu;
-
- switch (action) {
- case CPU_STARTING:
- case CPU_STARTING_FROZEN:
- arch_timer_setup(cpu);
- break;
- }
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block arch_timer_cpu_nb = {
- .notifier_call = arch_timer_cpu_notify,
-};
-
int __init metag_generic_timer_init(void)
{
/*
@@ -170,11 +152,8 @@ int __init metag_generic_timer_init(void)
setup_irq(tbisig_map(TBID_SIGNUM_TRT), &metag_timer_irq);
- /* Configure timer on boot CPU */
- arch_timer_setup(smp_processor_id());
-
- /* Hook cpu boot to configure other CPU's timers */
- register_cpu_notifier(&arch_timer_cpu_nb);
-
- return 0;
+ /* Hook cpu boot to configure the CPU's timers */
+ return cpuhp_setup_state(CPUHP_AP_METAG_TIMER_STARTING,
+ "AP_METAG_TIMER_STARTING",
+ arch_timer_starting_cpu, NULL);
}
diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c
index 1572c7a778ab..d91e8725917c 100644
--- a/drivers/clocksource/mips-gic-timer.c
+++ b/drivers/clocksource/mips-gic-timer.c
@@ -49,10 +49,9 @@ struct irqaction gic_compare_irqaction = {
.name = "timer",
};
-static void gic_clockevent_cpu_init(struct clock_event_device *cd)
+static void gic_clockevent_cpu_init(unsigned int cpu,
+ struct clock_event_device *cd)
{
- unsigned int cpu = smp_processor_id();
-
cd->name = "MIPS GIC";
cd->features = CLOCK_EVT_FEAT_ONESHOT |
CLOCK_EVT_FEAT_C3STOP;
@@ -79,19 +78,10 @@ static void gic_update_frequency(void *data)
clockevents_update_freq(this_cpu_ptr(&gic_clockevent_device), rate);
}
-static int gic_cpu_notifier(struct notifier_block *nb, unsigned long action,
- void *data)
+static int gic_starting_cpu(unsigned int cpu)
{
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_STARTING:
- gic_clockevent_cpu_init(this_cpu_ptr(&gic_clockevent_device));
- break;
- case CPU_DYING:
- gic_clockevent_cpu_exit(this_cpu_ptr(&gic_clockevent_device));
- break;
- }
-
- return NOTIFY_OK;
+ gic_clockevent_cpu_init(cpu, this_cpu_ptr(&gic_clockevent_device));
+ return 0;
}
static int gic_clk_notifier(struct notifier_block *nb, unsigned long action,
@@ -105,10 +95,11 @@ static int gic_clk_notifier(struct notifier_block *nb, unsigned long action,
return NOTIFY_OK;
}
-
-static struct notifier_block gic_cpu_nb = {
- .notifier_call = gic_cpu_notifier,
-};
+static int gic_dying_cpu(unsigned int cpu)
+{
+ gic_clockevent_cpu_exit(this_cpu_ptr(&gic_clockevent_device));
+ return 0;
+}
static struct notifier_block gic_clk_nb = {
.notifier_call = gic_clk_notifier,
@@ -125,12 +116,9 @@ static int gic_clockevent_init(void)
if (ret < 0)
return ret;
- ret = register_cpu_notifier(&gic_cpu_nb);
- if (ret < 0)
- pr_warn("GIC: Unable to register CPU notifier\n");
-
- gic_clockevent_cpu_init(this_cpu_ptr(&gic_clockevent_device));
-
+ cpuhp_setup_state(CPUHP_AP_MIPS_GIC_TIMER_STARTING,
+ "AP_MIPS_GIC_TIMER_STARTING", gic_starting_cpu,
+ gic_dying_cpu);
return 0;
}
diff --git a/drivers/clocksource/qcom-timer.c b/drivers/clocksource/qcom-timer.c
index 662576339049..3283cfa2aa52 100644
--- a/drivers/clocksource/qcom-timer.c
+++ b/drivers/clocksource/qcom-timer.c
@@ -105,9 +105,9 @@ static struct clocksource msm_clocksource = {
static int msm_timer_irq;
static int msm_timer_has_ppi;
-static int msm_local_timer_setup(struct clock_event_device *evt)
+static int msm_local_timer_starting_cpu(unsigned int cpu)
{
- int cpu = smp_processor_id();
+ struct clock_event_device *evt = per_cpu_ptr(msm_evt, cpu);
int err;
evt->irq = msm_timer_irq;
@@ -135,35 +135,15 @@ static int msm_local_timer_setup(struct clock_event_device *evt)
return 0;
}
-static void msm_local_timer_stop(struct clock_event_device *evt)
+static int msm_local_timer_dying_cpu(unsigned int cpu)
{
+ struct clock_event_device *evt = per_cpu_ptr(msm_evt, cpu);
+
evt->set_state_shutdown(evt);
disable_percpu_irq(evt->irq);
+ return 0;
}
-static int msm_timer_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
-{
- /*
- * Grab cpu pointer in each case to avoid spurious
- * preemptible warnings
- */
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_STARTING:
- msm_local_timer_setup(this_cpu_ptr(msm_evt));
- break;
- case CPU_DYING:
- msm_local_timer_stop(this_cpu_ptr(msm_evt));
- break;
- }
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block msm_timer_cpu_nb = {
- .notifier_call = msm_timer_cpu_notify,
-};
-
static u64 notrace msm_sched_clock_read(void)
{
return msm_clocksource.read(&msm_clocksource);
@@ -200,14 +180,15 @@ static int __init msm_timer_init(u32 dgt_hz, int sched_bits, int irq,
if (res) {
pr_err("request_percpu_irq failed\n");
} else {
- res = register_cpu_notifier(&msm_timer_cpu_nb);
+ /* Install and invoke hotplug callbacks */
+ res = cpuhp_setup_state(CPUHP_AP_QCOM_TIMER_STARTING,
+ "AP_QCOM_TIMER_STARTING",
+ msm_local_timer_starting_cpu,
+ msm_local_timer_dying_cpu);
if (res) {
free_percpu_irq(irq, msm_evt);
goto err;
}
-
- /* Immediately configure the timer on the boot CPU */
- msm_local_timer_setup(raw_cpu_ptr(msm_evt));
}
err:
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c
index 20ec066481fe..719b478d136e 100644
--- a/drivers/clocksource/time-armada-370-xp.c
+++ b/drivers/clocksource/time-armada-370-xp.c
@@ -170,10 +170,10 @@ static irqreturn_t armada_370_xp_timer_interrupt(int irq, void *dev_id)
/*
* Setup the local clock events for a CPU.
*/
-static int armada_370_xp_timer_setup(struct clock_event_device *evt)
+static int armada_370_xp_timer_starting_cpu(unsigned int cpu)
{
+ struct clock_event_device *evt = per_cpu_ptr(armada_370_xp_evt, cpu);
u32 clr = 0, set = 0;
- int cpu = smp_processor_id();
if (timer25Mhz)
set = TIMER0_25MHZ;
@@ -200,35 +200,15 @@ static int armada_370_xp_timer_setup(struct clock_event_device *evt)
return 0;
}
-static void armada_370_xp_timer_stop(struct clock_event_device *evt)
+static int armada_370_xp_timer_dying_cpu(unsigned int cpu)
{
+ struct clock_event_device *evt = per_cpu_ptr(armada_370_xp_evt, cpu);
+
evt->set_state_shutdown(evt);
disable_percpu_irq(evt->irq);
+ return 0;
}
-static int armada_370_xp_timer_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
-{
- /*
- * Grab cpu pointer in each case to avoid spurious
- * preemptible warnings
- */
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_STARTING:
- armada_370_xp_timer_setup(this_cpu_ptr(armada_370_xp_evt));
- break;
- case CPU_DYING:
- armada_370_xp_timer_stop(this_cpu_ptr(armada_370_xp_evt));
- break;
- }
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block armada_370_xp_timer_cpu_nb = {
- .notifier_call = armada_370_xp_timer_cpu_notify,
-};
-
static u32 timer0_ctrl_reg, timer0_local_ctrl_reg;
static int armada_370_xp_timer_suspend(void)
@@ -322,8 +302,6 @@ static int __init armada_370_xp_timer_common_init(struct device_node *np)
return res;
}
- register_cpu_notifier(&armada_370_xp_timer_cpu_nb);
-
armada_370_xp_evt = alloc_percpu(struct clock_event_device);
if (!armada_370_xp_evt)
return -ENOMEM;
@@ -341,9 +319,12 @@ static int __init armada_370_xp_timer_common_init(struct device_node *np)
return res;
}
- res = armada_370_xp_timer_setup(this_cpu_ptr(armada_370_xp_evt));
+ res = cpuhp_setup_state(CPUHP_AP_ARMADA_TIMER_STARTING,
+ "AP_ARMADA_TIMER_STARTING",
+ armada_370_xp_timer_starting_cpu,
+ armada_370_xp_timer_dying_cpu);
if (res) {
- pr_err("Failed to setup timer");
+ pr_err("Failed to setup hotplug state and timer");
return res;
}
diff --git a/drivers/clocksource/timer-atlas7.c b/drivers/clocksource/timer-atlas7.c
index 90f8fbc154a4..4334e0330ada 100644
--- a/drivers/clocksource/timer-atlas7.c
+++ b/drivers/clocksource/timer-atlas7.c
@@ -172,9 +172,9 @@ static struct irqaction sirfsoc_timer1_irq = {
.handler = sirfsoc_timer_interrupt,
};
-static int sirfsoc_local_timer_setup(struct clock_event_device *ce)
+static int sirfsoc_local_timer_starting_cpu(unsigned int cpu)
{
- int cpu = smp_processor_id();
+ struct clock_event_device *ce = per_cpu_ptr(sirfsoc_clockevent, cpu);
struct irqaction *action;
if (cpu == 0)
@@ -203,50 +203,27 @@ static int sirfsoc_local_timer_setup(struct clock_event_device *ce)
return 0;
}
-static void sirfsoc_local_timer_stop(struct clock_event_device *ce)
+static int sirfsoc_local_timer_dying_cpu(unsigned int cpu)
{
- int cpu = smp_processor_id();
-
sirfsoc_timer_count_disable(1);
if (cpu == 0)
remove_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq);
else
remove_irq(sirfsoc_timer1_irq.irq, &sirfsoc_timer1_irq);
+ return 0;
}
-static int sirfsoc_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
-{
- /*
- * Grab cpu pointer in each case to avoid spurious
- * preemptible warnings
- */
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_STARTING:
- sirfsoc_local_timer_setup(this_cpu_ptr(sirfsoc_clockevent));
- break;
- case CPU_DYING:
- sirfsoc_local_timer_stop(this_cpu_ptr(sirfsoc_clockevent));
- break;
- }
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block sirfsoc_cpu_nb = {
- .notifier_call = sirfsoc_cpu_notify,
-};
-
static int __init sirfsoc_clockevent_init(void)
{
sirfsoc_clockevent = alloc_percpu(struct clock_event_device);
BUG_ON(!sirfsoc_clockevent);
- BUG_ON(register_cpu_notifier(&sirfsoc_cpu_nb));
-
- /* Immediately configure the timer on the boot CPU */
- return sirfsoc_local_timer_setup(this_cpu_ptr(sirfsoc_clockevent));
+ /* Install and invoke hotplug callbacks */
+ return cpuhp_setup_state(CPUHP_AP_MARCO_TIMER_STARTING,
+ "AP_MARCO_TIMER_STARTING",
+ sirfsoc_local_timer_starting_cpu,
+ sirfsoc_local_timer_dying_cpu);
}
/* initialize the kernel jiffy timer source */
diff --git a/drivers/hwtracing/coresight/coresight-etm3x.c b/drivers/hwtracing/coresight/coresight-etm3x.c
index d83ab82672e4..2de4cad9c5ed 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x.c
@@ -51,6 +51,8 @@ module_param_named(boot_enable, boot_enable, int, S_IRUGO);
static int etm_count;
static struct etm_drvdata *etmdrvdata[NR_CPUS];
+static enum cpuhp_state hp_online;
+
/*
* Memory mapped writes to clear os lock are not supported on some processors
* and OS lock must be unlocked before any memory mapped access on such
@@ -481,8 +483,7 @@ static int etm_enable_sysfs(struct coresight_device *csdev)
/*
* Configure the ETM only if the CPU is online. If it isn't online
- * hw configuration will take place when 'CPU_STARTING' is received
- * in @etm_cpu_callback.
+ * hw configuration will take place on the local CPU during bring up.
*/
if (cpu_online(drvdata->cpu)) {
ret = smp_call_function_single(drvdata->cpu,
@@ -641,47 +642,44 @@ static const struct coresight_ops etm_cs_ops = {
.source_ops = &etm_source_ops,
};
-static int etm_cpu_callback(struct notifier_block *nfb, unsigned long action,
- void *hcpu)
+static int etm_online_cpu(unsigned int cpu)
{
- unsigned int cpu = (unsigned long)hcpu;
-
if (!etmdrvdata[cpu])
- goto out;
+ return 0;
- switch (action & (~CPU_TASKS_FROZEN)) {
- case CPU_STARTING:
- spin_lock(&etmdrvdata[cpu]->spinlock);
- if (!etmdrvdata[cpu]->os_unlock) {
- etm_os_unlock(etmdrvdata[cpu]);
- etmdrvdata[cpu]->os_unlock = true;
- }
-
- if (local_read(&etmdrvdata[cpu]->mode))
- etm_enable_hw(etmdrvdata[cpu]);
- spin_unlock(&etmdrvdata[cpu]->spinlock);
- break;
+ if (etmdrvdata[cpu]->boot_enable && !etmdrvdata[cpu]->sticky_enable)
+ coresight_enable(etmdrvdata[cpu]->csdev);
+ return 0;
+}
- case CPU_ONLINE:
- if (etmdrvdata[cpu]->boot_enable &&
- !etmdrvdata[cpu]->sticky_enable)
- coresight_enable(etmdrvdata[cpu]->csdev);
- break;
+static int etm_starting_cpu(unsigned int cpu)
+{
+ if (!etmdrvdata[cpu])
+ return 0;
- case CPU_DYING:
- spin_lock(&etmdrvdata[cpu]->spinlock);
- if (local_read(&etmdrvdata[cpu]->mode))
- etm_disable_hw(etmdrvdata[cpu]);
- spin_unlock(&etmdrvdata[cpu]->spinlock);
- break;
+ spin_lock(&etmdrvdata[cpu]->spinlock);
+ if (!etmdrvdata[cpu]->os_unlock) {
+ etm_os_unlock(etmdrvdata[cpu]);
+ etmdrvdata[cpu]->os_unlock = true;
}
-out:
- return NOTIFY_OK;
+
+ if (local_read(&etmdrvdata[cpu]->mode))
+ etm_enable_hw(etmdrvdata[cpu]);
+ spin_unlock(&etmdrvdata[cpu]->spinlock);
+ return 0;
}
-static struct notifier_block etm_cpu_notifier = {
- .notifier_call = etm_cpu_callback,
-};
+static int etm_dying_cpu(unsigned int cpu)
+{
+ if (!etmdrvdata[cpu])
+ return 0;
+
+ spin_lock(&etmdrvdata[cpu]->spinlock);
+ if (local_read(&etmdrvdata[cpu]->mode))
+ etm_disable_hw(etmdrvdata[cpu]);
+ spin_unlock(&etmdrvdata[cpu]->spinlock);
+ return 0;
+}
static bool etm_arch_supported(u8 arch)
{
@@ -806,9 +804,17 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
etm_init_arch_data, drvdata, 1))
dev_err(dev, "ETM arch init failed\n");
- if (!etm_count++)
- register_hotcpu_notifier(&etm_cpu_notifier);
-
+ if (!etm_count++) {
+ cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING,
+ "AP_ARM_CORESIGHT_STARTING",
+ etm_starting_cpu, etm_dying_cpu);
+ ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+ "AP_ARM_CORESIGHT_ONLINE",
+ etm_online_cpu, NULL);
+ if (ret < 0)
+ goto err_arch_supported;
+ hp_online = ret;
+ }
put_online_cpus();
if (etm_arch_supported(drvdata->arch) == false) {
@@ -839,7 +845,6 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
pm_runtime_put(&adev->dev);
dev_info(dev, "%s initialized\n", (char *)id->data);
-
if (boot_enable) {
coresight_enable(drvdata->csdev);
drvdata->boot_enable = true;
@@ -848,8 +853,11 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
return 0;
err_arch_supported:
- if (--etm_count == 0)
- unregister_hotcpu_notifier(&etm_cpu_notifier);
+ if (--etm_count == 0) {
+ cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING);
+ if (hp_online)
+ cpuhp_remove_state_nocalls(hp_online);
+ }
return ret;
}
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index 462f0dc15757..1a5e0d14c1dd 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -48,6 +48,8 @@ static int etm4_count;
static struct etmv4_drvdata *etmdrvdata[NR_CPUS];
static void etm4_set_default(struct etmv4_config *config);
+static enum cpuhp_state hp_online;
+
static void etm4_os_unlock(struct etmv4_drvdata *drvdata)
{
/* Writing any value to ETMOSLAR unlocks the trace registers */
@@ -673,47 +675,44 @@ void etm4_config_trace_mode(struct etmv4_config *config)
config->addr_acc[ETM_DEFAULT_ADDR_COMP + 1] = addr_acc;
}
-static int etm4_cpu_callback(struct notifier_block *nfb, unsigned long action,
- void *hcpu)
+static int etm4_online_cpu(unsigned int cpu)
{
- unsigned int cpu = (unsigned long)hcpu;
-
if (!etmdrvdata[cpu])
- goto out;
-
- switch (action & (~CPU_TASKS_FROZEN)) {
- case CPU_STARTING:
- spin_lock(&etmdrvdata[cpu]->spinlock);
- if (!etmdrvdata[cpu]->os_unlock) {
- etm4_os_unlock(etmdrvdata[cpu]);
- etmdrvdata[cpu]->os_unlock = true;
- }
-
- if (local_read(&etmdrvdata[cpu]->mode))
- etm4_enable_hw(etmdrvdata[cpu]);
- spin_unlock(&etmdrvdata[cpu]->spinlock);
- break;
+ return 0;
- case CPU_ONLINE:
- if (etmdrvdata[cpu]->boot_enable &&
- !etmdrvdata[cpu]->sticky_enable)
- coresight_enable(etmdrvdata[cpu]->csdev);
- break;
+ if (etmdrvdata[cpu]->boot_enable && !etmdrvdata[cpu]->sticky_enable)
+ coresight_enable(etmdrvdata[cpu]->csdev);
+ return 0;
+}
- case CPU_DYING:
- spin_lock(&etmdrvdata[cpu]->spinlock);
- if (local_read(&etmdrvdata[cpu]->mode))
- etm4_disable_hw(etmdrvdata[cpu]);
- spin_unlock(&etmdrvdata[cpu]->spinlock);
- break;
+static int etm4_starting_cpu(unsigned int cpu)
+{
+ if (!etmdrvdata[cpu])
+ return 0;
+
+ spin_lock(&etmdrvdata[cpu]->spinlock);
+ if (!etmdrvdata[cpu]->os_unlock) {
+ etm4_os_unlock(etmdrvdata[cpu]);
+ etmdrvdata[cpu]->os_unlock = true;
}
-out:
- return NOTIFY_OK;
+
+ if (local_read(&etmdrvdata[cpu]->mode))
+ etm4_enable_hw(etmdrvdata[cpu]);
+ spin_unlock(&etmdrvdata[cpu]->spinlock);
+ return 0;
}
-static struct notifier_block etm4_cpu_notifier = {
- .notifier_call = etm4_cpu_callback,
-};
+static int etm4_dying_cpu(unsigned int cpu)
+{
+ if (!etmdrvdata[cpu])
+ return 0;
+
+ spin_lock(&etmdrvdata[cpu]->spinlock);
+ if (local_read(&etmdrvdata[cpu]->mode))
+ etm4_disable_hw(etmdrvdata[cpu]);
+ spin_unlock(&etmdrvdata[cpu]->spinlock);
+ return 0;
+}
static void etm4_init_trace_id(struct etmv4_drvdata *drvdata)
{
@@ -767,8 +766,17 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
etm4_init_arch_data, drvdata, 1))
dev_err(dev, "ETM arch init failed\n");
- if (!etm4_count++)
- register_hotcpu_notifier(&etm4_cpu_notifier);
+ if (!etm4_count++) {
+ cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CORESIGHT4_STARTING,
+ "AP_ARM_CORESIGHT4_STARTING",
+ etm4_starting_cpu, etm4_dying_cpu);
+ ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+ "AP_ARM_CORESIGHT4_ONLINE",
+ etm4_online_cpu, NULL);
+ if (ret < 0)
+ goto err_arch_supported;
+ hp_online = ret;
+ }
put_online_cpus();
@@ -809,8 +817,11 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
return 0;
err_arch_supported:
- if (--etm4_count == 0)
- unregister_hotcpu_notifier(&etm4_cpu_notifier);
+ if (--etm4_count == 0) {
+ cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT4_STARTING);
+ if (hp_online)
+ cpuhp_remove_state_nocalls(hp_online);
+ }
return ret;
}
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index 7c42b1d13faf..8bcee65a0b8c 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -345,38 +345,20 @@ static void armada_mpic_send_doorbell(const struct cpumask *mask,
ARMADA_370_XP_SW_TRIG_INT_OFFS);
}
-static int armada_xp_mpic_secondary_init(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
+static int armada_xp_mpic_starting_cpu(unsigned int cpu)
{
- if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) {
- armada_xp_mpic_perf_init();
- armada_xp_mpic_smp_cpu_init();
- }
-
- return NOTIFY_OK;
+ armada_xp_mpic_perf_init();
+ armada_xp_mpic_smp_cpu_init();
+ return 0;
}
-static struct notifier_block armada_370_xp_mpic_cpu_notifier = {
- .notifier_call = armada_xp_mpic_secondary_init,
- .priority = 100,
-};
-
-static int mpic_cascaded_secondary_init(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
+static int mpic_cascaded_starting_cpu(unsigned int cpu)
{
- if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) {
- armada_xp_mpic_perf_init();
- enable_percpu_irq(parent_irq, IRQ_TYPE_NONE);
- }
-
- return NOTIFY_OK;
+ armada_xp_mpic_perf_init();
+ enable_percpu_irq(parent_irq, IRQ_TYPE_NONE);
+ return 0;
}
-
-static struct notifier_block mpic_cascaded_cpu_notifier = {
- .notifier_call = mpic_cascaded_secondary_init,
- .priority = 100,
-};
-#endif /* CONFIG_SMP */
+#endif
static const struct irq_domain_ops armada_370_xp_mpic_irq_ops = {
.map = armada_370_xp_mpic_irq_map,
@@ -595,11 +577,15 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
set_handle_irq(armada_370_xp_handle_irq);
#ifdef CONFIG_SMP
set_smp_cross_call(armada_mpic_send_doorbell);
- register_cpu_notifier(&armada_370_xp_mpic_cpu_notifier);
+ cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_ARMADA_XP_STARTING,
+ "AP_IRQ_ARMADA_XP_STARTING",
+ armada_xp_mpic_starting_cpu, NULL);
#endif
} else {
#ifdef CONFIG_SMP
- register_cpu_notifier(&mpic_cascaded_cpu_notifier);
+ cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_ARMADA_CASC_STARTING,
+ "AP_IRQ_ARMADA_CASC_STARTING",
+ mpic_cascaded_starting_cpu, NULL);
#endif
irq_set_chained_handler(parent_irq,
armada_370_xp_mpic_handle_cascade_irq);
diff --git a/drivers/irqchip/irq-bcm2836.c b/drivers/irqchip/irq-bcm2836.c
index df1949c0aa23..d96b2c947e74 100644
--- a/drivers/irqchip/irq-bcm2836.c
+++ b/drivers/irqchip/irq-bcm2836.c
@@ -202,26 +202,19 @@ static void bcm2836_arm_irqchip_send_ipi(const struct cpumask *mask,
}
}
-/* Unmasks the IPI on the CPU when it's online. */
-static int bcm2836_arm_irqchip_cpu_notify(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
+static int bcm2836_cpu_starting(unsigned int cpu)
{
- unsigned int cpu = (unsigned long)hcpu;
- unsigned int int_reg = LOCAL_MAILBOX_INT_CONTROL0;
- unsigned int mailbox = 0;
-
- if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
- bcm2836_arm_irqchip_unmask_per_cpu_irq(int_reg, mailbox, cpu);
- else if (action == CPU_DYING)
- bcm2836_arm_irqchip_mask_per_cpu_irq(int_reg, mailbox, cpu);
-
- return NOTIFY_OK;
+ bcm2836_arm_irqchip_unmask_per_cpu_irq(LOCAL_MAILBOX_INT_CONTROL0, 0,
+ cpu);
+ return 0;
}
-static struct notifier_block bcm2836_arm_irqchip_cpu_notifier = {
- .notifier_call = bcm2836_arm_irqchip_cpu_notify,
- .priority = 100,
-};
+static int bcm2836_cpu_dying(unsigned int cpu)
+{
+ bcm2836_arm_irqchip_mask_per_cpu_irq(LOCAL_MAILBOX_INT_CONTROL0, 0,
+ cpu);
+ return 0;
+}
#ifdef CONFIG_ARM
static int __init bcm2836_smp_boot_secondary(unsigned int cpu,
@@ -251,10 +244,9 @@ bcm2836_arm_irqchip_smp_init(void)
{
#ifdef CONFIG_SMP
/* Unmask IPIs to the boot CPU. */
- bcm2836_arm_irqchip_cpu_notify(&bcm2836_arm_irqchip_cpu_notifier,
- CPU_STARTING,
- (void *)(uintptr_t)smp_processor_id());
- register_cpu_notifier(&bcm2836_arm_irqchip_cpu_notifier);
+ cpuhp_setup_state(CPUHP_AP_IRQ_BCM2836_STARTING,
+ "AP_IRQ_BCM2836_STARTING", bcm2836_cpu_starting,
+ bcm2836_cpu_dying);
set_smp_cross_call(bcm2836_arm_irqchip_send_ipi);
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 2c5ba0e704bf..6fc56c3466b0 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -538,23 +538,13 @@ static void gic_cpu_init(void)
}
#ifdef CONFIG_SMP
-static int gic_secondary_init(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
+
+static int gic_starting_cpu(unsigned int cpu)
{
- if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
- gic_cpu_init();
- return NOTIFY_OK;
+ gic_cpu_init();
+ return 0;
}
-/*
- * Notifier for enabling the GIC CPU interface. Set an arbitrarily high
- * priority because the GIC needs to be up before the ARM generic timers.
- */
-static struct notifier_block gic_cpu_notifier = {
- .notifier_call = gic_secondary_init,
- .priority = 100,
-};
-
static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
unsigned long cluster_id)
{
@@ -634,7 +624,9 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
static void gic_smp_init(void)
{
set_smp_cross_call(gic_raise_softirq);
- register_cpu_notifier(&gic_cpu_notifier);
+ cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GICV3_STARTING,
+ "AP_IRQ_GICV3_STARTING", gic_starting_cpu,
+ NULL);
}
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 1de07eb5839c..c2cab572c511 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -984,25 +984,12 @@ static int gic_irq_domain_translate(struct irq_domain *d,
return -EINVAL;
}
-#ifdef CONFIG_SMP
-static int gic_secondary_init(struct notifier_block *nfb, unsigned long action,
- void *hcpu)
+static int gic_starting_cpu(unsigned int cpu)
{
- if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
- gic_cpu_init(&gic_data[0]);
- return NOTIFY_OK;
+ gic_cpu_init(&gic_data[0]);
+ return 0;
}
-/*
- * Notifier for enabling the GIC CPU interface. Set an arbitrarily high
- * priority because the GIC needs to be up before the ARM generic timers.
- */
-static struct notifier_block gic_cpu_notifier = {
- .notifier_call = gic_secondary_init,
- .priority = 100,
-};
-#endif
-
static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs, void *arg)
{
@@ -1177,8 +1164,10 @@ static int __init __gic_init_bases(struct gic_chip_data *gic,
gic_cpu_map[i] = 0xff;
#ifdef CONFIG_SMP
set_smp_cross_call(gic_raise_softirq);
- register_cpu_notifier(&gic_cpu_notifier);
#endif
+ cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
+ "AP_IRQ_GIC_STARTING",
+ gic_starting_cpu, NULL);
set_handle_irq(gic_handle_irq);
if (static_key_true(&supports_deactivate))
pr_info("GIC: Using split EOI/Deactivate mode\n");
diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c
index 9e25d8ce08e5..021b0e0833c1 100644
--- a/drivers/irqchip/irq-hip04.c
+++ b/drivers/irqchip/irq-hip04.c
@@ -342,26 +342,12 @@ static int hip04_irq_domain_xlate(struct irq_domain *d,
return ret;
}
-#ifdef CONFIG_SMP
-static int hip04_irq_secondary_init(struct notifier_block *nfb,
- unsigned long action,
- void *hcpu)
+static int hip04_irq_starting_cpu(unsigned int cpu)
{
- if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
- hip04_irq_cpu_init(&hip04_data);
- return NOTIFY_OK;
+ hip04_irq_cpu_init(&hip04_data);
+ return 0;
}
-/*
- * Notifier for enabling the INTC CPU interface. Set an arbitrarily high
- * priority because the GIC needs to be up before the ARM generic timers.
- */
-static struct notifier_block hip04_irq_cpu_notifier = {
- .notifier_call = hip04_irq_secondary_init,
- .priority = 100,
-};
-#endif
-
static const struct irq_domain_ops hip04_irq_domain_ops = {
.map = hip04_irq_domain_map,
.xlate = hip04_irq_domain_xlate,
@@ -417,13 +403,12 @@ hip04_of_init(struct device_node *node, struct device_node *parent)
#ifdef CONFIG_SMP
set_smp_cross_call(hip04_raise_softirq);
- register_cpu_notifier(&hip04_irq_cpu_notifier);
#endif
set_handle_irq(hip04_handle_irq);
hip04_irq_dist_init(&hip04_data);
- hip04_irq_cpu_init(&hip04_data);
-
+ cpuhp_setup_state(CPUHP_AP_IRQ_HIP04_STARTING, "AP_IRQ_HIP04_STARTING",
+ hip04_irq_starting_cpu, NULL);
return 0;
}
IRQCHIP_DECLARE(hip04_intc, "hisilicon,hip04-intc", hip04_of_init);
diff --git a/drivers/leds/trigger/ledtrig-cpu.c b/drivers/leds/trigger/ledtrig-cpu.c
index 938467fb82be..22f0634dd3fa 100644
--- a/drivers/leds/trigger/ledtrig-cpu.c
+++ b/drivers/leds/trigger/ledtrig-cpu.c
@@ -92,29 +92,22 @@ static struct syscore_ops ledtrig_cpu_syscore_ops = {
.resume = ledtrig_cpu_syscore_resume,
};
-static int ledtrig_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
+static int ledtrig_online_cpu(unsigned int cpu)
{
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_STARTING:
- ledtrig_cpu(CPU_LED_START);
- break;
- case CPU_DYING:
- ledtrig_cpu(CPU_LED_STOP);
- break;
- }
-
- return NOTIFY_OK;
+ ledtrig_cpu(CPU_LED_START);
+ return 0;
}
-
-static struct notifier_block ledtrig_cpu_nb = {
- .notifier_call = ledtrig_cpu_notify,
-};
+static int ledtrig_prepare_down_cpu(unsigned int cpu)
+{
+ ledtrig_cpu(CPU_LED_STOP);
+ return 0;
+}
static int __init ledtrig_cpu_init(void)
{
int cpu;
+ int ret;
/* Supports up to 9999 cpu cores */
BUILD_BUG_ON(CONFIG_NR_CPUS > 9999);
@@ -133,7 +126,12 @@ static int __init ledtrig_cpu_init(void)
}
register_syscore_ops(&ledtrig_cpu_syscore_ops);
- register_cpu_notifier(&ledtrig_cpu_nb);
+
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "AP_LEDTRIG_STARTING",
+ ledtrig_online_cpu, ledtrig_prepare_down_cpu);
+ if (ret < 0)
+ pr_err("CPU hotplug notifier for ledtrig-cpu could not be registered: %d\n",
+ ret);
pr_info("ledtrig-cpu: registered to indicate activity on CPUs\n");
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 8e4d7f590b06..6ccb994bdfcb 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -688,30 +688,29 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
return 0;
}
+static DEFINE_MUTEX(arm_pmu_mutex);
+static LIST_HEAD(arm_pmu_list);
+
/*
* PMU hardware loses all context when a CPU goes offline.
* When a CPU is hotplugged back in, since some hardware registers are
* UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
* junk values out of them.
*/
-static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
- void *hcpu)
+static int arm_perf_starting_cpu(unsigned int cpu)
{
- int cpu = (unsigned long)hcpu;
- struct arm_pmu *pmu = container_of(b, struct arm_pmu, hotplug_nb);
-
- if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
- return NOTIFY_DONE;
-
- if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
- return NOTIFY_DONE;
+ struct arm_pmu *pmu;
- if (pmu->reset)
- pmu->reset(pmu);
- else
- return NOTIFY_DONE;
+ mutex_lock(&arm_pmu_mutex);
+ list_for_each_entry(pmu, &arm_pmu_list, entry) {
- return NOTIFY_OK;
+ if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
+ continue;
+ if (pmu->reset)
+ pmu->reset(pmu);
+ }
+ mutex_unlock(&arm_pmu_mutex);
+ return 0;
}
#ifdef CONFIG_CPU_PM
@@ -822,10 +821,9 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
if (!cpu_hw_events)
return -ENOMEM;
- cpu_pmu->hotplug_nb.notifier_call = cpu_pmu_notify;
- err = register_cpu_notifier(&cpu_pmu->hotplug_nb);
- if (err)
- goto out_hw_events;
+ mutex_lock(&arm_pmu_mutex);
+ list_add_tail(&cpu_pmu->entry, &arm_pmu_list);
+ mutex_unlock(&arm_pmu_mutex);
err = cpu_pm_pmu_register(cpu_pmu);
if (err)
@@ -861,8 +859,9 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
return 0;
out_unregister:
- unregister_cpu_notifier(&cpu_pmu->hotplug_nb);
-out_hw_events:
+ mutex_lock(&arm_pmu_mutex);
+ list_del(&cpu_pmu->entry);
+ mutex_unlock(&arm_pmu_mutex);
free_percpu(cpu_hw_events);
return err;
}
@@ -870,7 +869,9 @@ out_hw_events:
static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
{
cpu_pm_pmu_unregister(cpu_pmu);
- unregister_cpu_notifier(&cpu_pmu->hotplug_nb);
+ mutex_lock(&arm_pmu_mutex);
+ list_del(&cpu_pmu->entry);
+ mutex_unlock(&arm_pmu_mutex);
free_percpu(cpu_pmu->hw_events);
}
@@ -1061,3 +1062,17 @@ out_free:
kfree(pmu);
return ret;
}
+
+static int arm_pmu_hp_init(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_STARTING,
+ "AP_PERF_ARM_STARTING",
+ arm_perf_starting_cpu, NULL);
+ if (ret)
+ pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n",
+ ret);
+ return ret;
+}
+subsys_initcall(arm_pmu_hp_init);