author     Ulf Hansson <ulf.hansson@linaro.org>  2019-10-10 13:01:48 +0300
committer  Ulf Hansson <ulf.hansson@linaro.org>  2020-01-02 18:52:18 +0300
commit     9c6ceecb6541954dfc410aa8883f872469326c73 (patch)
tree       1599807edf38ae8b942b60e50efe1d19d16bb02f /drivers/cpuidle/cpuidle-psci.c
parent     ce85aef570df406fc8cb360e7351024570ef7d4f (diff)
download   linux-9c6ceecb6541954dfc410aa8883f872469326c73.tar.xz
cpuidle: psci: Support CPU hotplug for the hierarchical model
When the hierarchical CPU topology is used and a CPU is put offline, that CPU prevents its PM domain from being powered off, because genpd observes the corresponding attached device as being active from a runtime PM point of view. Furthermore, any potential master PM domains are also prevented from being powered off.

To address this limitation, let's add a new CPU hotplug state (CPUHP_AP_CPU_PM_STARTING) and register up/down callbacks for it, which allows us to deal with runtime PM accordingly.

Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
Reviewed-by: Sudeep Holla <sudeep.holla@arm.com>
Acked-by: Rafael J. Wysocki <rafael@kernel.org>
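For context, below is a minimal standalone sketch of the pattern the patch relies on: hotplug up/down callbacks that take and drop a runtime PM reference on the device attached to the CPU's PM domain. The per-CPU pointer example_pd_dev, the example_* callback names and the "example/cpu-pm:online" state name are hypothetical stand-ins (the driver uses psci_cpuidle_data.dev and "cpuidle/psci:online"); cpuhp_setup_state_nocalls(), pm_runtime_get_sync() and pm_runtime_put_sync() are the real kernel APIs used by the patch. The dynamic CPUHP_AP_ONLINE_DYN state is used here only to keep the sketch generic, whereas the patch registers the fixed CPUHP_AP_CPU_PM_STARTING state.

#include <linux/cpuhotplug.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/pm_runtime.h>

/* Hypothetical per-CPU pointer to the genpd-attached device; in the patch
 * this role is played by psci_cpuidle_data.dev. */
static DEFINE_PER_CPU(struct device *, example_pd_dev);

static int example_cpuhp_up(unsigned int cpu)
{
	struct device *pd_dev = per_cpu(example_pd_dev, cpu);

	/* Take a runtime PM reference so genpd keeps the PM domain
	 * powered while this CPU is online. */
	if (pd_dev)
		pm_runtime_get_sync(pd_dev);

	return 0;
}

static int example_cpuhp_down(unsigned int cpu)
{
	struct device *pd_dev = per_cpu(example_pd_dev, cpu);

	/* Drop the reference so the PM domain may be powered off while
	 * this CPU stays offline. */
	if (pd_dev)
		pm_runtime_put_sync(pd_dev);

	return 0;
}

static int __init example_cpuhp_init(void)
{
	int ret;

	/* A dynamic AP state keeps the sketch generic; _nocalls skips
	 * invoking the callbacks for CPUs that are already online. */
	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					"example/cpu-pm:online",
					example_cpuhp_up,
					example_cpuhp_down);
	return ret < 0 ? ret : 0;
}
device_initcall(example_cpuhp_init);

One design note: AP hotplug callbacks run on the CPU that is being brought up or taken down, which is why the driver below can look up its data with __this_cpu_read() rather than indexing by the cpu argument.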
Diffstat (limited to 'drivers/cpuidle/cpuidle-psci.c')
-rw-r--r--  drivers/cpuidle/cpuidle-psci.c  45
1 file changed, 44 insertions(+), 1 deletion(-)
diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c
index 6e7804e697ed..9d779be27071 100644
--- a/drivers/cpuidle/cpuidle-psci.c
+++ b/drivers/cpuidle/cpuidle-psci.c
@@ -8,6 +8,7 @@
#define pr_fmt(fmt) "CPUidle PSCI: " fmt
+#include <linux/cpuhotplug.h>
#include <linux/cpuidle.h>
#include <linux/cpumask.h>
#include <linux/cpu_pm.h>
@@ -31,6 +32,7 @@ struct psci_cpuidle_data {
static DEFINE_PER_CPU_READ_MOSTLY(struct psci_cpuidle_data, psci_cpuidle_data);
static DEFINE_PER_CPU(u32, domain_state);
+static bool psci_cpuidle_use_cpuhp __initdata;
static inline void psci_set_domain_state(u32 state)
{
@@ -72,6 +74,44 @@ static int psci_enter_domain_idle_state(struct cpuidle_device *dev,
return ret;
}
+static int psci_idle_cpuhp_up(unsigned int cpu)
+{
+ struct device *pd_dev = __this_cpu_read(psci_cpuidle_data.dev);
+
+ if (pd_dev)
+ pm_runtime_get_sync(pd_dev);
+
+ return 0;
+}
+
+static int psci_idle_cpuhp_down(unsigned int cpu)
+{
+ struct device *pd_dev = __this_cpu_read(psci_cpuidle_data.dev);
+
+ if (pd_dev) {
+ pm_runtime_put_sync(pd_dev);
+ /* Clear domain state to start fresh at next online. */
+ psci_set_domain_state(0);
+ }
+
+ return 0;
+}
+
+static void __init psci_idle_init_cpuhp(void)
+{
+ int err;
+
+ if (!psci_cpuidle_use_cpuhp)
+ return;
+
+ err = cpuhp_setup_state_nocalls(CPUHP_AP_CPU_PM_STARTING,
+ "cpuidle/psci:online",
+ psci_idle_cpuhp_up,
+ psci_idle_cpuhp_down);
+ if (err)
+ pr_warn("Failed %d while setup cpuhp state\n", err);
+}
+
static int psci_enter_idle_state(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int idx)
{
@@ -166,9 +206,11 @@ static int __init psci_dt_cpu_init_idle(struct cpuidle_driver *drv,
* selection of a shared state for the domain, assumes the
* domain states are all deeper states.
*/
- if (data->dev)
+ if (data->dev) {
drv->states[state_count - 1].enter =
psci_enter_domain_idle_state;
+ psci_cpuidle_use_cpuhp = true;
+ }
}
/* Idle states parsed correctly, store them in the per-cpu struct. */
@@ -289,6 +331,7 @@ static int __init psci_idle_init(void)
goto out_fail;
}
+ psci_idle_init_cpuhp();
return 0;
out_fail: