author     Peter Zijlstra <peterz@infradead.org>     2023-01-12 22:43:19 +0300
committer  Ingo Molnar <mingo@kernel.org>            2023-01-13 13:03:22 +0300
commit     8e9ab9e8da1eae61fdff35690d998eaf8cd527dc  (patch)
tree       33344cda132b119cca8111e51e9c3161995deb98  /drivers/cpuidle/cpuidle-riscv-sbi.c
parent     bb7b11258561e47abbacebf76e3ce4092953dfdf  (diff)
cpuidle, riscv: Push RCU-idle into driver
Doing RCU-idle outside the driver, only to then temporarily enable it
again, at least twice, before going idle is suboptimal. That is, once
implicitly through the cpu_pm_*() calls and once explicitly doing
ct_irq_*_irqon().

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Tested-by: Tony Lindgren <tony@atomide.com>
Tested-by: Ulf Hansson <ulf.hansson@linaro.org>
Reviewed-by: Anup Patel <anup@brainfault.org>
Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Link: https://lore.kernel.org/r/20230112195539.637185846@infradead.org
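The pattern the patch moves to can be condensed into a sketch like the one
below (illustrative only: example_enter() and example_register_state() are
made-up names and the bodies are trimmed to the RCU-relevant calls; the real
driver changes are in the diff that follows). Work that may use RCU, such as
runtime PM and genpd handling, stays outside the RCU-idle section, and only
the final suspend call is bracketed by ct_idle_enter()/ct_idle_exit().

#include <linux/context_tracking.h>
#include <linux/cpuidle.h>

/*
 * Hypothetical ->enter() callback for a state marked CPUIDLE_FLAG_RCU_IDLE:
 * RCU is still watching while the runtime PM / genpd bookkeeping runs, and
 * RCU-idle is entered only around the actual suspend operation.
 */
static int example_enter(struct cpuidle_device *dev,
			 struct cpuidle_driver *drv, int idx)
{
	int ret;

	/* Runtime PM / genpd bookkeeping would go here; RCU still watching. */

	ct_idle_enter();	/* RCU stops watching this CPU here */
	ret = idx;		/* stand-in for the real suspend call, e.g. sbi_suspend() */
	ct_idle_exit();		/* RCU is watching again from here on */

	return ret;
}

/* Opt the state out of the cpuidle core's own RCU-idle handling. */
static void example_register_state(struct cpuidle_driver *drv, int i)
{
	drv->states[i].flags |= CPUIDLE_FLAG_RCU_IDLE;
	drv->states[i].enter = example_enter;
}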
Diffstat (limited to 'drivers/cpuidle/cpuidle-riscv-sbi.c')
-rw-r--r--  drivers/cpuidle/cpuidle-riscv-sbi.c | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/drivers/cpuidle/cpuidle-riscv-sbi.c b/drivers/cpuidle/cpuidle-riscv-sbi.c
index 05fe2902df9a..cbdbb11b972b 100644
--- a/drivers/cpuidle/cpuidle-riscv-sbi.c
+++ b/drivers/cpuidle/cpuidle-riscv-sbi.c
@@ -121,12 +121,12 @@ static int __sbi_enter_domain_idle_state(struct cpuidle_device *dev,
 		return -1;
 
 	/* Do runtime PM to manage a hierarchical CPU toplogy. */
-	ct_irq_enter_irqson();
 	if (s2idle)
 		dev_pm_genpd_suspend(pd_dev);
 	else
 		pm_runtime_put_sync_suspend(pd_dev);
-	ct_irq_exit_irqson();
+
+	ct_idle_enter();
 
 	if (sbi_is_domain_state_available())
 		state = sbi_get_domain_state();
@@ -135,12 +135,12 @@ static int __sbi_enter_domain_idle_state(struct cpuidle_device *dev,
 
 	ret = sbi_suspend(state) ? -1 : idx;
 
-	ct_irq_enter_irqson();
+	ct_idle_exit();
+
 	if (s2idle)
 		dev_pm_genpd_resume(pd_dev);
 	else
 		pm_runtime_get_sync(pd_dev);
-	ct_irq_exit_irqson();
 
 	cpu_pm_exit();
 
@@ -251,6 +251,7 @@ static int sbi_dt_cpu_init_topology(struct cpuidle_driver *drv,
 	 * of a shared state for the domain, assumes the domain states are all
 	 * deeper states.
 	 */
+	drv->states[state_count - 1].flags |= CPUIDLE_FLAG_RCU_IDLE;
 	drv->states[state_count - 1].enter = sbi_enter_domain_idle_state;
 	drv->states[state_count - 1].enter_s2idle =
 		sbi_enter_s2idle_domain_idle_state;
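For context, CPUIDLE_FLAG_RCU_IDLE tells the cpuidle core not to perform the
RCU-idle transition itself for this state, leaving it to the driver as above.
A simplified sketch of that dispatch, assuming the rough shape of
cpuidle_enter_state() (sketch_enter_state() is a made-up name and the real
function does considerably more):

#include <linux/context_tracking.h>
#include <linux/cpuidle.h>

static int sketch_enter_state(struct cpuidle_device *dev,
			      struct cpuidle_driver *drv, int index)
{
	struct cpuidle_state *target_state = &drv->states[index];
	int entered_state;

	/* Unflagged states still get the core's own RCU-idle bracketing. */
	if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE))
		ct_idle_enter();

	entered_state = target_state->enter(dev, drv, index);

	if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE))
		ct_idle_exit();

	return entered_state;
}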