author	Vincent Guittot <vincent.guittot@linaro.org>	2021-02-24 16:30:06 +0300
committer	Ingo Molnar <mingo@kernel.org>	2021-03-06 14:40:22 +0300
commit	c6f886546cb8a38617cdbe755fe50d3acd2463e4 (patch)
tree	7bc2bc591b13312964251693f7a005f00f616f59
parent	6553fc18179113a11835d5fde1735259f8943a55 (diff)
download	linux-c6f886546cb8a38617cdbe755fe50d3acd2463e4.tar.xz
sched/fair: Trigger the update of blocked load on newly idle cpu
Instead of waking up a random and already idle CPU, we can take advantage
of this_cpu being about to enter idle to run the ILB and update the
blocked load.

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
Link: https://lkml.kernel.org/r/20210224133007.28644-7-vincent.guittot@linaro.org
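For illustration only, not part of the commit: the handoff the patch implements can be sketched in standalone C11. The newly-idle path merely marks a per-CPU flag word; the idle-entry path atomically consumes the flag and does the blocked-load update itself, rather than IPI'ing a random idle CPU. All names here (demo_nohz_flags, demo_mark_stale, demo_enter_idle, the DEMO_* bits) are invented for the sketch; only the flag protocol mirrors the patch.

#include <stdatomic.h>
#include <stdio.h>

/* Demo flag bits, loosely mirroring the NOHZ_*_KICK defines in sched.h. */
#define DEMO_STATS_KICK   (1U << 1)
#define DEMO_NEWILB_KICK  (1U << 2)

/* Stand-in for one CPU's rq nohz_flags word. */
static atomic_uint demo_nohz_flags;

/* Newly-idle path (cf. nohz_newidle_balance): just note that the
 * blocked load is stale and defer the work to idle entry. */
static void demo_mark_stale(void)
{
	atomic_fetch_or(&demo_nohz_flags, DEMO_NEWILB_KICK);
}

/* Idle entry (cf. nohz_run_idle_balance): atomically consume the flag.
 * Run the update only when NEWILB is the sole pending bit; any other
 * pending kick means a softirq ILB will refresh the load anyway. */
static void demo_enter_idle(void)
{
	unsigned int flags = atomic_fetch_and(&demo_nohz_flags, ~DEMO_NEWILB_KICK);

	if (flags == DEMO_NEWILB_KICK)
		puts("this CPU updates blocked load before idling");
	else
		puts("skip: nothing pending, or softirq ILB will handle it");
}

int main(void)
{
	demo_mark_stale();
	demo_enter_idle();	/* flag was set: update runs here */
	demo_enter_idle();	/* flag already consumed: skip */
	return 0;
}

Built with e.g. cc -std=c11, the first idle entry performs the update and the second sees the flag already cleared.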
-rw-r--r--	kernel/sched/core.c	 2
-rw-r--r--	kernel/sched/fair.c	24
-rw-r--r--	kernel/sched/idle.c	 6
-rw-r--r--	kernel/sched/sched.h	 7
4 files changed, 35 insertions(+), 4 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f9dfb349146e..361974efc243 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -737,7 +737,7 @@ static void nohz_csd_func(void *info)
 	/*
 	 * Release the rq::nohz_csd.
 	 */
-	flags = atomic_fetch_andnot(NOHZ_KICK_MASK, nohz_flags(cpu));
+	flags = atomic_fetch_andnot(NOHZ_KICK_MASK | NOHZ_NEWILB_KICK, nohz_flags(cpu));
 	WARN_ON(!(flags & NOHZ_KICK_MASK));
 
 	rq->idle_balance = idle_cpu(cpu);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 356a2456c5e9..e87e1b3bcdca 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -10453,6 +10453,24 @@ static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
 	return true;
 }
 
+/*
+ * Check if we need to run the ILB for updating blocked load before entering
+ * idle state.
+ */
+void nohz_run_idle_balance(int cpu)
+{
+	unsigned int flags;
+
+	flags = atomic_fetch_andnot(NOHZ_NEWILB_KICK, nohz_flags(cpu));
+
+	/*
+	 * Update the blocked load only if no SCHED_SOFTIRQ is about to
+	 * happen; if NOHZ_STATS_KICK is set, that softirq will do the same.
+	 */
+	if ((flags == NOHZ_NEWILB_KICK) && !need_resched())
+		_nohz_idle_balance(cpu_rq(cpu), NOHZ_STATS_KICK, CPU_IDLE);
+}
+
 static void nohz_newidle_balance(struct rq *this_rq)
 {
 	int this_cpu = this_rq->cpu;
@@ -10474,10 +10492,10 @@ static void nohz_newidle_balance(struct rq *this_rq)
 		return;
 
 	/*
-	 * Blocked load of idle CPUs need to be updated.
-	 * Kick an ILB to update statistics.
+	 * Set the need to trigger ILB in order to update blocked load
+	 * before entering idle state.
 	 */
-	kick_ilb(NOHZ_STATS_KICK);
+	atomic_or(NOHZ_NEWILB_KICK, nohz_flags(this_cpu));
 }
 
 #else /* !CONFIG_NO_HZ_COMMON */
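One subtlety in nohz_run_idle_balance() above is worth spelling out: the test is flags == NOHZ_NEWILB_KICK, not flags & NOHZ_NEWILB_KICK. If any other kick bit is also pending, a SCHED_SOFTIRQ ILB is already on its way and will refresh the blocked load, so the idle-entry path skips the duplicate work. A tiny standalone check of the three cases (the DEMO_* values are invented for this sketch):

#include <stdio.h>

#define DEMO_STATS_KICK   (1U << 1)
#define DEMO_NEWILB_KICK  (1U << 2)

/* Equality, not a bit test: any extra pending bit defers to the softirq ILB. */
static const char *demo_decide(unsigned int flags)
{
	return (flags == DEMO_NEWILB_KICK) ? "update blocked load here"
					   : "skip (idle, or softirq ILB pending)";
}

int main(void)
{
	printf("NEWILB only     -> %s\n", demo_decide(DEMO_NEWILB_KICK));
	printf("NEWILB + STATS  -> %s\n", demo_decide(DEMO_NEWILB_KICK | DEMO_STATS_KICK));
	printf("nothing pending -> %s\n", demo_decide(0));
	return 0;
}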
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 7199e6f23789..7a92d6054aba 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -261,6 +261,12 @@ exit_idle:
 static void do_idle(void)
 {
 	int cpu = smp_processor_id();
+
+	/*
+	 * Check if we need to update blocked load
+	 */
+	nohz_run_idle_balance(cpu);
+
 	/*
 	 * If the arch has a polling bit, we maintain an invariant:
 	 *
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 10a1522b1e30..0ddc9a6ff03a 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2385,9 +2385,11 @@ extern void cfs_bandwidth_usage_dec(void);
 #ifdef CONFIG_NO_HZ_COMMON
 #define NOHZ_BALANCE_KICK_BIT	0
 #define NOHZ_STATS_KICK_BIT	1
+#define NOHZ_NEWILB_KICK_BIT	2
 
 #define NOHZ_BALANCE_KICK	BIT(NOHZ_BALANCE_KICK_BIT)
 #define NOHZ_STATS_KICK		BIT(NOHZ_STATS_KICK_BIT)
+#define NOHZ_NEWILB_KICK	BIT(NOHZ_NEWILB_KICK_BIT)
 
 #define NOHZ_KICK_MASK	(NOHZ_BALANCE_KICK | NOHZ_STATS_KICK)
@@ -2398,6 +2400,11 @@ extern void nohz_balance_exit_idle(struct rq *rq);
 static inline void nohz_balance_exit_idle(struct rq *rq) { }
 #endif
 
+#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
+extern void nohz_run_idle_balance(int cpu);
+#else
+static inline void nohz_run_idle_balance(int cpu) { }
+#endif
+
 #ifdef CONFIG_SMP
 static inline