author     Ingo Molnar <mingo@kernel.org>  2024-03-08 14:18:10 +0300
committer  Ingo Molnar <mingo@kernel.org>  2024-03-12 13:59:59 +0300
commit     14ff4dbd34f46cc6b6105f549983321241ccbba9 (patch)
tree       744c74967f445e7332ab7022f9793e9ec20b60a9 /kernel/sched
parent     983be0628c061989b6cc175d2f5e429b40699fbb (diff)
sched/balancing: Rename rebalance_domains() => sched_balance_domains()
Standardize scheduler load-balancing function names on the
sched_balance_() prefix.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Shrikanth Hegde <sshegde@linux.ibm.com>
Link: https://lore.kernel.org/r/20240308111819.1101550-5-mingo@kernel.org
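The rename is mechanical: call sites keep the same arguments and semantics, and only the identifier gains the standardized prefix. A minimal before/after sketch of a call site (illustrative only, not a hunk from this patch):

	/* Before this patch: */
	rebalance_domains(rq, CPU_IDLE);

	/* After this patch -- same arguments, same behavior: */
	sched_balance_domains(rq, CPU_IDLE);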
Diffstat (limited to 'kernel/sched')
 kernel/sched/fair.c  | 8 ++++----
 kernel/sched/sched.h | 2 +-
 2 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e377b675920a..330788b0c617 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -11685,7 +11685,7 @@ static inline bool update_newidle_cost(struct sched_domain *sd, u64 cost)
  *
  * Balancing parameters are set up in init_sched_domains.
  */
-static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
+static void sched_balance_domains(struct rq *rq, enum cpu_idle_type idle)
 {
 	int continue_balancing = 1;
 	int cpu = rq->cpu;
@@ -12161,7 +12161,7 @@ static void _nohz_idle_balance(struct rq *this_rq, unsigned int flags)
 		rq_unlock_irqrestore(rq, &rf);
 
 		if (flags & NOHZ_BALANCE_KICK)
-			rebalance_domains(rq, CPU_IDLE);
+			sched_balance_domains(rq, CPU_IDLE);
 	}
 
 	if (time_after(next_balance, rq->next_balance)) {
@@ -12422,7 +12422,7 @@ static __latent_entropy void sched_balance_softirq(struct softirq_action *h)
 	/*
 	 * If this CPU has a pending NOHZ_BALANCE_KICK, then do the
 	 * balancing on behalf of the other idle CPUs whose ticks are
-	 * stopped. Do nohz_idle_balance *before* rebalance_domains to
+	 * stopped. Do nohz_idle_balance *before* sched_balance_domains to
 	 * give the idle CPUs a chance to load balance. Else we may
 	 * load balance only within the local sched_domain hierarchy
 	 * and abort nohz_idle_balance altogether if we pull some load.
@@ -12432,7 +12432,7 @@ static __latent_entropy void sched_balance_softirq(struct softirq_action *h)
 
 	/* normal load balance */
 	update_blocked_averages(this_rq->cpu);
-	rebalance_domains(this_rq, idle);
+	sched_balance_domains(this_rq, idle);
 }
 
 /*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 5b0ddb0e6017..41024c1c49b4 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2904,7 +2904,7 @@ extern void cfs_bandwidth_usage_dec(void);
 #define NOHZ_NEWILB_KICK_BIT	2
 #define NOHZ_NEXT_KICK_BIT	3
 
-/* Run rebalance_domains() */
+/* Run sched_balance_domains() */
 #define NOHZ_BALANCE_KICK	BIT(NOHZ_BALANCE_KICK_BIT)
 /* Update blocked load */
 #define NOHZ_STATS_KICK		BIT(NOHZ_STATS_KICK_BIT)
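The comment updated in the third fair.c hunk documents an ordering constraint worth spelling out: the NOHZ pass must run before the local pass, otherwise a load pull during local balancing could abort balancing on behalf of the tick-stopped idle CPUs. A condensed sketch of that control flow, paraphrasing sched_balance_softirq() under the new naming (the balance_softirq_sketch() name is ours; this is not the complete kernel function):

	/* Sketch only: condensed from sched_balance_softirq() in kernel/sched/fair.c. */
	static void balance_softirq_sketch(struct rq *this_rq, enum cpu_idle_type idle)
	{
		/*
		 * 1) NOHZ pass first: if this CPU was kicked, balance on behalf
		 *    of the idle CPUs whose ticks are stopped; if that pass
		 *    handled the balancing, we are done.
		 */
		if (nohz_idle_balance(this_rq, idle))
			return;

		/* 2) Local pass within this CPU's sched_domain hierarchy. */
		update_blocked_averages(this_rq->cpu);
		sched_balance_domains(this_rq, idle);
	}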