From d4a3dcbc4727966a64a64d57e2b5106a138d426d Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Tue, 15 Oct 2019 21:18:09 +0200
Subject: sched/rt, xen: Use CONFIG_PREEMPTION

CONFIG_PREEMPTION is selected by CONFIG_PREEMPT and by CONFIG_PREEMPT_RT.
Both PREEMPT and PREEMPT_RT require the same functionality which today
depends on CONFIG_PREEMPT.

Switch the preempt and xen-ops code over to use CONFIG_PREEMPTION.

Signed-off-by: Thomas Gleixner
Signed-off-by: Sebastian Andrzej Siewior
Signed-off-by: Thomas Gleixner
Reviewed-by: Juergen Gross
Cc: Boris Ostrovsky
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Stefano Stabellini
Cc: xen-devel@lists.xenproject.org
Link: https://lore.kernel.org/r/20191015191821.11479-23-bigeasy@linutronix.de
Signed-off-by: Ingo Molnar
---
 include/xen/xen-ops.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'include')

diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index d89969aa9942..095be1d66f31 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -215,7 +215,7 @@ bool xen_running_on_version_or_later(unsigned int major, unsigned int minor);
 
 void xen_efi_runtime_setup(void);
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 
 static inline void xen_preemptible_hcall_begin(void)
 {
@@ -239,6 +239,6 @@ static inline void xen_preemptible_hcall_end(void)
 	__this_cpu_write(xen_in_preemptible_hcall, false);
 }
 
-#endif /* CONFIG_PREEMPT */
+#endif /* CONFIG_PREEMPTION */
 
 #endif /* INCLUDE_XEN_OPS_H */
--
cgit v1.2.3

From 2496396fcb44404ead24b578c583d5286886e857 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Tue, 15 Oct 2019 21:18:10 +0200
Subject: sched/rt, fs: Use CONFIG_PREEMPTION
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

CONFIG_PREEMPTION is selected by CONFIG_PREEMPT and by CONFIG_PREEMPT_RT.
Both PREEMPT and PREEMPT_RT require the same functionality which today
depends on CONFIG_PREEMPT.

Switch the i_size() and part_nr_sects_…() code over to use
CONFIG_PREEMPTION. Also update the comment for fsstack_copy_inode_size()
to refer to CONFIG_PREEMPTION.

[bigeasy: +PREEMPT comments]

Signed-off-by: Thomas Gleixner
Signed-off-by: Sebastian Andrzej Siewior
Signed-off-by: Thomas Gleixner
Cc: Alexander Viro
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: linux-fsdevel@vger.kernel.org
Link: https://lore.kernel.org/r/20191015191821.11479-24-bigeasy@linutronix.de
Signed-off-by: Ingo Molnar
---
 fs/stack.c            | 6 +++---
 include/linux/fs.h    | 4 ++--
 include/linux/genhd.h | 6 +++---
 3 files changed, 8 insertions(+), 8 deletions(-)

(limited to 'include')

diff --git a/fs/stack.c b/fs/stack.c
index 4ef2c056269d..c9830924eb12 100644
--- a/fs/stack.c
+++ b/fs/stack.c
@@ -23,7 +23,7 @@ void fsstack_copy_inode_size(struct inode *dst, struct inode *src)
 
 	/*
 	 * But on 32-bit, we ought to make an effort to keep the two halves of
-	 * i_blocks in sync despite SMP or PREEMPT - though stat's
+	 * i_blocks in sync despite SMP or PREEMPTION - though stat's
 	 * generic_fillattr() doesn't bother, and we won't be applying quotas
 	 * (where i_blocks does become important) at the upper level.
 	 *
@@ -38,14 +38,14 @@ void fsstack_copy_inode_size(struct inode *dst, struct inode *src)
 		spin_unlock(&src->i_lock);
 
 	/*
-	 * If CONFIG_SMP or CONFIG_PREEMPT on 32-bit, it's vital for
+	 * If CONFIG_SMP or CONFIG_PREEMPTION on 32-bit, it's vital for
 	 * fsstack_copy_inode_size() to hold some lock around
 	 * i_size_write(), otherwise i_size_read() may spin forever (see
 	 * include/linux/fs.h).  We don't necessarily hold i_mutex when this
 	 * is called, so take i_lock for that case.
 	 *
 	 * And if on 32-bit, continue our effort to keep the two halves of
-	 * i_blocks in sync despite SMP or PREEMPT: use i_lock for that case
+	 * i_blocks in sync despite SMP or PREEMPTION: use i_lock for that case
 	 * too, and do both at once by combining the tests.
 	 *
 	 * There is none of this locking overhead in the 64-bit case.
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 98e0349adb52..dddfcbb140a7 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -855,7 +855,7 @@ static inline loff_t i_size_read(const struct inode *inode)
 		i_size = inode->i_size;
 	} while (read_seqcount_retry(&inode->i_size_seqcount, seq));
 	return i_size;
-#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
+#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
 	loff_t i_size;
 
 	preempt_disable();
@@ -880,7 +880,7 @@ static inline void i_size_write(struct inode *inode, loff_t i_size)
 	inode->i_size = i_size;
 	write_seqcount_end(&inode->i_size_seqcount);
 	preempt_enable();
-#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
+#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
 	preempt_disable();
 	inode->i_size = i_size;
 	preempt_enable();
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 8bb63027e4d6..a927829bb73a 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -718,7 +718,7 @@ static inline void hd_free_part(struct hd_struct *part)
  * accessor function.
  *
  * Code written along the lines of i_size_read() and i_size_write().
- * CONFIG_PREEMPT case optimizes the case of UP kernel with preemption
+ * CONFIG_PREEMPTION case optimizes the case of UP kernel with preemption
  * on.
  */
 static inline sector_t part_nr_sects_read(struct hd_struct *part)
@@ -731,7 +731,7 @@ static inline sector_t part_nr_sects_read(struct hd_struct *part)
 		nr_sects = part->nr_sects;
 	} while (read_seqcount_retry(&part->nr_sects_seq, seq));
 	return nr_sects;
-#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
+#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
 	sector_t nr_sects;
 
 	preempt_disable();
@@ -754,7 +754,7 @@ static inline void part_nr_sects_write(struct hd_struct *part, sector_t size)
 	write_seqcount_begin(&part->nr_sects_seq);
 	part->nr_sects = size;
 	write_seqcount_end(&part->nr_sects_seq);
-#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
+#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
 	preempt_disable();
 	part->nr_sects = size;
 	preempt_enable();
--
cgit v1.2.3

From a5e37de90e67ac1072a9a44bd0cec9f5e98ded08 Mon Sep 17 00:00:00 2001
From: Yangtao Li
Date: Sat, 14 Dec 2019 19:51:07 +0000
Subject: stop_machine: remove try_stop_cpus helper

try_stop_cpus is not used after this:

commit c190c3b16c0f ("rcu: Switch synchronize_sched_expedited() to
stop_one_cpu()")

So remove it.
Signed-off-by: Yangtao Li
Signed-off-by: Peter Zijlstra (Intel)
Link: https://lkml.kernel.org/r/20191214195107.26480-1-tiny.windzz@gmail.com
---
 include/linux/stop_machine.h |  7 -------
 kernel/stop_machine.c        | 30 ------------------------------
 2 files changed, 37 deletions(-)

(limited to 'include')

diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h
index f9a0c6189852..648298f877da 100644
--- a/include/linux/stop_machine.h
+++ b/include/linux/stop_machine.h
@@ -33,7 +33,6 @@ int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *
 bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
 			 struct cpu_stop_work *work_buf);
 int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg);
-int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg);
 void stop_machine_park(int cpu);
 void stop_machine_unpark(int cpu);
 void stop_machine_yield(const struct cpumask *cpumask);
@@ -90,12 +89,6 @@ static inline int stop_cpus(const struct cpumask *cpumask,
 	return -ENOENT;
 }
 
-static inline int try_stop_cpus(const struct cpumask *cpumask,
-				cpu_stop_fn_t fn, void *arg)
-{
-	return stop_cpus(cpumask, fn, arg);
-}
-
 #endif	/* CONFIG_SMP */
 
 /*
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 1fe34a9fabc2..5d68ec4c4015 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -453,36 +453,6 @@ int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
 	return ret;
 }
 
-/**
- * try_stop_cpus - try to stop multiple cpus
- * @cpumask: cpus to stop
- * @fn: function to execute
- * @arg: argument to @fn
- *
- * Identical to stop_cpus() except that it fails with -EAGAIN if
- * someone else is already using the facility.
- *
- * CONTEXT:
- * Might sleep.
- *
- * RETURNS:
- * -EAGAIN if someone else is already stopping cpus, -ENOENT if
- * @fn(@arg) was not executed at all because all cpus in @cpumask were
- * offline; otherwise, 0 if all executions of @fn returned 0, any non
- * zero return value if any returned non zero.
- */
-int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
-{
-	int ret;
-
-	/* static works are used, process one request at a time */
-	if (!mutex_trylock(&stop_cpus_mutex))
-		return -EAGAIN;
-	ret = __stop_cpus(cpumask, fn, arg);
-	mutex_unlock(&stop_cpus_mutex);
-	return ret;
-}
-
 static int cpu_stop_should_run(unsigned int cpu)
 {
 	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
--
cgit v1.2.3

From 35f4cd96f5551dc1b2641159e7bb7bf91de6600f Mon Sep 17 00:00:00 2001
From: Yangtao Li
Date: Sat, 28 Dec 2019 16:19:12 +0000
Subject: stop_machine: Make stop_cpus() static

The function stop_cpus() is only used internally by stop_machine to stop
multiple CPUs. Make it static.
Signed-off-by: Yangtao Li
Signed-off-by: Peter Zijlstra (Intel)
Link: https://lkml.kernel.org/r/20191228161912.24082-1-tiny.windzz@gmail.com
---
 include/linux/stop_machine.h | 9 ---------
 kernel/stop_machine.c        | 2 +-
 2 files changed, 1 insertion(+), 10 deletions(-)

(limited to 'include')

diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h
index 648298f877da..76d8b09384a7 100644
--- a/include/linux/stop_machine.h
+++ b/include/linux/stop_machine.h
@@ -32,7 +32,6 @@ int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg);
 int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg);
 bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
 			 struct cpu_stop_work *work_buf);
-int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg);
 void stop_machine_park(int cpu);
 void stop_machine_unpark(int cpu);
 void stop_machine_yield(const struct cpumask *cpumask);
@@ -81,14 +80,6 @@ static inline bool stop_one_cpu_nowait(unsigned int cpu,
 	return false;
 }
 
-static inline int stop_cpus(const struct cpumask *cpumask,
-			    cpu_stop_fn_t fn, void *arg)
-{
-	if (cpumask_test_cpu(raw_smp_processor_id(), cpumask))
-		return stop_one_cpu(raw_smp_processor_id(), fn, arg);
-	return -ENOENT;
-}
-
 #endif	/* CONFIG_SMP */
 
 /*
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 5d68ec4c4015..865bb0228ab6 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -442,7 +442,7 @@ static int __stop_cpus(const struct cpumask *cpumask,
  * @cpumask were offline; otherwise, 0 if all executions of @fn
  * returned 0, any non zero return value if any returned non zero.
  */
-int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
+static int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
 {
 	int ret;
 
--
cgit v1.2.3

From a4f9a0e51bbf89cb461b1985a1a570e6b87da3b5 Mon Sep 17 00:00:00 2001
From: Vincent Guittot
Date: Wed, 15 Jan 2020 11:20:20 +0100
Subject: sched/fair: Remove redundant call to cpufreq_update_util()

With commit bef69dd87828 ("sched/cpufreq: Move the cfs_rq_util_change()
call to cpufreq_update_util()"), update_load_avg() has become the central
point for calling cpufreq (not including the update of blocked load). This
makes it possible to further reduce the number of calls to
cpufreq_update_util() and to remove the last redundant ones.

With update_load_avg(), we are now sure that cpufreq_update_util() will be
called after every task attachment to a cfs_rq, and especially after
propagating this event down to the util_avg of the root cfs_rq, which is
the level used by cpufreq governors like schedutil to set the frequency of
a CPU.

The SCHED_CPUFREQ_MIGRATION flag forces an early call to cpufreq when a
migration happens in a cgroup while the util_avg of the root cfs_rq is not
yet updated; that call is duplicated by the one that happens immediately
afterwards, when the migration event reaches the root cfs_rq. The dedicated
SCHED_CPUFREQ_MIGRATION flag is therefore useless and can be removed. The
interface of attach_entity_load_avg() can also be simplified accordingly.

Signed-off-by: Vincent Guittot
Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Rafael J. Wysocki
Link: https://lkml.kernel.org/r/1579083620-24943-1-git-send-email-vincent.guittot@linaro.org
---
 include/linux/sched/cpufreq.h |  1 -
 kernel/sched/fair.c           | 14 +++++++-------
 2 files changed, 7 insertions(+), 8 deletions(-)

(limited to 'include')

diff --git a/include/linux/sched/cpufreq.h b/include/linux/sched/cpufreq.h
index cc6bcc1e96bc..3ed5aa18593f 100644
--- a/include/linux/sched/cpufreq.h
+++ b/include/linux/sched/cpufreq.h
@@ -9,7 +9,6 @@
  */
 
 #define SCHED_CPUFREQ_IOWAIT	(1U << 0)
-#define SCHED_CPUFREQ_MIGRATION	(1U << 1)
 
 #ifdef CONFIG_CPU_FREQ
 struct cpufreq_policy;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e84723c5c661..ebf50955fe8a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -801,7 +801,7 @@ void post_init_entity_util_avg(struct task_struct *p)
	 * For !fair tasks do:
	 *
	update_cfs_rq_load_avg(now, cfs_rq);
-	attach_entity_load_avg(cfs_rq, se, 0);
+	attach_entity_load_avg(cfs_rq, se);
	switched_from_fair(rq, p);
	 *
	 * such that the next switched_to_fair() has the
@@ -3114,7 +3114,7 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags)
 {
 	struct rq *rq = rq_of(cfs_rq);
 
-	if (&rq->cfs == cfs_rq || (flags & SCHED_CPUFREQ_MIGRATION)) {
+	if (&rq->cfs == cfs_rq) {
 		/*
 		 * There are a few boundary cases this might miss but it should
 		 * get called often enough that that should (hopefully) not be
@@ -3521,7 +3521,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
  * Must call update_cfs_rq_load_avg() before this, since we rely on
  * cfs_rq->avg.last_update_time being current.
  */
-static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	u32 divider = LOAD_AVG_MAX - 1024 + cfs_rq->avg.period_contrib;
 
@@ -3557,7 +3557,7 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 
 	add_tg_cfs_propagate(cfs_rq, se->avg.load_sum);
 
-	cfs_rq_util_change(cfs_rq, flags);
+	cfs_rq_util_change(cfs_rq, 0);
 
 	trace_pelt_cfs_tp(cfs_rq);
 }
@@ -3615,7 +3615,7 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 		 *
 		 * IOW we're enqueueing a task on a new CPU.
 		 */
-		attach_entity_load_avg(cfs_rq, se, SCHED_CPUFREQ_MIGRATION);
+		attach_entity_load_avg(cfs_rq, se);
 		update_tg_load_avg(cfs_rq, 0);
 
 	} else if (decayed) {
@@ -3872,7 +3872,7 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 static inline void remove_entity_load_avg(struct sched_entity *se) {}
 
 static inline void
-attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) {}
+attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
 static inline void
 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
 
@@ -10436,7 +10436,7 @@ static void attach_entity_cfs_rq(struct sched_entity *se)
 
 	/* Synchronize entity with its cfs_rq */
 	update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
-	attach_entity_load_avg(cfs_rq, se, 0);
+	attach_entity_load_avg(cfs_rq, se);
 	update_tg_load_avg(cfs_rq, false);
 	propagate_entity_cfs_rq(se);
 }
--
cgit v1.2.3
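
The i_size_read()/part_nr_sects_read() hunks earlier in this series all touch
the same pattern for reading a 64-bit value that cannot be loaded atomically on
32-bit: a sequence counter on SMP, preempt_disable() on a preemptible UP kernel
(the branch being switched from CONFIG_PREEMPT to CONFIG_PREEMPTION), and a
plain load otherwise. Below is a minimal, standalone userspace sketch of the
sequence-counter variant only; all demo_* names are invented for illustration,
and the kernel's real implementation uses seqcount_t, which adds the required
memory barriers.

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative stand-in for a seqcount-protected 64-bit field: the writer
 * bumps ->seq to an odd value before updating the two halves and back to an
 * even value afterwards; readers retry until they observe a stable, even
 * sequence number.  Single-threaded demo only - real kernel code relies on
 * read_seqcount_begin()/read_seqcount_retry() for ordering.
 */
struct demo_size {
	unsigned int seq;	/* even: stable, odd: update in progress */
	uint32_t lo;		/* low 32 bits of the value */
	uint32_t hi;		/* high 32 bits of the value */
};

static void demo_size_write(struct demo_size *s, uint64_t v)
{
	s->seq++;			/* odd: writer active */
	s->lo = (uint32_t)v;
	s->hi = (uint32_t)(v >> 32);
	s->seq++;			/* even again: update complete */
}

static uint64_t demo_size_read(const struct demo_size *s)
{
	unsigned int start;
	uint64_t v;

	do {
		start = s->seq;
		v = ((uint64_t)s->hi << 32) | s->lo;
	} while ((start & 1) || start != s->seq);

	return v;
}

int main(void)
{
	struct demo_size s = { 0, 0, 0 };

	demo_size_write(&s, 0x0123456789abcdefULL);
	printf("read back: 0x%llx\n", (unsigned long long)demo_size_read(&s));
	return 0;
}

On a 32-bit preemptible UP kernel the counter is unnecessary: the helpers
simply wrap the two 32-bit accesses in preempt_disable()/preempt_enable(),
which is exactly the #elif branch that the hunks above move from
CONFIG_PREEMPT to CONFIG_PREEMPTION.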