From 387f77cc8249c847b4fa4d8c93694818b79efee3 Mon Sep 17 00:00:00 2001 From: Vincent Guittot Date: Tue, 13 Feb 2018 09:59:42 +0100 Subject: sched/fair: Remove stray space in #ifdef Remove a useless space in # ifdef and align it with others. Signed-off-by: Vincent Guittot Acked-by: Peter Zijlstra Cc: Linus Torvalds Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1518512382-29426-1-git-send-email-vincent.guittot@linaro.org Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 5eb3ffc9be84..820f94c9b200 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -2823,7 +2823,7 @@ void reweight_task(struct task_struct *p, int prio) } #ifdef CONFIG_FAIR_GROUP_SCHED -# ifdef CONFIG_SMP +#ifdef CONFIG_SMP /* * All this does is approximate the hierarchical proportion which includes that * global sum we all love to hate. @@ -2974,7 +2974,7 @@ static long calc_group_runnable(struct cfs_rq *cfs_rq, long shares) return clamp_t(long, runnable, MIN_SHARES, shares); } -# endif /* CONFIG_SMP */ +#endif /* CONFIG_SMP */ static inline int throttled_hierarchy(struct cfs_rq *cfs_rq); -- cgit v1.2.3 From 7ebb66a12f85bc375beaf45ca900427fe47aa8f7 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Tue, 13 Feb 2018 13:37:25 +0000 Subject: sched/fair: Avoid an unnecessary lookup of current CPU ID during wake_affine The only caller of wake_affine() knows the CPU ID. Pass it in instead of rechecking it. Signed-off-by: Mel Gorman Signed-off-by: Peter Zijlstra (Intel) Cc: Giovanni Gherdovich Cc: Linus Torvalds Cc: Matt Fleming Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20180213133730.24064-2-mgorman@techsingularity.net Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 820f94c9b200..0132572d7523 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -5751,9 +5751,8 @@ wake_affine_weight(struct sched_domain *sd, struct task_struct *p, } static int wake_affine(struct sched_domain *sd, struct task_struct *p, - int prev_cpu, int sync) + int this_cpu, int prev_cpu, int sync) { - int this_cpu = smp_processor_id(); int target = nr_cpumask_bits; if (sched_feat(WA_IDLE)) @@ -6376,7 +6375,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f if (cpu == prev_cpu) goto pick_cpu; - new_cpu = wake_affine(affine_sd, p, prev_cpu, sync); + new_cpu = wake_affine(affine_sd, p, cpu, prev_cpu, sync); } if (sd && !(sd_flag & SD_BALANCE_FORK)) { -- cgit v1.2.3 From eeb60398639143c11ff2c8b509e3a471411bb5d3 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Tue, 13 Feb 2018 13:37:26 +0000 Subject: sched/fair: Defer calculation of 'prev_eff_load' in wake_affine_weight() until needed On sync wakeups, the previous CPU effective load may not be used so delay the calculation until it's needed. 
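To see why the deferral is safe, consider a stripped-down userspace model of the wake_affine_weight() flow: on the sync path the function can return before the previous CPU's load is ever consumed, so reading it eagerly is wasted work there. This is only a sketch with made-up load numbers and stub helpers, not the kernel function (the capacity and imbalance_pct scaling are omitted):

    /* toy_wake_affine_weight.c - illustrative only, not kernel code */
    #include <stdio.h>

    #define NR_CPUMASK_BITS 64  /* stand-in for nr_cpumask_bits */

    /* Stub load helpers; real values come from the scheduler's load tracking. */
    static long cpu_load(int cpu)      { return cpu == 0 ? 300 : 500; }
    static long waker_task_load(void)  { return 400; }
    static long wakee_task_load(void)  { return 100; }

    /* Simplified decision: return the chosen CPU or NR_CPUMASK_BITS for "no". */
    static int toy_wake_affine_weight(int this_cpu, int prev_cpu, int sync)
    {
            long this_eff_load = cpu_load(this_cpu);
            long prev_eff_load;            /* note: not read yet */

            if (sync) {
                    long current_load = waker_task_load();

                    /* Early exit: prev_eff_load was never needed. */
                    if (current_load > this_eff_load)
                            return this_cpu;

                    this_eff_load -= current_load;
            }

            this_eff_load += wakee_task_load();

            /* Only now is the previous CPU's load actually required. */
            prev_eff_load = cpu_load(prev_cpu);
            prev_eff_load -= wakee_task_load();

            return this_eff_load < prev_eff_load ? this_cpu : NR_CPUMASK_BITS;
    }

    int main(void)
    {
            printf("sync wakeup   -> CPU %d\n", toy_wake_affine_weight(0, 1, 1));
            printf("normal wakeup -> CPU %d\n", toy_wake_affine_weight(0, 1, 0));
            return 0;
    }
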
Signed-off-by: Mel Gorman Signed-off-by: Peter Zijlstra (Intel) Cc: Giovanni Gherdovich Cc: Linus Torvalds Cc: Matt Fleming Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20180213133730.24064-3-mgorman@techsingularity.net Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 0132572d7523..ae3e6f877711 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -5724,7 +5724,6 @@ wake_affine_weight(struct sched_domain *sd, struct task_struct *p, unsigned long task_load; this_eff_load = target_load(this_cpu, sd->wake_idx); - prev_eff_load = source_load(prev_cpu, sd->wake_idx); if (sync) { unsigned long current_load = task_h_load(current); @@ -5742,6 +5741,7 @@ wake_affine_weight(struct sched_domain *sd, struct task_struct *p, this_eff_load *= 100; this_eff_load *= capacity_of(prev_cpu); + prev_eff_load = source_load(prev_cpu, sd->wake_idx); prev_eff_load -= task_load; if (sched_feat(WA_BIAS)) prev_eff_load *= 100 + (sd->imbalance_pct - 100) / 2; -- cgit v1.2.3 From 082f764a2f3f2968afa1a0b04a1ccb1b70633844 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Tue, 13 Feb 2018 13:37:27 +0000 Subject: sched/fair: Do not migrate on wake_affine_weight() if weights are equal wake_affine_weight() will consider migrating a task to, or near, the current CPU if there is a load imbalance. If the CPUs share LLC then either CPU is valid as a search-for-idle-sibling target and equally appropriate for stacking two tasks on one CPU if an idle sibling is unavailable. If they do not share cache then a cross-node migration potentially impacts locality so while they are equal from a CPU capacity point of view, they are not equal in terms of memory locality. In either case, it's more appropriate to migrate only if there is a difference in their effective load. This patch modifies wake_affine_weight() to only consider migrating a task if there is a load imbalance for normal wakeups but will allow potential stacking if the loads are equal and it's a sync wakeup. For the most part, the difference in performance is marginal. 
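To make the new rule concrete, the sketch below models just the final comparison; it is a standalone toy, not the kernel code, with the capacity and imbalance_pct scaling left out. With the strict less-than, equal effective loads no longer migrate on a normal wakeup, while the +1 bias applied on sync wakeups lets an exact tie still pick the waker's CPU:

    /* toy_equal_weights.c - illustrative sketch, not the kernel implementation */
    #include <stdio.h>

    #define NR_CPUMASK_BITS 64  /* stand-in for nr_cpumask_bits */

    /*
     * Decide between this_cpu and "no affine migration" given pre-computed
     * effective loads.  Scaling by CPU capacity and imbalance_pct is omitted.
     */
    static int choose_cpu(long this_eff_load, long prev_eff_load,
                          int this_cpu, int sync)
    {
            /*
             * A sync wakeup tolerates stacking the wakee on the waker when
             * the loads are exactly equal, so nudge the previous CPU's load
             * up by one before the strict comparison.
             */
            if (sync)
                    prev_eff_load += 1;

            return this_eff_load < prev_eff_load ? this_cpu : NR_CPUMASK_BITS;
    }

    int main(void)
    {
            /* Equal loads: normal wakeup stays put, sync wakeup migrates. */
            printf("equal, !sync -> %d\n", choose_cpu(1000, 1000, 0, 0)); /* 64 */
            printf("equal,  sync -> %d\n", choose_cpu(1000, 1000, 0, 1)); /*  0 */
            /* A clear imbalance still migrates either way. */
            printf("lighter this_cpu -> %d\n", choose_cpu(800, 1000, 0, 0)); /* 0 */
            return 0;
    }
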
For example, on a 4-socket server running netperf UDP_STREAM on localhost the differences are as follows: 4.15.0 4.15.0 16rc0 noequal-v1r23 Hmean send-64 355.47 ( 0.00%) 349.50 ( -1.68%) Hmean send-128 697.98 ( 0.00%) 693.35 ( -0.66%) Hmean send-256 1328.02 ( 0.00%) 1318.77 ( -0.70%) Hmean send-1024 5051.83 ( 0.00%) 5051.11 ( -0.01%) Hmean send-2048 9637.02 ( 0.00%) 9601.34 ( -0.37%) Hmean send-3312 14355.37 ( 0.00%) 14414.51 ( 0.41%) Hmean send-4096 16464.97 ( 0.00%) 16301.37 ( -0.99%) Hmean send-8192 26722.42 ( 0.00%) 26428.95 ( -1.10%) Hmean send-16384 38137.81 ( 0.00%) 38046.11 ( -0.24%) Hmean recv-64 355.47 ( 0.00%) 349.50 ( -1.68%) Hmean recv-128 697.98 ( 0.00%) 693.35 ( -0.66%) Hmean recv-256 1328.02 ( 0.00%) 1318.77 ( -0.70%) Hmean recv-1024 5051.83 ( 0.00%) 5051.11 ( -0.01%) Hmean recv-2048 9636.95 ( 0.00%) 9601.30 ( -0.37%) Hmean recv-3312 14355.32 ( 0.00%) 14414.48 ( 0.41%) Hmean recv-4096 16464.74 ( 0.00%) 16301.16 ( -0.99%) Hmean recv-8192 26721.63 ( 0.00%) 26428.17 ( -1.10%) Hmean recv-16384 38136.00 ( 0.00%) 38044.88 ( -0.24%) Stddev send-64 7.30 ( 0.00%) 4.75 ( 34.96%) Stddev send-128 15.15 ( 0.00%) 22.38 ( -47.66%) Stddev send-256 13.99 ( 0.00%) 19.14 ( -36.81%) Stddev send-1024 105.73 ( 0.00%) 67.38 ( 36.27%) Stddev send-2048 294.57 ( 0.00%) 223.88 ( 24.00%) Stddev send-3312 302.28 ( 0.00%) 271.74 ( 10.10%) Stddev send-4096 195.92 ( 0.00%) 121.10 ( 38.19%) Stddev send-8192 399.71 ( 0.00%) 563.77 ( -41.04%) Stddev send-16384 1163.47 ( 0.00%) 1103.68 ( 5.14%) Stddev recv-64 7.30 ( 0.00%) 4.75 ( 34.96%) Stddev recv-128 15.15 ( 0.00%) 22.38 ( -47.66%) Stddev recv-256 13.99 ( 0.00%) 19.14 ( -36.81%) Stddev recv-1024 105.73 ( 0.00%) 67.38 ( 36.27%) Stddev recv-2048 294.59 ( 0.00%) 223.89 ( 24.00%) Stddev recv-3312 302.24 ( 0.00%) 271.75 ( 10.09%) Stddev recv-4096 196.03 ( 0.00%) 121.14 ( 38.20%) Stddev recv-8192 399.86 ( 0.00%) 563.65 ( -40.96%) Stddev recv-16384 1163.79 ( 0.00%) 1103.86 ( 5.15%) The difference in overall performance is marginal but note that most measurements are less variable. There were similar observations for other netperf comparisons. hackbench with sockets or threads with processes or threads showed minor difference with some reduction of migration. tbench showed only marginal differences that were within the noise. dbench, regardless of filesystem, showed minor differences all of which are within noise. Multiple machines, both UMA and NUMA were tested without any regressions showing up. The biggest risk with a patch like this is affecting wakeup latencies. 
However, the schbench load from Facebook which is very sensitive to wakeup latency showed a mixed result with mostly improvements in wakeup latency: 4.15.0 4.15.0 16rc0 noequal-v1r23 Lat 50.00th-qrtle-1 38.00 ( 0.00%) 38.00 ( 0.00%) Lat 75.00th-qrtle-1 49.00 ( 0.00%) 41.00 ( 16.33%) Lat 90.00th-qrtle-1 52.00 ( 0.00%) 50.00 ( 3.85%) Lat 95.00th-qrtle-1 54.00 ( 0.00%) 51.00 ( 5.56%) Lat 99.00th-qrtle-1 63.00 ( 0.00%) 60.00 ( 4.76%) Lat 99.50th-qrtle-1 66.00 ( 0.00%) 61.00 ( 7.58%) Lat 99.90th-qrtle-1 78.00 ( 0.00%) 65.00 ( 16.67%) Lat 50.00th-qrtle-2 38.00 ( 0.00%) 38.00 ( 0.00%) Lat 75.00th-qrtle-2 42.00 ( 0.00%) 43.00 ( -2.38%) Lat 90.00th-qrtle-2 46.00 ( 0.00%) 48.00 ( -4.35%) Lat 95.00th-qrtle-2 49.00 ( 0.00%) 50.00 ( -2.04%) Lat 99.00th-qrtle-2 55.00 ( 0.00%) 57.00 ( -3.64%) Lat 99.50th-qrtle-2 58.00 ( 0.00%) 60.00 ( -3.45%) Lat 99.90th-qrtle-2 65.00 ( 0.00%) 68.00 ( -4.62%) Lat 50.00th-qrtle-4 41.00 ( 0.00%) 41.00 ( 0.00%) Lat 75.00th-qrtle-4 45.00 ( 0.00%) 46.00 ( -2.22%) Lat 90.00th-qrtle-4 50.00 ( 0.00%) 50.00 ( 0.00%) Lat 95.00th-qrtle-4 54.00 ( 0.00%) 53.00 ( 1.85%) Lat 99.00th-qrtle-4 61.00 ( 0.00%) 61.00 ( 0.00%) Lat 99.50th-qrtle-4 65.00 ( 0.00%) 64.00 ( 1.54%) Lat 99.90th-qrtle-4 76.00 ( 0.00%) 82.00 ( -7.89%) Lat 50.00th-qrtle-8 48.00 ( 0.00%) 46.00 ( 4.17%) Lat 75.00th-qrtle-8 55.00 ( 0.00%) 54.00 ( 1.82%) Lat 90.00th-qrtle-8 60.00 ( 0.00%) 59.00 ( 1.67%) Lat 95.00th-qrtle-8 63.00 ( 0.00%) 63.00 ( 0.00%) Lat 99.00th-qrtle-8 71.00 ( 0.00%) 69.00 ( 2.82%) Lat 99.50th-qrtle-8 74.00 ( 0.00%) 73.00 ( 1.35%) Lat 99.90th-qrtle-8 98.00 ( 0.00%) 90.00 ( 8.16%) Lat 50.00th-qrtle-16 56.00 ( 0.00%) 55.00 ( 1.79%) Lat 75.00th-qrtle-16 68.00 ( 0.00%) 67.00 ( 1.47%) Lat 90.00th-qrtle-16 77.00 ( 0.00%) 78.00 ( -1.30%) Lat 95.00th-qrtle-16 82.00 ( 0.00%) 84.00 ( -2.44%) Lat 99.00th-qrtle-16 90.00 ( 0.00%) 93.00 ( -3.33%) Lat 99.50th-qrtle-16 93.00 ( 0.00%) 97.00 ( -4.30%) Lat 99.90th-qrtle-16 110.00 ( 0.00%) 110.00 ( 0.00%) Lat 50.00th-qrtle-32 68.00 ( 0.00%) 62.00 ( 8.82%) Lat 75.00th-qrtle-32 90.00 ( 0.00%) 83.00 ( 7.78%) Lat 90.00th-qrtle-32 110.00 ( 0.00%) 100.00 ( 9.09%) Lat 95.00th-qrtle-32 122.00 ( 0.00%) 111.00 ( 9.02%) Lat 99.00th-qrtle-32 145.00 ( 0.00%) 133.00 ( 8.28%) Lat 99.50th-qrtle-32 154.00 ( 0.00%) 143.00 ( 7.14%) Lat 99.90th-qrtle-32 2316.00 ( 0.00%) 515.00 ( 77.76%) Lat 50.00th-qrtle-35 69.00 ( 0.00%) 72.00 ( -4.35%) Lat 75.00th-qrtle-35 92.00 ( 0.00%) 95.00 ( -3.26%) Lat 90.00th-qrtle-35 111.00 ( 0.00%) 114.00 ( -2.70%) Lat 95.00th-qrtle-35 122.00 ( 0.00%) 124.00 ( -1.64%) Lat 99.00th-qrtle-35 142.00 ( 0.00%) 144.00 ( -1.41%) Lat 99.50th-qrtle-35 150.00 ( 0.00%) 154.00 ( -2.67%) Lat 99.90th-qrtle-35 6104.00 ( 0.00%) 5640.00 ( 7.60%) Signed-off-by: Mel Gorman Signed-off-by: Peter Zijlstra (Intel) Cc: Giovanni Gherdovich Cc: Linus Torvalds Cc: Matt Fleming Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20180213133730.24064-4-mgorman@techsingularity.net Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index ae3e6f877711..a07920f3a2fd 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -5747,7 +5747,16 @@ wake_affine_weight(struct sched_domain *sd, struct task_struct *p, prev_eff_load *= 100 + (sd->imbalance_pct - 100) / 2; prev_eff_load *= capacity_of(this_cpu); - return this_eff_load <= prev_eff_load ? 
this_cpu : nr_cpumask_bits; + /* + * If sync, adjust the weight of prev_eff_load such that if + * prev_eff == this_eff that select_idle_sibling() will consider + * stacking the wakee on top of the waker if no other CPU is + * idle. + */ + if (sync) + prev_eff_load += 1; + + return this_eff_load < prev_eff_load ? this_cpu : nr_cpumask_bits; } static int wake_affine(struct sched_domain *sd, struct task_struct *p, -- cgit v1.2.3 From 24d0c1d6e65f635b2c0684d0a42ff6c0674aa0e6 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 13 Feb 2018 13:37:28 +0000 Subject: sched/fair: Do not migrate due to a sync wakeup on exit When a task exits, it notifies the parent that it has exited. This is a sync wakeup and the exiting task may pull the parent towards the waker's CPU. For simple workloads like using a shell, it was observed that the shell is pulled across nodes by exiting processes. This is daft as the parent may be long-lived and properly placed. This patch special cases a sync wakeup on exit to avoid pulling tasks across nodes. Testing on a range of workloads and machines showed very little difference in performance although there was a small 3% boost on some machines running a shellscript intensive workload (git regression test suite). Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Mel Gorman Signed-off-by: Peter Zijlstra (Intel) Cc: Giovanni Gherdovich Cc: Linus Torvalds Cc: Matt Fleming Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20180213133730.24064-5-mgorman@techsingularity.net Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index a07920f3a2fd..302dda81e192 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -6350,7 +6350,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f int cpu = smp_processor_id(); int new_cpu = prev_cpu; int want_affine = 0; - int sync = wake_flags & WF_SYNC; + int sync = (wake_flags & WF_SYNC) && !(current->flags & PF_EXITING); if (sd_flag & SD_BALANCE_WAKE) { record_wakee(p); -- cgit v1.2.3 From 2c83362734dad8e48ccc0710b5cd2436a0323893 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Tue, 13 Feb 2018 13:37:29 +0000 Subject: sched/fair: Consider SD_NUMA when selecting the most idle group to schedule on find_idlest_group() compares a local group with each other group to select the one that is most idle. When comparing groups in different NUMA domains, a very slight imbalance is enough to select a remote NUMA node even if the runnable load on both groups is 0 or close to 0. This ignores the cost of remote accesses entirely and is a problem when selecting the CPU for a newly forked task to run on. This is problematic when a forking server is almost guaranteed to run on a remote node incurring numerous remote accesses and potentially causing automatic NUMA balancing to try to migrate the task back or migrate the data to another node. Similar weirdness is observed if a basic shell command pipes output to another as each process in the pipeline is likely to start on different nodes and then get adjusted later by wake_affine(). This patch adds imbalance to remote domains when considering whether to select CPUs from remote domains. If the local domain is selected, imbalance will still be used to try to select a CPU from a lower scheduler domain's group instead of stacking tasks on the same CPU. 
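A reduced model of the added check is sketched below. It is a standalone userspace toy rather than the kernel function, and the SD_NUMA value is only illustrative. The idea: across NUMA domains, the best remote group's runnable load is padded with "imbalance" before being compared with the local load, so a near-tie keeps the fork local:

    /* toy_numa_idlest.c - simplified model of the added SD_NUMA check */
    #include <stdio.h>
    #include <stdbool.h>

    #define SD_NUMA 0x4000  /* illustrative flag value */

    /*
     * Return true if the local group should be kept (i.e. find_idlest_group()
     * would return NULL), given the best remote group's minimum runnable load.
     */
    static bool keep_local(unsigned int sd_flags,
                           long min_runnable_load,  /* best remote group */
                           long this_runnable_load, /* local group */
                           long imbalance)
    {
            /*
             * Across NUMA domains, bias the comparison by "imbalance" so a
             * near-tie (e.g. both loads close to 0) does not send a newly
             * forked task to a remote node.
             */
            if ((sd_flags & SD_NUMA) &&
                min_runnable_load + imbalance >= this_runnable_load)
                    return true;

            /*
             * Pre-existing check: if the best remote group is heavier than
             * the local group plus the allowed imbalance, local clearly wins.
             */
            if (min_runnable_load > this_runnable_load + imbalance)
                    return true;

            return false;
    }

    int main(void)
    {
            long imbalance = 4;

            /* Both nearly idle: stay local on NUMA, go remote otherwise. */
            printf("NUMA,  0 vs 2: keep_local=%d\n", keep_local(SD_NUMA, 0, 2, imbalance));
            printf("!NUMA, 0 vs 2: keep_local=%d\n", keep_local(0, 0, 2, imbalance));
            return 0;
    }
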
A variety of workloads and machines were tested and as expected, there is no difference on UMA. The difference on NUMA can be dramatic. This is a comparison of elapsed times running the git regression test suite. It's fork-intensive with short-lived processes: 4.15.0 4.15.0 noexit-v1r23 sdnuma-v1r23 Elapsed min 1706.06 ( 0.00%) 1435.94 ( 15.83%) Elapsed mean 1709.53 ( 0.00%) 1436.98 ( 15.94%) Elapsed stddev 2.16 ( 0.00%) 1.01 ( 53.38%) Elapsed coeffvar 0.13 ( 0.00%) 0.07 ( 44.54%) Elapsed max 1711.59 ( 0.00%) 1438.01 ( 15.98%) 4.15.0 4.15.0 noexit-v1r23 sdnuma-v1r23 User 5434.12 5188.41 System 4878.77 3467.09 Elapsed 10259.06 8624.21 That shows a considerable reduction in elapsed times. It's important to note that automatic NUMA balancing does not affect this load as processes are too short-lived. There is also a noticable impact on hackbench such as this example using processes and pipes: hackbench-process-pipes 4.15.0 4.15.0 noexit-v1r23 sdnuma-v1r23 Amean 1 1.0973 ( 0.00%) 0.9393 ( 14.40%) Amean 4 1.3427 ( 0.00%) 1.3730 ( -2.26%) Amean 7 1.4233 ( 0.00%) 1.6670 ( -17.12%) Amean 12 3.0250 ( 0.00%) 3.3013 ( -9.13%) Amean 21 9.0860 ( 0.00%) 9.5343 ( -4.93%) Amean 30 14.6547 ( 0.00%) 13.2433 ( 9.63%) Amean 48 22.5447 ( 0.00%) 20.4303 ( 9.38%) Amean 79 29.2010 ( 0.00%) 26.7853 ( 8.27%) Amean 110 36.7443 ( 0.00%) 35.8453 ( 2.45%) Amean 141 45.8533 ( 0.00%) 42.6223 ( 7.05%) Amean 172 55.1317 ( 0.00%) 50.6473 ( 8.13%) Amean 203 64.4420 ( 0.00%) 58.3957 ( 9.38%) Amean 234 73.2293 ( 0.00%) 67.1047 ( 8.36%) Amean 265 80.5220 ( 0.00%) 75.7330 ( 5.95%) Amean 296 88.7567 ( 0.00%) 82.1533 ( 7.44%) It's not a universal win as there are occasions when spreading wide and quickly is a benefit but it's more of a win than it is a loss. For other workloads, there is little difference but netperf is interesting. Without the patch, the server and client starts on different nodes but quickly get migrated due to wake_affine. Hence, the difference is overall performance is marginal but detectable: 4.15.0 4.15.0 noexit-v1r23 sdnuma-v1r23 Hmean send-64 349.09 ( 0.00%) 354.67 ( 1.60%) Hmean send-128 699.16 ( 0.00%) 702.91 ( 0.54%) Hmean send-256 1316.34 ( 0.00%) 1350.07 ( 2.56%) Hmean send-1024 5063.99 ( 0.00%) 5124.38 ( 1.19%) Hmean send-2048 9705.19 ( 0.00%) 9687.44 ( -0.18%) Hmean send-3312 14359.48 ( 0.00%) 14577.64 ( 1.52%) Hmean send-4096 16324.20 ( 0.00%) 16393.62 ( 0.43%) Hmean send-8192 26112.61 ( 0.00%) 26877.26 ( 2.93%) Hmean send-16384 37208.44 ( 0.00%) 38683.43 ( 3.96%) Hmean recv-64 349.09 ( 0.00%) 354.67 ( 1.60%) Hmean recv-128 699.16 ( 0.00%) 702.91 ( 0.54%) Hmean recv-256 1316.34 ( 0.00%) 1350.07 ( 2.56%) Hmean recv-1024 5063.99 ( 0.00%) 5124.38 ( 1.19%) Hmean recv-2048 9705.16 ( 0.00%) 9687.43 ( -0.18%) Hmean recv-3312 14359.42 ( 0.00%) 14577.59 ( 1.52%) Hmean recv-4096 16323.98 ( 0.00%) 16393.55 ( 0.43%) Hmean recv-8192 26111.85 ( 0.00%) 26876.96 ( 2.93%) Hmean recv-16384 37206.99 ( 0.00%) 38682.41 ( 3.97%) However, what is very interesting is how automatic NUMA balancing behaves. Each netperf instance runs long enough for balancing to activate: NUMA base PTE updates 4620 1473 NUMA huge PMD updates 0 0 NUMA page range updates 4620 1473 NUMA hint faults 4301 1383 NUMA hint local faults 1309 451 NUMA hint local percent 30 32 NUMA pages migrated 1335 491 AutoNUMA cost 21% 6% There is an unfortunate number of remote faults although tracing indicated that the vast majority are in shared libraries. 
However, the tendency to start tasks on the same node if there is capacity means that there were far fewer PTE updates and faults incurred overall. Signed-off-by: Mel Gorman Signed-off-by: Peter Zijlstra (Intel) Cc: Giovanni Gherdovich Cc: Linus Torvalds Cc: Matt Fleming Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20180213133730.24064-6-mgorman@techsingularity.net Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 302dda81e192..94aea5b91a96 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -5911,6 +5911,18 @@ skip_spare: if (!idlest) return NULL; + /* + * When comparing groups across NUMA domains, it's possible for the + * local domain to be very lightly loaded relative to the remote + * domains but "imbalance" skews the comparison making remote CPUs + * look much more favourable. When considering cross-domain, add + * imbalance to the runnable load on the remote node and consider + * staying local. + */ + if ((sd->flags & SD_NUMA) && + min_runnable_load + imbalance >= this_runnable_load) + return NULL; + if (min_runnable_load > (this_runnable_load + imbalance)) return NULL; -- cgit v1.2.3 From 7347fc87dfe6b7315e74310ee1243dc222c68086 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Tue, 13 Feb 2018 13:37:30 +0000 Subject: sched/numa: Delay retrying placement for automatic NUMA balance after wake_affine() If wake_affine() pulls a task to another node for any reason and the node is no longer preferred then temporarily stop automatic NUMA balancing pulling the task back. Otherwise, tasks with a strong waker/wakee relationship may constantly fight automatic NUMA balancing over where a task should be placed. Once again netperf is interesting here. The performance barely changes but automatic NUMA balancing is interesting: Hmean send-64 354.67 ( 0.00%) 352.15 ( -0.71%) Hmean send-128 702.91 ( 0.00%) 693.84 ( -1.29%) Hmean send-256 1350.07 ( 0.00%) 1344.19 ( -0.44%) Hmean send-1024 5124.38 ( 0.00%) 4941.24 ( -3.57%) Hmean send-2048 9687.44 ( 0.00%) 9624.45 ( -0.65%) Hmean send-3312 14577.64 ( 0.00%) 14514.35 ( -0.43%) Hmean send-4096 16393.62 ( 0.00%) 16488.30 ( 0.58%) Hmean send-8192 26877.26 ( 0.00%) 26431.63 ( -1.66%) Hmean send-16384 38683.43 ( 0.00%) 38264.91 ( -1.08%) Hmean recv-64 354.67 ( 0.00%) 352.15 ( -0.71%) Hmean recv-128 702.91 ( 0.00%) 693.84 ( -1.29%) Hmean recv-256 1350.07 ( 0.00%) 1344.19 ( -0.44%) Hmean recv-1024 5124.38 ( 0.00%) 4941.24 ( -3.57%) Hmean recv-2048 9687.43 ( 0.00%) 9624.45 ( -0.65%) Hmean recv-3312 14577.59 ( 0.00%) 14514.35 ( -0.43%) Hmean recv-4096 16393.55 ( 0.00%) 16488.20 ( 0.58%) Hmean recv-8192 26876.96 ( 0.00%) 26431.29 ( -1.66%) Hmean recv-16384 38682.41 ( 0.00%) 38263.94 ( -1.08%) NUMA alloc hit 1465986 1423090 NUMA alloc miss 0 0 NUMA interleave hit 0 0 NUMA alloc local 1465897 1423003 NUMA base PTE updates 1473 1420 NUMA huge PMD updates 0 0 NUMA page range updates 1473 1420 NUMA hint faults 1383 1312 NUMA hint local faults 451 124 NUMA hint local percent 32 9 There is a slight degrading in performance but there are slightly fewer NUMA faults. There is a large drop in the percentage of local faults but the bulk of migrations for netperf are in small shared libraries so it's reflecting the fact that automatic NUMA balancing has backed off. 
This is a case where despite wake_affine() and automatic NUMA balancing fighting for placement that there is a marginal benefit to rescheduling to local data quickly. However, it should be noted that wake_affine() and automatic NUMA balancing fighting each other constantly is undesirable. However, the benefit in other cases is large. This is the result for NAS with the D class sizing on a 4-socket machine: nas-mpi 4.15.0 4.15.0 sdnuma-v1r23 delayretry-v1r23 Time cg.D 557.00 ( 0.00%) 431.82 ( 22.47%) Time ep.D 77.83 ( 0.00%) 79.01 ( -1.52%) Time is.D 26.46 ( 0.00%) 26.64 ( -0.68%) Time lu.D 727.14 ( 0.00%) 597.94 ( 17.77%) Time mg.D 191.35 ( 0.00%) 146.85 ( 23.26%) 4.15.0 4.15.0 sdnuma-v1r23delayretry-v1r23 User 75665.20 70413.30 System 20321.59 8861.67 Elapsed 766.13 634.92 Minor Faults 16528502 7127941 Major Faults 4553 5068 NUMA alloc local 6963197 6749135 NUMA base PTE updates 366409093 107491434 NUMA huge PMD updates 687556 198880 NUMA page range updates 718437765 209317994 NUMA hint faults 13643410 4601187 NUMA hint local faults 9212593 3063996 NUMA hint local percent 67 66 Note the massive reduction in system CPU usage even though the percentage of local faults is barely affected. There is a massive reduction in the number of PTE updates showing that automatic NUMA balancing has backed off. A critical observation is also that there is a massive reduction in minor faults which is due to far fewer NUMA hinting faults being trapped. There were questions on NAS OMP and how it behaved related to threads being bound to CPUs. First, there are more gains than losses with this patch applied and a reduction in system CPU usage: nas-omp 4.16.0-rc1 4.16.0-rc1 sdnuma-v2r1 delayretry-v2r1 Time bt.D 436.71 ( 0.00%) 430.05 ( 1.53%) Time cg.D 201.02 ( 0.00%) 180.87 ( 10.02%) Time ep.D 32.84 ( 0.00%) 32.68 ( 0.49%) Time is.D 9.63 ( 0.00%) 9.64 ( -0.10%) Time lu.D 331.20 ( 0.00%) 304.80 ( 7.97%) Time mg.D 54.87 ( 0.00%) 52.72 ( 3.92%) Time sp.D 1108.78 ( 0.00%) 917.10 ( 17.29%) Time ua.D 378.81 ( 0.00%) 398.83 ( -5.28%) 4.16.0-rc1 4.16.0-rc1 sdnuma-v2r1delayretry-v2r1 User 305633.08 296751.91 System 451.75 357.80 Elapsed 2595.73 2368.13 However, it does not close the gap between binding and being unbound. There is negligible difference between the performance of the baseline and a patched kernel when threads are bound so it is not presented here: 4.16.0-rc1 4.16.0-rc1 delayretry-bind delayretry-unbound Time bt.D 385.02 ( 0.00%) 430.05 ( -11.70%) Time cg.D 144.02 ( 0.00%) 180.87 ( -25.59%) Time ep.D 32.85 ( 0.00%) 32.68 ( 0.52%) Time is.D 10.52 ( 0.00%) 9.64 ( 8.37%) Time lu.D 285.31 ( 0.00%) 304.80 ( -6.83%) Time mg.D 43.21 ( 0.00%) 52.72 ( -22.01%) Time sp.D 820.24 ( 0.00%) 917.10 ( -11.81%) Time ua.D 337.09 ( 0.00%) 398.83 ( -18.32%) 4.16.0-rc1 4.16.0-rc1 delayretry-binddelayretry-unbound User 277731.25 296751.91 System 261.29 357.80 Elapsed 2100.55 2368.13 Unfortunately, while performance is improved by the patch, there is still quite a long way to go before it's equivalent to hard binding. Other workloads like hackbench, tbench, dbench and schbench are barely affected. dbench shows a mix of gains and losses depending on the machine although in general, the results are more stable. 
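The backoff itself is simple enough to model in a few lines of userspace C. The sketch below uses a fake jiffies counter and stub fields; the max(scan_delay, 4 * numa_scan_period) interval follows the patch, everything else is simplified and illustrative rather than the kernel implementation:

    /* toy_numa_backoff.c - simplified model of delaying NUMA placement retries */
    #include <stdio.h>

    static unsigned long jiffies;  /* fake clock, advanced by main() */

    struct task {
            unsigned long numa_migrate_retry; /* earliest retry time (jiffies) */
            unsigned long numa_scan_period;   /* ms */
    };

    /* Pretend 1 jiffy == 1 ms to keep the model trivial. */
    static unsigned long msecs_to_jiffies(unsigned long ms) { return ms; }

    /* Called after wake_affine() moved the task to a different node. */
    static void backoff_after_affine_move(struct task *p, unsigned long scan_delay_ms)
    {
            unsigned long interval = scan_delay_ms;

            /* 4 * numa_scan_period, as in the patch's two-pass filter comment. */
            if (p->numa_scan_period << 2 > interval)
                    interval = p->numa_scan_period << 2;

            p->numa_migrate_retry = jiffies + msecs_to_jiffies(interval);
    }

    /* Periodic attempt to move the task back to its preferred node. */
    static void try_migrate_preferred(struct task *p)
    {
            if (jiffies < p->numa_migrate_retry) {
                    printf("t=%lu: backed off, retry at %lu\n",
                           jiffies, p->numa_migrate_retry);
                    return;
            }
            printf("t=%lu: NUMA balancing may place the task now\n", jiffies);
    }

    int main(void)
    {
            struct task p = { .numa_migrate_retry = 0, .numa_scan_period = 1000 };

            jiffies = 100;
            backoff_after_affine_move(&p, 1000); /* scan_delay of 1000ms assumed */
            try_migrate_preferred(&p);           /* still inside the backoff   */

            jiffies += 5000;
            try_migrate_preferred(&p);           /* backoff has expired        */
            return 0;
    }
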
Signed-off-by: Mel Gorman Signed-off-by: Peter Zijlstra (Intel) Cc: Giovanni Gherdovich Cc: Linus Torvalds Cc: Matt Fleming Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20180213133730.24064-7-mgorman@techsingularity.net Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 57 ++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 56 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 94aea5b91a96..33662a3bdc6d 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -1869,6 +1869,7 @@ static int task_numa_migrate(struct task_struct *p) static void numa_migrate_preferred(struct task_struct *p) { unsigned long interval = HZ; + unsigned long numa_migrate_retry; /* This task has no NUMA fault statistics yet */ if (unlikely(p->numa_preferred_nid == -1 || !p->numa_faults)) @@ -1876,7 +1877,18 @@ static void numa_migrate_preferred(struct task_struct *p) /* Periodically retry migrating the task to the preferred node */ interval = min(interval, msecs_to_jiffies(p->numa_scan_period) / 16); - p->numa_migrate_retry = jiffies + interval; + numa_migrate_retry = jiffies + interval; + + /* + * Check that the new retry threshold is after the current one. If + * the retry is in the future, it implies that wake_affine has + * temporarily asked NUMA balancing to backoff from placement. + */ + if (numa_migrate_retry > p->numa_migrate_retry) + return; + + /* Safe to try placing the task on the preferred node */ + p->numa_migrate_retry = numa_migrate_retry; /* Success if task is already running on preferred CPU */ if (task_node(p) == p->numa_preferred_nid) @@ -5759,6 +5771,48 @@ wake_affine_weight(struct sched_domain *sd, struct task_struct *p, return this_eff_load < prev_eff_load ? this_cpu : nr_cpumask_bits; } +#ifdef CONFIG_NUMA_BALANCING +static void +update_wa_numa_placement(struct task_struct *p, int prev_cpu, int target) +{ + unsigned long interval; + + if (!static_branch_likely(&sched_numa_balancing)) + return; + + /* If balancing has no preference then continue gathering data */ + if (p->numa_preferred_nid == -1) + return; + + /* + * If the wakeup is not affecting locality then it is neutral from + * the perspective of NUMA balacing so continue gathering data. + */ + if (cpu_to_node(prev_cpu) == cpu_to_node(target)) + return; + + /* + * Temporarily prevent NUMA balancing trying to place waker/wakee after + * wakee has been moved by wake_affine. This will potentially allow + * related tasks to converge and update their data placement. The + * 4 * numa_scan_period is to allow the two-pass filter to migrate + * hot data to the wakers node. 
+ */ + interval = max(sysctl_numa_balancing_scan_delay, + p->numa_scan_period << 2); + p->numa_migrate_retry = jiffies + msecs_to_jiffies(interval); + + interval = max(sysctl_numa_balancing_scan_delay, + current->numa_scan_period << 2); + current->numa_migrate_retry = jiffies + msecs_to_jiffies(interval); +} +#else +static void +update_wa_numa_placement(struct task_struct *p, int prev_cpu, int target) +{ +} +#endif + static int wake_affine(struct sched_domain *sd, struct task_struct *p, int this_cpu, int prev_cpu, int sync) { @@ -5774,6 +5828,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, if (target == nr_cpumask_bits) return prev_cpu; + update_wa_numa_placement(p, prev_cpu, target); schedstat_inc(sd->ttwu_move_affine); schedstat_inc(p->se.statistics.nr_wakeups_affine); return target; -- cgit v1.2.3 From 77a021be383ebdacb17594e280242f7fd116095f Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Wed, 21 Feb 2018 05:17:23 +0100 Subject: sched/core: Rename init_rq_hrtick() to hrtick_rq_init() Do that rename in order to normalize the hrtick namespace. Signed-off-by: Frederic Weisbecker Reviewed-by: Thomas Gleixner Acked-by: Peter Zijlstra Cc: Chris Metcalf Cc: Christoph Lameter Cc: Linus Torvalds Cc: Luiz Capitulino Cc: Mike Galbraith Cc: Paul E. McKenney Cc: Rik van Riel Cc: Wanpeng Li Link: http://lkml.kernel.org/r/1519186649-3242-2-git-send-email-frederic@kernel.org Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/core.c b/kernel/sched/core.c index e7c535eee0a6..e72ca3c574fc 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -333,7 +333,7 @@ void hrtick_start(struct rq *rq, u64 delay) } #endif /* CONFIG_SMP */ -static void init_rq_hrtick(struct rq *rq) +static void hrtick_rq_init(struct rq *rq) { #ifdef CONFIG_SMP rq->hrtick_csd_pending = 0; @@ -351,7 +351,7 @@ static inline void hrtick_clear(struct rq *rq) { } -static inline void init_rq_hrtick(struct rq *rq) +static inline void hrtick_rq_init(struct rq *rq) { } #endif /* CONFIG_SCHED_HRTICK */ @@ -6028,7 +6028,7 @@ void __init sched_init(void) rq->last_sched_tick = 0; #endif #endif /* CONFIG_SMP */ - init_rq_hrtick(rq); + hrtick_rq_init(rq); atomic_set(&rq->nr_iowait, 0); } -- cgit v1.2.3 From a364298359e74a414857bbbf3b725564feb22d09 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Wed, 21 Feb 2018 05:17:24 +0100 Subject: nohz: Convert tick_nohz_tick_stopped() to bool It makes this function more self-explanatory about what it does and how to use it. Reported-by: Thomas Gleixner Signed-off-by: Frederic Weisbecker Reviewed-by: Thomas Gleixner Acked-by: Peter Zijlstra Cc: Chris Metcalf Cc: Christoph Lameter Cc: Linus Torvalds Cc: Luiz Capitulino Cc: Mike Galbraith Cc: Paul E. 
McKenney Cc: Rik van Riel Cc: Wanpeng Li Link: http://lkml.kernel.org/r/1519186649-3242-3-git-send-email-frederic@kernel.org Signed-off-by: Ingo Molnar --- include/linux/tick.h | 2 +- kernel/time/tick-sched.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/include/linux/tick.h b/include/linux/tick.h index 7cc35921218e..86576d9d2311 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h @@ -113,7 +113,7 @@ enum tick_dep_bits { #ifdef CONFIG_NO_HZ_COMMON extern bool tick_nohz_enabled; -extern int tick_nohz_tick_stopped(void); +extern bool tick_nohz_tick_stopped(void); extern void tick_nohz_idle_enter(void); extern void tick_nohz_idle_exit(void); extern void tick_nohz_irq_exit(void); diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 29a5733eff83..0aba0412ede5 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -481,7 +481,7 @@ static int __init setup_tick_nohz(char *str) __setup("nohz=", setup_tick_nohz); -int tick_nohz_tick_stopped(void) +bool tick_nohz_tick_stopped(void) { return __this_cpu_read(tick_cpu_sched.tick_stopped); } -- cgit v1.2.3 From 22ab8bc02a5f6e8ffc418759894f7a6b0b632331 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Wed, 21 Feb 2018 05:17:25 +0100 Subject: nohz: Allow to check if remote CPU tick is stopped This check is racy but provides a good heuristic to determine whether a CPU may need a remote tick or not. Signed-off-by: Frederic Weisbecker Reviewed-by: Thomas Gleixner Acked-by: Peter Zijlstra Cc: Chris Metcalf Cc: Christoph Lameter Cc: Linus Torvalds Cc: Luiz Capitulino Cc: Mike Galbraith Cc: Paul E. McKenney Cc: Rik van Riel Cc: Wanpeng Li Link: http://lkml.kernel.org/r/1519186649-3242-4-git-send-email-frederic@kernel.org Signed-off-by: Ingo Molnar --- include/linux/tick.h | 2 ++ kernel/time/tick-sched.c | 7 +++++++ 2 files changed, 9 insertions(+) (limited to 'kernel') diff --git a/include/linux/tick.h b/include/linux/tick.h index 86576d9d2311..7f8c9a127f5a 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h @@ -114,6 +114,7 @@ enum tick_dep_bits { #ifdef CONFIG_NO_HZ_COMMON extern bool tick_nohz_enabled; extern bool tick_nohz_tick_stopped(void); +extern bool tick_nohz_tick_stopped_cpu(int cpu); extern void tick_nohz_idle_enter(void); extern void tick_nohz_idle_exit(void); extern void tick_nohz_irq_exit(void); @@ -125,6 +126,7 @@ extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time); #else /* !CONFIG_NO_HZ_COMMON */ #define tick_nohz_enabled (0) static inline int tick_nohz_tick_stopped(void) { return 0; } +static inline int tick_nohz_tick_stopped_cpu(int cpu) { return 0; } static inline void tick_nohz_idle_enter(void) { } static inline void tick_nohz_idle_exit(void) { } diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 0aba0412ede5..d479b21a848b 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -486,6 +486,13 @@ bool tick_nohz_tick_stopped(void) return __this_cpu_read(tick_cpu_sched.tick_stopped); } +bool tick_nohz_tick_stopped_cpu(int cpu) +{ + struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu); + + return ts->tick_stopped; +} + /** * tick_nohz_update_jiffies - update jiffies when idle was interrupted * -- cgit v1.2.3 From 1bda3f8087fce9063da0b8aef87f17a3fe541aca Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Wed, 21 Feb 2018 05:17:26 +0100 Subject: sched/isolation: Isolate workqueues when "nohz_full=" is set As we prepare for offloading the residual 1hz scheduler ticks to workqueue, 
let's affine those to housekeepers so that they don't interrupt the CPUs that don't want to be disturbed. Signed-off-by: Frederic Weisbecker Reviewed-by: Thomas Gleixner Acked-by: Peter Zijlstra Cc: Chris Metcalf Cc: Christoph Lameter Cc: Linus Torvalds Cc: Luiz Capitulino Cc: Mike Galbraith Cc: Paul E. McKenney Cc: Rik van Riel Cc: Wanpeng Li Link: http://lkml.kernel.org/r/1519186649-3242-5-git-send-email-frederic@kernel.org Signed-off-by: Ingo Molnar --- include/linux/sched/isolation.h | 1 + kernel/sched/isolation.c | 3 ++- kernel/workqueue.c | 3 ++- 3 files changed, 5 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/include/linux/sched/isolation.h b/include/linux/sched/isolation.h index d849431c8060..4a6582c27dea 100644 --- a/include/linux/sched/isolation.h +++ b/include/linux/sched/isolation.h @@ -12,6 +12,7 @@ enum hk_flags { HK_FLAG_SCHED = (1 << 3), HK_FLAG_TICK = (1 << 4), HK_FLAG_DOMAIN = (1 << 5), + HK_FLAG_WQ = (1 << 6), }; #ifdef CONFIG_CPU_ISOLATION diff --git a/kernel/sched/isolation.c b/kernel/sched/isolation.c index b71b436f59f2..a2500c459617 100644 --- a/kernel/sched/isolation.c +++ b/kernel/sched/isolation.c @@ -3,6 +3,7 @@ * any CPU: unbound workqueues, timers, kthreads and any offloadable work. * * Copyright (C) 2017 Red Hat, Inc., Frederic Weisbecker + * Copyright (C) 2017-2018 SUSE, Frederic Weisbecker * */ @@ -119,7 +120,7 @@ static int __init housekeeping_nohz_full_setup(char *str) { unsigned int flags; - flags = HK_FLAG_TICK | HK_FLAG_TIMER | HK_FLAG_RCU | HK_FLAG_MISC; + flags = HK_FLAG_TICK | HK_FLAG_WQ | HK_FLAG_TIMER | HK_FLAG_RCU | HK_FLAG_MISC; return housekeeping_setup(str, flags); } diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 017044c26233..593dbe749174 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -5565,12 +5565,13 @@ static void __init wq_numa_init(void) int __init workqueue_init_early(void) { int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL }; + int hk_flags = HK_FLAG_DOMAIN | HK_FLAG_WQ; int i, cpu; WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long)); BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL)); - cpumask_copy(wq_unbound_cpumask, housekeeping_cpumask(HK_FLAG_DOMAIN)); + cpumask_copy(wq_unbound_cpumask, housekeeping_cpumask(hk_flags)); pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC); -- cgit v1.2.3 From d84b31313ef8a8de55a2cbfb72f76f36d8c927fb Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Wed, 21 Feb 2018 05:17:27 +0100 Subject: sched/isolation: Offload residual 1Hz scheduler tick When a CPU runs in full dynticks mode, a 1Hz tick remains in order to keep the scheduler stats alive. However this residual tick is a burden for bare metal tasks that can't stand any interruption at all, or want to minimize them. The usual boot parameters "nohz_full=" or "isolcpus=nohz" will now outsource these scheduler ticks to the global workqueue so that a housekeeping CPU handles those remotely. The sched_class::task_tick() implementations have been audited and look safe to be called remotely as the target runqueue and its current task are passed in parameter and don't seem to be accessed locally. Note that in the case of using isolcpus, it's still up to the user to affine the global workqueues to the housekeeping CPUs through /sys/devices/virtual/workqueue/cpumask or domains isolation "isolcpus=nohz,domain". 
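The shape of the mechanism can be modeled outside the kernel. The sketch below is a plain C simulation, not the workqueue-based implementation: once per simulated second, a housekeeping CPU runs the tick on behalf of every CPU that looks like it is in full dynticks mode (busy, local tick stopped), mirroring the self-requeueing delayed work added by this patch. All of the CPU-state helpers are stubs with assumed values:

    /* toy_tick_offload.c - userspace simulation of the 1Hz remote tick idea */
    #include <stdio.h>
    #include <stdbool.h>

    #define NR_CPUS 4

    /* Pretend CPU state; in the kernel this comes from rq/tick bookkeeping. */
    static bool cpu_is_housekeeping(int cpu) { return cpu == 0; }
    static bool cpu_is_idle(int cpu)         { return cpu == 3; }
    static bool tick_stopped(int cpu)        { return cpu != 0; }

    /* Stand-in for sched_class::task_tick() run on behalf of a remote CPU. */
    static void remote_task_tick(int cpu, unsigned long second)
    {
            printf("  second %lu: housekeeper ticks for CPU %d\n", second, cpu);
    }

    int main(void)
    {
            unsigned long second;

            for (second = 0; second < 3; second++) {
                    int cpu;

                    printf("-- second %lu --\n", second);
                    for (cpu = 0; cpu < NR_CPUS; cpu++) {
                            /* Housekeeping CPUs keep their own periodic tick. */
                            if (cpu_is_housekeeping(cpu))
                                    continue;

                            /*
                             * Only tick remotely when the CPU looks like it is
                             * running nohz_full: busy with its local tick off.
                             */
                            if (!cpu_is_idle(cpu) && tick_stopped(cpu))
                                    remote_task_tick(cpu, second);
                    }
            }
            return 0;
    }
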
Signed-off-by: Frederic Weisbecker Reviewed-by: Thomas Gleixner Acked-by: Peter Zijlstra Cc: Chris Metcalf Cc: Christoph Lameter Cc: Linus Torvalds Cc: Luiz Capitulino Cc: Mike Galbraith Cc: Paul E. McKenney Cc: Rik van Riel Cc: Wanpeng Li Link: http://lkml.kernel.org/r/1519186649-3242-6-git-send-email-frederic@kernel.org Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 92 ++++++++++++++++++++++++++++++++++++++++++++++++ kernel/sched/deadline.c | 8 +++++ kernel/sched/fair.c | 7 +++- kernel/sched/idle_task.c | 8 +++++ kernel/sched/isolation.c | 4 +++ kernel/sched/rt.c | 8 +++++ kernel/sched/sched.h | 2 ++ kernel/sched/stop_task.c | 8 +++++ 8 files changed, 136 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/sched/core.c b/kernel/sched/core.c index e72ca3c574fc..5dfef458ab52 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -3125,6 +3125,96 @@ u64 scheduler_tick_max_deferment(void) return jiffies_to_nsecs(next - now); } + +struct tick_work { + int cpu; + struct delayed_work work; +}; + +static struct tick_work __percpu *tick_work_cpu; + +static void sched_tick_remote(struct work_struct *work) +{ + struct delayed_work *dwork = to_delayed_work(work); + struct tick_work *twork = container_of(dwork, struct tick_work, work); + int cpu = twork->cpu; + struct rq *rq = cpu_rq(cpu); + struct rq_flags rf; + + /* + * Handle the tick only if it appears the remote CPU is running in full + * dynticks mode. The check is racy by nature, but missing a tick or + * having one too much is no big deal because the scheduler tick updates + * statistics and checks timeslices in a time-independent way, regardless + * of when exactly it is running. + */ + if (!idle_cpu(cpu) && tick_nohz_tick_stopped_cpu(cpu)) { + struct task_struct *curr; + u64 delta; + + rq_lock_irq(rq, &rf); + update_rq_clock(rq); + curr = rq->curr; + delta = rq_clock_task(rq) - curr->se.exec_start; + + /* + * Make sure the next tick runs within a reasonable + * amount of time. + */ + WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3); + curr->sched_class->task_tick(rq, curr, 0); + rq_unlock_irq(rq, &rf); + } + + /* + * Run the remote tick once per second (1Hz). This arbitrary + * frequency is large enough to avoid overload but short enough + * to keep scheduler internal stats reasonably up to date. 
+ */ + queue_delayed_work(system_unbound_wq, dwork, HZ); +} + +static void sched_tick_start(int cpu) +{ + struct tick_work *twork; + + if (housekeeping_cpu(cpu, HK_FLAG_TICK)) + return; + + WARN_ON_ONCE(!tick_work_cpu); + + twork = per_cpu_ptr(tick_work_cpu, cpu); + twork->cpu = cpu; + INIT_DELAYED_WORK(&twork->work, sched_tick_remote); + queue_delayed_work(system_unbound_wq, &twork->work, HZ); +} + +#ifdef CONFIG_HOTPLUG_CPU +static void sched_tick_stop(int cpu) +{ + struct tick_work *twork; + + if (housekeeping_cpu(cpu, HK_FLAG_TICK)) + return; + + WARN_ON_ONCE(!tick_work_cpu); + + twork = per_cpu_ptr(tick_work_cpu, cpu); + cancel_delayed_work_sync(&twork->work); +} +#endif /* CONFIG_HOTPLUG_CPU */ + +int __init sched_tick_offload_init(void) +{ + tick_work_cpu = alloc_percpu(struct tick_work); + BUG_ON(!tick_work_cpu); + + return 0; +} + +#else /* !CONFIG_NO_HZ_FULL */ +static inline void sched_tick_start(int cpu) { } +static inline void sched_tick_stop(int cpu) { } #endif #if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \ @@ -5786,6 +5876,7 @@ int sched_cpu_starting(unsigned int cpu) { set_cpu_rq_start_time(cpu); sched_rq_cpu_starting(cpu); + sched_tick_start(cpu); return 0; } @@ -5797,6 +5888,7 @@ int sched_cpu_dying(unsigned int cpu) /* Handle pending wakeups and then migrate everything off */ sched_ttwu_pending(); + sched_tick_stop(cpu); rq_lock_irqsave(rq, &rf); if (rq->rd) { diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 9df09782025c..65cd5ead1759 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -1776,6 +1776,14 @@ static void put_prev_task_dl(struct rq *rq, struct task_struct *p) enqueue_pushable_dl_task(rq, p); } +/* + * scheduler tick hitting a task of our scheduling class. + * + * NOTE: This function can be called remotely by the tick offload that + * goes along full dynticks. Therefore no local assumption can be made + * and everything must be accessed through the @rq and @curr passed in + * parameters. + */ static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued) { update_curr_dl(rq); diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 33662a3bdc6d..e1febd252a84 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -9515,7 +9515,12 @@ static void rq_offline_fair(struct rq *rq) #endif /* CONFIG_SMP */ /* - * scheduler tick hitting a task of our scheduling class: + * scheduler tick hitting a task of our scheduling class. + * + * NOTE: This function can be called remotely by the tick offload that + * goes along full dynticks. Therefore no local assumption can be made + * and everything must be accessed through the @rq and @curr passed in + * parameters. */ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued) { diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c index d518664cce4f..e1b46e08c8e1 100644 --- a/kernel/sched/idle_task.c +++ b/kernel/sched/idle_task.c @@ -51,6 +51,14 @@ static void put_prev_task_idle(struct rq *rq, struct task_struct *prev) rq_last_tick_reset(rq); } +/* + * scheduler tick hitting a task of our scheduling class. + * + * NOTE: This function can be called remotely by the tick offload that + * goes along full dynticks. Therefore no local assumption can be made + * and everything must be accessed through the @rq and @curr passed in + * parameters. 
+ */ static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued) { } diff --git a/kernel/sched/isolation.c b/kernel/sched/isolation.c index a2500c459617..39f340dde1d7 100644 --- a/kernel/sched/isolation.c +++ b/kernel/sched/isolation.c @@ -13,6 +13,7 @@ #include #include #include +#include "sched.h" DEFINE_STATIC_KEY_FALSE(housekeeping_overriden); EXPORT_SYMBOL_GPL(housekeeping_overriden); @@ -61,6 +62,9 @@ void __init housekeeping_init(void) static_branch_enable(&housekeeping_overriden); + if (housekeeping_flags & HK_FLAG_TICK) + sched_tick_offload_init(); + /* We need at least one CPU to handle housekeeping work */ WARN_ON_ONCE(cpumask_empty(housekeeping_mask)); } diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index aad49451584e..c80563b4f6b9 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -2292,6 +2292,14 @@ static void watchdog(struct rq *rq, struct task_struct *p) static inline void watchdog(struct rq *rq, struct task_struct *p) { } #endif +/* + * scheduler tick hitting a task of our scheduling class. + * + * NOTE: This function can be called remotely by the tick offload that + * goes along full dynticks. Therefore no local assumption can be made + * and everything must be accessed through the @rq and @curr passed in + * parameters. + */ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued) { struct sched_rt_entity *rt_se = &p->rt; diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index fb5fc458547f..c1c7c788da1c 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1574,6 +1574,7 @@ extern void post_init_entity_util_avg(struct sched_entity *se); #ifdef CONFIG_NO_HZ_FULL extern bool sched_can_stop_tick(struct rq *rq); +extern int __init sched_tick_offload_init(void); /* * Tick may be needed by tasks in the runqueue depending on their policy and @@ -1598,6 +1599,7 @@ static inline void sched_update_tick_dependency(struct rq *rq) tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED); } #else +static inline int sched_tick_offload_init(void) { return 0; } static inline void sched_update_tick_dependency(struct rq *rq) { } #endif diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c index 210b1f2146ff..ea8d2b6a1239 100644 --- a/kernel/sched/stop_task.c +++ b/kernel/sched/stop_task.c @@ -75,6 +75,14 @@ static void put_prev_task_stop(struct rq *rq, struct task_struct *prev) cgroup_account_cputime(curr, delta_exec); } +/* + * scheduler tick hitting a task of our scheduling class. + * + * NOTE: This function can be called remotely by the tick offload that + * goes along full dynticks. Therefore no local assumption can be made + * and everything must be accessed through the @rq and @curr passed in + * parameters. + */ static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued) { } -- cgit v1.2.3 From dcdedb24159be3487e3dbbe1faa79ae7d00c92ac Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Wed, 21 Feb 2018 05:17:28 +0100 Subject: sched/nohz: Remove the 1 Hz tick code Now that the 1Hz tick is offloaded to workqueues, we can safely remove the residual code that used to handle it locally. Signed-off-by: Frederic Weisbecker Reviewed-by: Thomas Gleixner Acked-by: Peter Zijlstra Cc: Chris Metcalf Cc: Christoph Lameter Cc: Linus Torvalds Cc: Luiz Capitulino Cc: Mike Galbraith Cc: Paul E. 
McKenney Cc: Rik van Riel Cc: Wanpeng Li Link: http://lkml.kernel.org/r/1519186649-3242-7-git-send-email-frederic@kernel.org Signed-off-by: Ingo Molnar --- include/linux/sched/nohz.h | 4 ---- kernel/sched/core.c | 29 ----------------------------- kernel/sched/idle_task.c | 1 - kernel/sched/sched.h | 11 +---------- kernel/time/tick-sched.c | 6 ------ 5 files changed, 1 insertion(+), 50 deletions(-) (limited to 'kernel') diff --git a/include/linux/sched/nohz.h b/include/linux/sched/nohz.h index 3d3a97d9399d..094217273ff9 100644 --- a/include/linux/sched/nohz.h +++ b/include/linux/sched/nohz.h @@ -37,8 +37,4 @@ extern void wake_up_nohz_cpu(int cpu); static inline void wake_up_nohz_cpu(int cpu) { } #endif -#ifdef CONFIG_NO_HZ_FULL -extern u64 scheduler_tick_max_deferment(void); -#endif - #endif /* _LINUX_SCHED_NOHZ_H */ diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 5dfef458ab52..8fff4f16c510 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -3096,35 +3096,9 @@ void scheduler_tick(void) rq->idle_balance = idle_cpu(cpu); trigger_load_balance(rq); #endif - rq_last_tick_reset(rq); } #ifdef CONFIG_NO_HZ_FULL -/** - * scheduler_tick_max_deferment - * - * Keep at least one tick per second when a single - * active task is running because the scheduler doesn't - * yet completely support full dynticks environment. - * - * This makes sure that uptime, CFS vruntime, load - * balancing, etc... continue to move forward, even - * with a very low granularity. - * - * Return: Maximum deferment in nanoseconds. - */ -u64 scheduler_tick_max_deferment(void) -{ - struct rq *rq = this_rq(); - unsigned long next, now = READ_ONCE(jiffies); - - next = rq->last_sched_tick + HZ; - - if (time_before_eq(next, now)) - return 0; - - return jiffies_to_nsecs(next - now); -} struct tick_work { int cpu; @@ -6116,9 +6090,6 @@ void __init sched_init(void) rq->last_load_update_tick = jiffies; rq->nohz_flags = 0; #endif -#ifdef CONFIG_NO_HZ_FULL - rq->last_sched_tick = 0; -#endif #endif /* CONFIG_SMP */ hrtick_rq_init(rq); atomic_set(&rq->nr_iowait, 0); diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c index e1b46e08c8e1..48b8a83f5185 100644 --- a/kernel/sched/idle_task.c +++ b/kernel/sched/idle_task.c @@ -48,7 +48,6 @@ dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags) static void put_prev_task_idle(struct rq *rq, struct task_struct *prev) { - rq_last_tick_reset(rq); } /* diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index c1c7c788da1c..dc6c8b5a24ad 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -727,9 +727,7 @@ struct rq { #endif /* CONFIG_SMP */ unsigned long nohz_flags; #endif /* CONFIG_NO_HZ_COMMON */ -#ifdef CONFIG_NO_HZ_FULL - unsigned long last_sched_tick; -#endif + /* capture load from *all* tasks on this cpu: */ struct load_weight load; unsigned long nr_load_updates; @@ -1626,13 +1624,6 @@ static inline void sub_nr_running(struct rq *rq, unsigned count) sched_update_tick_dependency(rq); } -static inline void rq_last_tick_reset(struct rq *rq) -{ -#ifdef CONFIG_NO_HZ_FULL - rq->last_sched_tick = jiffies; -#endif -} - extern void update_rq_clock(struct rq *rq); extern void activate_task(struct rq *rq, struct task_struct *p, int flags); diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index d479b21a848b..f2fa2e940fe5 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -748,12 +748,6 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts, delta = KTIME_MAX; } -#ifdef CONFIG_NO_HZ_FULL - /* Limit 
the tick delta to the maximum scheduler deferment */ - if (!ts->inidle) - delta = min(delta, scheduler_tick_max_deferment()); -#endif - /* Calculate the next expiry time */ if (delta < (KTIME_MAX - basemono)) expires = basemono + delta; -- cgit v1.2.3 From c2e513821d5df5e772287f6d0c23fd17b7c2bb1a Mon Sep 17 00:00:00 2001 From: Mario Leinweber Date: Fri, 2 Mar 2018 13:20:07 -0500 Subject: sched/deadline: Clean up various coding style details - Fixed style error: Missing space before the open parenthesis - Fixed style warnings: 2x Missing blank line after declaration One warning left: else after return (I don't feel comfortable fixing that without side effects) Signed-off-by: Mario Leinweber Cc: Linus Torvalds Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kernel@vger.kernel.org Link: http://lkml.kernel.org/r/20180302182007.28691-1-marioleinweber@web.de Signed-off-by: Ingo Molnar --- kernel/sched/cpudeadline.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c index 8d9562d890d3..6a9defebbb54 100644 --- a/kernel/sched/cpudeadline.c +++ b/kernel/sched/cpudeadline.c @@ -42,8 +42,9 @@ static void cpudl_heapify_down(struct cpudl *cp, int idx) return; /* adapted from lib/prio_heap.c */ - while(1) { + while (1) { u64 largest_dl; + l = left_child(idx); r = right_child(idx); largest = idx; @@ -131,6 +132,7 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p, return 1; } else { int best_cpu = cpudl_maximum(cp); + WARN_ON(best_cpu != -1 && !cpu_present(best_cpu)); if (cpumask_test_cpu(best_cpu, &p->cpus_allowed) && @@ -205,6 +207,7 @@ void cpudl_set(struct cpudl *cp, int cpu, u64 dl) old_idx = cp->elements[cpu].idx; if (old_idx == IDX_INVALID) { int new_idx = cp->size++; + cp->elements[new_idx].dl = dl; cp->elements[new_idx].cpu = cpu; cp->elements[cpu].idx = new_idx; -- cgit v1.2.3 From 97fb7a0a8944bd6d2c5634e1e0fa689a5c40bc22 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sat, 3 Mar 2018 14:01:12 +0100 Subject: sched: Clean up and harmonize the coding style of the scheduler code base A good number of small style inconsistencies have accumulated in the scheduler core, so do a pass over them to harmonize all these details: - fix speling in comments, - use curly braces for multi-line statements, - remove unnecessary parentheses from integer literals, - capitalize consistently, - remove stray newlines, - add comments where necessary, - remove invalid/unnecessary comments, - align structure definitions and other data types vertically, - add missing newlines for increased readability, - fix vertical tabulation where it's misaligned, - harmonize preprocessor conditional block labeling and vertical alignment, - remove line-breaks where they uglify the code, - add newline after local variable definitions, No change in functionality: md5: 1191fa0a890cfa8132156d2959d7e9e2 built-in.o.before.asm 1191fa0a890cfa8132156d2959d7e9e2 built-in.o.after.asm Cc: Linus Torvalds Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- kernel/sched/autogroup.c | 12 +- kernel/sched/autogroup.h | 8 +- kernel/sched/clock.c | 22 +- kernel/sched/core.c | 6 +- kernel/sched/cpuacct.c | 20 +- kernel/sched/cpudeadline.c | 13 +- kernel/sched/cpudeadline.h | 27 +- kernel/sched/cpufreq_schedutil.c | 129 +++++----- kernel/sched/cpupri.c | 9 +- kernel/sched/cpupri.h | 24 +- kernel/sched/cputime.c | 48 ++-- kernel/sched/deadline.c | 51 ++-- 
kernel/sched/debug.c | 88 +++---- kernel/sched/fair.c | 183 +++++++------- kernel/sched/idle.c | 6 +- kernel/sched/idle_task.c | 3 +- kernel/sched/isolation.c | 2 +- kernel/sched/loadavg.c | 30 +-- kernel/sched/membarrier.c | 18 +- kernel/sched/rt.c | 25 +- kernel/sched/sched.h | 529 ++++++++++++++++++++------------------- kernel/sched/stats.c | 7 +- kernel/sched/stats.h | 86 +++---- kernel/sched/stop_task.c | 3 +- kernel/sched/swait.c | 3 + kernel/sched/topology.c | 42 ++-- kernel/sched/wait.c | 4 + kernel/sched/wait_bit.c | 18 +- 28 files changed, 706 insertions(+), 710 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/autogroup.c b/kernel/sched/autogroup.c index bb4b9fe026a1..ff1b7b647b86 100644 --- a/kernel/sched/autogroup.c +++ b/kernel/sched/autogroup.c @@ -168,18 +168,19 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag) autogroup_kref_put(prev); } -/* Allocates GFP_KERNEL, cannot be called under any spinlock */ +/* Allocates GFP_KERNEL, cannot be called under any spinlock: */ void sched_autogroup_create_attach(struct task_struct *p) { struct autogroup *ag = autogroup_create(); autogroup_move_group(p, ag); - /* drop extra reference added by autogroup_create() */ + + /* Drop extra reference added by autogroup_create(): */ autogroup_kref_put(ag); } EXPORT_SYMBOL(sched_autogroup_create_attach); -/* Cannot be called under siglock. Currently has no users */ +/* Cannot be called under siglock. Currently has no users: */ void sched_autogroup_detach(struct task_struct *p) { autogroup_move_group(p, &autogroup_default); @@ -202,7 +203,6 @@ static int __init setup_autogroup(char *str) return 1; } - __setup("noautogroup", setup_autogroup); #ifdef CONFIG_PROC_FS @@ -224,7 +224,7 @@ int proc_sched_autogroup_set_nice(struct task_struct *p, int nice) if (nice < 0 && !can_nice(current, nice)) return -EPERM; - /* this is a heavy operation taking global locks.. */ + /* This is a heavy operation, taking global locks.. */ if (!capable(CAP_SYS_ADMIN) && time_before(jiffies, next)) return -EAGAIN; @@ -267,4 +267,4 @@ int autogroup_path(struct task_group *tg, char *buf, int buflen) return snprintf(buf, buflen, "%s-%ld", "/autogroup", tg->autogroup->id); } -#endif /* CONFIG_SCHED_DEBUG */ +#endif diff --git a/kernel/sched/autogroup.h b/kernel/sched/autogroup.h index 27cd22b89824..49e6ec9559cf 100644 --- a/kernel/sched/autogroup.h +++ b/kernel/sched/autogroup.h @@ -7,9 +7,9 @@ struct autogroup { /* - * reference doesn't mean how many thread attach to this - * autogroup now. It just stands for the number of task - * could use this autogroup. + * Reference doesn't mean how many threads attach to this + * autogroup now. It just stands for the number of tasks + * which could use this autogroup. 
*/ struct kref kref; struct task_group *tg; @@ -56,11 +56,9 @@ autogroup_task_group(struct task_struct *p, struct task_group *tg) return tg; } -#ifdef CONFIG_SCHED_DEBUG static inline int autogroup_path(struct task_group *tg, char *buf, int buflen) { return 0; } -#endif #endif /* CONFIG_SCHED_AUTOGROUP */ diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c index e086babe6c61..7da6bec8a2ff 100644 --- a/kernel/sched/clock.c +++ b/kernel/sched/clock.c @@ -1,5 +1,5 @@ /* - * sched_clock for unstable cpu clocks + * sched_clock() for unstable CPU clocks * * Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra * @@ -11,7 +11,7 @@ * Guillaume Chazarain * * - * What: + * What this file implements: * * cpu_clock(i) provides a fast (execution time) high resolution * clock with bounded drift between CPUs. The value of cpu_clock(i) @@ -26,11 +26,11 @@ * at 0 on boot (but people really shouldn't rely on that). * * cpu_clock(i) -- can be used from any context, including NMI. - * local_clock() -- is cpu_clock() on the current cpu. + * local_clock() -- is cpu_clock() on the current CPU. * * sched_clock_cpu(i) * - * How: + * How it is implemented: * * The implementation either uses sched_clock() when * !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK, which means in that case the @@ -302,21 +302,21 @@ again: * cmpxchg64 below only protects one readout. * * We must reread via sched_clock_local() in the retry case on - * 32bit as an NMI could use sched_clock_local() via the + * 32-bit kernels as an NMI could use sched_clock_local() via the * tracer and hit between the readout of - * the low32bit and the high 32bit portion. + * the low 32-bit and the high 32-bit portion. */ this_clock = sched_clock_local(my_scd); /* - * We must enforce atomic readout on 32bit, otherwise the - * update on the remote cpu can hit inbetween the readout of - * the low32bit and the high 32bit portion. + * We must enforce atomic readout on 32-bit, otherwise the + * update on the remote CPU can hit inbetween the readout of + * the low 32-bit and the high 32-bit portion. */ remote_clock = cmpxchg64(&scd->clock, 0, 0); #else /* - * On 64bit the read of [my]scd->clock is atomic versus the - * update, so we can avoid the above 32bit dance. + * On 64-bit kernels the read of [my]scd->clock is atomic versus the + * update, so we can avoid the above 32-bit dance. */ sched_clock_local(my_scd); again: diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 8fff4f16c510..9427b59551c1 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -135,7 +135,7 @@ struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) * [L] ->on_rq * RELEASE (rq->lock) * - * If we observe the old cpu in task_rq_lock, the acquire of + * If we observe the old CPU in task_rq_lock, the acquire of * the old rq->lock will fully serialize against the stores. * * If we observe the new CPU in task_rq_lock, the acquire will @@ -1457,7 +1457,7 @@ EXPORT_SYMBOL_GPL(kick_process); * * - cpu_active must be a subset of cpu_online * - * - on cpu-up we allow per-cpu kthreads on the online && !active cpu, + * - on CPU-up we allow per-CPU kthreads on the online && !active CPU, * see __set_cpus_allowed_ptr(). At this point the newly online * CPU isn't yet part of the sched domains, and balancing will not * see it. @@ -3037,7 +3037,7 @@ unsigned long long task_sched_runtime(struct task_struct *p) #if defined(CONFIG_64BIT) && defined(CONFIG_SMP) /* - * 64-bit doesn't need locks to atomically read a 64bit value. + * 64-bit doesn't need locks to atomically read a 64-bit value. 
* So we have a optimization chance when the task's delta_exec is 0. * Reading ->on_cpu is racy, but this is ok. * diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index 44ab32a4fab6..1abd325e733a 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -18,7 +18,7 @@ * (balbir@in.ibm.com). */ -/* Time spent by the tasks of the cpu accounting group executing in ... */ +/* Time spent by the tasks of the CPU accounting group executing in ... */ enum cpuacct_stat_index { CPUACCT_STAT_USER, /* ... user mode */ CPUACCT_STAT_SYSTEM, /* ... kernel mode */ @@ -35,12 +35,12 @@ struct cpuacct_usage { u64 usages[CPUACCT_STAT_NSTATS]; }; -/* track cpu usage of a group of tasks and its child groups */ +/* track CPU usage of a group of tasks and its child groups */ struct cpuacct { - struct cgroup_subsys_state css; - /* cpuusage holds pointer to a u64-type object on every cpu */ - struct cpuacct_usage __percpu *cpuusage; - struct kernel_cpustat __percpu *cpustat; + struct cgroup_subsys_state css; + /* cpuusage holds pointer to a u64-type object on every CPU */ + struct cpuacct_usage __percpu *cpuusage; + struct kernel_cpustat __percpu *cpustat; }; static inline struct cpuacct *css_ca(struct cgroup_subsys_state *css) @@ -48,7 +48,7 @@ static inline struct cpuacct *css_ca(struct cgroup_subsys_state *css) return css ? container_of(css, struct cpuacct, css) : NULL; } -/* return cpu accounting group to which this task belongs */ +/* Return CPU accounting group to which this task belongs */ static inline struct cpuacct *task_ca(struct task_struct *tsk) { return css_ca(task_css(tsk, cpuacct_cgrp_id)); @@ -65,7 +65,7 @@ static struct cpuacct root_cpuacct = { .cpuusage = &root_cpuacct_cpuusage, }; -/* create a new cpu accounting group */ +/* Create a new CPU accounting group */ static struct cgroup_subsys_state * cpuacct_css_alloc(struct cgroup_subsys_state *parent_css) { @@ -96,7 +96,7 @@ out: return ERR_PTR(-ENOMEM); } -/* destroy an existing cpu accounting group */ +/* Destroy an existing CPU accounting group */ static void cpuacct_css_free(struct cgroup_subsys_state *css) { struct cpuacct *ca = css_ca(css); @@ -162,7 +162,7 @@ static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val) #endif } -/* return total cpu usage (in nanoseconds) of a group */ +/* Return total CPU usage (in nanoseconds) of a group */ static u64 __cpuusage_read(struct cgroup_subsys_state *css, enum cpuacct_stat_index index) { diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c index 6a9defebbb54..cb172b61d191 100644 --- a/kernel/sched/cpudeadline.c +++ b/kernel/sched/cpudeadline.c @@ -10,7 +10,6 @@ * as published by the Free Software Foundation; version 2 * of the License. 
*/ - #include #include #include @@ -147,9 +146,9 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p, } /* - * cpudl_clear - remove a cpu from the cpudl max-heap + * cpudl_clear - remove a CPU from the cpudl max-heap * @cp: the cpudl max-heap context - * @cpu: the target cpu + * @cpu: the target CPU * * Notes: assumes cpu_rq(cpu)->lock is locked * @@ -188,8 +187,8 @@ void cpudl_clear(struct cpudl *cp, int cpu) /* * cpudl_set - update the cpudl max-heap * @cp: the cpudl max-heap context - * @cpu: the target cpu - * @dl: the new earliest deadline for this cpu + * @cpu: the target CPU + * @dl: the new earliest deadline for this CPU * * Notes: assumes cpu_rq(cpu)->lock is locked * @@ -224,7 +223,7 @@ void cpudl_set(struct cpudl *cp, int cpu, u64 dl) /* * cpudl_set_freecpu - Set the cpudl.free_cpus * @cp: the cpudl max-heap context - * @cpu: rd attached cpu + * @cpu: rd attached CPU */ void cpudl_set_freecpu(struct cpudl *cp, int cpu) { @@ -234,7 +233,7 @@ void cpudl_set_freecpu(struct cpudl *cp, int cpu) /* * cpudl_clear_freecpu - Clear the cpudl.free_cpus * @cp: the cpudl max-heap context - * @cpu: rd attached cpu + * @cpu: rd attached CPU */ void cpudl_clear_freecpu(struct cpudl *cp, int cpu) { diff --git a/kernel/sched/cpudeadline.h b/kernel/sched/cpudeadline.h index b010d26e108e..c26e7a0e5a66 100644 --- a/kernel/sched/cpudeadline.h +++ b/kernel/sched/cpudeadline.h @@ -1,35 +1,28 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LINUX_CPUDL_H -#define _LINUX_CPUDL_H - #include #include -#define IDX_INVALID -1 +#define IDX_INVALID -1 struct cpudl_item { - u64 dl; - int cpu; - int idx; + u64 dl; + int cpu; + int idx; }; struct cpudl { - raw_spinlock_t lock; - int size; - cpumask_var_t free_cpus; - struct cpudl_item *elements; + raw_spinlock_t lock; + int size; + cpumask_var_t free_cpus; + struct cpudl_item *elements; }; - #ifdef CONFIG_SMP -int cpudl_find(struct cpudl *cp, struct task_struct *p, - struct cpumask *later_mask); +int cpudl_find(struct cpudl *cp, struct task_struct *p, struct cpumask *later_mask); void cpudl_set(struct cpudl *cp, int cpu, u64 dl); void cpudl_clear(struct cpudl *cp, int cpu); -int cpudl_init(struct cpudl *cp); +int cpudl_init(struct cpudl *cp); void cpudl_set_freecpu(struct cpudl *cp, int cpu); void cpudl_clear_freecpu(struct cpudl *cp, int cpu); void cpudl_cleanup(struct cpudl *cp); #endif /* CONFIG_SMP */ - -#endif /* _LINUX_CPUDL_H */ diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index 7936f548e071..0dad8160e00f 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c @@ -20,52 +20,52 @@ #include "sched.h" struct sugov_tunables { - struct gov_attr_set attr_set; - unsigned int rate_limit_us; + struct gov_attr_set attr_set; + unsigned int rate_limit_us; }; struct sugov_policy { - struct cpufreq_policy *policy; - - struct sugov_tunables *tunables; - struct list_head tunables_hook; - - raw_spinlock_t update_lock; /* For shared policies */ - u64 last_freq_update_time; - s64 freq_update_delay_ns; - unsigned int next_freq; - unsigned int cached_raw_freq; - - /* The next fields are only needed if fast switch cannot be used. 
*/ - struct irq_work irq_work; - struct kthread_work work; - struct mutex work_lock; - struct kthread_worker worker; - struct task_struct *thread; - bool work_in_progress; - - bool need_freq_update; + struct cpufreq_policy *policy; + + struct sugov_tunables *tunables; + struct list_head tunables_hook; + + raw_spinlock_t update_lock; /* For shared policies */ + u64 last_freq_update_time; + s64 freq_update_delay_ns; + unsigned int next_freq; + unsigned int cached_raw_freq; + + /* The next fields are only needed if fast switch cannot be used: */ + struct irq_work irq_work; + struct kthread_work work; + struct mutex work_lock; + struct kthread_worker worker; + struct task_struct *thread; + bool work_in_progress; + + bool need_freq_update; }; struct sugov_cpu { - struct update_util_data update_util; - struct sugov_policy *sg_policy; - unsigned int cpu; + struct update_util_data update_util; + struct sugov_policy *sg_policy; + unsigned int cpu; - bool iowait_boost_pending; - unsigned int iowait_boost; - unsigned int iowait_boost_max; + bool iowait_boost_pending; + unsigned int iowait_boost; + unsigned int iowait_boost_max; u64 last_update; - /* The fields below are only needed when sharing a policy. */ - unsigned long util_cfs; - unsigned long util_dl; - unsigned long max; - unsigned int flags; + /* The fields below are only needed when sharing a policy: */ + unsigned long util_cfs; + unsigned long util_dl; + unsigned long max; + unsigned int flags; - /* The field below is for single-CPU policies only. */ + /* The field below is for single-CPU policies only: */ #ifdef CONFIG_NO_HZ_COMMON - unsigned long saved_idle_calls; + unsigned long saved_idle_calls; #endif }; @@ -79,9 +79,9 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time) /* * Since cpufreq_update_util() is called with rq->lock held for - * the @target_cpu, our per-cpu data is fully serialized. + * the @target_cpu, our per-CPU data is fully serialized. * - * However, drivers cannot in general deal with cross-cpu + * However, drivers cannot in general deal with cross-CPU * requests, so while get_next_freq() will work, our * sugov_update_commit() call may not for the fast switching platforms. 
* @@ -111,6 +111,7 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time) } delta_ns = time - sg_policy->last_freq_update_time; + return delta_ns >= sg_policy->freq_update_delay_ns; } @@ -345,8 +346,8 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time) return get_next_freq(sg_policy, util, max); } -static void sugov_update_shared(struct update_util_data *hook, u64 time, - unsigned int flags) +static void +sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags) { struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util); struct sugov_policy *sg_policy = sg_cpu->sg_policy; @@ -423,8 +424,8 @@ static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf) return sprintf(buf, "%u\n", tunables->rate_limit_us); } -static ssize_t rate_limit_us_store(struct gov_attr_set *attr_set, const char *buf, - size_t count) +static ssize_t +rate_limit_us_store(struct gov_attr_set *attr_set, const char *buf, size_t count) { struct sugov_tunables *tunables = to_sugov_tunables(attr_set); struct sugov_policy *sg_policy; @@ -479,11 +480,11 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy) { struct task_struct *thread; struct sched_attr attr = { - .size = sizeof(struct sched_attr), - .sched_policy = SCHED_DEADLINE, - .sched_flags = SCHED_FLAG_SUGOV, - .sched_nice = 0, - .sched_priority = 0, + .size = sizeof(struct sched_attr), + .sched_policy = SCHED_DEADLINE, + .sched_flags = SCHED_FLAG_SUGOV, + .sched_nice = 0, + .sched_priority = 0, /* * Fake (unused) bandwidth; workaround to "fix" * priority inheritance. @@ -663,21 +664,21 @@ static int sugov_start(struct cpufreq_policy *policy) struct sugov_policy *sg_policy = policy->governor_data; unsigned int cpu; - sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC; - sg_policy->last_freq_update_time = 0; - sg_policy->next_freq = UINT_MAX; - sg_policy->work_in_progress = false; - sg_policy->need_freq_update = false; - sg_policy->cached_raw_freq = 0; + sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC; + sg_policy->last_freq_update_time = 0; + sg_policy->next_freq = UINT_MAX; + sg_policy->work_in_progress = false; + sg_policy->need_freq_update = false; + sg_policy->cached_raw_freq = 0; for_each_cpu(cpu, policy->cpus) { struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu); memset(sg_cpu, 0, sizeof(*sg_cpu)); - sg_cpu->cpu = cpu; - sg_cpu->sg_policy = sg_policy; - sg_cpu->flags = 0; - sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq; + sg_cpu->cpu = cpu; + sg_cpu->sg_policy = sg_policy; + sg_cpu->flags = 0; + sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq; } for_each_cpu(cpu, policy->cpus) { @@ -721,14 +722,14 @@ static void sugov_limits(struct cpufreq_policy *policy) } static struct cpufreq_governor schedutil_gov = { - .name = "schedutil", - .owner = THIS_MODULE, - .dynamic_switching = true, - .init = sugov_init, - .exit = sugov_exit, - .start = sugov_start, - .stop = sugov_stop, - .limits = sugov_limits, + .name = "schedutil", + .owner = THIS_MODULE, + .dynamic_switching = true, + .init = sugov_init, + .exit = sugov_exit, + .start = sugov_start, + .stop = sugov_stop, + .limits = sugov_limits, }; #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c index 2511aba36b89..f43e14ccb67d 100644 --- a/kernel/sched/cpupri.c +++ b/kernel/sched/cpupri.c @@ -14,7 +14,7 @@ * * going from the lowest priority to the highest. 
CPUs in the INVALID state * are not eligible for routing. The system maintains this state with - * a 2 dimensional bitmap (the first for priority class, the second for cpus + * a 2 dimensional bitmap (the first for priority class, the second for CPUs * in that class). Therefore a typical application without affinity * restrictions can find a suitable CPU with O(1) complexity (e.g. two bit * searches). For tasks with affinity restrictions, the algorithm has a @@ -26,7 +26,6 @@ * as published by the Free Software Foundation; version 2 * of the License. */ - #include #include #include @@ -128,9 +127,9 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p, } /** - * cpupri_set - update the cpu priority setting + * cpupri_set - update the CPU priority setting * @cp: The cpupri context - * @cpu: The target cpu + * @cpu: The target CPU * @newpri: The priority (INVALID-RT99) to assign to this CPU * * Note: Assumes cpu_rq(cpu)->lock is locked @@ -151,7 +150,7 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri) return; /* - * If the cpu was currently mapped to a different value, we + * If the CPU was currently mapped to a different value, we * need to map it to the new value then remove the old value. * Note, we must add the new value first, otherwise we risk the * cpu being missed by the priority loop in cpupri_find. diff --git a/kernel/sched/cpupri.h b/kernel/sched/cpupri.h index bab050019071..141a06c914c6 100644 --- a/kernel/sched/cpupri.h +++ b/kernel/sched/cpupri.h @@ -1,32 +1,26 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LINUX_CPUPRI_H -#define _LINUX_CPUPRI_H - #include #define CPUPRI_NR_PRIORITIES (MAX_RT_PRIO + 2) -#define CPUPRI_INVALID -1 -#define CPUPRI_IDLE 0 -#define CPUPRI_NORMAL 1 +#define CPUPRI_INVALID -1 +#define CPUPRI_IDLE 0 +#define CPUPRI_NORMAL 1 /* values 2-101 are RT priorities 0-99 */ struct cpupri_vec { - atomic_t count; - cpumask_var_t mask; + atomic_t count; + cpumask_var_t mask; }; struct cpupri { - struct cpupri_vec pri_to_cpu[CPUPRI_NR_PRIORITIES]; - int *cpu_to_pri; + struct cpupri_vec pri_to_cpu[CPUPRI_NR_PRIORITIES]; + int *cpu_to_pri; }; #ifdef CONFIG_SMP -int cpupri_find(struct cpupri *cp, - struct task_struct *p, struct cpumask *lowest_mask); +int cpupri_find(struct cpupri *cp, struct task_struct *p, struct cpumask *lowest_mask); void cpupri_set(struct cpupri *cp, int cpu, int pri); -int cpupri_init(struct cpupri *cp); +int cpupri_init(struct cpupri *cp); void cpupri_cleanup(struct cpupri *cp); #endif - -#endif /* _LINUX_CPUPRI_H */ diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index bac6ac9a4ec7..d3b450b57ade 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c @@ -113,9 +113,9 @@ static inline void task_group_account_field(struct task_struct *p, int index, } /* - * Account user cpu time to a process. - * @p: the process that the cpu time gets accounted to - * @cputime: the cpu time spent in user space since the last update + * Account user CPU time to a process. + * @p: the process that the CPU time gets accounted to + * @cputime: the CPU time spent in user space since the last update */ void account_user_time(struct task_struct *p, u64 cputime) { @@ -135,9 +135,9 @@ void account_user_time(struct task_struct *p, u64 cputime) } /* - * Account guest cpu time to a process. - * @p: the process that the cpu time gets accounted to - * @cputime: the cpu time spent in virtual machine since the last update + * Account guest CPU time to a process. 
+ * @p: the process that the CPU time gets accounted to + * @cputime: the CPU time spent in virtual machine since the last update */ void account_guest_time(struct task_struct *p, u64 cputime) { @@ -159,9 +159,9 @@ void account_guest_time(struct task_struct *p, u64 cputime) } /* - * Account system cpu time to a process and desired cpustat field - * @p: the process that the cpu time gets accounted to - * @cputime: the cpu time spent in kernel space since the last update + * Account system CPU time to a process and desired cpustat field + * @p: the process that the CPU time gets accounted to + * @cputime: the CPU time spent in kernel space since the last update * @index: pointer to cpustat field that has to be updated */ void account_system_index_time(struct task_struct *p, @@ -179,10 +179,10 @@ void account_system_index_time(struct task_struct *p, } /* - * Account system cpu time to a process. - * @p: the process that the cpu time gets accounted to + * Account system CPU time to a process. + * @p: the process that the CPU time gets accounted to * @hardirq_offset: the offset to subtract from hardirq_count() - * @cputime: the cpu time spent in kernel space since the last update + * @cputime: the CPU time spent in kernel space since the last update */ void account_system_time(struct task_struct *p, int hardirq_offset, u64 cputime) { @@ -205,7 +205,7 @@ void account_system_time(struct task_struct *p, int hardirq_offset, u64 cputime) /* * Account for involuntary wait time. - * @cputime: the cpu time spent in involuntary wait + * @cputime: the CPU time spent in involuntary wait */ void account_steal_time(u64 cputime) { @@ -216,7 +216,7 @@ void account_steal_time(u64 cputime) /* * Account for idle time. - * @cputime: the cpu time spent in idle wait + * @cputime: the CPU time spent in idle wait */ void account_idle_time(u64 cputime) { @@ -338,7 +338,7 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times) #ifdef CONFIG_IRQ_TIME_ACCOUNTING /* * Account a tick to a process and cpustat - * @p: the process that the cpu time gets accounted to + * @p: the process that the CPU time gets accounted to * @user_tick: is the tick from userspace * @rq: the pointer to rq * @@ -400,17 +400,16 @@ static void irqtime_account_idle_ticks(int ticks) irqtime_account_process_tick(current, 0, rq, ticks); } #else /* CONFIG_IRQ_TIME_ACCOUNTING */ -static inline void irqtime_account_idle_ticks(int ticks) {} +static inline void irqtime_account_idle_ticks(int ticks) { } static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick, - struct rq *rq, int nr_ticks) {} + struct rq *rq, int nr_ticks) { } #endif /* CONFIG_IRQ_TIME_ACCOUNTING */ /* * Use precise platform statistics if available: */ #ifdef CONFIG_VIRT_CPU_ACCOUNTING - -#ifndef __ARCH_HAS_VTIME_TASK_SWITCH +# ifndef __ARCH_HAS_VTIME_TASK_SWITCH void vtime_common_task_switch(struct task_struct *prev) { if (is_idle_task(prev)) @@ -421,8 +420,7 @@ void vtime_common_task_switch(struct task_struct *prev) vtime_flush(prev); arch_vtime_task_switch(prev); } -#endif - +# endif #endif /* CONFIG_VIRT_CPU_ACCOUNTING */ @@ -469,10 +467,12 @@ void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st) *ut = cputime.utime; *st = cputime.stime; } -#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ + +#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE: */ + /* - * Account a single tick of cpu time. - * @p: the process that the cpu time gets accounted to + * Account a single tick of CPU time. 
+ * @p: the process that the CPU time gets accounted to * @user_tick: indicates if the tick is a user or a system tick */ void account_process_tick(struct task_struct *p, int user_tick) diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 65cd5ead1759..58f8b7b37983 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -539,12 +539,12 @@ static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p /* * If we cannot preempt any rq, fall back to pick any - * online cpu. + * online CPU: */ cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed); if (cpu >= nr_cpu_ids) { /* - * Fail to find any suitable cpu. + * Failed to find any suitable CPU. * The task will never come back! */ BUG_ON(dl_bandwidth_enabled()); @@ -608,8 +608,7 @@ static inline void queue_pull_task(struct rq *rq) static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags); static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags); -static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p, - int flags); +static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p, int flags); /* * We are being explicitly informed that a new instance is starting, @@ -1873,7 +1872,7 @@ static int find_later_rq(struct task_struct *task) /* * We have to consider system topology and task affinity - * first, then we can look for a suitable cpu. + * first, then we can look for a suitable CPU. */ if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask)) return -1; @@ -1887,7 +1886,7 @@ static int find_later_rq(struct task_struct *task) * Now we check how well this matches with task's * affinity and system topology. * - * The last cpu where the task run is our first + * The last CPU where the task run is our first * guess, since it is most likely cache-hot there. */ if (cpumask_test_cpu(cpu, later_mask)) @@ -1917,9 +1916,9 @@ static int find_later_rq(struct task_struct *task) best_cpu = cpumask_first_and(later_mask, sched_domain_span(sd)); /* - * Last chance: if a cpu being in both later_mask + * Last chance: if a CPU being in both later_mask * and current sd span is valid, that becomes our - * choice. Of course, the latest possible cpu is + * choice. Of course, the latest possible CPU is * already under consideration through later_mask. */ if (best_cpu < nr_cpu_ids) { @@ -2075,7 +2074,7 @@ retry: if (task == next_task) { /* * The task is still there. We don't try - * again, some other cpu will pull it when ready. + * again, some other CPU will pull it when ready. */ goto out; } @@ -2308,7 +2307,7 @@ static void switched_from_dl(struct rq *rq, struct task_struct *p) /* * Since this might be the only -deadline task on the rq, * this is the right place to try to pull some other one - * from an overloaded cpu, if any. + * from an overloaded CPU, if any. 
*/ if (!task_on_rq_queued(p) || rq->dl.dl_nr_running) return; @@ -2634,17 +2633,17 @@ void __dl_clear_params(struct task_struct *p) { struct sched_dl_entity *dl_se = &p->dl; - dl_se->dl_runtime = 0; - dl_se->dl_deadline = 0; - dl_se->dl_period = 0; - dl_se->flags = 0; - dl_se->dl_bw = 0; - dl_se->dl_density = 0; + dl_se->dl_runtime = 0; + dl_se->dl_deadline = 0; + dl_se->dl_period = 0; + dl_se->flags = 0; + dl_se->dl_bw = 0; + dl_se->dl_density = 0; - dl_se->dl_throttled = 0; - dl_se->dl_yielded = 0; - dl_se->dl_non_contending = 0; - dl_se->dl_overrun = 0; + dl_se->dl_throttled = 0; + dl_se->dl_yielded = 0; + dl_se->dl_non_contending = 0; + dl_se->dl_overrun = 0; } bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr) @@ -2663,21 +2662,22 @@ bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr) #ifdef CONFIG_SMP int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed) { - unsigned int dest_cpu = cpumask_any_and(cpu_active_mask, - cs_cpus_allowed); + unsigned int dest_cpu; struct dl_bw *dl_b; bool overflow; int cpus, ret; unsigned long flags; + dest_cpu = cpumask_any_and(cpu_active_mask, cs_cpus_allowed); + rcu_read_lock_sched(); dl_b = dl_bw_of(dest_cpu); raw_spin_lock_irqsave(&dl_b->lock, flags); cpus = dl_bw_cpus(dest_cpu); overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw); - if (overflow) + if (overflow) { ret = -EBUSY; - else { + } else { /* * We reserve space for this task in the destination * root_domain, as we can't fail after this point. @@ -2689,6 +2689,7 @@ int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allo } raw_spin_unlock_irqrestore(&dl_b->lock, flags); rcu_read_unlock_sched(); + return ret; } @@ -2709,6 +2710,7 @@ int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, ret = 0; raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags); rcu_read_unlock_sched(); + return ret; } @@ -2726,6 +2728,7 @@ bool dl_cpu_busy(unsigned int cpu) overflow = __dl_overflow(dl_b, cpus, 0, 0); raw_spin_unlock_irqrestore(&dl_b->lock, flags); rcu_read_unlock_sched(); + return overflow; } #endif diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index 1ca0130ed4f9..7c82a9b88510 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -9,7 +9,6 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
*/ - #include #include #include @@ -274,34 +273,19 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd) if (table == NULL) return NULL; - set_table_entry(&table[0], "min_interval", &sd->min_interval, - sizeof(long), 0644, proc_doulongvec_minmax, false); - set_table_entry(&table[1], "max_interval", &sd->max_interval, - sizeof(long), 0644, proc_doulongvec_minmax, false); - set_table_entry(&table[2], "busy_idx", &sd->busy_idx, - sizeof(int), 0644, proc_dointvec_minmax, true); - set_table_entry(&table[3], "idle_idx", &sd->idle_idx, - sizeof(int), 0644, proc_dointvec_minmax, true); - set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx, - sizeof(int), 0644, proc_dointvec_minmax, true); - set_table_entry(&table[5], "wake_idx", &sd->wake_idx, - sizeof(int), 0644, proc_dointvec_minmax, true); - set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx, - sizeof(int), 0644, proc_dointvec_minmax, true); - set_table_entry(&table[7], "busy_factor", &sd->busy_factor, - sizeof(int), 0644, proc_dointvec_minmax, false); - set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct, - sizeof(int), 0644, proc_dointvec_minmax, false); - set_table_entry(&table[9], "cache_nice_tries", - &sd->cache_nice_tries, - sizeof(int), 0644, proc_dointvec_minmax, false); - set_table_entry(&table[10], "flags", &sd->flags, - sizeof(int), 0644, proc_dointvec_minmax, false); - set_table_entry(&table[11], "max_newidle_lb_cost", - &sd->max_newidle_lb_cost, - sizeof(long), 0644, proc_doulongvec_minmax, false); - set_table_entry(&table[12], "name", sd->name, - CORENAME_MAX_SIZE, 0444, proc_dostring, false); + set_table_entry(&table[0] , "min_interval", &sd->min_interval, sizeof(long), 0644, proc_doulongvec_minmax, false); + set_table_entry(&table[1] , "max_interval", &sd->max_interval, sizeof(long), 0644, proc_doulongvec_minmax, false); + set_table_entry(&table[2] , "busy_idx", &sd->busy_idx, sizeof(int) , 0644, proc_dointvec_minmax, true ); + set_table_entry(&table[3] , "idle_idx", &sd->idle_idx, sizeof(int) , 0644, proc_dointvec_minmax, true ); + set_table_entry(&table[4] , "newidle_idx", &sd->newidle_idx, sizeof(int) , 0644, proc_dointvec_minmax, true ); + set_table_entry(&table[5] , "wake_idx", &sd->wake_idx, sizeof(int) , 0644, proc_dointvec_minmax, true ); + set_table_entry(&table[6] , "forkexec_idx", &sd->forkexec_idx, sizeof(int) , 0644, proc_dointvec_minmax, true ); + set_table_entry(&table[7] , "busy_factor", &sd->busy_factor, sizeof(int) , 0644, proc_dointvec_minmax, false); + set_table_entry(&table[8] , "imbalance_pct", &sd->imbalance_pct, sizeof(int) , 0644, proc_dointvec_minmax, false); + set_table_entry(&table[9] , "cache_nice_tries", &sd->cache_nice_tries, sizeof(int) , 0644, proc_dointvec_minmax, false); + set_table_entry(&table[10], "flags", &sd->flags, sizeof(int) , 0644, proc_dointvec_minmax, false); + set_table_entry(&table[11], "max_newidle_lb_cost", &sd->max_newidle_lb_cost, sizeof(long), 0644, proc_doulongvec_minmax, false); + set_table_entry(&table[12], "name", sd->name, CORENAME_MAX_SIZE, 0444, proc_dostring, false); /* &table[13] is terminator */ return table; @@ -332,8 +316,8 @@ static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu) return table; } -static cpumask_var_t sd_sysctl_cpus; -static struct ctl_table_header *sd_sysctl_header; +static cpumask_var_t sd_sysctl_cpus; +static struct ctl_table_header *sd_sysctl_header; void register_sched_domain_sysctl(void) { @@ -413,14 +397,10 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group { struct sched_entity 
*se = tg->se[cpu]; -#define P(F) \ - SEQ_printf(m, " .%-30s: %lld\n", #F, (long long)F) -#define P_SCHEDSTAT(F) \ - SEQ_printf(m, " .%-30s: %lld\n", #F, (long long)schedstat_val(F)) -#define PN(F) \ - SEQ_printf(m, " .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F)) -#define PN_SCHEDSTAT(F) \ - SEQ_printf(m, " .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(F))) +#define P(F) SEQ_printf(m, " .%-30s: %lld\n", #F, (long long)F) +#define P_SCHEDSTAT(F) SEQ_printf(m, " .%-30s: %lld\n", #F, (long long)schedstat_val(F)) +#define PN(F) SEQ_printf(m, " .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F)) +#define PN_SCHEDSTAT(F) SEQ_printf(m, " .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(F))) if (!se) return; @@ -428,6 +408,7 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group PN(se->exec_start); PN(se->vruntime); PN(se->sum_exec_runtime); + if (schedstat_enabled()) { PN_SCHEDSTAT(se->statistics.wait_start); PN_SCHEDSTAT(se->statistics.sleep_start); @@ -440,6 +421,7 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group PN_SCHEDSTAT(se->statistics.wait_sum); P_SCHEDSTAT(se->statistics.wait_count); } + P(se->load.weight); P(se->runnable_weight); #ifdef CONFIG_SMP @@ -464,6 +446,7 @@ static char *task_group_path(struct task_group *tg) return group_path; cgroup_path(tg->css.cgroup, group_path, PATH_MAX); + return group_path; } #endif @@ -799,9 +782,9 @@ void sysrq_sched_debug_show(void) /* * This itererator needs some explanation. * It returns 1 for the header position. - * This means 2 is cpu 0. - * In a hotplugged system some cpus, including cpu 0, may be missing so we have - * to use cpumask_* to iterate over the cpus. + * This means 2 is CPU 0. + * In a hotplugged system some CPUs, including CPU 0, may be missing so we have + * to use cpumask_* to iterate over the CPUs. 
*/ static void *sched_debug_start(struct seq_file *file, loff_t *offset) { @@ -821,6 +804,7 @@ static void *sched_debug_start(struct seq_file *file, loff_t *offset) if (n < nr_cpu_ids) return (void *)(unsigned long)(n + 2); + return NULL; } @@ -835,10 +819,10 @@ static void sched_debug_stop(struct seq_file *file, void *data) } static const struct seq_operations sched_debug_sops = { - .start = sched_debug_start, - .next = sched_debug_next, - .stop = sched_debug_stop, - .show = sched_debug_show, + .start = sched_debug_start, + .next = sched_debug_next, + .stop = sched_debug_stop, + .show = sched_debug_show, }; static int sched_debug_release(struct inode *inode, struct file *file) @@ -876,14 +860,10 @@ static int __init init_sched_debug_procfs(void) __initcall(init_sched_debug_procfs); -#define __P(F) \ - SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F) -#define P(F) \ - SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F) -#define __PN(F) \ - SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F)) -#define PN(F) \ - SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F)) +#define __P(F) SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F) +#define P(F) SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F) +#define __PN(F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F)) +#define PN(F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F)) #ifdef CONFIG_NUMA_BALANCING diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index e1febd252a84..1f877de96c9b 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -20,7 +20,6 @@ * Adaptive scheduling granularity, math enhancements by Peter Zijlstra * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra */ - #include #include @@ -103,7 +102,7 @@ const_debug unsigned int sysctl_sched_migration_cost = 500000UL; #ifdef CONFIG_SMP /* - * For asym packing, by default the lower numbered cpu has higher priority. + * For asym packing, by default the lower numbered CPU has higher priority. */ int __weak arch_asym_cpu_priority(int cpu) { @@ -1181,7 +1180,7 @@ pid_t task_numa_group_id(struct task_struct *p) } /* - * The averaged statistics, shared & private, memory & cpu, + * The averaged statistics, shared & private, memory & CPU, * occupy the first half of the array. The second half of the * array is for current counters, which are averaged into the * first set by task_numa_placement. @@ -1587,7 +1586,7 @@ static void task_numa_compare(struct task_numa_env *env, * be incurred if the tasks were swapped. */ if (cur) { - /* Skip this swap candidate if cannot move to the source cpu */ + /* Skip this swap candidate if cannot move to the source CPU: */ if (!cpumask_test_cpu(env->src_cpu, &cur->cpus_allowed)) goto unlock; @@ -1631,7 +1630,7 @@ static void task_numa_compare(struct task_numa_env *env, goto balance; } - /* Balance doesn't matter much if we're running a task per cpu */ + /* Balance doesn't matter much if we're running a task per CPU: */ if (imp > env->best_imp && src_rq->nr_running == 1 && dst_rq->nr_running == 1) goto assign; @@ -1676,7 +1675,7 @@ balance: */ if (!cur) { /* - * select_idle_siblings() uses an per-cpu cpumask that + * select_idle_siblings() uses an per-CPU cpumask that * can be used from IRQ context. */ local_irq_disable(); @@ -3362,7 +3361,7 @@ static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) } /* - * Called within set_task_rq() right before setting a task's cpu. The + * Called within set_task_rq() right before setting a task's CPU. 
The * caller only guarantees p->pi_lock is held; no other assumptions, * including the state of rq->lock, should be made. */ @@ -3541,7 +3540,7 @@ update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cf /* * runnable_sum can't be lower than running_sum - * As running sum is scale with cpu capacity wehreas the runnable sum + * As running sum is scale with CPU capacity wehreas the runnable sum * is not we rescale running_sum 1st */ running_sum = se->avg.util_sum / @@ -4688,7 +4687,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) if (!se) add_nr_running(rq, task_delta); - /* determine whether we need to wake up potentially idle cpu */ + /* Determine whether we need to wake up potentially idle CPU: */ if (rq->curr == rq->idle && rq->cfs.nr_running) resched_curr(rq); } @@ -5053,7 +5052,7 @@ static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) } /* - * Both these cpu hotplug callbacks race against unregister_fair_sched_group() + * Both these CPU hotplug callbacks race against unregister_fair_sched_group() * * The race is harmless, since modifying bandwidth settings of unhooked group * bits doesn't do much. @@ -5098,7 +5097,7 @@ static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq) */ cfs_rq->runtime_remaining = 1; /* - * Offline rq is schedulable till cpu is completely disabled + * Offline rq is schedulable till CPU is completely disabled * in take_cpu_down(), so we prevent new cfs throttling here. */ cfs_rq->runtime_enabled = 0; @@ -5335,8 +5334,8 @@ DEFINE_PER_CPU(cpumask_var_t, select_idle_mask); * * load' = (1 - 1/2^i) * load + (1/2^i) * cur_load * - * If a cpu misses updates for n ticks (as it was idle) and update gets - * called on the n+1-th tick when cpu may be busy, then we have: + * If a CPU misses updates for n ticks (as it was idle) and update gets + * called on the n+1-th tick when CPU may be busy, then we have: * * load_n = (1 - 1/2^i)^n * load_0 * load_n+1 = (1 - 1/2^i) * load_n + (1/2^i) * cur_load @@ -5480,7 +5479,7 @@ static unsigned long weighted_cpuload(struct rq *rq) #ifdef CONFIG_NO_HZ_COMMON /* * There is no sane way to deal with nohz on smp when using jiffies because the - * cpu doing the jiffies update might drift wrt the cpu doing the jiffy reading + * CPU doing the jiffies update might drift wrt the CPU doing the jiffy reading * causing off-by-one errors in observed deltas; {0,2} instead of {1,1}. * * Therefore we need to avoid the delta approach from the regular tick when @@ -5591,7 +5590,7 @@ void cpu_load_update_active(struct rq *this_rq) } /* - * Return a low guess at the load of a migration-source cpu weighted + * Return a low guess at the load of a migration-source CPU weighted * according to the scheduling class and "nice" value. * * We want to under-estimate the load of migration sources, to @@ -5609,7 +5608,7 @@ static unsigned long source_load(int cpu, int type) } /* - * Return a high guess at the load of a migration-target cpu weighted + * Return a high guess at the load of a migration-target CPU weighted * according to the scheduling class and "nice" value. 
*/ static unsigned long target_load(int cpu, int type) @@ -5889,7 +5888,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, max_spare_cap = 0; for_each_cpu(i, sched_group_span(group)) { - /* Bias balancing toward cpus of our domain */ + /* Bias balancing toward CPUs of our domain */ if (local_group) load = source_load(i, load_idx); else @@ -5919,7 +5918,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, if (min_runnable_load > (runnable_load + imbalance)) { /* * The runnable load is significantly smaller - * so we can pick this new cpu + * so we can pick this new CPU: */ min_runnable_load = runnable_load; min_avg_load = avg_load; @@ -5928,7 +5927,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, (100*min_avg_load > imbalance_scale*avg_load)) { /* * The runnable loads are close so take the - * blocked load into account through avg_load. + * blocked load into account through avg_load: */ min_avg_load = avg_load; idlest = group; @@ -5989,7 +5988,7 @@ skip_spare: } /* - * find_idlest_group_cpu - find the idlest cpu among the cpus in group. + * find_idlest_group_cpu - find the idlest CPU among the CPUs in the group. */ static int find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this_cpu) @@ -6067,12 +6066,12 @@ static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p new_cpu = find_idlest_group_cpu(group, p, cpu); if (new_cpu == cpu) { - /* Now try balancing at a lower domain level of cpu */ + /* Now try balancing at a lower domain level of 'cpu': */ sd = sd->child; continue; } - /* Now try balancing at a lower domain level of new_cpu */ + /* Now try balancing at a lower domain level of 'new_cpu': */ cpu = new_cpu; weight = sd->span_weight; sd = NULL; @@ -6082,7 +6081,6 @@ static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p if (tmp->flags & sd_flag) sd = tmp; } - /* while loop will break here if sd == NULL */ } return new_cpu; @@ -6278,12 +6276,12 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) return target; /* - * If the previous cpu is cache affine and idle, don't be stupid. + * If the previous CPU is cache affine and idle, don't be stupid: */ if (prev != target && cpus_share_cache(prev, target) && idle_cpu(prev)) return prev; - /* Check a recently used CPU as a potential idle candidate */ + /* Check a recently used CPU as a potential idle candidate: */ recent_used_cpu = p->recent_used_cpu; if (recent_used_cpu != prev && recent_used_cpu != target && @@ -6292,7 +6290,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) cpumask_test_cpu(p->recent_used_cpu, &p->cpus_allowed)) { /* * Replace recent_used_cpu with prev as it is a potential - * candidate for the next wake. + * candidate for the next wake: */ p->recent_used_cpu = prev; return recent_used_cpu; @@ -6357,7 +6355,7 @@ static inline unsigned long task_util(struct task_struct *p) } /* - * cpu_util_wake: Compute cpu utilization with any contributions from + * cpu_util_wake: Compute CPU utilization with any contributions from * the waking task p removed. */ static unsigned long cpu_util_wake(int cpu, struct task_struct *p) @@ -6403,10 +6401,10 @@ static int wake_cap(struct task_struct *p, int cpu, int prev_cpu) * that have the 'sd_flag' flag set. In practice, this is SD_BALANCE_WAKE, * SD_BALANCE_FORK, or SD_BALANCE_EXEC. 
* - * Balances load by selecting the idlest cpu in the idlest group, or under - * certain conditions an idle sibling cpu if the domain has SD_WAKE_AFFINE set. + * Balances load by selecting the idlest CPU in the idlest group, or under + * certain conditions an idle sibling CPU if the domain has SD_WAKE_AFFINE set. * - * Returns the target cpu number. + * Returns the target CPU number. * * preempt must be disabled. */ @@ -6431,7 +6429,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f break; /* - * If both cpu and prev_cpu are part of this domain, + * If both 'cpu' and 'prev_cpu' are part of this domain, * cpu is a valid SD_WAKE_AFFINE target. */ if (want_affine && (tmp->flags & SD_WAKE_AFFINE) && @@ -6482,9 +6480,9 @@ pick_cpu: static void detach_entity_cfs_rq(struct sched_entity *se); /* - * Called immediately before a task is migrated to a new cpu; task_cpu(p) and + * Called immediately before a task is migrated to a new CPU; task_cpu(p) and * cfs_rq_of(p) references at time of call are still valid and identify the - * previous cpu. The caller guarantees p->pi_lock or task_rq(p)->lock is held. + * previous CPU. The caller guarantees p->pi_lock or task_rq(p)->lock is held. */ static void migrate_task_rq_fair(struct task_struct *p) { @@ -6918,17 +6916,17 @@ static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preemp * BASICS * * The purpose of load-balancing is to achieve the same basic fairness the - * per-cpu scheduler provides, namely provide a proportional amount of compute + * per-CPU scheduler provides, namely provide a proportional amount of compute * time to each task. This is expressed in the following equation: * * W_i,n/P_i == W_j,n/P_j for all i,j (1) * - * Where W_i,n is the n-th weight average for cpu i. The instantaneous weight + * Where W_i,n is the n-th weight average for CPU i. The instantaneous weight * W_i,0 is defined as: * * W_i,0 = \Sum_j w_i,j (2) * - * Where w_i,j is the weight of the j-th runnable task on cpu i. This weight + * Where w_i,j is the weight of the j-th runnable task on CPU i. This weight * is derived from the nice value as per sched_prio_to_weight[]. * * The weight average is an exponential decay average of the instantaneous @@ -6936,7 +6934,7 @@ static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preemp * * W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0 (3) * - * C_i is the compute capacity of cpu i, typically it is the + * C_i is the compute capacity of CPU i, typically it is the * fraction of 'recent' time available for SCHED_OTHER task execution. But it * can also include other factors [XXX]. * @@ -6957,11 +6955,11 @@ static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preemp * SCHED DOMAINS * * In order to solve the imbalance equation (4), and avoid the obvious O(n^2) - * for all i,j solution, we create a tree of cpus that follows the hardware + * for all i,j solution, we create a tree of CPUs that follows the hardware * topology where each level pairs two lower groups (or better). This results - * in O(log n) layers. Furthermore we reduce the number of cpus going up the + * in O(log n) layers. Furthermore we reduce the number of CPUs going up the * tree to only the first of the previous level and we decrease the frequency - * of load-balance at each level inv. proportional to the number of cpus in + * of load-balance at each level inv. proportional to the number of CPUs in * the groups. 
* * This yields: @@ -6970,7 +6968,7 @@ static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preemp * \Sum { --- * --- * 2^i } = O(n) (5) * i = 0 2^i 2^i * `- size of each group - * | | `- number of cpus doing load-balance + * | | `- number of CPUs doing load-balance * | `- freq * `- sum over all levels * @@ -6978,7 +6976,7 @@ static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preemp * this makes (5) the runtime complexity of the balancer. * * An important property here is that each CPU is still (indirectly) connected - * to every other cpu in at most O(log n) steps: + * to every other CPU in at most O(log n) steps: * * The adjacency matrix of the resulting graph is given by: * @@ -6990,7 +6988,7 @@ static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preemp * * A^(log_2 n)_i,j != 0 for all i,j (7) * - * Showing there's indeed a path between every cpu in at most O(log n) steps. + * Showing there's indeed a path between every CPU in at most O(log n) steps. * The task movement gives a factor of O(m), giving a convergence complexity * of: * @@ -7000,7 +6998,7 @@ static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preemp * WORK CONSERVING * * In order to avoid CPUs going idle while there's still work to do, new idle - * balancing is more aggressive and has the newly idle cpu iterate up the domain + * balancing is more aggressive and has the newly idle CPU iterate up the domain * tree itself instead of relying on other CPUs to bring it work. * * This adds some complexity to both (5) and (8) but it reduces the total idle @@ -7021,7 +7019,7 @@ static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preemp * * s_k,i = \Sum_j w_i,j,k and S_k = \Sum_i s_k,i (10) * - * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on cpu i. + * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on CPU i. * * The big problem is S_k, its a global sum needed to compute a local (W_i) * property. @@ -7185,7 +7183,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env) env->flags |= LBF_SOME_PINNED; /* - * Remember if this task can be migrated to any other cpu in + * Remember if this task can be migrated to any other CPU in * our sched_group. We may want to revisit it if we couldn't * meet load balance goals by pulling other tasks on src_cpu. * @@ -7195,7 +7193,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env) if (env->idle == CPU_NEWLY_IDLE || (env->flags & LBF_DST_PINNED)) return 0; - /* Prevent to re-select dst_cpu via env's cpus */ + /* Prevent to re-select dst_cpu via env's CPUs: */ for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) { if (cpumask_test_cpu(cpu, &p->cpus_allowed)) { env->flags |= LBF_DST_PINNED; @@ -7769,8 +7767,8 @@ check_cpu_capacity(struct rq *rq, struct sched_domain *sd) * Group imbalance indicates (and tries to solve) the problem where balancing * groups is inadequate due to ->cpus_allowed constraints. * - * Imagine a situation of two groups of 4 cpus each and 4 tasks each with a - * cpumask covering 1 cpu of the first group and 3 cpus of the second group. + * Imagine a situation of two groups of 4 CPUs each and 4 tasks each with a + * cpumask covering 1 CPU of the first group and 3 CPUs of the second group. 
* Something like: * * { 0 1 2 3 } { 4 5 6 7 } @@ -7778,7 +7776,7 @@ check_cpu_capacity(struct rq *rq, struct sched_domain *sd) * * If we were to balance group-wise we'd place two tasks in the first group and * two tasks in the second group. Clearly this is undesired as it will overload - * cpu 3 and leave one of the cpus in the second group unused. + * cpu 3 and leave one of the CPUs in the second group unused. * * The current solution to this issue is detecting the skew in the first group * by noticing the lower domain failed to reach balance and had difficulty @@ -7891,7 +7889,7 @@ static inline void update_sg_lb_stats(struct lb_env *env, for_each_cpu_and(i, sched_group_span(group), env->cpus) { struct rq *rq = cpu_rq(i); - /* Bias balancing toward cpus of our domain */ + /* Bias balancing toward CPUs of our domain: */ if (local_group) load = target_load(i, load_idx); else @@ -7977,7 +7975,7 @@ asym_packing: if (!(env->sd->flags & SD_ASYM_PACKING)) return true; - /* No ASYM_PACKING if target cpu is already busy */ + /* No ASYM_PACKING if target CPU is already busy */ if (env->idle == CPU_NOT_IDLE) return true; /* @@ -7990,7 +7988,7 @@ asym_packing: if (!sds->busiest) return true; - /* Prefer to move from lowest priority cpu's work */ + /* Prefer to move from lowest priority CPU's work */ if (sched_asym_prefer(sds->busiest->asym_prefer_cpu, sg->asym_prefer_cpu)) return true; @@ -8243,7 +8241,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s if (busiest->group_type == group_imbalanced) { /* * In the group_imb case we cannot rely on group-wide averages - * to ensure cpu-load equilibrium, look at wider averages. XXX + * to ensure CPU-load equilibrium, look at wider averages. XXX */ busiest->load_per_task = min(busiest->load_per_task, sds->avg_load); @@ -8262,7 +8260,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s } /* - * If there aren't any idle cpus, avoid creating some. + * If there aren't any idle CPUs, avoid creating some. */ if (busiest->group_type == group_overloaded && local->group_type == group_overloaded) { @@ -8276,9 +8274,9 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s } /* - * We're trying to get all the cpus to the average_load, so we don't + * We're trying to get all the CPUs to the average_load, so we don't * want to push ourselves above the average load, nor do we wish to - * reduce the max loaded cpu below the average load. At the same time, + * reduce the max loaded CPU below the average load. At the same time, * we also don't want to reduce the group load below the group * capacity. Thus we look for the minimum possible imbalance. */ @@ -8372,9 +8370,9 @@ static struct sched_group *find_busiest_group(struct lb_env *env) if (env->idle == CPU_IDLE) { /* - * This cpu is idle. If the busiest group is not overloaded + * This CPU is idle. If the busiest group is not overloaded * and there is no imbalance between this and busiest group - * wrt idle cpus, it is balanced. The imbalance becomes + * wrt idle CPUs, it is balanced. The imbalance becomes * significant if the diff is greater than 1 otherwise we * might end up to just move the imbalance on another group */ @@ -8402,7 +8400,7 @@ out_balanced: } /* - * find_busiest_queue - find the busiest runqueue among the cpus in group. + * find_busiest_queue - find the busiest runqueue among the CPUs in the group. 
*/ static struct rq *find_busiest_queue(struct lb_env *env, struct sched_group *group) @@ -8446,7 +8444,7 @@ static struct rq *find_busiest_queue(struct lb_env *env, /* * When comparing with imbalance, use weighted_cpuload() - * which is not scaled with the cpu capacity. + * which is not scaled with the CPU capacity. */ if (rq->nr_running == 1 && wl > env->imbalance && @@ -8454,9 +8452,9 @@ static struct rq *find_busiest_queue(struct lb_env *env, continue; /* - * For the load comparisons with the other cpu's, consider - * the weighted_cpuload() scaled with the cpu capacity, so - * that the load can be moved away from the cpu that is + * For the load comparisons with the other CPU's, consider + * the weighted_cpuload() scaled with the CPU capacity, so + * that the load can be moved away from the CPU that is * potentially running at a lower capacity. * * Thus we're looking for max(wl_i / capacity_i), crosswise @@ -8527,13 +8525,13 @@ static int should_we_balance(struct lb_env *env) return 0; /* - * In the newly idle case, we will allow all the cpu's + * In the newly idle case, we will allow all the CPUs * to do the newly idle load balance. */ if (env->idle == CPU_NEWLY_IDLE) return 1; - /* Try to find first idle cpu */ + /* Try to find first idle CPU */ for_each_cpu_and(cpu, group_balance_mask(sg), env->cpus) { if (!idle_cpu(cpu)) continue; @@ -8546,7 +8544,7 @@ static int should_we_balance(struct lb_env *env) balance_cpu = group_balance_cpu(sg); /* - * First idle cpu or the first cpu(busiest) in this sched group + * First idle CPU or the first CPU(busiest) in this sched group * is eligible for doing load balancing at this and above domains. */ return balance_cpu == env->dst_cpu; @@ -8655,7 +8653,7 @@ more_balance: * Revisit (affine) tasks on src_cpu that couldn't be moved to * us and move them to an alternate dst_cpu in our sched_group * where they can run. The upper limit on how many times we - * iterate on same src_cpu is dependent on number of cpus in our + * iterate on same src_cpu is dependent on number of CPUs in our * sched_group. * * This changes load balance semantics a bit on who can move @@ -8672,7 +8670,7 @@ more_balance: */ if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) { - /* Prevent to re-select dst_cpu via env's cpus */ + /* Prevent to re-select dst_cpu via env's CPUs */ cpumask_clear_cpu(env.dst_cpu, env.cpus); env.dst_rq = cpu_rq(env.new_dst_cpu); @@ -8734,9 +8732,10 @@ more_balance: raw_spin_lock_irqsave(&busiest->lock, flags); - /* don't kick the active_load_balance_cpu_stop, - * if the curr task on busiest cpu can't be - * moved to this_cpu + /* + * Don't kick the active_load_balance_cpu_stop, + * if the curr task on busiest CPU can't be + * moved to this_cpu: */ if (!cpumask_test_cpu(this_cpu, &busiest->curr->cpus_allowed)) { raw_spin_unlock_irqrestore(&busiest->lock, @@ -8962,7 +8961,7 @@ out: } /* - * active_load_balance_cpu_stop is run by cpu stopper. It pushes + * active_load_balance_cpu_stop is run by the CPU stopper. It pushes * running tasks off the busiest CPU onto idle CPUs. It requires at * least 1 task to be running on each physical CPU where possible, and * avoids physical / logical imbalances. 
@@ -8986,7 +8985,7 @@ static int active_load_balance_cpu_stop(void *data) if (!cpu_active(busiest_cpu) || !cpu_active(target_cpu)) goto out_unlock; - /* make sure the requested cpu hasn't gone down in the meantime */ + /* Make sure the requested CPU hasn't gone down in the meantime: */ if (unlikely(busiest_cpu != smp_processor_id() || !busiest_rq->active_balance)) goto out_unlock; @@ -8998,7 +8997,7 @@ static int active_load_balance_cpu_stop(void *data) /* * This condition is "impossible", if it occurs * we need to fix it. Originally reported by - * Bjorn Helgaas on a 128-cpu setup. + * Bjorn Helgaas on a 128-CPU setup. */ BUG_ON(busiest_rq == target_rq); @@ -9100,7 +9099,7 @@ static void nohz_balancer_kick(void) return; /* * Use smp_send_reschedule() instead of resched_cpu(). - * This way we generate a sched IPI on the target cpu which + * This way we generate a sched IPI on the target CPU which * is idle. And the softirq performing nohz idle load balance * will be run before returning from the IPI. */ @@ -9157,14 +9156,12 @@ unlock: } /* - * This routine will record that the cpu is going idle with tick stopped. + * This routine will record that the CPU is going idle with tick stopped. * This info will be used in performing idle load balancing in the future. */ void nohz_balance_enter_idle(int cpu) { - /* - * If this cpu is going down, then nothing needs to be done. - */ + /* If this CPU is going down, then nothing needs to be done: */ if (!cpu_active(cpu)) return; @@ -9175,9 +9172,7 @@ void nohz_balance_enter_idle(int cpu) if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu))) return; - /* - * If we're a completely isolated CPU, we don't play. - */ + /* If we're a completely isolated CPU, we don't play: */ if (on_null_domain(cpu_rq(cpu))) return; @@ -9286,7 +9281,7 @@ out: /* * next_balance will be updated only when there is a need. - * When the cpu is attached to null domain for ex, it will not be + * When the CPU is attached to null domain for ex, it will not be * updated. */ if (likely(update_next_balance)) { @@ -9310,7 +9305,7 @@ out: #ifdef CONFIG_NO_HZ_COMMON /* * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the - * rebalancing for all the cpus for whom scheduler ticks are stopped. + * rebalancing for all the CPUs for whom scheduler ticks are stopped. */ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { @@ -9330,8 +9325,8 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) continue; /* - * If this cpu gets work to do, stop the load balancing - * work being done for other cpus. Next load + * If this CPU gets work to do, stop the load balancing + * work being done for other CPUs. Next load * balancing owner will pick it up. */ if (need_resched()) @@ -9373,13 +9368,13 @@ end: /* * Current heuristic for kicking the idle load balancer in the presence - * of an idle cpu in the system. + * of an idle CPU in the system. * - This rq has more than one task. * - This rq has at least one CFS task and the capacity of the CPU is * significantly reduced because of RT tasks or IRQs. - * - At parent of LLC scheduler domain level, this cpu's scheduler group has - * multiple busy cpu. - * - For SD_ASYM_PACKING, if the lower numbered cpu's in the scheduler + * - At parent of LLC scheduler domain level, this CPU's scheduler group has + * multiple busy CPUs. + * - For SD_ASYM_PACKING, if the lower numbered CPU's in the scheduler * domain span are idle. 
*/ static inline bool nohz_kick_needed(struct rq *rq) @@ -9469,10 +9464,10 @@ static __latent_entropy void run_rebalance_domains(struct softirq_action *h) CPU_IDLE : CPU_NOT_IDLE; /* - * If this cpu has a pending nohz_balance_kick, then do the - * balancing on behalf of the other idle cpus whose ticks are + * If this CPU has a pending nohz_balance_kick, then do the + * balancing on behalf of the other idle CPUs whose ticks are * stopped. Do nohz_idle_balance *before* rebalance_domains to - * give the idle cpus a chance to load balance. Else we may + * give the idle CPUs a chance to load balance. Else we may * load balance only within the local sched_domain hierarchy * and abort nohz_idle_balance altogether if we pull some load. */ diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c index 7dae9eb8c042..343d25f85477 100644 --- a/kernel/sched/idle.c +++ b/kernel/sched/idle.c @@ -1,5 +1,5 @@ /* - * Generic entry point for the idle threads + * Generic entry points for the idle threads */ #include #include @@ -332,8 +332,8 @@ void cpu_startup_entry(enum cpuhp_state state) { /* * This #ifdef needs to die, but it's too late in the cycle to - * make this generic (arm and sh have never invoked the canary - * init for the non boot cpus!). Will be fixed in 3.11 + * make this generic (ARM and SH have never invoked the canary + * init for the non boot CPUs!). Will be fixed in 3.11 */ #ifdef CONFIG_X86 /* diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c index 48b8a83f5185..ec73680922f8 100644 --- a/kernel/sched/idle_task.c +++ b/kernel/sched/idle_task.c @@ -14,7 +14,7 @@ select_task_rq_idle(struct task_struct *p, int cpu, int sd_flag, int flags) { return task_cpu(p); /* IDLE tasks as never migrated */ } -#endif /* CONFIG_SMP */ +#endif /* * Idle tasks are unconditionally rescheduled: @@ -30,6 +30,7 @@ pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf put_prev_task(rq, prev); update_idle_core(rq); schedstat_inc(rq->sched_goidle); + return rq->idle; } diff --git a/kernel/sched/isolation.c b/kernel/sched/isolation.c index 39f340dde1d7..aad5f48a07c6 100644 --- a/kernel/sched/isolation.c +++ b/kernel/sched/isolation.c @@ -6,13 +6,13 @@ * Copyright (C) 2017-2018 SUSE, Frederic Weisbecker * */ - #include #include #include #include #include #include + #include "sched.h" DEFINE_STATIC_KEY_FALSE(housekeeping_overriden); diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c index 89a989e4d758..a398e7e28a8a 100644 --- a/kernel/sched/loadavg.c +++ b/kernel/sched/loadavg.c @@ -32,29 +32,29 @@ * Due to a number of reasons the above turns in the mess below: * * - for_each_possible_cpu() is prohibitively expensive on machines with - * serious number of cpus, therefore we need to take a distributed approach + * serious number of CPUs, therefore we need to take a distributed approach * to calculating nr_active. * * \Sum_i x_i(t) = \Sum_i x_i(t) - x_i(t_0) | x_i(t_0) := 0 * = \Sum_i { \Sum_j=1 x_i(t_j) - x_i(t_j-1) } * * So assuming nr_active := 0 when we start out -- true per definition, we - * can simply take per-cpu deltas and fold those into a global accumulate + * can simply take per-CPU deltas and fold those into a global accumulate * to obtain the same result. See calc_load_fold_active(). * - * Furthermore, in order to avoid synchronizing all per-cpu delta folding + * Furthermore, in order to avoid synchronizing all per-CPU delta folding * across the machine, we assume 10 ticks is sufficient time for every - * cpu to have completed this task. 
+ * CPU to have completed this task. * * This places an upper-bound on the IRQ-off latency of the machine. Then * again, being late doesn't loose the delta, just wrecks the sample. * - * - cpu_rq()->nr_uninterruptible isn't accurately tracked per-cpu because - * this would add another cross-cpu cacheline miss and atomic operation - * to the wakeup path. Instead we increment on whatever cpu the task ran - * when it went into uninterruptible state and decrement on whatever cpu + * - cpu_rq()->nr_uninterruptible isn't accurately tracked per-CPU because + * this would add another cross-CPU cacheline miss and atomic operation + * to the wakeup path. Instead we increment on whatever CPU the task ran + * when it went into uninterruptible state and decrement on whatever CPU * did the wakeup. This means that only the sum of nr_uninterruptible over - * all cpus yields the correct result. + * all CPUs yields the correct result. * * This covers the NO_HZ=n code, for extra head-aches, see the comment below. */ @@ -115,11 +115,11 @@ calc_load(unsigned long load, unsigned long exp, unsigned long active) * Handle NO_HZ for the global load-average. * * Since the above described distributed algorithm to compute the global - * load-average relies on per-cpu sampling from the tick, it is affected by + * load-average relies on per-CPU sampling from the tick, it is affected by * NO_HZ. * * The basic idea is to fold the nr_active delta into a global NO_HZ-delta upon - * entering NO_HZ state such that we can include this as an 'extra' cpu delta + * entering NO_HZ state such that we can include this as an 'extra' CPU delta * when we read the global state. * * Obviously reality has to ruin such a delightfully simple scheme: @@ -146,9 +146,9 @@ calc_load(unsigned long load, unsigned long exp, unsigned long active) * busy state. * * This is solved by pushing the window forward, and thus skipping the - * sample, for this cpu (effectively using the NO_HZ-delta for this cpu which + * sample, for this CPU (effectively using the NO_HZ-delta for this CPU which * was in effect at the time the window opened). This also solves the issue - * of having to deal with a cpu having been in NO_HZ for multiple LOAD_FREQ + * of having to deal with a CPU having been in NO_HZ for multiple LOAD_FREQ * intervals. * * When making the ILB scale, we should try to pull this in as well. @@ -299,7 +299,7 @@ calc_load_n(unsigned long load, unsigned long exp, } /* - * NO_HZ can leave us missing all per-cpu ticks calling + * NO_HZ can leave us missing all per-CPU ticks calling * calc_load_fold_active(), but since a NO_HZ CPU folds its delta into * calc_load_nohz per calc_load_nohz_start(), all we need to do is fold * in the pending NO_HZ delta if our NO_HZ period crossed a load cycle boundary. @@ -363,7 +363,7 @@ void calc_global_load(unsigned long ticks) return; /* - * Fold the 'old' NO_HZ-delta to include all NO_HZ cpus. + * Fold the 'old' NO_HZ-delta to include all NO_HZ CPUs. */ delta = calc_load_nohz_fold(); if (delta) diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c index 5d0762633639..2c6ae2413fa2 100644 --- a/kernel/sched/membarrier.c +++ b/kernel/sched/membarrier.c @@ -27,18 +27,18 @@ * except MEMBARRIER_CMD_QUERY. 
*/ #ifdef CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE -#define MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK \ - (MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE \ +#define MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK \ + (MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE \ | MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE) #else #define MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK 0 #endif -#define MEMBARRIER_CMD_BITMASK \ - (MEMBARRIER_CMD_GLOBAL | MEMBARRIER_CMD_GLOBAL_EXPEDITED \ - | MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED \ - | MEMBARRIER_CMD_PRIVATE_EXPEDITED \ - | MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED \ +#define MEMBARRIER_CMD_BITMASK \ + (MEMBARRIER_CMD_GLOBAL | MEMBARRIER_CMD_GLOBAL_EXPEDITED \ + | MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED \ + | MEMBARRIER_CMD_PRIVATE_EXPEDITED \ + | MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED \ | MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK) static void ipi_mb(void *info) @@ -85,6 +85,7 @@ static int membarrier_global_expedited(void) */ if (cpu == raw_smp_processor_id()) continue; + rcu_read_lock(); p = task_rcu_dereference(&cpu_rq(cpu)->curr); if (p && p->mm && (atomic_read(&p->mm->membarrier_state) & @@ -188,6 +189,7 @@ static int membarrier_private_expedited(int flags) * rq->curr modification in scheduler. */ smp_mb(); /* exit from system call is not a mb */ + return 0; } @@ -219,6 +221,7 @@ static int membarrier_register_global_expedited(void) } atomic_or(MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY, &mm->membarrier_state); + return 0; } @@ -253,6 +256,7 @@ static int membarrier_register_private_expedited(int flags) synchronize_sched(); } atomic_or(state, &mm->membarrier_state); + return 0; } diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index c80563b4f6b9..e40498872111 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -1453,9 +1453,9 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p) return; /* - * There appears to be other cpus that can accept - * current and none to run 'p', so lets reschedule - * to try and push current away: + * There appear to be other CPUs that can accept + * the current task but none can run 'p', so lets reschedule + * to try and push the current task away: */ requeue_task_rt(rq, p, 1); resched_curr(rq); @@ -1596,12 +1596,13 @@ static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu) if (!task_running(rq, p) && cpumask_test_cpu(cpu, &p->cpus_allowed)) return 1; + return 0; } /* * Return the highest pushable rq's task, which is suitable to be executed - * on the cpu, NULL otherwise + * on the CPU, NULL otherwise */ static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu) { @@ -1639,11 +1640,11 @@ static int find_lowest_rq(struct task_struct *task) return -1; /* No targets found */ /* - * At this point we have built a mask of cpus representing the + * At this point we have built a mask of CPUs representing the * lowest priority tasks in the system. Now we want to elect * the best one based on our affinity and topology. * - * We prioritize the last cpu that the task executed on since + * We prioritize the last CPU that the task executed on since * it is most likely cache-hot in that location. */ if (cpumask_test_cpu(cpu, lowest_mask)) @@ -1651,7 +1652,7 @@ static int find_lowest_rq(struct task_struct *task) /* * Otherwise, we consult the sched_domains span maps to figure - * out which cpu is logically closest to our hot cache data. + * out which CPU is logically closest to our hot cache data. 
*/ if (!cpumask_test_cpu(this_cpu, lowest_mask)) this_cpu = -1; /* Skip this_cpu opt if not among lowest */ @@ -1692,6 +1693,7 @@ static int find_lowest_rq(struct task_struct *task) cpu = cpumask_any(lowest_mask); if (cpu < nr_cpu_ids) return cpu; + return -1; } @@ -1827,7 +1829,7 @@ retry: * The task hasn't migrated, and is still the next * eligible task, but we failed to find a run-queue * to push it to. Do not retry in this case, since - * other cpus will pull from us when ready. + * other CPUs will pull from us when ready. */ goto out; } @@ -1919,7 +1921,7 @@ static int rto_next_cpu(struct root_domain *rd) * rt_next_cpu() will simply return the first CPU found in * the rto_mask. * - * If rto_next_cpu() is called with rto_cpu is a valid cpu, it + * If rto_next_cpu() is called with rto_cpu is a valid CPU, it * will return the next CPU found in the rto_mask. * * If there are no more CPUs left in the rto_mask, then a check is made @@ -1980,7 +1982,7 @@ static void tell_cpu_to_push(struct rq *rq) raw_spin_lock(&rq->rd->rto_lock); /* - * The rto_cpu is updated under the lock, if it has a valid cpu + * The rto_cpu is updated under the lock, if it has a valid CPU * then the IPI is still running and will continue due to the * update to loop_next, and nothing needs to be done here. * Otherwise it is finishing up and an ipi needs to be sent. @@ -2105,7 +2107,7 @@ static void pull_rt_task(struct rq *this_rq) /* * There's a chance that p is higher in priority - * than what's currently running on its cpu. + * than what's currently running on its CPU. * This is just that p is wakeing up and hasn't * had a chance to schedule. We only pull * p if it is lower in priority than the @@ -2693,6 +2695,7 @@ int sched_rr_handler(struct ctl_table *table, int write, msecs_to_jiffies(sysctl_sched_rr_timeslice); } mutex_unlock(&mutex); + return ret; } diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index dc6c8b5a24ad..bd1461ae06e4 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1,5 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ - +/* + * Scheduler internal types and methods: + */ #include #include #include @@ -79,11 +81,11 @@ static inline void cpu_load_update_active(struct rq *this_rq) { } * and does not change the user-interface for setting shares/weights. * * We increase resolution only if we have enough bits to allow this increased - * resolution (i.e. 64bit). The costs for increasing resolution when 32bit are - * pretty high and the returns do not justify the increased costs. + * resolution (i.e. 64-bit). The costs for increasing resolution when 32-bit + * are pretty high and the returns do not justify the increased costs. * - * Really only required when CONFIG_FAIR_GROUP_SCHED is also set, but to - * increase coverage and consistency always enable it on 64bit platforms. + * Really only required when CONFIG_FAIR_GROUP_SCHED=y is also set, but to + * increase coverage and consistency always enable it on 64-bit platforms. */ #ifdef CONFIG_64BIT # define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT) @@ -111,16 +113,12 @@ static inline void cpu_load_update_active(struct rq *this_rq) { } * 10 -> just above 1us * 9 -> just above 0.5us */ -#define DL_SCALE (10) - -/* - * These are the 'tuning knobs' of the scheduler: - */ +#define DL_SCALE 10 /* - * single value that denotes runtime == period, ie unlimited time. + * Single value that denotes runtime == period, ie unlimited time. 
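The scale values quoted in the DL_SCALE comment above work out exactly as stated, and RUNTIME_INF in the hunk that follows is simply the all-ones u64. A throwaway check (the TOY_ names are mine; the values are taken from the defines in this header):

#include <stdio.h>
#include <stdint.h>

#define TOY_DL_SCALE	10
#define TOY_RUNTIME_INF	((uint64_t)~0ULL)

int main(void)
{
	/* 1 << 10 = 1024 ns, i.e. "just above 1us"; 1 << 9 = 512 ns. */
	printf("granularity @10: %d ns\n", 1 << TOY_DL_SCALE);
	printf("granularity @9:  %d ns\n", 1 << (TOY_DL_SCALE - 1));

	/* runtime == period == "infinite" is encoded as the largest u64. */
	printf("RUNTIME_INF: %llu\n", (unsigned long long)TOY_RUNTIME_INF);
	return 0;
}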
*/ -#define RUNTIME_INF ((u64)~0ULL) +#define RUNTIME_INF ((u64)~0ULL) static inline int idle_policy(int policy) { @@ -235,9 +233,9 @@ void __dl_clear_params(struct task_struct *p); * control. */ struct dl_bandwidth { - raw_spinlock_t dl_runtime_lock; - u64 dl_runtime; - u64 dl_period; + raw_spinlock_t dl_runtime_lock; + u64 dl_runtime; + u64 dl_period; }; static inline int dl_bandwidth_enabled(void) @@ -246,8 +244,9 @@ static inline int dl_bandwidth_enabled(void) } struct dl_bw { - raw_spinlock_t lock; - u64 bw, total_bw; + raw_spinlock_t lock; + u64 bw; + u64 total_bw; }; static inline void __dl_update(struct dl_bw *dl_b, s64 bw); @@ -273,20 +272,17 @@ bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw) dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw; } -void dl_change_utilization(struct task_struct *p, u64 new_bw); +extern void dl_change_utilization(struct task_struct *p, u64 new_bw); extern void init_dl_bw(struct dl_bw *dl_b); -extern int sched_dl_global_validate(void); +extern int sched_dl_global_validate(void); extern void sched_dl_do_global(void); -extern int sched_dl_overflow(struct task_struct *p, int policy, - const struct sched_attr *attr); +extern int sched_dl_overflow(struct task_struct *p, int policy, const struct sched_attr *attr); extern void __setparam_dl(struct task_struct *p, const struct sched_attr *attr); extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr); extern bool __checkparam_dl(const struct sched_attr *attr); extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr); -extern int dl_task_can_attach(struct task_struct *p, - const struct cpumask *cs_cpus_allowed); -extern int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, - const struct cpumask *trial); +extern int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed); +extern int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial); extern bool dl_cpu_busy(unsigned int cpu); #ifdef CONFIG_CGROUP_SCHED @@ -300,32 +296,36 @@ extern struct list_head task_groups; struct cfs_bandwidth { #ifdef CONFIG_CFS_BANDWIDTH - raw_spinlock_t lock; - ktime_t period; - u64 quota, runtime; - s64 hierarchical_quota; - u64 runtime_expires; - - int idle, period_active; - struct hrtimer period_timer, slack_timer; - struct list_head throttled_cfs_rq; - - /* statistics */ - int nr_periods, nr_throttled; - u64 throttled_time; + raw_spinlock_t lock; + ktime_t period; + u64 quota; + u64 runtime; + s64 hierarchical_quota; + u64 runtime_expires; + + int idle; + int period_active; + struct hrtimer period_timer; + struct hrtimer slack_timer; + struct list_head throttled_cfs_rq; + + /* Statistics: */ + int nr_periods; + int nr_throttled; + u64 throttled_time; #endif }; -/* task group related information */ +/* Task group related information */ struct task_group { struct cgroup_subsys_state css; #ifdef CONFIG_FAIR_GROUP_SCHED - /* schedulable entities of this group on each cpu */ - struct sched_entity **se; - /* runqueue "owned" by this group on each cpu */ - struct cfs_rq **cfs_rq; - unsigned long shares; + /* schedulable entities of this group on each CPU */ + struct sched_entity **se; + /* runqueue "owned" by this group on each CPU */ + struct cfs_rq **cfs_rq; + unsigned long shares; #ifdef CONFIG_SMP /* @@ -333,29 +333,29 @@ struct task_group { * it in its own cacheline separated from the fields above which * will also be accessed at each tick. 
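Returning to the __dl_overflow() comparison a few hunks above: deadline bandwidths are kept as fixed-point fractions of one CPU, scaled by BW_UNIT (1 << BW_SHIFT, i.e. 1 << 20 per the defines further down in this header). A hypothetical sketch of that admission test follows; toy_to_ratio() is a stand-in for the to_ratio() helper declared later in the header, and the 95% per-CPU limit is an assumption for the example, not something this patch states.

#include <stdio.h>
#include <stdint.h>

#define TOY_BW_SHIFT	20
#define TOY_BW_UNIT	(1ULL << TOY_BW_SHIFT)

/* runtime/period as a fixed-point fraction of one CPU. */
static uint64_t toy_to_ratio(uint64_t period, uint64_t runtime)
{
	return (runtime << TOY_BW_SHIFT) / period;
}

int main(void)
{
	uint64_t limit_per_cpu = (95 * TOY_BW_UNIT) / 100;	/* assumed 95% cap */
	uint64_t total_bw = 3 * toy_to_ratio(100, 10);		/* three 10ms/100ms tasks */
	uint64_t new_bw   = toy_to_ratio(100, 50);		/* candidate: 50ms/100ms */
	uint64_t old_bw   = 0;					/* nothing being replaced */
	int cpus = 1;

	/* Same shape as the check above: reject if the new total would not fit. */
	int overflow = (limit_per_cpu * cpus) < (total_bw - old_bw + new_bw);

	printf("total=%llu new=%llu limit=%llu -> %s\n",
	       (unsigned long long)total_bw, (unsigned long long)new_bw,
	       (unsigned long long)limit_per_cpu, overflow ? "reject" : "admit");
	return 0;
}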
*/ - atomic_long_t load_avg ____cacheline_aligned; + atomic_long_t load_avg ____cacheline_aligned; #endif #endif #ifdef CONFIG_RT_GROUP_SCHED - struct sched_rt_entity **rt_se; - struct rt_rq **rt_rq; + struct sched_rt_entity **rt_se; + struct rt_rq **rt_rq; - struct rt_bandwidth rt_bandwidth; + struct rt_bandwidth rt_bandwidth; #endif - struct rcu_head rcu; - struct list_head list; + struct rcu_head rcu; + struct list_head list; - struct task_group *parent; - struct list_head siblings; - struct list_head children; + struct task_group *parent; + struct list_head siblings; + struct list_head children; #ifdef CONFIG_SCHED_AUTOGROUP - struct autogroup *autogroup; + struct autogroup *autogroup; #endif - struct cfs_bandwidth cfs_bandwidth; + struct cfs_bandwidth cfs_bandwidth; }; #ifdef CONFIG_FAIR_GROUP_SCHED @@ -369,8 +369,8 @@ struct task_group { * (The default weight is 1024 - so there's no practical * limitation from this.) */ -#define MIN_SHARES (1UL << 1) -#define MAX_SHARES (1UL << 18) +#define MIN_SHARES (1UL << 1) +#define MAX_SHARES (1UL << 18) #endif typedef int (*tg_visitor)(struct task_group *, void *); @@ -443,35 +443,39 @@ struct cfs_bandwidth { }; /* CFS-related fields in a runqueue */ struct cfs_rq { - struct load_weight load; - unsigned long runnable_weight; - unsigned int nr_running, h_nr_running; + struct load_weight load; + unsigned long runnable_weight; + unsigned int nr_running; + unsigned int h_nr_running; - u64 exec_clock; - u64 min_vruntime; + u64 exec_clock; + u64 min_vruntime; #ifndef CONFIG_64BIT - u64 min_vruntime_copy; + u64 min_vruntime_copy; #endif - struct rb_root_cached tasks_timeline; + struct rb_root_cached tasks_timeline; /* * 'curr' points to currently running entity on this cfs_rq. * It is set to NULL otherwise (i.e when none are currently running). */ - struct sched_entity *curr, *next, *last, *skip; + struct sched_entity *curr; + struct sched_entity *next; + struct sched_entity *last; + struct sched_entity *skip; #ifdef CONFIG_SCHED_DEBUG - unsigned int nr_spread_over; + unsigned int nr_spread_over; #endif #ifdef CONFIG_SMP /* * CFS load tracking */ - struct sched_avg avg; + struct sched_avg avg; #ifndef CONFIG_64BIT - u64 load_last_update_time_copy; + u64 load_last_update_time_copy; #endif struct { raw_spinlock_t lock ____cacheline_aligned; @@ -482,9 +486,9 @@ struct cfs_rq { } removed; #ifdef CONFIG_FAIR_GROUP_SCHED - unsigned long tg_load_avg_contrib; - long propagate; - long prop_runnable_sum; + unsigned long tg_load_avg_contrib; + long propagate; + long prop_runnable_sum; /* * h_load = weight * f(tg) @@ -492,36 +496,38 @@ struct cfs_rq { * Where f(tg) is the recursive weight fraction assigned to * this group. */ - unsigned long h_load; - u64 last_h_load_update; - struct sched_entity *h_load_next; + unsigned long h_load; + u64 last_h_load_update; + struct sched_entity *h_load_next; #endif /* CONFIG_FAIR_GROUP_SCHED */ #endif /* CONFIG_SMP */ #ifdef CONFIG_FAIR_GROUP_SCHED - struct rq *rq; /* cpu runqueue to which this cfs_rq is attached */ + struct rq *rq; /* CPU runqueue to which this cfs_rq is attached */ /* * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in * a hierarchy). Non-leaf lrqs hold other higher schedulable entities * (like users, containers etc.) * - * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This - * list is used during load balance. + * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a CPU. + * This list is used during load balance. 
*/ - int on_list; - struct list_head leaf_cfs_rq_list; - struct task_group *tg; /* group that "owns" this runqueue */ + int on_list; + struct list_head leaf_cfs_rq_list; + struct task_group *tg; /* group that "owns" this runqueue */ #ifdef CONFIG_CFS_BANDWIDTH - int runtime_enabled; - u64 runtime_expires; - s64 runtime_remaining; - - u64 throttled_clock, throttled_clock_task; - u64 throttled_clock_task_time; - int throttled, throttle_count; - struct list_head throttled_list; + int runtime_enabled; + u64 runtime_expires; + s64 runtime_remaining; + + u64 throttled_clock; + u64 throttled_clock_task; + u64 throttled_clock_task_time; + int throttled; + int throttle_count; + struct list_head throttled_list; #endif /* CONFIG_CFS_BANDWIDTH */ #endif /* CONFIG_FAIR_GROUP_SCHED */ }; @@ -538,45 +544,45 @@ static inline int rt_bandwidth_enabled(void) /* Real-Time classes' related field in a runqueue: */ struct rt_rq { - struct rt_prio_array active; - unsigned int rt_nr_running; - unsigned int rr_nr_running; + struct rt_prio_array active; + unsigned int rt_nr_running; + unsigned int rr_nr_running; #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED struct { - int curr; /* highest queued rt task prio */ + int curr; /* highest queued rt task prio */ #ifdef CONFIG_SMP - int next; /* next highest */ + int next; /* next highest */ #endif } highest_prio; #endif #ifdef CONFIG_SMP - unsigned long rt_nr_migratory; - unsigned long rt_nr_total; - int overloaded; - struct plist_head pushable_tasks; + unsigned long rt_nr_migratory; + unsigned long rt_nr_total; + int overloaded; + struct plist_head pushable_tasks; #endif /* CONFIG_SMP */ - int rt_queued; + int rt_queued; - int rt_throttled; - u64 rt_time; - u64 rt_runtime; + int rt_throttled; + u64 rt_time; + u64 rt_runtime; /* Nests inside the rq lock: */ - raw_spinlock_t rt_runtime_lock; + raw_spinlock_t rt_runtime_lock; #ifdef CONFIG_RT_GROUP_SCHED - unsigned long rt_nr_boosted; + unsigned long rt_nr_boosted; - struct rq *rq; - struct task_group *tg; + struct rq *rq; + struct task_group *tg; #endif }; /* Deadline class' related fields in a runqueue */ struct dl_rq { /* runqueue is an rbtree, ordered by deadline */ - struct rb_root_cached root; + struct rb_root_cached root; - unsigned long dl_nr_running; + unsigned long dl_nr_running; #ifdef CONFIG_SMP /* @@ -586,28 +592,28 @@ struct dl_rq { * should migrate somewhere else. */ struct { - u64 curr; - u64 next; + u64 curr; + u64 next; } earliest_dl; - unsigned long dl_nr_migratory; - int overloaded; + unsigned long dl_nr_migratory; + int overloaded; /* * Tasks on this rq that can be pushed away. They are kept in * an rb-tree, ordered by tasks' deadlines, with caching * of the leftmost (earliest deadline) element. */ - struct rb_root_cached pushable_dl_tasks_root; + struct rb_root_cached pushable_dl_tasks_root; #else - struct dl_bw dl_bw; + struct dl_bw dl_bw; #endif /* * "Active utilization" for this runqueue: increased when a * task wakes up (becomes TASK_RUNNING) and decreased when a * task blocks */ - u64 running_bw; + u64 running_bw; /* * Utilization of the tasks "assigned" to this runqueue (including @@ -618,14 +624,14 @@ struct dl_rq { * This is needed to compute the "inactive utilization" for the * runqueue (inactive utilization = this_bw - running_bw). */ - u64 this_bw; - u64 extra_bw; + u64 this_bw; + u64 extra_bw; /* * Inverse of the fraction of CPU utilization that can be reclaimed * by the GRUB algorithm. 
*/ - u64 bw_ratio; + u64 bw_ratio; }; #ifdef CONFIG_SMP @@ -638,51 +644,51 @@ static inline bool sched_asym_prefer(int a, int b) /* * We add the notion of a root-domain which will be used to define per-domain * variables. Each exclusive cpuset essentially defines an island domain by - * fully partitioning the member cpus from any other cpuset. Whenever a new + * fully partitioning the member CPUs from any other cpuset. Whenever a new * exclusive cpuset is created, we also create and attach a new root-domain * object. * */ struct root_domain { - atomic_t refcount; - atomic_t rto_count; - struct rcu_head rcu; - cpumask_var_t span; - cpumask_var_t online; + atomic_t refcount; + atomic_t rto_count; + struct rcu_head rcu; + cpumask_var_t span; + cpumask_var_t online; /* Indicate more than one runnable task for any CPU */ - bool overload; + bool overload; /* * The bit corresponding to a CPU gets set here if such CPU has more * than one runnable -deadline task (as it is below for RT tasks). */ - cpumask_var_t dlo_mask; - atomic_t dlo_count; - struct dl_bw dl_bw; - struct cpudl cpudl; + cpumask_var_t dlo_mask; + atomic_t dlo_count; + struct dl_bw dl_bw; + struct cpudl cpudl; #ifdef HAVE_RT_PUSH_IPI /* * For IPI pull requests, loop across the rto_mask. */ - struct irq_work rto_push_work; - raw_spinlock_t rto_lock; + struct irq_work rto_push_work; + raw_spinlock_t rto_lock; /* These are only updated and read within rto_lock */ - int rto_loop; - int rto_cpu; + int rto_loop; + int rto_cpu; /* These atomics are updated outside of a lock */ - atomic_t rto_loop_next; - atomic_t rto_loop_start; + atomic_t rto_loop_next; + atomic_t rto_loop_start; #endif /* * The "RT overload" flag: it gets set if a CPU has more than * one runnable RT task. */ - cpumask_var_t rto_mask; - struct cpupri cpupri; + cpumask_var_t rto_mask; + struct cpupri cpupri; - unsigned long max_cpu_capacity; + unsigned long max_cpu_capacity; }; extern struct root_domain def_root_domain; @@ -708,39 +714,39 @@ extern void rto_push_irq_work_func(struct irq_work *work); */ struct rq { /* runqueue lock: */ - raw_spinlock_t lock; + raw_spinlock_t lock; /* * nr_running and cpu_load should be in the same cacheline because * remote CPUs use both these fields when doing load calculation. 
*/ - unsigned int nr_running; + unsigned int nr_running; #ifdef CONFIG_NUMA_BALANCING - unsigned int nr_numa_running; - unsigned int nr_preferred_running; + unsigned int nr_numa_running; + unsigned int nr_preferred_running; #endif #define CPU_LOAD_IDX_MAX 5 - unsigned long cpu_load[CPU_LOAD_IDX_MAX]; + unsigned long cpu_load[CPU_LOAD_IDX_MAX]; #ifdef CONFIG_NO_HZ_COMMON #ifdef CONFIG_SMP - unsigned long last_load_update_tick; + unsigned long last_load_update_tick; #endif /* CONFIG_SMP */ - unsigned long nohz_flags; + unsigned long nohz_flags; #endif /* CONFIG_NO_HZ_COMMON */ - /* capture load from *all* tasks on this cpu: */ - struct load_weight load; - unsigned long nr_load_updates; - u64 nr_switches; + /* capture load from *all* tasks on this CPU: */ + struct load_weight load; + unsigned long nr_load_updates; + u64 nr_switches; - struct cfs_rq cfs; - struct rt_rq rt; - struct dl_rq dl; + struct cfs_rq cfs; + struct rt_rq rt; + struct dl_rq dl; #ifdef CONFIG_FAIR_GROUP_SCHED - /* list of leaf cfs_rq on this cpu: */ - struct list_head leaf_cfs_rq_list; - struct list_head *tmp_alone_branch; + /* list of leaf cfs_rq on this CPU: */ + struct list_head leaf_cfs_rq_list; + struct list_head *tmp_alone_branch; #endif /* CONFIG_FAIR_GROUP_SCHED */ /* @@ -749,94 +755,98 @@ struct rq { * one CPU and if it got migrated afterwards it may decrease * it on another CPU. Always updated under the runqueue lock: */ - unsigned long nr_uninterruptible; + unsigned long nr_uninterruptible; - struct task_struct *curr, *idle, *stop; - unsigned long next_balance; - struct mm_struct *prev_mm; + struct task_struct *curr; + struct task_struct *idle; + struct task_struct *stop; + unsigned long next_balance; + struct mm_struct *prev_mm; - unsigned int clock_update_flags; - u64 clock; - u64 clock_task; + unsigned int clock_update_flags; + u64 clock; + u64 clock_task; - atomic_t nr_iowait; + atomic_t nr_iowait; #ifdef CONFIG_SMP - struct root_domain *rd; - struct sched_domain *sd; + struct root_domain *rd; + struct sched_domain *sd; + + unsigned long cpu_capacity; + unsigned long cpu_capacity_orig; - unsigned long cpu_capacity; - unsigned long cpu_capacity_orig; + struct callback_head *balance_callback; - struct callback_head *balance_callback; + unsigned char idle_balance; - unsigned char idle_balance; /* For active balancing */ - int active_balance; - int push_cpu; - struct cpu_stop_work active_balance_work; - /* cpu of this runqueue: */ - int cpu; - int online; + int active_balance; + int push_cpu; + struct cpu_stop_work active_balance_work; + + /* CPU of this runqueue: */ + int cpu; + int online; struct list_head cfs_tasks; - u64 rt_avg; - u64 age_stamp; - u64 idle_stamp; - u64 avg_idle; + u64 rt_avg; + u64 age_stamp; + u64 idle_stamp; + u64 avg_idle; /* This is used to determine avg_idle's max value */ - u64 max_idle_balance_cost; + u64 max_idle_balance_cost; #endif #ifdef CONFIG_IRQ_TIME_ACCOUNTING - u64 prev_irq_time; + u64 prev_irq_time; #endif #ifdef CONFIG_PARAVIRT - u64 prev_steal_time; + u64 prev_steal_time; #endif #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING - u64 prev_steal_time_rq; + u64 prev_steal_time_rq; #endif /* calc_load related fields */ - unsigned long calc_load_update; - long calc_load_active; + unsigned long calc_load_update; + long calc_load_active; #ifdef CONFIG_SCHED_HRTICK #ifdef CONFIG_SMP - int hrtick_csd_pending; - call_single_data_t hrtick_csd; + int hrtick_csd_pending; + call_single_data_t hrtick_csd; #endif - struct hrtimer hrtick_timer; + struct hrtimer hrtick_timer; #endif #ifdef 
CONFIG_SCHEDSTATS /* latency stats */ - struct sched_info rq_sched_info; - unsigned long long rq_cpu_time; + struct sched_info rq_sched_info; + unsigned long long rq_cpu_time; /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */ /* sys_sched_yield() stats */ - unsigned int yld_count; + unsigned int yld_count; /* schedule() stats */ - unsigned int sched_count; - unsigned int sched_goidle; + unsigned int sched_count; + unsigned int sched_goidle; /* try_to_wake_up() stats */ - unsigned int ttwu_count; - unsigned int ttwu_local; + unsigned int ttwu_count; + unsigned int ttwu_local; #endif #ifdef CONFIG_SMP - struct llist_head wake_list; + struct llist_head wake_list; #endif #ifdef CONFIG_CPU_IDLE /* Must be inspected within a rcu lock section */ - struct cpuidle_state *idle_state; + struct cpuidle_state *idle_state; #endif }; @@ -902,9 +912,9 @@ static inline u64 __rq_clock_broken(struct rq *rq) * one position though, because the next rq_unpin_lock() will shift it * back. */ -#define RQCF_REQ_SKIP 0x01 -#define RQCF_ACT_SKIP 0x02 -#define RQCF_UPDATED 0x04 +#define RQCF_REQ_SKIP 0x01 +#define RQCF_ACT_SKIP 0x02 +#define RQCF_UPDATED 0x04 static inline void assert_clock_updated(struct rq *rq) { @@ -1057,12 +1067,12 @@ extern void sched_ttwu_pending(void); /** * highest_flag_domain - Return highest sched_domain containing flag. - * @cpu: The cpu whose highest level of sched domain is to + * @cpu: The CPU whose highest level of sched domain is to * be returned. * @flag: The flag to check for the highest sched_domain - * for the given cpu. + * for the given CPU. * - * Returns the highest sched_domain of a cpu which contains the given flag. + * Returns the highest sched_domain of a CPU which contains the given flag. */ static inline struct sched_domain *highest_flag_domain(int cpu, int flag) { @@ -1097,30 +1107,30 @@ DECLARE_PER_CPU(struct sched_domain *, sd_numa); DECLARE_PER_CPU(struct sched_domain *, sd_asym); struct sched_group_capacity { - atomic_t ref; + atomic_t ref; /* * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity * for a single CPU. */ - unsigned long capacity; - unsigned long min_capacity; /* Min per-CPU capacity in group */ - unsigned long next_update; - int imbalance; /* XXX unrelated to capacity but shared group state */ + unsigned long capacity; + unsigned long min_capacity; /* Min per-CPU capacity in group */ + unsigned long next_update; + int imbalance; /* XXX unrelated to capacity but shared group state */ #ifdef CONFIG_SCHED_DEBUG - int id; + int id; #endif - unsigned long cpumask[0]; /* balance mask */ + unsigned long cpumask[0]; /* Balance mask */ }; struct sched_group { - struct sched_group *next; /* Must be a circular list */ - atomic_t ref; + struct sched_group *next; /* Must be a circular list */ + atomic_t ref; - unsigned int group_weight; + unsigned int group_weight; struct sched_group_capacity *sgc; - int asym_prefer_cpu; /* cpu of highest priority in group */ + int asym_prefer_cpu; /* CPU of highest priority in group */ /* * The CPUs this group covers. @@ -1129,7 +1139,7 @@ struct sched_group { * by attaching extra space to the end of the structure, * depending on how many CPUs the kernel has booted up with) */ - unsigned long cpumask[0]; + unsigned long cpumask[0]; }; static inline struct cpumask *sched_group_span(struct sched_group *sg) @@ -1146,8 +1156,8 @@ static inline struct cpumask *group_balance_mask(struct sched_group *sg) } /** - * group_first_cpu - Returns the first cpu in the cpumask of a sched_group. 
- * @group: The group whose first cpu is to be returned. + * group_first_cpu - Returns the first CPU in the cpumask of a sched_group. + * @group: The group whose first CPU is to be returned. */ static inline unsigned int group_first_cpu(struct sched_group *group) { @@ -1357,9 +1367,9 @@ static inline int task_on_rq_migrating(struct task_struct *p) /* * wake flags */ -#define WF_SYNC 0x01 /* waker goes to sleep after wakeup */ -#define WF_FORK 0x02 /* child wakeup after fork */ -#define WF_MIGRATED 0x4 /* internal use, task got migrated */ +#define WF_SYNC 0x01 /* Waker goes to sleep after wakeup */ +#define WF_FORK 0x02 /* Child wakeup after fork */ +#define WF_MIGRATED 0x4 /* Internal use, task got migrated */ /* * To aid in avoiding the subversion of "niceness" due to uneven distribution @@ -1370,11 +1380,11 @@ static inline int task_on_rq_migrating(struct task_struct *p) * slice expiry etc. */ -#define WEIGHT_IDLEPRIO 3 -#define WMULT_IDLEPRIO 1431655765 +#define WEIGHT_IDLEPRIO 3 +#define WMULT_IDLEPRIO 1431655765 -extern const int sched_prio_to_weight[40]; -extern const u32 sched_prio_to_wmult[40]; +extern const int sched_prio_to_weight[40]; +extern const u32 sched_prio_to_wmult[40]; /* * {de,en}queue flags: @@ -1396,9 +1406,9 @@ extern const u32 sched_prio_to_wmult[40]; */ #define DEQUEUE_SLEEP 0x01 -#define DEQUEUE_SAVE 0x02 /* matches ENQUEUE_RESTORE */ -#define DEQUEUE_MOVE 0x04 /* matches ENQUEUE_MOVE */ -#define DEQUEUE_NOCLOCK 0x08 /* matches ENQUEUE_NOCLOCK */ +#define DEQUEUE_SAVE 0x02 /* Matches ENQUEUE_RESTORE */ +#define DEQUEUE_MOVE 0x04 /* Matches ENQUEUE_MOVE */ +#define DEQUEUE_NOCLOCK 0x08 /* Matches ENQUEUE_NOCLOCK */ #define ENQUEUE_WAKEUP 0x01 #define ENQUEUE_RESTORE 0x02 @@ -1420,10 +1430,10 @@ struct sched_class { void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags); void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags); - void (*yield_task) (struct rq *rq); - bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt); + void (*yield_task) (struct rq *rq); + bool (*yield_to_task)(struct rq *rq, struct task_struct *p, bool preempt); - void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags); + void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags); /* * It is the responsibility of the pick_next_task() method that will @@ -1433,16 +1443,16 @@ struct sched_class { * May return RETRY_TASK when it finds a higher prio class has runnable * tasks. 
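A small numeric aside on the weight defines realigned above: WMULT_IDLEPRIO (1431655765) is just 2^32 / WEIGHT_IDLEPRIO (3) rounded down, and the sched_prio_to_wmult[] table declared above holds the matching precomputed inverses for each nice level. That lets a division by a task weight be replaced with a multiply and a 32-bit right shift. The check below is illustrative only and deliberately ignores the overflow handling the scheduler's real delta scaling performs; the TOY_ names are mine.

#include <stdio.h>
#include <stdint.h>

#define TOY_WEIGHT_IDLEPRIO	3
#define TOY_WMULT_IDLEPRIO	1431655765U	/* ~= 2^32 / 3 */

int main(void)
{
	uint64_t delta = 1000000;	/* some time delta; units do not matter */

	uint64_t exact  = delta / TOY_WEIGHT_IDLEPRIO;
	uint64_t approx = (delta * TOY_WMULT_IDLEPRIO) >> 32;

	/* Both print 333333: the inverse-multiply matches the true division. */
	printf("exact=%llu approx=%llu\n",
	       (unsigned long long)exact, (unsigned long long)approx);
	return 0;
}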
*/ - struct task_struct * (*pick_next_task) (struct rq *rq, - struct task_struct *prev, - struct rq_flags *rf); - void (*put_prev_task) (struct rq *rq, struct task_struct *p); + struct task_struct * (*pick_next_task)(struct rq *rq, + struct task_struct *prev, + struct rq_flags *rf); + void (*put_prev_task)(struct rq *rq, struct task_struct *p); #ifdef CONFIG_SMP int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags); void (*migrate_task_rq)(struct task_struct *p); - void (*task_woken) (struct rq *this_rq, struct task_struct *task); + void (*task_woken)(struct rq *this_rq, struct task_struct *task); void (*set_cpus_allowed)(struct task_struct *p, const struct cpumask *newmask); @@ -1451,31 +1461,31 @@ struct sched_class { void (*rq_offline)(struct rq *rq); #endif - void (*set_curr_task) (struct rq *rq); - void (*task_tick) (struct rq *rq, struct task_struct *p, int queued); - void (*task_fork) (struct task_struct *p); - void (*task_dead) (struct task_struct *p); + void (*set_curr_task)(struct rq *rq); + void (*task_tick)(struct rq *rq, struct task_struct *p, int queued); + void (*task_fork)(struct task_struct *p); + void (*task_dead)(struct task_struct *p); /* * The switched_from() call is allowed to drop rq->lock, therefore we * cannot assume the switched_from/switched_to pair is serliazed by * rq->lock. They are however serialized by p->pi_lock. */ - void (*switched_from) (struct rq *this_rq, struct task_struct *task); - void (*switched_to) (struct rq *this_rq, struct task_struct *task); + void (*switched_from)(struct rq *this_rq, struct task_struct *task); + void (*switched_to) (struct rq *this_rq, struct task_struct *task); void (*prio_changed) (struct rq *this_rq, struct task_struct *task, - int oldprio); + int oldprio); - unsigned int (*get_rr_interval) (struct rq *rq, - struct task_struct *task); + unsigned int (*get_rr_interval)(struct rq *rq, + struct task_struct *task); - void (*update_curr) (struct rq *rq); + void (*update_curr)(struct rq *rq); -#define TASK_SET_GROUP 0 -#define TASK_MOVE_GROUP 1 +#define TASK_SET_GROUP 0 +#define TASK_MOVE_GROUP 1 #ifdef CONFIG_FAIR_GROUP_SCHED - void (*task_change_group) (struct task_struct *p, int type); + void (*task_change_group)(struct task_struct *p, int type); #endif }; @@ -1524,6 +1534,7 @@ static inline void idle_set_state(struct rq *rq, static inline struct cpuidle_state *idle_get_state(struct rq *rq) { SCHED_WARN_ON(!rcu_read_lock_held()); + return rq->idle_state; } #else @@ -1562,9 +1573,9 @@ extern void init_dl_task_timer(struct sched_dl_entity *dl_se); extern void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se); extern void init_dl_rq_bw_ratio(struct dl_rq *dl_rq); -#define BW_SHIFT 20 -#define BW_UNIT (1 << BW_SHIFT) -#define RATIO_SHIFT 8 +#define BW_SHIFT 20 +#define BW_UNIT (1 << BW_SHIFT) +#define RATIO_SHIFT 8 unsigned long to_ratio(u64 period, u64 runtime); extern void init_entity_runnable_average(struct sched_entity *se); @@ -1814,8 +1825,8 @@ static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) /* * Unfair double_lock_balance: Optimizes throughput at the expense of * latency by eliminating extra atomic operations when the locks are - * already in proper order on entry. This favors lower cpu-ids and will - * grant the double lock to lower cpus over higher ids under contention, + * already in proper order on entry. 
This favors lower CPU-ids and will + * grant the double lock to lower CPUs over higher ids under contention, * regardless of entry order into the function. */ static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) @@ -1847,7 +1858,7 @@ static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest) { if (unlikely(!irqs_disabled())) { - /* printk() doesn't work good under rq->lock */ + /* printk() doesn't work well under rq->lock */ raw_spin_unlock(&this_rq->lock); BUG_ON(1); } @@ -2106,15 +2117,14 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {} #endif /* CONFIG_CPU_FREQ */ #ifdef arch_scale_freq_capacity -#ifndef arch_scale_freq_invariant -#define arch_scale_freq_invariant() (true) -#endif -#else /* arch_scale_freq_capacity */ -#define arch_scale_freq_invariant() (false) +# ifndef arch_scale_freq_invariant +# define arch_scale_freq_invariant() true +# endif +#else +# define arch_scale_freq_invariant() false #endif #ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL - static inline unsigned long cpu_util_dl(struct rq *rq) { return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT; @@ -2124,5 +2134,4 @@ static inline unsigned long cpu_util_cfs(struct rq *rq) { return rq->cfs.avg.util_avg; } - #endif diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c index 940b1fa1d2ce..968c1fe3099a 100644 --- a/kernel/sched/stats.c +++ b/kernel/sched/stats.c @@ -78,8 +78,8 @@ static int show_schedstat(struct seq_file *seq, void *v) * This itererator needs some explanation. * It returns 1 for the header position. * This means 2 is cpu 0. - * In a hotplugged system some cpus, including cpu 0, may be missing so we have - * to use cpumask_* to iterate over the cpus. + * In a hotplugged system some CPUs, including cpu 0, may be missing so we have + * to use cpumask_* to iterate over the CPUs. 
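The cpu_util_dl() expression shown above is a straight fixed-point rescale: running_bw is a fraction of one CPU in BW_SHIFT (= 20) fixed point, and the result comes out in capacity units. Assuming SCHED_CAPACITY_SCALE is 1024 (that define is not shown in this hunk), a quarter of a CPU works out as follows; the TOY_ names are placeholders.

#include <stdio.h>
#include <stdint.h>

#define TOY_BW_SHIFT		20
#define TOY_CAPACITY_SCALE	1024	/* assumed value of SCHED_CAPACITY_SCALE */

int main(void)
{
	uint64_t running_bw = 1ULL << (TOY_BW_SHIFT - 2);	/* 0.25 of a CPU */
	uint64_t util = (running_bw * TOY_CAPACITY_SCALE) >> TOY_BW_SHIFT;

	/* 262144 * 1024 >> 20 = 256, i.e. 25% of a 1024-unit CPU. */
	printf("util = %llu\n", (unsigned long long)util);
	return 0;
}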
*/ static void *schedstat_start(struct seq_file *file, loff_t *offset) { @@ -99,12 +99,14 @@ static void *schedstat_start(struct seq_file *file, loff_t *offset) if (n < nr_cpu_ids) return (void *)(unsigned long)(n + 2); + return NULL; } static void *schedstat_next(struct seq_file *file, void *data, loff_t *offset) { (*offset)++; + return schedstat_start(file, offset); } @@ -134,6 +136,7 @@ static const struct file_operations proc_schedstat_operations = { static int __init proc_schedstat_init(void) { proc_create("schedstat", 0, NULL, &proc_schedstat_operations); + return 0; } subsys_initcall(proc_schedstat_init); diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h index 8e7b58de61e7..8aea199a39b4 100644 --- a/kernel/sched/stats.h +++ b/kernel/sched/stats.h @@ -30,35 +30,29 @@ rq_sched_info_dequeued(struct rq *rq, unsigned long long delta) if (rq) rq->rq_sched_info.run_delay += delta; } -#define schedstat_enabled() static_branch_unlikely(&sched_schedstats) +#define schedstat_enabled() static_branch_unlikely(&sched_schedstats) #define __schedstat_inc(var) do { var++; } while (0) -#define schedstat_inc(var) do { if (schedstat_enabled()) { var++; } } while (0) +#define schedstat_inc(var) do { if (schedstat_enabled()) { var++; } } while (0) #define __schedstat_add(var, amt) do { var += (amt); } while (0) -#define schedstat_add(var, amt) do { if (schedstat_enabled()) { var += (amt); } } while (0) -#define __schedstat_set(var, val) do { var = (val); } while (0) -#define schedstat_set(var, val) do { if (schedstat_enabled()) { var = (val); } } while (0) -#define schedstat_val(var) (var) -#define schedstat_val_or_zero(var) ((schedstat_enabled()) ? (var) : 0) - -#else /* !CONFIG_SCHEDSTATS */ -static inline void -rq_sched_info_arrive(struct rq *rq, unsigned long long delta) -{} -static inline void -rq_sched_info_dequeued(struct rq *rq, unsigned long long delta) -{} -static inline void -rq_sched_info_depart(struct rq *rq, unsigned long long delta) -{} -#define schedstat_enabled() 0 -#define __schedstat_inc(var) do { } while (0) -#define schedstat_inc(var) do { } while (0) -#define __schedstat_add(var, amt) do { } while (0) -#define schedstat_add(var, amt) do { } while (0) -#define __schedstat_set(var, val) do { } while (0) -#define schedstat_set(var, val) do { } while (0) -#define schedstat_val(var) 0 -#define schedstat_val_or_zero(var) 0 +#define schedstat_add(var, amt) do { if (schedstat_enabled()) { var += (amt); } } while (0) +#define __schedstat_set(var, val) do { var = (val); } while (0) +#define schedstat_set(var, val) do { if (schedstat_enabled()) { var = (val); } } while (0) +#define schedstat_val(var) (var) +#define schedstat_val_or_zero(var) ((schedstat_enabled()) ? 
(var) : 0) + +#else /* !CONFIG_SCHEDSTATS: */ +static inline void rq_sched_info_arrive (struct rq *rq, unsigned long long delta) { } +static inline void rq_sched_info_dequeued(struct rq *rq, unsigned long long delta) { } +static inline void rq_sched_info_depart (struct rq *rq, unsigned long long delta) { } +# define schedstat_enabled() 0 +# define __schedstat_inc(var) do { } while (0) +# define schedstat_inc(var) do { } while (0) +# define __schedstat_add(var, amt) do { } while (0) +# define schedstat_add(var, amt) do { } while (0) +# define __schedstat_set(var, val) do { } while (0) +# define schedstat_set(var, val) do { } while (0) +# define schedstat_val(var) 0 +# define schedstat_val_or_zero(var) 0 #endif /* CONFIG_SCHEDSTATS */ #ifdef CONFIG_SCHED_INFO @@ -69,9 +63,9 @@ static inline void sched_info_reset_dequeued(struct task_struct *t) /* * We are interested in knowing how long it was from the *first* time a - * task was queued to the time that it finally hit a cpu, we call this routine - * from dequeue_task() to account for possible rq->clock skew across cpus. The - * delta taken on each cpu would annul the skew. + * task was queued to the time that it finally hit a CPU, we call this routine + * from dequeue_task() to account for possible rq->clock skew across CPUs. The + * delta taken on each CPU would annul the skew. */ static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t) { @@ -87,7 +81,7 @@ static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t) } /* - * Called when a task finally hits the cpu. We can now calculate how + * Called when a task finally hits the CPU. We can now calculate how * long it was waiting to run. We also note when it began so that we * can keep stats on how long its timeslice is. */ @@ -112,9 +106,10 @@ static void sched_info_arrive(struct rq *rq, struct task_struct *t) */ static inline void sched_info_queued(struct rq *rq, struct task_struct *t) { - if (unlikely(sched_info_on())) + if (unlikely(sched_info_on())) { if (!t->sched_info.last_queued) t->sched_info.last_queued = rq_clock(rq); + } } /* @@ -127,8 +122,7 @@ static inline void sched_info_queued(struct rq *rq, struct task_struct *t) */ static inline void sched_info_depart(struct rq *rq, struct task_struct *t) { - unsigned long long delta = rq_clock(rq) - - t->sched_info.last_arrival; + unsigned long long delta = rq_clock(rq) - t->sched_info.last_arrival; rq_sched_info_depart(rq, delta); @@ -142,11 +136,10 @@ static inline void sched_info_depart(struct rq *rq, struct task_struct *t) * the idle task.) We are only called when prev != next. */ static inline void -__sched_info_switch(struct rq *rq, - struct task_struct *prev, struct task_struct *next) +__sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next) { /* - * prev now departs the cpu. It's not interesting to record + * prev now departs the CPU. It's not interesting to record * stats about how efficient we were at scheduling the idle * process, however. 
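The schedstat_*() macros realigned above all follow one pattern: the underscore-free variants check schedstat_enabled() (a static branch in the kernel) before touching the counter, and every variant collapses to an empty do/while(0) statement when CONFIG_SCHEDSTATS is off. A userspace sketch of that pattern, with a plain flag standing in for the static branch and toy_ names invented here:

#include <stdio.h>

static int toy_stats_enabled = 1;	/* stands in for static_branch_unlikely() */

#define toy_schedstat_enabled()		(toy_stats_enabled)
#define toy_schedstat_inc(var)		do { if (toy_schedstat_enabled()) { (var)++; } } while (0)
#define toy_schedstat_add(var, amt)	do { if (toy_schedstat_enabled()) { (var) += (amt); } } while (0)

int main(void)
{
	unsigned int yld_count = 0;
	unsigned long long wait_ns = 0;

	toy_schedstat_inc(yld_count);
	toy_schedstat_add(wait_ns, 1500);

	toy_stats_enabled = 0;		/* from here on both are cheap no-ops */
	toy_schedstat_inc(yld_count);

	printf("yld_count=%u wait_ns=%llu\n", yld_count, wait_ns);	/* 1, 1500 */
	return 0;
}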
*/ @@ -156,18 +149,19 @@ __sched_info_switch(struct rq *rq, if (next != rq->idle) sched_info_arrive(rq, next); } + static inline void -sched_info_switch(struct rq *rq, - struct task_struct *prev, struct task_struct *next) +sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next) { if (unlikely(sched_info_on())) __sched_info_switch(rq, prev, next); } -#else -#define sched_info_queued(rq, t) do { } while (0) -#define sched_info_reset_dequeued(t) do { } while (0) -#define sched_info_dequeued(rq, t) do { } while (0) -#define sched_info_depart(rq, t) do { } while (0) -#define sched_info_arrive(rq, next) do { } while (0) -#define sched_info_switch(rq, t, next) do { } while (0) + +#else /* !CONFIG_SCHED_INFO: */ +# define sched_info_queued(rq, t) do { } while (0) +# define sched_info_reset_dequeued(t) do { } while (0) +# define sched_info_dequeued(rq, t) do { } while (0) +# define sched_info_depart(rq, t) do { } while (0) +# define sched_info_arrive(rq, next) do { } while (0) +# define sched_info_switch(rq, t, next) do { } while (0) #endif /* CONFIG_SCHED_INFO */ diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c index ea8d2b6a1239..c183b790ca54 100644 --- a/kernel/sched/stop_task.c +++ b/kernel/sched/stop_task.c @@ -1,6 +1,4 @@ // SPDX-License-Identifier: GPL-2.0 -#include "sched.h" - /* * stop-task scheduling class. * @@ -9,6 +7,7 @@ * * See kernel/stop_machine.c */ +#include "sched.h" #ifdef CONFIG_SMP static int diff --git a/kernel/sched/swait.c b/kernel/sched/swait.c index 9ff1555341ed..b88ab4e0207f 100644 --- a/kernel/sched/swait.c +++ b/kernel/sched/swait.c @@ -1,4 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 +/* + * (simple wait queues ) implementation: + */ #include #include diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index 519b024f4e94..219eee70e457 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -41,8 +41,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, if (!(sd->flags & SD_LOAD_BALANCE)) { printk("does not load-balance\n"); if (sd->parent) - printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain" - " has parent"); + printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain has parent"); return -1; } @@ -50,12 +49,10 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, cpumask_pr_args(sched_domain_span(sd)), sd->name); if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) { - printk(KERN_ERR "ERROR: domain->span does not contain " - "CPU%d\n", cpu); + printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu); } if (!cpumask_test_cpu(cpu, sched_group_span(group))) { - printk(KERN_ERR "ERROR: domain->groups does not contain" - " CPU%d\n", cpu); + printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu); } printk(KERN_DEBUG "%*s groups:", level + 1, ""); @@ -115,8 +112,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, if (sd->parent && !cpumask_subset(groupmask, sched_domain_span(sd->parent))) - printk(KERN_ERR "ERROR: parent span is not a superset " - "of domain->span\n"); + printk(KERN_ERR "ERROR: parent span is not a superset of domain->span\n"); return 0; } @@ -595,7 +591,7 @@ int group_balance_cpu(struct sched_group *sg) * are not. * * This leads to a few particularly weird cases where the sched_domain's are - * not of the same number for each cpu. Consider: + * not of the same number for each CPU. 
Consider: * * NUMA-2 0-3 0-3 * groups: {0-2},{1-3} {1-3},{0-2} @@ -780,7 +776,7 @@ fail: * ^ ^ ^ ^ * `-' `-' * - * The sched_domains are per-cpu and have a two way link (parent & child) and + * The sched_domains are per-CPU and have a two way link (parent & child) and * denote the ever growing mask of CPUs belonging to that level of topology. * * Each sched_domain has a circular (double) linked list of sched_group's, each @@ -1021,6 +1017,7 @@ __visit_domain_allocation_hell(struct s_data *d, const struct cpumask *cpu_map) d->rd = alloc_rootdomain(); if (!d->rd) return sa_sd; + return sa_rootdomain; } @@ -1047,12 +1044,14 @@ static void claim_allocations(int cpu, struct sched_domain *sd) } #ifdef CONFIG_NUMA -static int sched_domains_numa_levels; enum numa_topology_type sched_numa_topology_type; -static int *sched_domains_numa_distance; -int sched_max_numa_distance; -static struct cpumask ***sched_domains_numa_masks; -static int sched_domains_curr_level; + +static int sched_domains_numa_levels; +static int sched_domains_curr_level; + +int sched_max_numa_distance; +static int *sched_domains_numa_distance; +static struct cpumask ***sched_domains_numa_masks; #endif /* @@ -1074,11 +1073,11 @@ static int sched_domains_curr_level; * SD_ASYM_PACKING - describes SMT quirks */ #define TOPOLOGY_SD_FLAGS \ - (SD_SHARE_CPUCAPACITY | \ + (SD_SHARE_CPUCAPACITY | \ SD_SHARE_PKG_RESOURCES | \ - SD_NUMA | \ - SD_ASYM_PACKING | \ - SD_ASYM_CPUCAPACITY | \ + SD_NUMA | \ + SD_ASYM_PACKING | \ + SD_ASYM_CPUCAPACITY | \ SD_SHARE_POWERDOMAIN) static struct sched_domain * @@ -1628,7 +1627,7 @@ static struct sched_domain *build_sched_domain(struct sched_domain_topology_leve pr_err(" the %s domain not a subset of the %s domain\n", child->name, sd->name); #endif - /* Fixup, ensure @sd has at least @child cpus. */ + /* Fixup, ensure @sd has at least @child CPUs. */ cpumask_or(sched_domain_span(sd), sched_domain_span(sd), sched_domain_span(child)); @@ -1720,6 +1719,7 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att ret = 0; error: __free_domain_allocs(&d, alloc_state, cpu_map); + return ret; } @@ -1824,6 +1824,7 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur, return 1; tmp = SD_ATTR_INIT; + return !memcmp(cur ? (cur + idx_cur) : &tmp, new ? 
(new + idx_new) : &tmp, sizeof(struct sched_domain_attr)); @@ -1929,4 +1930,3 @@ match2: mutex_unlock(&sched_domains_mutex); } - diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c index 929ecb7d6b78..7b2a142ae629 100644 --- a/kernel/sched/wait.c +++ b/kernel/sched/wait.c @@ -107,6 +107,7 @@ static int __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode, break; } } + return nr_exclusive; } @@ -317,6 +318,7 @@ int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait) spin_unlock(&wq->lock); schedule(); spin_lock(&wq->lock); + return 0; } EXPORT_SYMBOL(do_wait_intr); @@ -333,6 +335,7 @@ int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait) spin_unlock_irq(&wq->lock); schedule(); spin_lock_irq(&wq->lock); + return 0; } EXPORT_SYMBOL(do_wait_intr_irq); @@ -378,6 +381,7 @@ int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, i if (ret) list_del_init(&wq_entry->entry); + return ret; } EXPORT_SYMBOL(autoremove_wake_function); diff --git a/kernel/sched/wait_bit.c b/kernel/sched/wait_bit.c index 84cb3acd9260..5293c59163a6 100644 --- a/kernel/sched/wait_bit.c +++ b/kernel/sched/wait_bit.c @@ -29,8 +29,8 @@ int wake_bit_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync wait_bit->key.bit_nr != key->bit_nr || test_bit(key->bit_nr, key->flags)) return 0; - else - return autoremove_wake_function(wq_entry, mode, sync, key); + + return autoremove_wake_function(wq_entry, mode, sync, key); } EXPORT_SYMBOL(wake_bit_function); @@ -50,7 +50,9 @@ __wait_on_bit(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_ if (test_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags)) ret = (*action)(&wbq_entry->key, mode); } while (test_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags) && !ret); + finish_wait(wq_head, &wbq_entry->wq_entry); + return ret; } EXPORT_SYMBOL(__wait_on_bit); @@ -73,6 +75,7 @@ int __sched out_of_line_wait_on_bit_timeout( DEFINE_WAIT_BIT(wq_entry, word, bit); wq_entry.key.timeout = jiffies + timeout; + return __wait_on_bit(wq_head, &wq_entry, action, mode); } EXPORT_SYMBOL_GPL(out_of_line_wait_on_bit_timeout); @@ -120,6 +123,7 @@ EXPORT_SYMBOL(out_of_line_wait_on_bit_lock); void __wake_up_bit(struct wait_queue_head *wq_head, void *word, int bit) { struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit); + if (waitqueue_active(wq_head)) __wake_up(wq_head, TASK_NORMAL, 1, &key); } @@ -157,6 +161,7 @@ static inline wait_queue_head_t *atomic_t_waitqueue(atomic_t *p) { if (BITS_PER_LONG == 64) { unsigned long q = (unsigned long)p; + return bit_waitqueue((void *)(q & ~1), q & 1); } return bit_waitqueue(p, 0); @@ -173,6 +178,7 @@ static int wake_atomic_t_function(struct wait_queue_entry *wq_entry, unsigned mo wait_bit->key.bit_nr != key->bit_nr || atomic_read(val) != 0) return 0; + return autoremove_wake_function(wq_entry, mode, sync, key); } @@ -196,6 +202,7 @@ int __wait_on_atomic_t(struct wait_queue_head *wq_head, struct wait_bit_queue_en ret = (*action)(val, mode); } while (!ret && atomic_read(val) != 0); finish_wait(wq_head, &wbq_entry->wq_entry); + return ret; } @@ -226,6 +233,7 @@ __sched int atomic_t_wait(atomic_t *counter, unsigned int mode) schedule(); if (signal_pending_state(mode, current)) return -EINTR; + return 0; } EXPORT_SYMBOL(atomic_t_wait); @@ -250,6 +258,7 @@ __sched int bit_wait(struct wait_bit_key *word, int mode) schedule(); if (signal_pending_state(mode, current)) return -EINTR; + return 0; } EXPORT_SYMBOL(bit_wait); @@ -259,6 +268,7 @@ __sched int bit_wait_io(struct 
wait_bit_key *word, int mode) io_schedule(); if (signal_pending_state(mode, current)) return -EINTR; + return 0; } EXPORT_SYMBOL(bit_wait_io); @@ -266,11 +276,13 @@ EXPORT_SYMBOL(bit_wait_io); __sched int bit_wait_timeout(struct wait_bit_key *word, int mode) { unsigned long now = READ_ONCE(jiffies); + if (time_after_eq(now, word->timeout)) return -EAGAIN; schedule_timeout(word->timeout - now); if (signal_pending_state(mode, current)) return -EINTR; + return 0; } EXPORT_SYMBOL_GPL(bit_wait_timeout); @@ -278,11 +290,13 @@ EXPORT_SYMBOL_GPL(bit_wait_timeout); __sched int bit_wait_io_timeout(struct wait_bit_key *word, int mode) { unsigned long now = READ_ONCE(jiffies); + if (time_after_eq(now, word->timeout)) return -EAGAIN; io_schedule_timeout(word->timeout - now); if (signal_pending_state(mode, current)) return -EINTR; + return 0; } EXPORT_SYMBOL_GPL(bit_wait_io_timeout); -- cgit v1.2.3 From 325ea10c0809406ce23f038602abbc454f3f761d Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sat, 3 Mar 2018 12:20:47 +0100 Subject: sched/headers: Simplify and clean up header usage in the scheduler Do the following cleanups and simplifications: - sched/sched.h already includes , so no need to include it in sched/core.c again. - order the headers alphabetically - add all headers to kernel/sched/sched.h - remove all unnecessary includes from the .c files that are already included in kernel/sched/sched.h. Finally, make all scheduler .c files use a single common header: #include "sched.h" ... which now contains a union of the relied upon headers. This makes the various .c files easier to read and easier to handle. Cc: Linus Torvalds Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- include/linux/sched/deadline.h | 6 --- kernel/sched/autogroup.c | 9 ++--- kernel/sched/autogroup.h | 4 -- kernel/sched/clock.c | 14 +------ kernel/sched/completion.c | 5 +-- kernel/sched/core.c | 40 +++++++------------- kernel/sched/cpuacct.c | 13 +------ kernel/sched/cpudeadline.c | 5 +-- kernel/sched/cpudeadline.h | 2 - kernel/sched/cpufreq.c | 1 - kernel/sched/cpufreq_schedutil.c | 8 +--- kernel/sched/cpupri.c | 6 +-- kernel/sched/cpupri.h | 1 - kernel/sched/cputime.c | 10 ++--- kernel/sched/deadline.c | 3 -- kernel/sched/debug.c | 11 +----- kernel/sched/fair.c | 16 +------- kernel/sched/idle.c | 15 +------- kernel/sched/idle_task.c | 5 +-- kernel/sched/isolation.c | 7 ---- kernel/sched/loadavg.c | 4 -- kernel/sched/membarrier.c | 9 +---- kernel/sched/rt.c | 4 -- kernel/sched/sched.h | 81 ++++++++++++++++++++++++++-------------- kernel/sched/stats.c | 13 +++---- kernel/sched/swait.c | 3 +- kernel/sched/topology.c | 4 -- kernel/sched/wait.c | 9 +---- kernel/sched/wait_bit.c | 5 +-- 29 files changed, 94 insertions(+), 219 deletions(-) (limited to 'kernel') diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h index a5bc8728ead7..0cb034331cbb 100644 --- a/include/linux/sched/deadline.h +++ b/include/linux/sched/deadline.h @@ -1,8 +1,4 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LINUX_SCHED_DEADLINE_H -#define _LINUX_SCHED_DEADLINE_H - -#include /* * SCHED_DEADLINE tasks has negative priorities, reflecting @@ -28,5 +24,3 @@ static inline bool dl_time_before(u64 a, u64 b) { return (s64)(a - b) < 0; } - -#endif /* _LINUX_SCHED_DEADLINE_H */ diff --git a/kernel/sched/autogroup.c b/kernel/sched/autogroup.c index ff1b7b647b86..6be6c575b6cd 100644 --- a/kernel/sched/autogroup.c +++ b/kernel/sched/autogroup.c @@ -1,10 +1,7 @@ // 
SPDX-License-Identifier: GPL-2.0 -#include -#include -#include -#include -#include - +/* + * Auto-group scheduling implementation: + */ #include "sched.h" unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1; diff --git a/kernel/sched/autogroup.h b/kernel/sched/autogroup.h index 49e6ec9559cf..b96419974a1f 100644 --- a/kernel/sched/autogroup.h +++ b/kernel/sched/autogroup.h @@ -1,10 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ #ifdef CONFIG_SCHED_AUTOGROUP -#include -#include -#include - struct autogroup { /* * Reference doesn't mean how many threads attach to this diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c index 7da6bec8a2ff..10c83e73837a 100644 --- a/kernel/sched/clock.c +++ b/kernel/sched/clock.c @@ -52,19 +52,7 @@ * that is otherwise invisible (TSC gets stopped). * */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include "sched.h" /* * Scheduler clock - returns current time in nanosec units. diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c index 0926aef10dad..5d2d56b0817a 100644 --- a/kernel/sched/completion.c +++ b/kernel/sched/completion.c @@ -11,10 +11,7 @@ * typically be used for exclusion which gives rise to priority inversion. * Waiting for completion is a typically sync point, but not an exclusion point. */ - -#include -#include -#include +#include "sched.h" /** * complete: - signals a single thread waiting on this completion diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 9427b59551c1..e1e334ba8ff9 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -5,37 +5,11 @@ * * Copyright (C) 1991-2002 Linus Torvalds */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include "sched.h" #include #include -#ifdef CONFIG_PARAVIRT -#include -#endif -#include "sched.h" #include "../workqueue_internal.h" #include "../smpboot.h" @@ -2629,6 +2603,18 @@ static inline void finish_lock_switch(struct rq *rq) raw_spin_unlock_irq(&rq->lock); } +/* + * NOP if the arch has not defined these: + */ + +#ifndef prepare_arch_switch +# define prepare_arch_switch(next) do { } while (0) +#endif + +#ifndef finish_arch_post_lock_switch +# define finish_arch_post_lock_switch() do { } while (0) +#endif + /** * prepare_task_switch - prepare to switch tasks * @rq: the runqueue preparing to switch diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index 1abd325e733a..9fbb10383434 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -1,22 +1,11 @@ // SPDX-License-Identifier: GPL-2.0 -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "sched.h" - /* * CPU accounting code for task groups. * * Based on the work by Paul Menage (menage@google.com) and Balbir Singh * (balbir@in.ibm.com). */ +#include "sched.h" /* Time spent by the tasks of the CPU accounting group executing in ... */ enum cpuacct_stat_index { diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c index cb172b61d191..50316455ea66 100644 --- a/kernel/sched/cpudeadline.c +++ b/kernel/sched/cpudeadline.c @@ -10,10 +10,7 @@ * as published by the Free Software Foundation; version 2 * of the License. 
*/ -#include -#include -#include -#include "cpudeadline.h" +#include "sched.h" static inline int parent(int i) { diff --git a/kernel/sched/cpudeadline.h b/kernel/sched/cpudeadline.h index c26e7a0e5a66..0adeda93b5fb 100644 --- a/kernel/sched/cpudeadline.h +++ b/kernel/sched/cpudeadline.h @@ -1,6 +1,4 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#include -#include #define IDX_INVALID -1 diff --git a/kernel/sched/cpufreq.c b/kernel/sched/cpufreq.c index dbc51442ecbc..5e54cbcae673 100644 --- a/kernel/sched/cpufreq.c +++ b/kernel/sched/cpufreq.c @@ -8,7 +8,6 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ - #include "sched.h" DEFINE_PER_CPU(struct update_util_data *, cpufreq_update_util_data); diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index 0dad8160e00f..feb5f89020f2 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c @@ -11,14 +11,10 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include -#include -#include -#include -#include - #include "sched.h" +#include + struct sugov_tunables { struct gov_attr_set attr_set; unsigned int rate_limit_us; diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c index f43e14ccb67d..daaadf939ccb 100644 --- a/kernel/sched/cpupri.c +++ b/kernel/sched/cpupri.c @@ -26,11 +26,7 @@ * as published by the Free Software Foundation; version 2 * of the License. */ -#include -#include -#include -#include -#include "cpupri.h" +#include "sched.h" /* Convert between a 140 based task->prio, and our 102 based cpupri */ static int convert_prio(int prio) diff --git a/kernel/sched/cpupri.h b/kernel/sched/cpupri.h index 141a06c914c6..7dc20a3232e7 100644 --- a/kernel/sched/cpupri.h +++ b/kernel/sched/cpupri.h @@ -1,5 +1,4 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#include #define CPUPRI_NR_PRIORITIES (MAX_RT_PRIO + 2) diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index d3b450b57ade..0796f938c4f0 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c @@ -1,10 +1,6 @@ -#include -#include -#include -#include -#include -#include -#include +/* + * Simple CPU accounting cgroup controller + */ #include "sched.h" #ifdef CONFIG_IRQ_TIME_ACCOUNTING diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 58f8b7b37983..af491f537636 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -17,9 +17,6 @@ */ #include "sched.h" -#include -#include - struct dl_bandwidth def_dl_bandwidth; static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se) diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index 7c82a9b88510..644d9a464380 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -1,7 +1,7 @@ /* * kernel/sched/debug.c * - * Print the CFS rbtree + * Print the CFS rbtree and other debugging details * * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar * @@ -9,15 +9,6 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
*/ -#include -#include -#include -#include -#include -#include -#include -#include - #include "sched.h" static DEFINE_SPINLOCK(sched_debug_lock); diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 1f877de96c9b..f5591071ae98 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -20,24 +20,10 @@ * Adaptive scheduling granularity, math enhancements by Peter Zijlstra * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra */ -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include "sched.h" #include -#include "sched.h" - /* * Targeted preemption latency for CPU-bound tasks: * diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c index 343d25f85477..2760e0357271 100644 --- a/kernel/sched/idle.c +++ b/kernel/sched/idle.c @@ -1,23 +1,10 @@ /* * Generic entry points for the idle threads */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include +#include "sched.h" #include -#include "sched.h" - /* Linker adds these: start and end of __cpuidle functions */ extern char __cpuidle_text_start[], __cpuidle_text_end[]; diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c index ec73680922f8..488222ac4651 100644 --- a/kernel/sched/idle_task.c +++ b/kernel/sched/idle_task.c @@ -1,12 +1,11 @@ // SPDX-License-Identifier: GPL-2.0 -#include "sched.h" - /* * idle-task scheduling class. * - * (NOTE: these are not related to SCHED_IDLE tasks which are + * (NOTE: these are not related to SCHED_IDLE batch scheduling tasks which are * handled in sched/fair.c) */ +#include "sched.h" #ifdef CONFIG_SMP static int diff --git a/kernel/sched/isolation.c b/kernel/sched/isolation.c index aad5f48a07c6..e6802181900f 100644 --- a/kernel/sched/isolation.c +++ b/kernel/sched/isolation.c @@ -6,13 +6,6 @@ * Copyright (C) 2017-2018 SUSE, Frederic Weisbecker * */ -#include -#include -#include -#include -#include -#include - #include "sched.h" DEFINE_STATIC_KEY_FALSE(housekeeping_overriden); diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c index a398e7e28a8a..a171c1258109 100644 --- a/kernel/sched/loadavg.c +++ b/kernel/sched/loadavg.c @@ -6,10 +6,6 @@ * figure. Its a silly number but people think its important. We go through * great pains to make it work on big machines and tickless kernels. */ - -#include -#include - #include "sched.h" /* diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c index 2c6ae2413fa2..76e0eaf4654e 100644 --- a/kernel/sched/membarrier.c +++ b/kernel/sched/membarrier.c @@ -13,14 +13,7 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ - -#include -#include -#include -#include -#include - -#include "sched.h" /* for cpu_rq(). 
*/ +#include "sched.h" /* * Bitmask made from a "or" of all commands within enum membarrier_cmd, diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index e40498872111..a3d438fec46c 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -3,12 +3,8 @@ * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR * policies) */ - #include "sched.h" -#include -#include - int sched_rr_timeslice = RR_TIMESLICE; int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE; diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index bd1461ae06e4..23ba4dd76ac4 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -3,39 +3,71 @@ * Scheduler internal types and methods: */ #include + #include -#include -#include -#include -#include #include -#include -#include -#include -#include +#include #include -#include -#include +#include +#include #include #include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include #include -#include -#include +#include +#include +#include +#include + +#include -#include -#include #include -#include -#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include -#include -#include -#include -#include +#include +#include +#include +#include +#include + +#include #ifdef CONFIG_PARAVIRT -#include +# include #endif #include "cpupri.h" @@ -1357,13 +1389,6 @@ static inline int task_on_rq_migrating(struct task_struct *p) return p->on_rq == TASK_ON_RQ_MIGRATING; } -#ifndef prepare_arch_switch -# define prepare_arch_switch(next) do { } while (0) -#endif -#ifndef finish_arch_post_lock_switch -# define finish_arch_post_lock_switch() do { } while (0) -#endif - /* * wake flags */ diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c index 968c1fe3099a..ab112cbfd7c8 100644 --- a/kernel/sched/stats.c +++ b/kernel/sched/stats.c @@ -1,14 +1,13 @@ // SPDX-License-Identifier: GPL-2.0 - -#include -#include -#include -#include - +/* + * /proc/schedstat implementation + */ #include "sched.h" /* - * bump this up when changing the output format or the meaning of an existing + * Current schedstat API version. 
+ * + * Bump this up when changing the output format or the meaning of an existing * format, so that tools can adapt (or abort) */ #define SCHEDSTAT_VERSION 15 diff --git a/kernel/sched/swait.c b/kernel/sched/swait.c index b88ab4e0207f..b6fb2c3b3ff7 100644 --- a/kernel/sched/swait.c +++ b/kernel/sched/swait.c @@ -2,8 +2,7 @@ /* * (simple wait queues ) implementation: */ -#include -#include +#include "sched.h" void __init_swait_queue_head(struct swait_queue_head *q, const char *name, struct lock_class_key *key) diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index 219eee70e457..64cc564f5255 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -2,10 +2,6 @@ /* * Scheduler topology setup/handling methods */ -#include -#include -#include - #include "sched.h" DEFINE_MUTEX(sched_domains_mutex); diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c index 7b2a142ae629..928be527477e 100644 --- a/kernel/sched/wait.c +++ b/kernel/sched/wait.c @@ -3,14 +3,7 @@ * * (C) 2004 Nadia Yvette Chambers, Oracle */ -#include -#include -#include -#include -#include -#include -#include -#include +#include "sched.h" void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *key) { diff --git a/kernel/sched/wait_bit.c b/kernel/sched/wait_bit.c index 5293c59163a6..4239c78f5cd3 100644 --- a/kernel/sched/wait_bit.c +++ b/kernel/sched/wait_bit.c @@ -1,10 +1,7 @@ /* * The implementation of the wait_bit*() and related waiting APIs: */ -#include -#include -#include -#include +#include "sched.h" #define WAIT_TABLE_BITS 8 #define WAIT_TABLE_SIZE (1 << WAIT_TABLE_BITS) -- cgit v1.2.3 From a92057e14beb233e8c891f4de075f2a468c71f15 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sat, 3 Mar 2018 15:44:39 +0100 Subject: sched/idle: Merge kernel/sched/idle.c and kernel/sched/idle_task.c Merge these two small .c modules as they implement two aspects of idle task handling. Cc: Linus Torvalds Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- kernel/sched/Makefile | 5 +- kernel/sched/idle.c | 123 ++++++++++++++++++++++++++++++++++++++++++++++- kernel/sched/idle_task.c | 117 -------------------------------------------- 3 files changed, 125 insertions(+), 120 deletions(-) delete mode 100644 kernel/sched/idle_task.c (limited to 'kernel') diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile index e2f9d4feff40..d9a02b318108 100644 --- a/kernel/sched/Makefile +++ b/kernel/sched/Makefile @@ -17,8 +17,9 @@ CFLAGS_core.o := $(PROFILING) -fno-omit-frame-pointer endif obj-y += core.o loadavg.o clock.o cputime.o -obj-y += idle_task.o fair.o rt.o deadline.o -obj-y += wait.o wait_bit.o swait.o completion.o idle.o +obj-y += idle.o fair.o rt.o deadline.o +obj-y += wait.o wait_bit.o swait.o completion.o + obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o stop_task.o obj-$(CONFIG_SCHED_AUTOGROUP) += autogroup.o obj-$(CONFIG_SCHEDSTATS) += stats.o diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c index 2760e0357271..2975f195e1c4 100644 --- a/kernel/sched/idle.c +++ b/kernel/sched/idle.c @@ -1,5 +1,9 @@ /* - * Generic entry points for the idle threads + * Generic entry points for the idle threads and + * implementation of the idle task scheduling class. 
+ * + * (NOTE: these are not related to SCHED_IDLE batch scheduled + * tasks which are handled in sched/fair.c ) */ #include "sched.h" @@ -33,6 +37,7 @@ void cpu_idle_poll_ctrl(bool enable) static int __init cpu_idle_poll_setup(char *__unused) { cpu_idle_force_poll = 1; + return 1; } __setup("nohlt", cpu_idle_poll_setup); @@ -40,6 +45,7 @@ __setup("nohlt", cpu_idle_poll_setup); static int __init cpu_idle_nopoll_setup(char *__unused) { cpu_idle_force_poll = 0; + return 1; } __setup("hlt", cpu_idle_nopoll_setup); @@ -51,12 +57,14 @@ static noinline int __cpuidle cpu_idle_poll(void) trace_cpu_idle_rcuidle(0, smp_processor_id()); local_irq_enable(); stop_critical_timings(); + while (!tif_need_resched() && (cpu_idle_force_poll || tick_check_broadcast_expired())) cpu_relax(); start_critical_timings(); trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); rcu_idle_exit(); + return 1; } @@ -337,3 +345,116 @@ void cpu_startup_entry(enum cpuhp_state state) while (1) do_idle(); } + +/* + * idle-task scheduling class. + */ + +#ifdef CONFIG_SMP +static int +select_task_rq_idle(struct task_struct *p, int cpu, int sd_flag, int flags) +{ + return task_cpu(p); /* IDLE tasks as never migrated */ +} +#endif + +/* + * Idle tasks are unconditionally rescheduled: + */ +static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags) +{ + resched_curr(rq); +} + +static struct task_struct * +pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) +{ + put_prev_task(rq, prev); + update_idle_core(rq); + schedstat_inc(rq->sched_goidle); + + return rq->idle; +} + +/* + * It is not legal to sleep in the idle task - print a warning + * message if some code attempts to do it: + */ +static void +dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags) +{ + raw_spin_unlock_irq(&rq->lock); + printk(KERN_ERR "bad: scheduling from the idle thread!\n"); + dump_stack(); + raw_spin_lock_irq(&rq->lock); +} + +static void put_prev_task_idle(struct rq *rq, struct task_struct *prev) +{ +} + +/* + * scheduler tick hitting a task of our scheduling class. + * + * NOTE: This function can be called remotely by the tick offload that + * goes along full dynticks. Therefore no local assumption can be made + * and everything must be accessed through the @rq and @curr passed in + * parameters. 
+ */ +static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued) +{ +} + +static void set_curr_task_idle(struct rq *rq) +{ +} + +static void switched_to_idle(struct rq *rq, struct task_struct *p) +{ + BUG(); +} + +static void +prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio) +{ + BUG(); +} + +static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task) +{ + return 0; +} + +static void update_curr_idle(struct rq *rq) +{ +} + +/* + * Simple, special scheduling class for the per-CPU idle tasks: + */ +const struct sched_class idle_sched_class = { + /* .next is NULL */ + /* no enqueue/yield_task for idle tasks */ + + /* dequeue is not valid, we print a debug message there: */ + .dequeue_task = dequeue_task_idle, + + .check_preempt_curr = check_preempt_curr_idle, + + .pick_next_task = pick_next_task_idle, + .put_prev_task = put_prev_task_idle, + +#ifdef CONFIG_SMP + .select_task_rq = select_task_rq_idle, + .set_cpus_allowed = set_cpus_allowed_common, +#endif + + .set_curr_task = set_curr_task_idle, + .task_tick = task_tick_idle, + + .get_rr_interval = get_rr_interval_idle, + + .prio_changed = prio_changed_idle, + .switched_to = switched_to_idle, + .update_curr = update_curr_idle, +}; diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c deleted file mode 100644 index 488222ac4651..000000000000 --- a/kernel/sched/idle_task.c +++ /dev/null @@ -1,117 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * idle-task scheduling class. - * - * (NOTE: these are not related to SCHED_IDLE batch scheduling tasks which are - * handled in sched/fair.c) - */ -#include "sched.h" - -#ifdef CONFIG_SMP -static int -select_task_rq_idle(struct task_struct *p, int cpu, int sd_flag, int flags) -{ - return task_cpu(p); /* IDLE tasks as never migrated */ -} -#endif - -/* - * Idle tasks are unconditionally rescheduled: - */ -static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags) -{ - resched_curr(rq); -} - -static struct task_struct * -pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) -{ - put_prev_task(rq, prev); - update_idle_core(rq); - schedstat_inc(rq->sched_goidle); - - return rq->idle; -} - -/* - * It is not legal to sleep in the idle task - print a warning - * message if some code attempts to do it: - */ -static void -dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags) -{ - raw_spin_unlock_irq(&rq->lock); - printk(KERN_ERR "bad: scheduling from the idle thread!\n"); - dump_stack(); - raw_spin_lock_irq(&rq->lock); -} - -static void put_prev_task_idle(struct rq *rq, struct task_struct *prev) -{ -} - -/* - * scheduler tick hitting a task of our scheduling class. - * - * NOTE: This function can be called remotely by the tick offload that - * goes along full dynticks. Therefore no local assumption can be made - * and everything must be accessed through the @rq and @curr passed in - * parameters. 
- */ -static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued) -{ -} - -static void set_curr_task_idle(struct rq *rq) -{ -} - -static void switched_to_idle(struct rq *rq, struct task_struct *p) -{ - BUG(); -} - -static void -prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio) -{ - BUG(); -} - -static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task) -{ - return 0; -} - -static void update_curr_idle(struct rq *rq) -{ -} - -/* - * Simple, special scheduling class for the per-CPU idle tasks: - */ -const struct sched_class idle_sched_class = { - /* .next is NULL */ - /* no enqueue/yield_task for idle tasks */ - - /* dequeue is not valid, we print a debug message there: */ - .dequeue_task = dequeue_task_idle, - - .check_preempt_curr = check_preempt_curr_idle, - - .pick_next_task = pick_next_task_idle, - .put_prev_task = put_prev_task_idle, - -#ifdef CONFIG_SMP - .select_task_rq = select_task_rq_idle, - .set_cpus_allowed = set_cpus_allowed_common, -#endif - - .set_curr_task = set_curr_task_idle, - .task_tick = task_tick_idle, - - .get_rr_interval = get_rr_interval_idle, - - .prio_changed = prio_changed_idle, - .switched_to = switched_to_idle, - .update_curr = update_curr_idle, -}; -- cgit v1.2.3 From 02d8ec9456f47b8865f1ff3fbb532e12a760d3b5 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sat, 3 Mar 2018 16:27:54 +0100 Subject: sched/deadline, rt: Rename queue_push_tasks/queue_pull_task to create separate namespace There are similarly named functions in both of these modules: kernel/sched/deadline.c:static inline void queue_push_tasks(struct rq *rq) kernel/sched/deadline.c:static inline void queue_pull_task(struct rq *rq) kernel/sched/deadline.c:static inline void queue_push_tasks(struct rq *rq) kernel/sched/deadline.c:static inline void queue_pull_task(struct rq *rq) kernel/sched/deadline.c: queue_push_tasks(rq); kernel/sched/deadline.c: queue_pull_task(rq); kernel/sched/deadline.c: queue_push_tasks(rq); kernel/sched/deadline.c: queue_pull_task(rq); kernel/sched/rt.c:static inline void queue_push_tasks(struct rq *rq) kernel/sched/rt.c:static inline void queue_pull_task(struct rq *rq) kernel/sched/rt.c:static inline void queue_push_tasks(struct rq *rq) kernel/sched/rt.c: queue_push_tasks(rq); kernel/sched/rt.c: queue_pull_task(rq); kernel/sched/rt.c: queue_push_tasks(rq); kernel/sched/rt.c: queue_pull_task(rq); ... which makes it harder to grep for them. Prefix them with deadline_ and rt_, respectively. 
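For illustration only, a hypothetical stand-alone sketch of the naming pattern the rename establishes (stub bodies, not the kernel implementations): once each helper carries its class prefix, a grep for the rt_ or deadline_ variant lands in exactly one file, even though the original identically named static functions were perfectly legal C.

#include <stdio.h>

/* Stubs standing in for kernel/sched/rt.c and kernel/sched/deadline.c: */
static void rt_queue_push_tasks(void)       { printf("rt.c: queue push callback\n"); }
static void deadline_queue_push_tasks(void) { printf("deadline.c: queue push callback\n"); }

int main(void)
{
	rt_queue_push_tasks();
	deadline_queue_push_tasks();
	return 0;
}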
Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Thomas Gleixner Cc: Linus Torvalds Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- kernel/sched/deadline.c | 16 ++++++++-------- kernel/sched/rt.c | 14 +++++++------- 2 files changed, 15 insertions(+), 15 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index af491f537636..8b7c2b35bec9 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -511,7 +511,7 @@ static DEFINE_PER_CPU(struct callback_head, dl_pull_head); static void push_dl_tasks(struct rq *); static void pull_dl_task(struct rq *); -static inline void queue_push_tasks(struct rq *rq) +static inline void deadline_queue_push_tasks(struct rq *rq) { if (!has_pushable_dl_tasks(rq)) return; @@ -519,7 +519,7 @@ static inline void queue_push_tasks(struct rq *rq) queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks); } -static inline void queue_pull_task(struct rq *rq) +static inline void deadline_queue_pull_task(struct rq *rq) { queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task); } @@ -594,11 +594,11 @@ static inline void pull_dl_task(struct rq *rq) { } -static inline void queue_push_tasks(struct rq *rq) +static inline void deadline_queue_push_tasks(struct rq *rq) { } -static inline void queue_pull_task(struct rq *rq) +static inline void deadline_queue_pull_task(struct rq *rq) { } #endif /* CONFIG_SMP */ @@ -1759,7 +1759,7 @@ pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) if (hrtick_enabled(rq)) start_hrtick_dl(rq, p); - queue_push_tasks(rq); + deadline_queue_push_tasks(rq); return p; } @@ -2309,7 +2309,7 @@ static void switched_from_dl(struct rq *rq, struct task_struct *p) if (!task_on_rq_queued(p) || rq->dl.dl_nr_running) return; - queue_pull_task(rq); + deadline_queue_pull_task(rq); } /* @@ -2331,7 +2331,7 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p) if (rq->curr != p) { #ifdef CONFIG_SMP if (p->nr_cpus_allowed > 1 && rq->dl.overloaded) - queue_push_tasks(rq); + deadline_queue_push_tasks(rq); #endif if (dl_task(rq->curr)) check_preempt_curr_dl(rq, p, 0); @@ -2356,7 +2356,7 @@ static void prio_changed_dl(struct rq *rq, struct task_struct *p, * or lowering its prio, so... 
*/ if (!rq->dl.overloaded) - queue_pull_task(rq); + deadline_queue_pull_task(rq); /* * If we now have a earlier deadline task than p, diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index a3d438fec46c..4f4fd3b157f1 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -355,7 +355,7 @@ static DEFINE_PER_CPU(struct callback_head, rt_pull_head); static void push_rt_tasks(struct rq *); static void pull_rt_task(struct rq *); -static inline void queue_push_tasks(struct rq *rq) +static inline void rt_queue_push_tasks(struct rq *rq) { if (!has_pushable_tasks(rq)) return; @@ -363,7 +363,7 @@ static inline void queue_push_tasks(struct rq *rq) queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks); } -static inline void queue_pull_task(struct rq *rq) +static inline void rt_queue_pull_task(struct rq *rq) { queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task); } @@ -421,7 +421,7 @@ static inline void pull_rt_task(struct rq *this_rq) { } -static inline void queue_push_tasks(struct rq *rq) +static inline void rt_queue_push_tasks(struct rq *rq) { } #endif /* CONFIG_SMP */ @@ -1565,7 +1565,7 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) /* The running task is never eligible for pushing */ dequeue_pushable_task(rq, p); - queue_push_tasks(rq); + rt_queue_push_tasks(rq); return p; } @@ -2185,7 +2185,7 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p) if (!task_on_rq_queued(p) || rq->rt.rt_nr_running) return; - queue_pull_task(rq); + rt_queue_pull_task(rq); } void __init init_sched_rt_class(void) @@ -2216,7 +2216,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p) if (task_on_rq_queued(p) && rq->curr != p) { #ifdef CONFIG_SMP if (p->nr_cpus_allowed > 1 && rq->rt.overloaded) - queue_push_tasks(rq); + rt_queue_push_tasks(rq); #endif /* CONFIG_SMP */ if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq))) resched_curr(rq); @@ -2240,7 +2240,7 @@ prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio) * may need to pull tasks to this runqueue. */ if (oldprio < p->prio) - queue_pull_task(rq); + rt_queue_pull_task(rq); /* * If there's a higher priority task waiting to run -- cgit v1.2.3 From 14a7405b2e814221a951bd7a76ce4a8d24c1b3be Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sat, 3 Mar 2018 16:32:24 +0100 Subject: sched/core: Undefine tracepoint creation at the end of core.c Make it easier to concatenate all the scheduler .c files for single-module compilation. Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Thomas Gleixner Cc: Linus Torvalds Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'kernel') diff --git a/kernel/sched/core.c b/kernel/sched/core.c index e1e334ba8ff9..4f5eeb63ab5b 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -7071,3 +7071,5 @@ const u32 sched_prio_to_wmult[40] = { /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717, /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153, }; + +#undef CREATE_TRACE_POINTS -- cgit v1.2.3 From 13a453c241b78934a945b1af572d0533612c9bd1 Mon Sep 17 00:00:00 2001 From: Norbert Manthey Date: Tue, 27 Feb 2018 08:47:40 +0100 Subject: sched/fair: Add ';' after label attributes Due to using GCC defines for configuration, some labels might be unused in certain configurations. While adding a __maybe_unused to the label is fine in general, the line has to be terminated with ';'. 
This is also reflected in the GCC documentation, but GCC parsed the previous variant without an error message. This has been spotted while compiling with goto-cc, the compiler for the CPROVER tool suite. Signed-off-by: Norbert Manthey Signed-off-by: Michael Tautschnig Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1519717660-16157-1-git-send-email-nmanthey@amazon.de Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index f5591071ae98..097db34d5ba2 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -6797,7 +6797,7 @@ simple: p = task_of(se); -done: __maybe_unused +done: __maybe_unused; #ifdef CONFIG_SMP /* * Move the next running task to the front of -- cgit v1.2.3 From 4042d003a0792a3b05c7c424219e4c6cf1abfe76 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 20 Dec 2017 15:37:26 +0100 Subject: cpufreq/schedutil: Remove unused CPUFREQ_DL Bitrot... Signed-off-by: Peter Zijlstra (Intel) Cc: Juri Lelli Cc: Linus Torvalds Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Rafael J. Wysocki Cc: Thomas Gleixner Cc: Viresh Kumar Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- include/linux/sched/cpufreq.h | 3 +-- kernel/sched/deadline.c | 4 ++-- 2 files changed, 3 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/include/linux/sched/cpufreq.h b/include/linux/sched/cpufreq.h index 0b55834efd46..d963cfd3a0c2 100644 --- a/include/linux/sched/cpufreq.h +++ b/include/linux/sched/cpufreq.h @@ -9,8 +9,7 @@ */ #define SCHED_CPUFREQ_RT (1U << 0) -#define SCHED_CPUFREQ_DL (1U << 1) -#define SCHED_CPUFREQ_IOWAIT (1U << 2) +#define SCHED_CPUFREQ_IOWAIT (1U << 1) #ifdef CONFIG_CPU_FREQ struct update_util_data { diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 8b7c2b35bec9..d1c7bf7c7e5b 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -84,7 +84,7 @@ void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq) SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */ SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw); /* kick cpufreq (see the comment in kernel/sched/sched.h). */ - cpufreq_update_util(rq_of_dl_rq(dl_rq), SCHED_CPUFREQ_DL); + cpufreq_update_util(rq_of_dl_rq(dl_rq), 0); } static inline @@ -98,7 +98,7 @@ void __sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq) if (dl_rq->running_bw > old) dl_rq->running_bw = 0; /* kick cpufreq (see the comment in kernel/sched/sched.h). */ - cpufreq_update_util(rq_of_dl_rq(dl_rq), SCHED_CPUFREQ_DL); + cpufreq_update_util(rq_of_dl_rq(dl_rq), 0); } static inline -- cgit v1.2.3 From 8f111bc357aa811e0bb5fdfe34c4c9efdafc15b9 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 20 Dec 2017 16:26:12 +0100 Subject: cpufreq/schedutil: Rewrite CPUFREQ_RT support Instead of trying to duplicate scheduler state to track if an RT task is running, directly use the scheduler runqueue state for it. This vastly simplifies things and fixes a number of bugs related to sugov and the scheduler getting out of sync wrt this state. As a consequence we not also update the remove cfs/dl state when iterating the shared mask. Signed-off-by: Peter Zijlstra (Intel) Cc: Juri Lelli Cc: Linus Torvalds Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Rafael J. 
Wysocki Cc: Thomas Gleixner Cc: Viresh Kumar Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- include/linux/sched/cpufreq.h | 3 +- kernel/sched/cpufreq_schedutil.c | 74 ++++++++++++++++++---------------------- kernel/sched/rt.c | 9 +++-- 3 files changed, 41 insertions(+), 45 deletions(-) (limited to 'kernel') diff --git a/include/linux/sched/cpufreq.h b/include/linux/sched/cpufreq.h index d963cfd3a0c2..b48f2fb3b316 100644 --- a/include/linux/sched/cpufreq.h +++ b/include/linux/sched/cpufreq.h @@ -8,8 +8,7 @@ * Interface between cpufreq drivers and the scheduler: */ -#define SCHED_CPUFREQ_RT (1U << 0) -#define SCHED_CPUFREQ_IOWAIT (1U << 1) +#define SCHED_CPUFREQ_IOWAIT (1U << 0) #ifdef CONFIG_CPU_FREQ struct update_util_data { diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index feb5f89020f2..89fe78ecb88c 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c @@ -57,7 +57,6 @@ struct sugov_cpu { unsigned long util_cfs; unsigned long util_dl; unsigned long max; - unsigned int flags; /* The field below is for single-CPU policies only: */ #ifdef CONFIG_NO_HZ_COMMON @@ -183,17 +182,28 @@ static void sugov_get_util(struct sugov_cpu *sg_cpu) static unsigned long sugov_aggregate_util(struct sugov_cpu *sg_cpu) { + struct rq *rq = cpu_rq(sg_cpu->cpu); + unsigned long util; + + if (rq->rt.rt_nr_running) { + util = sg_cpu->max; + } else { + util = sg_cpu->util_dl; + if (rq->cfs.h_nr_running) + util += sg_cpu->util_cfs; + } + /* * Ideally we would like to set util_dl as min/guaranteed freq and * util_cfs + util_dl as requested freq. However, cpufreq is not yet * ready for such an interface. So, we only do the latter for now. */ - return min(sg_cpu->util_cfs + sg_cpu->util_dl, sg_cpu->max); + return min(util, sg_cpu->max); } -static void sugov_set_iowait_boost(struct sugov_cpu *sg_cpu, u64 time) +static void sugov_set_iowait_boost(struct sugov_cpu *sg_cpu, u64 time, unsigned int flags) { - if (sg_cpu->flags & SCHED_CPUFREQ_IOWAIT) { + if (flags & SCHED_CPUFREQ_IOWAIT) { if (sg_cpu->iowait_boost_pending) return; @@ -262,12 +272,11 @@ static void sugov_update_single(struct update_util_data *hook, u64 time, { struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util); struct sugov_policy *sg_policy = sg_cpu->sg_policy; - struct cpufreq_policy *policy = sg_policy->policy; unsigned long util, max; unsigned int next_f; bool busy; - sugov_set_iowait_boost(sg_cpu, time); + sugov_set_iowait_boost(sg_cpu, time, flags); sg_cpu->last_update = time; if (!sugov_should_update_freq(sg_policy, time)) @@ -275,25 +284,22 @@ static void sugov_update_single(struct update_util_data *hook, u64 time, busy = sugov_cpu_is_busy(sg_cpu); - if (flags & SCHED_CPUFREQ_RT) { - next_f = policy->cpuinfo.max_freq; - } else { - sugov_get_util(sg_cpu); - max = sg_cpu->max; - util = sugov_aggregate_util(sg_cpu); - sugov_iowait_boost(sg_cpu, &util, &max); - next_f = get_next_freq(sg_policy, util, max); - /* - * Do not reduce the frequency if the CPU has not been idle - * recently, as the reduction is likely to be premature then. - */ - if (busy && next_f < sg_policy->next_freq) { - next_f = sg_policy->next_freq; + sugov_get_util(sg_cpu); + max = sg_cpu->max; + util = sugov_aggregate_util(sg_cpu); + sugov_iowait_boost(sg_cpu, &util, &max); + next_f = get_next_freq(sg_policy, util, max); + /* + * Do not reduce the frequency if the CPU has not been idle + * recently, as the reduction is likely to be premature then. 
+ */ + if (busy && next_f < sg_policy->next_freq) { + next_f = sg_policy->next_freq; - /* Reset cached freq as next_freq has changed */ - sg_policy->cached_raw_freq = 0; - } + /* Reset cached freq as next_freq has changed */ + sg_policy->cached_raw_freq = 0; } + sugov_update_commit(sg_policy, time, next_f); } @@ -309,6 +315,8 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time) unsigned long j_util, j_max; s64 delta_ns; + sugov_get_util(j_sg_cpu); + /* * If the CFS CPU utilization was last updated before the * previous frequency update and the time elapsed between the @@ -322,21 +330,15 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time) if (delta_ns > TICK_NSEC) { j_sg_cpu->iowait_boost = 0; j_sg_cpu->iowait_boost_pending = false; - j_sg_cpu->util_cfs = 0; - if (j_sg_cpu->util_dl == 0) - continue; } - if (j_sg_cpu->flags & SCHED_CPUFREQ_RT) - return policy->cpuinfo.max_freq; j_max = j_sg_cpu->max; j_util = sugov_aggregate_util(j_sg_cpu); + sugov_iowait_boost(j_sg_cpu, &j_util, &j_max); if (j_util * max > j_max * util) { util = j_util; max = j_max; } - - sugov_iowait_boost(j_sg_cpu, &util, &max); } return get_next_freq(sg_policy, util, max); @@ -351,18 +353,11 @@ sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags) raw_spin_lock(&sg_policy->update_lock); - sugov_get_util(sg_cpu); - sg_cpu->flags = flags; - - sugov_set_iowait_boost(sg_cpu, time); + sugov_set_iowait_boost(sg_cpu, time, flags); sg_cpu->last_update = time; if (sugov_should_update_freq(sg_policy, time)) { - if (flags & SCHED_CPUFREQ_RT) - next_f = sg_policy->policy->cpuinfo.max_freq; - else - next_f = sugov_next_freq_shared(sg_cpu, time); - + next_f = sugov_next_freq_shared(sg_cpu, time); sugov_update_commit(sg_policy, time, next_f); } @@ -673,7 +668,6 @@ static int sugov_start(struct cpufreq_policy *policy) memset(sg_cpu, 0, sizeof(*sg_cpu)); sg_cpu->cpu = cpu; sg_cpu->sg_policy = sg_policy; - sg_cpu->flags = 0; sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq; } diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 4f4fd3b157f1..86b77987435e 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -957,9 +957,6 @@ static void update_curr_rt(struct rq *rq) if (unlikely((s64)delta_exec <= 0)) return; - /* Kick cpufreq (see the comment in kernel/sched/sched.h). */ - cpufreq_update_util(rq, SCHED_CPUFREQ_RT); - schedstat_set(curr->se.statistics.exec_max, max(curr->se.statistics.exec_max, delta_exec)); @@ -1001,6 +998,9 @@ dequeue_top_rt_rq(struct rt_rq *rt_rq) sub_nr_running(rq, rt_rq->rt_nr_running); rt_rq->rt_queued = 0; + + /* Kick cpufreq (see the comment in kernel/sched/sched.h). */ + cpufreq_update_util(rq, 0); } static void @@ -1017,6 +1017,9 @@ enqueue_top_rt_rq(struct rt_rq *rt_rq) add_nr_running(rq, rt_rq->rt_nr_running); rt_rq->rt_queued = 1; + + /* Kick cpufreq (see the comment in kernel/sched/sched.h). */ + cpufreq_update_util(rq, 0); } #if defined CONFIG_SMP -- cgit v1.2.3 From a22e47a4e3f5a9e50a827c5d94705ace3b1eac0b Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 21 Dec 2017 10:01:24 +0100 Subject: sched/core: Convert nohz_flags to atomic_t Using atomic_t allows us to use the more flexible bitops provided there. Also its smaller. 
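As a rough illustration of what the more flexible bitops buy, here is a minimal userspace sketch using C11 <stdatomic.h> as a stand-in for the kernel's atomic_t API (the NOHZ_* values mirror the defines added below; this is not the kernel code): atomic_fetch_or() sets the kick bit and reports, in the same operation, whether a kick was already pending, and an and-with-complement clears a single bit without disturbing the others.

#include <stdatomic.h>
#include <stdio.h>

#define NOHZ_TICK_STOPPED	(1U << 0)
#define NOHZ_BALANCE_KICK	(1U << 1)

static atomic_uint nohz_flags;	/* one such word per CPU in the kernel */

int main(void)
{
	unsigned int prev;

	/* Kick: set the bit and learn, in one atomic step, whether a kick
	 * was already pending (so no duplicate IPI is sent). */
	prev = atomic_fetch_or(&nohz_flags, NOHZ_BALANCE_KICK);
	if (!(prev & NOHZ_BALANCE_KICK))
		printf("send reschedule IPI\n");

	/* Ack: clear only this bit, leaving e.g. NOHZ_TICK_STOPPED alone. */
	atomic_fetch_and(&nohz_flags, ~NOHZ_BALANCE_KICK);

	return 0;
}

The kernel side additionally uses atomic_andnot()/atomic_fetch_andnot(), which have no direct C11 equivalent; the and-with-complement above is the closest portable spelling.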
Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 6 +++--- kernel/sched/fair.c | 23 +++++++++++++++-------- kernel/sched/sched.h | 11 ++++++----- 3 files changed, 24 insertions(+), 16 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 4f5eeb63ab5b..96ad1c003d74 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -583,7 +583,7 @@ static inline bool got_nohz_idle_kick(void) { int cpu = smp_processor_id(); - if (!test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu))) + if (!(atomic_read(nohz_flags(cpu)) & NOHZ_BALANCE_KICK)) return false; if (idle_cpu(cpu) && !need_resched()) @@ -593,7 +593,7 @@ static inline bool got_nohz_idle_kick(void) * We can't run Idle Load Balance on this CPU for this time so we * cancel it and clear NOHZ_BALANCE_KICK */ - clear_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)); + atomic_andnot(NOHZ_BALANCE_KICK, nohz_flags(cpu)); return false; } @@ -6074,7 +6074,7 @@ void __init sched_init(void) rq_attach_root(rq, &def_root_domain); #ifdef CONFIG_NO_HZ_COMMON rq->last_load_update_tick = jiffies; - rq->nohz_flags = 0; + atomic_set(&rq->nohz_flags, 0); #endif #endif /* CONFIG_SMP */ hrtick_rq_init(rq); diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 097db34d5ba2..5d150478dd58 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -9072,6 +9072,7 @@ static inline int find_new_ilb(void) */ static void nohz_balancer_kick(void) { + unsigned int flags; int ilb_cpu; nohz.next_balance++; @@ -9081,7 +9082,8 @@ static void nohz_balancer_kick(void) if (ilb_cpu >= nr_cpu_ids) return; - if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu))) + flags = atomic_fetch_or(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)); + if (flags & NOHZ_BALANCE_KICK) return; /* * Use smp_send_reschedule() instead of resched_cpu(). @@ -9095,7 +9097,9 @@ static void nohz_balancer_kick(void) void nohz_balance_exit_idle(unsigned int cpu) { - if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) { + unsigned int flags = atomic_read(nohz_flags(cpu)); + + if (unlikely(flags & NOHZ_TICK_STOPPED)) { /* * Completely isolated CPUs don't ever set, so we must test. 
*/ @@ -9103,7 +9107,8 @@ void nohz_balance_exit_idle(unsigned int cpu) cpumask_clear_cpu(cpu, nohz.idle_cpus_mask); atomic_dec(&nohz.nr_cpus); } - clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)); + + atomic_andnot(NOHZ_TICK_STOPPED, nohz_flags(cpu)); } } @@ -9155,7 +9160,7 @@ void nohz_balance_enter_idle(int cpu) if (!housekeeping_cpu(cpu, HK_FLAG_SCHED)) return; - if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu))) + if (atomic_read(nohz_flags(cpu)) & NOHZ_TICK_STOPPED) return; /* If we're a completely isolated CPU, we don't play: */ @@ -9164,7 +9169,7 @@ void nohz_balance_enter_idle(int cpu) cpumask_set_cpu(cpu, nohz.idle_cpus_mask); atomic_inc(&nohz.nr_cpus); - set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)); + atomic_or(NOHZ_TICK_STOPPED, nohz_flags(cpu)); } #endif @@ -9302,8 +9307,10 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) unsigned long next_balance = jiffies + 60*HZ; int update_next_balance = 0; - if (idle != CPU_IDLE || - !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu))) + if (!(atomic_read(nohz_flags(this_cpu)) & NOHZ_BALANCE_KICK)) + return; + + if (idle != CPU_IDLE) goto end; for_each_cpu(balance_cpu, nohz.idle_cpus_mask) { @@ -9349,7 +9356,7 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) if (likely(update_next_balance)) nohz.next_balance = next_balance; end: - clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)); + atomic_andnot(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)); } /* diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 23ba4dd76ac4..d98e761b962f 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -763,7 +763,7 @@ struct rq { #ifdef CONFIG_SMP unsigned long last_load_update_tick; #endif /* CONFIG_SMP */ - unsigned long nohz_flags; + atomic_t nohz_flags; #endif /* CONFIG_NO_HZ_COMMON */ /* capture load from *all* tasks on this CPU: */ @@ -2034,10 +2034,11 @@ extern void cfs_bandwidth_usage_inc(void); extern void cfs_bandwidth_usage_dec(void); #ifdef CONFIG_NO_HZ_COMMON -enum rq_nohz_flag_bits { - NOHZ_TICK_STOPPED, - NOHZ_BALANCE_KICK, -}; +#define NOHZ_TICK_STOPPED_BIT 0 +#define NOHZ_BALANCE_KICK_BIT 1 + +#define NOHZ_TICK_STOPPED BIT(NOHZ_TICK_STOPPED_BIT) +#define NOHZ_BALANCE_KICK BIT(NOHZ_BALANCE_KICK_BIT) #define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags) -- cgit v1.2.3 From b7031a02ec753bf9b52a94a966b05e1abad3b7a9 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 21 Dec 2017 10:11:09 +0100 Subject: sched/fair: Add NOHZ_STATS_KICK Split the NOHZ idle balancer into doing two separate actions: - update blocked load statistic - actually load-balance Since the latter requires the former, ensure this happens. For now always tag both bits at the same time. Prepares for a future where we can toggle only the STATS bit. 
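A minimal sketch of the two-bit kick protocol this introduces, written as stand-alone C11 userspace code rather than the kernel implementation (the NOHZ_* masks mirror the defines added to sched.h below; the stats update and the rebalance are reduced to printf placeholders): the kicker raises both bits for now, and the kickee always refreshes the blocked-load statistics but only runs the actual load balance when the balance bit was part of the kick.

#include <stdatomic.h>
#include <stdio.h>

#define NOHZ_BALANCE_KICK	(1U << 1)
#define NOHZ_STATS_KICK		(1U << 2)
#define NOHZ_KICK_MASK		(NOHZ_BALANCE_KICK | NOHZ_STATS_KICK)

static atomic_uint nohz_flags;	/* per ilb CPU in the kernel */

static void kick_ilb(unsigned int flags)
{
	/* For now both bits are always raised together. */
	if (!(atomic_fetch_or(&nohz_flags, flags) & NOHZ_KICK_MASK))
		printf("send reschedule IPI to the ilb CPU\n");
}

static void nohz_idle_balance(void)
{
	unsigned int flags = atomic_fetch_and(&nohz_flags, ~NOHZ_KICK_MASK);

	if (!(flags & NOHZ_KICK_MASK))
		return;

	printf("update blocked load averages\n");	/* done for any kick */
	if (flags & NOHZ_BALANCE_KICK)
		printf("rebalance_domains()\n");	/* only when asked */
}

int main(void)
{
	kick_ilb(NOHZ_KICK_MASK);
	nohz_idle_balance();
	return 0;
}

Once a stats-only kick exists, a kick_ilb(NOHZ_STATS_KICK) in this scheme would age blocked load without triggering a balance, which is exactly the future toggle mentioned above.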
Suggested-by: Vincent Guittot Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 4 ++-- kernel/sched/fair.c | 52 +++++++++++++++++++++++++++++++++++----------------- kernel/sched/sched.h | 4 ++++ 3 files changed, 41 insertions(+), 19 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 96ad1c003d74..69c9a6b07b61 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -583,7 +583,7 @@ static inline bool got_nohz_idle_kick(void) { int cpu = smp_processor_id(); - if (!(atomic_read(nohz_flags(cpu)) & NOHZ_BALANCE_KICK)) + if (!(atomic_read(nohz_flags(cpu)) & NOHZ_KICK_MASK)) return false; if (idle_cpu(cpu) && !need_resched()) @@ -593,7 +593,7 @@ static inline bool got_nohz_idle_kick(void) * We can't run Idle Load Balance on this CPU for this time so we * cancel it and clear NOHZ_BALANCE_KICK */ - atomic_andnot(NOHZ_BALANCE_KICK, nohz_flags(cpu)); + atomic_andnot(NOHZ_KICK_MASK, nohz_flags(cpu)); return false; } diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 5d150478dd58..fc058967c999 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -9082,8 +9082,8 @@ static void nohz_balancer_kick(void) if (ilb_cpu >= nr_cpu_ids) return; - flags = atomic_fetch_or(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)); - if (flags & NOHZ_BALANCE_KICK) + flags = atomic_fetch_or(NOHZ_KICK_MASK, nohz_flags(ilb_cpu)); + if (flags & NOHZ_KICK_MASK) return; /* * Use smp_send_reschedule() instead of resched_cpu(). @@ -9202,8 +9202,6 @@ static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle) int need_serialize, need_decay = 0; u64 max_cost = 0; - update_blocked_averages(cpu); - rcu_read_lock(); for_each_domain(cpu, sd) { /* @@ -9298,20 +9296,27 @@ out: * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the * rebalancing for all the CPUs for whom scheduler ticks are stopped. 
*/ -static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) +static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { - int this_cpu = this_rq->cpu; - struct rq *rq; - int balance_cpu; /* Earliest time when we have to do rebalance again */ unsigned long next_balance = jiffies + 60*HZ; int update_next_balance = 0; + int this_cpu = this_rq->cpu; + unsigned int flags; + int balance_cpu; + struct rq *rq; - if (!(atomic_read(nohz_flags(this_cpu)) & NOHZ_BALANCE_KICK)) - return; + if (!(atomic_read(nohz_flags(this_cpu)) & NOHZ_KICK_MASK)) + return false; - if (idle != CPU_IDLE) - goto end; + if (idle != CPU_IDLE) { + atomic_andnot(NOHZ_KICK_MASK, nohz_flags(this_cpu)); + return false; + } + + flags = atomic_fetch_andnot(NOHZ_KICK_MASK, nohz_flags(this_cpu)); + + SCHED_WARN_ON((flags & NOHZ_KICK_MASK) == NOHZ_BALANCE_KICK); for_each_cpu(balance_cpu, nohz.idle_cpus_mask) { if (balance_cpu == this_cpu || !idle_cpu(balance_cpu)) @@ -9339,7 +9344,9 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) cpu_load_update_idle(rq); rq_unlock_irq(rq, &rf); - rebalance_domains(rq, CPU_IDLE); + update_blocked_averages(rq->cpu); + if (flags & NOHZ_BALANCE_KICK) + rebalance_domains(rq, CPU_IDLE); } if (time_after(next_balance, rq->next_balance)) { @@ -9348,6 +9355,10 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) } } + update_blocked_averages(this_cpu); + if (flags & NOHZ_BALANCE_KICK) + rebalance_domains(this_rq, CPU_IDLE); + /* * next_balance will be updated only when there is a need. * When the CPU is attached to null domain for ex, it will not be @@ -9355,8 +9366,8 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) */ if (likely(update_next_balance)) nohz.next_balance = next_balance; -end: - atomic_andnot(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)); + + return true; } /* @@ -9443,7 +9454,10 @@ unlock: return kick; } #else -static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { } +static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) +{ + return false; +} #endif /* @@ -9464,7 +9478,11 @@ static __latent_entropy void run_rebalance_domains(struct softirq_action *h) * load balance only within the local sched_domain hierarchy * and abort nohz_idle_balance altogether if we pull some load. */ - nohz_idle_balance(this_rq, idle); + if (nohz_idle_balance(this_rq, idle)) + return; + + /* normal load balance */ + update_blocked_averages(this_rq->cpu); rebalance_domains(this_rq, idle); } diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index d98e761b962f..5295f274053b 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -2036,9 +2036,13 @@ extern void cfs_bandwidth_usage_dec(void); #ifdef CONFIG_NO_HZ_COMMON #define NOHZ_TICK_STOPPED_BIT 0 #define NOHZ_BALANCE_KICK_BIT 1 +#define NOHZ_STATS_KICK_BIT 2 #define NOHZ_TICK_STOPPED BIT(NOHZ_TICK_STOPPED_BIT) #define NOHZ_BALANCE_KICK BIT(NOHZ_BALANCE_KICK_BIT) +#define NOHZ_STATS_KICK BIT(NOHZ_STATS_KICK_BIT) + +#define NOHZ_KICK_MASK (NOHZ_BALANCE_KICK | NOHZ_STATS_KICK) #define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags) -- cgit v1.2.3 From 4550487a993d579c7329bb5b19e516d36800c8bf Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 21 Dec 2017 10:47:48 +0100 Subject: sched/fair: Restructure nohz_balance_kick() The current: if (nohz_kick_needed()) nohz_balancer_kick() is pointless complexity, fold them into a single call and avoid the various conditions at the call site. 
When we introduce multiple different needs to kick the ilb, the above construct also becomes a problem. Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 218 ++++++++++++++++++++++++++-------------------------- 1 file changed, 111 insertions(+), 107 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index fc058967c999..fa483d889f07 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -9065,12 +9065,29 @@ static inline int find_new_ilb(void) return nr_cpu_ids; } +static inline void set_cpu_sd_state_busy(void) +{ + struct sched_domain *sd; + int cpu = smp_processor_id(); + + rcu_read_lock(); + sd = rcu_dereference(per_cpu(sd_llc, cpu)); + + if (!sd || !sd->nohz_idle) + goto unlock; + sd->nohz_idle = 0; + + atomic_inc(&sd->shared->nr_busy_cpus); +unlock: + rcu_read_unlock(); +} + /* * Kick a CPU to do the nohz balancing, if it is time for it. We pick the * nohz_load_balancer CPU (if there is one) otherwise fallback to any idle * CPU (if there is one). */ -static void nohz_balancer_kick(void) +static void kick_ilb(void) { unsigned int flags; int ilb_cpu; @@ -9085,6 +9102,7 @@ static void nohz_balancer_kick(void) flags = atomic_fetch_or(NOHZ_KICK_MASK, nohz_flags(ilb_cpu)); if (flags & NOHZ_KICK_MASK) return; + /* * Use smp_send_reschedule() instead of resched_cpu(). * This way we generate a sched IPI on the target CPU which @@ -9092,7 +9110,94 @@ static void nohz_balancer_kick(void) * will be run before returning from the IPI. */ smp_send_reschedule(ilb_cpu); - return; +} + +/* + * Current heuristic for kicking the idle load balancer in the presence + * of an idle cpu in the system. + * - This rq has more than one task. + * - This rq has at least one CFS task and the capacity of the CPU is + * significantly reduced because of RT tasks or IRQs. + * - At parent of LLC scheduler domain level, this cpu's scheduler group has + * multiple busy cpu. + * - For SD_ASYM_PACKING, if the lower numbered cpu's in the scheduler + * domain span are idle. + */ +static void nohz_balancer_kick(struct rq *rq) +{ + unsigned long now = jiffies; + struct sched_domain_shared *sds; + struct sched_domain *sd; + int nr_busy, i, cpu = rq->cpu; + bool kick = false; + + if (unlikely(rq->idle_balance)) + return; + + /* + * We may be recently in ticked or tickless idle mode. At the first + * busy tick after returning from idle, we will update the busy stats. + */ + set_cpu_sd_state_busy(); + nohz_balance_exit_idle(cpu); + + /* + * None are in tickless mode and hence no need for NOHZ idle load + * balancing. + */ + if (likely(!atomic_read(&nohz.nr_cpus))) + return; + + if (time_before(now, nohz.next_balance)) + return; + + if (rq->nr_running >= 2) { + kick = true; + goto out; + } + + rcu_read_lock(); + sds = rcu_dereference(per_cpu(sd_llc_shared, cpu)); + if (sds) { + /* + * XXX: write a coherent comment on why we do this. 
+ * See also: http://lkml.kernel.org/r/20111202010832.602203411@sbsiddha-desk.sc.intel.com + */ + nr_busy = atomic_read(&sds->nr_busy_cpus); + if (nr_busy > 1) { + kick = true; + goto unlock; + } + + } + + sd = rcu_dereference(rq->sd); + if (sd) { + if ((rq->cfs.h_nr_running >= 1) && + check_cpu_capacity(rq, sd)) { + kick = true; + goto unlock; + } + } + + sd = rcu_dereference(per_cpu(sd_asym, cpu)); + if (sd) { + for_each_cpu(i, sched_domain_span(sd)) { + if (i == cpu || + !cpumask_test_cpu(i, nohz.idle_cpus_mask)) + continue; + + if (sched_asym_prefer(i, cpu)) { + kick = true; + goto unlock; + } + } + } +unlock: + rcu_read_unlock(); +out: + if (kick) + kick_ilb(); } void nohz_balance_exit_idle(unsigned int cpu) @@ -9112,23 +9217,6 @@ void nohz_balance_exit_idle(unsigned int cpu) } } -static inline void set_cpu_sd_state_busy(void) -{ - struct sched_domain *sd; - int cpu = smp_processor_id(); - - rcu_read_lock(); - sd = rcu_dereference(per_cpu(sd_llc, cpu)); - - if (!sd || !sd->nohz_idle) - goto unlock; - sd->nohz_idle = 0; - - atomic_inc(&sd->shared->nr_busy_cpus); -unlock: - rcu_read_unlock(); -} - void set_cpu_sd_state_idle(void) { struct sched_domain *sd; @@ -9171,6 +9259,8 @@ void nohz_balance_enter_idle(int cpu) atomic_inc(&nohz.nr_cpus); atomic_or(NOHZ_TICK_STOPPED, nohz_flags(cpu)); } +#else +static inline void nohz_balancer_kick(struct rq *rq) { } #endif static DEFINE_SPINLOCK(balancing); @@ -9369,90 +9459,6 @@ static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) return true; } - -/* - * Current heuristic for kicking the idle load balancer in the presence - * of an idle CPU in the system. - * - This rq has more than one task. - * - This rq has at least one CFS task and the capacity of the CPU is - * significantly reduced because of RT tasks or IRQs. - * - At parent of LLC scheduler domain level, this CPU's scheduler group has - * multiple busy CPUs. - * - For SD_ASYM_PACKING, if the lower numbered CPU's in the scheduler - * domain span are idle. - */ -static inline bool nohz_kick_needed(struct rq *rq) -{ - unsigned long now = jiffies; - struct sched_domain_shared *sds; - struct sched_domain *sd; - int nr_busy, i, cpu = rq->cpu; - bool kick = false; - - if (unlikely(rq->idle_balance)) - return false; - - /* - * We may be recently in ticked or tickless idle mode. At the first - * busy tick after returning from idle, we will update the busy stats. - */ - set_cpu_sd_state_busy(); - nohz_balance_exit_idle(cpu); - - /* - * None are in tickless mode and hence no need for NOHZ idle load - * balancing. - */ - if (likely(!atomic_read(&nohz.nr_cpus))) - return false; - - if (time_before(now, nohz.next_balance)) - return false; - - if (rq->nr_running >= 2) - return true; - - rcu_read_lock(); - sds = rcu_dereference(per_cpu(sd_llc_shared, cpu)); - if (sds) { - /* - * XXX: write a coherent comment on why we do this. 
- * See also: http://lkml.kernel.org/r/20111202010832.602203411@sbsiddha-desk.sc.intel.com - */ - nr_busy = atomic_read(&sds->nr_busy_cpus); - if (nr_busy > 1) { - kick = true; - goto unlock; - } - - } - - sd = rcu_dereference(rq->sd); - if (sd) { - if ((rq->cfs.h_nr_running >= 1) && - check_cpu_capacity(rq, sd)) { - kick = true; - goto unlock; - } - } - - sd = rcu_dereference(per_cpu(sd_asym, cpu)); - if (sd) { - for_each_cpu(i, sched_domain_span(sd)) { - if (i == cpu || - !cpumask_test_cpu(i, nohz.idle_cpus_mask)) - continue; - - if (sched_asym_prefer(i, cpu)) { - kick = true; - goto unlock; - } - } - } -unlock: - rcu_read_unlock(); - return kick; -} #else static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { @@ -9497,10 +9503,8 @@ void trigger_load_balance(struct rq *rq) if (time_after_eq(jiffies, rq->next_balance)) raise_softirq(SCHED_SOFTIRQ); -#ifdef CONFIG_NO_HZ_COMMON - if (nohz_kick_needed(rq)) - nohz_balancer_kick(); -#endif + + nohz_balancer_kick(rq); } static void rq_online_fair(struct rq *rq) -- cgit v1.2.3 From a4064fb614f83c0a097c5ff7fe433c4aa139c7af Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 21 Dec 2017 10:42:50 +0100 Subject: sched/fair: Add NOHZ stats balancing Teach the idle balancer about the need to update statistics which have a different periodicity from regular balancing. Suggested-by: Vincent Guittot Signed-off-by: Peter Zijlstra (Intel) Cc: Frederic Weisbecker Cc: Linus Torvalds Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 31 +++++++++++++++++++------------ 1 file changed, 19 insertions(+), 12 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index fa483d889f07..d8693fa9e7c5 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -9053,6 +9053,7 @@ static struct { cpumask_var_t idle_cpus_mask; atomic_t nr_cpus; unsigned long next_balance; /* in jiffy units */ + unsigned long next_stats; } nohz ____cacheline_aligned; static inline int find_new_ilb(void) @@ -9087,9 +9088,8 @@ unlock: * nohz_load_balancer CPU (if there is one) otherwise fallback to any idle * CPU (if there is one). 
*/ -static void kick_ilb(void) +static void kick_ilb(unsigned int flags) { - unsigned int flags; int ilb_cpu; nohz.next_balance++; @@ -9099,7 +9099,7 @@ static void kick_ilb(void) if (ilb_cpu >= nr_cpu_ids) return; - flags = atomic_fetch_or(NOHZ_KICK_MASK, nohz_flags(ilb_cpu)); + flags = atomic_fetch_or(flags, nohz_flags(ilb_cpu)); if (flags & NOHZ_KICK_MASK) return; @@ -9129,7 +9129,7 @@ static void nohz_balancer_kick(struct rq *rq) struct sched_domain_shared *sds; struct sched_domain *sd; int nr_busy, i, cpu = rq->cpu; - bool kick = false; + unsigned int flags = 0; if (unlikely(rq->idle_balance)) return; @@ -9148,11 +9148,14 @@ static void nohz_balancer_kick(struct rq *rq) if (likely(!atomic_read(&nohz.nr_cpus))) return; + if (time_after(now, nohz.next_stats)) + flags = NOHZ_STATS_KICK; + if (time_before(now, nohz.next_balance)) - return; + goto out; if (rq->nr_running >= 2) { - kick = true; + flags = NOHZ_KICK_MASK; goto out; } @@ -9165,7 +9168,7 @@ static void nohz_balancer_kick(struct rq *rq) */ nr_busy = atomic_read(&sds->nr_busy_cpus); if (nr_busy > 1) { - kick = true; + flags = NOHZ_KICK_MASK; goto unlock; } @@ -9175,7 +9178,7 @@ static void nohz_balancer_kick(struct rq *rq) if (sd) { if ((rq->cfs.h_nr_running >= 1) && check_cpu_capacity(rq, sd)) { - kick = true; + flags = NOHZ_KICK_MASK; goto unlock; } } @@ -9188,7 +9191,7 @@ static void nohz_balancer_kick(struct rq *rq) continue; if (sched_asym_prefer(i, cpu)) { - kick = true; + flags = NOHZ_KICK_MASK; goto unlock; } } @@ -9196,8 +9199,8 @@ static void nohz_balancer_kick(struct rq *rq) unlock: rcu_read_unlock(); out: - if (kick) - kick_ilb(); + if (flags) + kick_ilb(flags); } void nohz_balance_exit_idle(unsigned int cpu) @@ -9389,7 +9392,9 @@ out: static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { /* Earliest time when we have to do rebalance again */ - unsigned long next_balance = jiffies + 60*HZ; + unsigned long now = jiffies; + unsigned long next_balance = now + 60*HZ; + unsigned long next_stats = now + msecs_to_jiffies(LOAD_AVG_PERIOD); int update_next_balance = 0; int this_cpu = this_rq->cpu; unsigned int flags; @@ -9449,6 +9454,8 @@ static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) if (flags & NOHZ_BALANCE_KICK) rebalance_domains(this_rq, CPU_IDLE); + nohz.next_stats = next_stats; + /* * next_balance will be updated only when there is a need. * When the CPU is attached to null domain for ex, it will not be -- cgit v1.2.3 From e022e0d38ad475fc650f22efa3deb2fb96e62542 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 21 Dec 2017 11:20:23 +0100 Subject: sched/fair: Update blocked load from NEWIDLE Since we already iterate CPUs looking for work on NEWIDLE, use this iteration to age the blocked load. If the domain for which this is done completely spand the idle set, we can push the ILB based aging forward. 
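A rough stand-alone sketch of the staleness test that update_nohz_stats() adds below (userspace C with jiffies and time_after() stubbed out and the load decay reduced to a printf; the field and function names mirror the patch, but this is not the kernel code): an idle CPU's blocked load is only re-aged if nobody has done so in the current jiffy, so repeated NEWIDLE passes stay cheap.

#include <stdbool.h>
#include <stdio.h>

static unsigned long jiffies = 1000;		/* stand-in for the kernel tick counter */

static bool time_after(unsigned long a, unsigned long b)
{
	return (long)(b - a) < 0;		/* same wrap-safe comparison as the kernel macro */
}

struct rq {
	int cpu;
	bool nohz_idle;				/* set in nohz.idle_cpus_mask? */
	unsigned long last_blocked_load_update_tick;
};

static void update_blocked_averages(struct rq *rq)
{
	printf("cpu%d: decay blocked load\n", rq->cpu);
	rq->last_blocked_load_update_tick = jiffies;
}

static void update_nohz_stats(struct rq *rq)
{
	if (!rq->nohz_idle)
		return;				/* not a nohz-idle CPU, nothing to age */
	if (!time_after(jiffies, rq->last_blocked_load_update_tick))
		return;				/* already aged this jiffy */
	update_blocked_averages(rq);
}

int main(void)
{
	struct rq rq = {
		.cpu = 1,
		.nohz_idle = true,
		.last_blocked_load_update_tick = 990,
	};

	update_nohz_stats(&rq);	/* stale -> ages the blocked load */
	update_nohz_stats(&rq);	/* fresh -> skipped */

	return 0;
}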
Suggested-by: Brendan Jackman Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 1 + kernel/sched/fair.c | 49 +++++++++++++++++++++++++++++++++++++++++++------ kernel/sched/sched.h | 1 + 3 files changed, 45 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 69c9a6b07b61..8a10a2ce30a4 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -6074,6 +6074,7 @@ void __init sched_init(void) rq_attach_root(rq, &def_root_domain); #ifdef CONFIG_NO_HZ_COMMON rq->last_load_update_tick = jiffies; + rq->last_blocked_load_update_tick = jiffies; atomic_set(&rq->nohz_flags, 0); #endif #endif /* CONFIG_SMP */ diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index d8693fa9e7c5..85232dad89c9 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -5376,6 +5376,14 @@ decay_load_missed(unsigned long load, unsigned long missed_updates, int idx) } return load; } + +static struct { + cpumask_var_t idle_cpus_mask; + atomic_t nr_cpus; + unsigned long next_balance; /* in jiffy units */ + unsigned long next_stats; +} nohz ____cacheline_aligned; + #endif /* CONFIG_NO_HZ_COMMON */ /** @@ -7022,6 +7030,7 @@ enum fbq_type { regular, remote, all }; #define LBF_NEED_BREAK 0x02 #define LBF_DST_PINNED 0x04 #define LBF_SOME_PINNED 0x08 +#define LBF_NOHZ_STATS 0x10 struct lb_env { struct sched_domain *sd; @@ -7460,6 +7469,10 @@ static void update_blocked_averages(int cpu) if (cfs_rq_is_decayed(cfs_rq)) list_del_leaf_cfs_rq(cfs_rq); } + +#ifdef CONFIG_NO_HZ_COMMON + rq->last_blocked_load_update_tick = jiffies; +#endif rq_unlock_irqrestore(rq, &rf); } @@ -7519,6 +7532,9 @@ static inline void update_blocked_averages(int cpu) rq_lock_irqsave(rq, &rf); update_rq_clock(rq); update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq); +#ifdef CONFIG_NO_HZ_COMMON + rq->last_blocked_load_update_tick = jiffies; +#endif rq_unlock_irqrestore(rq, &rf); } @@ -7853,6 +7869,21 @@ group_type group_classify(struct sched_group *group, return group_other; } +static void update_nohz_stats(struct rq *rq) +{ +#ifdef CONFIG_NO_HZ_COMMON + unsigned int cpu = rq->cpu; + + if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask)) + return; + + if (!time_after(jiffies, rq->last_blocked_load_update_tick)) + return; + + update_blocked_averages(cpu); +#endif +} + /** * update_sg_lb_stats - Update sched_group's statistics for load balancing. * @env: The load balancing environment. @@ -7875,6 +7906,9 @@ static inline void update_sg_lb_stats(struct lb_env *env, for_each_cpu_and(i, sched_group_span(group), env->cpus) { struct rq *rq = cpu_rq(i); + if (env->flags & LBF_NOHZ_STATS) + update_nohz_stats(rq); + /* Bias balancing toward CPUs of our domain: */ if (local_group) load = target_load(i, load_idx); @@ -8030,6 +8064,15 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd if (child && child->flags & SD_PREFER_SIBLING) prefer_sibling = 1; +#ifdef CONFIG_NO_HZ_COMMON + if (env->idle == CPU_NEWLY_IDLE) { + env->flags |= LBF_NOHZ_STATS; + + if (cpumask_subset(nohz.idle_cpus_mask, sched_domain_span(env->sd))) + nohz.next_stats = jiffies + msecs_to_jiffies(LOAD_AVG_PERIOD); + } +#endif + load_idx = get_sd_load_idx(env->sd, env->idle); do { @@ -9049,12 +9092,6 @@ static inline int on_null_domain(struct rq *rq) * needed, they will kick the idle load balancer, which then does idle * load balancing for all the idle CPUs. 
*/ -static struct { - cpumask_var_t idle_cpus_mask; - atomic_t nr_cpus; - unsigned long next_balance; /* in jiffy units */ - unsigned long next_stats; -} nohz ____cacheline_aligned; static inline int find_new_ilb(void) { diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 5295f274053b..21381d276709 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -762,6 +762,7 @@ struct rq { #ifdef CONFIG_NO_HZ_COMMON #ifdef CONFIG_SMP unsigned long last_load_update_tick; + unsigned long last_blocked_load_update_tick; #endif /* CONFIG_SMP */ atomic_t nohz_flags; #endif /* CONFIG_NO_HZ_COMMON */ -- cgit v1.2.3 From 00357f5ec5d67a52a175da6f29f85c2c19d59bc8 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 21 Dec 2017 15:06:50 +0100 Subject: sched/nohz: Clean up nohz enter/exit The primary observation is that nohz enter/exit is always from the current CPU, therefore NOHZ_TICK_STOPPED does not in fact need to be an atomic. Secondary is that we appear to have 2 nearly identical hooks in the nohz enter code, set_cpu_sd_state_idle() and nohz_balance_enter_idle(). Fold the whole set_cpu_sd_state thing into nohz_balance_{enter,exit}_idle. Removes an atomic op from both enter and exit paths. Signed-off-by: Peter Zijlstra (Intel) Cc: Frederic Weisbecker Cc: Linus Torvalds Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- include/linux/sched/nohz.h | 2 -- kernel/sched/core.c | 2 +- kernel/sched/fair.c | 73 +++++++++++++++++++++++----------------------- kernel/sched/sched.h | 11 ++++--- kernel/time/tick-sched.c | 7 ----- 5 files changed, 43 insertions(+), 52 deletions(-) (limited to 'kernel') diff --git a/include/linux/sched/nohz.h b/include/linux/sched/nohz.h index 094217273ff9..b36f4cf38111 100644 --- a/include/linux/sched/nohz.h +++ b/include/linux/sched/nohz.h @@ -16,11 +16,9 @@ static inline void cpu_load_update_nohz_stop(void) { } #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) extern void nohz_balance_enter_idle(int cpu); -extern void set_cpu_sd_state_idle(void); extern int get_nohz_timer_target(void); #else static inline void nohz_balance_enter_idle(int cpu) { } -static inline void set_cpu_sd_state_idle(void) { } #endif #ifdef CONFIG_NO_HZ_COMMON diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 8a10a2ce30a4..c7faeb7bd03a 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -5861,7 +5861,7 @@ int sched_cpu_dying(unsigned int cpu) calc_load_migrate(rq); update_max_interval(); - nohz_balance_exit_idle(cpu); + nohz_balance_exit_idle(rq); hrtick_clear(rq); return 0; } diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 85232dad89c9..494d5db9a6cd 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -9103,23 +9103,6 @@ static inline int find_new_ilb(void) return nr_cpu_ids; } -static inline void set_cpu_sd_state_busy(void) -{ - struct sched_domain *sd; - int cpu = smp_processor_id(); - - rcu_read_lock(); - sd = rcu_dereference(per_cpu(sd_llc, cpu)); - - if (!sd || !sd->nohz_idle) - goto unlock; - sd->nohz_idle = 0; - - atomic_inc(&sd->shared->nr_busy_cpus); -unlock: - rcu_read_unlock(); -} - /* * Kick a CPU to do the nohz balancing, if it is time for it. We pick the * nohz_load_balancer CPU (if there is one) otherwise fallback to any idle @@ -9175,8 +9158,7 @@ static void nohz_balancer_kick(struct rq *rq) * We may be recently in ticked or tickless idle mode. At the first * busy tick after returning from idle, we will update the busy stats. 
*/ - set_cpu_sd_state_busy(); - nohz_balance_exit_idle(cpu); + nohz_balance_exit_idle(rq); /* * None are in tickless mode and hence no need for NOHZ idle load @@ -9240,27 +9222,39 @@ out: kick_ilb(flags); } -void nohz_balance_exit_idle(unsigned int cpu) +static void set_cpu_sd_state_busy(int cpu) { - unsigned int flags = atomic_read(nohz_flags(cpu)); + struct sched_domain *sd; - if (unlikely(flags & NOHZ_TICK_STOPPED)) { - /* - * Completely isolated CPUs don't ever set, so we must test. - */ - if (likely(cpumask_test_cpu(cpu, nohz.idle_cpus_mask))) { - cpumask_clear_cpu(cpu, nohz.idle_cpus_mask); - atomic_dec(&nohz.nr_cpus); - } + rcu_read_lock(); + sd = rcu_dereference(per_cpu(sd_llc, cpu)); - atomic_andnot(NOHZ_TICK_STOPPED, nohz_flags(cpu)); - } + if (!sd || !sd->nohz_idle) + goto unlock; + sd->nohz_idle = 0; + + atomic_inc(&sd->shared->nr_busy_cpus); +unlock: + rcu_read_unlock(); } -void set_cpu_sd_state_idle(void) +void nohz_balance_exit_idle(struct rq *rq) +{ + SCHED_WARN_ON(rq != this_rq()); + + if (likely(!rq->nohz_tick_stopped)) + return; + + rq->nohz_tick_stopped = 0; + cpumask_clear_cpu(rq->cpu, nohz.idle_cpus_mask); + atomic_dec(&nohz.nr_cpus); + + set_cpu_sd_state_busy(rq->cpu); +} + +static void set_cpu_sd_state_idle(int cpu) { struct sched_domain *sd; - int cpu = smp_processor_id(); rcu_read_lock(); sd = rcu_dereference(per_cpu(sd_llc, cpu)); @@ -9280,6 +9274,10 @@ unlock: */ void nohz_balance_enter_idle(int cpu) { + struct rq *rq = cpu_rq(cpu); + + SCHED_WARN_ON(cpu != smp_processor_id()); + /* If this CPU is going down, then nothing needs to be done: */ if (!cpu_active(cpu)) return; @@ -9288,16 +9286,19 @@ void nohz_balance_enter_idle(int cpu) if (!housekeeping_cpu(cpu, HK_FLAG_SCHED)) return; - if (atomic_read(nohz_flags(cpu)) & NOHZ_TICK_STOPPED) + if (rq->nohz_tick_stopped) return; /* If we're a completely isolated CPU, we don't play: */ - if (on_null_domain(cpu_rq(cpu))) + if (on_null_domain(rq)) return; + rq->nohz_tick_stopped = 1; + cpumask_set_cpu(cpu, nohz.idle_cpus_mask); atomic_inc(&nohz.nr_cpus); - atomic_or(NOHZ_TICK_STOPPED, nohz_flags(cpu)); + + set_cpu_sd_state_idle(cpu); } #else static inline void nohz_balancer_kick(struct rq *rq) { } diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 21381d276709..818f22dbc7ea 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -764,6 +764,7 @@ struct rq { unsigned long last_load_update_tick; unsigned long last_blocked_load_update_tick; #endif /* CONFIG_SMP */ + unsigned int nohz_tick_stopped; atomic_t nohz_flags; #endif /* CONFIG_NO_HZ_COMMON */ @@ -2035,11 +2036,9 @@ extern void cfs_bandwidth_usage_inc(void); extern void cfs_bandwidth_usage_dec(void); #ifdef CONFIG_NO_HZ_COMMON -#define NOHZ_TICK_STOPPED_BIT 0 -#define NOHZ_BALANCE_KICK_BIT 1 -#define NOHZ_STATS_KICK_BIT 2 +#define NOHZ_BALANCE_KICK_BIT 0 +#define NOHZ_STATS_KICK_BIT 1 -#define NOHZ_TICK_STOPPED BIT(NOHZ_TICK_STOPPED_BIT) #define NOHZ_BALANCE_KICK BIT(NOHZ_BALANCE_KICK_BIT) #define NOHZ_STATS_KICK BIT(NOHZ_STATS_KICK_BIT) @@ -2047,9 +2046,9 @@ extern void cfs_bandwidth_usage_dec(void); #define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags) -extern void nohz_balance_exit_idle(unsigned int cpu); +extern void nohz_balance_exit_idle(struct rq *rq); #else -static inline void nohz_balance_exit_idle(unsigned int cpu) { } +static inline void nohz_balance_exit_idle(struct rq *rq) { } #endif diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index f2fa2e940fe5..ab92aa4442df 100644 --- a/kernel/time/tick-sched.c +++ 
b/kernel/time/tick-sched.c @@ -954,13 +954,6 @@ void tick_nohz_idle_enter(void) struct tick_sched *ts; lockdep_assert_irqs_enabled(); - /* - * Update the idle state in the scheduler domain hierarchy - * when tick_nohz_stop_sched_tick() is called from the idle loop. - * State will be updated to busy during the first busy tick after - * exiting idle. - */ - set_cpu_sd_state_idle(); local_irq_disable(); -- cgit v1.2.3 From ea14b57e8a181ac0561eba7a787e088f8c89f822 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 2 Feb 2018 10:27:00 +0100 Subject: sched/cpufreq: Provide migration hint It was suggested that a migration hint might be usefull for the CPU-freq governors. Signed-off-by: Peter Zijlstra (Intel) Cc: Juri Lelli Cc: Linus Torvalds Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Rafael J. Wysocki Cc: Thomas Gleixner Cc: Viresh Kumar Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- include/linux/sched/cpufreq.h | 1 + kernel/sched/fair.c | 31 +++++++++++++++++++------------ 2 files changed, 20 insertions(+), 12 deletions(-) (limited to 'kernel') diff --git a/include/linux/sched/cpufreq.h b/include/linux/sched/cpufreq.h index b48f2fb3b316..59667444669f 100644 --- a/include/linux/sched/cpufreq.h +++ b/include/linux/sched/cpufreq.h @@ -9,6 +9,7 @@ */ #define SCHED_CPUFREQ_IOWAIT (1U << 0) +#define SCHED_CPUFREQ_MIGRATION (1U << 1) #ifdef CONFIG_CPU_FREQ struct update_util_data { diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 494d5db9a6cd..e8f5efe2936c 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -772,7 +772,7 @@ void post_init_entity_util_avg(struct sched_entity *se) * For !fair tasks do: * update_cfs_rq_load_avg(now, cfs_rq); - attach_entity_load_avg(cfs_rq, se); + attach_entity_load_avg(cfs_rq, se, 0); switched_from_fair(rq, p); * * such that the next switched_to_fair() has the @@ -3009,11 +3009,11 @@ static inline void update_cfs_group(struct sched_entity *se) } #endif /* CONFIG_FAIR_GROUP_SCHED */ -static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq) +static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags) { struct rq *rq = rq_of(cfs_rq); - if (&rq->cfs == cfs_rq) { + if (&rq->cfs == cfs_rq || (flags & SCHED_CPUFREQ_MIGRATION)) { /* * There are a few boundary cases this might miss but it should * get called often enough that that should (hopefully) not be @@ -3028,7 +3028,7 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq) * * See cpu_util(). */ - cpufreq_update_util(rq, 0); + cpufreq_update_util(rq, flags); } } @@ -3686,7 +3686,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) #endif if (decayed) - cfs_rq_util_change(cfs_rq); + cfs_rq_util_change(cfs_rq, 0); return decayed; } @@ -3699,7 +3699,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) * Must call update_cfs_rq_load_avg() before this, since we rely on * cfs_rq->avg.last_update_time being current. 
*/ -static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) +static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) { u32 divider = LOAD_AVG_MAX - 1024 + cfs_rq->avg.period_contrib; @@ -3735,7 +3735,7 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s add_tg_cfs_propagate(cfs_rq, se->avg.load_sum); - cfs_rq_util_change(cfs_rq); + cfs_rq_util_change(cfs_rq, flags); } /** @@ -3754,7 +3754,7 @@ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum); - cfs_rq_util_change(cfs_rq); + cfs_rq_util_change(cfs_rq, 0); } /* @@ -3784,7 +3784,14 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s if (!se->avg.last_update_time && (flags & DO_ATTACH)) { - attach_entity_load_avg(cfs_rq, se); + /* + * DO_ATTACH means we're here from enqueue_entity(). + * !last_update_time means we've passed through + * migrate_task_rq_fair() indicating we migrated. + * + * IOW we're enqueueing a task on a new CPU. + */ + attach_entity_load_avg(cfs_rq, se, SCHED_CPUFREQ_MIGRATION); update_tg_load_avg(cfs_rq, 0); } else if (decayed && (flags & UPDATE_TG)) @@ -3880,13 +3887,13 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1) { - cfs_rq_util_change(cfs_rq); + cfs_rq_util_change(cfs_rq, 0); } static inline void remove_entity_load_avg(struct sched_entity *se) {} static inline void -attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} +attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) {} static inline void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} @@ -9726,7 +9733,7 @@ static void attach_entity_cfs_rq(struct sched_entity *se) /* Synchronize entity with its cfs_rq */ update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD); - attach_entity_load_avg(cfs_rq, se); + attach_entity_load_avg(cfs_rq, se, 0); update_tg_load_avg(cfs_rq, false); propagate_entity_cfs_rq(se); } -- cgit v1.2.3 From f643ea2207010db26f17fca99db031bad87c8461 Mon Sep 17 00:00:00 2001 From: Vincent Guittot Date: Tue, 13 Feb 2018 11:31:17 +0100 Subject: sched/nohz: Stop NOHZ stats when decayed Stop the periodic update of blocked load when all idle CPUs have fully decayed. We introduce a new nohz.has_blocked flag that reflects whether some idle CPUs have blocked load that has to be periodically updated. nohz.has_blocked is set every time an idle CPU can have blocked load, and it is then cleared when no more blocked load has been detected during an update. We don't need an atomic operation, only the right ordering when updating nohz.idle_cpus_mask and nohz.has_blocked.
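That ordering can be illustrated with a small user-space model. The boolean and the flag below are stand-ins for this CPU's bit in nohz.idle_cpus_mask and for nohz.has_blocked; the seq_cst fences play the role of the kernel's barriers. This is a toy model, not the kernel code.

/* Toy model of the required ordering, using C11 atomics. */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool in_idle_mask;        /* "bit" in nohz.idle_cpus_mask */
static atomic_int  has_blocked;         /* nohz.has_blocked */

static void enter_idle(void)
{
        /* Join the idle mask first ... */
        atomic_store(&in_idle_mask, true);
        atomic_thread_fence(memory_order_seq_cst);
        /* ... then advertise possibly-blocked load. */
        atomic_store(&has_blocked, 1);
}

static bool update_pass(bool cpu_still_has_blocked_load)
{
        /* Clear first: a CPU idling after this point re-sets the flag. */
        atomic_store(&has_blocked, 0);
        atomic_thread_fence(memory_order_seq_cst);

        /*
         * Either we see the CPU in the mask here and check its load, or
         * we missed it and its has_blocked store survives our clear.
         */
        bool pending = atomic_load(&in_idle_mask) && cpu_still_has_blocked_load;

        if (pending)
                atomic_store(&has_blocked, 1);
        return pending;
}

int main(void)
{
        enter_idle();
        return update_pass(true) ? 0 : 1;
}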
Suggested-by: Peter Zijlstra (Intel) Signed-off-by: Vincent Guittot Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: brendan.jackman@arm.com Cc: dietmar.eggemann@arm.com Cc: morten.rasmussen@foss.arm.com Cc: valentin.schneider@arm.com Link: http://lkml.kernel.org/r/1518517879-2280-2-git-send-email-vincent.guittot@linaro.org Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 116 ++++++++++++++++++++++++++++++++++++++++++--------- kernel/sched/sched.h | 1 + 2 files changed, 97 insertions(+), 20 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index e8f5efe2936c..78b06a0814d1 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -5387,8 +5387,9 @@ decay_load_missed(unsigned long load, unsigned long missed_updates, int idx) static struct { cpumask_var_t idle_cpus_mask; atomic_t nr_cpus; + int has_blocked; /* Idle CPUS has blocked load */ unsigned long next_balance; /* in jiffy units */ - unsigned long next_stats; + unsigned long next_blocked; /* Next update of blocked load in jiffies */ } nohz ____cacheline_aligned; #endif /* CONFIG_NO_HZ_COMMON */ @@ -7038,6 +7039,7 @@ enum fbq_type { regular, remote, all }; #define LBF_DST_PINNED 0x04 #define LBF_SOME_PINNED 0x08 #define LBF_NOHZ_STATS 0x10 +#define LBF_NOHZ_AGAIN 0x20 struct lb_env { struct sched_domain *sd; @@ -7422,8 +7424,6 @@ static void attach_tasks(struct lb_env *env) rq_unlock(env->dst_rq, &rf); } -#ifdef CONFIG_FAIR_GROUP_SCHED - static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq) { if (cfs_rq->load.weight) @@ -7441,11 +7441,14 @@ static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq) return true; } +#ifdef CONFIG_FAIR_GROUP_SCHED + static void update_blocked_averages(int cpu) { struct rq *rq = cpu_rq(cpu); struct cfs_rq *cfs_rq, *pos; struct rq_flags rf; + bool done = true; rq_lock_irqsave(rq, &rf); update_rq_clock(rq); @@ -7475,10 +7478,14 @@ static void update_blocked_averages(int cpu) */ if (cfs_rq_is_decayed(cfs_rq)) list_del_leaf_cfs_rq(cfs_rq); + else + done = false; } #ifdef CONFIG_NO_HZ_COMMON rq->last_blocked_load_update_tick = jiffies; + if (done) + rq->has_blocked_load = 0; #endif rq_unlock_irqrestore(rq, &rf); } @@ -7541,6 +7548,8 @@ static inline void update_blocked_averages(int cpu) update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq); #ifdef CONFIG_NO_HZ_COMMON rq->last_blocked_load_update_tick = jiffies; + if (cfs_rq_is_decayed(cfs_rq)) + rq->has_blocked_load = 0; #endif rq_unlock_irqrestore(rq, &rf); } @@ -7876,18 +7885,25 @@ group_type group_classify(struct sched_group *group, return group_other; } -static void update_nohz_stats(struct rq *rq) +static bool update_nohz_stats(struct rq *rq) { #ifdef CONFIG_NO_HZ_COMMON unsigned int cpu = rq->cpu; + if (!rq->has_blocked_load) + return false; + if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask)) - return; + return false; if (!time_after(jiffies, rq->last_blocked_load_update_tick)) - return; + return true; update_blocked_averages(cpu); + + return rq->has_blocked_load; +#else + return false; #endif } @@ -7913,8 +7929,8 @@ static inline void update_sg_lb_stats(struct lb_env *env, for_each_cpu_and(i, sched_group_span(group), env->cpus) { struct rq *rq = cpu_rq(i); - if (env->flags & LBF_NOHZ_STATS) - update_nohz_stats(rq); + if ((env->flags & LBF_NOHZ_STATS) && update_nohz_stats(rq)) + env->flags |= LBF_NOHZ_AGAIN; /* Bias balancing toward CPUs of our domain: */ if (local_group) @@ -8072,12 +8088,8 @@ static inline void update_sd_lb_stats(struct lb_env 
*env, struct sd_lb_stats *sd prefer_sibling = 1; #ifdef CONFIG_NO_HZ_COMMON - if (env->idle == CPU_NEWLY_IDLE) { + if (env->idle == CPU_NEWLY_IDLE && READ_ONCE(nohz.has_blocked)) env->flags |= LBF_NOHZ_STATS; - - if (cpumask_subset(nohz.idle_cpus_mask, sched_domain_span(env->sd))) - nohz.next_stats = jiffies + msecs_to_jiffies(LOAD_AVG_PERIOD); - } #endif load_idx = get_sd_load_idx(env->sd, env->idle); @@ -8133,6 +8145,15 @@ next_group: sg = sg->next; } while (sg != env->sd->groups); +#ifdef CONFIG_NO_HZ_COMMON + if ((env->flags & LBF_NOHZ_AGAIN) && + cpumask_subset(nohz.idle_cpus_mask, sched_domain_span(env->sd))) { + + WRITE_ONCE(nohz.next_blocked, + jiffies + msecs_to_jiffies(LOAD_AVG_PERIOD)); + } +#endif + if (env->sd->flags & SD_NUMA) env->fbq_type = fbq_classify_group(&sds->busiest_stat); @@ -9174,7 +9195,8 @@ static void nohz_balancer_kick(struct rq *rq) if (likely(!atomic_read(&nohz.nr_cpus))) return; - if (time_after(now, nohz.next_stats)) + if (READ_ONCE(nohz.has_blocked) && + time_after(now, READ_ONCE(nohz.next_blocked))) flags = NOHZ_STATS_KICK; if (time_before(now, nohz.next_balance)) @@ -9293,8 +9315,21 @@ void nohz_balance_enter_idle(int cpu) if (!housekeeping_cpu(cpu, HK_FLAG_SCHED)) return; + /* + * Can be set safely without rq->lock held + * If a clear happens, it will have evaluated last additions because + * rq->lock is held during the check and the clear + */ + rq->has_blocked_load = 1; + + /* + * The tick is still stopped but load could have been added in the + * meantime. We set the nohz.has_blocked flag to trig a check of the + * *_avg. The CPU is already part of nohz.idle_cpus_mask so the clear + * of nohz.has_blocked can only happen after checking the new load + */ if (rq->nohz_tick_stopped) - return; + goto out; /* If we're a completely isolated CPU, we don't play: */ if (on_null_domain(rq)) @@ -9305,7 +9340,21 @@ void nohz_balance_enter_idle(int cpu) cpumask_set_cpu(cpu, nohz.idle_cpus_mask); atomic_inc(&nohz.nr_cpus); + /* + * Ensures that if nohz_idle_balance() fails to observe our + * @idle_cpus_mask store, it must observe the @has_blocked + * store. + */ + smp_mb__after_atomic(); + set_cpu_sd_state_idle(cpu); + +out: + /* + * Each time a cpu enter idle, we assume that it has blocked load and + * enable the periodic update of the load of idle cpus + */ + WRITE_ONCE(nohz.has_blocked, 1); } #else static inline void nohz_balancer_kick(struct rq *rq) { } @@ -9439,7 +9488,7 @@ static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) /* Earliest time when we have to do rebalance again */ unsigned long now = jiffies; unsigned long next_balance = now + 60*HZ; - unsigned long next_stats = now + msecs_to_jiffies(LOAD_AVG_PERIOD); + bool has_blocked_load = false; int update_next_balance = 0; int this_cpu = this_rq->cpu; unsigned int flags; @@ -9458,6 +9507,22 @@ static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) SCHED_WARN_ON((flags & NOHZ_KICK_MASK) == NOHZ_BALANCE_KICK); + /* + * We assume there will be no idle load after this update and clear + * the has_blocked flag. If a cpu enters idle in the mean time, it will + * set the has_blocked flag and trig another update of idle load. + * Because a cpu that becomes idle, is added to idle_cpus_mask before + * setting the flag, we are sure to not clear the state and not + * check the load of an idle cpu. + */ + WRITE_ONCE(nohz.has_blocked, 0); + + /* + * Ensures that if we miss the CPU, we must see the has_blocked + * store from nohz_balance_enter_idle(). 
+ */ + smp_mb(); + for_each_cpu(balance_cpu, nohz.idle_cpus_mask) { if (balance_cpu == this_cpu || !idle_cpu(balance_cpu)) continue; @@ -9467,11 +9532,16 @@ static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) * work being done for other CPUs. Next load * balancing owner will pick it up. */ - if (need_resched()) - break; + if (need_resched()) { + has_blocked_load = true; + goto abort; + } rq = cpu_rq(balance_cpu); + update_blocked_averages(rq->cpu); + has_blocked_load |= rq->has_blocked_load; + /* * If time for next balance is due, * do the balance. */ @@ -9484,7 +9554,6 @@ static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) cpu_load_update_idle(rq); rq_unlock_irq(rq, &rf); - update_blocked_averages(rq->cpu); if (flags & NOHZ_BALANCE_KICK) rebalance_domains(rq, CPU_IDLE); } @@ -9499,7 +9568,13 @@ static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) if (flags & NOHZ_BALANCE_KICK) rebalance_domains(this_rq, CPU_IDLE); - nohz.next_stats = next_stats; + WRITE_ONCE(nohz.next_blocked, + now + msecs_to_jiffies(LOAD_AVG_PERIOD)); + +abort: + /* There is still blocked load, enable periodic update */ + if (has_blocked_load) + WRITE_ONCE(nohz.has_blocked, 1); /* * next_balance will be updated only when there is a need. @@ -10135,6 +10210,7 @@ __init void init_sched_fair_class(void) #ifdef CONFIG_NO_HZ_COMMON nohz.next_balance = jiffies; + nohz.next_blocked = jiffies; zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT); #endif #endif /* SMP */ diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 818f22dbc7ea..22909ffc04fb 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -763,6 +763,7 @@ struct rq { #ifdef CONFIG_SMP unsigned long last_load_update_tick; unsigned long last_blocked_load_update_tick; + unsigned int has_blocked_load; #endif /* CONFIG_SMP */ unsigned int nohz_tick_stopped; atomic_t nohz_flags; -- cgit v1.2.3 From 1936c53ce8c8d4555e9ccad2dc8d98e0637b11f7 Mon Sep 17 00:00:00 2001 From: Vincent Guittot Date: Tue, 13 Feb 2018 11:31:18 +0100 Subject: sched/fair: Reduce the periodic update duration Instead of using cfs_rq_is_decayed(), which monitors all *_avg and *_sum, we create cfs_rq_has_blocked(), which only takes care of util_avg and load_avg. We are only interested in these two values, which decay faster than the *_sum, so we can stop the periodic update earlier.
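Reduced to a stand-alone sketch, the cheaper predicate is simply a check of the two fast-decaying averages; 'struct avg' below is a made-up stand-in for the kernel's struct sched_avg.

/* Stand-alone sketch; 'struct avg' is illustrative only. */
#include <stdbool.h>

struct avg {
        unsigned long load_avg;
        unsigned long util_avg;
};

/* Keep periodic updates running only while something is left to decay. */
static inline bool avg_has_blocked(const struct avg *a)
{
        return a->load_avg || a->util_avg;
}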
Signed-off-by: Vincent Guittot Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: brendan.jackman@arm.com Cc: dietmar.eggemann@arm.com Cc: morten.rasmussen@foss.arm.com Cc: valentin.schneider@arm.com Link: http://lkml.kernel.org/r/1518517879-2280-3-git-send-email-vincent.guittot@linaro.org Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 78b06a0814d1..aad7c03dbad8 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -7424,6 +7424,19 @@ static void attach_tasks(struct lb_env *env) rq_unlock(env->dst_rq, &rf); } +static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq) +{ + if (cfs_rq->avg.load_avg) + return true; + + if (cfs_rq->avg.util_avg) + return true; + + return false; +} + +#ifdef CONFIG_FAIR_GROUP_SCHED + static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq) { if (cfs_rq->load.weight) @@ -7441,8 +7454,6 @@ static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq) return true; } -#ifdef CONFIG_FAIR_GROUP_SCHED - static void update_blocked_averages(int cpu) { struct rq *rq = cpu_rq(cpu); @@ -7478,7 +7489,9 @@ static void update_blocked_averages(int cpu) */ if (cfs_rq_is_decayed(cfs_rq)) list_del_leaf_cfs_rq(cfs_rq); - else + + /* Don't need periodic decay once load/util_avg are null */ + if (cfs_rq_has_blocked(cfs_rq)) done = false; } @@ -7548,7 +7561,7 @@ static inline void update_blocked_averages(int cpu) update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq); #ifdef CONFIG_NO_HZ_COMMON rq->last_blocked_load_update_tick = jiffies; - if (cfs_rq_is_decayed(cfs_rq)) + if (!cfs_rq_has_blocked(cfs_rq)) rq->has_blocked_load = 0; #endif rq_unlock_irqrestore(rq, &rf); -- cgit v1.2.3 From 63928384faefba1b31c3bb77361965715a9fc71c Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 13 Feb 2018 16:54:17 +0100 Subject: sched/nohz: Optimize nohz_idle_balance() Avoid calling update_blocked_averages() when the remote rq does not in fact have any blocked load, by re-using/extending update_nohz_stats().
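The reuse works because the helper gains a 'force' argument, so the same code serves both the rate-limited wakeup path and the unconditional pass from the ILB. A sketch of that pattern, with invented types standing in for the rq fields:

/* Sketch of the force/rate-limited update pattern; types are invented. */
#include <stdbool.h>

struct blocked_stats {
        unsigned long last_update;      /* like last_blocked_load_update_tick */
        bool has_blocked;
};

static bool update_stats(struct blocked_stats *s, unsigned long now,
                         unsigned long min_age, bool force)
{
        if (!s->has_blocked)
                return false;                   /* nothing left to decay */

        if (!force && now - s->last_update < min_age)
                return true;                    /* too recent: skip the work */

        /* ... recompute the blocked averages here ... */
        s->last_update = now;
        return s->has_blocked;
}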
Signed-off-by: Peter Zijlstra (Intel) Cc: Frederic Weisbecker Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index aad7c03dbad8..5c357561db5d 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -7898,7 +7898,7 @@ group_type group_classify(struct sched_group *group, return group_other; } -static bool update_nohz_stats(struct rq *rq) +static bool update_nohz_stats(struct rq *rq, bool force) { #ifdef CONFIG_NO_HZ_COMMON unsigned int cpu = rq->cpu; @@ -7909,7 +7909,7 @@ static bool update_nohz_stats(struct rq *rq) if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask)) return false; - if (!time_after(jiffies, rq->last_blocked_load_update_tick)) + if (!force && !time_after(jiffies, rq->last_blocked_load_update_tick)) return true; update_blocked_averages(cpu); @@ -7942,7 +7942,7 @@ static inline void update_sg_lb_stats(struct lb_env *env, for_each_cpu_and(i, sched_group_span(group), env->cpus) { struct rq *rq = cpu_rq(i); - if ((env->flags & LBF_NOHZ_STATS) && update_nohz_stats(rq)) + if ((env->flags & LBF_NOHZ_STATS) && update_nohz_stats(rq, false)) env->flags |= LBF_NOHZ_AGAIN; /* Bias balancing toward CPUs of our domain: */ @@ -9552,8 +9552,7 @@ static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) rq = cpu_rq(balance_cpu); - update_blocked_averages(rq->cpu); - has_blocked_load |= rq->has_blocked_load; + has_blocked_load |= update_nohz_stats(rq, true); /* * If time for next balance is due, -- cgit v1.2.3 From af3fe03c562055bc3c116eabe73f141ae31bf234 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 20 Feb 2018 10:58:39 +0100 Subject: sched/fair: Move rebalance_domains() This pure code movement results in two #ifdef CONFIG_NO_HZ_COMMON sections landing next to each other. Signed-off-by: Peter Zijlstra (Intel) Cc: Frederic Weisbecker Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 236 ++++++++++++++++++++++++++-------------------------- 1 file changed, 118 insertions(+), 118 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 5c357561db5d..0da79d8a6a2c 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -9121,6 +9121,124 @@ out_unlock: return 0; } +static DEFINE_SPINLOCK(balancing); + +/* + * Scale the max load_balance interval with the number of CPUs in the system. + * This trades load-balance latency on larger machines for less cross talk. + */ +void update_max_interval(void) +{ + max_load_balance_interval = HZ*num_online_cpus()/10; +} + +/* + * It checks each scheduling domain to see if it is due to be balanced, + * and initiates a balancing operation if so. + * + * Balancing parameters are set up in init_sched_domains. + */ +static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle) +{ + int continue_balancing = 1; + int cpu = rq->cpu; + unsigned long interval; + struct sched_domain *sd; + /* Earliest time when we have to do rebalance again */ + unsigned long next_balance = jiffies + 60*HZ; + int update_next_balance = 0; + int need_serialize, need_decay = 0; + u64 max_cost = 0; + + rcu_read_lock(); + for_each_domain(cpu, sd) { + /* + * Decay the newidle max times here because this is a regular + * visit to all the domains. Decay ~1% per second. 
+ */ + if (time_after(jiffies, sd->next_decay_max_lb_cost)) { + sd->max_newidle_lb_cost = + (sd->max_newidle_lb_cost * 253) / 256; + sd->next_decay_max_lb_cost = jiffies + HZ; + need_decay = 1; + } + max_cost += sd->max_newidle_lb_cost; + + if (!(sd->flags & SD_LOAD_BALANCE)) + continue; + + /* + * Stop the load balance at this level. There is another + * CPU in our sched group which is doing load balancing more + * actively. + */ + if (!continue_balancing) { + if (need_decay) + continue; + break; + } + + interval = get_sd_balance_interval(sd, idle != CPU_IDLE); + + need_serialize = sd->flags & SD_SERIALIZE; + if (need_serialize) { + if (!spin_trylock(&balancing)) + goto out; + } + + if (time_after_eq(jiffies, sd->last_balance + interval)) { + if (load_balance(cpu, rq, sd, idle, &continue_balancing)) { + /* + * The LBF_DST_PINNED logic could have changed + * env->dst_cpu, so we can't know our idle + * state even if we migrated tasks. Update it. + */ + idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE; + } + sd->last_balance = jiffies; + interval = get_sd_balance_interval(sd, idle != CPU_IDLE); + } + if (need_serialize) + spin_unlock(&balancing); +out: + if (time_after(next_balance, sd->last_balance + interval)) { + next_balance = sd->last_balance + interval; + update_next_balance = 1; + } + } + if (need_decay) { + /* + * Ensure the rq-wide value also decays but keep it at a + * reasonable floor to avoid funnies with rq->avg_idle. + */ + rq->max_idle_balance_cost = + max((u64)sysctl_sched_migration_cost, max_cost); + } + rcu_read_unlock(); + + /* + * next_balance will be updated only when there is a need. + * When the cpu is attached to null domain for ex, it will not be + * updated. + */ + if (likely(update_next_balance)) { + rq->next_balance = next_balance; + +#ifdef CONFIG_NO_HZ_COMMON + /* + * If this CPU has been elected to perform the nohz idle + * balance. Other idle CPUs have already rebalanced with + * nohz_idle_balance() and nohz.next_balance has been + * updated accordingly. This CPU is now running the idle load + * balance for itself and we need to update the + * nohz.next_balance accordingly. + */ + if ((idle == CPU_IDLE) && time_after(nohz.next_balance, rq->next_balance)) + nohz.next_balance = rq->next_balance; +#endif + } +} + static inline int on_null_domain(struct rq *rq) { return unlikely(!rcu_dereference_sched(rq->sd)); @@ -9373,124 +9491,6 @@ out: static inline void nohz_balancer_kick(struct rq *rq) { } #endif -static DEFINE_SPINLOCK(balancing); - -/* - * Scale the max load_balance interval with the number of CPUs in the system. - * This trades load-balance latency on larger machines for less cross talk. - */ -void update_max_interval(void) -{ - max_load_balance_interval = HZ*num_online_cpus()/10; -} - -/* - * It checks each scheduling domain to see if it is due to be balanced, - * and initiates a balancing operation if so. - * - * Balancing parameters are set up in init_sched_domains. - */ -static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle) -{ - int continue_balancing = 1; - int cpu = rq->cpu; - unsigned long interval; - struct sched_domain *sd; - /* Earliest time when we have to do rebalance again */ - unsigned long next_balance = jiffies + 60*HZ; - int update_next_balance = 0; - int need_serialize, need_decay = 0; - u64 max_cost = 0; - - rcu_read_lock(); - for_each_domain(cpu, sd) { - /* - * Decay the newidle max times here because this is a regular - * visit to all the domains. Decay ~1% per second. 
- */ - if (time_after(jiffies, sd->next_decay_max_lb_cost)) { - sd->max_newidle_lb_cost = - (sd->max_newidle_lb_cost * 253) / 256; - sd->next_decay_max_lb_cost = jiffies + HZ; - need_decay = 1; - } - max_cost += sd->max_newidle_lb_cost; - - if (!(sd->flags & SD_LOAD_BALANCE)) - continue; - - /* - * Stop the load balance at this level. There is another - * CPU in our sched group which is doing load balancing more - * actively. - */ - if (!continue_balancing) { - if (need_decay) - continue; - break; - } - - interval = get_sd_balance_interval(sd, idle != CPU_IDLE); - - need_serialize = sd->flags & SD_SERIALIZE; - if (need_serialize) { - if (!spin_trylock(&balancing)) - goto out; - } - - if (time_after_eq(jiffies, sd->last_balance + interval)) { - if (load_balance(cpu, rq, sd, idle, &continue_balancing)) { - /* - * The LBF_DST_PINNED logic could have changed - * env->dst_cpu, so we can't know our idle - * state even if we migrated tasks. Update it. - */ - idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE; - } - sd->last_balance = jiffies; - interval = get_sd_balance_interval(sd, idle != CPU_IDLE); - } - if (need_serialize) - spin_unlock(&balancing); -out: - if (time_after(next_balance, sd->last_balance + interval)) { - next_balance = sd->last_balance + interval; - update_next_balance = 1; - } - } - if (need_decay) { - /* - * Ensure the rq-wide value also decays but keep it at a - * reasonable floor to avoid funnies with rq->avg_idle. - */ - rq->max_idle_balance_cost = - max((u64)sysctl_sched_migration_cost, max_cost); - } - rcu_read_unlock(); - - /* - * next_balance will be updated only when there is a need. - * When the CPU is attached to null domain for ex, it will not be - * updated. - */ - if (likely(update_next_balance)) { - rq->next_balance = next_balance; - -#ifdef CONFIG_NO_HZ_COMMON - /* - * If this CPU has been elected to perform the nohz idle - * balance. Other idle CPUs have already rebalanced with - * nohz_idle_balance() and nohz.next_balance has been - * updated accordingly. This CPU is now running the idle load - * balance for itself and we need to update the - * nohz.next_balance accordingly. - */ - if ((idle == CPU_IDLE) && time_after(nohz.next_balance, rq->next_balance)) - nohz.next_balance = rq->next_balance; -#endif - } -} - #ifdef CONFIG_NO_HZ_COMMON /* * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the -- cgit v1.2.3 From dd707247ababb685ac4b8b2c6a7bf2923725e6ac Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 20 Feb 2018 10:59:45 +0100 Subject: sched/nohz: Merge CONFIG_NO_HZ_COMMON blocks Now that we have two back-to-back NO_HZ_COMMON blocks, merge them. Signed-off-by: Peter Zijlstra (Intel) Cc: Frederic Weisbecker Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 0da79d8a6a2c..d6767f533029 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -9487,11 +9487,7 @@ out: */ WRITE_ONCE(nohz.has_blocked, 1); } -#else -static inline void nohz_balancer_kick(struct rq *rq) { } -#endif -#ifdef CONFIG_NO_HZ_COMMON /* * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the * rebalancing for all the CPUs for whom scheduler ticks are stopped. 
@@ -9598,12 +9594,14 @@ abort: return true; } -#else +#else /* !CONFIG_NO_HZ_COMMON */ +static inline void nohz_balancer_kick(struct rq *rq) { } + static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { return false; } -#endif +#endif /* CONFIG_NO_HZ_COMMON */ /* * run_rebalance_domains is triggered when needed from the scheduler tick. -- cgit v1.2.3 From 47ea54121e46a685aa2320df8b0f71aaeedff23f Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 20 Feb 2018 11:45:47 +0100 Subject: sched/fair: Move idle_balance() We're going to want to call nohz_idle_balance() or parts thereof from idle_balance(). Since we already have a forward declaration of idle_balance() move it down such that it's below nohz_idle_balance() avoiding the need for a forward declaration for that. Signed-off-by: Peter Zijlstra (Intel) Cc: Frederic Weisbecker Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 228 ++++++++++++++++++++++++++-------------------------- 1 file changed, 114 insertions(+), 114 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index d6767f533029..058badcfa94b 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -8916,120 +8916,6 @@ update_next_balance(struct sched_domain *sd, unsigned long *next_balance) *next_balance = next; } -/* - * idle_balance is called by schedule() if this_cpu is about to become - * idle. Attempts to pull tasks from other CPUs. - */ -static int idle_balance(struct rq *this_rq, struct rq_flags *rf) -{ - unsigned long next_balance = jiffies + HZ; - int this_cpu = this_rq->cpu; - struct sched_domain *sd; - int pulled_task = 0; - u64 curr_cost = 0; - - /* - * We must set idle_stamp _before_ calling idle_balance(), such that we - * measure the duration of idle_balance() as idle time. - */ - this_rq->idle_stamp = rq_clock(this_rq); - - /* - * Do not pull tasks towards !active CPUs... - */ - if (!cpu_active(this_cpu)) - return 0; - - /* - * This is OK, because current is on_cpu, which avoids it being picked - * for load-balance and preemption/IRQs are still disabled avoiding - * further scheduler activity on it and we're being very careful to - * re-start the picking loop. - */ - rq_unpin_lock(this_rq, rf); - - if (this_rq->avg_idle < sysctl_sched_migration_cost || - !this_rq->rd->overload) { - rcu_read_lock(); - sd = rcu_dereference_check_sched_domain(this_rq->sd); - if (sd) - update_next_balance(sd, &next_balance); - rcu_read_unlock(); - - goto out; - } - - raw_spin_unlock(&this_rq->lock); - - update_blocked_averages(this_cpu); - rcu_read_lock(); - for_each_domain(this_cpu, sd) { - int continue_balancing = 1; - u64 t0, domain_cost; - - if (!(sd->flags & SD_LOAD_BALANCE)) - continue; - - if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) { - update_next_balance(sd, &next_balance); - break; - } - - if (sd->flags & SD_BALANCE_NEWIDLE) { - t0 = sched_clock_cpu(this_cpu); - - pulled_task = load_balance(this_cpu, this_rq, - sd, CPU_NEWLY_IDLE, - &continue_balancing); - - domain_cost = sched_clock_cpu(this_cpu) - t0; - if (domain_cost > sd->max_newidle_lb_cost) - sd->max_newidle_lb_cost = domain_cost; - - curr_cost += domain_cost; - } - - update_next_balance(sd, &next_balance); - - /* - * Stop searching for tasks to pull if there are - * now runnable tasks on this rq. 
- */ - if (pulled_task || this_rq->nr_running > 0) - break; - } - rcu_read_unlock(); - - raw_spin_lock(&this_rq->lock); - - if (curr_cost > this_rq->max_idle_balance_cost) - this_rq->max_idle_balance_cost = curr_cost; - - /* - * While browsing the domains, we released the rq lock, a task could - * have been enqueued in the meantime. Since we're not going idle, - * pretend we pulled a task. - */ - if (this_rq->cfs.h_nr_running && !pulled_task) - pulled_task = 1; - -out: - /* Move the next balance forward */ - if (time_after(this_rq->next_balance, next_balance)) - this_rq->next_balance = next_balance; - - /* Is there a task of a high priority class? */ - if (this_rq->nr_running != this_rq->cfs.h_nr_running) - pulled_task = -1; - - if (pulled_task) - this_rq->idle_stamp = 0; - - rq_repin_lock(this_rq, rf); - - return pulled_task; -} - /* * active_load_balance_cpu_stop is run by the CPU stopper. It pushes * running tasks off the busiest CPU onto idle CPUs. It requires at @@ -9603,6 +9489,120 @@ static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) } #endif /* CONFIG_NO_HZ_COMMON */ +/* + * idle_balance is called by schedule() if this_cpu is about to become + * idle. Attempts to pull tasks from other CPUs. + */ +static int idle_balance(struct rq *this_rq, struct rq_flags *rf) +{ + unsigned long next_balance = jiffies + HZ; + int this_cpu = this_rq->cpu; + struct sched_domain *sd; + int pulled_task = 0; + u64 curr_cost = 0; + + /* + * We must set idle_stamp _before_ calling idle_balance(), such that we + * measure the duration of idle_balance() as idle time. + */ + this_rq->idle_stamp = rq_clock(this_rq); + + /* + * Do not pull tasks towards !active CPUs... + */ + if (!cpu_active(this_cpu)) + return 0; + + /* + * This is OK, because current is on_cpu, which avoids it being picked + * for load-balance and preemption/IRQs are still disabled avoiding + * further scheduler activity on it and we're being very careful to + * re-start the picking loop. + */ + rq_unpin_lock(this_rq, rf); + + if (this_rq->avg_idle < sysctl_sched_migration_cost || + !this_rq->rd->overload) { + rcu_read_lock(); + sd = rcu_dereference_check_sched_domain(this_rq->sd); + if (sd) + update_next_balance(sd, &next_balance); + rcu_read_unlock(); + + goto out; + } + + raw_spin_unlock(&this_rq->lock); + + update_blocked_averages(this_cpu); + rcu_read_lock(); + for_each_domain(this_cpu, sd) { + int continue_balancing = 1; + u64 t0, domain_cost; + + if (!(sd->flags & SD_LOAD_BALANCE)) + continue; + + if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) { + update_next_balance(sd, &next_balance); + break; + } + + if (sd->flags & SD_BALANCE_NEWIDLE) { + t0 = sched_clock_cpu(this_cpu); + + pulled_task = load_balance(this_cpu, this_rq, + sd, CPU_NEWLY_IDLE, + &continue_balancing); + + domain_cost = sched_clock_cpu(this_cpu) - t0; + if (domain_cost > sd->max_newidle_lb_cost) + sd->max_newidle_lb_cost = domain_cost; + + curr_cost += domain_cost; + } + + update_next_balance(sd, &next_balance); + + /* + * Stop searching for tasks to pull if there are + * now runnable tasks on this rq. + */ + if (pulled_task || this_rq->nr_running > 0) + break; + } + rcu_read_unlock(); + + raw_spin_lock(&this_rq->lock); + + if (curr_cost > this_rq->max_idle_balance_cost) + this_rq->max_idle_balance_cost = curr_cost; + + /* + * While browsing the domains, we released the rq lock, a task could + * have been enqueued in the meantime. Since we're not going idle, + * pretend we pulled a task. 
+ */ + if (this_rq->cfs.h_nr_running && !pulled_task) + pulled_task = 1; + +out: + /* Move the next balance forward */ + if (time_after(this_rq->next_balance, next_balance)) + this_rq->next_balance = next_balance; + + /* Is there a task of a high priority class? */ + if (this_rq->nr_running != this_rq->cfs.h_nr_running) + pulled_task = -1; + + if (pulled_task) + this_rq->idle_stamp = 0; + + rq_repin_lock(this_rq, rf); + + return pulled_task; +} + /* * run_rebalance_domains is triggered when needed from the scheduler tick. * Also triggered for nohz idle balancing (with nohz_balancing_kick set). -- cgit v1.2.3 From 31e77c93e432dec79c7d90b888bbfc3652592741 Mon Sep 17 00:00:00 2001 From: Vincent Guittot Date: Wed, 14 Feb 2018 16:26:46 +0100 Subject: sched/fair: Update blocked load when newly idle When NEWLY_IDLE load balance is not triggered, we might need to update the blocked load anyway. We can kick an ilb so an idle CPU will take care of updating blocked load or we can try to update them locally before entering idle. In the latter case, we reuse part of the nohz_idle_balance. Signed-off-by: Vincent Guittot Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: brendan.jackman@arm.com Cc: dietmar.eggemann@arm.com Cc: morten.rasmussen@foss.arm.com Cc: valentin.schneider@arm.com Link: http://lkml.kernel.org/r/1518622006-16089-4-git-send-email-vincent.guittot@linaro.org Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 105 +++++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 87 insertions(+), 18 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 058badcfa94b..3582117e1580 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -9375,10 +9375,14 @@ out: } /* - * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the - * rebalancing for all the CPUs for whom scheduler ticks are stopped. + * Internal function that runs load balance for all idle cpus. The load balance + * can be a simple update of blocked load or a complete load balance with + * tasks movement depending of flags. + * The function returns false if the loop has stopped before running + * through all idle CPUs. 
*/ -static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) +static bool _nohz_idle_balance(struct rq *this_rq, unsigned int flags, + enum cpu_idle_type idle) { /* Earliest time when we have to do rebalance again */ unsigned long now = jiffies; @@ -9386,20 +9390,10 @@ static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) bool has_blocked_load = false; int update_next_balance = 0; int this_cpu = this_rq->cpu; - unsigned int flags; int balance_cpu; + int ret = false; struct rq *rq; - if (!(atomic_read(nohz_flags(this_cpu)) & NOHZ_KICK_MASK)) - return false; - - if (idle != CPU_IDLE) { - atomic_andnot(NOHZ_KICK_MASK, nohz_flags(this_cpu)); - return false; - } - - flags = atomic_fetch_andnot(NOHZ_KICK_MASK, nohz_flags(this_cpu)); - SCHED_WARN_ON((flags & NOHZ_KICK_MASK) == NOHZ_BALANCE_KICK); /* @@ -9443,10 +9437,10 @@ static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) if (time_after_eq(jiffies, rq->next_balance)) { struct rq_flags rf; - rq_lock_irq(rq, &rf); + rq_lock_irqsave(rq, &rf); update_rq_clock(rq); cpu_load_update_idle(rq); - rq_unlock_irq(rq, &rf); + rq_unlock_irqrestore(rq, &rf); if (flags & NOHZ_BALANCE_KICK) rebalance_domains(rq, CPU_IDLE); @@ -9458,13 +9452,21 @@ static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) } } - update_blocked_averages(this_cpu); + /* Newly idle CPU doesn't need an update */ + if (idle != CPU_NEWLY_IDLE) { + update_blocked_averages(this_cpu); + has_blocked_load |= this_rq->has_blocked_load; + } + if (flags & NOHZ_BALANCE_KICK) rebalance_domains(this_rq, CPU_IDLE); WRITE_ONCE(nohz.next_blocked, now + msecs_to_jiffies(LOAD_AVG_PERIOD)); + /* The full idle balance loop has been done */ + ret = true; + abort: /* There is still blocked load, enable periodic update */ if (has_blocked_load) @@ -9478,15 +9480,79 @@ abort: if (likely(update_next_balance)) nohz.next_balance = next_balance; + return ret; +} + +/* + * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the + * rebalancing for all the cpus for whom scheduler ticks are stopped. + */ +static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) +{ + int this_cpu = this_rq->cpu; + unsigned int flags; + + if (!(atomic_read(nohz_flags(this_cpu)) & NOHZ_KICK_MASK)) + return false; + + if (idle != CPU_IDLE) { + atomic_andnot(NOHZ_KICK_MASK, nohz_flags(this_cpu)); + return false; + } + + /* + * barrier, pairs with nohz_balance_enter_idle(), ensures ... + */ + flags = atomic_fetch_andnot(NOHZ_KICK_MASK, nohz_flags(this_cpu)); + if (!(flags & NOHZ_KICK_MASK)) + return false; + + _nohz_idle_balance(this_rq, flags, idle); + return true; } + +static void nohz_newidle_balance(struct rq *this_rq) +{ + int this_cpu = this_rq->cpu; + + /* + * This CPU doesn't want to be disturbed by scheduler + * housekeeping + */ + if (!housekeeping_cpu(this_cpu, HK_FLAG_SCHED)) + return; + + /* Will wake up very soon. No time for doing anything else*/ + if (this_rq->avg_idle < sysctl_sched_migration_cost) + return; + + /* Don't need to update blocked load of idle CPUs*/ + if (!READ_ONCE(nohz.has_blocked) || + time_before(jiffies, READ_ONCE(nohz.next_blocked))) + return; + + raw_spin_unlock(&this_rq->lock); + /* + * This CPU is going to be idle and blocked load of idle CPUs + * need to be updated. Run the ilb locally as it is a good + * candidate for ilb instead of waking up another idle CPU. + * Kick an normal ilb if we failed to do the update. 
+ */ + if (!_nohz_idle_balance(this_rq, NOHZ_STATS_KICK, CPU_NEWLY_IDLE)) + kick_ilb(NOHZ_STATS_KICK); + raw_spin_lock(&this_rq->lock); +} + #else /* !CONFIG_NO_HZ_COMMON */ static inline void nohz_balancer_kick(struct rq *rq) { } -static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) +static inline bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { return false; } + +static inline void nohz_newidle_balance(struct rq *this_rq) { } #endif /* CONFIG_NO_HZ_COMMON */ /* @@ -9523,12 +9589,15 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf) if (this_rq->avg_idle < sysctl_sched_migration_cost || !this_rq->rd->overload) { + rcu_read_lock(); sd = rcu_dereference_check_sched_domain(this_rq->sd); if (sd) update_next_balance(sd, &next_balance); rcu_read_unlock(); + nohz_newidle_balance(this_rq); + goto out; } -- cgit v1.2.3 From d17067e4487adc53bedb43681b3cb5a1714ff6ca Mon Sep 17 00:00:00 2001 From: gaurav jindal Date: Wed, 21 Feb 2018 18:24:07 +0530 Subject: sched/completions: Use bool in try_wait_for_completion() Since the return type of the function is bool, the internal 'ret' variable should be bool too. Signed-off-by: Gaurav Jindal Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20180221125407.GA14292@gmail.com Signed-off-by: Ingo Molnar --- kernel/sched/completion.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c index 5d2d56b0817a..e426b0cb9ac6 100644 --- a/kernel/sched/completion.c +++ b/kernel/sched/completion.c @@ -280,7 +280,7 @@ EXPORT_SYMBOL(wait_for_completion_killable_timeout); bool try_wait_for_completion(struct completion *x) { unsigned long flags; - int ret = 1; + bool ret = true; /* * Since x->done will need to be locked only @@ -289,11 +289,11 @@ bool try_wait_for_completion(struct completion *x) * return early in the blocking case. */ if (!READ_ONCE(x->done)) - return 0; + return false; spin_lock_irqsave(&x->wait.lock, flags); if (!x->done) - ret = 0; + ret = false; else if (x->done != UINT_MAX) x->done--; spin_unlock_irqrestore(&x->wait.lock, flags); -- cgit v1.2.3 From 7f65ea42eb00bc902f1c37a71e984e4f4064cfa9 Mon Sep 17 00:00:00 2001 From: Patrick Bellasi Date: Fri, 9 Mar 2018 09:52:42 +0000 Subject: sched/fair: Add util_est on top of PELT The util_avg signal computed by PELT is too variable for some use-cases. For example, a big task waking up after a long sleep period will have its utilization almost completely decayed. This introduces some latency before schedutil will be able to pick the best frequency to run a task. The same issue can affect task placement. Indeed, since the task utilization is already decayed at wakeup, when the task is enqueued in a CPU, this can result in a CPU running a big task as being temporarily represented as being almost empty. This leads to a race condition where other tasks can be potentially allocated on a CPU which just started to run a big task which slept for a relatively long period. Moreover, the PELT utilization of a task can be updated every [ms], thus making it a continuously changing value for certain longer running tasks. This means that the instantaneous PELT utilization of a RUNNING task is not really meaningful to properly support scheduler decisions. For all these reasons, a more stable signal can do a better job of representing the expected/estimated utilization of a task/cfs_rq. 
Such a signal can be easily created on top of PELT by still using it as an estimator which produces values to be aggregated on meaningful events. This patch adds a simple implementation of util_est, a new signal built on top of PELT's util_avg where: util_est(task) = max(task::util_avg, f(task::util_avg@dequeue)) This allows to remember how big a task has been reported by PELT in its previous activations via f(task::util_avg@dequeue), which is the new _task_util_est(struct task_struct*) function added by this patch. If a task should change its behavior and it runs longer in a new activation, after a certain time its util_est will just track the original PELT signal (i.e. task::util_avg). The estimated utilization of cfs_rq is defined only for root ones. That's because the only sensible consumer of this signal are the scheduler and schedutil when looking for the overall CPU utilization due to FAIR tasks. For this reason, the estimated utilization of a root cfs_rq is simply defined as: util_est(cfs_rq) = max(cfs_rq::util_avg, cfs_rq::util_est::enqueued) where: cfs_rq::util_est::enqueued = sum(_task_util_est(task)) for each RUNNABLE task on that root cfs_rq It's worth noting that the estimated utilization is tracked only for objects of interests, specifically: - Tasks: to better support tasks placement decisions - root cfs_rqs: to better support both tasks placement decisions as well as frequencies selection Signed-off-by: Patrick Bellasi Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Dietmar Eggemann Cc: Joel Fernandes Cc: Juri Lelli Cc: Linus Torvalds Cc: Morten Rasmussen Cc: Paul Turner Cc: Rafael J . Wysocki Cc: Steve Muckle Cc: Thomas Gleixner Cc: Todd Kjos Cc: Vincent Guittot Cc: Viresh Kumar Link: http://lkml.kernel.org/r/20180309095245.11071-2-patrick.bellasi@arm.com Signed-off-by: Ingo Molnar --- include/linux/sched.h | 29 ++++++++++++ kernel/sched/debug.c | 4 ++ kernel/sched/fair.c | 122 +++++++++++++++++++++++++++++++++++++++++++++--- kernel/sched/features.h | 5 ++ 4 files changed, 154 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/include/linux/sched.h b/include/linux/sched.h index 21b1168da951..f228c6033832 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -274,6 +274,34 @@ struct load_weight { u32 inv_weight; }; +/** + * struct util_est - Estimation utilization of FAIR tasks + * @enqueued: instantaneous estimated utilization of a task/cpu + * @ewma: the Exponential Weighted Moving Average (EWMA) + * utilization of a task + * + * Support data structure to track an Exponential Weighted Moving Average + * (EWMA) of a FAIR task's utilization. New samples are added to the moving + * average each time a task completes an activation. Sample's weight is chosen + * so that the EWMA will be relatively insensitive to transient changes to the + * task's workload. + * + * The enqueued attribute has a slightly different meaning for tasks and cpus: + * - task: the task's util_avg at last task dequeue time + * - cfs_rq: the sum of util_est.enqueued for each RUNNABLE task on that CPU + * Thus, the util_est.enqueued of a task represents the contribution on the + * estimated utilization of the CPU where that task is currently enqueued. + * + * Only for tasks we track a moving average of the past instantaneous + * estimated utilization. This allows to absorb sporadic drops in utilization + * of an otherwise almost periodic task. 
+ */ +struct util_est { + unsigned int enqueued; + unsigned int ewma; +#define UTIL_EST_WEIGHT_SHIFT 2 +}; + /* * The load_avg/util_avg accumulates an infinite geometric series * (see __update_load_avg() in kernel/sched/fair.c). @@ -335,6 +363,7 @@ struct sched_avg { unsigned long load_avg; unsigned long runnable_load_avg; unsigned long util_avg; + struct util_est util_est; }; struct sched_statistics { diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index 644d9a464380..332303be4beb 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -541,6 +541,8 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) cfs_rq->avg.runnable_load_avg); SEQ_printf(m, " .%-30s: %lu\n", "util_avg", cfs_rq->avg.util_avg); + SEQ_printf(m, " .%-30s: %u\n", "util_est_enqueued", + cfs_rq->avg.util_est.enqueued); SEQ_printf(m, " .%-30s: %ld\n", "removed.load_avg", cfs_rq->removed.load_avg); SEQ_printf(m, " .%-30s: %ld\n", "removed.util_avg", @@ -989,6 +991,8 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns, P(se.avg.runnable_load_avg); P(se.avg.util_avg); P(se.avg.last_update_time); + P(se.avg.util_est.ewma); + P(se.avg.util_est.enqueued); #endif P(policy); P(prio); diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 3582117e1580..22b59a7facd2 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -3873,6 +3873,113 @@ static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq) static int idle_balance(struct rq *this_rq, struct rq_flags *rf); +static inline unsigned long task_util(struct task_struct *p) +{ + return READ_ONCE(p->se.avg.util_avg); +} + +static inline unsigned long _task_util_est(struct task_struct *p) +{ + struct util_est ue = READ_ONCE(p->se.avg.util_est); + + return max(ue.ewma, ue.enqueued); +} + +static inline unsigned long task_util_est(struct task_struct *p) +{ + return max(task_util(p), _task_util_est(p)); +} + +static inline void util_est_enqueue(struct cfs_rq *cfs_rq, + struct task_struct *p) +{ + unsigned int enqueued; + + if (!sched_feat(UTIL_EST)) + return; + + /* Update root cfs_rq's estimated utilization */ + enqueued = cfs_rq->avg.util_est.enqueued; + enqueued += _task_util_est(p); + WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued); +} + +/* + * Check if a (signed) value is within a specified (unsigned) margin, + * based on the observation that: + * + * abs(x) < y := (unsigned)(x + y - 1) < (2 * y - 1) + * + * NOTE: this only works when value + maring < INT_MAX. + */ +static inline bool within_margin(int value, int margin) +{ + return ((unsigned int)(value + margin - 1) < (2 * margin - 1)); +} + +static void +util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep) +{ + long last_ewma_diff; + struct util_est ue; + + if (!sched_feat(UTIL_EST)) + return; + + /* + * Update root cfs_rq's estimated utilization + * + * If *p is the last task then the root cfs_rq's estimated utilization + * of a CPU is 0 by definition. + */ + ue.enqueued = 0; + if (cfs_rq->nr_running) { + ue.enqueued = cfs_rq->avg.util_est.enqueued; + ue.enqueued -= min_t(unsigned int, ue.enqueued, + _task_util_est(p)); + } + WRITE_ONCE(cfs_rq->avg.util_est.enqueued, ue.enqueued); + + /* + * Skip update of task's estimated utilization when the task has not + * yet completed an activation, e.g. being migrated. + */ + if (!task_sleep) + return; + + /* + * Skip update of task's estimated utilization when its EWMA is + * already ~1% close to its last activation value. 
+ */ + ue = p->se.avg.util_est; + ue.enqueued = task_util(p); + last_ewma_diff = ue.enqueued - ue.ewma; + if (within_margin(last_ewma_diff, (SCHED_CAPACITY_SCALE / 100))) + return; + + /* + * Update Task's estimated utilization + * + * When *p completes an activation we can consolidate another sample + * of the task size. This is done by storing the current PELT value + * as ue.enqueued and by using this value to update the Exponential + * Weighted Moving Average (EWMA): + * + * ewma(t) = w * task_util(p) + (1-w) * ewma(t-1) + * = w * task_util(p) + ewma(t-1) - w * ewma(t-1) + * = w * (task_util(p) - ewma(t-1)) + ewma(t-1) + * = w * ( last_ewma_diff ) + ewma(t-1) + * = w * (last_ewma_diff + ewma(t-1) / w) + * + * Where 'w' is the weight of new samples, which is configured to be + * 0.25, thus making w=1/4 ( >>= UTIL_EST_WEIGHT_SHIFT) + */ + ue.ewma <<= UTIL_EST_WEIGHT_SHIFT; + ue.ewma += last_ewma_diff; + ue.ewma >>= UTIL_EST_WEIGHT_SHIFT; + WRITE_ONCE(p->se.avg.util_est, ue); +} + #else /* CONFIG_SMP */ static inline int @@ -3902,6 +4009,13 @@ static inline int idle_balance(struct rq *rq, struct rq_flags *rf) return 0; } +static inline void +util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) {} + +static inline void +util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, + bool task_sleep) {} + #endif /* CONFIG_SMP */ static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se) @@ -5249,6 +5363,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) if (!se) add_nr_running(rq, 1); + util_est_enqueue(&rq->cfs, p); hrtick_update(rq); } @@ -5308,6 +5423,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) if (!se) sub_nr_running(rq, 1); + util_est_dequeue(&rq->cfs, p, task_sleep); hrtick_update(rq); } @@ -5835,7 +5951,6 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, return target; } -static inline unsigned long task_util(struct task_struct *p); static unsigned long cpu_util_wake(int cpu, struct task_struct *p); static unsigned long capacity_spare_wake(int cpu, struct task_struct *p) @@ -6351,11 +6466,6 @@ static unsigned long cpu_util(int cpu) return (util >= capacity) ? capacity : util; } -static inline unsigned long task_util(struct task_struct *p) -{ - return p->se.avg.util_avg; -} - /* * cpu_util_wake: Compute CPU utilization with any contributions from * the waking task p removed. diff --git a/kernel/sched/features.h b/kernel/sched/features.h index 9552fd5854bf..c459a4b61544 100644 --- a/kernel/sched/features.h +++ b/kernel/sched/features.h @@ -85,3 +85,8 @@ SCHED_FEAT(ATTACH_AGE_LOAD, true) SCHED_FEAT(WA_IDLE, true) SCHED_FEAT(WA_WEIGHT, true) SCHED_FEAT(WA_BIAS, true) + +/* + * UtilEstimation. Use estimated CPU utilization. + */ +SCHED_FEAT(UTIL_EST, false) -- cgit v1.2.3 From f9be3e5961c5554879a491961187472e923f5ee0 Mon Sep 17 00:00:00 2001 From: Patrick Bellasi Date: Fri, 9 Mar 2018 09:52:43 +0000 Subject: sched/fair: Use util_est in LB and WU paths When the scheduler looks at the CPU utilization, the current PELT value for a CPU is returned straight away. In certain scenarios this can have undesired side effects on task placement. For example, since the task utilization is decayed at wakeup time, when a long sleeping big task is enqueued it does not add immediately a significant contribution to the target CPU. As a result we generate a race condition where other tasks can be placed on the same CPU while it is still considered relatively empty. 
In order to reduce this kind of race conditions, this patch introduces the required support to integrate the usage of the CPU's estimated utilization in the wakeup path, via cpu_util_wake(), as well as in the load-balance path, via cpu_util() which is used by update_sg_lb_stats(). The estimated utilization of a CPU is defined to be the maximum between its PELT's utilization and the sum of the estimated utilization (at previous dequeue time) of all the tasks currently RUNNABLE on that CPU. This allows to properly represent the spare capacity of a CPU which, for example, has just got a big task running since a long sleep period. Signed-off-by: Patrick Bellasi Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Dietmar Eggemann Cc: Joel Fernandes Cc: Juri Lelli Cc: Linus Torvalds Cc: Morten Rasmussen Cc: Paul Turner Cc: Peter Zijlstra Cc: Rafael J . Wysocki Cc: Steve Muckle Cc: Thomas Gleixner Cc: Todd Kjos Cc: Vincent Guittot Cc: Viresh Kumar Link: http://lkml.kernel.org/r/20180309095245.11071-3-patrick.bellasi@arm.com Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 84 ++++++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 70 insertions(+), 14 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 22b59a7facd2..570b8d056282 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -6432,11 +6432,13 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) return target; } -/* - * cpu_util returns the amount of capacity of a CPU that is used by CFS - * tasks. The unit of the return value must be the one of capacity so we can - * compare the utilization with the capacity of the CPU that is available for - * CFS task (ie cpu_capacity). +/** + * Amount of capacity of a CPU that is (estimated to be) used by CFS tasks + * @cpu: the CPU to get the utilization of + * + * The unit of the return value must be the one of capacity so we can compare + * the utilization with the capacity of the CPU that is available for CFS task + * (ie cpu_capacity). * * cfs_rq.avg.util_avg is the sum of running time of runnable tasks plus the * recent utilization of currently non-runnable tasks on a CPU. It represents @@ -6447,6 +6449,14 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) * current capacity (capacity_curr <= capacity_orig) of the CPU because it is * the running time on this CPU scaled by capacity_curr. * + * The estimated utilization of a CPU is defined to be the maximum between its + * cfs_rq.avg.util_avg and the sum of the estimated utilization of the tasks + * currently RUNNABLE on that CPU. + * This allows to properly represent the expected utilization of a CPU which + * has just got a big task running since a long sleep period. At the same time + * however it preserves the benefits of the "blocked utilization" in + * describing the potential for other tasks waking up on the same CPU. + * * Nevertheless, cfs_rq.avg.util_avg can be higher than capacity_curr or even * higher than capacity_orig because of unfortunate rounding in * cfs.avg.util_avg or just after migrating tasks and new task wakeups until @@ -6457,13 +6467,21 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) * available capacity. We allow utilization to overshoot capacity_curr (but not * capacity_orig) as it useful for predicting the capacity required after task * migrations (scheduler-driven DVFS). 
+ * + * Return: the (estimated) utilization for the specified CPU */ -static unsigned long cpu_util(int cpu) +static inline unsigned long cpu_util(int cpu) { - unsigned long util = cpu_rq(cpu)->cfs.avg.util_avg; - unsigned long capacity = capacity_orig_of(cpu); + struct cfs_rq *cfs_rq; + unsigned int util; + + cfs_rq = &cpu_rq(cpu)->cfs; + util = READ_ONCE(cfs_rq->avg.util_avg); + + if (sched_feat(UTIL_EST)) + util = max(util, READ_ONCE(cfs_rq->avg.util_est.enqueued)); - return (util >= capacity) ? capacity : util; + return min_t(unsigned long, util, capacity_orig_of(cpu)); } /* @@ -6472,16 +6490,54 @@ static unsigned long cpu_util(int cpu) */ static unsigned long cpu_util_wake(int cpu, struct task_struct *p) { - unsigned long util, capacity; + struct cfs_rq *cfs_rq; + unsigned int util; /* Task has no contribution or is new */ - if (cpu != task_cpu(p) || !p->se.avg.last_update_time) + if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) return cpu_util(cpu); - capacity = capacity_orig_of(cpu); - util = max_t(long, cpu_rq(cpu)->cfs.avg.util_avg - task_util(p), 0); + cfs_rq = &cpu_rq(cpu)->cfs; + util = READ_ONCE(cfs_rq->avg.util_avg); + + /* Discount task's blocked util from CPU's util */ + util -= min_t(unsigned int, util, task_util(p)); - return (util >= capacity) ? capacity : util; + /* + * Covered cases: + * + * a) if *p is the only task sleeping on this CPU, then: + * cpu_util (== task_util) > util_est (== 0) + * and thus we return: + * cpu_util_wake = (cpu_util - task_util) = 0 + * + * b) if other tasks are SLEEPING on this CPU, which is now exiting + * IDLE, then: + * cpu_util >= task_util + * cpu_util > util_est (== 0) + * and thus we discount *p's blocked utilization to return: + * cpu_util_wake = (cpu_util - task_util) >= 0 + * + * c) if other tasks are RUNNABLE on that CPU and + * util_est > cpu_util + * then we use util_est since it returns a more restrictive + * estimation of the spare capacity on that CPU, by just + * considering the expected utilization of tasks already + * runnable on that CPU. + * + * Cases a) and b) are covered by the above code, while case c) is + * covered by the following code when estimated utilization is + * enabled. + */ + if (sched_feat(UTIL_EST)) + util = max(util, READ_ONCE(cfs_rq->avg.util_est.enqueued)); + + /* + * Utilization (estimated) can exceed the CPU capacity, thus let's + * clamp to the maximum CPU capacity to ensure consistency with + * the cpu_util call. + */ + return min_t(unsigned long, util, capacity_orig_of(cpu)); } /* -- cgit v1.2.3 From a07630b8b2c16f82fd5b71d890079f4dd7599c1d Mon Sep 17 00:00:00 2001 From: Patrick Bellasi Date: Fri, 9 Mar 2018 09:52:44 +0000 Subject: sched/cpufreq/schedutil: Use util_est for OPP selection When schedutil looks at the CPU utilization, the current PELT value for that CPU is returned straight away. In certain scenarios this can have undesired side effects and delays on frequency selection. For example, since the task utilization is decayed at wakeup time, a long sleeping big task newly enqueued does not add immediately a significant contribution to the target CPU. This introduces some latency before schedutil will be able to detect the best frequency required by that task. Moreover, the PELT signal build-up time is a function of the current frequency, because of the scale invariant load tracking support. 
Thus, starting from a lower frequency, the utilization build-up time will increase even more and further delays the selection of the actual frequency which better serves the task requirements. In order to reduce these kind of latencies, we integrate the usage of the CPU's estimated utilization in the sugov_get_util function. This allows to properly consider the expected utilization of a CPU which, for example, has just got a big task running after a long sleep period. Ultimately this allows to select the best frequency to run a task right after its wake-up. Signed-off-by: Patrick Bellasi Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Dietmar Eggemann Acked-by: Rafael J. Wysocki Acked-by: Viresh Kumar Cc: Joel Fernandes Cc: Juri Lelli Cc: Linus Torvalds Cc: Morten Rasmussen Cc: Paul Turner Cc: Peter Zijlstra Cc: Steve Muckle Cc: Thomas Gleixner Cc: Todd Kjos Cc: Vincent Guittot Link: http://lkml.kernel.org/r/20180309095245.11071-4-patrick.bellasi@arm.com Signed-off-by: Ingo Molnar --- kernel/sched/sched.h | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 22909ffc04fb..c3deaee7a7a2 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -2163,6 +2163,13 @@ static inline unsigned long cpu_util_dl(struct rq *rq) static inline unsigned long cpu_util_cfs(struct rq *rq) { - return rq->cfs.avg.util_avg; + unsigned long util = READ_ONCE(rq->cfs.avg.util_avg); + + if (sched_feat(UTIL_EST)) { + util = max_t(unsigned long, util, + READ_ONCE(rq->cfs.avg.util_est.enqueued)); + } + + return util; } #endif -- cgit v1.2.3 From d519329f72a6f36bc4f2b85452640cfe583b4f81 Mon Sep 17 00:00:00 2001 From: Patrick Bellasi Date: Fri, 9 Mar 2018 09:52:45 +0000 Subject: sched/fair: Update util_est only on util_avg updates The estimated utilization of a task is currently updated every time the task is dequeued. However, to keep overheads under control, PELT signals are effectively updated at maximum once every 1ms. Thus, for really short running tasks, it can happen that their util_avg value has not been updates since their last enqueue. If such tasks are also frequently running tasks (e.g. the kind of workload generated by hackbench) it can also happen that their util_avg is updated only every few activations. This means that updating util_est at every dequeue potentially introduces not necessary overheads and it's also conceptually wrong if the util_avg signal has never been updated during a task activation. Let's introduce a throttling mechanism on task's util_est updates to sync them with util_avg updates. To make the solution memory efficient, both in terms of space and load/store operations, we encode a synchronization flag into the LSB of util_est.enqueued. This makes util_est an even values only metric, which is still considered good enough for its purpose. The synchronization bit is (re)set by __update_load_avg_se() once the PELT signal of a task has been updated during its last activation. Such a throttling mechanism allows to keep under control util_est overheads in the wakeup hot path, thus making it a suitable mechanism which can be enabled also on high-intensity workload systems. Thus, this now switches on by default the estimation utilization scheduler feature. Suggested-by: Chris Redpath Signed-off-by: Patrick Bellasi Signed-off-by: Peter Zijlstra (Intel) Cc: Dietmar Eggemann Cc: Joel Fernandes Cc: Juri Lelli Cc: Linus Torvalds Cc: Morten Rasmussen Cc: Paul Turner Cc: Peter Zijlstra Cc: Rafael J . 
Wysocki Cc: Steve Muckle Cc: Thomas Gleixner Cc: Todd Kjos Cc: Vincent Guittot Cc: Viresh Kumar Link: http://lkml.kernel.org/r/20180309095245.11071-5-patrick.bellasi@arm.com Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 42 ++++++++++++++++++++++++++++++++++++++---- kernel/sched/features.h | 2 +- 2 files changed, 39 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 570b8d056282..0951d1c58d2f 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -3242,6 +3242,32 @@ ___update_load_avg(struct sched_avg *sa, unsigned long load, unsigned long runna sa->util_avg = sa->util_sum / divider; } +/* + * When a task is dequeued, its estimated utilization should not be update if + * its util_avg has not been updated at least once. + * This flag is used to synchronize util_avg updates with util_est updates. + * We map this information into the LSB bit of the utilization saved at + * dequeue time (i.e. util_est.dequeued). + */ +#define UTIL_AVG_UNCHANGED 0x1 + +static inline void cfs_se_util_change(struct sched_avg *avg) +{ + unsigned int enqueued; + + if (!sched_feat(UTIL_EST)) + return; + + /* Avoid store if the flag has been already set */ + enqueued = avg->util_est.enqueued; + if (!(enqueued & UTIL_AVG_UNCHANGED)) + return; + + /* Reset flag to report util_avg has been updated */ + enqueued &= ~UTIL_AVG_UNCHANGED; + WRITE_ONCE(avg->util_est.enqueued, enqueued); +} + /* * sched_entity: * @@ -3293,6 +3319,7 @@ __update_load_avg_se(u64 now, int cpu, struct cfs_rq *cfs_rq, struct sched_entit cfs_rq->curr == se)) { ___update_load_avg(&se->avg, se_weight(se), se_runnable(se)); + cfs_se_util_change(&se->avg); return 1; } @@ -3900,7 +3927,7 @@ static inline void util_est_enqueue(struct cfs_rq *cfs_rq, /* Update root cfs_rq's estimated utilization */ enqueued = cfs_rq->avg.util_est.enqueued; - enqueued += _task_util_est(p); + enqueued += (_task_util_est(p) | UTIL_AVG_UNCHANGED); WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued); } @@ -3936,7 +3963,7 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep) if (cfs_rq->nr_running) { ue.enqueued = cfs_rq->avg.util_est.enqueued; ue.enqueued -= min_t(unsigned int, ue.enqueued, - _task_util_est(p)); + (_task_util_est(p) | UTIL_AVG_UNCHANGED)); } WRITE_ONCE(cfs_rq->avg.util_est.enqueued, ue.enqueued); @@ -3947,12 +3974,19 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep) if (!task_sleep) return; + /* + * If the PELT values haven't changed since enqueue time, + * skip the util_est update. + */ + ue = p->se.avg.util_est; + if (ue.enqueued & UTIL_AVG_UNCHANGED) + return; + /* * Skip update of task's estimated utilization when its EWMA is * already ~1% close to its last activation value. */ - ue = p->se.avg.util_est; - ue.enqueued = task_util(p); + ue.enqueued = (task_util(p) | UTIL_AVG_UNCHANGED); last_ewma_diff = ue.enqueued - ue.ewma; if (within_margin(last_ewma_diff, (SCHED_CAPACITY_SCALE / 100))) return; diff --git a/kernel/sched/features.h b/kernel/sched/features.h index c459a4b61544..85ae8488039c 100644 --- a/kernel/sched/features.h +++ b/kernel/sched/features.h @@ -89,4 +89,4 @@ SCHED_FEAT(WA_BIAS, true) /* * UtilEstimation. Use estimated CPU utilization. 
*/ -SCHED_FEAT(UTIL_EST, false) +SCHED_FEAT(UTIL_EST, true) -- cgit v1.2.3 From e97a90f7069b740575bcb1dae86596e0484b8957 Mon Sep 17 00:00:00 2001 From: Claudio Scordino Date: Tue, 13 Mar 2018 11:35:40 +0100 Subject: sched/cpufreq: Rate limits for SCHED_DEADLINE When the SCHED_DEADLINE scheduling class increases the CPU utilization, it should not wait for the rate limit, otherwise it may miss some deadline. Tests using rt-app on Exynos5422 with up to 10 SCHED_DEADLINE tasks have shown reductions of even 10% of deadline misses with a negligible increase of energy consumption (measured through Baylibre Cape). Signed-off-by: Claudio Scordino Signed-off-by: Thomas Gleixner Reviewed-by: Rafael J. Wysocki Acked-by: Viresh Kumar Cc: Juri Lelli Cc: Joel Fernandes Cc: Vincent Guittot Cc: linux-pm@vger.kernel.org Cc: Peter Zijlstra Cc: Morten Rasmussen Cc: Patrick Bellasi Cc: Todd Kjos Cc: Dietmar Eggemann Link: https://lkml.kernel.org/r/1520937340-2755-1-git-send-email-claudio@evidence.eu.com --- kernel/sched/cpufreq_schedutil.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'kernel') diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index 89fe78ecb88c..2b124811947d 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c @@ -267,6 +267,16 @@ static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; } #endif /* CONFIG_NO_HZ_COMMON */ +/* + * Make sugov_should_update_freq() ignore the rate limit when DL + * has increased the utilization. + */ +static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy) +{ + if (cpu_util_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->util_dl) + sg_policy->need_freq_update = true; +} + static void sugov_update_single(struct update_util_data *hook, u64 time, unsigned int flags) { @@ -279,6 +289,8 @@ static void sugov_update_single(struct update_util_data *hook, u64 time, sugov_set_iowait_boost(sg_cpu, time, flags); sg_cpu->last_update = time; + ignore_dl_rate_limit(sg_cpu, sg_policy); + if (!sugov_should_update_freq(sg_policy, time)) return; @@ -356,6 +368,8 @@ sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags) sugov_set_iowait_boost(sg_cpu, time, flags); sg_cpu->last_update = time; + ignore_dl_rate_limit(sg_cpu, sg_policy); + if (sugov_should_update_freq(sg_policy, time)) { next_f = sugov_next_freq_shared(sg_cpu, time); sugov_update_commit(sg_policy, time, next_f); -- cgit v1.2.3 From b720342849fe685310fca01748a32730a6eca5aa Mon Sep 17 00:00:00 2001 From: Davidlohr Bueso Date: Mon, 26 Mar 2018 14:09:26 -0700 Subject: sched/core: Update preempt_notifier_key to modern API No changes in refcount semantics, use DEFINE_STATIC_KEY_FALSE() for initialization and replace: static_key_slow_inc|dec() => static_branch_inc|dec() static_key_false() => static_branch_unlikely() Signed-off-by: Davidlohr Bueso Cc: Davidlohr Bueso Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: akpm@linux-foundation.org Link: http://lkml.kernel.org/r/20180326210929.5244-4-dave@stgolabs.net Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/core.c b/kernel/sched/core.c index b249adbf2a48..de440456f15c 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2462,17 +2462,17 @@ void wake_up_new_task(struct task_struct *p) #ifdef CONFIG_PREEMPT_NOTIFIERS -static struct 
static_key preempt_notifier_key = STATIC_KEY_INIT_FALSE; +static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key); void preempt_notifier_inc(void) { - static_key_slow_inc(&preempt_notifier_key); + static_branch_inc(&preempt_notifier_key); } EXPORT_SYMBOL_GPL(preempt_notifier_inc); void preempt_notifier_dec(void) { - static_key_slow_dec(&preempt_notifier_key); + static_branch_dec(&preempt_notifier_key); } EXPORT_SYMBOL_GPL(preempt_notifier_dec); @@ -2482,7 +2482,7 @@ EXPORT_SYMBOL_GPL(preempt_notifier_dec); */ void preempt_notifier_register(struct preempt_notifier *notifier) { - if (!static_key_false(&preempt_notifier_key)) + if (!static_branch_unlikely(&preempt_notifier_key)) WARN(1, "registering preempt_notifier while notifiers disabled\n"); hlist_add_head(¬ifier->link, ¤t->preempt_notifiers); @@ -2511,7 +2511,7 @@ static void __fire_sched_in_preempt_notifiers(struct task_struct *curr) static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr) { - if (static_key_false(&preempt_notifier_key)) + if (static_branch_unlikely(&preempt_notifier_key)) __fire_sched_in_preempt_notifiers(curr); } @@ -2529,7 +2529,7 @@ static __always_inline void fire_sched_out_preempt_notifiers(struct task_struct *curr, struct task_struct *next) { - if (static_key_false(&preempt_notifier_key)) + if (static_branch_unlikely(&preempt_notifier_key)) __fire_sched_out_preempt_notifiers(curr, next); } -- cgit v1.2.3
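For readers unfamiliar with the newer jump-label API used in the last commit, here is a minimal kernel-style sketch of the pattern; the key and functions are toy names, not the scheduler's preempt_notifier_key:

#include <linux/jump_label.h>
#include <linux/printk.h>

/* Key starts disabled; the branch below is patched at runtime. */
static DEFINE_STATIC_KEY_FALSE(demo_key);

static void demo_register_user(void)
{
	static_branch_inc(&demo_key);		/* replaces static_key_slow_inc()  */
}

static void demo_unregister_user(void)
{
	static_branch_dec(&demo_key);		/* replaces static_key_slow_dec()  */
}

static void demo_hot_path(void)
{
	if (static_branch_unlikely(&demo_key))	/* replaces static_key_false()     */
		pr_info("demo feature enabled\n");
}

The static_branch_*() helpers encode the expected polarity in their names, which is what makes the conversion in the commit above a mechanical one.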