author	Linus Torvalds <torvalds@linux-foundation.org>	2024-05-22 01:29:01 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2024-05-22 01:29:01 +0300
commit	4865a27c66fda6a32511ec5492f4bbec437f512d (patch)
tree	dae4d82641bb45bc97735799dd7f0aac88b34778 /kernel
parent	b6394d6f715919c053c1450ef0d7c5e517b53764 (diff)
parent	5671dca241b9a2f4ecf88d8e992041cfb580e0a5 (diff)
download	linux-4865a27c66fda6a32511ec5492f4bbec437f512d.tar.xz
Merge tag 'bitmap-for-6.10v2' of https://github.com/norov/linux
Pull bitmap updates from Yury Norov:

 - topology_span_sane() optimization from Kyle Meyer

 - fns() rework from Kuan-Wei Chiu (used in cpumask_local_spread() and
   other places)

 - headers cleanup from Andy

 - add a MAINTAINERS record for bitops API

* tag 'bitmap-for-6.10v2' of https://github.com/norov/linux:
  usercopy: Don't use "proxy" headers
  bitops: Move aligned_byte_mask() to wordpart.h
  MAINTAINERS: add BITOPS API record
  bitmap: relax find_nth_bit() limitation on return value
  lib: make test_bitops compilable into the kernel image
  bitops: Optimize fns() for improved performance
  lib/test_bitops: Add benchmark test for fns()
  Compiler Attributes: Add __always_used macro
  sched/topology: Optimize topology_span_sane()
  cpumask: Add for_each_cpu_from()
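For reference, for_each_cpu_from() (added by the last patch in the list and used in the topology.c diff below) continues a cpumask walk from the iterator's current value rather than restarting at CPU 0, as for_each_cpu() does. A minimal userspace sketch of the semantics, using a word-sized mask and a hypothetical next_bit_from() helper in place of the kernel's cpumask machinery (assumes a 64-bit unsigned long):

	#include <stdio.h>

	/* Hypothetical stand-in for the kernel's find_next_bit(): return
	 * the index of the first set bit at or above 'from', or 64 if none. */
	static unsigned int next_bit_from(unsigned long mask, unsigned int from)
	{
		if (from >= 64)
			return 64;
		mask &= ~0UL << from;		/* ignore bits below 'from' */
		return mask ? (unsigned int)__builtin_ctzl(mask) : 64;
	}

	/* Sketch of for_each_cpu_from(): resume iteration at the current
	 * value of 'bit' instead of restarting from bit 0. */
	#define for_each_bit_from(bit, mask)				\
		for ((bit) = next_bit_from((mask), (bit)); (bit) < 64;	\
		     (bit) = next_bit_from((mask), (bit) + 1))

	int main(void)
	{
		unsigned long cpu_map = 0xb6;	/* bits 1, 2, 4, 5 and 7 set */
		unsigned int cpu = 4, i = cpu + 1;

		for_each_bit_from(i, cpu_map)	/* visits 5 and 7 only */
			printf("cpu %u\n", i);
		return 0;
	}

Started from i = cpu + 1 = 5, the loop prints "cpu 5" and "cpu 7": CPUs at or below 4 are never visited.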
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched/topology.c | 6 ++----
1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 329c82faca9b..a6994a1fcc90 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -2353,7 +2353,7 @@ static struct sched_domain *build_sched_domain(struct sched_domain_topology_leve
 static bool topology_span_sane(struct sched_domain_topology_level *tl,
 			       const struct cpumask *cpu_map, int cpu)
 {
-	int i;
+	int i = cpu + 1;
 
 	/* NUMA levels are allowed to overlap */
 	if (tl->flags & SDTL_OVERLAP)
@@ -2365,9 +2365,7 @@ static bool topology_span_sane(struct sched_domain_topology_level *tl,
 	 * breaking the sched_group lists - i.e. a later get_group() pass
 	 * breaks the linking done for an earlier span.
 	 */
-	for_each_cpu(i, cpu_map) {
-		if (i == cpu)
-			continue;
+	for_each_cpu_from(i, cpu_map) {
 		/*
 		 * We should 'and' all those masks with 'cpu_map' to exactly
 		 * match the topology we're about to build, but that can only
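A note on why the change above is safe (my reading of the series; the per-patch changelog is not shown on this page): the span comparison is symmetric, and topology_span_sane() is invoked once for every CPU in cpu_map, so any pair (i, cpu) with i < cpu was already examined when the earlier CPU was checked. Starting the walk at i = cpu + 1 with for_each_cpu_from() therefore still covers every unordered pair while roughly halving the comparisons, and it drops the per-iteration i == cpu test. A toy illustration of the pair coverage:

	#include <stdio.h>

	#define NR_CPUS 4	/* toy value for illustration */

	int main(void)
	{
		/* Each unordered pair (cpu, i) is printed exactly once,
		 * even though the inner loop never looks below cpu + 1 -
		 * the same argument that lets topology_span_sane() skip
		 * CPUs already handled by earlier invocations. */
		for (int cpu = 0; cpu < NR_CPUS; cpu++)
			for (int i = cpu + 1; i < NR_CPUS; i++)
				printf("compare spans of CPU%d and CPU%d\n", cpu, i);
		return 0;
	}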