author     Linus Torvalds <torvalds@linux-foundation.org>   2023-02-21 04:41:08 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>   2023-02-21 04:41:08 +0300
commit     1f2d9ffc7a5f916935749ffc6e93fb33bfe94d2f (patch)
tree       a5dabaa924d50867cbe347e20a7643b2850f11c0 /tools/testing/selftests/rseq/basic_percpu_ops_test.c
parent     a2f0e7eee1344eb9f91b22bc72d9eb0a52b849c9 (diff)
parent     7c4a5b89a0b5a57a64b601775b296abf77a9fe97 (diff)
Merge tag 'sched-core-2023-02-20' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler updates from Ingo Molnar:
- Improve the scalability of the CFS bandwidth unthrottling logic with
a large number of CPUs.
- Fix & rework various cpuidle routines, simplify interaction with the
generic scheduler code. Add __cpuidle methods as noinstr to objtool's
noinstr detection and fix boatloads of cpuidle bugs & quirks.
- Add new ABI: introduce MEMBARRIER_CMD_GET_REGISTRATIONS, to query
previously issued registrations (see the usage sketch after this list).
- Limit scheduler slice duration to the sysctl_sched_latency period, to
improve scheduling granularity with a large number of SCHED_IDLE
tasks.
- Debuggability enhancement on sys_exit(): warn about disabled IRQs,
but also enable them to prevent a cascade of followup problems and
repeat warnings.
- Fix the rescheduling logic in prio_changed_dl().
- Micro-optimize cpufreq and sched-util methods.
- Micro-optimize ttwu_runnable()
- Micro-optimize the idle-scanning in update_numa_stats(),
select_idle_capacity() and steal_cookie_task().
- Update the RSEQ code & self-tests
- Constify various scheduler methods
- Remove unused methods
- Refine __init tags
- Documentation updates
- Misc other cleanups, fixes
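As referenced above, here is a minimal usage sketch of the new MEMBARRIER_CMD_GET_REGISTRATIONS command. It is not part of this merge; it is written against the membarrier(2) raw-syscall interface. The sys_membarrier() wrapper name is local to this sketch, and the (1 << 9) fallback value is an assumption for uapi headers that do not yet define the new command.

/* Hedged sketch: register one membarrier command, then query which
 * registrations the kernel has recorded for this process. */
#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/membarrier.h>

#ifndef MEMBARRIER_CMD_GET_REGISTRATIONS
/* Assumed value; take it from <linux/membarrier.h> once available. */
# define MEMBARRIER_CMD_GET_REGISTRATIONS	(1 << 9)
#endif

/* Thin wrapper: glibc has no membarrier() wrapper, so use syscall(). */
static int sys_membarrier(int cmd, unsigned int flags, int cpu_id)
{
	return syscall(__NR_membarrier, cmd, flags, cpu_id);
}

int main(void)
{
	int mask;

	if (sys_membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0, 0) < 0) {
		perror("membarrier register");
		return 1;
	}
	/* Expected to return a bitmask of previously issued
	 * MEMBARRIER_CMD_REGISTER_* commands. */
	mask = sys_membarrier(MEMBARRIER_CMD_GET_REGISTRATIONS, 0, 0);
	if (mask < 0) {
		perror("membarrier get registrations");
		return 1;
	}
	printf("private expedited registered: %s\n",
	       (mask & MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED) ? "yes" : "no");
	return 0;
}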
* tag 'sched-core-2023-02-20' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (110 commits)
sched/rt: pick_next_rt_entity(): check list_entry
sched/deadline: Add more reschedule cases to prio_changed_dl()
sched/fair: sanitize vruntime of entity being placed
sched/fair: Remove capacity inversion detection
sched/fair: unlink misfit task from cpu overutilized
objtool: mem*() are not uaccess safe
cpuidle: Fix poll_idle() noinstr annotation
sched/clock: Make local_clock() noinstr
sched/clock/x86: Mark sched_clock() noinstr
x86/pvclock: Improve atomic update of last_value in pvclock_clocksource_read()
x86/atomics: Always inline arch_atomic64*()
cpuidle: tracing, preempt: Squash _rcuidle tracing
cpuidle: tracing: Warn about !rcu_is_watching()
cpuidle: lib/bug: Disable rcu_is_watching() during WARN/BUG
cpuidle: drivers: firmware: psci: Dont instrument suspend code
KVM: selftests: Fix build of rseq test
exit: Detect and fix irq disabled state in oops
cpuidle, arm64: Fix the ARM64 cpuidle logic
cpuidle: mvebu: Fix duplicate flags assignment
sched/fair: Limit sched slice duration
...
Diffstat (limited to 'tools/testing/selftests/rseq/basic_percpu_ops_test.c')
-rw-r--r--  tools/testing/selftests/rseq/basic_percpu_ops_test.c  46
1 file changed, 39 insertions, 7 deletions
diff --git a/tools/testing/selftests/rseq/basic_percpu_ops_test.c b/tools/testing/selftests/rseq/basic_percpu_ops_test.c
index 517756afc2a4..887542961968 100644
--- a/tools/testing/selftests/rseq/basic_percpu_ops_test.c
+++ b/tools/testing/selftests/rseq/basic_percpu_ops_test.c
@@ -12,6 +12,32 @@
 #include "../kselftest.h"
 #include "rseq.h"
 
+#ifdef BUILDOPT_RSEQ_PERCPU_MM_CID
+# define RSEQ_PERCPU	RSEQ_PERCPU_MM_CID
+static
+int get_current_cpu_id(void)
+{
+	return rseq_current_mm_cid();
+}
+static
+bool rseq_validate_cpu_id(void)
+{
+	return rseq_mm_cid_available();
+}
+#else
+# define RSEQ_PERCPU	RSEQ_PERCPU_CPU_ID
+static
+int get_current_cpu_id(void)
+{
+	return rseq_cpu_start();
+}
+static
+bool rseq_validate_cpu_id(void)
+{
+	return rseq_current_cpu_raw() >= 0;
+}
+#endif
+
 struct percpu_lock_entry {
 	intptr_t v;
 } __attribute__((aligned(128)));
@@ -51,9 +77,9 @@ int rseq_this_cpu_lock(struct percpu_lock *lock)
 	for (;;) {
 		int ret;
 
-		cpu = rseq_cpu_start();
-		ret = rseq_cmpeqv_storev(&lock->c[cpu].v,
-					 0, 1, cpu);
+		cpu = get_current_cpu_id();
+		ret = rseq_cmpeqv_storev(RSEQ_MO_RELAXED, RSEQ_PERCPU,
+					 &lock->c[cpu].v, 0, 1, cpu);
 		if (rseq_likely(!ret))
 			break;
 		/* Retry if comparison fails or rseq aborts. */
@@ -141,13 +167,14 @@ void this_cpu_list_push(struct percpu_list *list,
 		intptr_t *targetptr, newval, expect;
 		int ret;
 
-		cpu = rseq_cpu_start();
+		cpu = get_current_cpu_id();
 		/* Load list->c[cpu].head with single-copy atomicity. */
 		expect = (intptr_t)RSEQ_READ_ONCE(list->c[cpu].head);
 		newval = (intptr_t)node;
 		targetptr = (intptr_t *)&list->c[cpu].head;
 		node->next = (struct percpu_list_node *)expect;
-		ret = rseq_cmpeqv_storev(targetptr, expect, newval, cpu);
+		ret = rseq_cmpeqv_storev(RSEQ_MO_RELAXED, RSEQ_PERCPU,
+					 targetptr, expect, newval, cpu);
 		if (rseq_likely(!ret))
 			break;
 		/* Retry if comparison fails or rseq aborts. */
@@ -170,12 +197,13 @@ struct percpu_list_node *this_cpu_list_pop(struct percpu_list *list,
 	long offset;
 	int ret, cpu;
 
-	cpu = rseq_cpu_start();
+	cpu = get_current_cpu_id();
 	targetptr = (intptr_t *)&list->c[cpu].head;
 	expectnot = (intptr_t)NULL;
 	offset = offsetof(struct percpu_list_node, next);
 	load = (intptr_t *)&head;
-	ret = rseq_cmpnev_storeoffp_load(targetptr, expectnot,
+	ret = rseq_cmpnev_storeoffp_load(RSEQ_MO_RELAXED, RSEQ_PERCPU,
+					 targetptr, expectnot,
 					 offset, load, cpu);
 	if (rseq_likely(!ret)) {
 		if (_cpu)
@@ -295,6 +323,10 @@ int main(int argc, char **argv)
 			errno, strerror(errno));
 		goto error;
 	}
+	if (!rseq_validate_cpu_id()) {
+		fprintf(stderr, "Error: cpu id getter unavailable\n");
+		goto error;
+	}
 	printf("spinlock\n");
 	test_percpu_spinlock();
 	printf("percpu_list\n");
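For context, the pattern the updated test exercises is: fetch the current slot index (the CPU number, or the mm_cid when BUILDOPT_RSEQ_PERCPU_MM_CID is set), attempt a compare-and-store that the kernel aborts if the thread migrates or is preempted, and retry on failure. Below is a minimal, hypothetical per-CPU counter sketch in the same style, written against the selftests' rseq.h helpers used above and assuming a get_current_cpu_id() helper like the one this patch adds; the struct, array bound, and function name are illustrative, not part of the patch.

#include <stdint.h>

#include "rseq.h"	/* selftests/rseq helper header, as in the test above */

struct percpu_counter_entry {
	intptr_t count;
} __attribute__((aligned(128)));	/* avoid false sharing between slots */

/* One slot per possible index; 256 is an arbitrary illustrative bound. */
static struct percpu_counter_entry counters[256];

/* Increment the current CPU's (or mm_cid's) slot.  The calling thread
 * must already be registered via rseq_register_current_thread(). */
static void this_cpu_count_inc(void)
{
	for (;;) {
		int cpu = get_current_cpu_id();
		intptr_t old = RSEQ_READ_ONCE(counters[cpu].count);
		int ret = rseq_cmpeqv_storev(RSEQ_MO_RELAXED, RSEQ_PERCPU,
					     &counters[cpu].count,
					     old, old + 1, cpu);

		if (rseq_likely(!ret))
			break;
		/* Retry if the comparison fails or the rseq critical
		 * section aborts (migration, preemption, signal). */
	}
}

The compare-and-store form is used here only because it mirrors the calls in the diff above; the rseq selftests also ship simpler add-style helpers for plain counter increments.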