From 03d44ee80eac980a869ed3d5637ed85de6fb957f Mon Sep 17 00:00:00 2001
From: Rohan McLure
Date: Wed, 10 May 2023 13:31:07 +1000
Subject: powerpc: qspinlock: Mark accesses to qnode lock checks

The powerpc implementation of qspinlocks will both poll and spin on the
bitlock guarding a qnode. Mark these accesses with READ_ONCE to convey
to KCSAN that polling is intentional here.

Signed-off-by: Rohan McLure
Reviewed-by: Nicholas Piggin
Signed-off-by: Michael Ellerman
Link: https://msgid.link/20230510033117.1395895-2-rmclure@linux.ibm.com
---
 arch/powerpc/lib/qspinlock.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/lib/qspinlock.c b/arch/powerpc/lib/qspinlock.c
index e4bd145255d0..b76c1f6acce5 100644
--- a/arch/powerpc/lib/qspinlock.c
+++ b/arch/powerpc/lib/qspinlock.c
@@ -435,7 +435,7 @@ yield_prev:
 
 	smp_rmb(); /* See __yield_to_locked_owner comment */
 
-	if (!node->locked) {
+	if (!READ_ONCE(node->locked)) {
 		yield_to_preempted(prev_cpu, yield_count);
 		spin_begin();
 		return preempted;
@@ -584,7 +584,7 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
 
 		/* Wait for mcs node lock to be released */
 		spin_begin();
-		while (!node->locked) {
+		while (!READ_ONCE(node->locked)) {
 			spec_barrier();
 
 			if (yield_to_prev(lock, node, old, paravirt))
-- 
cgit v1.2.3
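
For context, here is a minimal userspace sketch (not part of this patch) of the pattern being annotated: one thread polls a flag that another thread sets, and READ_ONCE/WRITE_ONCE, defined below as plain volatile accesses in the spirit of the kernel's rwonce.h, mark the racy accesses as intentional so a race detector such as KCSAN does not report them. The struct qnode here is a simplified stand-in for the kernel's, and the helper names are illustrative only.

	/* Kernel-style once-accessors, approximated with volatile casts. */
	#define READ_ONCE(x)		(*(const volatile typeof(x) *)&(x))
	#define WRITE_ONCE(x, val)	(*(volatile typeof(x) *)&(x) = (val))

	struct qnode {
		int locked;	/* 0: keep waiting, 1: lock handed to us */
	};

	/* Waiter side: spin until the previous lock holder hands over. */
	static void wait_for_handover(struct qnode *node)
	{
		while (!READ_ONCE(node->locked))
			;	/* the kernel would use spin_begin()/cpu_relax() here */
	}

	/* Releaser side: pairs with the marked read in the polling loop. */
	static void hand_over(struct qnode *next)
	{
		WRITE_ONCE(next->locked, 1);
	}

The point of the patch is exactly this marking: the loads of node->locked were already racy by design, and wrapping them in READ_ONCE documents that intent for both the compiler and KCSAN without changing the generated polling behaviour.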