The powerpc implementation of qspinlocks both polls and spins on the
bitlock guarding a qnode. Mark these accesses with READ_ONCE() to convey
to KCSAN that this polling is intentional.
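
For illustration (a sketch, not code from this patch): a plain load in
a polling loop races with the store from the CPU releasing the node,
and KCSAN reports that race unless the access is marked. READ_ONCE()
documents the race as intentional and also forces the compiler to
reload the value on each iteration instead of hoisting the load out of
the loop. The struct below is a simplified stand-in for the real qnode
in qspinlock.c; spin_begin()/spin_cpu_relax()/spin_end() are the
kernel's existing polling-loop helpers.

  #include <linux/compiler.h>   /* READ_ONCE() */
  #include <linux/processor.h>  /* spin_begin(), spin_cpu_relax(), spin_end() */

  struct qnode_sketch {         /* simplified stand-in for struct qnode */
  	int locked;
  };

  static void wait_for_mcs_lock(struct qnode_sketch *node)
  {
  	spin_begin();
  	/*
  	 * A plain "while (node->locked)" is a data race with the
  	 * unlocking CPU; READ_ONCE() marks the poll as intentional
  	 * for KCSAN and guarantees a fresh load every iteration.
  	 */
  	while (READ_ONCE(node->locked))
  		spin_cpu_relax();
  	spin_end();
  }

On the writer side, the corresponding store to node->locked would
typically be marked with WRITE_ONCE() or a release store so that both
halves of the intentional race are annotated.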

Signed-off-by: Rohan McLure <rmcl...@linux.ibm.com>
---
 arch/powerpc/lib/qspinlock.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/lib/qspinlock.c b/arch/powerpc/lib/qspinlock.c
index 9cf93963772b..579290d55abf 100644
--- a/arch/powerpc/lib/qspinlock.c
+++ b/arch/powerpc/lib/qspinlock.c
@@ -435,7 +435,7 @@ static __always_inline bool yield_to_prev(struct qspinlock *lock, struct qnode *
 
        smp_rmb(); /* See __yield_to_locked_owner comment */
 
-       if (node->locked) {
+       if (READ_ONCE(node->locked)) {
                yield_to_preempted(prev_cpu, yield_count);
                spin_begin();
                return preempted;
@@ -584,7 +584,7 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
 
                /* Wait for mcs node lock to be released */
                spin_begin();
-               while (node->locked) {
+               while (READ_ONCE(node->locked)) {
                        spec_barrier();
 
                        if (yield_to_prev(lock, node, old, paravirt))
-- 
2.37.2
