A comment accompanying the locked attribute of a qnode assigns a value
of 1 to mean that the lock has been acquired. The usages of this
variable, however, assume the opposite semantics. Update these usages
so that the semantics asserted by the comment are reflected throughout
this file.
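
For illustration, a minimal user-space sketch of the hand-off under the
comment's semantics (locked == 1 while the MCS node lock is held). The
names mcs_wait/mcs_release are simplified stand-ins, and C11 atomics
stand in for the kernel's READ_ONCE/WRITE_ONCE and barriers; this is
not the file's implementation:

	#include <stdatomic.h>

	struct qnode {
		_Atomic int locked;	/* 1: node lock held, spin; 0: released */
	};

	/* Waiter side: mark our node held, publish it, spin until released. */
	static void mcs_wait(struct qnode *node)
	{
		atomic_store_explicit(&node->locked, 1, memory_order_relaxed);
		/* ... publish node to the queue tail ... */
		while (atomic_load_explicit(&node->locked, memory_order_acquire))
			;	/* node lock still held by our predecessor */
	}

	/* Owner side: hand the lock over by releasing the next node. */
	static void mcs_release(struct qnode *next)
	{
		atomic_store_explicit(&next->locked, 0, memory_order_release);
	}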

Signed-off-by: Rohan McLure <rmcl...@linux.ibm.com>
---
 arch/powerpc/lib/qspinlock.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/arch/powerpc/lib/qspinlock.c b/arch/powerpc/lib/qspinlock.c
index e4bd145255d0..9cf93963772b 100644
--- a/arch/powerpc/lib/qspinlock.c
+++ b/arch/powerpc/lib/qspinlock.c
@@ -435,7 +435,7 @@ static __always_inline bool yield_to_prev(struct qspinlock *lock, struct qnode *
 
        smp_rmb(); /* See __yield_to_locked_owner comment */
 
-       if (!node->locked) {
+       if (node->locked) {
                yield_to_preempted(prev_cpu, yield_count);
                spin_begin();
                return preempted;
@@ -566,7 +566,7 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
        node->lock = lock;
        node->cpu = smp_processor_id();
        node->yield_cpu = -1;
-       node->locked = 0;
+       node->locked = 1;
 
        tail = encode_tail_cpu(node->cpu);
 
@@ -584,7 +584,7 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
 
                /* Wait for mcs node lock to be released */
                spin_begin();
-               while (!node->locked) {
+               while (node->locked) {
                        spec_barrier();
 
                        if (yield_to_prev(lock, node, old, paravirt))
@@ -693,13 +693,13 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
         */
        if (paravirt && pv_prod_head) {
                int next_cpu = next->cpu;
-               WRITE_ONCE(next->locked, 1);
+               WRITE_ONCE(next->locked, 0);
                if (_Q_SPIN_MISO)
                        asm volatile("miso" ::: "memory");
                if (vcpu_is_preempted(next_cpu))
                        prod_cpu(next_cpu);
        } else {
-               WRITE_ONCE(next->locked, 1);
+               WRITE_ONCE(next->locked, 0);
                if (_Q_SPIN_MISO)
                        asm volatile("miso" ::: "memory");
        }
-- 
2.37.2
