Use the spin_begin/spin_cpu_relax/spin_end APIs in qspinlock. This
prevents spinning threads from issuing a stream of expensive SMT
priority nops which may have little effect, because each cpu_relax()
call executes the low priority nop and then the medium priority nop
immediately afterwards.
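
For reference, the intended usage pattern looks roughly like the sketch
below (wait_for_flag() and its flag argument are hypothetical, for
illustration only, not part of this patch): lower the SMT priority once
before the spin loop, use the cheap per-iteration relax inside it, and
restore medium priority once the loop exits, rather than having every
cpu_relax() call toggle low then medium priority.

	/*
	 * Illustrative sketch only; wait_for_flag() is hypothetical.
	 * spin_begin(), spin_cpu_relax() and spin_end() come from
	 * <linux/processor.h>, with powerpc providing SMT-priority
	 * aware definitions in <asm/processor.h>.
	 */
	static void wait_for_flag(int *flag)
	{
		spin_begin();                   /* drop to low SMT priority once */
		while (!READ_ONCE(*flag))
			spin_cpu_relax();       /* cheap relax, no priority nops */
		spin_end();                     /* restore medium priority */
	}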
---
 arch/powerpc/lib/qspinlock.c | 21 +++++++++++++++++----
 1 file changed, 17 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/lib/qspinlock.c b/arch/powerpc/lib/qspinlock.c
index d67b923e4f98..486423d566d3 100644
--- a/arch/powerpc/lib/qspinlock.c
+++ b/arch/powerpc/lib/qspinlock.c
@@ -243,7 +243,7 @@ static void __yield_to_locked_owner(struct qspinlock *lock, u32 val, bool paravi
                return;
        }
 relax:
-       cpu_relax();
+       spin_cpu_relax();
 }
 
 static void yield_to_locked_owner(struct qspinlock *lock, u32 val, bool paravirt)
@@ -331,7 +331,7 @@ static void yield_to_prev(struct qspinlock *lock, struct qnode *node, int prev_c
        }
 
 relax:
-       cpu_relax();
+       spin_cpu_relax();
 }
 
 
@@ -340,6 +340,7 @@ static __always_inline bool try_to_steal_lock(struct qspinlock *lock, bool parav
        int iters;
 
        /* Attempt to steal the lock */
+       spin_begin();
        for (iters = 0; iters < STEAL_SPINS; iters++) {
                u32 val = READ_ONCE(lock->val);
 
@@ -354,6 +355,7 @@ static __always_inline bool try_to_steal_lock(struct qspinlock *lock, bool parav
                if (trylock_with_tail_cpu(lock, val))
                        return true;
        }
+       spin_end();
 
        return false;
 }
@@ -404,8 +406,10 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
                WRITE_ONCE(prev->next, node);
 
                /* Wait for mcs node lock to be released */
+               spin_begin();
                while (!node->locked)
                        yield_to_prev(lock, node, prev_cpu, paravirt);
+               spin_end();
 
                /* Clear out stale propagated yield_cpu */
                if (paravirt && pv_yield_propagate_owner && node->yield_cpu != -1)
@@ -418,10 +422,12 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
                int set_yield_cpu = -1;
 
                /* We're at the head of the waitqueue, wait for the lock. */
+               spin_begin();
                while ((val = READ_ONCE(lock->val)) & _Q_LOCKED_VAL) {
                        propagate_yield_cpu(node, val, &set_yield_cpu, paravirt);
                        yield_head_to_locked_owner(lock, val, paravirt, false);
                }
+               spin_end();
 
                /* If we're the last queued, must clean up the tail. */
                if ((val & _Q_TAIL_CPU_MASK) == tail) {
@@ -436,6 +442,7 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
                int set_yield_cpu = -1;
                int iters = 0;
 again:
+               spin_begin();
                /* We're at the head of the waitqueue, wait for the lock. */
                while ((val = READ_ONCE(lock->val)) & _Q_LOCKED_VAL) {
                        if (iters++ == HEAD_SPINS) {
@@ -446,6 +453,7 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
                        yield_head_to_locked_owner(lock, val, paravirt,
                                pv_yield_allow_steal && (iters > HEAD_SPINS));
                }
+               spin_end();
 
                /* If we're the last queued, must clean up the tail. */
                if ((val & _Q_TAIL_CPU_MASK) == tail) {
@@ -461,8 +469,13 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
 
 unlock_next:
        /* contended path; must wait for next != NULL (MCS protocol) */
-       while (!(next = READ_ONCE(node->next)))
-               cpu_relax();
+       next = READ_ONCE(node->next);
+       if (!next) {
+               spin_begin();
+               while (!(next = READ_ONCE(node->next)))
+                       cpu_relax();
+               spin_end();
+       }
 
        /*
         * Unlock the next mcs waiter node. Release barrier is not required
-- 
2.35.1
