Use the spin_begin/spin_cpu_relax/spin_end APIs in qspinlock. This stops
threads from issuing a lot of expensive priority nops that may have little
effect, because each cpu_relax() drops to low priority and then immediately
returns to medium priority.
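
For illustration, a minimal sketch (hypothetical helper, not taken from
this patch) of the convention being applied: lower SMT priority once around
the whole polling loop, raise it again before doing real work such as a
hypervisor yield or a trylock, and use spin_cpu_relax() inside the loop
rather than cpu_relax():

	/* Sketch only: hold low SMT priority for the entire polling loop */
	static void example_wait_unlocked(struct qspinlock *lock)
	{
		spin_begin();			/* drop to low priority once */
		while (READ_ONCE(lock->val) & _Q_LOCKED_VAL)
			spin_cpu_relax();	/* no priority nops per iteration */
		spin_end();			/* back to medium priority */
	}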

Signed-off-by: Nicholas Piggin <npig...@gmail.com>
---
 arch/powerpc/lib/qspinlock.c | 39 ++++++++++++++++++++++++++++++++----
 1 file changed, 35 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/lib/qspinlock.c b/arch/powerpc/lib/qspinlock.c
index 1cb47a6478a0..70f924296b36 100644
--- a/arch/powerpc/lib/qspinlock.c
+++ b/arch/powerpc/lib/qspinlock.c
@@ -184,6 +184,7 @@ static struct qnode *get_tail_qnode(struct qspinlock *lock, u32 val)
        BUG();
 }
 
+/* Called inside spin_begin() */
 static __always_inline void __yield_to_locked_owner(struct qspinlock *lock, u32 val, bool paravirt, bool mustq)
 {
        int owner;
@@ -203,6 +204,8 @@ static __always_inline void __yield_to_locked_owner(struct qspinlock *lock, u32
        if ((yield_count & 1) == 0)
                goto relax; /* owner vcpu is running */
 
+       spin_end();
+
        /*
         * Read the lock word after sampling the yield count. On the other side
         * there may a wmb because the yield count update is done by the
@@ -218,18 +221,22 @@ static __always_inline void __yield_to_locked_owner(struct qspinlock *lock, u32
                yield_to_preempted(owner, yield_count);
                if (mustq)
                        set_mustq(lock);
+               spin_begin();
                /* Don't relax if we yielded. Maybe we should? */
                return;
        }
+       spin_begin();
 relax:
-       cpu_relax();
+       spin_cpu_relax();
 }
 
+/* Called inside spin_begin() */
 static __always_inline void yield_to_locked_owner(struct qspinlock *lock, u32 val, bool paravirt)
 {
        __yield_to_locked_owner(lock, val, paravirt, false);
 }
 
+/* Called inside spin_begin() */
 static __always_inline void yield_head_to_locked_owner(struct qspinlock *lock, u32 val, bool paravirt)
 {
        bool mustq = false;
@@ -267,6 +274,7 @@ static __always_inline void propagate_yield_cpu(struct qnode *node, u32 val, int
        }
 }
 
+/* Called inside spin_begin() */
 static __always_inline void yield_to_prev(struct qspinlock *lock, struct qnode *node, u32 val, bool paravirt)
 {
        int prev_cpu = decode_tail_cpu(val);
@@ -291,14 +299,18 @@ static __always_inline void yield_to_prev(struct qspinlock *lock, struct qnode *
        if ((yield_count & 1) == 0)
                goto yield_prev; /* owner vcpu is running */
 
+       spin_end();
+
        smp_rmb();
 
        if (yield_cpu == node->yield_cpu) {
                if (node->next && node->next->yield_cpu != yield_cpu)
                        node->next->yield_cpu = yield_cpu;
                yield_to_preempted(yield_cpu, yield_count);
+               spin_begin();
                return;
        }
+       spin_begin();
 
 yield_prev:
        if (!pv_yield_prev)
@@ -308,15 +320,19 @@ static __always_inline void yield_to_prev(struct qspinlock *lock, struct qnode *
        if ((yield_count & 1) == 0)
                goto relax; /* owner vcpu is running */
 
+       spin_end();
+
        smp_rmb(); /* See __yield_to_locked_owner comment */
 
        if (!node->locked) {
                yield_to_preempted(prev_cpu, yield_count);
+               spin_begin();
                return;
        }
+       spin_begin();
 
 relax:
-       cpu_relax();
+       spin_cpu_relax();
 }
 
 
@@ -328,6 +344,8 @@ static __always_inline bool try_to_steal_lock(struct qspinlock *lock, bool parav
                return false;
 
        /* Attempt to steal the lock */
+       spin_begin();
+
        do {
                u32 val = READ_ONCE(lock->val);
 
@@ -335,8 +353,10 @@ static __always_inline bool try_to_steal_lock(struct qspinlock *lock, bool parav
                        break;
 
                if (unlikely(!(val & _Q_LOCKED_VAL))) {
+                       spin_end();
                        if (__queued_spin_trylock_steal(lock))
                                return true;
+                       spin_begin();
                } else {
                        yield_to_locked_owner(lock, val, paravirt);
                }
@@ -344,6 +364,8 @@ static __always_inline bool try_to_steal_lock(struct qspinlock *lock, bool parav
                iters++;
        } while (iters < get_steal_spins(paravirt));
 
+       spin_end();
+
        return false;
 }
 
@@ -395,8 +417,10 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
                WRITE_ONCE(prev->next, node);
 
                /* Wait for mcs node lock to be released */
+               spin_begin();
                while (!node->locked)
                        yield_to_prev(lock, node, old, paravirt);
+               spin_end();
 
                /* Clear out stale propagated yield_cpu */
                if (paravirt && pv_yield_propagate_owner && node->yield_cpu != -1)
@@ -407,6 +431,7 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
 
 again:
        /* We're at the head of the waitqueue, wait for the lock. */
+       spin_begin();
        for (;;) {
                val = READ_ONCE(lock->val);
                if (!(val & _Q_LOCKED_VAL))
@@ -424,6 +449,7 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
                        val |= _Q_MUST_Q_VAL;
                }
        }
+       spin_end();
 
        /* If we're the last queued, must clean up the tail. */
        old = trylock_clean_tail(lock, tail);
@@ -436,8 +462,13 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
                goto release; /* We were the tail, no next. */
 
        /* There is a next, must wait for node->next != NULL (MCS protocol) */
-       while (!(next = READ_ONCE(node->next)))
-               cpu_relax();
+       next = READ_ONCE(node->next);
+       if (!next) {
+               spin_begin();
+               while (!(next = READ_ONCE(node->next)))
+                       cpu_relax();
+               spin_end();
+       }
 
        /*
         * Unlock the next mcs waiter node. Release barrier is not required
-- 
2.37.2
