Performing the CPU kick at lock time can be a bit faster when there
is no kick-ahead. On the other hand, deferring it to unlock time is
preferable when kick-ahead can be performed, or when the VM guest has
so few vCPUs that a vCPU may be kicked twice before getting the
lock. This patch defers the kicking when either of the above two
conditions is true.
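
At lock-handoff time the new pv_kick_node() then boils down to the
following (a condensed sketch of the code in the diff below, shown
after the next vCPU has been confirmed halted and with the statistics
counting omitted; pn is the pv_node of the next-in-queue vCPU and l
is the lock cast to struct __qspinlock):

	if ((!pv_kick_ahead || pv_get_kick_node(pn, 1)) &&
	    (xchg(&pn->hashed, 1) == 0)) {
		/* Defer the kick: let the unlock path find _Q_SLOW_VAL */
		WRITE_ONCE(l->locked, _Q_SLOW_VAL);
		(void)pv_hash(lock, pn);
	} else {
		/* Kick now; a spurious kick is harmless */
		pv_kick(pn->cpu);
	}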

Linux kernel builds were run in a KVM guest on an 8-socket, 4
cores/socket Westmere-EX system and a 4-socket, 8 cores/socket
Haswell-EX system. Both systems were configured to have 32 physical
CPUs. The kernel build times before and after the patch were:

                    Westmere                    Haswell
  Patch         32 vCPUs    48 vCPUs    32 vCPUs    48 vCPUs
  -----         --------    --------    --------    --------
  Before patch   3m27.4s    10m32.0s     2m00.8s    14m52.5s
  After patch    3m01.3s     9m50.9s     2m00.5s    13m28.1s

On Westmere, both the 32-vCPU and 48-vCPU cases showed a sizeable
performance improvement. On Haswell, there was some improvement in
the overcommitted (48 vCPUs) case.

Signed-off-by: Waiman Long <[email protected]>
---
 kernel/locking/qspinlock.c          |    6 +-
 kernel/locking/qspinlock_paravirt.h |   71 +++++++++++++++++++++++++++--------
 2 files changed, 58 insertions(+), 19 deletions(-)

diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 6518ee9..94fdd27 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -259,8 +259,8 @@ static __always_inline void set_locked(struct qspinlock *lock)
 
 static __always_inline void __pv_init_node(struct mcs_spinlock *node) { }
 static __always_inline void __pv_wait_node(struct mcs_spinlock *node) { }
-static __always_inline void __pv_kick_node(struct mcs_spinlock *node) { }
-
+static __always_inline void __pv_kick_node(struct qspinlock *lock,
+                                          struct mcs_spinlock *node) { }
 static __always_inline void __pv_wait_head(struct qspinlock *lock,
                                           struct mcs_spinlock *node) { }
 
@@ -464,7 +464,7 @@ queue:
                cpu_relax();
 
        arch_mcs_spin_unlock_contended(&next->locked);
-       pv_kick_node(next);
+       pv_kick_node(lock, next);
 
 release:
        /*
diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
index f3ceeff..1f0485c 100644
--- a/kernel/locking/qspinlock_paravirt.h
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -40,6 +40,7 @@ struct pv_node {
 
        int                     cpu;
        u8                      state;
+       u8                      hashed; /* Set if in hash table */
 };
 
 /*
@@ -336,6 +337,7 @@ static void pv_init_node(struct mcs_spinlock *node)
 
        pn->cpu = smp_processor_id();
        pn->state = vcpu_running;
+       pn->hashed = 0;
 }
 
 /*
@@ -455,18 +457,21 @@ static void pv_wait_node(struct mcs_spinlock *node)
 
 /*
  * Helper to get the address of the next kickable node
- * The node has to be in the halted state and is being transitioned to
- * running state by this function. Otherwise, NULL will be returned.
+ *
+ * The node has to be in the halted state. If the chkonly flag is set,
+ * the CPU state won't be changed. Otherwise, it will be transitioned to
+ * running state by this function. If no kickable node is found, NULL
+ * will be returned.
  */
-static inline struct pv_node *pv_get_kick_node(struct pv_node *node)
+static inline struct pv_node *
+pv_get_kick_node(struct pv_node *node, int chkonly)
 {
        struct pv_node *next = (struct pv_node *)READ_ONCE(node->mcs.next);
 
-       if (!next)
+       if (!next || (READ_ONCE(next->state) != vcpu_halted))
                return NULL;
 
-       if ((READ_ONCE(next->state) != vcpu_halted) ||
-           (xchg(&next->state, vcpu_running) != vcpu_halted))
+       if (!chkonly && (xchg(&next->state, vcpu_running) != vcpu_halted))
                next = NULL;    /* No kicking is needed */
 
        return next;
@@ -476,23 +481,50 @@ static inline struct pv_node *pv_get_kick_node(struct pv_node *node)
  * Called after setting next->locked = 1, used to wake those stuck in
  * pv_wait_node().
  */
-static void pv_kick_node(struct mcs_spinlock *node)
+static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
 {
        struct pv_node *pn = (struct pv_node *)node;
 
+       if (xchg(&pn->state, vcpu_running) == vcpu_running)
+               return;
+
        /*
-        * Note that because node->locked is already set, this actual
-        * mcs_spinlock entry could be re-used already.
+        * Kicking the next node at lock time can actually be a bit faster
+        * than doing it at unlock time because the critical section time
+        * overlaps with the wakeup latency of the next node. However, if the
+        * VM is too overcommitted, it can happen that we need to kick the
+        * CPU again at unlock time (double-kick). To avoid that and also to
+        * fully utilize the kick-ahead functionality at unlock time,
+        * the kicking will be deferred under either one of the following
+        * 2 conditions:
         *
-        * This should be fine however, kicking people for no reason is
-        * harmless.
+        * 1) The VM guest has so few vCPUs that kick-ahead is not even
+        *    enabled. In this case, the chance of double-kick will be
+        *    higher.
+        * 2) The node after the next one is also in the halted state.
         *
-        * See the comment in pv_wait_node().
+        * In this case, the hashed flag is set to indicate that the
+        * hash table has been filled and _Q_SLOW_VAL is set.
         */
-       if (xchg(&pn->state, vcpu_running) == vcpu_halted) {
-               pvstat_inc(pvstat_lock_kick);
-               pv_kick(pn->cpu);
+       if ((!pv_kick_ahead || pv_get_kick_node(pn, 1)) &&
+           (xchg(&pn->hashed, 1) == 0)) {
+               struct __qspinlock *l = (void *)lock;
+
+               /*
+                * As this is the same vCPU that will check the _Q_SLOW_VAL
+                * value and the hash table later on at unlock time, no atomic
+                * instruction is needed.
+                */
+               WRITE_ONCE(l->locked, _Q_SLOW_VAL);
+               (void)pv_hash(lock, pn);
+               return;
        }
+
+       /*
+        * Kicking the vCPU even if it is not really halted is safe.
+        */
+       pvstat_inc(pvstat_lock_kick);
+       pv_kick(pn->cpu);
 }
 
 /*
@@ -513,6 +545,13 @@ static void pv_wait_head(struct qspinlock *lock, struct mcs_spinlock *node)
                        cpu_relax();
                }
 
+               if (!lp && (xchg(&pn->hashed, 1) == 1))
+                       /*
+                        * The hash table & _Q_SLOW_VAL have already
+                        * been filled in by the lock holder.
+                        */
+                       lp = (struct qspinlock **)-1;
+
                if (!lp) { /* ONCE */
                        lp = pv_hash(lock, pn);
                        /*
@@ -584,7 +623,7 @@ __visible void __pv_queued_spin_unlock(struct qspinlock *lock)
         * should improve performance on an overcommitted system.
         */
        for (nr_kick = 0, nxt = node; nr_kick < pv_kick_ahead; nr_kick++) {
-               nxt = next[nr_kick] = pv_get_kick_node(nxt);
+               nxt = next[nr_kick] = pv_get_kick_node(nxt, 0);
                if (!nxt)
                        break;
        }
-- 
1.7.1
