[PATCH v6 2/6] locking/pvqspinlock: Unconditional PV kick with _Q_SLOW_VAL

2015-09-11 Thread Waiman Long
If _Q_SLOW_VAL has been set, the vCPU state must have been vcpu_hashed.
The extra check at the end of __pv_queued_spin_unlock() is unnecessary
and so is removed.

Signed-off-by: Waiman Long 
Reviewed-by: Davidlohr Bueso 
---
 kernel/locking/qspinlock_paravirt.h |6 +-
 1 files changed, 1 insertions(+), 5 deletions(-)

diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
index c8e6e9a..f0450ff 100644
--- a/kernel/locking/qspinlock_paravirt.h
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -267,7 +267,6 @@ static void pv_wait_head(struct qspinlock *lock, struct mcs_spinlock *node)
}
 
if (!lp) { /* ONCE */
-   WRITE_ONCE(pn->state, vcpu_hashed);
lp = pv_hash(lock, pn);
 
/*
@@ -275,11 +274,9 @@ static void pv_wait_head(struct qspinlock *lock, struct mcs_spinlock *node)
 		 * when we observe _Q_SLOW_VAL in __pv_queued_spin_unlock()
 		 * we'll be sure to be able to observe our hash entry.
 		 *
-		 *   [S] pn->state
 		 *   [S] <hash>                 [Rmw] l->locked == _Q_SLOW_VAL
 		 *       MB                           RMB
 		 * [RmW] l->locked = _Q_SLOW_VAL  [L] <unhash>
-		 *                                [L] pn->state
 		 *
 		 * Matches the smp_rmb() in __pv_queued_spin_unlock().
 		 */
@@ -364,8 +361,7 @@ __visible void __pv_queued_spin_unlock(struct qspinlock *lock)
 * vCPU is harmless other than the additional latency in completing
 * the unlock.
 */
-   if (READ_ONCE(node->state) == vcpu_hashed)
-   pv_kick(node->cpu);
+   pv_kick(node->cpu);
 }
 /*
  * Include the architecture specific callee-save thunk of the
-- 
1.7.1

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

