[PATCH -v3 1/8] rtmutex: Deboost before waking up the top waiter

2017-03-23 Thread Peter Zijlstra
From: Xunlei Pang 

We should deboost before waking the high-priority task, such that we
don't run two tasks with the same "state" (priority, deadline,
sched_class, etc.).

In order to make sure the boosting task doesn't start running between
unlock and deboost (due to 'spurious' wakeup), we move the deboost
under the wait_lock; that way it's serialized against the wait loop in
__rt_mutex_slowlock().
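For context, the wait side in __rt_mutex_slowlock() only re-evaluates
and takes the lock while holding lock->wait_lock, roughly as in this
heavily condensed sketch (not the literal source):

	for (;;) {
		/* runs with lock->wait_lock held */
		if (try_to_take_rt_mutex(lock, current, waiter))
			break;

		raw_spin_unlock_irq(&lock->wait_lock);
		schedule();			/* may return spuriously */
		raw_spin_lock_irq(&lock->wait_lock);
		set_current_state(state);
	}

Since the unlocker now deboosts while still holding wait_lock, the
deboost is ordered before any such re-evaluation.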

Doing the deboost early can however lead to priority-inversion if
current would get preempted after the deboost but before waking our
high-prio task, hence we disable preemption before doing the deboost
and enable it again once the wakeup is done.
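The resulting unlock-side ordering, condensed from the hunks below (a
sketch of the combined effect, not the literal code):

	/* rt_mutex_slowunlock(), after this patch */
	raw_spin_lock_irqsave(&lock->wait_lock, flags);
	/* dequeue the top waiter and deboost current, both under wait_lock */
	mark_wakeup_next_waiter(wake_q, lock);
	/* stay on this CPU until the wakeup is done */
	preempt_disable();
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	/* rt_mutex_postunlock() */
	wake_up_q(wake_q);
	preempt_enable();	/* preemption point; the woken waiter may run now */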

This gets us the right semantic order, but most importantly,
this change ensures pointer stability for the next patch, where we
have rt_mutex_setprio() cache a pointer to the top-most waiter task.
If we, as before this change, do the wakeup first and then deboost,
this pointer might point into thin air.

[peterz: Changelog + patch munging]
Cc: Ingo Molnar 
Cc: Juri Lelli 
Acked-by: Steven Rostedt 
Suggested-by: Peter Zijlstra 
Signed-off-by: Xunlei Pang 
Signed-off-by: Peter Zijlstra (Intel) 
---

 kernel/futex.c                  |    5 +---
 kernel/locking/rtmutex.c        |   59 ++++++++++++++++++---------------
 kernel/locking/rtmutex_common.h |    2 +-
 3 files changed, 34 insertions(+), 32 deletions(-)

--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1465,10 +1465,7 @@ static int wake_futex_pi(u32 __user *uad
 out_unlock:
	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
 
-   if (deboost) {
-           wake_up_q(&wake_q);
-           rt_mutex_adjust_prio(current);
-   }
+   rt_mutex_postunlock(&wake_q, deboost);
 
return ret;
 }
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -307,24 +307,6 @@ static void __rt_mutex_adjust_prio(struc
 }
 
 /*
- * Adjust task priority (undo boosting). Called from the exit path of
- * rt_mutex_slowunlock() and rt_mutex_slowlock().
- *
- * (Note: We do this outside of the protection of lock->wait_lock to
- * allow the lock to be taken while or before we readjust the priority
- * of task. We do not use the spin_xx_mutex() variants here as we are
- * outside of the debug path.)
- */
-void rt_mutex_adjust_prio(struct task_struct *task)
-{
-   unsigned long flags;
-
-   raw_spin_lock_irqsave(&task->pi_lock, flags);
-   __rt_mutex_adjust_prio(task);
-   raw_spin_unlock_irqrestore(&task->pi_lock, flags);
-}
-
-/*
  * Deadlock detection is conditional:
  *
  * If CONFIG_DEBUG_RT_MUTEXES=n, deadlock detection is only conducted
@@ -985,6 +967,7 @@ static void mark_wakeup_next_waiter(stru
 * lock->wait_lock.
 */
rt_mutex_dequeue_pi(current, waiter);
+   __rt_mutex_adjust_prio(current);
 
/*
 * As we are waking up the top waiter, and the waiter stays
@@ -1321,6 +1304,16 @@ static bool __sched rt_mutex_slowunlock(
 */
mark_wakeup_next_waiter(wake_q, lock);
 
+   /*
+    * We should deboost before waking the top waiter task such that
+    * we don't run two tasks with the 'same' priority. This however
+    * can lead to prio-inversion if we would get preempted after
+    * the deboost but before waking our high-prio task, hence the
+    * preempt_disable before unlock. Pairs with preempt_enable() in
+    * rt_mutex_postunlock();
+    */
+   preempt_disable();
+
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 
/* check PI boosting */
@@ -1370,6 +1363,18 @@ rt_mutex_fasttrylock(struct rt_mutex *lo
return slowfn(lock);
 }
 
+/*
+ * Undo pi boosting (if necessary) and wake top waiter.
+ */
+void rt_mutex_postunlock(struct wake_q_head *wake_q, bool deboost)
+{
+   wake_up_q(wake_q);
+
+   /* Pairs with preempt_disable() in rt_mutex_slowunlock() */
+   if (deboost)
+   preempt_enable();
+}
+
 static inline void
 rt_mutex_fastunlock(struct rt_mutex *lock,
bool (*slowfn)(struct rt_mutex *lock,
@@ -1383,11 +1388,7 @@ rt_mutex_fastunlock(struct rt_mutex *loc
 
	deboost = slowfn(lock, &wake_q);

-   wake_up_q(&wake_q);
-
-   /* Undo pi boosting if necessary: */
-   if (deboost)
-           rt_mutex_adjust_prio(current);
+   rt_mutex_postunlock(&wake_q, deboost);
 }
 
 /**
@@ -1513,6 +1514,13 @@ bool __sched __rt_mutex_futex_unlock(str
}
 
mark_wakeup_next_waiter(wake_q, lock);
+   /*
+    * We've already deboosted, retain preempt_disabled when dropping
+    * the wait_lock to avoid inversion until the wakeup. Matched
+    * by rt_mutex_postunlock();
+    */
+   preempt_disable();
+
return true; /* deboost and wakeups */
 }
 
@@ -1525,10 +1533,7 @@ void __sched rt_mutex_futex_unlock(struc
	deboost = __rt_mutex_futex_unlock(lock, &wake_q);
	raw_spin_unlock_irq(&lock->wait_lock);
 
-   if (deboost) {
-           wake_up_q(&wake_q);
-           rt_mutex_adjust_prio(current);