3.12.38-rt53-rc1 stable review patch.
If anyone has any objections, please let me know.

------------------

From: Mike Galbraith <[email protected]>

If the caller already holds the mutex, task_blocks_on_rt_mutex()
returns -EDEADLK, and we proceed directly to rt_mutex_handle_deadlock(),
where it's instant game over.

Instead, let ww_mutexes return EDEADLK/EALREADY as they want to.
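
For reference, the ww_mutex API treats -EDEADLK and -EALREADY as normal
return values that the caller resolves with a backoff-and-retry loop, so
forwarding them is the right behavior. A minimal sketch of that caller
pattern, assuming two hypothetical locks lock_a/lock_b in a hypothetical
demo_ww_class (illustration only, not part of this patch):

#include <linux/kernel.h>	/* swap() */
#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(demo_ww_class);	/* hypothetical class */
static struct ww_mutex lock_a, lock_b;	/* ww_mutex_init()'d elsewhere */

static int demo_lock_both(void)
{
	struct ww_mutex *first = &lock_a, *second = &lock_b;
	struct ww_acquire_ctx ctx;
	int ret;

	ww_acquire_init(&ctx, &demo_ww_class);

	ret = ww_mutex_lock(first, &ctx);
	if (ret)	/* e.g. -EALREADY: ctx already holds this lock */
		goto out_fini;

	while ((ret = ww_mutex_lock(second, &ctx)) == -EDEADLK) {
		/*
		 * An older transaction owns 'second': back off by
		 * dropping what we hold, sleep until the contended
		 * lock is free, then retry in the opposite order.
		 */
		ww_mutex_unlock(first);
		ww_mutex_lock_slow(second, &ctx);
		swap(first, second);
	}
	if (ret) {
		ww_mutex_unlock(first);
		goto out_fini;
	}

	ww_acquire_done(&ctx);
	/* ... both locks held, do the work ... */
	ww_mutex_unlock(second);
	ww_mutex_unlock(first);
out_fini:
	ww_acquire_fini(&ctx);
	return ret;
}

With this patch, rt_mutex_slowlock() hands those codes back to a loop
like the one above instead of dying in rt_mutex_handle_deadlock().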

Cc: [email protected]
Signed-off-by: Mike Galbraith <[email protected]>
Signed-off-by: Sebastian Andrzej Siewior <[email protected]>
Signed-off-by: Steven Rostedt <[email protected]>
---
 kernel/rtmutex.c | 21 ++++++++++++++-------
 1 file changed, 14 insertions(+), 7 deletions(-)

diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index e8a6555a670a..7601c1332a88 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -1621,13 +1621,20 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 
        if (likely(!ret))
        ret = __rt_mutex_slowlock(lock, state, timeout, &waiter, ww_ctx);
+       else if (ww_ctx) {
+               /* ww_mutex received EDEADLK, let it become EALREADY */
+               ret = __mutex_lock_check_stamp(lock, ww_ctx);
+               BUG_ON(!ret);
+       }
 
        set_current_state(TASK_RUNNING);
 
        if (unlikely(ret)) {
                if (rt_mutex_has_waiters(lock))
                        remove_waiter(lock, &waiter);
-               rt_mutex_handle_deadlock(ret, chwalk, &waiter);
+               /* ww_mutexes want to report EDEADLK/EALREADY, let them */
+               if (!ww_ctx)
+                       rt_mutex_handle_deadlock(ret, chwalk, &waiter);
        } else if (ww_ctx) {
                ww_mutex_account_lock(lock, ww_ctx);
        }
@@ -2166,8 +2173,7 @@ __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ww_c
        might_sleep();
 
        mutex_acquire_nest(&lock->base.dep_map, 0, 0, &ww_ctx->dep_map, _RET_IP_);
-       ret = rt_mutex_slowlock(&lock->base.lock, TASK_INTERRUPTIBLE, NULL,
-                               RT_MUTEX_FULL_CHAINWALK, ww_ctx);
+       ret = rt_mutex_slowlock(&lock->base.lock, TASK_INTERRUPTIBLE, NULL, 0, ww_ctx);
        if (ret)
                mutex_release(&lock->base.dep_map, 1, _RET_IP_);
        else if (!ret && ww_ctx->acquired > 1)
@@ -2185,8 +2191,7 @@ __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
        might_sleep();
 
        mutex_acquire_nest(&lock->base.dep_map, 0, 0, &ww_ctx->dep_map, _RET_IP_);
-       ret = rt_mutex_slowlock(&lock->base.lock, TASK_UNINTERRUPTIBLE, NULL,
-                               RT_MUTEX_FULL_CHAINWALK, ww_ctx);
+       ret = rt_mutex_slowlock(&lock->base.lock, TASK_UNINTERRUPTIBLE, NULL, 0, ww_ctx);
        if (ret)
                mutex_release(&lock->base.dep_map, 1, _RET_IP_);
        else if (!ret && ww_ctx->acquired > 1)
@@ -2198,11 +2203,13 @@ EXPORT_SYMBOL_GPL(__ww_mutex_lock);
 
 void __sched ww_mutex_unlock(struct ww_mutex *lock)
 {
+       int nest = !!lock->ctx;
+
        /*
         * The unlocking fastpath is the 0->1 transition from 'locked'
         * into 'unlocked' state:
         */
-       if (lock->ctx) {
+       if (nest) {
 #ifdef CONFIG_DEBUG_MUTEXES
                DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
 #endif
@@ -2211,7 +2218,7 @@ void __sched ww_mutex_unlock(struct ww_mutex *lock)
                lock->ctx = NULL;
        }
 
-       mutex_release(&lock->base.dep_map, 1, _RET_IP_);
+       mutex_release(&lock->base.dep_map, nest, _RET_IP_);
        rt_mutex_unlock(&lock->base.lock);
 }
 EXPORT_SYMBOL(ww_mutex_unlock);
-- 
2.1.4

