From: Nicolai Hähnle <nicolai.haeh...@amd.com>

While adding our task as a waiter, detect whether another task already
on the wait list has to back off because of us.

With this patch, we establish the invariant that the wait list contains
at most one (sleeping) waiter with ww_ctx->acquired > 0, and that this
waiter is always the first waiter with a context.

Since only waiters with ww_ctx->acquired > 0 ever have to back off, this
invariant allows us to be much more economical with wakeups.
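
For reference, the caller-side pattern that produces these back-offs is
the usual ww_mutex retry idiom. A minimal sketch (not part of this patch;
demo_ww_class, struct obj and lock_pair are invented for illustration):

  #include <linux/kernel.h>     /* swap() */
  #include <linux/ww_mutex.h>

  static DEFINE_WW_CLASS(demo_ww_class);

  struct obj {
          struct ww_mutex lock;
          /* ... payload ... */
  };

  static void lock_pair(struct obj *a, struct obj *b)
  {
          struct ww_acquire_ctx ctx;
          int err;

          ww_acquire_init(&ctx, &demo_ww_class);

          ww_mutex_lock(&a->lock, &ctx);  /* acquired == 0: never backs off */
          err = ww_mutex_lock(&b->lock, &ctx);
          while (err == -EDEADLK) {
                  /*
                   * An older transaction holds b; since ctx.acquired > 0,
                   * we are the one that has to back off.
                   */
                  ww_mutex_unlock(&a->lock);
                  ww_mutex_lock_slow(&b->lock, &ctx); /* sleep, then take b */
                  swap(a, b);                         /* we now hold "a" */
                  err = ww_mutex_lock(&b->lock, &ctx);
          }

          ww_acquire_done(&ctx);
          /* ... use both objects; unlock both, then ww_acquire_fini(&ctx) */
  }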

Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Ingo Molnar <mi...@redhat.com>
Cc: Maarten Lankhorst <d...@mblankhorst.nl>
Cc: Daniel Vetter <dan...@ffwll.ch>
Cc: Chris Wilson <ch...@chris-wilson.co.uk>
Cc: dri-de...@lists.freedesktop.org
Signed-off-by: Nicolai Hähnle <nicolai.haeh...@amd.com>
---
 kernel/locking/mutex.c | 42 ++++++++++++++++++++++++++++++++----------
 1 file changed, 32 insertions(+), 10 deletions(-)

diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 01dcae7..d310703e 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -609,23 +609,34 @@ void __sched ww_mutex_unlock(struct ww_mutex *lock)
 EXPORT_SYMBOL(ww_mutex_unlock);
 
 static inline int __sched
-__ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
+__ww_mutex_lock_check_stamp(struct mutex *lock, struct mutex_waiter *waiter,
+                           struct ww_acquire_ctx *ctx)
 {
        struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
        struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
+       struct mutex_waiter *cur;
 
-       if (!hold_ctx)
-               return 0;
+       if (hold_ctx && __ww_mutex_stamp_after(ctx, hold_ctx))
+               goto deadlock;
 
-       if (__ww_mutex_stamp_after(ctx, hold_ctx)) {
-#ifdef CONFIG_DEBUG_MUTEXES
-               DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
-               ctx->contending_lock = ww;
-#endif
-               return -EDEADLK;
+       /*
+        * If there is a waiter in front of us that has a context, then its
+        * stamp is earlier than ours and we must back off.
+        */
+       cur = waiter;
+       list_for_each_entry_continue_reverse(cur, &lock->wait_list, list) {
+               if (cur->ww_ctx)
+                       goto deadlock;
        }
 
        return 0;
+
+deadlock:
+#ifdef CONFIG_DEBUG_MUTEXES
+       DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
+       ctx->contending_lock = ww;
+#endif
+       return -EDEADLK;
 }
 
 static inline int __sched
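
Note for reviewers: the backward walk relies on waiters being added in
stamp order, so the first predecessor with a context already decides the
question. list_for_each_entry_continue_reverse() starts at the waiter
just in front of us and runs towards the head of the list; open-coded it
is roughly the following (sketch only, not part of the patch):

  struct mutex_waiter *cur;

  for (cur = list_prev_entry(waiter, list);
       &cur->list != &lock->wait_list;
       cur = list_prev_entry(cur, list)) {
          if (cur->ww_ctx)
                  goto deadlock;  /* predecessor has an earlier stamp */
  }
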
@@ -654,6 +665,16 @@ __ww_mutex_add_waiter(struct mutex_waiter *waiter,
                        }
 
                        list_add_tail(&waiter->list, &cur->list);
+
+                       /*
+                        * Wake up the waiter so that it gets a chance to back
+                        * off.
+                        */
+                       if (cur->ww_ctx->acquired > 0) {
+                               debug_mutex_wake_waiter(lock, cur);
+                               wake_up_process(cur->task);
+                       }
+
                        return 0;
                }
        }
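
Why the wake-up: a sleeping waiter only re-evaluates the deadlock
condition when it runs again, so after queueing in front of cur we wake
it to give it a chance to notice us and back off. Paraphrased (heavily
simplified, not the literal code), the wait loop that the woken task
re-enters has this shape; the real call site is in the next hunk:

  for (;;) {
          /* try to acquire the lock; break out on success (elided) */

          if (use_ww_ctx && ww_ctx && ww_ctx->acquired > 0) {
                  ret = __ww_mutex_lock_check_stamp(lock, &waiter, ww_ctx);
                  if (ret)
                          goto err;       /* back off with -EDEADLK */
          }

          set_current_state(state);
          schedule_preempt_disabled();    /* sleep until woken */
  }
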
@@ -756,7 +777,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                }
 
                if (use_ww_ctx && ww_ctx && ww_ctx->acquired > 0) {
-                       ret = __ww_mutex_lock_check_stamp(lock, ww_ctx);
+                       ret = __ww_mutex_lock_check_stamp(lock, &waiter,
+                                                         ww_ctx);
                        if (ret)
                                goto err;
                }
-- 
2.7.4
