Now that we have an atomic owner field, we can do explicit lock
handoff to avoid starvation. Starvation is possible because a running
(or optimistically spinning) task can steal the lock before a woken
waiter gets a chance to acquire it. With this patch, a waiter that
wakes up and still fails to take the lock sets the MUTEX_FLAG_HANDOFF
bit; the next unlock then transfers ownership directly to the first
waiter instead of releasing the lock.
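
For illustration, the handoff step maps onto C11 atomics roughly as in
the sketch below (a minimal userspace sketch, not the kernel code;
sketch_mutex, sketch_handoff and the flag names are made up for this
example):

    #include <stdatomic.h>

    #define FLAG_WAITERS 0x01UL  /* wait list is non-empty */
    #define FLAG_HANDOFF 0x02UL  /* first waiter requested a handoff */

    struct sketch_mutex {
            atomic_ulong owner;  /* owning task pointer | flag bits */
    };

    /* Hand the lock straight to @task: keep WAITERS, consume HANDOFF. */
    static void sketch_handoff(struct sketch_mutex *lock, unsigned long task)
    {
            unsigned long owner = atomic_load(&lock->owner);

            while (!atomic_compare_exchange_weak(&lock->owner, &owner,
                                                 (owner & FLAG_WAITERS) | task))
                    ;  /* 'owner' is reloaded on failure; retry */
    }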

Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
---
 kernel/locking/mutex.c |   44 ++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 40 insertions(+), 4 deletions(-)

--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -53,6 +53,7 @@ __mutex_init(struct mutex *lock, const c
 EXPORT_SYMBOL(__mutex_init);
 
 #define MUTEX_FLAG_WAITERS     0x01
+#define MUTEX_FLAG_HANDOFF     0x02
 
 #define MUTEX_FLAG_ALL         0x03
 
@@ -84,6 +85,29 @@ static inline void __mutex_clear_flag(st
        atomic_long_andnot(flag, &lock->owner);
 }
 
+static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
+{
+       return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
+}
+
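+/*
+ * Give up ownership to a specific task: the new owner value keeps the
+ * WAITERS bit, clears HANDOFF and carries @task as the owner pointer.
+ */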
+static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
+{
+       unsigned long owner = atomic_long_read(&lock->owner);
+
+       for (;;) {
+               unsigned long old, new;
+
+               new = (owner & MUTEX_FLAG_WAITERS);
+               new |= (unsigned long)task;
+
+               old = atomic_long_cmpxchg(&lock->owner, owner, new);
+               if (old == owner)
+                       break;
+
+               owner = old;
+       }
+}
+
 #ifndef CONFIG_DEBUG_LOCK_ALLOC
 /*
  * We split the mutex lock/unlock logic into separate fastpath and
@@ -414,7 +438,7 @@ static bool mutex_optimistic_spin(struct
 }
 #endif
 
-static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock);
+static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long owner);
 
 /**
  * mutex_unlock - release the mutex
@@ -439,6 +463,9 @@ void __sched mutex_unlock(struct mutex *
        for (;;) {
                unsigned long old;
 
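+               /*
+                * A waiter holds the HANDOFF bit; we must not clear the
+                * owner field here, the slowpath will transfer ownership
+                * to the first waiter instead.
+                */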
+               if (owner & MUTEX_FLAG_HANDOFF)
+                       break;
+
                old = atomic_long_cmpxchg_release(&lock->owner, owner, owner & 0x03);
                if (old == owner)
                        break;
@@ -447,7 +474,7 @@ void __sched mutex_unlock(struct mutex *
        }
 
        if (owner & 0x03)
-               __mutex_unlock_slowpath(lock);
+               __mutex_unlock_slowpath(lock, owner);
 }
 EXPORT_SYMBOL(mutex_unlock);
 
@@ -545,7 +572,7 @@ __mutex_lock_common(struct mutex *lock,
        list_add_tail(&waiter.list, &lock->wait_list);
        waiter.task = task;
 
-       if (list_first_entry(&lock->wait_list, struct mutex_waiter, list) == &waiter)
+       if (__mutex_waiter_is_first(lock, &waiter))
                __mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
 
        lock_contended(&lock->dep_map, ip);
@@ -573,8 +600,14 @@ __mutex_lock_common(struct mutex *lock,
                schedule_preempt_disabled();
                spin_lock_mutex(&lock->wait_lock, flags);
 
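+               /*
+                * The unlock path may have handed the lock to us while
+                * we slept; in that case we are already the owner.
+                */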
+               if (__mutex_owner(lock) == current)
+                       break;
+
                if (__mutex_trylock(lock))
                        break;
+
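+               /*
+                * We failed to acquire again; if we are the first waiter,
+                * request a handoff so we cannot starve indefinitely.
+                */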
+               if (__mutex_waiter_is_first(lock, &waiter))
+                       __mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
        }
        __set_task_state(task, TASK_RUNNING);
 
@@ -707,7 +740,7 @@ EXPORT_SYMBOL_GPL(__ww_mutex_lock_interr
 /*
  * Release the lock, slowpath:
  */
-static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock)
+static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long owner)
 {
        unsigned long flags;
        WAKE_Q(wake_q);
@@ -722,6 +755,9 @@ static noinline void __sched __mutex_unl
                                list_entry(lock->wait_list.next,
                                           struct mutex_waiter, list);
 
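+               /*
+                * Hand the lock to the first waiter before waking it,
+                * so it finds itself the owner when it runs.
+                */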
+               if (owner & MUTEX_FLAG_HANDOFF)
+                       __mutex_handoff(lock, waiter->task);
+
                debug_mutex_wake_waiter(lock, waiter);
                wake_q_add(&wake_q, waiter->task);
        }

