Re: [PATCH 05/11] locking/ww_mutex: Add waiters in stamp order

2016-11-30 Thread Chris Wilson
On Mon, Nov 28, 2016 at 01:20:06PM +0100, Nicolai Hähnle wrote:
> From: Nicolai Hähnle 
> 
> Add regular waiters in stamp order. Keep adding waiters that have no
> context in FIFO order and take care not to starve them.
> 
> While adding our task as a waiter, back off if we detect that there is a
> waiter with a lower stamp in front of us.
> 
> Make sure to call lock_contended even when we back off early.

I'm hitting
[   86.202749] WARNING: CPU: 1 PID: 813 at ./include/linux/ww_mutex.h:292 
stress_inorder_work+0x436/0x4b5 [test_ww_mutex]
[   86.202885] DEBUG_LOCKS_WARN_ON(!ctx->contending_lock)

which if I understand correctly is due to

> +static inline int __sched
> +__ww_mutex_add_waiter(struct mutex_waiter *waiter,
> +   struct mutex *lock,
> +   struct ww_acquire_ctx *ww_ctx)
> +{
> + if (ww_ctx) {
> + struct mutex_waiter *cur;
> +
> + /*
> +  * Add the waiter before the first waiter with a higher stamp.
> +  * Waiters without a context are skipped to avoid starving
> +  * them.
> +  */
> + list_for_each_entry(cur, &lock->wait_list, list) {
> + if (!cur->ww_ctx)
> + continue;
> +
> + if (__ww_mutex_stamp_after(ww_ctx, cur->ww_ctx)) {
> + /* Back off immediately if necessary. */
> + if (ww_ctx->acquired > 0)
> + return -EDEADLK;

not setting ww_ctx->contending_lock here.

> +
> + continue;
> + }
> +
> + list_add_tail(&waiter->list, &cur->list);
> + return 0;
> + }
> + }
> +
> + list_add_tail(&waiter->list, &lock->wait_list);
> + return 0;
> +}

-- 
Chris Wilson, Intel Open Source Technology Centre


Re: [PATCH 05/11] locking/ww_mutex: Add waiters in stamp order

2016-11-30 Thread Chris Wilson
On Mon, Nov 28, 2016 at 01:20:06PM +0100, Nicolai Hähnle wrote:
> From: Nicolai Hähnle 
> 
> Add regular waiters in stamp order. Keep adding waiters that have no
> context in FIFO order and take care not to starve them.
> 
> While adding our task as a waiter, back off if we detect that there is a
> waiter with a lower stamp in front of us.
> 
> Make sure to call lock_contended even when we back off early.

I'm hitting
[   86.202749] WARNING: CPU: 1 PID: 813 at ./include/linux/ww_mutex.h:292 
stress_inorder_work+0x436/0x4b5 [test_ww_mutex]
[   86.202885] DEBUG_LOCKS_WARN_ON(!ctx->contending_lock)

which if I understand correctly is due to

> +static inline int __sched
> +__ww_mutex_add_waiter(struct mutex_waiter *waiter,
> +   struct mutex *lock,
> +   struct ww_acquire_ctx *ww_ctx)
> +{
> + if (ww_ctx) {
> + struct mutex_waiter *cur;
> +
> + /*
> +  * Add the waiter before the first waiter with a higher stamp.
> +  * Waiters without a context are skipped to avoid starving
> +  * them.
> +  */
> + list_for_each_entry(cur, &lock->wait_list, list) {
> + if (!cur->ww_ctx)
> + continue;
> +
> + if (__ww_mutex_stamp_after(ww_ctx, cur->ww_ctx)) {
> + /* Back off immediately if necessary. */
> + if (ww_ctx->acquired > 0)
> + return -EDEADLK;

not setting ww_ctx->contending_lock here.

> +
> + continue;
> + }
> +
> + list_add_tail(&waiter->list, &cur->list);
> + return 0;
> + }
> + }
> +
> + list_add_tail(&waiter->list, &lock->wait_list);
> + return 0;
> +}

-- 
Chris Wilson, Intel Open Source Technology Centre


[PATCH 05/11] locking/ww_mutex: Add waiters in stamp order

2016-11-28 Thread Nicolai Hähnle
From: Nicolai Hähnle 

Add regular waiters in stamp order. Keep adding waiters that have no
context in FIFO order and take care not to starve them.

While adding our task as a waiter, back off if we detect that there is a
waiter with a lower stamp in front of us.

Make sure to call lock_contended even when we back off early.

Cc: Peter Zijlstra 
Cc: Ingo Molnar 
Cc: Maarten Lankhorst 
Cc: Daniel Vetter 
Cc: Chris Wilson 
Cc: dri-de...@lists.freedesktop.org
Signed-off-by: Nicolai Hähnle 
---
 include/linux/mutex.h  |  3 ++
 kernel/locking/mutex.c | 76 +-
 2 files changed, 72 insertions(+), 7 deletions(-)

diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index b97870f..118a3b6 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -20,6 +20,8 @@
 #include 
 #include 
 
+struct ww_acquire_ctx;
+
 /*
  * Simple, straightforward mutexes with strict semantics:
  *
@@ -75,6 +77,7 @@ static inline struct task_struct *__mutex_owner(struct mutex 
*lock)
 struct mutex_waiter {
struct list_headlist;
struct task_struct  *task;
+   struct ww_acquire_ctx   *ww_ctx;
 #ifdef CONFIG_DEBUG_MUTEXES
void*magic;
 #endif
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 585627f..01dcae7 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -628,6 +628,40 @@ __ww_mutex_lock_check_stamp(struct mutex *lock, struct 
ww_acquire_ctx *ctx)
return 0;
 }
 
+static inline int __sched
+__ww_mutex_add_waiter(struct mutex_waiter *waiter,
+ struct mutex *lock,
+ struct ww_acquire_ctx *ww_ctx)
+{
+   if (ww_ctx) {
+   struct mutex_waiter *cur;
+
+   /*
+* Add the waiter before the first waiter with a higher stamp.
+* Waiters without a context are skipped to avoid starving
+* them.
+*/
+   list_for_each_entry(cur, &lock->wait_list, list) {
+   if (!cur->ww_ctx)
+   continue;
+
+   if (__ww_mutex_stamp_after(ww_ctx, cur->ww_ctx)) {
+   /* Back off immediately if necessary. */
+   if (ww_ctx->acquired > 0)
+   return -EDEADLK;
+
+   continue;
+   }
+
+   list_add_tail(&waiter->list, &cur->list);
+   return 0;
+   }
+   }
+
+   list_add_tail(&waiter->list, &lock->wait_list);
+   return 0;
+}
+
 /*
  * Lock a mutex (possibly interruptible), slowpath:
  */
@@ -677,15 +711,25 @@ __mutex_lock_common(struct mutex *lock, long state, 
unsigned int subclass,
debug_mutex_lock_common(lock, &waiter);
debug_mutex_add_waiter(lock, &waiter, task);
 
-   /* add waiting tasks to the end of the waitqueue (FIFO): */
-   list_add_tail(&waiter.list, &lock->wait_list);
+   lock_contended(&lock->dep_map, ip);
+
+   if (!use_ww_ctx) {
+   /* add waiting tasks to the end of the waitqueue (FIFO): */
+   list_add_tail(&waiter.list, &lock->wait_list);
+   } else {
+   /* Add in stamp order, waking up waiters that must back off. */
+   ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
+   if (ret)
+   goto err_early_backoff;
+
+   waiter.ww_ctx = ww_ctx;
+   }
+
waiter.task = task;
 
if (__mutex_waiter_is_first(lock, &waiter))
__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
 
-   lock_contended(&lock->dep_map, ip);
-
set_task_state(task, state);
for (;;) {
/*
@@ -693,8 +737,12 @@ __mutex_lock_common(struct mutex *lock, long state, 
unsigned int subclass,
 * mutex_unlock() handing the lock off to us, do a trylock
 * before testing the error conditions to make sure we pick up
 * the handoff.
+*
+* For w/w locks, we always need to do this even if we're not
+* currently the first waiter, because we may have been the
+* first waiter during the unlock.
 */
-   if (__mutex_trylock(lock, first))
+   if (__mutex_trylock(lock, use_ww_ctx || first))
goto acquired;
 
/*
@@ -716,7 +764,20 @@ __mutex_lock_common(struct mutex *lock, long state, 
unsigned int subclass,
spin_unlock_mutex(&lock->wait_lock, flags);
schedule_preempt_disabled();
 
-   if (!first && __mutex_waiter_is_first(lock, &waiter)) {
+   if (use_ww_ctx && ww_ctx) {
+   /*
+* Always re-check whether we're in first position. We
+   

[PATCH 05/11] locking/ww_mutex: Add waiters in stamp order

2016-11-28 Thread Nicolai Hähnle
From: Nicolai Hähnle 

Add regular waiters in stamp order. Keep adding waiters that have no
context in FIFO order and take care not to starve them.

While adding our task as a waiter, back off if we detect that there is a
waiter with a lower stamp in front of us.

Make sure to call lock_contended even when we back off early.

Cc: Peter Zijlstra 
Cc: Ingo Molnar 
Cc: Maarten Lankhorst 
Cc: Daniel Vetter 
Cc: Chris Wilson 
Cc: dri-de...@lists.freedesktop.org
Signed-off-by: Nicolai Hähnle 
---
 include/linux/mutex.h  |  3 ++
 kernel/locking/mutex.c | 76 +-
 2 files changed, 72 insertions(+), 7 deletions(-)

diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index b97870f..118a3b6 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -20,6 +20,8 @@
 #include 
 #include 
 
+struct ww_acquire_ctx;
+
 /*
  * Simple, straightforward mutexes with strict semantics:
  *
@@ -75,6 +77,7 @@ static inline struct task_struct *__mutex_owner(struct mutex 
*lock)
 struct mutex_waiter {
struct list_headlist;
struct task_struct  *task;
+   struct ww_acquire_ctx   *ww_ctx;
 #ifdef CONFIG_DEBUG_MUTEXES
void*magic;
 #endif
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 585627f..01dcae7 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -628,6 +628,40 @@ __ww_mutex_lock_check_stamp(struct mutex *lock, struct 
ww_acquire_ctx *ctx)
return 0;
 }
 
+static inline int __sched
+__ww_mutex_add_waiter(struct mutex_waiter *waiter,
+ struct mutex *lock,
+ struct ww_acquire_ctx *ww_ctx)
+{
+   if (ww_ctx) {
+   struct mutex_waiter *cur;
+
+   /*
+* Add the waiter before the first waiter with a higher stamp.
+* Waiters without a context are skipped to avoid starving
+* them.
+*/
+   list_for_each_entry(cur, &lock->wait_list, list) {
+   if (!cur->ww_ctx)
+   continue;
+
+   if (__ww_mutex_stamp_after(ww_ctx, cur->ww_ctx)) {
+   /* Back off immediately if necessary. */
+   if (ww_ctx->acquired > 0)
+   return -EDEADLK;
+
+   continue;
+   }
+
+   list_add_tail(&waiter->list, &cur->list);
+   return 0;
+   }
+   }
+
+   list_add_tail(&waiter->list, &lock->wait_list);
+   return 0;
+}
+
 /*
  * Lock a mutex (possibly interruptible), slowpath:
  */
@@ -677,15 +711,25 @@ __mutex_lock_common(struct mutex *lock, long state, 
unsigned int subclass,
debug_mutex_lock_common(lock, &waiter);
debug_mutex_add_waiter(lock, &waiter, task);
 
-   /* add waiting tasks to the end of the waitqueue (FIFO): */
-   list_add_tail(&waiter.list, &lock->wait_list);
+   lock_contended(&lock->dep_map, ip);
+
+   if (!use_ww_ctx) {
+   /* add waiting tasks to the end of the waitqueue (FIFO): */
+   list_add_tail(&waiter.list, &lock->wait_list);
+   } else {
+   /* Add in stamp order, waking up waiters that must back off. */
+   ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
+   if (ret)
+   goto err_early_backoff;
+
+   waiter.ww_ctx = ww_ctx;
+   }
+
waiter.task = task;
 
if (__mutex_waiter_is_first(lock, &waiter))
__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
 
-   lock_contended(&lock->dep_map, ip);
-
set_task_state(task, state);
for (;;) {
/*
@@ -693,8 +737,12 @@ __mutex_lock_common(struct mutex *lock, long state, 
unsigned int subclass,
 * mutex_unlock() handing the lock off to us, do a trylock
 * before testing the error conditions to make sure we pick up
 * the handoff.
+*
+* For w/w locks, we always need to do this even if we're not
+* currently the first waiter, because we may have been the
+* first waiter during the unlock.
 */
-   if (__mutex_trylock(lock, first))
+   if (__mutex_trylock(lock, use_ww_ctx || first))
goto acquired;
 
/*
@@ -716,7 +764,20 @@ __mutex_lock_common(struct mutex *lock, long state, 
unsigned int subclass,
spin_unlock_mutex(&lock->wait_lock, flags);
schedule_preempt_disabled();
 
-   if (!first && __mutex_waiter_is_first(lock, &waiter)) {
+   if (use_ww_ctx && ww_ctx) {
+   /*
+* Always re-check whether we're in first position. We
+* don't want to spin if another task with a lower
+* stamp has taken our position.
+*
+