On Thu, Sep 09, 2021 at 11:32:18AM +0200, Maarten Lankhorst wrote:
> diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
> index d456579d0952..791c28005eef 100644
> --- a/kernel/locking/mutex.c
> +++ b/kernel/locking/mutex.c
> @@ -736,6 +736,44 @@ __ww_mutex_lock(struct mutex *lock, unsigned int state, 
> unsigned int subclass,
>       return __mutex_lock_common(lock, state, subclass, NULL, ip, ww_ctx, 
> true);
>  }
>  
> +/**
> + * ww_mutex_trylock - tries to acquire the w/w mutex with optional acquire 
> context
> + * @lock: mutex to lock
> + * @ctx: optional w/w acquire context
> + *
> + * Trylocks a mutex with the optional acquire context; no deadlock detection 
> is
> + * possible. Returns 1 if the mutex has been acquired successfully, 0 
> otherwise.
> + *
> + * Unlike ww_mutex_lock, no deadlock handling is performed. However, if a 
> @ctx is
> + * specified, -EALREADY and -EDEADLK handling may happen in calls to 
> ww_mutex_lock.
> + *
> + * A mutex acquired with this function must be released with ww_mutex_unlock.
> + */
> +int __sched
> +ww_mutex_trylock(struct ww_mutex *ww, struct ww_acquire_ctx *ctx)
> +{
> +     bool locked;
> +
> +     if (!ctx)
> +             return mutex_trylock(&ww->base);
> +
> +#ifdef CONFIG_DEBUG_MUTEXES
> +     DEBUG_LOCKS_WARN_ON(ww->base.magic != &ww->base);
> +#endif
> +
> +     preempt_disable();
> +     locked = __mutex_trylock(&ww->base);
> +
> +     if (locked) {
> +             ww_mutex_set_context_fastpath(ww, ctx);
> +             mutex_acquire_nest(&ww->base.dep_map, 0, 1, &ctx->dep_map, 
> _RET_IP_);
> +     }
> +     preempt_enable();
> +
> +     return locked;
> +}
> +EXPORT_SYMBOL(ww_mutex_trylock);
> +
>  #ifdef CONFIG_DEBUG_LOCK_ALLOC
>  void __sched
>  mutex_lock_nested(struct mutex *lock, unsigned int subclass)

> diff --git a/kernel/locking/ww_rt_mutex.c b/kernel/locking/ww_rt_mutex.c
> index 3f1fff7d2780..c4cb863edb4c 100644
> --- a/kernel/locking/ww_rt_mutex.c
> +++ b/kernel/locking/ww_rt_mutex.c
> @@ -50,6 +50,18 @@ __ww_rt_mutex_lock(struct ww_mutex *lock, struct 
> ww_acquire_ctx *ww_ctx,
>       return ret;
>  }
>  
> +int __sched
> +ww_mutex_trylock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
> +{
> +     int locked = rt_mutex_trylock(&lock->base);
> +
> +     if (locked && ctx)
> +             ww_mutex_set_context_fastpath(lock, ctx);
> +
> +     return locked;
> +}
> +EXPORT_SYMBOL(ww_mutex_trylock);
> +
>  int __sched
>  ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
>  {

That doesn't look right, how's this for you?

---
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -94,6 +94,9 @@ static inline unsigned long __owner_flag
        return owner & MUTEX_FLAGS;
 }
 
+/*
+ * Returns: __mutex_owner(lock) on failure or NULL on success.
+ */
static inline struct task_struct *__mutex_trylock_common(struct mutex *lock, bool handoff)
 {
        unsigned long owner, curr = (unsigned long)current;
@@ -736,6 +739,47 @@ __ww_mutex_lock(struct mutex *lock, unsi
        return __mutex_lock_common(lock, state, subclass, NULL, ip, ww_ctx, true);
 }
 
+/**
+ * ww_mutex_trylock - tries to acquire the w/w mutex with optional acquire context
+ * @ww: mutex to lock
+ * @ww_ctx: optional w/w acquire context
+ *
+ * Trylocks a mutex with the optional acquire context; no deadlock detection is
+ * possible. Returns 1 if the mutex has been acquired successfully, 0 otherwise.
+ *
+ * Unlike ww_mutex_lock, no deadlock handling is performed. However, if a @ww_ctx
+ * is specified, -EALREADY handling may happen in calls to ww_mutex_trylock.
+ *
+ * A mutex acquired with this function must be released with ww_mutex_unlock.
+ */
+int ww_mutex_trylock(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
+{
+       if (!ww_ctx)
+               return mutex_trylock(&ww->base);
+
+       MUTEX_WARN_ON(ww->base.magic != &ww->base);
+
+       if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
+               return -EALREADY;
+
+       /*
+        * Reset the wounded flag after a kill. No other process can
+        * race and wound us here, since they can't have a valid owner
+        * pointer if we don't have any locks held.
+        */
+       if (ww_ctx->acquired == 0)
+               ww_ctx->wounded = 0;
+
+       if (__mutex_trylock(&ww->base)) {
+               ww_mutex_set_context_fastpath(ww, ww_ctx);
+               mutex_acquire_nest(&ww->base.dep_map, 0, 1, &ww_ctx->dep_map, _RET_IP_);
+               return 1;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(ww_mutex_trylock);
+
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 void __sched
 mutex_lock_nested(struct mutex *lock, unsigned int subclass)
--- a/kernel/locking/ww_rt_mutex.c
+++ b/kernel/locking/ww_rt_mutex.c
@@ -9,6 +9,34 @@
 #define WW_RT
 #include "rtmutex.c"
 
+int ww_mutex_trylock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
+{
+       struct rt_mutex *rtm = &lock->base;
+
+       if (!ww_ctx)
+               return rt_mutex_trylock(rtm);
+
+       if (unlikely(ww_ctx == READ_ONCE(lock->ctx)))
+               return -EALREADY;
+
+       /*
+        * Reset the wounded flag after a kill. No other process can
+        * race and wound us here, since they can't have a valid owner
+        * pointer if we don't have any locks held.
+        */
+       if (ww_ctx->acquired == 0)
+               ww_ctx->wounded = 0;
+
+       if (__rt_mutex_trylock(&rtm->rtmutex)) {
+               ww_mutex_set_context_fastpath(lock, ww_ctx);
+               mutex_acquire_nest(&rtm->dep_map, 0, 1, &ww_ctx->dep_map, _RET_IP_);
+               return 1;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(ww_mutex_trylock);
+
 static int __sched
 __ww_rt_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx,
                   unsigned int state, unsigned long ip)

Reply via email to