On Thu, Dec 01, 2016 at 03:06:47PM +0100, Nicolai Hähnle wrote:

> @@ -640,10 +640,11 @@ __mutex_lock_common(struct mutex *lock, long state, 
> unsigned int subclass,
>       struct mutex_waiter waiter;
>       unsigned long flags;
>       bool first = false;
> -     struct ww_mutex *ww;
>       int ret;
>  
> -     if (use_ww_ctx) {
> +     if (use_ww_ctx && ww_ctx) {
> +             struct ww_mutex *ww;
> +
>               ww = container_of(lock, struct ww_mutex, base);
>               if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
>                       return -EALREADY;

So I don't see the point of removing *ww from the function scope; we can
still compute that container_of() even if !ww_ctx, right? That would
save a ton of churn below, avoiding all those struct ww_mutex declarations
and container_of() casts.

(and note that the container_of() is a fancy NO-OP because base is the
first member).

> @@ -656,8 +657,12 @@ __mutex_lock_common(struct mutex *lock, long state, 
> unsigned int subclass,
>           mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, false)) {
>               /* got the lock, yay! */
>               lock_acquired(&lock->dep_map, ip);
> -             if (use_ww_ctx)
> +             if (use_ww_ctx && ww_ctx) {
> +                     struct ww_mutex *ww;
> +
> +                     ww = container_of(lock, struct ww_mutex, base);
>                       ww_mutex_set_context_fastpath(ww, ww_ctx);
> +             }
>               preempt_enable();
>               return 0;
>       }
> @@ -702,7 +707,7 @@ __mutex_lock_common(struct mutex *lock, long state, 
> unsigned int subclass,
>                       goto err;
>               }
>  
> -             if (use_ww_ctx && ww_ctx->acquired > 0) {
> +             if (use_ww_ctx && ww_ctx && ww_ctx->acquired > 0) {
>                       ret = __ww_mutex_lock_check_stamp(lock, ww_ctx);
>                       if (ret)
>                               goto err;
> @@ -742,8 +747,12 @@ __mutex_lock_common(struct mutex *lock, long state, 
> unsigned int subclass,
>       /* got the lock - cleanup and rejoice! */
>       lock_acquired(&lock->dep_map, ip);
>  
> -     if (use_ww_ctx)
> +     if (use_ww_ctx && ww_ctx) {
> +             struct ww_mutex *ww;
> +
> +             ww = container_of(lock, struct ww_mutex, base);
>               ww_mutex_set_context_slowpath(ww, ww_ctx);
> +     }
>  
>       spin_unlock_mutex(&lock->wait_lock, flags);
>       preempt_enable();

All that then reverts to:

-       if (use_ww_ctx)
+       if (use_ww_ctx && ww_ctx)


Reply via email to