> The atomic ops on futex need to provide the full barrier just like
> regular atomics in the kernel.
> 
> Also remove pagefault_enable/disable in futex_atomic_cmpxchg_inatomic(),
> as the core code already does that.
> 
> Cc: David Hildenbrand <[email protected]>
> Cc: Peter Zijlstra (Intel) <[email protected]>
> Cc: Thomas Gleixner <[email protected]>
> Cc: Michel Lespinasse <[email protected]>
> Signed-off-by: Vineet Gupta <[email protected]>
> ---
>  arch/arc/include/asm/futex.h | 12 ++++++++----
>  1 file changed, 8 insertions(+), 4 deletions(-)
> 
> diff --git a/arch/arc/include/asm/futex.h b/arch/arc/include/asm/futex.h
> index 70cfe16b742d..160656d0a15a 100644
> --- a/arch/arc/include/asm/futex.h
> +++ b/arch/arc/include/asm/futex.h
> @@ -20,6 +20,7 @@
> 
>  #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)\
>                                                       \
> +     smp_mb();                                       \
>       __asm__ __volatile__(                           \
>       "1:     llock   %1, [%2]                \n"     \
>               insn                            "\n"    \
> @@ -40,12 +41,14 @@
>                                                       \
>       : "=&r" (ret), "=&r" (oldval)                   \
>       : "r" (uaddr), "r" (oparg), "ir" (-EFAULT)      \
> -     : "cc", "memory")
> +     : "cc", "memory");                              \
> +     smp_mb();                                       \

I think you should drop the ; after the final smp_mb(): the callers already
terminate the macro invocation with a semicolon of their own.
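
To make that concrete, futex_atomic_op_inuser() invokes the macro as a
statement, roughly like this (quoting from memory, the exact insn string
doesn't matter):

        case FUTEX_OP_SET:
                __futex_atomic_op("mov %0, %3", ret, oldval, uaddr, oparg);
                break;

so the caller's semicolon plus the one after the final smp_mb() leaves a
stray empty statement behind every invocation.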

> 
>  #else        /* !CONFIG_ARC_HAS_LLSC */
> 
>  #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)\
>                                                       \
> +     smp_mb();                                       \
>       __asm__ __volatile__(                           \
>       "1:     ld      %1, [%2]                \n"     \
>               insn                            "\n"    \
> @@ -65,7 +68,8 @@
>                                                       \
>       : "=&r" (ret), "=&r" (oldval)                   \
>       : "r" (uaddr), "r" (oparg), "ir" (-EFAULT)      \
> -     : "cc", "memory")
> +     : "cc", "memory");                              \
> +     smp_mb();                                       \

Ditto, the trailing ; should go here as well.

> 
>  #endif
> 
> @@ -151,7 +155,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval,
>       if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
>               return -EFAULT;
> 
> -     pagefault_disable();
> +     smp_mb();
> 
>       __asm__ __volatile__(
>  #ifdef CONFIG_ARC_HAS_LLSC
> @@ -178,7 +182,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval,
>       : "r"(oldval), "r"(newval), "r"(uaddr), "ir"(-EFAULT)
>       : "cc", "memory");
> 
> -     pagefault_enable();
> +     smp_mb();
> 
>       *uval = val;
>       return val;

Looks like the pagefault_disable()/enable() magic is only required for
futex_atomic_op_inuser(). So this should be fine (and arc seems to be the
only arch left that still has it in _inatomic).
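
For reference, the generic code already wraps the _inatomic helper with the
pagefault dance; kernel/futex.c has something along these lines (sketched
from memory, so treat it as an illustration rather than a verbatim quote):

        static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
                                              u32 uval, u32 newval)
        {
                int ret;

                pagefault_disable();    /* faults disabled by the core ... */
                ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
                pagefault_enable();     /* ... and re-enabled here */

                return ret;
        }

so doing it again inside the arch helper is redundant.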

Not sure if you want to change the comment:

/* Compare-xchg with pagefaults disabled.
 *  Notes:
 *      -Best-Effort: Exchg happens only if compare succeeds.

Maybe something like "Compare-xchg: pagefaults have to be disabled by the
caller"

Looks sane to me.

David
