[tip:locking/core] locking/atomics: Instrument xchg()

2018-07-25 Thread tip-bot for Mark Rutland
Commit-ID:  f9881cc43b118efc6f82fef2d121166113ee9f8e
Gitweb: https://git.kernel.org/tip/f9881cc43b118efc6f82fef2d121166113ee9f8e
Author: Mark Rutland 
AuthorDate: Mon, 16 Jul 2018 12:30:09 +0100
Committer:  Ingo Molnar 
CommitDate: Wed, 25 Jul 2018 11:53:59 +0200

locking/atomics: Instrument xchg()

While we instrument all of the (non-relaxed) atomic_*() functions and
cmpxchg(), we missed xchg().

Let's add instrumentation for xchg(), fixing up x86 to implement
arch_xchg().

Signed-off-by: Mark Rutland 
Acked-by: Peter Zijlstra (Intel) 
Acked-by: Will Deacon 
Cc: Boqun Feng 
Cc: Dmitry Vyukov 
Cc: Linus Torvalds 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Cc: andy.shevche...@gmail.com
Cc: a...@arndb.de
Cc: aryabi...@virtuozzo.com
Cc: catalin.mari...@arm.com
Cc: gli...@google.com
Cc: linux-arm-ker...@lists.infradead.org
Cc: parri.and...@gmail.com
Cc: pe...@hurleysoftware.com
Link: http://lkml.kernel.org/r/20180716113017.3909-5-mark.rutl...@arm.com
Signed-off-by: Ingo Molnar 
---
 arch/x86/include/asm/atomic.h | 2 +-
 arch/x86/include/asm/atomic64_64.h| 2 +-
 arch/x86/include/asm/cmpxchg.h| 2 +-
 include/asm-generic/atomic-instrumented.h | 7 +++
 4 files changed, 10 insertions(+), 3 deletions(-)

diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 823fd2f320cf..b143717b92b3 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -202,7 +202,7 @@ static __always_inline bool 
arch_atomic_try_cmpxchg(atomic_t *v, int *old, int n
 
 static inline int arch_atomic_xchg(atomic_t *v, int new)
 {
-   return xchg(&v->counter, new);
+   return arch_xchg(&v->counter, new);
 }
 
 static inline void arch_atomic_and(int i, atomic_t *v)
diff --git a/arch/x86/include/asm/atomic64_64.h 
b/arch/x86/include/asm/atomic64_64.h
index 849f1c566a11..4343d9b4f30e 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -188,7 +188,7 @@ static __always_inline bool 
arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, l
 
 static inline long arch_atomic64_xchg(atomic64_t *v, long new)
 {
-   return xchg(&v->counter, new);
+   return arch_xchg(&v->counter, new);
 }
 
 static inline void arch_atomic64_and(long i, atomic64_t *v)
diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index e3efd8a06066..a55d79b233d3 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -75,7 +75,7 @@ extern void __add_wrong_size(void)
  * use "asm volatile" and "memory" clobbers to prevent gcc from moving
  * information around.
  */
-#define xchg(ptr, v)   __xchg_op((ptr), (v), xchg, "")
+#define arch_xchg(ptr, v)  __xchg_op((ptr), (v), xchg, "")
 
 /*
  * Atomic compare and exchange.  Compare OLD with MEM, if identical,
diff --git a/include/asm-generic/atomic-instrumented.h 
b/include/asm-generic/atomic-instrumented.h
index c7c3e4cdd942..53481b6eacdf 100644
--- a/include/asm-generic/atomic-instrumented.h
+++ b/include/asm-generic/atomic-instrumented.h
@@ -408,6 +408,13 @@ static __always_inline bool atomic64_add_negative(s64 i, 
atomic64_t *v)
 }
 #endif
 
+#define xchg(ptr, new) \
+({ \
+   typeof(ptr) __ai_ptr = (ptr);   \
+   kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+   arch_xchg(__ai_ptr, (new)); \
+})
+
 #define cmpxchg(ptr, old, new) \
 ({ \
typeof(ptr) __ai_ptr = (ptr);   \


[tip:locking/core] locking/atomics: Instrument xchg()

2018-07-25 Thread tip-bot for Mark Rutland
Commit-ID:  f9881cc43b118efc6f82fef2d121166113ee9f8e
Gitweb: https://git.kernel.org/tip/f9881cc43b118efc6f82fef2d121166113ee9f8e
Author: Mark Rutland 
AuthorDate: Mon, 16 Jul 2018 12:30:09 +0100
Committer:  Ingo Molnar 
CommitDate: Wed, 25 Jul 2018 11:53:59 +0200

locking/atomics: Instrument xchg()

While we instrument all of the (non-relaxed) atomic_*() functions and
cmpxchg(), we missed xchg().

Let's add instrumentation for xchg(), fixing up x86 to implement
arch_xchg().

Signed-off-by: Mark Rutland 
Acked-by: Peter Zijlstra (Intel) 
Acked-by: Will Deacon 
Cc: Boqun Feng 
Cc: Dmitry Vyukov 
Cc: Linus Torvalds 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Cc: andy.shevche...@gmail.com
Cc: a...@arndb.de
Cc: aryabi...@virtuozzo.com
Cc: catalin.mari...@arm.com
Cc: gli...@google.com
Cc: linux-arm-ker...@lists.infradead.org
Cc: parri.and...@gmail.com
Cc: pe...@hurleysoftware.com
Link: http://lkml.kernel.org/r/20180716113017.3909-5-mark.rutl...@arm.com
Signed-off-by: Ingo Molnar 
---
 arch/x86/include/asm/atomic.h | 2 +-
 arch/x86/include/asm/atomic64_64.h| 2 +-
 arch/x86/include/asm/cmpxchg.h| 2 +-
 include/asm-generic/atomic-instrumented.h | 7 +++
 4 files changed, 10 insertions(+), 3 deletions(-)

diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 823fd2f320cf..b143717b92b3 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -202,7 +202,7 @@ static __always_inline bool 
arch_atomic_try_cmpxchg(atomic_t *v, int *old, int n
 
 static inline int arch_atomic_xchg(atomic_t *v, int new)
 {
-   return xchg(&v->counter, new);
+   return arch_xchg(&v->counter, new);
 }
 
 static inline void arch_atomic_and(int i, atomic_t *v)
diff --git a/arch/x86/include/asm/atomic64_64.h 
b/arch/x86/include/asm/atomic64_64.h
index 849f1c566a11..4343d9b4f30e 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -188,7 +188,7 @@ static __always_inline bool 
arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, l
 
 static inline long arch_atomic64_xchg(atomic64_t *v, long new)
 {
-   return xchg(&v->counter, new);
+   return arch_xchg(&v->counter, new);
 }
 
 static inline void arch_atomic64_and(long i, atomic64_t *v)
diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index e3efd8a06066..a55d79b233d3 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -75,7 +75,7 @@ extern void __add_wrong_size(void)
  * use "asm volatile" and "memory" clobbers to prevent gcc from moving
  * information around.
  */
-#define xchg(ptr, v)   __xchg_op((ptr), (v), xchg, "")
+#define arch_xchg(ptr, v)  __xchg_op((ptr), (v), xchg, "")
 
 /*
  * Atomic compare and exchange.  Compare OLD with MEM, if identical,
diff --git a/include/asm-generic/atomic-instrumented.h 
b/include/asm-generic/atomic-instrumented.h
index c7c3e4cdd942..53481b6eacdf 100644
--- a/include/asm-generic/atomic-instrumented.h
+++ b/include/asm-generic/atomic-instrumented.h
@@ -408,6 +408,13 @@ static __always_inline bool atomic64_add_negative(s64 i, 
atomic64_t *v)
 }
 #endif
 
+#define xchg(ptr, new) \
+({ \
+   typeof(ptr) __ai_ptr = (ptr);   \
+   kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+   arch_xchg(__ai_ptr, (new)); \
+})
+
 #define cmpxchg(ptr, old, new) \
 ({ \
typeof(ptr) __ai_ptr = (ptr);   \