The comments are factored out from the code changes to make them
easier to read. Add them separately to explain some non-obvious
aspects.

Signed-off-by: Dmitry Vyukov <dvyu...@google.com>
Acked-by: Mark Rutland <mark.rutl...@arm.com>
Reviewed-by: Andrey Ryabinin <aryabi...@virtuozzo.com>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Will Deacon <will.dea...@arm.com>
Cc: Andrew Morton <a...@linux-foundation.org>
Cc: Ingo Molnar <mi...@redhat.com>
Cc: kasan-...@googlegroups.com
Cc: linux...@kvack.org
Cc: linux-kernel@vger.kernel.org
Cc: x...@kernel.org

---

Changes since v3:
 - rephrase comment in arch_atomic_read()
---
 arch/x86/include/asm/atomic.h             |  4 ++++
 include/asm-generic/atomic-instrumented.h | 30 ++++++++++++++++++++++++++++++
 2 files changed, 34 insertions(+)

diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 03dd2a6cf7e2..b1cd05da5d8a 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -23,6 +23,10 @@
  */
 static __always_inline int arch_atomic_read(const atomic_t *v)
 {
+       /*
+        * Note for KASAN: we deliberately don't use READ_ONCE_NOCHECK() here,
+        * since it's a non-inlined function that increases binary size and stack usage.
+        */
        return READ_ONCE((v)->counter);
 }
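For context, the wrapper that atomic-instrumented.h provides for this
operation performs the KASAN check itself before forwarding, roughly
along these lines (a minimal sketch of the pattern):

	static __always_inline int atomic_read(const atomic_t *v)
	{
		kasan_check_read(v, sizeof(*v));
		return arch_atomic_read(v);
	}

The check thus happens once in the wrapper, and the arch implementation
stays small and inlinable.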
 
diff --git a/include/asm-generic/atomic-instrumented.h b/include/asm-generic/atomic-instrumented.h
index a0f5b7525bb2..5771439e7a31 100644
--- a/include/asm-generic/atomic-instrumented.h
+++ b/include/asm-generic/atomic-instrumented.h
@@ -1,3 +1,15 @@
+/*
+ * This file provides wrappers with KASAN instrumentation for atomic operations.
+ * To use this functionality an arch's atomic.h file needs to define all
+ * atomic operations with an arch_ prefix (e.g. arch_atomic_read()) and include
+ * this file at the end. This file provides atomic_read() that forwards to
+ * arch_atomic_read() for the actual atomic operation.
+ * Note: if an arch atomic operation is implemented by means of other atomic
+ * operations (e.g. an atomic_read()/atomic_cmpxchg() loop), then it needs to
+ * use the arch_ variants (i.e. arch_atomic_read()/arch_atomic_cmpxchg()) to
+ * avoid double instrumentation.
+ */
+
 #ifndef _LINUX_ATOMIC_INSTRUMENTED_H
 #define _LINUX_ATOMIC_INSTRUMENTED_H
 
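To make the intended layering concrete, an arch's atomic.h ends up
shaped roughly like this (a schematic sketch, not the actual x86 file):

	/* arch/xxx/include/asm/atomic.h */
	static __always_inline int arch_atomic_read(const atomic_t *v)
	{
		return READ_ONCE((v)->counter);
	}

	/* ... all remaining arch_atomic_*() operations ... */

	#include <asm-generic/atomic-instrumented.h>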
@@ -336,6 +348,15 @@ static __always_inline bool atomic64_add_negative(s64 i, atomic64_t *v)
        return arch_atomic64_add_negative(i, v);
 }
 
+/*
+ * In the following macros we need to be careful to not clash with arch_ macros.
+ * arch_xchg() can be defined as an extended statement expression as well:
+ * if we declare a __ptr variable, arch_xchg() also declares a __ptr variable,
+ * and we pass __ptr as an argument to arch_xchg(), then it uses its own
+ * __ptr instead of ours. This leads to unpleasant crashes. To avoid the
+ * problem, the following macros declare variables with lots of underscores.
+ */
+
 #define cmpxchg(ptr, old, new)                         \
 ({                                                     \
        __typeof__(ptr) ___ptr = (ptr);                 \
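The shadowing hazard described in the comment above can be reproduced in
isolation. In this standalone sketch (hypothetical macros, not kernel
code), the inner macro's __ptr shadows the outer one at the point of its
own initializer, so it reads an uninitialized pointer:

	#include <stdio.h>

	/* Both macros declare a local named __ptr, as arch_xchg() might. */
	#define inner_deref(p)	({ __typeof__(p) __ptr = (p); *__ptr; })
	#define outer_deref(p)	({ __typeof__(p) __ptr = (p); inner_deref(__ptr); })

	int main(void)
	{
		int x = 42;
		/*
		 * Inside inner_deref() the expansion is
		 * "__typeof__(__ptr) __ptr = (__ptr);": the initializer names
		 * the freshly declared (uninitialized) __ptr, not the outer
		 * one, so this dereferences garbage.
		 */
		printf("%d\n", outer_deref(&x));
		return 0;
	}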
@@ -371,6 +392,15 @@ static __always_inline bool atomic64_add_negative(s64 i, atomic64_t *v)
        arch_cmpxchg64_local(____ptr, (old), (new));    \
 })
 
+/*
+ * Originally we had the following code here:
+ *     __typeof__(p1) ____p1 = (p1);
+ *     kasan_check_write(____p1, 2 * sizeof(*____p1));
+ *     arch_cmpxchg_double(____p1, (p2), (o1), (o2), (n1), (n2));
+ * But it leads to compilation failures (see gcc issue 72873).
+ * So for now it's left non-instrumented.
+ * There are few callers of cmpxchg_double(), so it's not critical.
+ */
 #define cmpxchg_double(p1, p2, o1, o2, n1, n2)                         \
 ({                                                                     \
        arch_cmpxchg_double((p1), (p2), (o1), (o2), (n1), (n2));        \
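For reference, the full shape of one of the instrumented macros from the
hunk above (the diff context cuts it off after its first line) is
roughly the following reconstruction:

	#define cmpxchg(ptr, old, new)				\
	({							\
		__typeof__(ptr) ___ptr = (ptr);			\
		kasan_check_write(___ptr, sizeof(*___ptr));	\
		arch_cmpxchg(___ptr, (old), (new));		\
	})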
-- 
2.13.1.611.g7e3b11ae1-goog
