[tip:locking/core] locking/atomic/x86: Un-macro-ify atomic ops implementation

2017-07-25 Thread tip-bot for Dmitry Vyukov
Commit-ID:  ba1c9f83f6330a34cc77ef989e183f54c4fe732e
Gitweb: http://git.kernel.org/tip/ba1c9f83f6330a34cc77ef989e183f54c4fe732e
Author: Dmitry Vyukov 
AuthorDate: Sat, 17 Jun 2017 11:15:27 +0200
Committer:  Ingo Molnar 
CommitDate: Wed, 28 Jun 2017 18:55:55 +0200

locking/atomic/x86: Un-macro-ify atomic ops implementation

CPP turns perfectly readable code into a much harder to read syntactic soup.

Ingo suggested to write them out as-is in C and ignore the higher linecount.

Do this.

(As a side effect, plain C functions will be easier to KASAN-instrument as 
well.)
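
[ Illustration, not part of the patch: a minimal userspace sketch of why a
  plain C function is easy to instrument. check_write() stands in for a
  sanitizer hook such as KASAN's, and instr_atomic_and() is a hypothetical
  wrapper name; the kernel's real instrumentation machinery differs, but an
  out-of-line C function gives one obvious place to put the check, which a
  macro-generated asm body does not. ]

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

/* Stand-in for a sanitizer hook; real KASAN checks are more involved. */
static void check_write(const volatile void *addr, size_t size)
{
	printf("write check: %zu bytes at %p\n", size, (void *)addr);
}

/* Hypothetical instrumented wrapper around a plain C atomic op. */
static void instr_atomic_and(int i, atomic_int *v)
{
	check_write(v, sizeof(*v));
	atomic_fetch_and(v, i);		/* C11 analogue of atomic_and() */
}

int main(void)
{
	atomic_int v = 0xff;

	instr_atomic_and(0x0f, &v);
	printf("v = %#x\n", atomic_load(&v));
	return 0;
}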

Suggested-by: Ingo Molnar 
Signed-off-by: Dmitry Vyukov 
Cc: Andrew Morton 
Cc: Andrey Ryabinin 
Cc: Linus Torvalds 
Cc: Mark Rutland 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Cc: kasan-...@googlegroups.com
Cc: will.dea...@arm.com
Link: http://lkml.kernel.org/r/a35b983dd3be937a3cf63c4e2db487de2cdc7b8f.1497690003.git.dvyu...@google.com
[ Beautified the C code some more and twiddled the changelog
  to mention the linecount increase and the KASAN benefit. ]
Signed-off-by: Ingo Molnar 
---
 arch/x86/include/asm/atomic.h  | 69 +---
 arch/x86/include/asm/atomic64_32.h | 81 +++---
 arch/x86/include/asm/atomic64_64.h | 67 ---
 3 files changed, 147 insertions(+), 70 deletions(-)

diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 33380b8..0874ebd 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -197,35 +197,56 @@ static inline int atomic_xchg(atomic_t *v, int new)
return xchg(&v->counter, new);
 }
 
-#define ATOMIC_OP(op)  \
-static inline void atomic_##op(int i, atomic_t *v) \
-{  \
-   asm volatile(LOCK_PREFIX #op"l %1,%0"   \
-   : "+m" (v->counter) \
-   : "ir" (i)  \
-   : "memory");\
+static inline void atomic_and(int i, atomic_t *v)
+{
+   asm volatile(LOCK_PREFIX "andl %1,%0"
+   : "+m" (v->counter)
+   : "ir" (i)
+   : "memory");
+}
+
+static inline int atomic_fetch_and(int i, atomic_t *v)
+{
+   int val = atomic_read(v);
+
+   do { } while (!atomic_try_cmpxchg(v, &val, val & i));
+
+   return val;
 }
 
-#define ATOMIC_FETCH_OP(op, c_op)  \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
-{  \
-   int val = atomic_read(v);   \
-   do {\
-   } while (!atomic_try_cmpxchg(v, &val, val c_op i)); \
-   return val; \
+static inline void atomic_or(int i, atomic_t *v)
+{
+   asm volatile(LOCK_PREFIX "orl %1,%0"
+   : "+m" (v->counter)
+   : "ir" (i)
+   : "memory");
 }
 
-#define ATOMIC_OPS(op, c_op)   \
-   ATOMIC_OP(op)   \
-   ATOMIC_FETCH_OP(op, c_op)
+static inline int atomic_fetch_or(int i, atomic_t *v)
+{
+   int val = atomic_read(v);
 
-ATOMIC_OPS(and, &)
-ATOMIC_OPS(or , |)
-ATOMIC_OPS(xor, ^)
+   do { } while (!atomic_try_cmpxchg(v, &val, val | i));
 
-#undef ATOMIC_OPS
-#undef ATOMIC_FETCH_OP
-#undef ATOMIC_OP
+   return val;
+}
+
+static inline void atomic_xor(int i, atomic_t *v)
+{
+   asm volatile(LOCK_PREFIX "xorl %1,%0"
+   : "+m" (v->counter)
+   : "ir" (i)
+   : "memory");
+}
+
+static inline int atomic_fetch_xor(int i, atomic_t *v)
+{
+   int val = atomic_read(v);
+
+   do { } while (!atomic_try_cmpxchg(v, &val, val ^ i));
+
+   return val;
+}
 
 /**
  * __atomic_add_unless - add unless the number is already a given value
@@ -239,10 +260,12 @@ ATOMIC_OPS(xor, ^)
 static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
int c = atomic_read(v);
+
do {
if (unlikely(c == u))
break;
} while (!atomic_try_cmpxchg(v, &c, c + a));
+
return c;
 }
 
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
index 71d7705..9e206f3 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -312,37 +312,70 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
 #undef alternative_atomic64
 #undef __alternative_atomic64
 
-#define ATOMIC64_OP(op, c_op) 
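
[ Illustration, not part of the patch: the open-coded atomic_fetch_*()
  helpers above all use the same try-cmpxchg retry loop. Below is a runnable
  userspace analogue written with C11 atomics rather than the kernel's
  atomic_try_cmpxchg(); fetch_and_sketch() is an illustrative name only.
  Like atomic_try_cmpxchg(), atomic_compare_exchange_weak() refreshes 'val'
  with the currently observed value when it fails, so the loop body can stay
  empty. (C11 also offers atomic_fetch_and() directly; the explicit loop is
  spelled out just to mirror the kernel pattern.) ]

#include <stdatomic.h>
#include <stdio.h>

/* Returns the old value after installing (old & i), like atomic_fetch_and(). */
static int fetch_and_sketch(atomic_int *v, int i)
{
	int val = atomic_load(v);

	/* On failure 'val' is refreshed with the current value and we retry. */
	while (!atomic_compare_exchange_weak(v, &val, val & i))
		;

	return val;
}

int main(void)
{
	atomic_int v = 0xff;
	int old = fetch_and_sketch(&v, 0x0f);

	printf("old = %#x, new = %#x\n", old, atomic_load(&v));
	return 0;
}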

[tip:locking/core] locking/atomic/x86: Un-macro-ify atomic ops implementation

2017-06-22 Thread tip-bot for Dmitry Vyukov
Commit-ID:  0f2376eb0ff8851124c876eb81806d7ec1b421d1
Gitweb: http://git.kernel.org/tip/0f2376eb0ff8851124c876eb81806d7ec1b421d1
Author: Dmitry Vyukov 
AuthorDate: Sat, 17 Jun 2017 11:15:27 +0200
Committer:  Ingo Molnar 
CommitDate: Thu, 22 Jun 2017 10:19:56 +0200

locking/atomic/x86: Un-macro-ify atomic ops implementation

CPP turns perfectly readable code into a much harder to read syntactic soup.

Ingo suggested to write them out as-is in C and ignore the higher linecount.

Do this.

(As a side effect, plain C functions will be easier to KASAN-instrument as 
well.)

Suggested-by: Ingo Molnar 
Signed-off-by: Dmitry Vyukov 
Cc: Andrew Morton 
Cc: Andrey Ryabinin 
Cc: Linus Torvalds 
Cc: Mark Rutland 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Cc: kasan-...@googlegroups.com
Cc: will.dea...@arm.com
Link: http://lkml.kernel.org/r/a35b983dd3be937a3cf63c4e2db487de2cdc7b8f.1497690003.git.dvyu...@google.com
[ Beautified the C code some more and twiddled the changelog
  to mention the linecount increase and the KASAN benefit. ]
Signed-off-by: Ingo Molnar 
---
 arch/x86/include/asm/atomic.h  | 69 +---
 arch/x86/include/asm/atomic64_32.h | 81 +++---
 arch/x86/include/asm/atomic64_64.h | 67 ---
 3 files changed, 147 insertions(+), 70 deletions(-)

diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 33380b8..0874ebd 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -197,35 +197,56 @@ static inline int atomic_xchg(atomic_t *v, int new)
return xchg(&v->counter, new);
 }
 
-#define ATOMIC_OP(op)  \
-static inline void atomic_##op(int i, atomic_t *v) \
-{  \
-   asm volatile(LOCK_PREFIX #op"l %1,%0"   \
-   : "+m" (v->counter) \
-   : "ir" (i)  \
-   : "memory");\
+static inline void atomic_and(int i, atomic_t *v)
+{
+   asm volatile(LOCK_PREFIX "andl %1,%0"
+   : "+m" (v->counter)
+   : "ir" (i)
+   : "memory");
+}
+
+static inline int atomic_fetch_and(int i, atomic_t *v)
+{
+   int val = atomic_read(v);
+
+   do { } while (!atomic_try_cmpxchg(v, &val, val & i));
+
+   return val;
 }
 
-#define ATOMIC_FETCH_OP(op, c_op)  \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
-{  \
-   int val = atomic_read(v);   \
-   do {\
-   } while (!atomic_try_cmpxchg(v, &val, val c_op i)); \
-   return val; \
+static inline void atomic_or(int i, atomic_t *v)
+{
+   asm volatile(LOCK_PREFIX "orl %1,%0"
+   : "+m" (v->counter)
+   : "ir" (i)
+   : "memory");
 }
 
-#define ATOMIC_OPS(op, c_op)   \
-   ATOMIC_OP(op)   \
-   ATOMIC_FETCH_OP(op, c_op)
+static inline int atomic_fetch_or(int i, atomic_t *v)
+{
+   int val = atomic_read(v);
 
-ATOMIC_OPS(and, &)
-ATOMIC_OPS(or , |)
-ATOMIC_OPS(xor, ^)
+   do { } while (!atomic_try_cmpxchg(v, &val, val | i));
 
-#undef ATOMIC_OPS
-#undef ATOMIC_FETCH_OP
-#undef ATOMIC_OP
+   return val;
+}
+
+static inline void atomic_xor(int i, atomic_t *v)
+{
+   asm volatile(LOCK_PREFIX "xorl %1,%0"
+   : "+m" (v->counter)
+   : "ir" (i)
+   : "memory");
+}
+
+static inline int atomic_fetch_xor(int i, atomic_t *v)
+{
+   int val = atomic_read(v);
+
+   do { } while (!atomic_try_cmpxchg(v, &val, val ^ i));
+
+   return val;
+}
 
 /**
  * __atomic_add_unless - add unless the number is already a given value
@@ -239,10 +260,12 @@ ATOMIC_OPS(xor, ^)
 static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
int c = atomic_read(v);
+
do {
if (unlikely(c == u))
break;
} while (!atomic_try_cmpxchg(v, &c, c + a));
+
return c;
 }
 
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
index 71d7705..9e206f3 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -312,37 +312,70 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
 #undef alternative_atomic64
 #undef __alternative_atomic64
 
-#define ATOMIC64_OP(op, c_op)