[tip:locking/core] locking/atomics: Switch to generated fallbacks

2018-11-01 Thread tip-bot for Mark Rutland
Commit-ID:  9fa45070a2e59a871e1cd3370173369f3a4f61e2
Gitweb: https://git.kernel.org/tip/9fa45070a2e59a871e1cd3370173369f3a4f61e2
Author: Mark Rutland 
AuthorDate: Tue, 4 Sep 2018 11:48:26 +0100
Committer:  Ingo Molnar 
CommitDate: Thu, 1 Nov 2018 11:00:46 +0100

locking/atomics: Switch to generated fallbacks

As a step to ensuring the atomic* APIs are consistent, switch to fallbacks
generated by gen-atomic-fallback.sh.

These are checked in rather than generated with Kbuild, since:

* This allows inspection of the atomics with git grep and ctags on a
  pristine tree, which Linus strongly prefers being able to do.

* The fallbacks are not affected by machine details or configuration
  options, so it is not necessary to regenerate them to take these into
  account.

* These are included by files required *very* early in the build process
  (e.g. for generating bounds.h), and we'd rather not complicate the
  top-level Kbuild file with dependencies.

The new fallback header should be equivalent to the old fallbacks in
<linux/atomic.h>, but:

* It is formatted a little differently due to scripting ensuring things
  are more regular than they used to be.

* Fallbacks are now expanded in-place as static inline functions rather
  than macros (a simplified sketch of the difference follows this list).

* The prototypes for fallbacks are arranged consistently with the return
  type on a separate line to try to keep to a sensible line length.
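
Roughly, the shape of that change for a single op, taking
atomic_add_return_acquire() as an example (a simplified sketch; the full
definitions are in the diff below):

    /* Before: the fallback was a macro wrapping __atomic_op_acquire(). */
    #ifndef atomic_add_return_acquire
    #define atomic_add_return_acquire(...) \
        __atomic_op_acquire(atomic_add_return, __VA_ARGS__)
    #endif

    /* After: the fallback is expanded in-place as a static inline. */
    #ifndef atomic_add_return_acquire
    static inline int
    atomic_add_return_acquire(int i, atomic_t *v)
    {
        int ret = atomic_add_return_relaxed(i, v);
        __atomic_acquire_fence();
        return ret;
    }
    #define atomic_add_return_acquire atomic_add_return_acquire
    #endif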

There should be no functional change as a result of this patch.
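
For reference, the __atomic_op_*() helpers that the generated fallbacks rely
on build each ordering variant out of the op's _relaxed form; simplified
from <linux/atomic.h>, they look roughly like:

    /* Acquire variant: relaxed op followed by an acquire fence. */
    #define __atomic_op_acquire(op, args...) \
    ({ \
        typeof(op##_relaxed(args)) __ret = op##_relaxed(args); \
        __atomic_acquire_fence(); \
        __ret; \
    })

    /* Release variant: release fence before the relaxed op. */
    #define __atomic_op_release(op, args...) \
    ({ \
        __atomic_release_fence(); \
        op##_relaxed(args); \
    })

    /* Fully-ordered variant: full fences around the relaxed op. */
    #define __atomic_op_fence(op, args...) \
    ({ \
        typeof(op##_relaxed(args)) __ret; \
        __atomic_pre_full_fence(); \
        __ret = op##_relaxed(args); \
        __atomic_post_full_fence(); \
        __ret; \
    })

Conversely, where an architecture provides only the fully-ordered op (e.g. a
bare xchg()), the _relaxed/_acquire/_release names simply alias it, as the
generated header below does.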

Signed-off-by: Mark Rutland 
Signed-off-by: Peter Zijlstra (Intel) 
Acked-by: Will Deacon 
Cc: linux-arm-ker...@lists.infradead.org
Cc: catalin.mari...@arm.com
Cc: linuxdriv...@attotech.com
Cc: dvyu...@google.com
Cc: Boqun Feng 
Cc: a...@arndb.de
Cc: aryabi...@virtuozzo.com
Cc: gli...@google.com
Link: http://lkml.kernel.org/r/20180904104830.2975-3-mark.rutl...@arm.com
Signed-off-by: Ingo Molnar 
---
 include/linux/atomic-fallback.h | 2294 +++
 include/linux/atomic.h          | 1241 +
 2 files changed, 2295 insertions(+), 1240 deletions(-)

diff --git a/include/linux/atomic-fallback.h b/include/linux/atomic-fallback.h
new file mode 100644
index 000000000000..1c02c0112fbb
--- /dev/null
+++ b/include/linux/atomic-fallback.h
@@ -0,0 +1,2294 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Generated by scripts/atomic/gen-atomic-fallback.sh
+// DO NOT MODIFY THIS FILE DIRECTLY
+
+#ifndef _LINUX_ATOMIC_FALLBACK_H
+#define _LINUX_ATOMIC_FALLBACK_H
+
+#ifndef xchg_relaxed
+#define xchg_relaxed   xchg
+#define xchg_acquire   xchg
+#define xchg_release   xchg
+#else /* xchg_relaxed */
+
+#ifndef xchg_acquire
+#define xchg_acquire(...) \
+   __atomic_op_acquire(xchg, __VA_ARGS__)
+#endif
+
+#ifndef xchg_release
+#define xchg_release(...) \
+   __atomic_op_release(xchg, __VA_ARGS__)
+#endif
+
+#ifndef xchg
+#define xchg(...) \
+   __atomic_op_fence(xchg, __VA_ARGS__)
+#endif
+
+#endif /* xchg_relaxed */
+
+#ifndef cmpxchg_relaxed
+#define cmpxchg_relaxed   cmpxchg
+#define cmpxchg_acquire   cmpxchg
+#define cmpxchg_release   cmpxchg
+#else /* cmpxchg_relaxed */
+
+#ifndef cmpxchg_acquire
+#define cmpxchg_acquire(...) \
+   __atomic_op_acquire(cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef cmpxchg_release
+#define cmpxchg_release(...) \
+   __atomic_op_release(cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef cmpxchg
+#define cmpxchg(...) \
+   __atomic_op_fence(cmpxchg, __VA_ARGS__)
+#endif
+
+#endif /* cmpxchg_relaxed */
+
+#ifndef cmpxchg64_relaxed
+#define cmpxchg64_relaxed  cmpxchg64
+#define cmpxchg64_acquire  cmpxchg64
+#define cmpxchg64_release  cmpxchg64
+#else /* cmpxchg64_relaxed */
+
+#ifndef cmpxchg64_acquire
+#define cmpxchg64_acquire(...) \
+   __atomic_op_acquire(cmpxchg64, __VA_ARGS__)
+#endif
+
+#ifndef cmpxchg64_release
+#define cmpxchg64_release(...) \
+   __atomic_op_release(cmpxchg64, __VA_ARGS__)
+#endif
+
+#ifndef cmpxchg64
+#define cmpxchg64(...) \
+   __atomic_op_fence(cmpxchg64, __VA_ARGS__)
+#endif
+
+#endif /* cmpxchg64_relaxed */
+
+#ifndef atomic_read_acquire
+static inline int
+atomic_read_acquire(const atomic_t *v)
+{
+   return smp_load_acquire(&(v)->counter);
+}
+#define atomic_read_acquire atomic_read_acquire
+#endif
+
+#ifndef atomic_set_release
+static inline void
+atomic_set_release(atomic_t *v, int i)
+{
+   smp_store_release(&(v)->counter, i);
+}
+#define atomic_set_release atomic_set_release
+#endif
+
+#ifndef atomic_add_return_relaxed
+#define atomic_add_return_acquire atomic_add_return
+#define atomic_add_return_release atomic_add_return
+#define atomic_add_return_relaxed atomic_add_return
+#else /* atomic_add_return_relaxed */
+
+#ifndef atomic_add_return_acquire
+static inline int
+atomic_add_return_acquire(int i, atomic_t *v)
+{
+   int ret = atomic_add_return_relaxed(i, v);
+   __atomic_acquire_fence();
+   return ret;
+}
+#define atomic_add_return_acquire atomic_add_return_acquire
+#endif
