This is an automated email from the ASF dual-hosted git repository.

masayuki pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/nuttx.git


The following commit(s) were added to refs/heads/master by this push:
     new bbf2bbf37d Revert "arch_atomic : Introduce CONFIG_LIBC_ARCH_ATOMIC"
bbf2bbf37d is described below

commit bbf2bbf37d045c75fadad5c929e77178e5822015
Author: Masayuki Ishikawa <[email protected]>
AuthorDate: Tue Nov 12 23:04:29 2024 +0900

    Revert "arch_atomic : Introduce CONFIG_LIBC_ARCH_ATOMIC"
    
    This reverts commit 81e7b13a05aa1766ca05d36d130a9b8a7b8bb8fc.
---
 arch/arm/Kconfig                |   1 -
 include/nuttx/atomic.h          |   4 +-
 include/nuttx/lib/stdatomic.h   | 164 +++++-----
 include/nuttx/macro.h           |   4 -
 libs/libc/machine/Kconfig       |   8 -
 libs/libc/machine/arch_atomic.c | 687 ++++++++++++++++++----------------------
 6 files changed, 388 insertions(+), 480 deletions(-)

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 06312080cf..44f55bffe8 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -731,7 +731,6 @@ config ARCH_CHIP_CXD32XX
        bool "Sony CXD32xx"
        select ARCH_CORTEXM4
        select ARCH_HAVE_FPU
-       select LIBC_ARCH_ATOMIC
        ---help---
                Sony CXD32XX (ARM Cortex-M4) architectures
 
diff --git a/include/nuttx/atomic.h b/include/nuttx/atomic.h
index c23ecc4c92..1c430f963f 100644
--- a/include/nuttx/atomic.h
+++ b/include/nuttx/atomic.h
@@ -27,9 +27,7 @@
  * Included Files
  ****************************************************************************/
 
-#if defined(CONFIG_LIBC_ARCH_ATOMIC)
-#  include <nuttx/lib/stdatomic.h>
-#elif defined(__has_include)
+#ifdef __has_include
 #  if defined(__cplusplus) && __has_include(<atomic>)
 extern "C++"
 {
diff --git a/include/nuttx/lib/stdatomic.h b/include/nuttx/lib/stdatomic.h
index 48972b61ae..07f14911fb 100644
--- a/include/nuttx/lib/stdatomic.h
+++ b/include/nuttx/lib/stdatomic.h
@@ -63,38 +63,38 @@
 #define ATOMIC_VAR_INIT(value) (value)
 
 #define atomic_store_n(obj, val, type) \
-  (sizeof(*(obj)) == 1 ? nx_atomic_store_1(obj, val, type) : \
-   sizeof(*(obj)) == 2 ? nx_atomic_store_2(obj, val, type) : \
-   sizeof(*(obj)) == 4 ? nx_atomic_store_4(obj, val, type) : \
-                         nx_atomic_store_8(obj, val, type))
+  (sizeof(*(obj)) == 1 ? __atomic_store_1(obj, val, type) : \
+   sizeof(*(obj)) == 2 ? __atomic_store_2(obj, val, type) : \
+   sizeof(*(obj)) == 4 ? __atomic_store_4(obj, val, type) : \
+                         __atomic_store_8(obj, val, type))
 
 #define atomic_store(obj, val) atomic_store_n(obj, val, __ATOMIC_RELAXED)
 #define atomic_store_explicit(obj, val, type) atomic_store_n(obj, val, type)
 #define atomic_init(obj, val) atomic_store(obj, val)
 
 #define atomic_load_n(obj, type) \
-  (sizeof(*(obj)) == 1 ? nx_atomic_load_1(obj, type) : \
-   sizeof(*(obj)) == 2 ? nx_atomic_load_2(obj, type) : \
-   sizeof(*(obj)) == 4 ? nx_atomic_load_4(obj, type) : \
-                         nx_atomic_load_8(obj, type))
+  (sizeof(*(obj)) == 1 ? __atomic_load_1(obj, type) : \
+   sizeof(*(obj)) == 2 ? __atomic_load_2(obj, type) : \
+   sizeof(*(obj)) == 4 ? __atomic_load_4(obj, type) : \
+                         __atomic_load_8(obj, type))
 
 #define atomic_load(obj) atomic_load_n(obj, __ATOMIC_RELAXED)
 #define atomic_load_explicit(obj, type) atomic_load_n(obj, type)
 
 #define atomic_exchange_n(obj, val, type) \
-  (sizeof(*(obj)) == 1 ? nx_atomic_exchange_1(obj, val, type) : \
-   sizeof(*(obj)) == 2 ? nx_atomic_exchange_2(obj, val, type) : \
-   sizeof(*(obj)) == 4 ? nx_atomic_exchange_4(obj, val, type) : \
-                         nx_atomic_exchange_8(obj, val, type))
+  (sizeof(*(obj)) == 1 ? __atomic_exchange_1(obj, val, type) : \
+   sizeof(*(obj)) == 2 ? __atomic_exchange_2(obj, val, type) : \
+   sizeof(*(obj)) == 4 ? __atomic_exchange_4(obj, val, type) : \
+                         __atomic_exchange_8(obj, val, type))
 
 #define atomic_exchange(obj, val) atomic_exchange_n(obj, val, __ATOMIC_RELAXED)
 #define atomic_exchange_explicit(obj, val, type) atomic_exchange_n(obj, val, type)
 
 #define atomic_compare_exchange_n(obj, expected, desired, weak, success, failure) \
-  (sizeof(*(obj)) == 1 ? nx_atomic_compare_exchange_1(obj, expected, desired, weak, success, failure) : \
-   sizeof(*(obj)) == 2 ? nx_atomic_compare_exchange_2(obj, expected, desired, weak, success, failure) : \
-   sizeof(*(obj)) == 4 ? nx_atomic_compare_exchange_4(obj, expected, desired, weak, success, failure) : \
-                         nx_atomic_compare_exchange_8(obj, expected, desired, weak, success, failure))
+  (sizeof(*(obj)) == 1 ? __atomic_compare_exchange_1(obj, expected, desired, weak, success, failure) : \
+   sizeof(*(obj)) == 2 ? __atomic_compare_exchange_2(obj, expected, desired, weak, success, failure) : \
+   sizeof(*(obj)) == 4 ? __atomic_compare_exchange_4(obj, expected, desired, weak, success, failure) : \
+                         __atomic_compare_exchange_8(obj, expected, desired, weak, success, failure))
 
 #define atomic_compare_exchange_strong(obj, expected, desired) \
   atomic_compare_exchange_n(obj, expected, desired, false, __ATOMIC_RELAXED, __ATOMIC_RELAXED)
@@ -106,10 +106,10 @@
   atomic_compare_exchange_n(obj, expected, desired, true, success, failure)
 
 #define atomic_flag_test_and_set_n(obj, type) \
-  (sizeof(*(obj)) == 1 ? nx_atomic_flag_test_and_set_1(obj, type) : \
-   sizeof(*(obj)) == 2 ? nx_atomic_flag_test_and_set_2(obj, type) : \
-   sizeof(*(obj)) == 4 ? nx_atomic_flag_test_and_set_4(obj, type) : \
-                         nx_atomic_flag_test_and_set_8(obj, type))
+  (sizeof(*(obj)) == 1 ? __atomic_flag_test_and_set_1(obj, type) : \
+   sizeof(*(obj)) == 2 ? __atomic_flag_test_and_set_2(obj, type) : \
+   sizeof(*(obj)) == 4 ? __atomic_flag_test_and_set_4(obj, type) : \
+                         __atomic_flag_test_and_set_8(obj, type))
 
 #define atomic_flag_test_and_set(obj) atomic_flag_test_and_set_n(obj, __ATOMIC_RELAXED)
 #define atomic_flag_test_and_set_explicit(obj, type) atomic_flag_test_and_set_n(obj, 1, type)
@@ -117,46 +117,46 @@
 #define atomic_flag_clear_explicit(obj, type) atomic_store_explicit(obj, 0, type)
 
 #define atomic_fetch_and_n(obj, val, type) \
-  (sizeof(*(obj)) == 1 ? nx_atomic_fetch_and_1(obj, val, type) : \
-   sizeof(*(obj)) == 2 ? nx_atomic_fetch_and_2(obj, val, type) : \
-   sizeof(*(obj)) == 4 ? nx_atomic_fetch_and_4(obj, val, type) : \
-                         nx_atomic_fetch_and_8(obj, val, type))
+  (sizeof(*(obj)) == 1 ? __atomic_fetch_and_1(obj, val, type) : \
+   sizeof(*(obj)) == 2 ? __atomic_fetch_and_2(obj, val, type) : \
+   sizeof(*(obj)) == 4 ? __atomic_fetch_and_4(obj, val, type) : \
+                         __atomic_fetch_and_8(obj, val, type))
 
 #define atomic_fetch_and(obj, val) atomic_fetch_and_n(obj, val, __ATOMIC_RELAXED)
 #define atomic_fetch_and_explicit(obj, val, type) atomic_fetch_and_n(obj, val, type)
 
 #define atomic_fetch_or_n(obj, val, type) \
-  (sizeof(*(obj)) == 1 ? nx_atomic_fetch_or_1(obj, val, type) : \
-   sizeof(*(obj)) == 2 ? nx_atomic_fetch_or_2(obj, val, type) : \
-   sizeof(*(obj)) == 4 ? nx_atomic_fetch_or_4(obj, val, type) : \
-                         nx_atomic_fetch_or_8(obj, val, type))
+  (sizeof(*(obj)) == 1 ? __atomic_fetch_or_1(obj, val, type) : \
+   sizeof(*(obj)) == 2 ? __atomic_fetch_or_2(obj, val, type) : \
+   sizeof(*(obj)) == 4 ? __atomic_fetch_or_4(obj, val, type) : \
+                         __atomic_fetch_or_8(obj, val, type))
 
 #define atomic_fetch_or(obj, val) atomic_fetch_or_n(obj, val, __ATOMIC_RELAXED)
 #define atomic_fetch_or_explicit(obj, val, type) atomic_fetch_or_n(obj, val, type)
 
 #define atomic_fetch_xor_n(obj, val, type) \
-  (sizeof(*(obj)) == 1 ? nx_atomic_fetch_xor_1(obj, val, type) : \
-   sizeof(*(obj)) == 2 ? nx_atomic_fetch_xor_2(obj, val, type) : \
-   sizeof(*(obj)) == 4 ? nx_atomic_fetch_xor_4(obj, val, type) : \
-                         nx_atomic_fetch_xor_8(obj, val, type))
+  (sizeof(*(obj)) == 1 ? __atomic_fetch_xor_1(obj, val, type) : \
+   sizeof(*(obj)) == 2 ? __atomic_fetch_xor_2(obj, val, type) : \
+   sizeof(*(obj)) == 4 ? __atomic_fetch_xor_4(obj, val, type) : \
+                         __atomic_fetch_xor_8(obj, val, type))
 
 #define atomic_fetch_xor(obj, val) atomic_fetch_xor_n(obj, val, __ATOMIC_RELAXED)
 #define atomic_fetch_xor_explicit(obj, val, type) atomic_fetch_xor_n(obj, val, type)
 
 #define atomic_fetch_add_n(obj, val, type) \
-  (sizeof(*(obj)) == 1 ? nx_atomic_fetch_add_1(obj, val, type) : \
-   sizeof(*(obj)) == 2 ? nx_atomic_fetch_add_2(obj, val, type) : \
-   sizeof(*(obj)) == 4 ? nx_atomic_fetch_add_4(obj, val, type) : \
-                         nx_atomic_fetch_add_8(obj, val, type))
+  (sizeof(*(obj)) == 1 ? __atomic_fetch_add_1(obj, val, type) : \
+   sizeof(*(obj)) == 2 ? __atomic_fetch_add_2(obj, val, type) : \
+   sizeof(*(obj)) == 4 ? __atomic_fetch_add_4(obj, val, type) : \
+                         __atomic_fetch_add_8(obj, val, type))
 
 #define atomic_fetch_add(obj, val) atomic_fetch_add_n(obj, val, __ATOMIC_RELAXED)
 #define atomic_fetch_add_explicit(obj, val, type) atomic_fetch_add_n(obj, val, type)
 
 #define atomic_fetch_sub_n(obj, val, type) \
-  (sizeof(*(obj)) == 1 ? nx_atomic_fetch_sub_1(obj, val, type) : \
-   sizeof(*(obj)) == 2 ? nx_atomic_fetch_sub_2(obj, val, type) : \
-   sizeof(*(obj)) == 4 ? nx_atomic_fetch_sub_4(obj, val, type) : \
-                         nx_atomic_fetch_sub_8(obj, val, type))
+  (sizeof(*(obj)) == 1 ? __atomic_fetch_sub_1(obj, val, type) : \
+   sizeof(*(obj)) == 2 ? __atomic_fetch_sub_2(obj, val, type) : \
+   sizeof(*(obj)) == 4 ? __atomic_fetch_sub_4(obj, val, type) : \
+                         __atomic_fetch_sub_8(obj, val, type))
 
 #define atomic_fetch_sub(obj, val) atomic_fetch_sub_n(obj, val, __ATOMIC_RELAXED)
 #define atomic_fetch_sub_explicit(obj, val, type) atomic_fetch_sub_n(obj, val, type)
@@ -194,85 +194,81 @@ typedef volatile wchar_t atomic_wchar_t;
  * Public Function Prototypes
  ****************************************************************************/
 
-void nx_atomic_store_1(FAR volatile void *ptr, uint8_t value,
-                      int memorder);
-void nx_atomic_store_2(FAR volatile void *ptr, uint16_t value,
-                      int memorder);
-void nx_atomic_store_4(FAR volatile void *ptr, uint32_t value,
-                      int memorder);
-void nx_atomic_store_8(FAR volatile void *ptr, uint64_t value,
-                      int memorder);
-uint8_t nx_atomic_load_1(FAR const volatile void *ptr, int memorder);
-uint16_t nx_atomic_load_2(FAR const volatile void *ptr, int memorder);
-uint32_t nx_atomic_load_4(FAR const volatile void *ptr, int memorder);
-uint64_t nx_atomic_load_8(FAR const volatile void *ptr, int memorder);
-uint8_t nx_atomic_exchange_1(FAR volatile void *ptr, uint8_t value,
+void __atomic_store_1(FAR volatile void *ptr, uint8_t value, int memorder);
+void __atomic_store_2(FAR volatile void *ptr, uint16_t value, int memorder);
+void __atomic_store_4(FAR volatile void *ptr, uint32_t value, int memorder);
+void __atomic_store_8(FAR volatile void *ptr, uint64_t value, int memorder);
+uint8_t __atomic_load_1(FAR const volatile void *ptr, int memorder);
+uint16_t __atomic_load_2(FAR const volatile void *ptr, int memorder);
+uint32_t __atomic_load_4(FAR const volatile void *ptr, int memorder);
+uint64_t __atomic_load_8(FAR const volatile void *ptr, int memorder);
+uint8_t __atomic_exchange_1(FAR volatile void *ptr, uint8_t value,
                             int memorder);
-uint16_t nx_atomic_exchange_2(FAR volatile void *ptr, uint16_t value,
+uint16_t __atomic_exchange_2(FAR volatile void *ptr, uint16_t value,
                              int memorder);
-uint32_t nx_atomic_exchange_4(FAR volatile void *ptr, uint32_t value,
+uint32_t __atomic_exchange_4(FAR volatile void *ptr, uint32_t value,
                              int memorder);
-uint64_t nx_atomic_exchange_8(FAR volatile void *ptr, uint64_t value,
+uint64_t __atomic_exchange_8(FAR volatile void *ptr, uint64_t value,
                              int memorder);
-bool nx_atomic_compare_exchange_1(FAR volatile void *mem, FAR void *expect,
+bool __atomic_compare_exchange_1(FAR volatile void *mem, FAR void *expect,
                                  uint8_t desired, bool weak, int success,
                                  int failure);
-bool nx_atomic_compare_exchange_2(FAR volatile void *mem, FAR void *expect,
+bool __atomic_compare_exchange_2(FAR volatile void *mem, FAR void *expect,
                                  uint16_t desired, bool weak, int success,
                                  int failure);
-bool nx_atomic_compare_exchange_4(FAR volatile void *mem, FAR void *expect,
+bool __atomic_compare_exchange_4(FAR volatile void *mem, FAR void *expect,
                                  uint32_t desired, bool weak, int success,
                                  int failure);
-bool nx_atomic_compare_exchange_8(FAR volatile void *mem, FAR void *expect,
+bool __atomic_compare_exchange_8(FAR volatile void *mem, FAR void *expect,
                                  uint64_t desired, bool weak, int success,
                                  int failure);
-uint8_t nx_atomic_flag_test_and_set_1(FAR const volatile void *ptr,
+uint8_t __atomic_flag_test_and_set_1(FAR const volatile void *ptr,
                                      int memorder);
-uint16_t nx_atomic_flag_test_and_set_2(FAR const volatile void *ptr,
+uint16_t __atomic_flag_test_and_set_2(FAR const volatile void *ptr,
                                       int memorder);
-uint32_t nx_atomic_flag_test_and_set_4(FAR const volatile void *ptr,
+uint32_t __atomic_flag_test_and_set_4(FAR const volatile void *ptr,
                                       int memorder);
-uint64_t nx_atomic_flag_test_and_set_8(FAR const volatile void *ptr,
+uint64_t __atomic_flag_test_and_set_8(FAR const volatile void *ptr,
                                       int memorder);
-uint8_t nx_atomic_fetch_add_1(FAR volatile void *ptr, uint8_t value,
+uint8_t __atomic_fetch_add_1(FAR volatile void *ptr, uint8_t value,
                              int memorder);
-uint16_t nx_atomic_fetch_add_2(FAR volatile void *ptr, uint16_t value,
+uint16_t __atomic_fetch_add_2(FAR volatile void *ptr, uint16_t value,
                               int memorder);
-uint32_t nx_atomic_fetch_add_4(FAR volatile void *ptr, uint32_t value,
+uint32_t __atomic_fetch_add_4(FAR volatile void *ptr, uint32_t value,
                               int memorder);
-uint64_t nx_atomic_fetch_add_8(FAR volatile void *ptr, uint64_t value,
+uint64_t __atomic_fetch_add_8(FAR volatile void *ptr, uint64_t value,
                               int memorder);
-uint8_t nx_atomic_fetch_sub_1(FAR volatile void *ptr, uint8_t value,
+uint8_t __atomic_fetch_sub_1(FAR volatile void *ptr, uint8_t value,
                              int memorder);
-uint16_t nx_atomic_fetch_sub_2(FAR volatile void *ptr, uint16_t value,
+uint16_t __atomic_fetch_sub_2(FAR volatile void *ptr, uint16_t value,
                               int memorder);
-uint32_t nx_atomic_fetch_sub_4(FAR volatile void *ptr, uint32_t value,
+uint32_t __atomic_fetch_sub_4(FAR volatile void *ptr, uint32_t value,
                               int memorder);
-uint64_t nx_atomic_fetch_sub_8(FAR volatile void *ptr, uint64_t value,
+uint64_t __atomic_fetch_sub_8(FAR volatile void *ptr, uint64_t value,
                               int memorder);
-uint8_t nx_atomic_fetch_and_1(FAR volatile void *ptr, uint8_t value,
+uint8_t __atomic_fetch_and_1(FAR volatile void *ptr, uint8_t value,
                              int memorder);
-uint16_t nx_atomic_fetch_and_2(FAR volatile void *ptr, uint16_t value,
+uint16_t __atomic_fetch_and_2(FAR volatile void *ptr, uint16_t value,
                               int memorder);
-uint32_t nx_atomic_fetch_and_4(FAR volatile void *ptr, uint32_t value,
+uint32_t __atomic_fetch_and_4(FAR volatile void *ptr, uint32_t value,
                               int memorder);
-uint64_t nx_atomic_fetch_and_8(FAR volatile void *ptr, uint64_t value,
+uint64_t __atomic_fetch_and_8(FAR volatile void *ptr, uint64_t value,
                               int memorder);
-uint8_t nx_atomic_fetch_or_1(FAR volatile void *ptr, uint8_t value,
+uint8_t __atomic_fetch_or_1(FAR volatile void *ptr, uint8_t value,
                             int memorder);
-uint16_t nx_atomic_fetch_or_2(FAR volatile void *ptr, uint16_t value,
+uint16_t __atomic_fetch_or_2(FAR volatile void *ptr, uint16_t value,
                              int memorder);
-uint32_t nx_atomic_fetch_or_4(FAR volatile void *ptr, uint32_t value,
+uint32_t __atomic_fetch_or_4(FAR volatile void *ptr, uint32_t value,
                              int memorder);
-uint64_t nx_atomic_fetch_or_8(FAR volatile void *ptr, uint64_t value,
+uint64_t __atomic_fetch_or_8(FAR volatile void *ptr, uint64_t value,
                              int memorder);
-uint8_t nx_atomic_fetch_xor_1(FAR volatile void *ptr, uint8_t value,
+uint8_t __atomic_fetch_xor_1(FAR volatile void *ptr, uint8_t value,
                              int memorder);
-uint16_t nx_atomic_fetch_xor_2(FAR volatile void *ptr, uint16_t value,
+uint16_t __atomic_fetch_xor_2(FAR volatile void *ptr, uint16_t value,
                               int memorder);
-uint32_t nx_atomic_fetch_xor_4(FAR volatile void *ptr, uint32_t value,
+uint32_t __atomic_fetch_xor_4(FAR volatile void *ptr, uint32_t value,
                               int memorder);
-uint64_t nx_atomic_fetch_xor_8(FAR volatile void *ptr, uint64_t value,
+uint64_t __atomic_fetch_xor_8(FAR volatile void *ptr, uint64_t value,
                               int memorder);
 
 #endif /* __INCLUDE_NUTTX_LIB_STDATOMIC_H */
diff --git a/include/nuttx/macro.h b/include/nuttx/macro.h
index 0e838006d2..f36634be20 100644
--- a/include/nuttx/macro.h
+++ b/include/nuttx/macro.h
@@ -141,9 +141,5 @@
 #define STRINGIFY_(x) #x
 #define STRINGIFY(x)  STRINGIFY_(x)
 
-/* Concatenate the arguments */
-
-#define CONCATENATE(a, b) a##b
-
 #endif /* __INCLUDE_NUTTX_MACRO_H */
 
diff --git a/libs/libc/machine/Kconfig b/libs/libc/machine/Kconfig
index 4fb304df53..5b0caacccd 100644
--- a/libs/libc/machine/Kconfig
+++ b/libs/libc/machine/Kconfig
@@ -9,14 +9,6 @@
 
 menu "Architecture-Specific Support"
 
-config LIBC_ARCH_ATOMIC
-       bool "arch_atomic"
-       default n
-       ---help---
-               If this configuration is selected and <include/nuttx/atomic.h> is
-               included, arch_atomic.c will be linked instead of built-in
-               atomic function.
-
 config ARCH_LOWPUTC
        bool "Low-level console output"
        default y
diff --git a/libs/libc/machine/arch_atomic.c b/libs/libc/machine/arch_atomic.c
index bdecc8c057..5da80f452c 100644
--- a/libs/libc/machine/arch_atomic.c
+++ b/libs/libc/machine/arch_atomic.c
@@ -29,15 +29,14 @@
 #include <stdbool.h>
 #include <stdint.h>
 #include <nuttx/spinlock.h>
-#include <nuttx/macro.h>
 
 /****************************************************************************
  * Pre-processor Definitions
  ****************************************************************************/
 
-#define STORE(fn, n, type)                                         \
+#define STORE(n, type)                                             \
                                                                    \
-  void weak_function CONCATENATE(fn, n) (FAR volatile void *ptr,   \
+  void weak_function __atomic_store_##n (FAR volatile void *ptr,   \
                                          type value, int memorder) \
   {                                                                \
     irqstate_t irqstate = spin_lock_irqsave(NULL);                 \
@@ -47,271 +46,271 @@
     spin_unlock_irqrestore(NULL, irqstate);                        \
   }
 
-#define LOAD(fn, n, type)                                              \
-                                                                       \
-  type weak_function CONCATENATE(fn, n) (FAR const volatile void *ptr, \
-                                         int memorder)                 \
-  {                                                                    \
-    irqstate_t irqstate = spin_lock_irqsave(NULL);                     \
-                                                                       \
-    type ret = *(FAR type *)ptr;                                       \
-                                                                       \
-    spin_unlock_irqrestore(NULL, irqstate);                            \
-    return ret;                                                        \
+#define LOAD(n, type)                                                 \
+                                                                      \
+  type weak_function __atomic_load_##n (FAR const volatile void *ptr, \
+                                        int memorder)                 \
+  {                                                                   \
+    irqstate_t irqstate = spin_lock_irqsave(NULL);                    \
+                                                                      \
+    type ret = *(FAR type *)ptr;                                      \
+                                                                      \
+    spin_unlock_irqrestore(NULL, irqstate);                           \
+    return ret;                                                       \
   }
 
-#define EXCHANGE(fn, n, type)                                      \
-                                                                   \
-  type weak_function CONCATENATE(fn, n) (FAR volatile void *ptr,   \
-                                         type value, int memorder) \
-  {                                                                \
-    irqstate_t irqstate = spin_lock_irqsave(NULL);                 \
-    FAR type *tmp = (FAR type *)ptr;                               \
-                                                                   \
-    type ret = *tmp;                                               \
-    *tmp = value;                                                  \
-                                                                   \
-    spin_unlock_irqrestore(NULL, irqstate);                        \
-    return ret;                                                    \
+#define EXCHANGE(n, type)                                             \
+                                                                      \
+  type weak_function __atomic_exchange_##n (FAR volatile void *ptr,   \
+                                            type value, int memorder) \
+  {                                                                   \
+    irqstate_t irqstate = spin_lock_irqsave(NULL);                    \
+    FAR type *tmp = (FAR type *)ptr;                                  \
+                                                                      \
+    type ret = *tmp;                                                  \
+    *tmp = value;                                                     \
+                                                                      \
+    spin_unlock_irqrestore(NULL, irqstate);                           \
+    return ret;                                                       \
   }
 
-#define CMP_EXCHANGE(fn, n, type)                                  \
-                                                                   \
-  bool weak_function CONCATENATE(fn, n) (FAR volatile void *mem,   \
-                                         FAR void *expect,         \
-                                         type desired, bool weak,  \
-                                         int success, int failure) \
-  {                                                                \
-    bool ret = false;                                              \
-    irqstate_t irqstate = spin_lock_irqsave(NULL);                 \
-    FAR type *tmpmem = (FAR type *)mem;                            \
-    FAR type *tmpexp = (FAR type *)expect;                         \
-                                                                   \
-    if (*tmpmem == *tmpexp)                                        \
-      {                                                            \
-        ret = true;                                                \
-        *tmpmem = desired;                                         \
-      }                                                            \
-    else                                                           \
-      {                                                            \
-        *tmpexp = *tmpmem;                                         \
-      }                                                            \
-                                                                   \
-    spin_unlock_irqrestore(NULL, irqstate);                        \
-    return ret;                                                    \
+#define CMP_EXCHANGE(n, type)                                                 \
+                                                                              \
+  bool weak_function __atomic_compare_exchange_##n (FAR volatile void *mem,   \
+                                                    FAR void *expect,         \
+                                                    type desired, bool weak,  \
+                                                    int success, int failure) \
+  {                                                                           \
+    bool ret = false;                                                         \
+    irqstate_t irqstate = spin_lock_irqsave(NULL);                            \
+    FAR type *tmpmem = (FAR type *)mem;                                       \
+    FAR type *tmpexp = (FAR type *)expect;                                    \
+                                                                              \
+    if (*tmpmem == *tmpexp)                                                   \
+      {                                                                       \
+        ret = true;                                                           \
+        *tmpmem = desired;                                                    \
+      }                                                                       \
+    else                                                                      \
+      {                                                                       \
+        *tmpexp = *tmpmem;                                                    \
+      }                                                                       \
+                                                                              \
+    spin_unlock_irqrestore(NULL, irqstate);                                   \
+    return ret;                                                               \
   }
 
-#define FLAG_TEST_AND_SET(fn, n, type)                           \
-                                                                 \
-  type weak_function CONCATENATE(fn, n) (FAR volatile void *ptr, \
-                                         int memorder)           \
-  {                                                              \
-    irqstate_t irqstate = spin_lock_irqsave(NULL);               \
-    FAR type *tmp = (FAR type *)ptr;                             \
-    type ret = *tmp;                                             \
-                                                                 \
-    *(FAR type *)ptr = 1;                                        \
-                                                                 \
-    spin_unlock_irqrestore(NULL, irqstate);                      \
-    return ret;                                                  \
+#define FLAG_TEST_AND_SET(n, type)                                           \
+                                                                             \
+  type weak_function __atomic_flags_test_and_set##n (FAR volatile void *ptr, \
+                                                     int memorder)           \
+  {                                                                          \
+    irqstate_t irqstate = spin_lock_irqsave(NULL);                           \
+    FAR type *tmp = (FAR type *)ptr;                                         \
+    type ret = *tmp;                                                         \
+                                                                             \
+    *(FAR type *)ptr = 1;                                                    \
+                                                                             \
+    spin_unlock_irqrestore(NULL, irqstate);                                  \
+    return ret;                                                              \
   }
 
-#define FETCH_ADD(fn, n, type)                                     \
-                                                                   \
-  type weak_function CONCATENATE(fn, n) (FAR volatile void *ptr,   \
-                                         type value, int memorder) \
-  {                                                                \
-    irqstate_t irqstate = spin_lock_irqsave(NULL);                 \
-    FAR type *tmp = (FAR type *)ptr;                               \
-    type ret = *tmp;                                               \
-                                                                   \
-    *tmp = *tmp + value;                                           \
-                                                                   \
-    spin_unlock_irqrestore(NULL, irqstate);                        \
-    return ret;                                                    \
+#define FETCH_ADD(n, type)                                             \
+                                                                       \
+  type weak_function __atomic_fetch_add_##n (FAR volatile void *ptr,   \
+                                             type value, int memorder) \
+  {                                                                    \
+    irqstate_t irqstate = spin_lock_irqsave(NULL);                     \
+    FAR type *tmp = (FAR type *)ptr;                                   \
+    type ret = *tmp;                                                   \
+                                                                       \
+    *tmp = *tmp + value;                                               \
+                                                                       \
+    spin_unlock_irqrestore(NULL, irqstate);                            \
+    return ret;                                                        \
   }
 
-#define FETCH_SUB(fn, n, type)                                     \
-                                                                   \
-  type weak_function CONCATENATE(fn, n) (FAR volatile void *ptr,   \
-                                         type value, int memorder) \
-  {                                                                \
-    irqstate_t irqstate = spin_lock_irqsave(NULL);                 \
-    FAR type *tmp = (FAR type *)ptr;                               \
-    type ret = *tmp;                                               \
-                                                                   \
-    *tmp = *tmp - value;                                           \
-                                                                   \
-    spin_unlock_irqrestore(NULL, irqstate);                        \
-    return ret;                                                    \
+#define FETCH_SUB(n, type)                                             \
+                                                                       \
+  type weak_function __atomic_fetch_sub_##n (FAR volatile void *ptr,   \
+                                             type value, int memorder) \
+  {                                                                    \
+    irqstate_t irqstate = spin_lock_irqsave(NULL);                     \
+    FAR type *tmp = (FAR type *)ptr;                                   \
+    type ret = *tmp;                                                   \
+                                                                       \
+    *tmp = *tmp - value;                                               \
+                                                                       \
+    spin_unlock_irqrestore(NULL, irqstate);                            \
+    return ret;                                                        \
   }
 
-#define FETCH_AND(fn, n, type)                                     \
-                                                                   \
-  type weak_function CONCATENATE(fn, n) (FAR volatile void *ptr,   \
-                                         type value, int memorder) \
-  {                                                                \
-    irqstate_t irqstate = spin_lock_irqsave(NULL);                 \
-    FAR type *tmp = (FAR type *)ptr;                               \
-    type ret = *tmp;                                               \
-                                                                   \
-    *tmp = *tmp & value;                                           \
-                                                                   \
-    spin_unlock_irqrestore(NULL, irqstate);                        \
-    return ret;                                                    \
+#define FETCH_AND(n, type)                                             \
+                                                                       \
+  type weak_function __atomic_fetch_and_##n (FAR volatile void *ptr,   \
+                                             type value, int memorder) \
+  {                                                                    \
+    irqstate_t irqstate = spin_lock_irqsave(NULL);                     \
+    FAR type *tmp = (FAR type *)ptr;                                   \
+    type ret = *tmp;                                                   \
+                                                                       \
+    *tmp = *tmp & value;                                               \
+                                                                       \
+    spin_unlock_irqrestore(NULL, irqstate);                            \
+    return ret;                                                        \
   }
 
-#define FETCH_OR(fn, n, type)                                      \
-                                                                   \
-  type weak_function CONCATENATE(fn, n) (FAR volatile void *ptr,   \
-                                         type value, int memorder) \
-  {                                                                \
-    irqstate_t irqstate = spin_lock_irqsave(NULL);                 \
-    FAR type *tmp = (FAR type *)ptr;                               \
-    type ret = *tmp;                                               \
-                                                                   \
-    *tmp = *tmp | value;                                           \
-                                                                   \
-    spin_unlock_irqrestore(NULL, irqstate);                        \
-    return ret;                                                    \
+#define FETCH_OR(n, type)                                             \
+                                                                      \
+  type weak_function __atomic_fetch_or_##n (FAR volatile void *ptr,   \
+                                            type value, int memorder) \
+  {                                                                   \
+    irqstate_t irqstate = spin_lock_irqsave(NULL);                    \
+    FAR type *tmp = (FAR type *)ptr;                                  \
+    type ret = *tmp;                                                  \
+                                                                      \
+    *tmp = *tmp | value;                                              \
+                                                                      \
+    spin_unlock_irqrestore(NULL, irqstate);                           \
+    return ret;                                                       \
   }
 
-#define FETCH_XOR(fn, n, type)                                     \
-                                                                   \
-  type weak_function CONCATENATE(fn, n) (FAR volatile void *ptr,   \
-                                         type value, int memorder) \
-  {                                                                \
-    irqstate_t irqstate = spin_lock_irqsave(NULL);                 \
-    FAR type *tmp = (FAR type *)ptr;                               \
-    type ret = *tmp;                                               \
-                                                                   \
-    *tmp = *tmp ^ value;                                           \
-                                                                   \
-    spin_unlock_irqrestore(NULL, irqstate);                        \
-    return ret;                                                    \
+#define FETCH_XOR(n, type)                                             \
+                                                                       \
+  type weak_function __atomic_fetch_xor_##n (FAR volatile void *ptr,   \
+                                             type value, int memorder) \
+  {                                                                    \
+    irqstate_t irqstate = spin_lock_irqsave(NULL);                     \
+    FAR type *tmp = (FAR type *)ptr;                                   \
+    type ret = *tmp;                                                   \
+                                                                       \
+    *tmp = *tmp ^ value;                                               \
+                                                                       \
+    spin_unlock_irqrestore(NULL, irqstate);                            \
+    return ret;                                                        \
   }
 
-#define SYNC_ADD_FETCH(fn, n, type)                              \
-                                                                 \
-  type weak_function CONCATENATE(fn, n) (FAR volatile void *ptr, \
-                                         type value)             \
-  {                                                              \
-    irqstate_t irqstate = spin_lock_irqsave(NULL);               \
-    FAR type *tmp = (FAR type *)ptr;                             \
-                                                                 \
-    *tmp = *tmp + value;                                         \
-                                                                 \
-    spin_unlock_irqrestore(NULL, irqstate);                      \
-    return *tmp;                                                 \
+#define SYNC_ADD_FETCH(n, type)                                        \
+                                                                       \
+  type weak_function __sync_add_and_fetch_##n (FAR volatile void *ptr, \
+                                               type value)             \
+  {                                                                    \
+    irqstate_t irqstate = spin_lock_irqsave(NULL);                     \
+    FAR type *tmp = (FAR type *)ptr;                                   \
+                                                                       \
+    *tmp = *tmp + value;                                               \
+                                                                       \
+    spin_unlock_irqrestore(NULL, irqstate);                            \
+    return *tmp;                                                       \
   }
 
-#define SYNC_SUB_FETCH(fn, n, type)                              \
-                                                                 \
-  type weak_function CONCATENATE(fn, n) (FAR volatile void *ptr, \
-                                         type value)             \
-  {                                                              \
-    irqstate_t irqstate = spin_lock_irqsave(NULL);               \
-    FAR type *tmp = (FAR type *)ptr;                             \
-                                                                 \
-    *tmp = *tmp - value;                                         \
-                                                                 \
-    spin_unlock_irqrestore(NULL, irqstate);                      \
-    return *tmp;                                                 \
+#define SYNC_SUB_FETCH(n, type)                                        \
+                                                                       \
+  type weak_function __sync_sub_and_fetch_##n (FAR volatile void *ptr, \
+                                               type value)             \
+  {                                                                    \
+    irqstate_t irqstate = spin_lock_irqsave(NULL);                     \
+    FAR type *tmp = (FAR type *)ptr;                                   \
+                                                                       \
+    *tmp = *tmp - value;                                               \
+                                                                       \
+    spin_unlock_irqrestore(NULL, irqstate);                            \
+    return *tmp;                                                       \
   }
 
-#define SYNC_OR_FETCH(fn, n, type)                               \
-                                                                 \
-  type weak_function CONCATENATE(fn, n) (FAR volatile void *ptr, \
-                                         type value)             \
-  {                                                              \
-    irqstate_t irqstate = spin_lock_irqsave(NULL);               \
-    FAR type *tmp = (FAR type *)ptr;                             \
-                                                                 \
-    *tmp = *tmp | value;                                         \
-                                                                 \
-    spin_unlock_irqrestore(NULL, irqstate);                      \
-    return *tmp;                                                 \
+#define SYNC_OR_FETCH(n, type)                                        \
+                                                                      \
+  type weak_function __sync_or_and_fetch_##n (FAR volatile void *ptr, \
+                                              type value)             \
+  {                                                                   \
+    irqstate_t irqstate = spin_lock_irqsave(NULL);                    \
+    FAR type *tmp = (FAR type *)ptr;                                  \
+                                                                      \
+    *tmp = *tmp | value;                                              \
+                                                                      \
+    spin_unlock_irqrestore(NULL, irqstate);                           \
+    return *tmp;                                                      \
   }
 
-#define SYNC_AND_FETCH(fn, n, type)                              \
-                                                                 \
-  type weak_function CONCATENATE(fn, n) (FAR volatile void *ptr, \
-                                         type value)             \
-  {                                                              \
-    irqstate_t irqstate = spin_lock_irqsave(NULL);               \
-    FAR type *tmp = (FAR type *)ptr;                             \
-                                                                 \
-    *tmp = *tmp & value;                                         \
-                                                                 \
-    spin_unlock_irqrestore(NULL, irqstate);                      \
-    return *tmp;                                                 \
+#define SYNC_AND_FETCH(n, type)                                        \
+                                                                       \
+  type weak_function __sync_and_and_fetch_##n (FAR volatile void *ptr, \
+                                               type value)             \
+  {                                                                    \
+    irqstate_t irqstate = spin_lock_irqsave(NULL);                     \
+    FAR type *tmp = (FAR type *)ptr;                                   \
+                                                                       \
+    *tmp = *tmp & value;                                               \
+                                                                       \
+    spin_unlock_irqrestore(NULL, irqstate);                            \
+    return *tmp;                                                       \
   }
 
-#define SYNC_XOR_FETCH(fn, n, type)                              \
-                                                                 \
-  type weak_function CONCATENATE(fn, n) (FAR volatile void *ptr, \
-                                         type value)             \
-  {                                                              \
-    irqstate_t irqstate = spin_lock_irqsave(NULL);               \
-    FAR type *tmp = (FAR type *)ptr;                             \
-                                                                 \
-    *tmp = *tmp ^ value;                                         \
-                                                                 \
-    spin_unlock_irqrestore(NULL, irqstate);                      \
-    return *tmp;                                                 \
+#define SYNC_XOR_FETCH(n, type)                                        \
+                                                                       \
+  type weak_function __sync_xor_and_fetch_##n (FAR volatile void *ptr, \
+                                               type value)             \
+  {                                                                    \
+    irqstate_t irqstate = spin_lock_irqsave(NULL);                     \
+    FAR type *tmp = (FAR type *)ptr;                                   \
+                                                                       \
+    *tmp = *tmp ^ value;                                               \
+                                                                       \
+    spin_unlock_irqrestore(NULL, irqstate);                            \
+    return *tmp;                                                       \
   }
 
-#define SYNC_NAND_FETCH(fn, n, type)                             \
-                                                                 \
-  type weak_function CONCATENATE(fn, n) (FAR volatile void *ptr, \
-                                         type value)             \
-  {                                                              \
-    irqstate_t irqstate = spin_lock_irqsave(NULL);               \
-    FAR type *tmp = (FAR type *)ptr;                             \
-                                                                 \
-    *tmp = ~(*tmp & value);                                      \
-                                                                 \
-    spin_unlock_irqrestore(NULL, irqstate);                      \
-    return *tmp;                                                 \
+#define SYNC_NAND_FETCH(n, type)                                        \
+                                                                        \
+  type weak_function __sync_nand_and_fetch_##n (FAR volatile void *ptr, \
+                                                type value)             \
+  {                                                                     \
+    irqstate_t irqstate = spin_lock_irqsave(NULL);                      \
+    FAR type *tmp = (FAR type *)ptr;                                    \
+                                                                        \
+    *tmp = ~(*tmp & value);                                             \
+                                                                        \
+    spin_unlock_irqrestore(NULL, irqstate);                             \
+    return *tmp;                                                        \
   }
 
-#define SYNC_BOOL_CMP_SWAP(fn, n, type)                          \
-                                                                 \
-  bool weak_function CONCATENATE(fn, n) (FAR volatile void *ptr, \
-                                         type oldvalue,          \
-                                         type newvalue)          \
-  {                                                              \
-    bool ret = false;                                            \
-    irqstate_t irqstate = spin_lock_irqsave(NULL);               \
-    FAR type *tmp = (FAR type *)ptr;                             \
-                                                                 \
-    if (*tmp == oldvalue)                                        \
-      {                                                          \
-        ret = true;                                              \
-        *tmp = newvalue;                                         \
-      }                                                          \
-                                                                 \
-    spin_unlock_irqrestore(NULL, irqstate);                      \
-    return ret;                                                  \
+#define SYNC_BOOL_CMP_SWAP(n, type)                                            \
+                                                                               \
+  bool weak_function __sync_bool_compare_and_swap_##n (FAR volatile void *ptr, \
+                                                       type oldvalue,          \
+                                                       type newvalue)          \
+  {                                                                            \
+    bool ret = false;                                                          \
+    irqstate_t irqstate = spin_lock_irqsave(NULL);                             \
+    FAR type *tmp = (FAR type *)ptr;                                           \
+                                                                               \
+    if (*tmp == oldvalue)                                                      \
+      {                                                                        \
+        ret = true;                                                            \
+        *tmp = newvalue;                                                       \
+      }                                                                        \
+                                                                               \
+    spin_unlock_irqrestore(NULL, irqstate);                                    \
+    return ret;                                                                \
   }
 
-#define SYNC_VAL_CMP_SWAP(fn, n, type)                           \
-                                                                 \
-  type weak_function CONCATENATE(fn, n) (FAR volatile void *ptr, \
-                                         type oldvalue,          \
-                                         type newvalue)          \
-  {                                                              \
-    irqstate_t irqstate = spin_lock_irqsave(NULL);               \
-    FAR type *tmp = (FAR type *)ptr;                             \
-    type ret = *tmp;                                             \
-                                                                 \
-    if (*tmp == oldvalue)                                        \
-      {                                                          \
-        *tmp = newvalue;                                         \
-      }                                                          \
-                                                                 \
-    spin_unlock_irqrestore(NULL, irqstate);                      \
-    return ret;                                                  \
+#define SYNC_VAL_CMP_SWAP(n, type)                                            \
+                                                                              \
+  type weak_function __sync_val_compare_and_swap_##n (FAR volatile void *ptr, \
+                                                      type oldvalue,          \
+                                                      type newvalue)          \
+  {                                                                           \
+    irqstate_t irqstate = spin_lock_irqsave(NULL);                            \
+    FAR type *tmp = (FAR type *)ptr;                                          \
+    type ret = *tmp;                                                          \
+                                                                              \
+    if (*tmp == oldvalue)                                                     \
+      {                                                                       \
+        *tmp = newvalue;                                                      \
+      }                                                                       \
+                                                                              \
+    spin_unlock_irqrestore(NULL, irqstate);                                   \
+    return ret;                                                               \
   }
 
 /****************************************************************************
@@ -322,281 +321,241 @@
  * Name: __atomic_store_1
  ****************************************************************************/
 
-STORE(__atomic_store_, 1, uint8_t)
-STORE(nx_atomic_store_, 1, uint8_t)
+STORE(1, uint8_t)
 
 /****************************************************************************
  * Name: __atomic_store_2
  ****************************************************************************/
 
-STORE(__atomic_store_, 2, uint16_t)
-STORE(nx_atomic_store_, 2, uint16_t)
+STORE(2, uint16_t)
 
 /****************************************************************************
  * Name: __atomic_store_4
  ****************************************************************************/
 
-STORE(__atomic_store_, 4, uint32_t)
-STORE(nx_atomic_store_, 4, uint32_t)
+STORE(4, uint32_t)
 
 /****************************************************************************
  * Name: __atomic_store_8
  ****************************************************************************/
 
-STORE(__atomic_store_, 8, uint64_t)
-STORE(nx_atomic_store_, 8, uint64_t)
+STORE(8, uint64_t)
 
 /****************************************************************************
  * Name: __atomic_load_1
  ****************************************************************************/
 
-LOAD(__atomic_load_, 1, uint8_t)
-LOAD(nx_atomic_load_, 1, uint8_t)
+LOAD(1, uint8_t)
 
 /****************************************************************************
  * Name: __atomic_load__2
  ****************************************************************************/
 
-LOAD(__atomic_load_, 2, uint16_t)
-LOAD(nx_atomic_load_, 2, uint16_t)
+LOAD(2, uint16_t)
 
 /****************************************************************************
  * Name: __atomic_load__4
  ****************************************************************************/
 
-LOAD(__atomic_load_, 4, uint32_t)
-LOAD(nx_atomic_load_, 4, uint32_t)
+LOAD(4, uint32_t)
 
 /****************************************************************************
  * Name: __atomic_load__8
  ****************************************************************************/
 
-LOAD(__atomic_load_, 8, uint64_t)
-LOAD(nx_atomic_load_, 8, uint64_t)
+LOAD(8, uint64_t)
 
 /****************************************************************************
  * Name: __atomic_exchange_1
  ****************************************************************************/
 
-EXCHANGE(__atomic_exchange_, 1, uint8_t)
-EXCHANGE(nx_atomic_exchange_, 1, uint8_t)
+EXCHANGE(1, uint8_t)
 
 /****************************************************************************
  * Name: __atomic_exchange__2
  ****************************************************************************/
 
-EXCHANGE(__atomic_exchange_, 2, uint16_t)
-EXCHANGE(nx_atomic_exchange_, 2, uint16_t)
+EXCHANGE(2, uint16_t)
 
 /****************************************************************************
  * Name: __atomic_exchange__4
  ****************************************************************************/
 
-EXCHANGE(__atomic_exchange_, 4, uint32_t)
-EXCHANGE(nx_atomic_exchange_, 4, uint32_t)
+EXCHANGE(4, uint32_t)
 
 /****************************************************************************
  * Name: __atomic_exchange__8
  ****************************************************************************/
 
-EXCHANGE(__atomic_exchange_, 8, uint64_t)
-EXCHANGE(nx_atomic_exchange_, 8, uint64_t)
+EXCHANGE(8, uint64_t)
 
 /****************************************************************************
  * Name: __atomic_compare_exchange_1
  ****************************************************************************/
 
-CMP_EXCHANGE(__atomic_compare_exchange_, 1, uint8_t)
-CMP_EXCHANGE(nx_atomic_compare_exchange_, 1, uint8_t)
+CMP_EXCHANGE(1, uint8_t)
 
 /****************************************************************************
  * Name: __atomic_compare_exchange_2
  ****************************************************************************/
 
-CMP_EXCHANGE(__atomic_compare_exchange_, 2, uint16_t)
-CMP_EXCHANGE(nx_atomic_compare_exchange_, 2, uint16_t)
+CMP_EXCHANGE(2, uint16_t)
 
 /****************************************************************************
  * Name: __atomic_compare_exchange_4
  ****************************************************************************/
 
-CMP_EXCHANGE(__atomic_compare_exchange_, 4, uint32_t)
-CMP_EXCHANGE(nx_atomic_compare_exchange_, 4, uint32_t)
+CMP_EXCHANGE(4, uint32_t)
 
 /****************************************************************************
  * Name: __atomic_compare_exchange_8
  ****************************************************************************/
 
-CMP_EXCHANGE(__atomic_compare_exchange_, 8, uint64_t)
-CMP_EXCHANGE(nx_atomic_compare_exchange_, 8, uint64_t)
+CMP_EXCHANGE(8, uint64_t)
 
 /****************************************************************************
  * Name: __atomic_flag_test_and_set_1
  ****************************************************************************/
 
-FLAG_TEST_AND_SET(__atomic_flags_test_and_set_, 1, uint8_t)
-FLAG_TEST_AND_SET(nx_atomic_flags_test_and_set_, 1, uint8_t)
+FLAG_TEST_AND_SET(1, uint8_t)
 
 /****************************************************************************
  * Name: __atomic_flag_test_and_set_2
  ****************************************************************************/
 
-FLAG_TEST_AND_SET(__atomic_flags_test_and_set_, 2, uint16_t)
-FLAG_TEST_AND_SET(nx_atomic_flags_test_and_set_, 2, uint16_t)
+FLAG_TEST_AND_SET(2, uint16_t)
 
 /****************************************************************************
  * Name: __atomic_flag_test_and_set_4
  ****************************************************************************/
 
-FLAG_TEST_AND_SET(__atomic_flags_test_and_set_, 4, uint32_t)
-FLAG_TEST_AND_SET(nx_atomic_flags_test_and_set_, 4, uint32_t)
+FLAG_TEST_AND_SET(4, uint32_t)
 
 /****************************************************************************
  * Name: __atomic_flag_test_and_set_8
  ****************************************************************************/
 
-FLAG_TEST_AND_SET(__atomic_flags_test_and_set_, 8, uint64_t)
-FLAG_TEST_AND_SET(nx_atomic_flags_test_and_set_, 8, uint64_t)
+FLAG_TEST_AND_SET(8, uint64_t)
 
 /****************************************************************************
  * Name: __atomic_fetch_add_1
  ****************************************************************************/
 
-FETCH_ADD(__atomic_fetch_add_, 1, uint8_t)
-FETCH_ADD(nx_atomic_fetch_add_, 1, uint8_t)
+FETCH_ADD(1, uint8_t)
 
 /****************************************************************************
  * Name: __atomic_fetch_add_2
  ****************************************************************************/
 
-FETCH_ADD(__atomic_fetch_add_, 2, uint16_t)
-FETCH_ADD(nx_atomic_fetch_add_, 2, uint16_t)
+FETCH_ADD(2, uint16_t)
 
 /****************************************************************************
  * Name: __atomic_fetch_add_4
  ****************************************************************************/
 
-FETCH_ADD(__atomic_fetch_add_, 4, uint32_t)
-FETCH_ADD(nx_atomic_fetch_add_, 4, uint32_t)
+FETCH_ADD(4, uint32_t)
 
 /****************************************************************************
  * Name: __atomic_fetch_add_8
  ****************************************************************************/
 
-FETCH_ADD(__atomic_fetch_add_, 8, uint64_t)
-FETCH_ADD(nx_atomic_fetch_add_, 8, uint64_t)
+FETCH_ADD(8, uint64_t)
 
 /****************************************************************************
  * Name: __atomic_fetch_sub_1
  ****************************************************************************/
 
-FETCH_SUB(__atomic_fetch_sub_, 1, uint8_t)
-FETCH_SUB(nx_atomic_fetch_sub_, 1, uint8_t)
+FETCH_SUB(1, uint8_t)
 
 /****************************************************************************
  * Name: __atomic_fetch_sub_2
  ****************************************************************************/
 
-FETCH_SUB(__atomic_fetch_sub_, 2, uint16_t)
-FETCH_SUB(nx_atomic_fetch_sub_, 2, uint16_t)
+FETCH_SUB(2, uint16_t)
 
 /****************************************************************************
  * Name: __atomic_fetch_sub_4
  ****************************************************************************/
 
-FETCH_SUB(__atomic_fetch_sub_, 4, uint32_t)
-FETCH_SUB(nx_atomic_fetch_sub_, 4, uint32_t)
+FETCH_SUB(4, uint32_t)
 
 /****************************************************************************
  * Name: __atomic_fetch_sub_8
  ****************************************************************************/
 
-FETCH_SUB(__atomic_fetch_sub_, 8, uint64_t)
-FETCH_SUB(nx_atomic_fetch_sub_, 8, uint64_t)
+FETCH_SUB(8, uint64_t)
 
 /****************************************************************************
  * Name: __atomic_fetch_and_1
  ****************************************************************************/
 
-FETCH_AND(__atomic_fetch_and_, 1, uint8_t)
-FETCH_AND(nx_atomic_fetch_and_, 1, uint8_t)
+FETCH_AND(1, uint8_t)
 
 /****************************************************************************
  * Name: __atomic_fetch_and_2
  ****************************************************************************/
 
-FETCH_AND(__atomic_fetch_and_, 2, uint16_t)
-FETCH_AND(nx_atomic_fetch_and_, 2, uint16_t)
+FETCH_AND(2, uint16_t)
 
 /****************************************************************************
  * Name: __atomic_fetch_and_4
  ****************************************************************************/
 
-FETCH_AND(__atomic_fetch_and_, 4, uint32_t)
-FETCH_AND(nx_atomic_fetch_and_, 4, uint32_t)
+FETCH_AND(4, uint32_t)
 
 /****************************************************************************
  * Name: __atomic_fetch_and_8
  ****************************************************************************/
 
-FETCH_AND(__atomic_fetch_and_, 8, uint64_t)
-FETCH_AND(nx_atomic_fetch_and_, 8, uint64_t)
+FETCH_AND(8, uint64_t)
 
 /****************************************************************************
  * Name: __atomic_fetch_or_1
  ****************************************************************************/
 
-FETCH_OR(__atomic_fetch_or_, 1, uint8_t)
-FETCH_OR(nx_atomic_fetch_or_, 1, uint8_t)
+FETCH_OR(1, uint8_t)
 
 /****************************************************************************
  * Name: __atomic_fetch_or_2
  ****************************************************************************/
 
-FETCH_OR(__atomic_fetch_or_, 2, uint16_t)
-FETCH_OR(nx_atomic_fetch_or_, 2, uint16_t)
+FETCH_OR(2, uint16_t)
 
 /****************************************************************************
  * Name: __atomic_fetch_or_4
  ****************************************************************************/
 
-FETCH_OR(__atomic_fetch_or_, 4, uint32_t)
-FETCH_OR(nx_atomic_fetch_or_, 4, uint32_t)
+FETCH_OR(4, uint32_t)
 
 /****************************************************************************
  * Name: __atomic_fetch_or_4
  ****************************************************************************/
 
-FETCH_OR(__atomic_fetch_or_, 8, uint64_t)
-FETCH_OR(nx_atomic_fetch_or_, 8, uint64_t)
+FETCH_OR(8, uint64_t)
 
 /****************************************************************************
  * Name: __atomic_fetch_xor_1
  ****************************************************************************/
 
-FETCH_XOR(__atomic_fetch_xor_, 1, uint8_t)
-FETCH_XOR(nx_atomic_fetch_xor_, 1, uint8_t)
+FETCH_XOR(1, uint8_t)
 
 /****************************************************************************
  * Name: __atomic_fetch_xor_2
  ****************************************************************************/
 
-FETCH_XOR(__atomic_fetch_xor_, 2, uint16_t)
-FETCH_XOR(nx_atomic_fetch_xor_, 2, uint16_t)
+FETCH_XOR(2, uint16_t)
 
 /****************************************************************************
  * Name: __atomic_fetch_xor_4
  ****************************************************************************/
 
-FETCH_XOR(__atomic_fetch_xor_, 4, uint32_t)
-FETCH_XOR(nx_atomic_fetch_xor_, 4, uint32_t)
+FETCH_XOR(4, uint32_t)
 
 /****************************************************************************
  * Name: __atomic_fetch_xor_8
  ****************************************************************************/
 
-FETCH_XOR(__atomic_fetch_xor_, 8, uint64_t)
-FETCH_XOR(nx_atomic_fetch_xor_, 8, uint64_t)
+FETCH_XOR(8, uint64_t)
 
 /* Clang define the __sync builtins, add #ifndef to avoid
  * redefined/redeclared problem.
@@ -608,225 +567,193 @@ FETCH_XOR(nx_atomic_fetch_xor_, 8, uint64_t)
  * Name: __sync_add_and_fetch_1
  ****************************************************************************/
 
-SYNC_ADD_FETCH(__sync_add_and_fetch_, 1, uint8_t)
-SYNC_ADD_FETCH(nx_sync_add_and_fetch_, 1, uint8_t)
+SYNC_ADD_FETCH(1, uint8_t)
 
 /****************************************************************************
  * Name: __sync_add_and_fetch_2
  ****************************************************************************/
 
-SYNC_ADD_FETCH(__sync_add_and_fetch_, 2, uint16_t)
-SYNC_ADD_FETCH(nx_sync_add_and_fetch_, 2, uint16_t)
+SYNC_ADD_FETCH(2, uint16_t)
 
 /****************************************************************************
  * Name: __sync_add_and_fetch_4
  ****************************************************************************/
 
-SYNC_ADD_FETCH(__sync_add_and_fetch_, 4, uint32_t)
-SYNC_ADD_FETCH(nx_sync_add_and_fetch_, 4, uint32_t)
+SYNC_ADD_FETCH(4, uint32_t)
 
 /****************************************************************************
  * Name: __sync_add_and_fetch_8
  ****************************************************************************/
 
-SYNC_ADD_FETCH(__sync_add_and_fetch_, 8, uint64_t)
-SYNC_ADD_FETCH(nx_sync_add_and_fetch_, 8, uint64_t)
+SYNC_ADD_FETCH(8, uint64_t)
 
 /****************************************************************************
  * Name: __sync_sub_and_fetch_1
  ****************************************************************************/
 
-SYNC_SUB_FETCH(__sync_sub_and_fetch_, 1, uint8_t)
-SYNC_SUB_FETCH(nx_sync_sub_and_fetch_, 1, uint8_t)
+SYNC_SUB_FETCH(1, uint8_t)
 
 /****************************************************************************
  * Name: __sync_sub_and_fetch_2
  ****************************************************************************/
 
-SYNC_SUB_FETCH(__sync_sub_and_fetch_, 2, uint16_t)
-SYNC_SUB_FETCH(nx_sync_sub_and_fetch_, 2, uint16_t)
+SYNC_SUB_FETCH(2, uint16_t)
 
 /****************************************************************************
  * Name: __sync_sub_and_fetch_4
  ****************************************************************************/
 
-SYNC_SUB_FETCH(__sync_sub_and_fetch_, 4, uint32_t)
-SYNC_SUB_FETCH(nx_sync_sub_and_fetch_, 4, uint32_t)
+SYNC_SUB_FETCH(4, uint32_t)
 
 /****************************************************************************
  * Name: __sync_sub_and_fetch_8
  ****************************************************************************/
 
-SYNC_SUB_FETCH(__sync_sub_and_fetch_, 8, uint64_t)
-SYNC_SUB_FETCH(nx_sync_sub_and_fetch_, 8, uint64_t)
+SYNC_SUB_FETCH(8, uint64_t)
 
 /****************************************************************************
  * Name: __sync_or_and_fetch_1
  ****************************************************************************/
 
-SYNC_OR_FETCH(__sync_or_and_fetch_, 1, uint8_t)
-SYNC_OR_FETCH(nx_sync_or_and_fetch_, 1, uint8_t)
+SYNC_OR_FETCH(1, uint8_t)
 
 /****************************************************************************
  * Name: __sync_or_and_fetch_2
  ****************************************************************************/
 
-SYNC_OR_FETCH(__sync_or_and_fetch_, 2, uint16_t)
-SYNC_OR_FETCH(nx_sync_or_and_fetch_, 2, uint16_t)
+SYNC_OR_FETCH(2, uint16_t)
 
 /****************************************************************************
  * Name: __sync_or_and_fetch_4
  ****************************************************************************/
 
-SYNC_OR_FETCH(__sync_or_and_fetch_, 4, uint32_t)
-SYNC_OR_FETCH(nx_sync_or_and_fetch_, 4, uint32_t)
+SYNC_OR_FETCH(4, uint32_t)
 
 /****************************************************************************
  * Name: __sync_or_and_fetch_8
  ****************************************************************************/
 
-SYNC_OR_FETCH(__sync_or_and_fetch_, 8, uint64_t)
-SYNC_OR_FETCH(nx_sync_or_and_fetch_, 8, uint64_t)
+SYNC_OR_FETCH(8, uint64_t)
 
 /****************************************************************************
  * Name: __sync_and_and_fetch_1
  ****************************************************************************/
 
-SYNC_AND_FETCH(__sync_and_and_fetch_, 1, uint8_t)
-SYNC_AND_FETCH(nx_sync_and_and_fetch_, 1, uint8_t)
+SYNC_AND_FETCH(1, uint8_t)
 
 /****************************************************************************
  * Name: __sync_and_and_fetch_2
  ****************************************************************************/
 
-SYNC_AND_FETCH(__sync_and_and_fetch_, 2, uint16_t)
-SYNC_AND_FETCH(nx_sync_and_and_fetch_, 2, uint16_t)
+SYNC_AND_FETCH(2, uint16_t)
 
 /****************************************************************************
  * Name: __sync_and_and_fetch_4
  ****************************************************************************/
 
-SYNC_AND_FETCH(__sync_and_and_fetch_, 4, uint32_t)
-SYNC_AND_FETCH(nx_sync_and_and_fetch_, 4, uint32_t)
+SYNC_AND_FETCH(4, uint32_t)
 
 /****************************************************************************
  * Name: __sync_and_and_fetch_8
  ****************************************************************************/
 
-SYNC_AND_FETCH(__sync_and_and_fetch_, 8, uint64_t)
-SYNC_AND_FETCH(nx_sync_and_and_fetch_, 8, uint64_t)
+SYNC_AND_FETCH(8, uint64_t)
 
 /****************************************************************************
  * Name: __sync_xor_and_fetch_1
  ****************************************************************************/
 
-SYNC_XOR_FETCH(__sync_xor_and_fetch_, 1, uint8_t)
-SYNC_XOR_FETCH(nx_sync_xor_and_fetch_, 1, uint8_t)
+SYNC_XOR_FETCH(1, uint8_t)
 
 /****************************************************************************
  * Name: __sync_xor_and_fetch_2
  ****************************************************************************/
 
-SYNC_XOR_FETCH(__sync_xor_and_fetch_, 2, uint16_t)
-SYNC_XOR_FETCH(nx_sync_xor_and_fetch_, 2, uint16_t)
+SYNC_XOR_FETCH(2, uint16_t)
 
 /****************************************************************************
  * Name: __sync_xor_and_fetch_4
  ****************************************************************************/
 
-SYNC_XOR_FETCH(__sync_xor_and_fetch_, 4, uint32_t)
-SYNC_XOR_FETCH(nx_sync_xor_and_fetch_, 4, uint32_t)
+SYNC_XOR_FETCH(4, uint32_t)
 
 /****************************************************************************
  * Name: __sync_xor_and_fetch_8
  ****************************************************************************/
 
-SYNC_XOR_FETCH(__sync_xor_and_fetch_, 8, uint64_t)
-SYNC_XOR_FETCH(nx_sync_xor_and_fetch_, 8, uint64_t)
+SYNC_XOR_FETCH(8, uint64_t)
 
 /****************************************************************************
  * Name: __sync_nand_and_fetch_1
  ****************************************************************************/
 
-SYNC_NAND_FETCH(__sync_nand_and_fetch_, 1, uint8_t)
-SYNC_NAND_FETCH(nx_sync_nand_and_fetch_, 1, uint8_t)
+SYNC_NAND_FETCH(1, uint8_t)
 
 /****************************************************************************
  * Name: __sync_nand_and_fetch_2
  ****************************************************************************/
 
-SYNC_NAND_FETCH(__sync_nand_and_fetch_, 2, uint16_t)
-SYNC_NAND_FETCH(nx_sync_nand_and_fetch_, 2, uint16_t)
+SYNC_NAND_FETCH(2, uint16_t)
 
 /****************************************************************************
  * Name: __sync_nand_and_fetch_4
  ****************************************************************************/
 
-SYNC_NAND_FETCH(__sync_nand_and_fetch_, 4, uint32_t)
-SYNC_NAND_FETCH(nx_sync_nand_and_fetch_, 4, uint32_t)
+SYNC_NAND_FETCH(4, uint32_t)
 
 /****************************************************************************
  * Name: __sync_nand_and_fetch_8
  ****************************************************************************/
 
-SYNC_NAND_FETCH(__sync_nand_and_fetch_, 8, uint64_t)
-SYNC_NAND_FETCH(nx_sync_nand_and_fetch_, 8, uint64_t)
+SYNC_NAND_FETCH(8, uint64_t)
 
 /****************************************************************************
  * Name: __sync_bool_compare_and_swap_1
  ****************************************************************************/
 
-SYNC_BOOL_CMP_SWAP(__sync_bool_compare_and_swap_, 1, uint8_t)
-SYNC_BOOL_CMP_SWAP(nx_sync_bool_compare_and_swap_, 1, uint8_t)
+SYNC_BOOL_CMP_SWAP(1, uint8_t)
 
 /****************************************************************************
  * Name: __sync_bool_compare_and_swap_2
  ****************************************************************************/
 
-SYNC_BOOL_CMP_SWAP(__sync_bool_compare_and_swap_, 2, uint16_t)
-SYNC_BOOL_CMP_SWAP(nx_sync_bool_compare_and_swap_, 2, uint16_t)
+SYNC_BOOL_CMP_SWAP(2, uint16_t)
 
 /****************************************************************************
  * Name: __sync_bool_compare_and_swap_4
  ****************************************************************************/
 
-SYNC_BOOL_CMP_SWAP(__sync_bool_compare_and_swap_, 4, uint32_t)
-SYNC_BOOL_CMP_SWAP(nx_sync_bool_compare_and_swap_, 4, uint32_t)
+SYNC_BOOL_CMP_SWAP(4, uint32_t)
 
 /****************************************************************************
  * Name: __sync_bool_compare_and_swap_8
  ****************************************************************************/
 
-SYNC_BOOL_CMP_SWAP(__sync_bool_compare_and_swap_, 8, uint64_t)
-SYNC_BOOL_CMP_SWAP(nx_sync_bool_compare_and_swap_, 8, uint64_t)
+SYNC_BOOL_CMP_SWAP(8, uint64_t)
 
 /****************************************************************************
  * Name: __sync_val_compare_and_swap_1
  ****************************************************************************/
 
-SYNC_VAL_CMP_SWAP(__sync_val_compare_and_swap_, 1, uint8_t)
-SYNC_VAL_CMP_SWAP(nx_sync_val_compare_and_swap_, 1, uint8_t)
+SYNC_VAL_CMP_SWAP(1, uint8_t)
 
 /****************************************************************************
  * Name: __sync_val_compare_and_swap_2
  ****************************************************************************/
 
-SYNC_VAL_CMP_SWAP(__sync_val_compare_and_swap_, 2, uint16_t)
-SYNC_VAL_CMP_SWAP(nx_sync_val_compare_and_swap_, 2, uint16_t)
+SYNC_VAL_CMP_SWAP(2, uint16_t)
 
 /****************************************************************************
  * Name: __sync_val_compare_and_swap_4
  ****************************************************************************/
 
-SYNC_VAL_CMP_SWAP(__sync_val_compare_and_swap_, 4, uint32_t)
-SYNC_VAL_CMP_SWAP(nx_sync_val_compare_and_swap_, 4, uint32_t)
+SYNC_VAL_CMP_SWAP(4, uint32_t)
 
 /****************************************************************************
  * Name: __sync_val_compare_and_swap_8
  ****************************************************************************/
 
-SYNC_VAL_CMP_SWAP(__sync_val_compare_and_swap_, 8, uint64_t)
-SYNC_VAL_CMP_SWAP(nx_sync_val_compare_and_swap_, 8, uint64_t)
+SYNC_VAL_CMP_SWAP(8, uint64_t)
 
 /****************************************************************************
  * Name: __sync_synchronize

Reply via email to