This is an automated email from the ASF dual-hosted git repository.

reshke pushed a commit to branch REL_2_STABLE
in repository https://gitbox.apache.org/repos/asf/cloudberry.git

commit 88df9d7d2c4b2cfbc28bc18593dedf15e047ef0a
Author: Tom Lane <[email protected]>
AuthorDate: Wed Nov 2 17:37:26 2022 -0400

    Allow use of __sync_lock_test_and_set for spinlocks on any machine.
    
    If we have no special-case code in s_lock.h for the current platform,
    but the compiler has __sync_lock_test_and_set, use that instead of
    failing.  It's unlikely that anybody's __sync_lock_test_and_set
    would be so awful as to be worse than our semaphore-based fallback,
    but if it is, they can (continue to) use --disable-spinlocks.
    
    This allows removal of the RISC-V special case installed by commit
    c32fcac56, which generated exactly the same code but only on that
    platform.  Usefully, the RISC-V buildfarm animals should now test
    at least the int variant of this patch.
    
    I've manually tested both variants on ARM by dint of removing the
    ARM-specific stanza.  We don't want to drop that, because it already
    has some special knowledge and is likely to grow more over time.
    Likewise, this is not meant to preclude installing special cases
    for other arches if that proves worthwhile.
    
    Per discussion of a request to install the same code for loongarch64.
    Like the previous patch, we might as well back-patch to supported
    branches.
    
    Discussion: https://postgr.es/m/[email protected]
---
 src/include/storage/s_lock.h | 68 +++++++++++++++++++++++++++++---------------
 1 file changed, 45 insertions(+), 23 deletions(-)

diff --git a/src/include/storage/s_lock.h b/src/include/storage/s_lock.h
index d34ff73ad36..ece5e3d68eb 100644
--- a/src/include/storage/s_lock.h
+++ b/src/include/storage/s_lock.h
@@ -341,29 +341,6 @@ tas(volatile slock_t *lock)
 #endif  /* __arm__ || __arm || __aarch64__ || __aarch64 */
 
 
-/*
- * RISC-V likewise uses __sync_lock_test_and_set(int *, int) if available.
- */
-#if defined(__riscv)
-#ifdef HAVE_GCC__SYNC_INT32_TAS
-#define HAS_TEST_AND_SET
-
-#define TAS(lock) tas(lock)
-
-typedef int slock_t;
-
-static __inline__ int
-tas(volatile slock_t *lock)
-{
-       return __sync_lock_test_and_set(lock, 1);
-}
-
-#define S_UNLOCK(lock) __sync_lock_release(lock)
-
-#endif  /* HAVE_GCC__SYNC_INT32_TAS */
-#endif  /* __riscv */
-
-
 /* S/390 and S/390x Linux (32- and 64-bit zSeries) */
 #if defined(__s390__) || defined(__s390x__)
 #define HAS_TEST_AND_SET
@@ -766,6 +743,51 @@ tas(volatile slock_t *lock)
 typedef unsigned char slock_t;
 #endif
 
+
+/*
+ * If we have no platform-specific knowledge, but we found that the compiler
+ * provides __sync_lock_test_and_set(), use that.  Prefer the int-width
+ * version over the char-width version if we have both, on the rather dubious
+ * grounds that that's known to be more likely to work in the ARM ecosystem.
+ * (But we dealt with ARM above.)
+ */
+#if !defined(HAS_TEST_AND_SET)
+
+#if defined(HAVE_GCC__SYNC_INT32_TAS)
+#define HAS_TEST_AND_SET
+
+#define TAS(lock) tas(lock)
+
+typedef int slock_t;
+
+static __inline__ int
+tas(volatile slock_t *lock)
+{
+       return __sync_lock_test_and_set(lock, 1);
+}
+
+#define S_UNLOCK(lock) __sync_lock_release(lock)
+
+#elif defined(HAVE_GCC__SYNC_CHAR_TAS)
+#define HAS_TEST_AND_SET
+
+#define TAS(lock) tas(lock)
+
+typedef char slock_t;
+
+static __inline__ int
+tas(volatile slock_t *lock)
+{
+       return __sync_lock_test_and_set(lock, 1);
+}
+
+#define S_UNLOCK(lock) __sync_lock_release(lock)
+
+#endif  /* HAVE_GCC__SYNC_INT32_TAS */
+
+#endif /* !defined(HAS_TEST_AND_SET) */
+
+
 /*
  * Default implementation of S_UNLOCK() for gcc/icc.
  *


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to