On Wed, Mar 26, 2025 at 04:44:24PM -0500, Nathan Bossart wrote:
> IMHO these are acceptable results, at least for the use-cases I see in the
> tree.  We might be able to minimize the difference between the Neon and SVE
> implementations on the low end with some additional code, but I'm really
> not sure if it's worth the effort.

I couldn't resist...  I tried a variety of things (e.g., inlining the Neon
implementation to process the tail, jumping to the Neon implementation for
smaller inputs), and the only thing that seemed to be a clear win was to
add a 2-register block in the SVE implementations (like what is already
there for the Neon ones).  In particular, that helps bring the Graviton3
SVE numbers closer to the Neon numbers for inputs between 8-16 8-byte
words.

I also noticed a silly mistake in 0003 that would cause us to potentially
skip part of the tail.  That should be fixed now.

-- 
nathan
>From e938de4a8f1bf1b6b1aec05ec9d753621e37746f Mon Sep 17 00:00:00 2001
From: Nathan Bossart <nat...@postgresql.org>
Date: Mon, 24 Mar 2025 19:48:41 -0500
Subject: [PATCH v10 1/3] Rename TRY_POPCNT_FAST to TRY_POPCNT_X86_64.

This macro guards x86_64-specific code, and a follow-up commit will
add AArch64-specific versions of that code.  To avoid confusion,
let's rename TRY_POPCNT_FAST to make it more obvious that it's for
x86_64.

Discussion: 
https://postgr.es/m/010101936e4aaa70-b474ab9e-b9ce-474d-a3ba-a3dc223d295c-000000%40us-west-2.amazonses.com
---
 src/include/port/pg_bitutils.h |  6 +++---
 src/port/pg_bitutils.c         | 14 +++++++-------
 src/port/pg_popcount_avx512.c  |  8 ++++----
 3 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/src/include/port/pg_bitutils.h b/src/include/port/pg_bitutils.h
index 62554ce685a..3067ff402ba 100644
--- a/src/include/port/pg_bitutils.h
+++ b/src/include/port/pg_bitutils.h
@@ -294,11 +294,11 @@ pg_ceil_log2_64(uint64 num)
  */
 #ifdef HAVE_X86_64_POPCNTQ
 #if defined(HAVE__GET_CPUID) || defined(HAVE__CPUID)
-#define TRY_POPCNT_FAST 1
+#define TRY_POPCNT_X86_64 1
 #endif
 #endif
 
-#ifdef TRY_POPCNT_FAST
+#ifdef TRY_POPCNT_X86_64
 /* Attempt to use the POPCNT instruction, but perform a runtime check first */
 extern PGDLLIMPORT int (*pg_popcount32) (uint32 word);
 extern PGDLLIMPORT int (*pg_popcount64) (uint64 word);
@@ -322,7 +322,7 @@ extern int  pg_popcount64(uint64 word);
 extern uint64 pg_popcount_optimized(const char *buf, int bytes);
 extern uint64 pg_popcount_masked_optimized(const char *buf, int bytes, bits8 
mask);
 
-#endif                                                 /* TRY_POPCNT_FAST */
+#endif                                                 /* TRY_POPCNT_X86_64 */
 
 /*
  * Returns the number of 1-bits in buf.
diff --git a/src/port/pg_bitutils.c b/src/port/pg_bitutils.c
index 5677525693d..82be40e2fb4 100644
--- a/src/port/pg_bitutils.c
+++ b/src/port/pg_bitutils.c
@@ -108,7 +108,7 @@ static inline int pg_popcount64_slow(uint64 word);
 static uint64 pg_popcount_slow(const char *buf, int bytes);
 static uint64 pg_popcount_masked_slow(const char *buf, int bytes, bits8 mask);
 
-#ifdef TRY_POPCNT_FAST
+#ifdef TRY_POPCNT_X86_64
 static bool pg_popcount_available(void);
 static int     pg_popcount32_choose(uint32 word);
 static int     pg_popcount64_choose(uint64 word);
@@ -123,9 +123,9 @@ int                 (*pg_popcount32) (uint32 word) = 
pg_popcount32_choose;
 int                    (*pg_popcount64) (uint64 word) = pg_popcount64_choose;
 uint64         (*pg_popcount_optimized) (const char *buf, int bytes) = 
pg_popcount_choose;
 uint64         (*pg_popcount_masked_optimized) (const char *buf, int bytes, 
bits8 mask) = pg_popcount_masked_choose;
-#endif                                                 /* TRY_POPCNT_FAST */
+#endif                                                 /* TRY_POPCNT_X86_64 */
 
-#ifdef TRY_POPCNT_FAST
+#ifdef TRY_POPCNT_X86_64
 
 /*
  * Return true if CPUID indicates that the POPCNT instruction is available.
@@ -337,7 +337,7 @@ pg_popcount_masked_fast(const char *buf, int bytes, bits8 
mask)
        return popcnt;
 }
 
-#endif                                                 /* TRY_POPCNT_FAST */
+#endif                                                 /* TRY_POPCNT_X86_64 */
 
 
 /*
@@ -486,13 +486,13 @@ pg_popcount_masked_slow(const char *buf, int bytes, bits8 
mask)
        return popcnt;
 }
 
-#ifndef TRY_POPCNT_FAST
+#ifndef TRY_POPCNT_X86_64
 
 /*
  * When the POPCNT instruction is not available, there's no point in using
  * function pointers to vary the implementation between the fast and slow
  * method.  We instead just make these actual external functions when
- * TRY_POPCNT_FAST is not defined.  The compiler should be able to inline
+ * TRY_POPCNT_X86_64 is not defined.  The compiler should be able to inline
  * the slow versions here.
  */
 int
@@ -527,4 +527,4 @@ pg_popcount_masked_optimized(const char *buf, int bytes, 
bits8 mask)
        return pg_popcount_masked_slow(buf, bytes, mask);
 }
 
-#endif                                                 /* !TRY_POPCNT_FAST */
+#endif                                                 /* !TRY_POPCNT_X86_64 */
diff --git a/src/port/pg_popcount_avx512.c b/src/port/pg_popcount_avx512.c
index dac895a0fc2..80c0aee3e73 100644
--- a/src/port/pg_popcount_avx512.c
+++ b/src/port/pg_popcount_avx512.c
@@ -27,11 +27,11 @@
 #include "port/pg_bitutils.h"
 
 /*
- * It's probably unlikely that TRY_POPCNT_FAST won't be set if we are able to
+ * It's probably unlikely that TRY_POPCNT_X86_64 won't be set if we are able to
  * use AVX-512 intrinsics, but we check it anyway to be sure.  We piggy-back on
- * the function pointers that are only used when TRY_POPCNT_FAST is set.
+ * the function pointers that are only used when TRY_POPCNT_X86_64 is set.
  */
-#ifdef TRY_POPCNT_FAST
+#ifdef TRY_POPCNT_X86_64
 
 /*
  * Does CPUID say there's support for XSAVE instructions?
@@ -219,5 +219,5 @@ pg_popcount_masked_avx512(const char *buf, int bytes, bits8 
mask)
        return _mm512_reduce_add_epi64(accum);
 }
 
-#endif                                                 /* TRY_POPCNT_FAST */
+#endif                                                 /* TRY_POPCNT_X86_64 */
 #endif                                                 /* 
USE_AVX512_POPCNT_WITH_RUNTIME_CHECK */
-- 
2.39.5 (Apple Git-154)

>From ee81eded16a5b7987b0fdf180f6a411bef2810b6 Mon Sep 17 00:00:00 2001
From: Nathan Bossart <nat...@postgresql.org>
Date: Mon, 24 Mar 2025 20:10:23 -0500
Subject: [PATCH v10 2/3] Add Neon popcount support.

This commit introduces a Neon implementation of pg_popcount{32,64},
pg_popcount(), and pg_popcount_masked().  As in simd.h, we assume
that all available AArch64 hardware supports Neon, so we
conveniently don't need any new configure-time or runtime checks.
Some compilers emit Neon instructions for these functions already,
but our hand-rolled implementations for pg_popcount() and
pg_popcount_masked() performed better in our tests, presumably due
to the instruction-level parallelism.

Author: "chiranmoy.bhattacha...@fujitsu.com" 
<chiranmoy.bhattacha...@fujitsu.com>
Reviewed-by: John Naylor <johncnaylo...@gmail.com>
Discussion: 
https://postgr.es/m/010101936e4aaa70-b474ab9e-b9ce-474d-a3ba-a3dc223d295c-000000%40us-west-2.amazonses.com
---
 src/include/port/pg_bitutils.h |   9 ++
 src/port/Makefile              |   1 +
 src/port/meson.build           |   1 +
 src/port/pg_bitutils.c         |  22 +++-
 src/port/pg_popcount_aarch64.c | 208 +++++++++++++++++++++++++++++++++
 5 files changed, 235 insertions(+), 6 deletions(-)
 create mode 100644 src/port/pg_popcount_aarch64.c

diff --git a/src/include/port/pg_bitutils.h b/src/include/port/pg_bitutils.h
index 3067ff402ba..a387f77c2c0 100644
--- a/src/include/port/pg_bitutils.h
+++ b/src/include/port/pg_bitutils.h
@@ -298,6 +298,15 @@ pg_ceil_log2_64(uint64 num)
 #endif
 #endif
 
+/*
+ * On AArch64, we can use Neon instructions if the compiler provides access to
+ * them (as indicated by __ARM_NEON).  As in simd.h, we assume that all
+ * available 64-bit hardware has Neon support.
+ */
+#if defined(__aarch64__) && defined(__ARM_NEON)
+#define POPCNT_AARCH64 1
+#endif
+
 #ifdef TRY_POPCNT_X86_64
 /* Attempt to use the POPCNT instruction, but perform a runtime check first */
 extern PGDLLIMPORT int (*pg_popcount32) (uint32 word);
diff --git a/src/port/Makefile b/src/port/Makefile
index 4c224319512..cb86b7141e6 100644
--- a/src/port/Makefile
+++ b/src/port/Makefile
@@ -44,6 +44,7 @@ OBJS = \
        noblock.o \
        path.o \
        pg_bitutils.o \
+       pg_popcount_aarch64.o \
        pg_popcount_avx512.o \
        pg_strong_random.o \
        pgcheckdir.o \
diff --git a/src/port/meson.build b/src/port/meson.build
index 7fcfa728d43..cad0dd8f4f8 100644
--- a/src/port/meson.build
+++ b/src/port/meson.build
@@ -7,6 +7,7 @@ pgport_sources = [
   'noblock.c',
   'path.c',
   'pg_bitutils.c',
+  'pg_popcount_aarch64.c',
   'pg_popcount_avx512.c',
   'pg_strong_random.c',
   'pgcheckdir.c',
diff --git a/src/port/pg_bitutils.c b/src/port/pg_bitutils.c
index 82be40e2fb4..61c7388f474 100644
--- a/src/port/pg_bitutils.c
+++ b/src/port/pg_bitutils.c
@@ -103,10 +103,15 @@ const uint8 pg_number_of_ones[256] = {
        4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8
 };
 
+/*
+ * If we are building the Neon versions, we don't need the "slow" fallbacks.
+ */
+#ifndef POPCNT_AARCH64
 static inline int pg_popcount32_slow(uint32 word);
 static inline int pg_popcount64_slow(uint64 word);
 static uint64 pg_popcount_slow(const char *buf, int bytes);
 static uint64 pg_popcount_masked_slow(const char *buf, int bytes, bits8 mask);
+#endif
 
 #ifdef TRY_POPCNT_X86_64
 static bool pg_popcount_available(void);
@@ -339,6 +344,10 @@ pg_popcount_masked_fast(const char *buf, int bytes, bits8 
mask)
 
 #endif                                                 /* TRY_POPCNT_X86_64 */
 
+/*
+ * If we are building the Neon versions, we don't need the "slow" fallbacks.
+ */
+#ifndef POPCNT_AARCH64
 
 /*
  * pg_popcount32_slow
@@ -486,14 +495,15 @@ pg_popcount_masked_slow(const char *buf, int bytes, bits8 
mask)
        return popcnt;
 }
 
-#ifndef TRY_POPCNT_X86_64
+#endif                                                 /* ! POPCNT_AARCH64 */
+
+#if !defined(TRY_POPCNT_X86_64) && !defined(POPCNT_AARCH64)
 
 /*
- * When the POPCNT instruction is not available, there's no point in using
+ * When special CPU instructions are not available, there's no point in using
  * function pointers to vary the implementation between the fast and slow
- * method.  We instead just make these actual external functions when
- * TRY_POPCNT_X86_64 is not defined.  The compiler should be able to inline
- * the slow versions here.
+ * method.  We instead just make these actual external functions.  The compiler
+ * should be able to inline the slow versions here.
  */
 int
 pg_popcount32(uint32 word)
@@ -527,4 +537,4 @@ pg_popcount_masked_optimized(const char *buf, int bytes, 
bits8 mask)
        return pg_popcount_masked_slow(buf, bytes, mask);
 }
 
-#endif                                                 /* !TRY_POPCNT_X86_64 */
+#endif                                                 /* ! TRY_POPCNT_X86_64 
&& ! POPCNT_AARCH64 */
diff --git a/src/port/pg_popcount_aarch64.c b/src/port/pg_popcount_aarch64.c
new file mode 100644
index 00000000000..cdcfee464e4
--- /dev/null
+++ b/src/port/pg_popcount_aarch64.c
@@ -0,0 +1,208 @@
+/*-------------------------------------------------------------------------
+ *
+ * pg_popcount_aarch64.c
+ *       Holds the AArch64 pg_popcount() implementations.
+ *
+ * Copyright (c) 2025, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ *       src/port/pg_popcount_aarch64.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "c.h"
+
+#include "port/pg_bitutils.h"
+
+#ifdef POPCNT_AARCH64
+
+#include <arm_neon.h>
+
+/*
+ * pg_popcount32
+ *             Return number of 1 bits in word
+ */
+int
+pg_popcount32(uint32 word)
+{
+       return pg_popcount64((uint64) word);
+}
+
+/*
+ * pg_popcount64
+ *             Return number of 1 bits in word
+ */
+int
+pg_popcount64(uint64 word)
+{
+       /*
+        * For some compilers, __builtin_popcountl() emits Neon instructions
+        * already. The line below should compile to the same code on those
+        * systems.
+        */
+       return vaddv_u8(vcnt_u8(vld1_u8((const uint8 *) &word)));
+}
+
+/*
+ * pg_popcount_optimized
+ *             Returns number of 1 bits in buf
+ */
+uint64
+pg_popcount_optimized(const char *buf, int bytes)
+{
+       uint8x16_t      vec;
+       uint32          bytes_per_iteration = 4 * sizeof(uint8x16_t);
+       uint64x2_t      accum1 = vdupq_n_u64(0),
+                               accum2 = vdupq_n_u64(0),
+                               accum3 = vdupq_n_u64(0),
+                               accum4 = vdupq_n_u64(0);
+       uint64          popcnt = 0;
+
+       /*
+        * For better instruction-level parallelism, each loop iteration 
operates
+        * on a block of four registers.
+        */
+       for (; bytes >= bytes_per_iteration; bytes -= bytes_per_iteration)
+       {
+               vec = vld1q_u8((const uint8 *) buf);
+               accum1 = vpadalq_u32(accum1, 
vpaddlq_u16(vpaddlq_u8(vcntq_u8(vec))));
+               buf += sizeof(uint8x16_t);
+
+               vec = vld1q_u8((const uint8 *) buf);
+               accum2 = vpadalq_u32(accum2, 
vpaddlq_u16(vpaddlq_u8(vcntq_u8(vec))));
+               buf += sizeof(uint8x16_t);
+
+               vec = vld1q_u8((const uint8 *) buf);
+               accum3 = vpadalq_u32(accum3, 
vpaddlq_u16(vpaddlq_u8(vcntq_u8(vec))));
+               buf += sizeof(uint8x16_t);
+
+               vec = vld1q_u8((const uint8 *) buf);
+               accum4 = vpadalq_u32(accum4, 
vpaddlq_u16(vpaddlq_u8(vcntq_u8(vec))));
+               buf += sizeof(uint8x16_t);
+       }
+
+       /*
+        * If enough data remains, do another iteration on a block of two
+        * registers.
+        */
+       bytes_per_iteration = 2 * sizeof(uint8x16_t);
+       if (bytes >= bytes_per_iteration)
+       {
+               vec = vld1q_u8((const uint8 *) buf);
+               accum1 = vpadalq_u32(accum1, 
vpaddlq_u16(vpaddlq_u8(vcntq_u8(vec))));
+               buf += sizeof(uint8x16_t);
+
+               vec = vld1q_u8((const uint8 *) buf);
+               accum2 = vpadalq_u32(accum2, 
vpaddlq_u16(vpaddlq_u8(vcntq_u8(vec))));
+               buf += sizeof(uint8x16_t);
+
+               bytes -= bytes_per_iteration;
+       }
+
+       /*
+        * Add the accumulators.
+        */
+       popcnt += vaddvq_u64(vaddq_u64(accum1, accum2));
+       popcnt += vaddvq_u64(vaddq_u64(accum3, accum4));
+
+       /*
+        * Process remaining 8-byte blocks.
+        */
+       for (; bytes >= sizeof(uint64); bytes -= sizeof(uint64))
+       {
+               popcnt += pg_popcount64(*((uint64 *) buf));
+               buf += sizeof(uint64);
+       }
+
+       /*
+        * Process any remaining data byte-by-byte.
+        */
+       while (bytes--)
+               popcnt += pg_number_of_ones[(unsigned char) *buf++];
+
+       return popcnt;
+}
+
+/*
+ * pg_popcount_masked_optimized
+ *             Returns number of 1 bits in buf after applying the mask to each 
byte
+ */
+uint64
+pg_popcount_masked_optimized(const char *buf, int bytes, bits8 mask)
+{
+       uint8x16_t      vec;
+       uint32          bytes_per_iteration = 4 * sizeof(uint8x16_t);
+       uint64x2_t      accum1 = vdupq_n_u64(0),
+                               accum2 = vdupq_n_u64(0),
+                               accum3 = vdupq_n_u64(0),
+                               accum4 = vdupq_n_u64(0);
+       uint64          popcnt = 0,
+                               mask64 = ~UINT64CONST(0) / 0xFF * mask;
+       uint8x16_t      maskv = vdupq_n_u8(mask);
+
+       /*
+        * For better instruction-level parallelism, each loop iteration 
operates
+        * on a block of four registers.
+        */
+       for (; bytes >= bytes_per_iteration; bytes -= bytes_per_iteration)
+       {
+               vec = vandq_u8(vld1q_u8((const uint8 *) buf), maskv);
+               accum1 = vpadalq_u32(accum1, 
vpaddlq_u16(vpaddlq_u8(vcntq_u8(vec))));
+               buf += sizeof(uint8x16_t);
+
+               vec = vandq_u8(vld1q_u8((const uint8 *) buf), maskv);
+               accum2 = vpadalq_u32(accum2, 
vpaddlq_u16(vpaddlq_u8(vcntq_u8(vec))));
+               buf += sizeof(uint8x16_t);
+
+               vec = vandq_u8(vld1q_u8((const uint8 *) buf), maskv);
+               accum3 = vpadalq_u32(accum3, 
vpaddlq_u16(vpaddlq_u8(vcntq_u8(vec))));
+               buf += sizeof(uint8x16_t);
+
+               vec = vandq_u8(vld1q_u8((const uint8 *) buf), maskv);
+               accum4 = vpadalq_u32(accum4, 
vpaddlq_u16(vpaddlq_u8(vcntq_u8(vec))));
+               buf += sizeof(uint8x16_t);
+       }
+
+       /*
+        * If enough data remains, do another iteration on a block of two
+        * registers.
+        */
+       bytes_per_iteration = 2 * sizeof(uint8x16_t);
+       if (bytes >= bytes_per_iteration)
+       {
+               vec = vandq_u8(vld1q_u8((const uint8 *) buf), maskv);
+               accum1 = vpadalq_u32(accum1, 
vpaddlq_u16(vpaddlq_u8(vcntq_u8(vec))));
+               buf += sizeof(uint8x16_t);
+
+               vec = vandq_u8(vld1q_u8((const uint8 *) buf), maskv);
+               accum2 = vpadalq_u32(accum2, 
vpaddlq_u16(vpaddlq_u8(vcntq_u8(vec))));
+               buf += sizeof(uint8x16_t);
+
+               bytes -= bytes_per_iteration;
+       }
+
+       /*
+        * Add the accumulators.
+        */
+       popcnt += vaddvq_u64(vaddq_u64(accum1, accum2));
+       popcnt += vaddvq_u64(vaddq_u64(accum3, accum4));
+
+       /*
+        * Process remaining 8-byte blocks.
+        */
+       for (; bytes >= sizeof(uint64); bytes -= sizeof(uint64))
+       {
+               popcnt += pg_popcount64(*((uint64 *) buf) & mask64);
+               buf += sizeof(uint64);
+       }
+
+       /*
+        * Process any remaining data byte-by-byte.
+        */
+       while (bytes--)
+               popcnt += pg_number_of_ones[(unsigned char) *buf++ & mask];
+
+       return popcnt;
+}
+
+#endif                                                 /* POPCNT_AARCH64 */
-- 
2.39.5 (Apple Git-154)

>From 26585ebe89d97bb99b549b8833f9c838cdd3a67c Mon Sep 17 00:00:00 2001
From: Nathan Bossart <nat...@postgresql.org>
Date: Wed, 26 Mar 2025 22:21:10 -0500
Subject: [PATCH v10 3/3] Add SVE popcount support.

This commit introduces an SVE implementation of pg_popcount{32,64}.
Unlike Neon support, we need an additional configure-time check to
discover whether the compiler supports SVE intrinsics, and we need
a runtime check to find whether the current CPU supports SVE
instructions.  The SVE implementations are much faster for larger
inputs and are comparable to the Neon implementations for smaller
inputs.

Author: "chiranmoy.bhattacha...@fujitsu.com" 
<chiranmoy.bhattacha...@fujitsu.com>
Co-authored-by: "Malladi, Rama" <ramamall...@hotmail.com>
Co-authored-by: "devanga.susmi...@fujitsu.com" <devanga.susmi...@fujitsu.com>
Reviewed-by: Kirill Reshke <reshkekir...@gmail.com>
Reviewed-by: John Naylor <johncnaylo...@gmail.com>
Discussion: 
https://postgr.es/m/010101936e4aaa70-b474ab9e-b9ce-474d-a3ba-a3dc223d295c-000000%40us-west-2.amazonses.com
Discussion: 
https://postgr.es/m/OSZPR01MB84990A9A02A3515C6E85A65B8B2A2%40OSZPR01MB8499.jpnprd01.prod.outlook.com
---
 config/c-compiler.m4           |  51 ++++++
 configure                      |  71 +++++++++
 configure.ac                   |   9 ++
 meson.build                    |  48 ++++++
 src/include/pg_config.h.in     |   3 +
 src/include/port/pg_bitutils.h |  17 ++
 src/port/pg_popcount_aarch64.c | 281 ++++++++++++++++++++++++++++++++-
 7 files changed, 474 insertions(+), 6 deletions(-)

diff --git a/config/c-compiler.m4 b/config/c-compiler.m4
index 3712e81e38c..c2769b3bc21 100644
--- a/config/c-compiler.m4
+++ b/config/c-compiler.m4
@@ -708,3 +708,54 @@ if test x"$Ac_cachevar" = x"yes"; then
 fi
 undefine([Ac_cachevar])dnl
 ])# PGAC_AVX512_POPCNT_INTRINSICS
+
+# PGAC_SVE_POPCNT_INTRINSICS
+# --------------------------
+# Check if the compiler supports the SVE popcount instructions using the
+# svptrue_b64, svdup_u64, svcntb, svld1, svadd_x, svcnt_x, svaddv,
+# svwhilelt_b8, and svand_x intrinsic functions.
+#
+# If the intrinsics are supported, sets pgac_sve_popcnt_intrinsics.
+AC_DEFUN([PGAC_SVE_POPCNT_INTRINSICS],
+[define([Ac_cachevar], [AS_TR_SH([pgac_cv_sve_popcnt_intrinsics])])dnl
+AC_CACHE_CHECK([for svcnt_x], [Ac_cachevar],
+[AC_LINK_IFELSE([AC_LANG_PROGRAM([[#include <arm_sve.h>
+
+       char buf[128];
+
+       #if defined(__has_attribute) && __has_attribute (target)
+       __attribute__((target("arch=armv8-a+sve")))
+       #endif
+       static int popcount_test(void)
+       {
+               svuint64_t      accum1 = svdup_u64(0),
+                                       accum2 = svdup_u64(0),
+                                       vec64;
+               svuint8_t       vec8;
+               svbool_t        pred = svptrue_b64();
+               uint64_t        popcnt,
+                                       mask = 0x5555555555555555;
+               char       *p = buf;
+
+               vec64 = svand_x(pred, svld1(pred, (const uint64_t *) p), mask);
+               accum1 = svadd_x(pred, accum1, svcnt_x(pred, vec64));
+               p += svcntb();
+
+               vec64 = svand_x(pred, svld1(pred, (const uint64_t *) p), mask);
+               accum2 = svadd_x(pred, accum2, svcnt_x(pred, vec64));
+               p += svcntb();
+
+               popcnt = svaddv(pred, svadd_x(pred, accum1, accum2));
+
+               pred = svwhilelt_b8(0, sizeof(buf));
+               vec8 = svand_x(pred, svld1(pred, (const uint8_t *) p), 0x55);
+               return (int) (popcnt + svaddv(pred, svcnt_x(pred, vec8)));
+       }]],
+  [return popcount_test();])],
+  [Ac_cachevar=yes],
+  [Ac_cachevar=no])])
+if test x"$Ac_cachevar" = x"yes"; then
+  pgac_sve_popcnt_intrinsics=yes
+fi
+undefine([Ac_cachevar])dnl
+])# PGAC_SVE_POPCNT_INTRINSICS
diff --git a/configure b/configure
index c6d762dc999..fea70c20ae2 100755
--- a/configure
+++ b/configure
@@ -17517,6 +17517,77 @@ $as_echo "#define USE_AVX512_POPCNT_WITH_RUNTIME_CHECK 
1" >>confdefs.h
   fi
 fi
 
+# Check for SVE popcount intrinsics
+#
+if test x"$host_cpu" = x"aarch64"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for svcnt_x" >&5
+$as_echo_n "checking for svcnt_x... " >&6; }
+if ${pgac_cv_sve_popcnt_intrinsics+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#include <arm_sve.h>
+
+       char buf[128];
+
+       #if defined(__has_attribute) && __has_attribute (target)
+       __attribute__((target("arch=armv8-a+sve")))
+       #endif
+       static int popcount_test(void)
+       {
+               svuint64_t      accum1 = svdup_u64(0),
+                                       accum2 = svdup_u64(0),
+                                       vec64;
+               svuint8_t       vec8;
+               svbool_t        pred = svptrue_b64();
+               uint64_t        popcnt,
+                                       mask = 0x5555555555555555;
+               char       *p = buf;
+
+               vec64 = svand_x(pred, svld1(pred, (const uint64_t *) p), mask);
+               accum1 = svadd_x(pred, accum1, svcnt_x(pred, vec64));
+               p += svcntb();
+
+               vec64 = svand_x(pred, svld1(pred, (const uint64_t *) p), mask);
+               accum2 = svadd_x(pred, accum2, svcnt_x(pred, vec64));
+               p += svcntb();
+
+               popcnt = svaddv(pred, svadd_x(pred, accum1, accum2));
+
+               pred = svwhilelt_b8(0, sizeof(buf));
+               vec8 = svand_x(pred, svld1(pred, (const uint8_t *) p), 0x55);
+               return (int) (popcnt + svaddv(pred, svcnt_x(pred, vec8)));
+       }
+int
+main ()
+{
+return popcount_test();
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+  pgac_cv_sve_popcnt_intrinsics=yes
+else
+  pgac_cv_sve_popcnt_intrinsics=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+    conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: 
$pgac_cv_sve_popcnt_intrinsics" >&5
+$as_echo "$pgac_cv_sve_popcnt_intrinsics" >&6; }
+if test x"$pgac_cv_sve_popcnt_intrinsics" = x"yes"; then
+  pgac_sve_popcnt_intrinsics=yes
+fi
+
+  if test x"$pgac_sve_popcnt_intrinsics" = x"yes"; then
+
+$as_echo "#define USE_SVE_POPCNT_WITH_RUNTIME_CHECK 1" >>confdefs.h
+
+  fi
+fi
+
 # Check for Intel SSE 4.2 intrinsics to do CRC calculations.
 #
 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for _mm_crc32_u8 and 
_mm_crc32_u32" >&5
diff --git a/configure.ac b/configure.ac
index ecbc2734829..05266e6d656 100644
--- a/configure.ac
+++ b/configure.ac
@@ -2069,6 +2069,15 @@ if test x"$host_cpu" = x"x86_64"; then
   fi
 fi
 
+# Check for SVE popcount intrinsics
+#
+if test x"$host_cpu" = x"aarch64"; then
+  PGAC_SVE_POPCNT_INTRINSICS()
+  if test x"$pgac_sve_popcnt_intrinsics" = x"yes"; then
+    AC_DEFINE(USE_SVE_POPCNT_WITH_RUNTIME_CHECK, 1, [Define to 1 to use SVE 
popcount instructions with a runtime check.])
+  fi
+fi
+
 # Check for Intel SSE 4.2 intrinsics to do CRC calculations.
 #
 PGAC_SSE42_CRC32_INTRINSICS()
diff --git a/meson.build b/meson.build
index 108e3678071..a41bf90d3c0 100644
--- a/meson.build
+++ b/meson.build
@@ -2297,6 +2297,54 @@ int main(void)
 endif
 
 
+###############################################################
+# Check for the availability of SVE popcount intrinsics.
+###############################################################
+
+if host_cpu == 'aarch64'
+
+  prog = '''
+#include <arm_sve.h>
+
+char buf[128];
+
+#if defined(__has_attribute) && __has_attribute (target)
+__attribute__((target("arch=armv8-a+sve")))
+#endif
+int main(void)
+{
+       svuint64_t      accum1 = svdup_u64(0),
+                               accum2 = svdup_u64(0),
+                               vec64;
+       svuint8_t       vec8;
+       svbool_t        pred = svptrue_b64();
+       uint64_t        popcnt,
+                               mask = 0x5555555555555555;
+       char       *p = buf;
+
+       vec64 = svand_x(pred, svld1(pred, (const uint64_t *) p), mask);
+       accum1 = svadd_x(pred, accum1, svcnt_x(pred, vec64));
+       p += svcntb();
+
+       vec64 = svand_x(pred, svld1(pred, (const uint64_t *) p), mask);
+       accum2 = svadd_x(pred, accum2, svcnt_x(pred, vec64));
+       p += svcntb();
+
+       popcnt = svaddv(pred, svadd_x(pred, accum1, accum2));
+
+       pred = svwhilelt_b8(0, sizeof(buf));
+       vec8 = svand_x(pred, svld1(pred, (const uint8_t *) p), 0x55);
+       return (int) (popcnt + svaddv(pred, svcnt_x(pred, vec8)));
+}
+'''
+
+  if cc.links(prog, name: 'SVE popcount', args: test_c_args)
+    cdata.set('USE_SVE_POPCNT_WITH_RUNTIME_CHECK', 1)
+  endif
+
+endif
+
+
 ###############################################################
 # Select CRC-32C implementation.
 #
diff --git a/src/include/pg_config.h.in b/src/include/pg_config.h.in
index f2422241133..ac13112a892 100644
--- a/src/include/pg_config.h.in
+++ b/src/include/pg_config.h.in
@@ -709,6 +709,9 @@
 /* Define to 1 to use Intel SSE 4.2 CRC instructions with a runtime check. */
 #undef USE_SSE42_CRC32C_WITH_RUNTIME_CHECK
 
+/* Define to 1 to use SVE popcount instructions with a runtime check. */
+#undef USE_SVE_POPCNT_WITH_RUNTIME_CHECK
+
 /* Define to build with systemd support. (--with-systemd) */
 #undef USE_SYSTEMD
 
diff --git a/src/include/port/pg_bitutils.h b/src/include/port/pg_bitutils.h
index a387f77c2c0..c7901bf8ddc 100644
--- a/src/include/port/pg_bitutils.h
+++ b/src/include/port/pg_bitutils.h
@@ -324,6 +324,23 @@ extern uint64 pg_popcount_avx512(const char *buf, int 
bytes);
 extern uint64 pg_popcount_masked_avx512(const char *buf, int bytes, bits8 
mask);
 #endif
 
+#elif POPCNT_AARCH64
+/* Use the Neon version of pg_popcount{32,64} without function pointer. */
+extern int     pg_popcount32(uint32 word);
+extern int     pg_popcount64(uint64 word);
+
+/*
+ * We can try to use an SVE-optimized pg_popcount() on some systems.  For that,
+ * we do use a function pointer.
+ */
+#ifdef USE_SVE_POPCNT_WITH_RUNTIME_CHECK
+extern PGDLLIMPORT uint64 (*pg_popcount_optimized) (const char *buf, int 
bytes);
+extern PGDLLIMPORT uint64 (*pg_popcount_masked_optimized) (const char *buf, 
int bytes, bits8 mask);
+#else
+extern uint64 pg_popcount_optimized(const char *buf, int bytes);
+extern uint64 pg_popcount_masked_optimized(const char *buf, int bytes, bits8 
mask);
+#endif
+
 #else
 /* Use a portable implementation -- no need for a function pointer. */
 extern int     pg_popcount32(uint32 word);
diff --git a/src/port/pg_popcount_aarch64.c b/src/port/pg_popcount_aarch64.c
index cdcfee464e4..9c9e8b9cd23 100644
--- a/src/port/pg_popcount_aarch64.c
+++ b/src/port/pg_popcount_aarch64.c
@@ -18,6 +18,275 @@
 
 #include <arm_neon.h>
 
+#ifdef USE_SVE_POPCNT_WITH_RUNTIME_CHECK
+#include <arm_sve.h>
+
+#if defined(HAVE_ELF_AUX_INFO) || defined(HAVE_GETAUXVAL)
+#include <sys/auxv.h>
+#endif
+#endif
+
+/*
+ * The Neon versions are built regardless of whether we are building the SVE
+ * versions.
+ */
+static uint64 pg_popcount_neon(const char *buf, int bytes);
+static uint64 pg_popcount_masked_neon(const char *buf, int bytes, bits8 mask);
+
+#ifdef USE_SVE_POPCNT_WITH_RUNTIME_CHECK
+
+/*
+ * These are the SVE implementations of the popcount functions.
+ */
+static uint64 pg_popcount_sve(const char *buf, int bytes);
+static uint64 pg_popcount_masked_sve(const char *buf, int bytes, bits8 mask);
+
+/*
+ * The function pointers are initially set to "choose" functions.  These
+ * functions will first set the pointers to the right implementations (based on
+ * what the current CPU supports) and then will call the pointer to fulfill the
+ * caller's request.
+ */
+static uint64 pg_popcount_choose(const char *buf, int bytes);
+static uint64 pg_popcount_masked_choose(const char *buf, int bytes, bits8 
mask);
+uint64         (*pg_popcount_optimized) (const char *buf, int bytes) = 
pg_popcount_choose;
+uint64         (*pg_popcount_masked_optimized) (const char *buf, int bytes, 
bits8 mask) = pg_popcount_masked_choose;
+
+static inline bool
+pg_popcount_sve_available(void)
+{
+#ifdef HAVE_ELF_AUX_INFO
+       unsigned long value;
+
+       return elf_aux_info(AT_HWCAP, &value, sizeof(value)) == 0 &&
+               (value & HWCAP_SVE) != 0;
+#elif defined(HAVE_GETAUXVAL)
+       return (getauxval(AT_HWCAP) & HWCAP_SVE) != 0;
+#else
+       return false;
+#endif
+}
+
+static inline void
+choose_popcount_functions(void)
+{
+       if (pg_popcount_sve_available())
+       {
+               pg_popcount_optimized = pg_popcount_sve;
+               pg_popcount_masked_optimized = pg_popcount_masked_sve;
+       }
+       else
+       {
+               pg_popcount_optimized = pg_popcount_neon;
+               pg_popcount_masked_optimized = pg_popcount_masked_neon;
+       }
+}
+
+static uint64
+pg_popcount_choose(const char *buf, int bytes)
+{
+       choose_popcount_functions();
+       return pg_popcount_optimized(buf, bytes);
+}
+
+static uint64
+pg_popcount_masked_choose(const char *buf, int bytes, bits8 mask)
+{
+       choose_popcount_functions();
+       return pg_popcount_masked_optimized(buf, bytes, mask);
+}
+
+/*
+ * pg_popcount_sve
+ *             Returns number of 1 bits in buf
+ */
+pg_attribute_target("arch=armv8-a+sve")
+static uint64
+pg_popcount_sve(const char *buf, int bytes)
+{
+       uint32          vec_len = svcntb(),
+                               bytes_per_iteration = 4 * vec_len;
+       svuint64_t      accum1 = svdup_u64(0),
+                               accum2 = svdup_u64(0),
+                               accum3 = svdup_u64(0),
+                               accum4 = svdup_u64(0);
+       svbool_t        pred = svptrue_b64();
+       uint64          popcnt = 0;
+
+       /*
+        * For better instruction-level parallelism, each loop iteration 
operates
+        * on a block of four registers.
+        */
+       for (; bytes >= bytes_per_iteration; bytes -= bytes_per_iteration)
+       {
+               svuint64_t      vec;
+
+               vec = svld1(pred, (const uint64 *) buf);
+               accum1 = svadd_x(pred, accum1, svcnt_x(pred, vec));
+               buf += vec_len;
+
+               vec = svld1(pred, (const uint64 *) buf);
+               accum2 = svadd_x(pred, accum2, svcnt_x(pred, vec));
+               buf += vec_len;
+
+               vec = svld1(pred, (const uint64 *) buf);
+               accum3 = svadd_x(pred, accum3, svcnt_x(pred, vec));
+               buf += vec_len;
+
+               vec = svld1(pred, (const uint64 *) buf);
+               accum4 = svadd_x(pred, accum4, svcnt_x(pred, vec));
+               buf += vec_len;
+       }
+
+       /*
+        * If enough data remains, do another iteration on a block of two
+        * registers.
+        */
+       bytes_per_iteration = 2 * vec_len;
+       if (bytes >= bytes_per_iteration)
+       {
+               svuint64_t      vec;
+
+               vec = svld1(pred, (const uint64 *) buf);
+               accum1 = svadd_x(pred, accum1, svcnt_x(pred, vec));
+               buf += vec_len;
+
+               vec = svld1(pred, (const uint64 *) buf);
+               accum2 = svadd_x(pred, accum2, svcnt_x(pred, vec));
+               buf += vec_len;
+
+               bytes -= bytes_per_iteration;
+       }
+
+       /*
+        * Add the accumulators.
+        */
+       popcnt += svaddv(pred, svadd_x(pred, accum1, accum2));
+       popcnt += svaddv(pred, svadd_x(pred, accum3, accum4));
+
+       /*
+        * Process any remaining data.
+        */
+       for (; bytes > 0; bytes -= vec_len)
+       {
+               svuint8_t       vec;
+
+               pred = svwhilelt_b8(0, bytes);
+               vec = svld1(pred, (const uint8 *) buf);
+               popcnt += svaddv(pred, svcnt_x(pred, vec));
+               buf += vec_len;
+       }
+
+       return popcnt;
+}
+
+/*
+ * pg_popcount_masked_sve
+ *             Returns number of 1 bits in buf after applying the mask to each 
byte
+ */
+pg_attribute_target("arch=armv8-a+sve")
+static uint64
+pg_popcount_masked_sve(const char *buf, int bytes, bits8 mask)
+{
+       uint32          vec_len = svcntb(),
+                               bytes_per_iteration = 4 * vec_len;
+       svuint64_t      accum1 = svdup_u64(0),
+                               accum2 = svdup_u64(0),
+                               accum3 = svdup_u64(0),
+                               accum4 = svdup_u64(0);
+       svbool_t        pred = svptrue_b64();
+       uint64          popcnt = 0,
+                               mask64 = ~UINT64CONST(0) / 0xFF * mask;
+
+       /*
+        * For better instruction-level parallelism, each loop iteration 
operates
+        * on a block of four registers.
+        */
+       for (; bytes >= bytes_per_iteration; bytes -= bytes_per_iteration)
+       {
+               svuint64_t      vec;
+
+               vec = svand_x(pred, svld1(pred, (const uint64 *) buf), mask64);
+               accum1 = svadd_x(pred, accum1, svcnt_x(pred, vec));
+               buf += vec_len;
+
+               vec = svand_x(pred, svld1(pred, (const uint64 *) buf), mask64);
+               accum2 = svadd_x(pred, accum2, svcnt_x(pred, vec));
+               buf += vec_len;
+
+               vec = svand_x(pred, svld1(pred, (const uint64 *) buf), mask64);
+               accum3 = svadd_x(pred, accum3, svcnt_x(pred, vec));
+               buf += vec_len;
+
+               vec = svand_x(pred, svld1(pred, (const uint64 *) buf), mask64);
+               accum4 = svadd_x(pred, accum4, svcnt_x(pred, vec));
+               buf += vec_len;
+       }
+
+       /*
+        * If enough data remains, do another iteration on a block of two
+        * registers.
+        */
+       bytes_per_iteration = 2 * vec_len;
+       if (bytes >= bytes_per_iteration)
+       {
+               svuint64_t      vec;
+
+               vec = svand_x(pred, svld1(pred, (const uint64 *) buf), mask64);
+               accum1 = svadd_x(pred, accum1, svcnt_x(pred, vec));
+               buf += vec_len;
+
+               vec = svand_x(pred, svld1(pred, (const uint64 *) buf), mask64);
+               accum2 = svadd_x(pred, accum2, svcnt_x(pred, vec));
+               buf += vec_len;
+
+               bytes -= bytes_per_iteration;
+       }
+
+       /*
+        * Add the accumulators.
+        */
+       popcnt += svaddv(pred, svadd_x(pred, accum1, accum2));
+       popcnt += svaddv(pred, svadd_x(pred, accum3, accum4));
+
+       /*
+        * Process any remaining data.
+        */
+       for (; bytes > 0; bytes -= vec_len)
+       {
+               svuint8_t       vec;
+
+               pred = svwhilelt_b8(0, bytes);
+               vec = svand_x(pred, svld1(pred, (const uint8 *) buf), mask);
+               popcnt += svaddv(pred, svcnt_x(pred, vec));
+               buf += vec_len;
+       }
+
+       return popcnt;
+}
+
+#else                                                  /* 
USE_SVE_POPCNT_WITH_RUNTIME_CHECK */
+
+/*
+ * When the SVE version isn't available, there's no point in using function
+ * pointers to vary the implementation.  We instead just make these actual
+ * external functions when USE_SVE_POPCNT_WITH_RUNTIME_CHECK is not defined.
+ * The compiler should be able to inline the slow versions here.
+ */
+uint64
+pg_popcount_optimized(const char *buf, int bytes)
+{
+       return pg_popcount_neon(buf, bytes);
+}
+
+uint64
+pg_popcount_masked_optimized(const char *buf, int bytes, bits8 mask)
+{
+       return pg_popcount_masked_neon(buf, bytes, mask);
+}
+
+#endif                                                 /* ! 
USE_SVE_POPCNT_WITH_RUNTIME_CHECK */
+
 /*
  * pg_popcount32
  *             Return number of 1 bits in word
@@ -44,11 +313,11 @@ pg_popcount64(uint64 word)
 }
 
 /*
- * pg_popcount_optimized
+ * pg_popcount_neon
  *             Returns number of 1 bits in buf
  */
-uint64
-pg_popcount_optimized(const char *buf, int bytes)
+static uint64
+pg_popcount_neon(const char *buf, int bytes)
 {
        uint8x16_t      vec;
        uint32          bytes_per_iteration = 4 * sizeof(uint8x16_t);
@@ -124,11 +393,11 @@ pg_popcount_optimized(const char *buf, int bytes)
 }
 
 /*
- * pg_popcount_masked_optimized
+ * pg_popcount_masked_neon
  *             Returns number of 1 bits in buf after applying the mask to each 
byte
  */
-uint64
-pg_popcount_masked_optimized(const char *buf, int bytes, bits8 mask)
+static uint64
+pg_popcount_masked_neon(const char *buf, int bytes, bits8 mask)
 {
        uint8x16_t      vec;
        uint32          bytes_per_iteration = 4 * sizeof(uint8x16_t);
-- 
2.39.5 (Apple Git-154)

Reply via email to