On Thu, Aug 25, 2022 at 11:13:47PM -0700, Nathan Bossart wrote:
> Here is a new patch set that applies on top of v9-0001 in the
> json_lex_string patch set [0] and v3 of the is_valid_ascii patch [1].

Here is a rebased patch set that applies to HEAD.
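
In case it helps review, here is a tiny standalone sketch (not part of
the patch; plain C with standard headers only, using stdint's UINT64_C
in place of the in-tree UINT64CONST) of the trick the non-SIMD
vector32_broadcast() fallback in 0001 uses to emulate a vector on a
plain uint64:

#include <assert.h>
#include <stdint.h>

/*
 * Stand-in for the patch's non-SIMD Vector32: two 32-bit "lanes"
 * packed into a single 64-bit word.
 */
typedef uint64_t Vector32;

static Vector32
vector32_broadcast(uint32_t c)
{
	/*
	 * ~0 / 0xFFFFFFFF == 0x0000000100000001, so multiplying by c
	 * replicates c into both 32-bit halves of the word.
	 */
	return ~UINT64_C(0) / 0xFFFFFFFF * c;
}

int
main(void)
{
	assert(vector32_broadcast(0xDEADBEEF) == UINT64_C(0xDEADBEEFDEADBEEF));
	return 0;
}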

-- 
Nathan Bossart
Amazon Web Services: https://aws.amazon.com
From 8d8afe70bccec20cd381934fae5e11e155d78129 Mon Sep 17 00:00:00 2001
From: Nathan Bossart <nathandboss...@gmail.com>
Date: Thu, 25 Aug 2022 22:18:30 -0700
Subject: [PATCH v4 1/2] abstract architecture-specific implementation details
 from pg_lfind32()

---
 src/include/port/pg_lfind.h | 55 +++++++++++++++++---------------
 src/include/port/simd.h     | 63 +++++++++++++++++++++++++++++++++++++
 2 files changed, 93 insertions(+), 25 deletions(-)

diff --git a/src/include/port/pg_lfind.h b/src/include/port/pg_lfind.h
index a4e13dffec..7a851ea42c 100644
--- a/src/include/port/pg_lfind.h
+++ b/src/include/port/pg_lfind.h
@@ -91,16 +91,19 @@ pg_lfind32(uint32 key, uint32 *base, uint32 nelem)
 {
 	uint32		i = 0;
 
-#ifdef USE_SSE2
+#ifndef USE_NO_SIMD
 
 	/*
-	 * A 16-byte register only has four 4-byte lanes. For better
-	 * instruction-level parallelism, each loop iteration operates on a block
-	 * of four registers. Testing has showed this is ~40% faster than using a
-	 * block of two registers.
+	 * For better instruction-level parallelism, each loop iteration operates
+	 * on a block of four registers.  Testing with SSE2 has shown this is ~40%
+	 * faster than using a block of two registers.
 	 */
-	const		__m128i keys = _mm_set1_epi32(key); /* load 4 copies of key */
-	uint32		iterations = nelem & ~0xF;	/* round down to multiple of 16 */
+	const Vector32 keys = vector32_broadcast(key);	/* load copies of key */
+	uint32		nelem_per_vector = sizeof(Vector32) / sizeof(uint32);
+	uint32		nelem_per_iteration = 4 * nelem_per_vector;
+
+	/* round down to multiple of elements per iteration */
+	uint32		tail_idx = nelem & ~(nelem_per_iteration - 1);
 
 #if defined(USE_ASSERT_CHECKING)
 	bool		assert_result = false;
@@ -116,31 +119,33 @@ pg_lfind32(uint32 key, uint32 *base, uint32 nelem)
 	}
 #endif
 
-	for (i = 0; i < iterations; i += 16)
+	for (i = 0; i < tail_idx; i += nelem_per_iteration)
 	{
-		/* load the next block into 4 registers holding 4 values each */
-		const		__m128i vals1 = _mm_loadu_si128((__m128i *) & base[i]);
-		const		__m128i vals2 = _mm_loadu_si128((__m128i *) & base[i + 4]);
-		const		__m128i vals3 = _mm_loadu_si128((__m128i *) & base[i + 8]);
-		const		__m128i vals4 = _mm_loadu_si128((__m128i *) & base[i + 12]);
+		Vector32	vals1, vals2, vals3, vals4,
+					result1, result2, result3, result4,
+					tmp1, tmp2, result;
+
+		/* load the next block into 4 registers */
+		vector32_load(&vals1, &base[i]);
+		vector32_load(&vals2, &base[i + nelem_per_vector]);
+		vector32_load(&vals3, &base[i + nelem_per_vector * 2]);
+		vector32_load(&vals4, &base[i + nelem_per_vector * 3]);
 
 		/* compare each value to the key */
-		const		__m128i result1 = _mm_cmpeq_epi32(keys, vals1);
-		const		__m128i result2 = _mm_cmpeq_epi32(keys, vals2);
-		const		__m128i result3 = _mm_cmpeq_epi32(keys, vals3);
-		const		__m128i result4 = _mm_cmpeq_epi32(keys, vals4);
+		result1 = vector32_eq(keys, vals1);
+		result2 = vector32_eq(keys, vals2);
+		result3 = vector32_eq(keys, vals3);
+		result4 = vector32_eq(keys, vals4);
 
 		/* combine the results into a single variable */
-		const		__m128i tmp1 = _mm_or_si128(result1, result2);
-		const		__m128i tmp2 = _mm_or_si128(result3, result4);
-		const		__m128i result = _mm_or_si128(tmp1, tmp2);
+		tmp1 = vector32_or(result1, result2);
+		tmp2 = vector32_or(result3, result4);
+		result = vector32_or(tmp1, tmp2);
 
 		/* see if there was a match */
-		if (_mm_movemask_epi8(result) != 0)
+		if (vector32_any_lane_set(result))
 		{
-#if defined(USE_ASSERT_CHECKING)
 			Assert(assert_result == true);
-#endif
 			return true;
 		}
 	}
@@ -151,14 +156,14 @@ pg_lfind32(uint32 key, uint32 *base, uint32 nelem)
 	{
 		if (key == base[i])
 		{
-#if defined(USE_SSE2) && defined(USE_ASSERT_CHECKING)
+#ifndef USE_NO_SIMD
 			Assert(assert_result == true);
 #endif
 			return true;
 		}
 	}
 
-#if defined(USE_SSE2) && defined(USE_ASSERT_CHECKING)
+#ifndef USE_NO_SIMD
 	Assert(assert_result == false);
 #endif
 	return false;
diff --git a/src/include/port/simd.h b/src/include/port/simd.h
index a425cd887b..c42dccf784 100644
--- a/src/include/port/simd.h
+++ b/src/include/port/simd.h
@@ -31,6 +31,7 @@
 #include <emmintrin.h>
 #define USE_SSE2
 typedef __m128i Vector8;
+typedef __m128i Vector32;
 
 #else
 /*
@@ -39,14 +40,17 @@ typedef __m128i Vector8;
  */
 #define USE_NO_SIMD
 typedef uint64 Vector8;
+typedef uint64 Vector32;
 #endif
 
 
 /* load/store operations */
 static inline void vector8_load(Vector8 *v, const uint8 *s);
+static inline void vector32_load(Vector32 *v, const uint32 *s);
 
 /* assignment operations */
 static inline Vector8 vector8_broadcast(const uint8 c);
+static inline Vector32 vector32_broadcast(const uint32 c);
 
 /* element-wise comparisons to a scalar */
 static inline bool vector8_has(const Vector8 v, const uint8 c);
@@ -56,12 +60,17 @@ static inline bool vector8_is_highbit_set(const Vector8 v);
 
 /* arithmetic operations */
 static inline Vector8 vector8_or(const Vector8 v1, const Vector8 v2);
+static inline Vector32 vector32_or(const Vector32 v1, const Vector32 v2);
 
 /* Different semantics for SIMD architectures. */
 #ifndef USE_NO_SIMD
 
 /* comparisons between vectors */
 static inline Vector8 vector8_eq(const Vector8 v1, const Vector8 v2);
+static inline Vector32 vector32_eq(const Vector32 v1, const Vector32 v2);
+
+/* inspecting vector lanes */
+static inline bool vector32_any_lane_set(const Vector32 v);
 
 #endif							/* ! USE_NO_SIMD */
 
@@ -78,6 +87,16 @@ vector8_load(Vector8 *v, const uint8 *s)
 #endif
 }
 
+static inline void
+vector32_load(Vector32 *v, const uint32 *s)
+{
+#ifdef USE_SSE2
+	*v = _mm_loadu_si128((const __m128i *) s);
+#else
+	memcpy(v, s, sizeof(Vector32));
+#endif
+}
+
 
 /*
  * Create a vector with all elements set to the same value.
@@ -92,6 +111,16 @@ vector8_broadcast(const uint8 c)
 #endif
 }
 
+static inline Vector32
+vector32_broadcast(const uint32 c)
+{
+#ifdef USE_SSE2
+	return _mm_set1_epi32(c);
+#else
+	return ~UINT64CONST(0) / 0xFFFFFFFF * c;
+#endif
+}
+
 /*
  * Return true if any elements in the vector are equal to the given scalar.
  */
@@ -230,6 +259,16 @@ vector8_or(const Vector8 v1, const Vector8 v2)
 #endif
 }
 
+static inline Vector32
+vector32_or(const Vector32 v1, const Vector32 v2)
+{
+#ifdef USE_SSE2
+	return _mm_or_si128(v1, v2);
+#else
+	return v1 | v2;
+#endif
+}
+
 
 /* Different semantics for SIMD architectures. */
 #ifndef USE_NO_SIMD
@@ -246,6 +285,30 @@ vector8_eq(const Vector8 v1, const Vector8 v2)
 #endif
 }
 
+static inline Vector32
+vector32_eq(const Vector32 v1, const Vector32 v2)
+{
+#ifdef USE_SSE2
+	return _mm_cmpeq_epi32(v1, v2);
+#endif
+}
+
+/*
+ * Return true if any lanes in the vector have all bits set.
+ *
+ * NB: This function assumes that each lane in the given vector either has all
+ * bits set or all bits zeroed, as it is mainly intended for use with
+ * operations that produce such vectors (e.g., vector32_eq()).  If this
+ * assumption is not true, this function's behavior is undefined.
+ */
+static inline bool
+vector32_any_lane_set(const Vector32 v)
+{
+#ifdef USE_SSE2
+	return _mm_movemask_epi8(v) != 0;
+#endif
+}
+
 #endif							/* ! USE_NO_SIMD */
 
 #endif							/* SIMD_H */
-- 
2.25.1
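
A note for reviewers on the vector32_any_lane_set() contract in 0001
(each lane must have all bits set or all bits zeroed): a byte-granular
movemask is still sufficient for 32-bit lanes, because a matching lane
sets all four of its bytes.  A standalone check, assuming an x86-64
compiler with SSE2 (again, not part of the patch):

#include <assert.h>
#include <emmintrin.h>

int
main(void)
{
	const __m128i keys = _mm_set1_epi32(42);
	const __m128i vals = _mm_setr_epi32(1, 42, 3, 4);

	/* each 32-bit lane becomes 0xFFFFFFFF on match, 0 otherwise */
	const __m128i eq = _mm_cmpeq_epi32(keys, vals);

	/*
	 * _mm_movemask_epi8() gathers one bit per byte; lane 1 matches, so
	 * its four bytes contribute bits 4-7 of the mask (0x00f0).
	 */
	assert(_mm_movemask_epi8(eq) == 0x00f0);
	return 0;
}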

From a1e60532eccded6a485ef2cff60d4f1ceac01a3a Mon Sep 17 00:00:00 2001
From: Nathan Bossart <nathandboss...@gmail.com>
Date: Fri, 26 Aug 2022 10:47:35 -0700
Subject: [PATCH v4 2/2] use ARM Advanced SIMD intrinsic functions where
 available

---
 src/include/port/simd.h | 46 ++++++++++++++++++++++++++++++++++++++---
 1 file changed, 43 insertions(+), 3 deletions(-)

diff --git a/src/include/port/simd.h b/src/include/port/simd.h
index c42dccf784..127b1f3f70 100644
--- a/src/include/port/simd.h
+++ b/src/include/port/simd.h
@@ -33,6 +33,19 @@
 typedef __m128i Vector8;
 typedef __m128i Vector32;
 
+#elif defined(__aarch64__)
+/*
+ * Include arm_neon.h if the compiler is targeting an architecture that
+ * supports ARM Advanced SIMD (Neon) intrinsics.  While Neon support is
+ * technically optional for aarch64, we assume it's unlikely that anyone will
+ * run PostgreSQL on specialized hardware lacking this feature, and we assume
+ * that compilers targeting this architecture understand Neon intrinsics.
+ */
+#include <arm_neon.h>
+#define USE_NEON
+typedef uint8x16_t Vector8;
+typedef uint32x4_t Vector32;
+
 #else
 /*
  * If no SIMD instructions are available, we can in some cases emulate vector
@@ -82,6 +95,8 @@ vector8_load(Vector8 *v, const uint8 *s)
 {
 #if defined(USE_SSE2)
 	*v = _mm_loadu_si128((const __m128i *) s);
+#elif defined(USE_NEON)
+	*v = vld1q_u8(s);
 #else
 	memcpy(v, s, sizeof(Vector8));
 #endif
@@ -92,6 +107,8 @@ vector32_load(Vector32 *v, const uint32 *s)
 {
 #ifdef USE_SSE2
 	*v = _mm_loadu_si128((const __m128i *) s);
+#elif defined(USE_NEON)
+	*v = vld1q_u32(s);
 #else
 	memcpy(v, s, sizeof(Vector32));
 #endif
@@ -106,6 +123,8 @@ vector8_broadcast(const uint8 c)
 {
 #if defined(USE_SSE2)
 	return _mm_set1_epi8(c);
+#elif defined(USE_NEON)
+	return vdupq_n_u8(c);
 #else
 	return ~UINT64CONST(0) / 0xFF * c;
 #endif
@@ -116,6 +135,8 @@ vector32_broadcast(const uint32 c)
 {
 #ifdef USE_SSE2
 	return _mm_set1_epi32(c);
+#elif defined(USE_NEON)
+	return vdupq_n_u32(c);
 #else
 	return ~UINT64CONST(0) / 0xFFFFFFFF * c;
 #endif
@@ -148,6 +169,8 @@ vector8_has(const Vector8 v, const uint8 c)
 	result = vector8_has_zero(v ^ vector8_broadcast(c));
 #elif defined(USE_SSE2)
 	result = _mm_movemask_epi8(_mm_cmpeq_epi8(v, vector8_broadcast(c)));
+#elif defined(USE_NEON)
+	result = vmaxvq_u8(vceqq_u8(v, vector8_broadcast(c))) != 0;
 #endif
 
 	Assert(assert_result == result);
@@ -166,7 +189,7 @@ vector8_has_zero(const Vector8 v)
 	 * definition.
 	 */
 	return vector8_has_le(v, 0);
-#elif defined(USE_SSE2)
+#elif defined(USE_SSE2) || defined(USE_NEON)
 	return vector8_has(v, 0);
 #endif
 }
@@ -179,8 +202,8 @@ static inline bool
 vector8_has_le(const Vector8 v, const uint8 c)
 {
 	bool		result = false;
-#if defined(USE_SSE2)
-	__m128i		sub;
+#ifndef USE_NO_SIMD
+	Vector8		sub;
 #endif
 
 	/* pre-compute the result for assert checking */
@@ -227,6 +250,11 @@ vector8_has_le(const Vector8 v, const uint8 c)
 	 */
 	sub = _mm_subs_epu8(v, vector8_broadcast(c));
 	result = vector8_has_zero(sub);
+#elif defined(USE_NEON)
+
+	/* use the same approach as the USE_SSE2 block above */
+	sub = vqsubq_u8(v, vector8_broadcast(c));
+	result = vector8_has_zero(sub);
 #endif
 
 	Assert(assert_result == result);
@@ -241,6 +269,8 @@ vector8_is_highbit_set(const Vector8 v)
 {
 #ifdef USE_SSE2
 	return _mm_movemask_epi8(v) != 0;
+#elif defined(USE_NEON)
+	return vmaxvq_u8(vandq_u8(v, vector8_broadcast(0x80))) != 0;
 #else
 	return v & vector8_broadcast(0x80);
 #endif
@@ -254,6 +284,8 @@ vector8_or(const Vector8 v1, const Vector8 v2)
 {
 #ifdef USE_SSE2
 	return _mm_or_si128(v1, v2);
+#elif defined(USE_NEON)
+	return vorrq_u8(v1, v2);
 #else
 	return v1 | v2;
 #endif
@@ -264,6 +296,8 @@ vector32_or(const Vector32 v1, const Vector32 v2)
 {
 #ifdef USE_SSE2
 	return _mm_or_si128(v1, v2);
+#elif defined(USE_NEON)
+	return vorrq_u32(v1, v2);
 #else
 	return v1 | v2;
 #endif
@@ -282,6 +316,8 @@ vector8_eq(const Vector8 v1, const Vector8 v2)
 {
 #ifdef USE_SSE2
 	return _mm_cmpeq_epi8(v1, v2);
+#elif defined(USE_NEON)
+	return vceqq_u8(v1, v2);
 #endif
 }
 
@@ -290,6 +326,8 @@ vector32_eq(const Vector32 v1, const Vector32 v2)
 {
 #ifdef USE_SSE2
 	return _mm_cmpeq_epi32(v1, v2);
+#elif defined(USE_NEON)
+	return vceqq_u32(v1, v2);
 #endif
 }
 
@@ -306,6 +344,8 @@ vector32_any_lane_set(const Vector32 v)
 {
 #ifdef USE_SSE2
 	return _mm_movemask_epi8(v) != 0;
+#elif defined(USE_NEON)
+	return vmaxvq_u32(v) != 0;
 #endif
 }
 
-- 
2.25.1
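
Since Neon has no direct movemask equivalent, 0002 implements
vector32_any_lane_set() with vmaxvq_u32(), a horizontal maximum across
lanes, which is nonzero exactly when some lane is set.  A standalone
sanity check, assuming an aarch64 toolchain with Neon (not part of the
patch):

#include <assert.h>
#include <arm_neon.h>

int
main(void)
{
	const uint32_t vals_arr[4] = {1, 42, 3, 4};
	const uint32x4_t keys = vdupq_n_u32(42);
	const uint32x4_t vals = vld1q_u32(vals_arr);

	/* matching lanes become 0xFFFFFFFF, non-matching lanes 0 */
	const uint32x4_t eq = vceqq_u32(keys, vals);

	/* the horizontal max is nonzero iff at least one lane matched */
	assert(vmaxvq_u32(eq) == 0xFFFFFFFF);
	return 0;
}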
