From 30af9146f8915df08d70845776a4a43d73051e4f Mon Sep 17 00:00:00 2001
From: Bharath Rupireddy <bharath.rupireddyforpostgres@gmail.com>
Date: Tue, 24 Jan 2023 08:10:00 +0000
Subject: [PATCH v3] Optimize WAL insertion lock acquisition and release

This commit optimizes WAL insertion lock acquisition and release
in the following ways:

1. The WAL insertion lock's insertingAt variable is currently read and
written under the lwlock's wait list lock to avoid torn reads and
writes. That wait list lock can become a point of contention under
highly concurrent write workloads. Therefore, make insertingAt a
64-bit atomic, which inherently provides torn-free reads and writes.

2. LWLockUpdateVar currently acquires the lwlock's wait list lock even
when there are no waiters at all. Add a fastpath exit to
LWLockUpdateVar when there are no waiters, avoiding the unnecessary
locking. Both changes are sketched below.
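
For illustration, here is a minimal sketch of the two changes,
simplified from the patch below (the identifiers are the real ones
from xlog.c and lwlock.c):

    /* 1. Torn-free read of insertingAt, with no wait list lock: */
    value = pg_atomic_read_u64(&WALInsertLocks[i].l.insertingAt);

    /* 2. Fastpath exit in LWLockUpdateVar when nobody is waiting: */
    pg_atomic_exchange_u64(valptr, val);    /* full barrier */
    if ((pg_atomic_read_u32(&lock->state) & LW_FLAG_HAS_WAITERS) == 0)
        return;    /* skip the wait list lock entirely */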

Note that an atomic exchange operation (which acts as a full memory
barrier) is used instead of a plain atomic write wherever the memory
ordering must be preserved, as illustrated below.
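
To see the hazard concretely, here is a sketch of an interleaving that
a plain pg_atomic_write_u64 would permit in LWLockUpdateVar (the
waiter side is LWLockWaitForVar's queue-then-recheck protocol):

    /*
     * Updater (LWLockUpdateVar)        Waiter (LWLockWaitForVar)
     * --------------------------       -------------------------
     * load lock->state: no waiters
     *                                  read *valptr: old value, so
     *                                  queue self, set HAS_WAITERS,
     *                                  recheck *valptr: still old,
     *                                  go to sleep
     * store *valptr = val
     * skip wakeup
     *
     * Without a barrier, the updater's load of lock->state may be
     * reordered before its store to *valptr, as shown; the waiter then
     * sleeps without ever being woken.  The full barrier provided by
     * pg_atomic_exchange_u64 rules this reordering out.
     */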

It also adds a note to WaitXLogInsertionsToFinish explaining how the
use of a spinlock there makes an explicit memory barrier unnecessary
in some subsequently called functions.
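
For reference, the relevant shape of that function is roughly the
following (a simplified sketch; insertpos_lck and CurrBytePos are the
spinlock and insert-position fields in xlog.c, quoted from memory):

    /* Read the current insert position.  The spinlock acquire/release
     * doubles as a memory barrier. */
    SpinLockAcquire(&Insert->insertpos_lck);
    bytepos = Insert->CurrBytePos;
    SpinLockRelease(&Insert->insertpos_lck);

    for (i = 0; i < NUM_XLOGINSERT_LOCKS; i++)
    {
        /* The atomic reads of insertingAt in here piggyback on that
         * barrier, per the XXX note added below. */
        LWLockWaitForVar(&WALInsertLocks[i].l.lock,
                         &WALInsertLocks[i].l.insertingAt,
                         insertingat, &insertingat);
    }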

Suggested-by: Andres Freund
Author: Bharath Rupireddy
Reviewed-by: Nathan Bossart
Discussion: https://www.postgresql.org/message-id/20221124184619.xit4sfi52bcz2tva%40awork3.anarazel.de
---
 src/backend/access/transam/xlog.c | 14 +++++--
 src/backend/storage/lmgr/lwlock.c | 66 +++++++++++++++++++------------
 src/include/storage/lwlock.h      |  6 +--
 3 files changed, 55 insertions(+), 31 deletions(-)

diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index fb4c860bde..4e427c85c2 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -350,7 +350,8 @@ typedef struct XLogwrtResult
  * wait for all currently in-progress insertions to finish, but the
  * insertingAt indicator allows you to ignore insertions to later in the WAL,
  * so that you only wait for the insertions that are modifying the buffers
- * you're about to write out.
+ * you're about to write out. Using an atomic variable for insertingAt avoids
+ * taking an extra lock for reads and writes.
  *
  * This isn't just an optimization. If all the WAL buffers are dirty, an
  * inserter that's holding a WAL insert lock might need to evict an old WAL
@@ -376,7 +377,7 @@ typedef struct XLogwrtResult
 typedef struct
 {
 	LWLock		lock;
-	XLogRecPtr	insertingAt;
+	pg_atomic_uint64	insertingAt;
 	XLogRecPtr	lastImportantAt;
 } WALInsertLock;
 
@@ -1496,6 +1497,13 @@ WaitXLogInsertionsToFinish(XLogRecPtr upto)
 			 * calling LWLockUpdateVar.  But if it has to sleep, it will
 			 * advertise the insertion point with LWLockUpdateVar before
 			 * sleeping.
+			 *
+			 * XXX: The use of a spinlock at the beginning of this function to
+			 * read the current insert position implies memory ordering. That
+			 * means that the immediate loads and stores to shared memory, for
+			 * instance in LWLockUpdateVar called via LWLockWaitForVar, don't
+			 * need an explicit memory barrier as far as the current usage is
+			 * concerned. But that might not be safe in general.
 			 */
 			if (LWLockWaitForVar(&WALInsertLocks[i].l.lock,
 								 &WALInsertLocks[i].l.insertingAt,
@@ -4596,7 +4604,7 @@ XLOGShmemInit(void)
 	for (i = 0; i < NUM_XLOGINSERT_LOCKS; i++)
 	{
 		LWLockInitialize(&WALInsertLocks[i].l.lock, LWTRANCHE_WAL_INSERT);
-		WALInsertLocks[i].l.insertingAt = InvalidXLogRecPtr;
+		pg_atomic_init_u64(&WALInsertLocks[i].l.insertingAt, InvalidXLogRecPtr);
 		WALInsertLocks[i].l.lastImportantAt = InvalidXLogRecPtr;
 	}
 
diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c
index d2ec396045..27c3b63c68 100644
--- a/src/backend/storage/lmgr/lwlock.c
+++ b/src/backend/storage/lmgr/lwlock.c
@@ -1547,9 +1547,8 @@ LWLockAcquireOrWait(LWLock *lock, LWLockMode mode)
  * *result is set to true if the lock was free, and false otherwise.
  */
 static bool
-LWLockConflictsWithVar(LWLock *lock,
-					   uint64 *valptr, uint64 oldval, uint64 *newval,
-					   bool *result)
+LWLockConflictsWithVar(LWLock *lock, pg_atomic_uint64 *valptr, uint64 oldval,
+					   uint64 *newval, bool *result)
 {
 	bool		mustwait;
 	uint64		value;
@@ -1572,13 +1571,11 @@ LWLockConflictsWithVar(LWLock *lock,
 	*result = false;
 
 	/*
-	 * Read value using the lwlock's wait list lock, as we can't generally
-	 * rely on atomic 64 bit reads/stores.  TODO: On platforms with a way to
-	 * do atomic 64 bit reads/writes the spinlock should be optimized away.
+	 * Read the value atomically, without any explicit lock.  The 64-bit
+	 * atomics implementation transparently does whatever is needed to make
+	 * the read tear-free, even on platforms without native 64-bit atomics.
 	 */
-	LWLockWaitListLock(lock);
-	value = *valptr;
-	LWLockWaitListUnlock(lock);
+	value = pg_atomic_read_u64(valptr);
 
 	if (value != oldval)
 	{
@@ -1607,7 +1604,8 @@ LWLockConflictsWithVar(LWLock *lock,
  * in shared mode, returns 'true'.
  */
 bool
-LWLockWaitForVar(LWLock *lock, uint64 *valptr, uint64 oldval, uint64 *newval)
+LWLockWaitForVar(LWLock *lock, pg_atomic_uint64 *valptr, uint64 oldval,
+				 uint64 *newval)
 {
 	PGPROC	   *proc = MyProc;
 	int			extraWaits = 0;
@@ -1735,29 +1733,47 @@ LWLockWaitForVar(LWLock *lock, uint64 *valptr, uint64 oldval, uint64 *newval)
  * LWLockUpdateVar - Update a variable and wake up waiters atomically
  *
  * Sets *valptr to 'val', and wakes up all processes waiting for us with
- * LWLockWaitForVar().  Setting the value and waking up the processes happen
- * atomically so that any process calling LWLockWaitForVar() on the same lock
- * is guaranteed to see the new value, and act accordingly.
+ * LWLockWaitForVar().  It first sets the value atomically and then wakes up
+ * the waiting processes so that any process calling LWLockWaitForVar() on the
+ * same lock is guaranteed to see the new value, and act accordingly.
  *
  * The caller must be holding the lock in exclusive mode.
  */
 void
-LWLockUpdateVar(LWLock *lock, uint64 *valptr, uint64 val)
+LWLockUpdateVar(LWLock *lock, pg_atomic_uint64 *valptr, uint64 val)
 {
 	proclist_head wakeup;
 	proclist_mutable_iter iter;
 
 	PRINT_LWDEBUG("LWLockUpdateVar", lock, LW_EXCLUSIVE);
 
+	/*
+	 * Update the lock variable atomically first, without acquiring the wait
+	 * list lock, so that anyone else who needs the wait list lock has a
+	 * chance to grab it a bit sooner.
+	 *
+	 * NB: Note the use of pg_atomic_exchange_u64, as opposed to just
+	 * pg_atomic_write_u64, to update the value.  Since pg_atomic_exchange_u64
+	 * is a full barrier, we're guaranteed that the subsequent atomic read of
+	 * the lock state to check for waiters happens after we set the lock
+	 * variable to the new value here.  Without a barrier, we could end up
+	 * missing waiters that should have been woken up.
+	 */
+	pg_atomic_exchange_u64(valptr, val);
+
+	/*
+	 * Quick exit when there are no waiters.  This avoids unnecessarily
+	 * acquiring and releasing the lwlock's wait list lock.
+	 */
+	if ((pg_atomic_read_u32(&lock->state) & LW_FLAG_HAS_WAITERS) == 0)
+		return;
+
 	proclist_init(&wakeup);
 
 	LWLockWaitListLock(lock);
 
 	Assert(pg_atomic_read_u32(&lock->state) & LW_VAL_EXCLUSIVE);
 
-	/* Update the lock's value */
-	*valptr = val;
-
 	/*
 	 * See if there are any LW_WAIT_UNTIL_FREE waiters that need to be woken
 	 * up. They are always in the front of the queue.
@@ -1873,17 +1889,17 @@ LWLockRelease(LWLock *lock)
  * LWLockReleaseClearVar - release a previously acquired lock, reset variable
  */
 void
-LWLockReleaseClearVar(LWLock *lock, uint64 *valptr, uint64 val)
+LWLockReleaseClearVar(LWLock *lock, pg_atomic_uint64 *valptr, uint64 val)
 {
-	LWLockWaitListLock(lock);
-
 	/*
-	 * Set the variable's value before releasing the lock, that prevents race
-	 * a race condition wherein a new locker acquires the lock, but hasn't yet
-	 * set the variables value.
+	 * Update the lock variable atomically first.
+	 *
+	 * NB: Note the use of pg_atomic_exchange_u64, as opposed to just
+	 * pg_atomic_write_u64, to update the value.  Since pg_atomic_exchange_u64
+	 * is a full barrier, we're guaranteed that any subsequent shared memory
+	 * reads/writes happen after we reset the lock variable.
 	 */
-	*valptr = val;
-	LWLockWaitListUnlock(lock);
+	pg_atomic_exchange_u64(valptr, val);
 
 	LWLockRelease(lock);
 }
diff --git a/src/include/storage/lwlock.h b/src/include/storage/lwlock.h
index d2c7afb8f4..f19bc49193 100644
--- a/src/include/storage/lwlock.h
+++ b/src/include/storage/lwlock.h
@@ -128,14 +128,14 @@ extern bool LWLockAcquire(LWLock *lock, LWLockMode mode);
 extern bool LWLockConditionalAcquire(LWLock *lock, LWLockMode mode);
 extern bool LWLockAcquireOrWait(LWLock *lock, LWLockMode mode);
 extern void LWLockRelease(LWLock *lock);
-extern void LWLockReleaseClearVar(LWLock *lock, uint64 *valptr, uint64 val);
+extern void LWLockReleaseClearVar(LWLock *lock, pg_atomic_uint64 *valptr, uint64 val);
 extern void LWLockReleaseAll(void);
 extern bool LWLockHeldByMe(LWLock *lock);
 extern bool LWLockAnyHeldByMe(LWLock *lock, int nlocks, size_t stride);
 extern bool LWLockHeldByMeInMode(LWLock *lock, LWLockMode mode);
 
-extern bool LWLockWaitForVar(LWLock *lock, uint64 *valptr, uint64 oldval, uint64 *newval);
-extern void LWLockUpdateVar(LWLock *lock, uint64 *valptr, uint64 val);
+extern bool LWLockWaitForVar(LWLock *lock, pg_atomic_uint64 *valptr, uint64 oldval, uint64 *newval);
+extern void LWLockUpdateVar(LWLock *lock, pg_atomic_uint64 *valptr, uint64 val);
 
 extern Size LWLockShmemSize(void);
 extern void CreateLWLocks(void);
-- 
2.34.1

