The choice between adding a new GUC or extending the existing one
(e.g., log_lock_waits)
is debatable, but I prefer the latter. I'm considering extending log_lock_waits
to accept a value like "fail". If set to "on" (the current behavior),
detailed logs are generated when the lock wait time exceeds deadlock_timeout. If set to "fail", logs are generated whenever a lock wait fails. If both are
specified, logs would be triggered when the wait time exceeds
deadlock_timeout or
when a lock wait fails.

Thanks for the idea.
Changed log_lock_waits to an enum type and added the "fail" and "all" values.
"off"  : No log message (default).
"on"   : Log if the wait exceeds deadlock_timeout (the current behavior).
"fail" : Log if a lock acquisition fails.
"all"  : Log in all of the above cases.

I struggled with the name "on", but decided to keep it for backward compatibility.
diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml
index 934ef5e469..e297b6d188 100644
--- a/doc/src/sgml/config.sgml
+++ b/doc/src/sgml/config.sgml
@@ -7664,12 +7664,15 @@ log_line_prefix = '%m [%p] %q%u@%d/%a '
       </term>
       <listitem>
        <para>
-        Controls whether a log message is produced when a session waits
-        longer than <xref linkend="guc-deadlock-timeout"/> to acquire a
-        lock.  This is useful in determining if lock waits are causing
-        poor performance.  The default is <literal>off</literal>.
-        Only superusers and users with the appropriate <literal>SET</literal>
-        privilege can change this setting.
+        Controls whether lock wait log messages are generated.  When set to
+        <literal>on</literal>, a log message is generated if a session waits
+        longer than <xref linkend="guc-deadlock-timeout"/> to acquire a lock.
+        When set to <literal>fail</literal>, a log message is generated
+        whenever a lock acquisition fails; currently this can only happen
+        for lock requests using <literal>NOWAIT</literal>.  When set to
+        <literal>all</literal>, log messages are generated in both cases.
+        This is useful in determining if lock waits are causing
+        poor performance.  The default is <literal>off</literal>.
+        Only superusers and users with the appropriate <literal>SET</literal>
+        privilege can change this setting.
        </para>
       </listitem>
      </varlistentry>
diff --git a/src/backend/storage/lmgr/proc.c b/src/backend/storage/lmgr/proc.c
index eaf3916f28..05ecafaedd 100644
--- a/src/backend/storage/lmgr/proc.c
+++ b/src/backend/storage/lmgr/proc.c
@@ -61,7 +61,7 @@ int			LockTimeout = 0;
 int			IdleInTransactionSessionTimeout = 0;
 int			TransactionTimeout = 0;
 int			IdleSessionTimeout = 0;
-bool		log_lock_waits = false;
+int			log_lock_waits = LOGLOCK_OFF;
 
 /* Pointer to this process's PGPROC struct, if any */
 PGPROC	   *MyProc = NULL;
@@ -1119,6 +1119,11 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable, bool dontWait)
 	bool		logged_recovery_conflict = false;
 	ProcWaitStatus myWaitStatus;
 	PGPROC	   *leader = MyProc->lockGroupLeader;
+	StringInfoData buf,
+			lock_waiters_sbuf,
+			lock_holders_sbuf;
+	const char *modename;
+	int			lockHoldersNum = 0;
 
 	/*
 	 * If group locking is in use, locks held by members of my locking group
@@ -1218,10 +1223,33 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable, bool dontWait)
 
 	/*
 	 * At this point we know that we'd really need to sleep. If we've been
-	 * commanded not to do that, bail out.
+	 * commanded not to do that, bail out.  Log the lock failure first
+	 * if log_lock_waits is set to "fail" or "all".
 	 */
 	if (dontWait)
+	{
+		/*
+		 * Report the failed acquisition if requested.  Note that the
+		 * partition lock is still held here, so it is safe to inspect the
+		 * lock's procLocks list.
+		 */
+		if (log_lock_waits == LOGLOCK_FAIL || log_lock_waits == LOGLOCK_ALL)
+		{
+			initStringInfo(&buf);
+			initStringInfo(&lock_waiters_sbuf);
+			initStringInfo(&lock_holders_sbuf);
+
+			DescribeLockTag(&buf, &locallock->tag.lock);
+			modename = GetLockmodeName(locallock->tag.lock.locktag_lockmethodid,
+									   lockmode);
+
+			/* Collect lock holders and waiters */
+			CollectLockHoldersAndWaiters(proclock, lock, &lock_holders_sbuf,
+										 &lock_waiters_sbuf, &lockHoldersNum);
+
+			/*
+			 * Reuse the "Wait queue" wording of the deadlock_timeout report
+			 * below, so translators see a single consistent message.
+			 */
+			ereport(LOG,
+					(errmsg("process %d could not obtain %s on %s",
+							MyProcPid, modename, buf.data),
+					 errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
+										  "Processes holding the lock: %s. Wait queue: %s.",
+										  lockHoldersNum,
+										  lock_holders_sbuf.data,
+										  lock_waiters_sbuf.data)));
+
+			/* Don't leak the buffers for the rest of the transaction. */
+			pfree(buf.data);
+			pfree(lock_holders_sbuf.data);
+			pfree(lock_waiters_sbuf.data);
+		}
 		return PROC_WAIT_STATUS_ERROR;
+	}
 
 	/*
 	 * Insert self into queue, at the position determined above.
@@ -1496,22 +1524,14 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable, bool dontWait)
 
 		/*
 		 * If awoken after the deadlock check interrupt has run, and
-		 * log_lock_waits is on, then report about the wait.
+		 * log_lock_waits is on or all, then report about the wait.
 		 */
-		if (log_lock_waits && deadlock_state != DS_NOT_YET_CHECKED)
+		if ((log_lock_waits == LOGLOCK_ON || log_lock_waits == LOGLOCK_ALL) &&
+			deadlock_state != DS_NOT_YET_CHECKED)
 		{
-			StringInfoData buf,
-						lock_waiters_sbuf,
-						lock_holders_sbuf;
-			const char *modename;
 			long		secs;
 			int			usecs;
 			long		msecs;
-			dlist_iter	proc_iter;
-			PROCLOCK   *curproclock;
-			bool		first_holder = true,
-						first_waiter = true;
-			int			lockHoldersNum = 0;
 
 			initStringInfo(&buf);
 			initStringInfo(&lock_waiters_sbuf);
@@ -1526,53 +1546,10 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable, bool dontWait)
 			msecs = secs * 1000 + usecs / 1000;
 			usecs = usecs % 1000;
 
-			/*
-			 * we loop over the lock's procLocks to gather a list of all
-			 * holders and waiters. Thus we will be able to provide more
-			 * detailed information for lock debugging purposes.
-			 *
-			 * lock->procLocks contains all processes which hold or wait for
-			 * this lock.
-			 */
-
 			LWLockAcquire(partitionLock, LW_SHARED);
 
-			dlist_foreach(proc_iter, &lock->procLocks)
-			{
-				curproclock =
-					dlist_container(PROCLOCK, lockLink, proc_iter.cur);
-
-				/*
-				 * we are a waiter if myProc->waitProcLock == curproclock; we
-				 * are a holder if it is NULL or something different
-				 */
-				if (curproclock->tag.myProc->waitProcLock == curproclock)
-				{
-					if (first_waiter)
-					{
-						appendStringInfo(&lock_waiters_sbuf, "%d",
-										 curproclock->tag.myProc->pid);
-						first_waiter = false;
-					}
-					else
-						appendStringInfo(&lock_waiters_sbuf, ", %d",
-										 curproclock->tag.myProc->pid);
-				}
-				else
-				{
-					if (first_holder)
-					{
-						appendStringInfo(&lock_holders_sbuf, "%d",
-										 curproclock->tag.myProc->pid);
-						first_holder = false;
-					}
-					else
-						appendStringInfo(&lock_holders_sbuf, ", %d",
-										 curproclock->tag.myProc->pid);
-
-					lockHoldersNum++;
-				}
-			}
+			/* Collect lock holders and waiters */
+			CollectLockHoldersAndWaiters(NULL, lock, &lock_holders_sbuf, &lock_waiters_sbuf, &lockHoldersNum);
 
 			LWLockRelease(partitionLock);
 
@@ -1995,3 +1972,58 @@ BecomeLockGroupMember(PGPROC *leader, int pid)
 
 	return ok;
 }
+
+/*
+ * CollectLockHoldersAndWaiters - gather PIDs of the given lock's holders
+ * and waiters
+ *
+ * We loop over the lock's procLocks to build lists of all holders and
+ * waiters, so that callers can provide more detailed information for lock
+ * debugging purposes.  lock->procLocks contains all processes which hold
+ * or wait for this lock.  The caller must hold the lock's partition lock.
+ *
+ * waitProcLock, if not NULL, identifies the caller's own PROCLOCK; it is
+ * reported as a waiter even though the caller has not joined the lock's
+ * wait queue (as in the dontWait case, where we bail out before queueing).
+ *
+ * Holder and waiter PIDs are appended to lock_holders_sbuf and
+ * lock_waiters_sbuf as comma-separated lists, and *lockHoldersNum is
+ * incremented once per holder found.
+ */
+void
+CollectLockHoldersAndWaiters(PROCLOCK *waitProcLock, LOCK *lock,
+							 StringInfo lock_holders_sbuf,
+							 StringInfo lock_waiters_sbuf,
+							 int *lockHoldersNum)
+{
+	bool		first_holder = true;
+	bool		first_waiter = true;
+	dlist_iter	proc_iter;
+	PROCLOCK   *curproclock;
+
+	dlist_foreach(proc_iter, &lock->procLocks)
+	{
+		curproclock =
+			dlist_container(PROCLOCK, lockLink, proc_iter.cur);
+
+		/*
+		 * A process is a waiter if myProc->waitProcLock == curproclock, or
+		 * if curproclock is the caller's own entry; it is a holder
+		 * otherwise.  The first test must apply regardless of waitProcLock,
+		 * else other queued waiters would be misreported as holders.
+		 */
+		if (curproclock->tag.myProc->waitProcLock == curproclock ||
+			(waitProcLock != NULL && waitProcLock == curproclock))
+		{
+			if (first_waiter)
+			{
+				appendStringInfo(lock_waiters_sbuf, "%d",
+								 curproclock->tag.myProc->pid);
+				first_waiter = false;
+			}
+			else
+				appendStringInfo(lock_waiters_sbuf, ", %d",
+								 curproclock->tag.myProc->pid);
+		}
+		else
+		{
+			if (first_holder)
+			{
+				appendStringInfo(lock_holders_sbuf, "%d",
+								 curproclock->tag.myProc->pid);
+				first_holder = false;
+			}
+			else
+				appendStringInfo(lock_holders_sbuf, ", %d",
+								 curproclock->tag.myProc->pid);
+
+			(*lockHoldersNum)++;
+		}
+	}
+}
\ No newline at end of file
diff --git a/src/backend/utils/misc/guc_tables.c b/src/backend/utils/misc/guc_tables.c
index 2c4cc8cd41..4c7cfc54fa 100644
--- a/src/backend/utils/misc/guc_tables.c
+++ b/src/backend/utils/misc/guc_tables.c
@@ -76,6 +76,7 @@
 #include "storage/large_object.h"
 #include "storage/pg_shmem.h"
 #include "storage/predicate.h"
+#include "storage/proc.h"
 #include "storage/standby.h"
 #include "tcop/backend_startup.h"
 #include "tcop/tcopprot.h"
@@ -191,6 +192,17 @@ static const struct config_enum_entry log_error_verbosity_options[] = {
 StaticAssertDecl(lengthof(log_error_verbosity_options) == (PGERROR_VERBOSE + 2),
 				 "array length mismatch");
 
+static const struct config_enum_entry log_lock_waits_options[] = {
+	{"off", LOGLOCK_OFF, false},
+	{"on", LOGLOCK_ON, false},
+	{"fail", LOGLOCK_FAIL, false},
+	{"all", LOGLOCK_ALL, false},
+	{NULL, 0, false}
+};
+
+StaticAssertDecl(lengthof(log_lock_waits_options) == (LOGLOCK_ALL + 2),
+				 "array length mismatch");
+
 static const struct config_enum_entry log_statement_options[] = {
 	{"none", LOGSTMT_NONE, false},
 	{"ddl", LOGSTMT_DDL, false},
@@ -1552,15 +1564,6 @@ struct config_bool ConfigureNamesBool[] =
 	},
 #endif
 
-	{
-		{"log_lock_waits", PGC_SUSET, LOGGING_WHAT,
-			gettext_noop("Logs long lock waits."),
-			NULL
-		},
-		&log_lock_waits,
-		false,
-		NULL, NULL, NULL
-	},
 	{
 		{"log_recovery_conflict_waits", PGC_SIGHUP, LOGGING_WHAT,
 			gettext_noop("Logs standby recovery conflict waits."),
@@ -4930,6 +4933,16 @@ struct config_enum ConfigureNamesEnum[] =
 		NULL, NULL, NULL
 	},
 
+	{
+		{"log_lock_waits", PGC_SUSET, LOGGING_WHAT,
+			gettext_noop("Logs long lock waits."),
+			NULL
+		},
+		&log_lock_waits,
+		LOGLOCK_OFF, log_lock_waits_options,
+		NULL, NULL, NULL
+	},
+
 	{
 		{"log_statement", PGC_SUSET, LOGGING_WHAT,
 			gettext_noop("Sets the type of statements logged."),
diff --git a/src/backend/utils/misc/postgresql.conf.sample b/src/backend/utils/misc/postgresql.conf.sample
index 667e0dc40a..fe6a2ab8bc 100644
--- a/src/backend/utils/misc/postgresql.conf.sample
+++ b/src/backend/utils/misc/postgresql.conf.sample
@@ -605,7 +605,7 @@
 					#        processes
 					#   %% = '%'
 					# e.g. '<%u%%%d> '
-#log_lock_waits = off			# log lock waits >= deadlock_timeout
+#log_lock_waits = off			# log lock waits. off, on, fail, all
 #log_recovery_conflict_waits = off	# log standby recovery conflict waits
 					# >= deadlock_timeout
 #log_parameter_max_length = -1		# when logging statements, limit logged
diff --git a/src/include/storage/proc.h b/src/include/storage/proc.h
index ebcf0ad403..d8429931ce 100644
--- a/src/include/storage/proc.h
+++ b/src/include/storage/proc.h
@@ -132,6 +132,14 @@ typedef enum
 	PROC_WAIT_STATUS_ERROR,
 } ProcWaitStatus;
 
+/* Possible values of the log_lock_waits GUC */
+typedef enum
+{
+	LOGLOCK_OFF,				/* do not log lock waits or failures */
+	LOGLOCK_ON,					/* log waits longer than deadlock_timeout */
+	LOGLOCK_FAIL,				/* log failed (NOWAIT) lock acquisitions */
+	LOGLOCK_ALL,				/* log both of the above */
+} LogLockLevel;
+
 /*
  * Each backend has a PGPROC struct in shared memory.  There is also a list of
  * currently-unused PGPROC structs that will be reallocated to new backends.
@@ -456,7 +464,7 @@ extern PGDLLIMPORT int LockTimeout;
 extern PGDLLIMPORT int IdleInTransactionSessionTimeout;
 extern PGDLLIMPORT int TransactionTimeout;
 extern PGDLLIMPORT int IdleSessionTimeout;
-extern PGDLLIMPORT bool log_lock_waits;
+extern PGDLLIMPORT int log_lock_waits;
 
 #ifdef EXEC_BACKEND
 extern PGDLLIMPORT slock_t *ProcStructLock;
@@ -496,5 +504,6 @@ extern PGPROC *AuxiliaryPidGetProc(int pid);
 
 extern void BecomeLockGroupLeader(void);
 extern bool BecomeLockGroupMember(PGPROC *leader, int pid);
+extern void CollectLockHoldersAndWaiters(PROCLOCK *waitProcLock, LOCK *lock, StringInfo lock_holders_sbuf, StringInfo lock_waiters_sbuf, int *lockHoldersNum);
 
 #endif							/* _PROC_H_ */
diff --git a/src/test/regress/pg_regress.c b/src/test/regress/pg_regress.c
index 5157629b1c..e90951b9a3 100644
--- a/src/test/regress/pg_regress.c
+++ b/src/test/regress/pg_regress.c
@@ -2397,7 +2397,7 @@ regression_main(int argc, char *argv[],
 		fputs("log_autovacuum_min_duration = 0\n", pg_conf);
 		fputs("log_checkpoints = on\n", pg_conf);
 		fputs("log_line_prefix = '%m %b[%p] %q%a '\n", pg_conf);
-		fputs("log_lock_waits = on\n", pg_conf);
+		fputs("log_lock_waits = all\n", pg_conf);
 		fputs("log_temp_files = 128kB\n", pg_conf);
 		fputs("max_prepared_transactions = 2\n", pg_conf);
 
diff --git a/src/tools/ci/pg_ci_base.conf b/src/tools/ci/pg_ci_base.conf
index d8faa9c26c..3be9f2d0ad 100644
--- a/src/tools/ci/pg_ci_base.conf
+++ b/src/tools/ci/pg_ci_base.conf
@@ -11,4 +11,4 @@ log_checkpoints = true
 log_connections = true
 log_disconnections = true
 log_line_prefix = '%m [%p][%b] %q[%a][%v:%x] '
-log_lock_waits = true
+log_lock_waits = all

Reply via email to