Updated patch set is attached.

This version splits out the existing csvlog code into its own file and
centralizes the common helpers into a new elog-internal.h so that they're
only included by the actual write_xyz sources.

That makes the elog.c changes in the JSON logging patch minimal as all it's
really doing is invoking the new write_jsonlog(...) function.

It also adds those missing fields to the JSON logger output.

Regards,
-- Sehrope Sarkuni
Founder & CEO | JackDB, Inc. | https://www.jackdb.com/
From d5b3f5fe44e91d35aefdd570758d5b2a9e9c1a36 Mon Sep 17 00:00:00 2001
From: Sehrope Sarkuni <sehr...@jackdb.com>
Date: Wed, 10 Jul 2019 10:02:31 -0400
Subject: [PATCH 1/4] Adds separate dest field to log protocol PipeProtoHeader

Adds a separate dest field to PipeProtoHeader to store the log destination
requested by the sending process. Also changes the is_last field to only
store whether the chunk is the last one for a message rather than also
including whether the destination is csvlog.
---
 src/backend/postmaster/syslogger.c | 15 ++++++---------
 src/backend/utils/error/elog.c     |  4 +++-
 src/include/postmaster/syslogger.h |  4 ++--
 3 files changed, 11 insertions(+), 12 deletions(-)

diff --git a/src/backend/postmaster/syslogger.c b/src/backend/postmaster/syslogger.c
index cad43bdef2..edd8f33204 100644
--- a/src/backend/postmaster/syslogger.c
+++ b/src/backend/postmaster/syslogger.c
@@ -878,7 +878,6 @@ process_pipe_input(char *logbuffer, int *bytes_in_logbuffer)
 {
 	char	   *cursor = logbuffer;
 	int			count = *bytes_in_logbuffer;
-	int			dest = LOG_DESTINATION_STDERR;
 
 	/* While we have enough for a header, process data... */
 	while (count >= (int) (offsetof(PipeProtoHeader, data) + 1))
@@ -891,8 +890,9 @@ process_pipe_input(char *logbuffer, int *bytes_in_logbuffer)
 		if (p.nuls[0] == '\0' && p.nuls[1] == '\0' &&
 			p.len > 0 && p.len <= PIPE_MAX_PAYLOAD &&
 			p.pid != 0 &&
-			(p.is_last == 't' || p.is_last == 'f' ||
-			 p.is_last == 'T' || p.is_last == 'F'))
+			(p.is_last == 't' || p.is_last == 'f') &&
+			(p.dest == LOG_DESTINATION_CSVLOG ||
+			 p.dest == LOG_DESTINATION_STDERR))
 		{
 			List	   *buffer_list;
 			ListCell   *cell;
@@ -906,9 +906,6 @@ process_pipe_input(char *logbuffer, int *bytes_in_logbuffer)
 			if (count < chunklen)
 				break;
 
-			dest = (p.is_last == 'T' || p.is_last == 'F') ?
-				LOG_DESTINATION_CSVLOG : LOG_DESTINATION_STDERR;
-
 			/* Locate any existing buffer for this source pid */
 			buffer_list = buffer_lists[p.pid % NBUFFER_LISTS];
 			foreach(cell, buffer_list)
@@ -924,7 +921,7 @@ process_pipe_input(char *logbuffer, int *bytes_in_logbuffer)
 					free_slot = buf;
 			}
 
-			if (p.is_last == 'f' || p.is_last == 'F')
+			if (p.is_last == 'f')
 			{
 				/*
 				 * Save a complete non-final chunk in a per-pid buffer
@@ -970,7 +967,7 @@ process_pipe_input(char *logbuffer, int *bytes_in_logbuffer)
 					appendBinaryStringInfo(str,
 										   cursor + PIPE_HEADER_SIZE,
 										   p.len);
-					write_syslogger_file(str->data, str->len, dest);
+					write_syslogger_file(str->data, str->len, p.dest);
 					/* Mark the buffer unused, and reclaim string storage */
 					existing_slot->pid = 0;
 					pfree(str->data);
@@ -979,7 +976,7 @@ process_pipe_input(char *logbuffer, int *bytes_in_logbuffer)
 				{
 					/* The whole message was one chunk, evidently. */
 					write_syslogger_file(cursor + PIPE_HEADER_SIZE, p.len,
-										 dest);
+										 p.dest);
 				}
 			}
 
diff --git a/src/backend/utils/error/elog.c b/src/backend/utils/error/elog.c
index a3e1c59a82..cd13111708 100644
--- a/src/backend/utils/error/elog.c
+++ b/src/backend/utils/error/elog.c
@@ -3250,6 +3250,8 @@ write_pipe_chunks(char *data, int len, int dest)
 
 	p.proto.nuls[0] = p.proto.nuls[1] = '\0';
 	p.proto.pid = MyProcPid;
+	p.proto.dest = (int32) dest;
+	p.proto.is_last = 'f';
 
 	/* write all but the last chunk */
 	while (len > PIPE_MAX_PAYLOAD)
@@ -3264,7 +3266,7 @@ write_pipe_chunks(char *data, int len, int dest)
 	}
 
 	/* write the last chunk */
-	p.proto.is_last = (dest == LOG_DESTINATION_CSVLOG ? 'T' : 't');
+	p.proto.is_last = 't';
 	p.proto.len = len;
 	memcpy(p.proto.data, data, len);
 	rc = write(fd, &p, PIPE_HEADER_SIZE + len);
diff --git a/src/include/postmaster/syslogger.h b/src/include/postmaster/syslogger.h
index 1491eecb0f..41d026a474 100644
--- a/src/include/postmaster/syslogger.h
+++ b/src/include/postmaster/syslogger.h
@@ -46,8 +46,8 @@ typedef struct
 	char		nuls[2];		/* always \0\0 */
 	uint16		len;			/* size of this chunk (counts data only) */
 	int32		pid;			/* writer's pid */
-	char		is_last;		/* last chunk of message? 't' or 'f' ('T' or
-								 * 'F' for CSV case) */
+	int32		dest;			/* log destination */
+	char		is_last;        /* last chunk of message? 't' or 'f' */
 	char		data[FLEXIBLE_ARRAY_MEMBER];	/* data payload starts here */
 } PipeProtoHeader;
 
-- 
2.17.1

From dfb17c0b1804b9e54a287e6a058d02dd1be27ffb Mon Sep 17 00:00:00 2001
From: Sehrope Sarkuni <sehr...@jackdb.com>
Date: Tue, 31 Aug 2021 10:00:54 -0400
Subject: [PATCH 2/4] Add TAP test for csvlog

---
 src/bin/pg_ctl/t/005_csvlog.pl | 118 +++++++++++++++++++++++++++++++++
 1 file changed, 118 insertions(+)
 create mode 100644 src/bin/pg_ctl/t/005_csvlog.pl

diff --git a/src/bin/pg_ctl/t/005_csvlog.pl b/src/bin/pg_ctl/t/005_csvlog.pl
new file mode 100644
index 0000000000..c6ab0ddbcc
--- /dev/null
+++ b/src/bin/pg_ctl/t/005_csvlog.pl
@@ -0,0 +1,118 @@
+use strict;
+use warnings;
+
+use PostgresNode;
+use TestLib;
+use Test::More tests => 4;
+use Time::HiRes qw(usleep);
+
+# Set up node with logging collector
+my $node = PostgresNode->new('primary');
+$node->init();
+$node->append_conf(
+	'postgresql.conf', qq(
+logging_collector = on
+lc_messages = 'C'
+log_destination = 'csvlog'
+));
+
+$node->start();
+
+note "Before sleep";
+usleep(100_000);
+note "Before rotate";
+$node->logrotate();
+note "After rotate";
+usleep(100_000);
+note "After rotate sleep";
+
+# Verify that log output gets to the file
+
+$node->psql('postgres', 'SELECT 1/0');
+
+my $current_logfiles = slurp_file($node->data_dir . '/current_logfiles');
+
+for(my $tmp=0;$tmp < 10;$tmp++) {
+   my $current_logfiles = slurp_file($node->data_dir . '/current_logfiles');
+   note "current_logfiles = $current_logfiles";
+   usleep(100_000);
+}
+
+
+like(
+	$current_logfiles,
+	qr|^csvlog log/postgresql-.*$|,
+	'current_logfiles is sane');
+
+my $lfname = $current_logfiles;
+$lfname =~ s/^csvlog //;
+chomp $lfname;
+
+note "current_logfiles = $current_logfiles";
+note "lfname = $lfname";
+
+# might need to retry if logging collector process is slow...
+my $max_attempts = 180 * 10;
+
+my $first_logfile;
+for (my $attempts = 0; $attempts < $max_attempts; $attempts++)
+{
+    my $foo = $node->data_dir . '/' . $lfname;
+    note "will slurp: $foo";
+    $first_logfile = slurp_file($node->data_dir . '/' . $lfname);
+	last if $first_logfile =~ m/division by zero/;
+	usleep(100_000);
+}
+
+note "first_logfile = $first_logfile";
+# Our log entry should contain the error message and errant SQL
+like(
+	$first_logfile,
+	qr/division by zero.*"SELECT 1\/0"/,
+	'found expected log file content');
+
+# Sleep 2 seconds and ask for log rotation; this should result in
+# output into a different log file name.
+sleep(2);
+$node->logrotate();
+
+# pg_ctl logrotate doesn't wait for rotation request to be completed.
+# Allow a bit of time for it to happen.
+my $new_current_logfiles;
+for (my $attempts = 0; $attempts < $max_attempts; $attempts++)
+{
+	$new_current_logfiles = slurp_file($node->data_dir . '/current_logfiles');
+	last if $new_current_logfiles ne $current_logfiles;
+	usleep(100_000);
+}
+
+note "now current_logfiles = $new_current_logfiles";
+
+like(
+	$new_current_logfiles,
+	qr|^csvlog log/postgresql-.*$|,
+	'new current_logfiles is sane');
+
+$lfname = $new_current_logfiles;
+$lfname =~ s/^csvlog //;
+chomp $lfname;
+
+# Verify that log output gets to this file, too
+
+$node->psql('postgres', 'fee fi fo fum');
+
+my $second_logfile;
+for (my $attempts = 0; $attempts < $max_attempts; $attempts++)
+{
+	$second_logfile = slurp_file($node->data_dir . '/' . $lfname);
+	last if $second_logfile =~ m/syntax error/;
+	usleep(100_000);
+}
+
+like(
+	$second_logfile,
+	# Our log entry should have our bad string wrapped in quotes after the error
+	qr/syntax error.*,"fee fi fo fum"/,
+	'found expected log file content in new log file');
+
+$node->stop();
-- 
2.17.1

From 1fadc727cc77d020e559b6a16db9db72d5678bf5 Mon Sep 17 00:00:00 2001
From: Sehrope Sarkuni <sehr...@jackdb.com>
Date: Wed, 1 Sep 2021 09:06:15 -0400
Subject: [PATCH 3/4] Split csv handling in elog.c into separate csvlog.c

Split out csvlog to its own file and centralize common elog internals
and helpers into its own file as well.
---
 src/backend/utils/error/Makefile  |   1 +
 src/backend/utils/error/csvlog.c  | 270 ++++++++++++++++++++++++++
 src/backend/utils/error/elog.c    | 313 ++----------------------------
 src/include/utils/elog-internal.h |  78 ++++++++
 4 files changed, 365 insertions(+), 297 deletions(-)
 create mode 100644 src/backend/utils/error/csvlog.c
 create mode 100644 src/include/utils/elog-internal.h

diff --git a/src/backend/utils/error/Makefile b/src/backend/utils/error/Makefile
index 612da215d0..ef770dd2f2 100644
--- a/src/backend/utils/error/Makefile
+++ b/src/backend/utils/error/Makefile
@@ -14,6 +14,7 @@ include $(top_builddir)/src/Makefile.global
 
 OBJS = \
 	assert.o \
+	csvlog.o \
 	elog.o
 
 include $(top_srcdir)/src/backend/common.mk
diff --git a/src/backend/utils/error/csvlog.c b/src/backend/utils/error/csvlog.c
new file mode 100644
index 0000000000..923b7e7d73
--- /dev/null
+++ b/src/backend/utils/error/csvlog.c
@@ -0,0 +1,270 @@
+/*-------------------------------------------------------------------------
+ *
+ * csvlog.c
+ *	  CSV logging
+ *
+ * Copyright (c) 2021, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ *	  src/backend/utils/error/csvlog.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "access/xact.h"
+#include "libpq/libpq.h"
+#include "lib/stringinfo.h"
+#include "miscadmin.h"
+#include "postmaster/bgworker.h"
+#include "postmaster/syslogger.h"
+#include "storage/lock.h"
+#include "storage/proc.h"
+#include "tcop/tcopprot.h"
+#include "utils/backend_status.h"
+#include "utils/elog-internal.h"
+#include "utils/guc.h"
+#include "utils/ps_status.h"
+
+/*
+ * append a CSV'd version of a string to a StringInfo
+ * We use the PostgreSQL defaults for CSV, i.e. quote = escape = '"'
+ * If it's NULL, append nothing.
+ */
+static inline void
+appendCSVLiteral(StringInfo buf, const char *data)
+{
+	const char *p = data;
+	char		c;
+
+	/* avoid confusing an empty string with NULL */
+	if (p == NULL)
+		return;
+
+	appendStringInfoCharMacro(buf, '"');
+	while ((c = *p++) != '\0')
+	{
+		if (c == '"')
+			appendStringInfoCharMacro(buf, '"');
+		appendStringInfoCharMacro(buf, c);
+	}
+	appendStringInfoCharMacro(buf, '"');
+}
+
+/*
+ * Constructs the error message, depending on the Errordata it gets, in a CSV
+ * format which is described in doc/src/sgml/config.sgml.
+ */
+void
+write_csvlog(ErrorData *edata)
+{
+	StringInfoData buf;
+	bool		print_stmt = false;
+
+	/* static counter for line numbers */
+	static long log_line_number = 0;
+
+	/* has counter been reset in current process? */
+	static int	log_my_pid = 0;
+
+	/*
+	 * This is one of the few places where we'd rather not inherit a static
+	 * variable's value from the postmaster.  But since we will, reset it when
+	 * MyProcPid changes.
+	 */
+	if (log_my_pid != MyProcPid)
+	{
+		log_line_number = 0;
+		log_my_pid = MyProcPid;
+		formatted_start_time[0] = '\0';
+	}
+	log_line_number++;
+
+	initStringInfo(&buf);
+
+	/*
+	 * timestamp with milliseconds
+	 *
+	 * Check if the timestamp is already calculated for the syslog message,
+	 * and use it if so.  Otherwise, get the current timestamp.  This is done
+	 * to put same timestamp in both syslog and csvlog messages.
+	 */
+	if (formatted_log_time[0] == '\0')
+		setup_formatted_log_time();
+
+	appendStringInfoString(&buf, formatted_log_time);
+	appendStringInfoChar(&buf, ',');
+
+	/* username */
+	if (MyProcPort)
+		appendCSVLiteral(&buf, MyProcPort->user_name);
+	appendStringInfoChar(&buf, ',');
+
+	/* database name */
+	if (MyProcPort)
+		appendCSVLiteral(&buf, MyProcPort->database_name);
+	appendStringInfoChar(&buf, ',');
+
+	/* Process id  */
+	if (MyProcPid != 0)
+		appendStringInfo(&buf, "%d", MyProcPid);
+	appendStringInfoChar(&buf, ',');
+
+	/* Remote host and port */
+	if (MyProcPort && MyProcPort->remote_host)
+	{
+		appendStringInfoChar(&buf, '"');
+		appendStringInfoString(&buf, MyProcPort->remote_host);
+		if (MyProcPort->remote_port && MyProcPort->remote_port[0] != '\0')
+		{
+			appendStringInfoChar(&buf, ':');
+			appendStringInfoString(&buf, MyProcPort->remote_port);
+		}
+		appendStringInfoChar(&buf, '"');
+	}
+	appendStringInfoChar(&buf, ',');
+
+	/* session id */
+	appendStringInfo(&buf, "%lx.%x", (long) MyStartTime, MyProcPid);
+	appendStringInfoChar(&buf, ',');
+
+	/* Line number */
+	appendStringInfo(&buf, "%ld", log_line_number);
+	appendStringInfoChar(&buf, ',');
+
+	/* PS display */
+	if (MyProcPort)
+	{
+		StringInfoData msgbuf;
+		const char *psdisp;
+		int			displen;
+
+		initStringInfo(&msgbuf);
+
+		psdisp = get_ps_display(&displen);
+		appendBinaryStringInfo(&msgbuf, psdisp, displen);
+		appendCSVLiteral(&buf, msgbuf.data);
+
+		pfree(msgbuf.data);
+	}
+	appendStringInfoChar(&buf, ',');
+
+	/* session start timestamp */
+	if (formatted_start_time[0] == '\0')
+		setup_formatted_start_time();
+	appendStringInfoString(&buf, formatted_start_time);
+	appendStringInfoChar(&buf, ',');
+
+	/* Virtual transaction id */
+	/* keep VXID format in sync with lockfuncs.c */
+	if (MyProc != NULL && MyProc->backendId != InvalidBackendId)
+		appendStringInfo(&buf, "%d/%u", MyProc->backendId, MyProc->lxid);
+	appendStringInfoChar(&buf, ',');
+
+	/* Transaction id */
+	appendStringInfo(&buf, "%u", GetTopTransactionIdIfAny());
+	appendStringInfoChar(&buf, ',');
+
+	/* Error severity */
+	appendStringInfoString(&buf, _(error_severity(edata->elevel)));
+	appendStringInfoChar(&buf, ',');
+
+	/* SQL state code */
+	appendStringInfoString(&buf, unpack_sql_state(edata->sqlerrcode));
+	appendStringInfoChar(&buf, ',');
+
+	/* errmessage */
+	appendCSVLiteral(&buf, edata->message);
+	appendStringInfoChar(&buf, ',');
+
+	/* errdetail or errdetail_log */
+	if (edata->detail_log)
+		appendCSVLiteral(&buf, edata->detail_log);
+	else
+		appendCSVLiteral(&buf, edata->detail);
+	appendStringInfoChar(&buf, ',');
+
+	/* errhint */
+	appendCSVLiteral(&buf, edata->hint);
+	appendStringInfoChar(&buf, ',');
+
+	/* internal query */
+	appendCSVLiteral(&buf, edata->internalquery);
+	appendStringInfoChar(&buf, ',');
+
+	/* if printed internal query, print internal pos too */
+	if (edata->internalpos > 0 && edata->internalquery != NULL)
+		appendStringInfo(&buf, "%d", edata->internalpos);
+	appendStringInfoChar(&buf, ',');
+
+	/* errcontext */
+	if (!edata->hide_ctx)
+		appendCSVLiteral(&buf, edata->context);
+	appendStringInfoChar(&buf, ',');
+
+	/* user query --- only reported if not disabled by the caller */
+	if (is_log_level_output(edata->elevel, log_min_error_statement) &&
+		debug_query_string != NULL &&
+		!edata->hide_stmt)
+		print_stmt = true;
+	if (print_stmt)
+		appendCSVLiteral(&buf, debug_query_string);
+	appendStringInfoChar(&buf, ',');
+	if (print_stmt && edata->cursorpos > 0)
+		appendStringInfo(&buf, "%d", edata->cursorpos);
+	appendStringInfoChar(&buf, ',');
+
+	/* file error location */
+	if (Log_error_verbosity >= PGERROR_VERBOSE)
+	{
+		StringInfoData msgbuf;
+
+		initStringInfo(&msgbuf);
+
+		if (edata->funcname && edata->filename)
+			appendStringInfo(&msgbuf, "%s, %s:%d",
+							 edata->funcname, edata->filename,
+							 edata->lineno);
+		else if (edata->filename)
+			appendStringInfo(&msgbuf, "%s:%d",
+							 edata->filename, edata->lineno);
+		appendCSVLiteral(&buf, msgbuf.data);
+		pfree(msgbuf.data);
+	}
+	appendStringInfoChar(&buf, ',');
+
+	/* application name */
+	if (application_name)
+		appendCSVLiteral(&buf, application_name);
+
+	appendStringInfoChar(&buf, ',');
+
+	/* backend type */
+	appendCSVLiteral(&buf, get_backend_type_for_log());
+	appendStringInfoChar(&buf, ',');
+
+	/* leader PID */
+	if (MyProc)
+	{
+		PGPROC	   *leader = MyProc->lockGroupLeader;
+
+		/*
+		 * Show the leader only for active parallel workers.  This leaves out
+		 * the leader of a parallel group.
+		 */
+		if (leader && leader->pid != MyProcPid)
+			appendStringInfo(&buf, "%d", leader->pid);
+	}
+	appendStringInfoChar(&buf, ',');
+
+	/* query id */
+	appendStringInfo(&buf, "%lld", (long long) pgstat_get_my_query_id());
+
+	appendStringInfoChar(&buf, '\n');
+
+	write_syslogger(buf.data, buf.len, LOG_DESTINATION_CSVLOG);
+
+	pfree(buf.data);
+}
diff --git a/src/backend/utils/error/elog.c b/src/backend/utils/error/elog.c
index cd13111708..47d6677827 100644
--- a/src/backend/utils/error/elog.c
+++ b/src/backend/utils/error/elog.c
@@ -82,7 +82,7 @@
 #include "utils/guc.h"
 #include "utils/memutils.h"
 #include "utils/ps_status.h"
-
+#include "utils/elog-internal.h"
 
 /* In this module, access gettext() via err_gettext() */
 #undef _
@@ -155,9 +155,8 @@ static int	recursion_depth = 0;	/* to detect actual recursion */
 static struct timeval saved_timeval;
 static bool saved_timeval_set = false;
 
-#define FORMATTED_TS_LEN 128
-static char formatted_start_time[FORMATTED_TS_LEN];
-static char formatted_log_time[FORMATTED_TS_LEN];
+char formatted_start_time[FORMATTED_TS_LEN];
+char formatted_log_time[FORMATTED_TS_LEN];
 
 
 /* Macro for checking errordata_stack_depth is reasonable */
@@ -175,52 +174,13 @@ static const char *err_gettext(const char *str) pg_attribute_format_arg(1);
 static pg_noinline void set_backtrace(ErrorData *edata, int num_skip);
 static void set_errdata_field(MemoryContextData *cxt, char **ptr, const char *str);
 static void write_console(const char *line, int len);
-static void setup_formatted_log_time(void);
-static void setup_formatted_start_time(void);
 static const char *process_log_prefix_padding(const char *p, int *padding);
 static void log_line_prefix(StringInfo buf, ErrorData *edata);
-static void write_csvlog(ErrorData *edata);
 static void send_message_to_server_log(ErrorData *edata);
 static void write_pipe_chunks(char *data, int len, int dest);
 static void send_message_to_frontend(ErrorData *edata);
-static const char *error_severity(int elevel);
 static void append_with_tabs(StringInfo buf, const char *str);
 
-
-/*
- * is_log_level_output -- is elevel logically >= log_min_level?
- *
- * We use this for tests that should consider LOG to sort out-of-order,
- * between ERROR and FATAL.  Generally this is the right thing for testing
- * whether a message should go to the postmaster log, whereas a simple >=
- * test is correct for testing whether the message should go to the client.
- */
-static inline bool
-is_log_level_output(int elevel, int log_min_level)
-{
-	if (elevel == LOG || elevel == LOG_SERVER_ONLY)
-	{
-		if (log_min_level == LOG || log_min_level <= ERROR)
-			return true;
-	}
-	else if (elevel == WARNING_CLIENT_ONLY)
-	{
-		/* never sent to log, regardless of log_min_level */
-		return false;
-	}
-	else if (log_min_level == LOG)
-	{
-		/* elevel != LOG */
-		if (elevel >= FATAL)
-			return true;
-	}
-	/* Neither is LOG */
-	else if (elevel >= log_min_level)
-		return true;
-
-	return false;
-}
-
 /*
  * Policy-setting subroutines.  These are fairly simple, but it seems wise
  * to have the code in just one place.
@@ -2291,7 +2251,7 @@ write_console(const char *line, int len)
 /*
  * setup formatted_log_time, for consistent times between CSV and regular logs
  */
-static void
+void
 setup_formatted_log_time(void)
 {
 	pg_time_t	stamp_time;
@@ -2323,7 +2283,7 @@ setup_formatted_log_time(void)
 /*
  * setup formatted_start_time
  */
-static void
+void
 setup_formatted_start_time(void)
 {
 	pg_time_t	stamp_time = (pg_time_t) MyStartTime;
@@ -2729,257 +2689,6 @@ log_line_prefix(StringInfo buf, ErrorData *edata)
 	}
 }
 
-/*
- * append a CSV'd version of a string to a StringInfo
- * We use the PostgreSQL defaults for CSV, i.e. quote = escape = '"'
- * If it's NULL, append nothing.
- */
-static inline void
-appendCSVLiteral(StringInfo buf, const char *data)
-{
-	const char *p = data;
-	char		c;
-
-	/* avoid confusing an empty string with NULL */
-	if (p == NULL)
-		return;
-
-	appendStringInfoCharMacro(buf, '"');
-	while ((c = *p++) != '\0')
-	{
-		if (c == '"')
-			appendStringInfoCharMacro(buf, '"');
-		appendStringInfoCharMacro(buf, c);
-	}
-	appendStringInfoCharMacro(buf, '"');
-}
-
-/*
- * Constructs the error message, depending on the Errordata it gets, in a CSV
- * format which is described in doc/src/sgml/config.sgml.
- */
-static void
-write_csvlog(ErrorData *edata)
-{
-	StringInfoData buf;
-	bool		print_stmt = false;
-
-	/* static counter for line numbers */
-	static long log_line_number = 0;
-
-	/* has counter been reset in current process? */
-	static int	log_my_pid = 0;
-
-	/*
-	 * This is one of the few places where we'd rather not inherit a static
-	 * variable's value from the postmaster.  But since we will, reset it when
-	 * MyProcPid changes.
-	 */
-	if (log_my_pid != MyProcPid)
-	{
-		log_line_number = 0;
-		log_my_pid = MyProcPid;
-		formatted_start_time[0] = '\0';
-	}
-	log_line_number++;
-
-	initStringInfo(&buf);
-
-	/*
-	 * timestamp with milliseconds
-	 *
-	 * Check if the timestamp is already calculated for the syslog message,
-	 * and use it if so.  Otherwise, get the current timestamp.  This is done
-	 * to put same timestamp in both syslog and csvlog messages.
-	 */
-	if (formatted_log_time[0] == '\0')
-		setup_formatted_log_time();
-
-	appendStringInfoString(&buf, formatted_log_time);
-	appendStringInfoChar(&buf, ',');
-
-	/* username */
-	if (MyProcPort)
-		appendCSVLiteral(&buf, MyProcPort->user_name);
-	appendStringInfoChar(&buf, ',');
-
-	/* database name */
-	if (MyProcPort)
-		appendCSVLiteral(&buf, MyProcPort->database_name);
-	appendStringInfoChar(&buf, ',');
-
-	/* Process id  */
-	if (MyProcPid != 0)
-		appendStringInfo(&buf, "%d", MyProcPid);
-	appendStringInfoChar(&buf, ',');
-
-	/* Remote host and port */
-	if (MyProcPort && MyProcPort->remote_host)
-	{
-		appendStringInfoChar(&buf, '"');
-		appendStringInfoString(&buf, MyProcPort->remote_host);
-		if (MyProcPort->remote_port && MyProcPort->remote_port[0] != '\0')
-		{
-			appendStringInfoChar(&buf, ':');
-			appendStringInfoString(&buf, MyProcPort->remote_port);
-		}
-		appendStringInfoChar(&buf, '"');
-	}
-	appendStringInfoChar(&buf, ',');
-
-	/* session id */
-	appendStringInfo(&buf, "%lx.%x", (long) MyStartTime, MyProcPid);
-	appendStringInfoChar(&buf, ',');
-
-	/* Line number */
-	appendStringInfo(&buf, "%ld", log_line_number);
-	appendStringInfoChar(&buf, ',');
-
-	/* PS display */
-	if (MyProcPort)
-	{
-		StringInfoData msgbuf;
-		const char *psdisp;
-		int			displen;
-
-		initStringInfo(&msgbuf);
-
-		psdisp = get_ps_display(&displen);
-		appendBinaryStringInfo(&msgbuf, psdisp, displen);
-		appendCSVLiteral(&buf, msgbuf.data);
-
-		pfree(msgbuf.data);
-	}
-	appendStringInfoChar(&buf, ',');
-
-	/* session start timestamp */
-	if (formatted_start_time[0] == '\0')
-		setup_formatted_start_time();
-	appendStringInfoString(&buf, formatted_start_time);
-	appendStringInfoChar(&buf, ',');
-
-	/* Virtual transaction id */
-	/* keep VXID format in sync with lockfuncs.c */
-	if (MyProc != NULL && MyProc->backendId != InvalidBackendId)
-		appendStringInfo(&buf, "%d/%u", MyProc->backendId, MyProc->lxid);
-	appendStringInfoChar(&buf, ',');
-
-	/* Transaction id */
-	appendStringInfo(&buf, "%u", GetTopTransactionIdIfAny());
-	appendStringInfoChar(&buf, ',');
-
-	/* Error severity */
-	appendStringInfoString(&buf, _(error_severity(edata->elevel)));
-	appendStringInfoChar(&buf, ',');
-
-	/* SQL state code */
-	appendStringInfoString(&buf, unpack_sql_state(edata->sqlerrcode));
-	appendStringInfoChar(&buf, ',');
-
-	/* errmessage */
-	appendCSVLiteral(&buf, edata->message);
-	appendStringInfoChar(&buf, ',');
-
-	/* errdetail or errdetail_log */
-	if (edata->detail_log)
-		appendCSVLiteral(&buf, edata->detail_log);
-	else
-		appendCSVLiteral(&buf, edata->detail);
-	appendStringInfoChar(&buf, ',');
-
-	/* errhint */
-	appendCSVLiteral(&buf, edata->hint);
-	appendStringInfoChar(&buf, ',');
-
-	/* internal query */
-	appendCSVLiteral(&buf, edata->internalquery);
-	appendStringInfoChar(&buf, ',');
-
-	/* if printed internal query, print internal pos too */
-	if (edata->internalpos > 0 && edata->internalquery != NULL)
-		appendStringInfo(&buf, "%d", edata->internalpos);
-	appendStringInfoChar(&buf, ',');
-
-	/* errcontext */
-	if (!edata->hide_ctx)
-		appendCSVLiteral(&buf, edata->context);
-	appendStringInfoChar(&buf, ',');
-
-	/* user query --- only reported if not disabled by the caller */
-	if (is_log_level_output(edata->elevel, log_min_error_statement) &&
-		debug_query_string != NULL &&
-		!edata->hide_stmt)
-		print_stmt = true;
-	if (print_stmt)
-		appendCSVLiteral(&buf, debug_query_string);
-	appendStringInfoChar(&buf, ',');
-	if (print_stmt && edata->cursorpos > 0)
-		appendStringInfo(&buf, "%d", edata->cursorpos);
-	appendStringInfoChar(&buf, ',');
-
-	/* file error location */
-	if (Log_error_verbosity >= PGERROR_VERBOSE)
-	{
-		StringInfoData msgbuf;
-
-		initStringInfo(&msgbuf);
-
-		if (edata->funcname && edata->filename)
-			appendStringInfo(&msgbuf, "%s, %s:%d",
-							 edata->funcname, edata->filename,
-							 edata->lineno);
-		else if (edata->filename)
-			appendStringInfo(&msgbuf, "%s:%d",
-							 edata->filename, edata->lineno);
-		appendCSVLiteral(&buf, msgbuf.data);
-		pfree(msgbuf.data);
-	}
-	appendStringInfoChar(&buf, ',');
-
-	/* application name */
-	if (application_name)
-		appendCSVLiteral(&buf, application_name);
-
-	appendStringInfoChar(&buf, ',');
-
-	/* backend type */
-	if (MyProcPid == PostmasterPid)
-		appendCSVLiteral(&buf, "postmaster");
-	else if (MyBackendType == B_BG_WORKER)
-		appendCSVLiteral(&buf, MyBgworkerEntry->bgw_type);
-	else
-		appendCSVLiteral(&buf, GetBackendTypeDesc(MyBackendType));
-
-	appendStringInfoChar(&buf, ',');
-
-	/* leader PID */
-	if (MyProc)
-	{
-		PGPROC	   *leader = MyProc->lockGroupLeader;
-
-		/*
-		 * Show the leader only for active parallel workers.  This leaves out
-		 * the leader of a parallel group.
-		 */
-		if (leader && leader->pid != MyProcPid)
-			appendStringInfo(&buf, "%d", leader->pid);
-	}
-	appendStringInfoChar(&buf, ',');
-
-	/* query id */
-	appendStringInfo(&buf, "%lld", (long long) pgstat_get_my_query_id());
-
-	appendStringInfoChar(&buf, '\n');
-
-	/* If in the syslogger process, try to write messages direct to file */
-	if (MyBackendType == B_LOGGER)
-		write_syslogger_file(buf.data, buf.len, LOG_DESTINATION_CSVLOG);
-	else
-		write_pipe_chunks(buf.data, buf.len, LOG_DESTINATION_CSVLOG);
-
-	pfree(buf.data);
-}
-
 /*
  * Unpack MAKE_SQLSTATE code. Note that this returns a pointer to a
  * static buffer.
@@ -3273,6 +2982,16 @@ write_pipe_chunks(char *data, int len, int dest)
 	(void) rc;
 }
 
+void
+write_syslogger(char *data, int len, int dest)
+{
+	/* If in the syslogger process, try to write messages direct to file */
+	if (MyBackendType == B_LOGGER)
+		write_syslogger_file(data, len, dest);
+	else
+		write_pipe_chunks(data, len, dest);
+}
+
 
 /*
  * Append a text string to the error report being built for the client.
@@ -3483,7 +3202,7 @@ send_message_to_frontend(ErrorData *edata)
  * The string is not localized here, but we mark the strings for translation
  * so that callers can invoke _() on the result.
  */
-static const char *
+const char *
 error_severity(int elevel)
 {
 	const char *prefix;
diff --git a/src/include/utils/elog-internal.h b/src/include/utils/elog-internal.h
new file mode 100644
index 0000000000..ac08b6f12f
--- /dev/null
+++ b/src/include/utils/elog-internal.h
@@ -0,0 +1,78 @@
+/*-------------------------------------------------------------------------
+ *
+ * elog-internal.h
+ *	  POSTGRES error reporting/logging internal definitions.
+ *
+ *
+ * Portions Copyright (c) 2021, PostgreSQL Global Development Group
+ * src/include/utils/elog-internal.h
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef ELOG_INTERNAL_H
+#define ELOG_INTERNAL_H
+
+#include "postgres.h"
+
+#include "utils/elog.h"
+#include "miscadmin.h"
+#include "postmaster/postmaster.h"
+#include "postmaster/bgworker.h"
+
+const char * error_severity(int elevel);
+void write_syslogger(char *data, int len, int dest);
+
+void write_csvlog(ErrorData *edata);
+
+/*
+ * is_log_level_output -- is elevel logically >= log_min_level?
+ *
+ * We use this for tests that should consider LOG to sort out-of-order,
+ * between ERROR and FATAL.  Generally this is the right thing for testing
+ * whether a message should go to the postmaster log, whereas a simple >=
+ * test is correct for testing whether the message should go to the client.
+ */
+static inline bool
+is_log_level_output(int elevel, int log_min_level)
+{
+	if (elevel == LOG || elevel == LOG_SERVER_ONLY)
+	{
+		if (log_min_level == LOG || log_min_level <= ERROR)
+			return true;
+	}
+	else if (elevel == WARNING_CLIENT_ONLY)
+	{
+		/* never sent to log, regardless of log_min_level */
+		return false;
+	}
+	else if (log_min_level == LOG)
+	{
+		/* elevel != LOG */
+		if (elevel >= FATAL)
+			return true;
+	}
+	/* Neither is LOG */
+	else if (elevel >= log_min_level)
+		return true;
+
+	return false;
+}
+
+static inline const char *
+get_backend_type_for_log() {
+	if (MyProcPid == PostmasterPid)
+		return "postmaster";
+	else if (MyBackendType == B_BG_WORKER)
+		return MyBgworkerEntry->bgw_type;
+	else
+		return GetBackendTypeDesc(MyBackendType);
+}
+
+#define FORMATTED_TS_LEN 128
+extern char formatted_start_time[FORMATTED_TS_LEN];
+extern char formatted_log_time[FORMATTED_TS_LEN];
+
+void setup_formatted_log_time(void);
+void setup_formatted_start_time(void);
+
+#endif
-- 
2.17.1

From 1805e8dbdfc3ea2f5460610d90ecc972e59d8e88 Mon Sep 17 00:00:00 2001
From: Sehrope Sarkuni <sehr...@jackdb.com>
Date: Wed, 1 Sep 2021 13:49:27 -0400
Subject: [PATCH 4/4] Add jsonlog log_destination for JSON server logs

Adds option to write server log files as JSON. Each log entry is written as its
own line with any internal newlines escaped as \n. Other non-ASCII and special
characters are also escaped using standard JSON escape sequences.

JSON logging is enabled by setting the GUC log_destination to "jsonlog" and
defaults to a log file with a ".json" extension.
---
 doc/src/sgml/config.sgml           |  67 ++++++-
 src/backend/postmaster/syslogger.c | 177 ++++++++++++++--
 src/backend/utils/adt/misc.c       |   5 +-
 src/backend/utils/error/Makefile   |   3 +-
 src/backend/utils/error/elog.c     |  26 ++-
 src/backend/utils/error/jsonlog.c  | 312 +++++++++++++++++++++++++++++
 src/backend/utils/misc/guc.c       |   2 +
 src/bin/pg_ctl/t/006_jsonlog.pl    |  98 +++++++++
 src/include/utils/elog-internal.h  |   1 +
 src/include/utils/elog.h           |   1 +
 10 files changed, 669 insertions(+), 23 deletions(-)
 create mode 100644 src/backend/utils/error/jsonlog.c
 create mode 100644 src/bin/pg_ctl/t/006_jsonlog.pl

diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml
index 2c31c35a6b..fe4bcd61ef 100644
--- a/doc/src/sgml/config.sgml
+++ b/doc/src/sgml/config.sgml
@@ -5926,7 +5926,8 @@ SELECT * FROM parent WHERE key = 2400;
        <para>
         <productname>PostgreSQL</productname> supports several methods
          for logging server messages, including
-         <systemitem>stderr</systemitem>, <systemitem>csvlog</systemitem> and
+         <systemitem>stderr</systemitem>, <systemitem>csvlog</systemitem>,
+         <systemitem>jsonlog</systemitem>, and
          <systemitem>syslog</systemitem>. On Windows,
          <systemitem>eventlog</systemitem> is also supported. Set this
          parameter to a list of desired log destinations separated by
@@ -5944,6 +5945,14 @@ SELECT * FROM parent WHERE key = 2400;
         <xref linkend="guc-logging-collector"/> must be enabled to generate
         CSV-format log output.
        </para>
+       <para>
+        If <systemitem>jsonlog</systemitem> is included in <varname>log_destination</varname>,
+        log entries are output in <acronym>JSON</acronym> format, which is convenient for
+        loading logs into programs.
+        See <xref linkend="runtime-config-logging-jsonlog"/> for details.
+        <xref linkend="guc-logging-collector"/> must be enabled to generate
+        JSON-format log output.
+       </para>
        <para>
         When either <systemitem>stderr</systemitem> or
         <systemitem>csvlog</systemitem> are included, the file
@@ -5955,13 +5964,14 @@ SELECT * FROM parent WHERE key = 2400;
 <programlisting>
 stderr log/postgresql.log
 csvlog log/postgresql.csv
+jsonlog log/postgresql.json
 </programlisting>
 
         <filename>current_logfiles</filename> is recreated when a new log file
         is created as an effect of rotation, and
         when <varname>log_destination</varname> is reloaded.  It is removed when
-        neither <systemitem>stderr</systemitem>
-        nor <systemitem>csvlog</systemitem> are included
+        none of <systemitem>stderr</systemitem>,
+        <systemitem>csvlog</systemitem>, or <systemitem>jsonlog</systemitem> are included
         in <varname>log_destination</varname>, and when the logging collector is
         disabled.
        </para>
@@ -6101,6 +6111,13 @@ local0.*    /var/log/postgresql
         (If <varname>log_filename</varname> ends in <literal>.log</literal>, the suffix is
         replaced instead.)
        </para>
+       <para>
+        If JSON-format output is enabled in <varname>log_destination</varname>,
+        <literal>.json</literal> will be appended to the timestamped
+        log file name to create the file name for JSON-format output.
+        (If <varname>log_filename</varname> ends in <literal>.log</literal>, the suffix is
+        replaced instead.)
+       </para>
        <para>
         This parameter can only be set in the <filename>postgresql.conf</filename>
         file or on the server command line.
@@ -7433,6 +7450,50 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
         </orderedlist>
       </para>
     </sect2>
+     <sect2 id="runtime-config-logging-jsonlog">
+     <title>Using JSON-Format Log Output</title>
+
+       <para>
+        Including <literal>jsonlog</literal> in the <varname>log_destination</varname> list
+        provides a convenient way to import log files into many different programs.
+        This option emits log lines in <acronym>JSON</acronym> format.
+        Each log line is serialized as a JSON object with the following fields:
+<programlisting>
+        {
+            "timestamp": time stamp with milliseconds (string),
+            "user": user name (string),
+            "dbname": database name (string),
+            "pid": process ID (number),
+            "remote_host": client host (string)
+            "remote_port": port number (string),
+            "session_id": session ID (string),
+            "line_num": per-session line number (number),
+            "ps": current ps display (string),
+            "session_start": session start time (string),
+            "vxid": virtual transaction ID (string),
+            "txid": regular transaction ID (string),
+            "error_severity": error severity (string),
+            "state_code": SQLSTATE code (string),
+            "detail": error message detail (string),
+            "hint": hint (string),
+            "internal_query": internal query that led to the error (string),
+            "internal_position": cursor index into internal query (number),
+            "context": error context (string),
+            "statement": client supplied query string (string),
+            "cursor_position": cursor index into query string (string),
+            "func_name": error location function name (string),
+            "file_name": error location file name (string),
+            "file_line_num": error location file line number (number),
+            "application_name": client application name (string),
+            "message": error message (string)
+        }
+</programlisting>
+        String fields with null values are excluded from output.
+        Additional fields may be added in the future. User applications that process jsonlog
+        output should ignore unknown fields.
+       </para>
+
+    </sect2>
 
    <sect2>
     <title>Process Title</title>
diff --git a/src/backend/postmaster/syslogger.c b/src/backend/postmaster/syslogger.c
index edd8f33204..c8f6ef56fc 100644
--- a/src/backend/postmaster/syslogger.c
+++ b/src/backend/postmaster/syslogger.c
@@ -85,9 +85,11 @@ static bool pipe_eof_seen = false;
 static bool rotation_disabled = false;
 static FILE *syslogFile = NULL;
 static FILE *csvlogFile = NULL;
+static FILE *jsonlogFile = NULL;
 NON_EXEC_STATIC pg_time_t first_syslogger_file_time = 0;
 static char *last_file_name = NULL;
 static char *last_csv_file_name = NULL;
+static char *last_json_file_name = NULL;
 
 /*
  * Buffers for saving partial messages from different backends.
@@ -274,6 +276,8 @@ SysLoggerMain(int argc, char *argv[])
 	last_file_name = logfile_getname(first_syslogger_file_time, NULL);
 	if (csvlogFile != NULL)
 		last_csv_file_name = logfile_getname(first_syslogger_file_time, ".csv");
+	if (jsonlogFile != NULL)
+		last_json_file_name = logfile_getname(first_syslogger_file_time, ".json");
 
 	/* remember active logfile parameters */
 	currentLogDir = pstrdup(Log_directory);
@@ -360,6 +364,14 @@ SysLoggerMain(int argc, char *argv[])
 				(csvlogFile != NULL))
 				rotation_requested = true;
 
+			/*
+			 * Force a rotation if JSONLOG output was just turned on or off and
+			 * we need to open or close jsonlogFile accordingly.
+			 */
+			if (((Log_destination & LOG_DESTINATION_JSONLOG) != 0) !=
+				(jsonlogFile != NULL))
+				rotation_requested = true;
+
 			/*
 			 * If rotation time parameter changed, reset next rotation time,
 			 * but don't immediately force a rotation.
@@ -410,6 +422,12 @@ SysLoggerMain(int argc, char *argv[])
 				rotation_requested = true;
 				size_rotation_for |= LOG_DESTINATION_CSVLOG;
 			}
+			if (jsonlogFile != NULL &&
+				ftell(jsonlogFile) >= Log_RotationSize * 1024L)
+			{
+				rotation_requested = true;
+				size_rotation_for |= LOG_DESTINATION_JSONLOG;
+			}
 		}
 
 		if (rotation_requested)
@@ -419,7 +437,7 @@ SysLoggerMain(int argc, char *argv[])
 			 * was sent by pg_rotate_logfile() or "pg_ctl logrotate".
 			 */
 			if (!time_based_rotation && size_rotation_for == 0)
-				size_rotation_for = LOG_DESTINATION_STDERR | LOG_DESTINATION_CSVLOG;
+				size_rotation_for = LOG_DESTINATION_STDERR | LOG_DESTINATION_CSVLOG | LOG_DESTINATION_JSONLOG;
 			logfile_rotate(time_based_rotation, size_rotation_for);
 		}
 
@@ -625,6 +643,20 @@ SysLogger_Start(void)
 		pfree(filename);
 	}
 
+	/*
+	 * Likewise for the initial JSON log file, if that's enabled.  (Note that
+	 * we open syslogFile even when only JSON output is nominally enabled,
+	 * since some code paths will write to syslogFile anyway.)
+	 */
+	if (Log_destination & LOG_DESTINATION_JSONLOG)
+	{
+		filename = logfile_getname(first_syslogger_file_time, ".json");
+
+		jsonlogFile = logfile_open(filename, "a", false);
+
+		pfree(filename);
+	}
+
 #ifdef EXEC_BACKEND
 	switch ((sysloggerPid = syslogger_forkexec()))
 #else
@@ -722,6 +754,11 @@ SysLogger_Start(void)
 				fclose(csvlogFile);
 				csvlogFile = NULL;
 			}
+			if (jsonlogFile != NULL)
+			{
+				fclose(jsonlogFile);
+				jsonlogFile = NULL;
+			}
 			return (int) sysloggerPid;
 	}
 
@@ -744,6 +781,7 @@ syslogger_forkexec(void)
 	int			ac = 0;
 	char		filenobuf[32];
 	char		csvfilenobuf[32];
+	char		jsonfilenobuf[32];
 
 	av[ac++] = "postgres";
 	av[ac++] = "--forklog";
@@ -771,14 +809,25 @@ syslogger_forkexec(void)
 				 fileno(csvlogFile));
 	else
 		strcpy(csvfilenobuf, "-1");
+	if (jsonlogFile != NULL)
+		snprintf(jsonfilenobuf, sizeof(jsonfilenobuf), "%d",
+				 fileno(jsonlogFile));
+	else
+		strcpy(jsonfilenobuf, "-1");
 #else							/* WIN32 */
 	if (csvlogFile != NULL)
 		snprintf(csvfilenobuf, sizeof(csvfilenobuf), "%ld",
 				 (long) _get_osfhandle(_fileno(csvlogFile)));
 	else
 		strcpy(csvfilenobuf, "0");
+	if (jsonlogFile != NULL)
+		snprintf(jsonfilenobuf, sizeof(jsonfilenobuf), "%ld",
+				 (long) _get_osfhandle(_fileno(jsonlogFile)));
+	else
+		strcpy(jsonfilenobuf, "0");
 #endif							/* WIN32 */
 	av[ac++] = csvfilenobuf;
+	av[ac++] = jsonfilenobuf;
 
 	av[ac] = NULL;
 	Assert(ac < lengthof(av));
@@ -796,8 +845,8 @@ syslogger_parseArgs(int argc, char *argv[])
 {
 	int			fd;
 
-	Assert(argc == 5);
-	argv += 3;
+	Assert(argc == 6);
+	argv += 4;
 
 	/*
 	 * Re-open the error output files that were opened by SysLogger_Start().
@@ -819,6 +868,12 @@ syslogger_parseArgs(int argc, char *argv[])
 		csvlogFile = fdopen(fd, "a");
 		setvbuf(csvlogFile, NULL, PG_IOLBF, 0);
 	}
+	fd = atoi(*argv++);
+	if (fd != -1)
+	{
+		jsonlogFile = fdopen(fd, "a");
+		setvbuf(jsonlogFile, NULL, PG_IOLBF, 0);
+	}
 #else							/* WIN32 */
 	fd = atoi(*argv++);
 	if (fd != 0)
@@ -840,6 +895,16 @@ syslogger_parseArgs(int argc, char *argv[])
 			setvbuf(csvlogFile, NULL, PG_IOLBF, 0);
 		}
 	}
+	fd = atoi(*argv++);
+	if (fd != 0)
+	{
+		fd = _open_osfhandle(fd, _O_APPEND | _O_TEXT);
+		if (fd > 0)
+		{
+			jsonlogFile = fdopen(fd, "a");
+			setvbuf(jsonlogFile, NULL, PG_IOLBF, 0);
+		}
+	}
 #endif							/* WIN32 */
 }
 #endif							/* EXEC_BACKEND */
@@ -892,6 +957,7 @@ process_pipe_input(char *logbuffer, int *bytes_in_logbuffer)
 			p.pid != 0 &&
 			(p.is_last == 't' || p.is_last == 'f') &&
 			(p.dest == LOG_DESTINATION_CSVLOG ||
+			 p.dest == LOG_DESTINATION_JSONLOG ||
 			 p.dest == LOG_DESTINATION_STDERR))
 		{
 			List	   *buffer_list;
@@ -1079,19 +1145,23 @@ write_syslogger_file(const char *buffer, int count, int destination)
 	FILE	   *logfile;
 
 	/*
-	 * If we're told to write to csvlogFile, but it's not open, dump the data
-	 * to syslogFile (which is always open) instead.  This can happen if CSV
+	 * If we're told to write to a structured log file, but it's not open, dump the data
+	 * to syslogFile (which is always open) instead.  This can happen if structured
 	 * output is enabled after postmaster start and we've been unable to open
-	 * csvlogFile.  There are also race conditions during a parameter change
-	 * whereby backends might send us CSV output before we open csvlogFile or
-	 * after we close it.  Writing CSV-formatted output to the regular log
+	 * logFile.  There are also race conditions during a parameter change
+	 * whereby backends might send us structured output before we open the logFile or
+	 * after we close it.  Writing formatted output to the regular log
 	 * file isn't great, but it beats dropping log output on the floor.
 	 *
-	 * Think not to improve this by trying to open csvlogFile on-the-fly.  Any
+	 * Think not to improve this by trying to open logFile on-the-fly.  Any
 	 * failure in that would lead to recursion.
 	 */
-	logfile = (destination == LOG_DESTINATION_CSVLOG &&
-			   csvlogFile != NULL) ? csvlogFile : syslogFile;
+	if ((destination & LOG_DESTINATION_CSVLOG) && csvlogFile != NULL)
+		logfile = csvlogFile;
+	else if ((destination & LOG_DESTINATION_JSONLOG) && jsonlogFile != NULL)
+		logfile = jsonlogFile;
+	else
+		logfile = syslogFile;
 
 	rc = fwrite(buffer, 1, count, logfile);
 
@@ -1162,7 +1232,8 @@ pipeThread(void *arg)
 		if (Log_RotationSize > 0)
 		{
 			if (ftell(syslogFile) >= Log_RotationSize * 1024L ||
-				(csvlogFile != NULL && ftell(csvlogFile) >= Log_RotationSize * 1024L))
+				(csvlogFile != NULL && ftell(csvlogFile) >= Log_RotationSize * 1024L) ||
+				(jsonlogFile != NULL && ftell(jsonlogFile) >= Log_RotationSize * 1024L))
 				SetLatch(MyLatch);
 		}
 		LeaveCriticalSection(&sysloggerSection);
@@ -1235,6 +1306,7 @@ logfile_rotate(bool time_based_rotation, int size_rotation_for)
 {
 	char	   *filename;
 	char	   *csvfilename = NULL;
+	char	   *jsonfilename = NULL;
 	pg_time_t	fntime;
 	FILE	   *fh;
 
@@ -1252,6 +1324,9 @@ logfile_rotate(bool time_based_rotation, int size_rotation_for)
 	filename = logfile_getname(fntime, NULL);
 	if (Log_destination & LOG_DESTINATION_CSVLOG)
 		csvfilename = logfile_getname(fntime, ".csv");
+	if (Log_destination & LOG_DESTINATION_JSONLOG)
+		jsonfilename = logfile_getname(fntime, ".json");
+
 
 	/*
 	 * Decide whether to overwrite or append.  We can overwrite if (a)
@@ -1289,6 +1364,8 @@ logfile_rotate(bool time_based_rotation, int size_rotation_for)
 				pfree(filename);
 			if (csvfilename)
 				pfree(csvfilename);
+			if (jsonfilename)
+				pfree(jsonfilename);
 			return;
 		}
 
@@ -1303,10 +1380,10 @@ logfile_rotate(bool time_based_rotation, int size_rotation_for)
 	}
 
 	/*
-	 * Same as above, but for csv file.  Note that if LOG_DESTINATION_CSVLOG
-	 * was just turned on, we might have to open csvlogFile here though it was
+	 * Same as above, but for a structured file.  Note that if LOG_DESTINATION_[STRUCTURED]LOG
+	 * was just turned on, we might have to open logFile here though it was
 	 * not open before.  In such a case we'll append not overwrite (since
-	 * last_csv_file_name will be NULL); that is consistent with the normal
+	 * last_*_file_name will be NULL); that is consistent with the normal
 	 * rules since it's not a time-based rotation.
 	 */
 	if ((Log_destination & LOG_DESTINATION_CSVLOG) &&
@@ -1362,11 +1439,66 @@ logfile_rotate(bool time_based_rotation, int size_rotation_for)
 			pfree(last_csv_file_name);
 		last_csv_file_name = NULL;
 	}
+	if ((Log_destination & LOG_DESTINATION_JSONLOG) &&
+		(jsonlogFile == NULL ||
+		 time_based_rotation || (size_rotation_for & LOG_DESTINATION_JSONLOG)))
+	{
+		if (Log_truncate_on_rotation && time_based_rotation &&
+			last_json_file_name != NULL &&
+			strcmp(jsonfilename, last_json_file_name) != 0)
+			fh = logfile_open(jsonfilename, "w", true);
+		else
+			fh = logfile_open(jsonfilename, "a", true);
+
+		if (!fh)
+		{
+			/*
+			 * ENFILE/EMFILE are not too surprising on a busy system; just
+			 * keep using the old file till we manage to get a new one.
+			 * Otherwise, assume something's wrong with Log_directory and stop
+			 * trying to create files.
+			 */
+			if (errno != ENFILE && errno != EMFILE)
+			{
+				ereport(LOG,
+						(errmsg("disabling automatic rotation (use SIGHUP to re-enable)")));
+				rotation_disabled = true;
+			}
+
+			if (filename)
+				pfree(filename);
+			if (jsonfilename)
+				pfree(jsonfilename);
+			return;
+		}
+
+		if (jsonlogFile != NULL)
+			fclose(jsonlogFile);
+		jsonlogFile = fh;
+
+		/* instead of pfree'ing filename, remember it for next time */
+		if (last_json_file_name != NULL)
+			pfree(last_json_file_name);
+		last_json_file_name = jsonfilename;
+		jsonfilename = NULL;
+	}
+	else if (!(Log_destination & LOG_DESTINATION_JSONLOG) &&
+			 jsonlogFile != NULL)
+	{
+		/* JSONLOG was just turned off, so close the old file */
+		fclose(jsonlogFile);
+		jsonlogFile = NULL;
+		if (last_json_file_name != NULL)
+			pfree(last_json_file_name);
+		last_json_file_name = NULL;
+	}
 
 	if (filename)
 		pfree(filename);
 	if (csvfilename)
 		pfree(csvfilename);
+	if (jsonfilename)
+		pfree(jsonfilename);
 
 	update_metainfo_datafile();
 
@@ -1454,7 +1586,8 @@ update_metainfo_datafile(void)
 	mode_t		oumask;
 
 	if (!(Log_destination & LOG_DESTINATION_STDERR) &&
-		!(Log_destination & LOG_DESTINATION_CSVLOG))
+		!(Log_destination & LOG_DESTINATION_CSVLOG) &&
+		!(Log_destination & LOG_DESTINATION_JSONLOG))
 	{
 		if (unlink(LOG_METAINFO_DATAFILE) < 0 && errno != ENOENT)
 			ereport(LOG,
@@ -1512,6 +1645,18 @@ update_metainfo_datafile(void)
 			return;
 		}
 	}
+	if (last_json_file_name && (Log_destination & LOG_DESTINATION_JSONLOG))
+	{
+		if (fprintf(fh, "jsonlog %s\n", last_json_file_name) < 0)
+		{
+			ereport(LOG,
+					(errcode_for_file_access(),
+					 errmsg("could not write file \"%s\": %m",
+							LOG_METAINFO_DATAFILE_TMP)));
+			fclose(fh);
+			return;
+		}
+	}
 	fclose(fh);
 
 	if (rename(LOG_METAINFO_DATAFILE_TMP, LOG_METAINFO_DATAFILE) != 0)
diff --git a/src/backend/utils/adt/misc.c b/src/backend/utils/adt/misc.c
index 88faf4dfd7..4931859627 100644
--- a/src/backend/utils/adt/misc.c
+++ b/src/backend/utils/adt/misc.c
@@ -843,11 +843,12 @@ pg_current_logfile(PG_FUNCTION_ARGS)
 	{
 		logfmt = text_to_cstring(PG_GETARG_TEXT_PP(0));
 
-		if (strcmp(logfmt, "stderr") != 0 && strcmp(logfmt, "csvlog") != 0)
+		if (strcmp(logfmt, "stderr") != 0 && strcmp(logfmt, "csvlog") != 0 &&
+				strcmp(logfmt, "jsonlog") != 0)
 			ereport(ERROR,
 					(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
 					 errmsg("log format \"%s\" is not supported", logfmt),
-					 errhint("The supported log formats are \"stderr\" and \"csvlog\".")));
+					 errhint("The supported log formats are \"stderr\", \"csvlog\", and \"jsonlog\".")));
 	}
 
 	fd = AllocateFile(LOG_METAINFO_DATAFILE, "r");
diff --git a/src/backend/utils/error/Makefile b/src/backend/utils/error/Makefile
index ef770dd2f2..65ba61fb3c 100644
--- a/src/backend/utils/error/Makefile
+++ b/src/backend/utils/error/Makefile
@@ -15,6 +15,7 @@ include $(top_builddir)/src/Makefile.global
 OBJS = \
 	assert.o \
 	csvlog.o \
-	elog.o
+	elog.o \
+	jsonlog.o
 
 include $(top_srcdir)/src/backend/common.mk
diff --git a/src/backend/utils/error/elog.c b/src/backend/utils/error/elog.c
index 47d6677827..c259616a76 100644
--- a/src/backend/utils/error/elog.c
+++ b/src/backend/utils/error/elog.c
@@ -2922,6 +2922,30 @@ send_message_to_server_log(ErrorData *edata)
 			pfree(buf.data);
 		}
 	}
+	/* Write to JSON log if enabled */
+	else if (Log_destination & LOG_DESTINATION_JSONLOG)
+	{
+		if (redirection_done || MyBackendType == B_LOGGER)
+		{
+			/*
+			 * send JSON data if it's safe to do so (syslogger doesn't need the
+			 * pipe). First get back the space in the message buffer.
+			 */
+			pfree(buf.data);
+			write_jsonlog(edata);
+		}
+		else
+		{
+			/*
+			 * syslogger not up (yet), so just dump the message to stderr,
+			 * unless we already did so above.
+			 */
+			if (!(Log_destination & LOG_DESTINATION_STDERR) &&
+				whereToSendOutput != DestDebug)
+				write_console(buf.data, buf.len);
+			pfree(buf.data);
+		}
+	}
 	else
 	{
 		pfree(buf.data);
@@ -2965,7 +2989,7 @@ write_pipe_chunks(char *data, int len, int dest)
 	/* write all but the last chunk */
 	while (len > PIPE_MAX_PAYLOAD)
 	{
-		p.proto.is_last = (dest == LOG_DESTINATION_CSVLOG ? 'F' : 'f');
+		p.proto.is_last = ((dest == LOG_DESTINATION_CSVLOG || dest == LOG_DESTINATION_JSONLOG) ? 'F' : 'f');
 		p.proto.len = PIPE_MAX_PAYLOAD;
 		memcpy(p.proto.data, data, PIPE_MAX_PAYLOAD);
 		rc = write(fd, &p, PIPE_HEADER_SIZE + PIPE_MAX_PAYLOAD);
diff --git a/src/backend/utils/error/jsonlog.c b/src/backend/utils/error/jsonlog.c
new file mode 100644
index 0000000000..2777124977
--- /dev/null
+++ b/src/backend/utils/error/jsonlog.c
@@ -0,0 +1,312 @@
+/*-------------------------------------------------------------------------
+ *
+ * jsonlog.c
+ *	  JSON logging
+ *
+ * Copyright (c) 2021, PostgreSQL Global Development Group
+ *
+ *
+ * IDENTIFICATION
+ *	  src/backend/utils/error/jsonlog.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "access/xact.h"
+#include "libpq/libpq.h"
+#include "lib/stringinfo.h"
+#include "miscadmin.h"
+#include "postmaster/bgworker.h"
+#include "postmaster/syslogger.h"
+#include "storage/lock.h"
+#include "storage/proc.h"
+#include "tcop/tcopprot.h"
+#include "utils/backend_status.h"
+#include "utils/elog-internal.h"
+#include "utils/guc.h"
+#include "utils/json.h"
+#include "utils/ps_status.h"
+
+/*
+ * appendJSONKeyValue
+ * Append to given StringInfo a comma followed by a JSON key and value.
+ * Both the key and value will be escaped as JSON string literals.
+ */
+static void
+appendJSONKeyValue(StringInfo buf, const char *key, const char *value)
+{
+	if (value == NULL)
+		return;
+	appendStringInfoChar(buf, ',');
+	escape_json(buf, key);
+	appendStringInfoChar(buf, ':');
+	escape_json(buf, value);
+}
+
+/*
+ * appendJSONKeyValueFmt
+ * Evaluate the fmt string and then invoke appendJSONKeyValue as the
+ * value of the JSON property. Both the key and value will be escaped by
+ * appendJSONKeyValue.
+ */
+static void
+appendJSONKeyValueFmt(StringInfo buf, const char *key, const char *fmt,...)
+{
+	int			save_errno = errno;
+	size_t		len = 128;		/* initial assumption about buffer size */
+	char	    *value;
+
+	for (;;)
+	{
+		va_list		args;
+		size_t		newlen;
+
+		/*
+		 * Allocate result buffer.  Note that in frontend this maps to malloc
+		 * with exit-on-error.
+		 */
+		value = (char *) palloc(len);
+
+		/* Try to format the data. */
+		errno = save_errno;
+		va_start(args, fmt);
+		newlen = pvsnprintf(value, len, fmt, args);
+		va_end(args);
+
+		if (newlen < len)
+			break; /* success */
+
+		/* Release buffer and loop around to try again with larger len. */
+		pfree(value);
+		len = newlen;
+	}
+	appendJSONKeyValue(buf, key, value);
+	/* Clean up */
+	pfree(value);
+}
+
+/*
+ * appendJSONKeyValueAsInt
+ * Append to given StringInfo a comma followed by a JSON key and value with
+ * value being formatted as a signed integer (a JSON number).
+ */
+static void
+appendJSONKeyValueAsInt(StringInfo buf, const char *key, int value)
+{
+	appendStringInfoChar(buf, ',');
+	escape_json(buf, key);
+	appendStringInfoChar(buf, ':');
+	appendStringInfo(buf, "%d", value);
+}
+
+/*
+ * appendJSONKeyValueAsUInt
+ * Append to given StringInfo a comma followed by a JSON key and value with
+ * value being formatted as an unsigned integer (a JSON number).
+ */
+static void
+appendJSONKeyValueAsUInt(StringInfo buf, const char *key, int value)
+{
+	appendStringInfoChar(buf, ',');
+	escape_json(buf, key);
+	appendStringInfoChar(buf, ':');
+	appendStringInfo(buf, "%u", value);
+}
+
+/*
+ * appendJSONKeyValueAsLong
+ * Append to given StringInfo a comma followed by a JSON key and value with
+ * value being formatted as a long (a JSON number).
+ */
+static void
+appendJSONKeyValueAsLong(StringInfo buf, const char *key, long value)
+{
+	appendStringInfoChar(buf, ',');
+	escape_json(buf, key);
+	appendStringInfoChar(buf, ':');
+	appendStringInfo(buf, "%ld", value);
+}
+
+/*
+ * Write logs in json format.
+ */
+void
+write_jsonlog(ErrorData *edata)
+{
+	StringInfoData	buf;
+
+	/* static counter for line numbers */
+	static long	log_line_number = 0;
+
+	/* Has the counter been reset in the current process? */
+	static int	log_my_pid = 0;
+
+	if (log_my_pid != MyProcPid)
+	{
+		log_line_number = 0;
+		log_my_pid = MyProcPid;
+		formatted_start_time[0] = '\0';
+	}
+	log_line_number++;
+
+	initStringInfo(&buf);
+
+	/* Initialize string */
+	appendStringInfoChar(&buf, '{');
+
+	/*
+	 * timestamp with milliseconds
+	 *
+	 * Check if the timestamp is already calculated for the syslog message,
+	 * and use it if so.  Otherwise, get the current timestamp.  This is done
+	 * to put same timestamp in both syslog and jsonlog messages.
+	 */
+	if (formatted_log_time[0] == '\0')
+		setup_formatted_log_time();
+
+	/* First property does not use appendJSONKeyValue as it does not have comma prefix */
+	escape_json(&buf, "timestamp");
+	appendStringInfoChar(&buf, ':');
+	escape_json(&buf, formatted_log_time);
+
+	/* username */
+	if (MyProcPort)
+		appendJSONKeyValue(&buf, "user", MyProcPort->user_name);
+
+	/* database name */
+	if (MyProcPort)
+		appendJSONKeyValue(&buf, "dbname", MyProcPort->database_name);
+
+	/* leader PID */
+	if (MyProc)
+	{
+		PGPROC	   *leader = MyProc->lockGroupLeader;
+
+		/*
+		 * Show the leader only for active parallel workers.  This leaves out
+		 * the leader of a parallel group.
+		 */
+		if (leader && leader->pid != MyProcPid)
+			appendJSONKeyValueAsInt(&buf, "leader_pid", leader->pid);
+	}
+
+	/* Process ID */
+	if (MyProcPid != 0)
+		appendJSONKeyValueAsInt(&buf, "pid", MyProcPid);
+
+	/* Remote host and port */
+	if (MyProcPort && MyProcPort->remote_host)
+	{
+		appendJSONKeyValue(&buf, "remote_host", MyProcPort->remote_host);
+		if (MyProcPort->remote_port && MyProcPort->remote_port[0] != '\0')
+			appendJSONKeyValue(&buf, "remote_port", MyProcPort->remote_port);
+	}
+
+	/* Session id */
+	appendJSONKeyValueFmt(&buf, "session_id", "%lx.%x", (long) MyStartTime, MyProcPid);
+
+	/* Line number */
+	appendJSONKeyValueAsLong(&buf, "line_num", log_line_number);
+
+	/* PS display */
+	if (MyProcPort)
+	{
+		StringInfoData	msgbuf;
+		const char	   *psdisp;
+		int				displen;
+
+		initStringInfo(&msgbuf);
+		psdisp = get_ps_display(&displen);
+		appendBinaryStringInfo(&msgbuf, psdisp, displen);
+		appendJSONKeyValue(&buf, "ps", msgbuf.data);
+
+		pfree(msgbuf.data);
+	}
+
+	/* session start timestamp */
+	if (formatted_start_time[0] == '\0')
+		setup_formatted_start_time();
+	appendJSONKeyValue(&buf, "session_start", formatted_start_time);
+
+	/* Virtual transaction id */
+	/* keep VXID format in sync with lockfuncs.c */
+	if (MyProc != NULL && MyProc->backendId != InvalidBackendId)
+		appendJSONKeyValueFmt(&buf, "vxid", "%d/%u", MyProc->backendId, MyProc->lxid);
+
+	/* Transaction id */
+	appendJSONKeyValueFmt(&buf, "txid", "%u", GetTopTransactionIdIfAny());
+
+	/* Error severity */
+	if (edata->elevel)
+		appendJSONKeyValue(&buf, "error_severity", (char *) error_severity(edata->elevel));
+
+	/* query id */
+	appendJSONKeyValueFmt(&buf,  "query_id", "%lld", (long long) pgstat_get_my_query_id());
+
+	/* SQL state code */
+	if (edata->sqlerrcode)
+		appendJSONKeyValue(&buf, "state_code", unpack_sql_state(edata->sqlerrcode));
+
+	/* errdetail or error_detail log */
+	if (edata->detail_log)
+		appendJSONKeyValue(&buf, "detail", edata->detail_log);
+	else if (edata->detail)
+		appendJSONKeyValue(&buf, "detail", edata->detail);
+
+	/* errhint */
+	if (edata->hint)
+		appendJSONKeyValue(&buf, "hint", edata->hint);
+
+	/* Internal query */
+	if (edata->internalquery)
+		appendJSONKeyValue(&buf, "internal_query", edata->internalquery);
+
+	/* If the internal query got printed, print internal pos, too */
+	if (edata->internalpos > 0 && edata->internalquery != NULL)
+		appendJSONKeyValueAsUInt(&buf, "internal_position", edata->internalpos);
+
+	/* errcontext */
+	if (edata->context && !edata->hide_ctx)
+		appendJSONKeyValue(&buf, "context", edata->context);
+
+	/* user query --- only reported if not disabled by the caller */
+	if (is_log_level_output(edata->elevel, log_min_error_statement) &&
+		debug_query_string != NULL &&
+		!edata->hide_stmt)
+	{
+		appendJSONKeyValue(&buf, "statement", debug_query_string);
+		if (edata->cursorpos > 0)
+			appendJSONKeyValueAsInt(&buf, "cursor_position", edata->cursorpos);
+	}
+
+	/* file error location */
+	if (Log_error_verbosity >= PGERROR_VERBOSE)
+	{
+		if (edata->funcname)
+			appendJSONKeyValue(&buf, "func_name", edata->funcname);
+		if (edata->filename)
+		{
+			appendJSONKeyValue(&buf, "file_name", edata->filename);
+			appendJSONKeyValueAsInt(&buf, "file_line_num", edata->lineno);
+		}
+	}
+
+	/* Application name */
+	if (application_name && application_name[0] != '\0')
+		appendJSONKeyValue(&buf, "application_name", application_name);
+
+	/* backend type */
+	appendJSONKeyValue(&buf, "backend_type", get_backend_type_for_log());
+
+	/* Error message */
+	appendJSONKeyValue(&buf, "message", edata->message);
+
+	/* Finish string */
+	appendStringInfoChar(&buf, '}');
+	appendStringInfoChar(&buf, '\n');
+
+	write_syslogger(buf.data, buf.len, LOG_DESTINATION_JSONLOG);
+
+	pfree(buf.data);
+}
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index 467b0fd6fe..ea4fa0c5c1 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -11691,6 +11691,8 @@ check_log_destination(char **newval, void **extra, GucSource source)
 			newlogdest |= LOG_DESTINATION_STDERR;
 		else if (pg_strcasecmp(tok, "csvlog") == 0)
 			newlogdest |= LOG_DESTINATION_CSVLOG;
+		else if (pg_strcasecmp(tok, "jsonlog") == 0)
+			newlogdest |= LOG_DESTINATION_JSONLOG;
 #ifdef HAVE_SYSLOG
 		else if (pg_strcasecmp(tok, "syslog") == 0)
 			newlogdest |= LOG_DESTINATION_SYSLOG;
diff --git a/src/bin/pg_ctl/t/006_jsonlog.pl b/src/bin/pg_ctl/t/006_jsonlog.pl
new file mode 100644
index 0000000000..e6b9979fd7
--- /dev/null
+++ b/src/bin/pg_ctl/t/006_jsonlog.pl
@@ -0,0 +1,98 @@
+use strict;
+use warnings;
+
+use PostgresNode;
+use TestLib;
+use Test::More tests => 4;
+use Time::HiRes qw(usleep);
+
+# Set up node with logging collector
+my $node = PostgresNode->new('primary');
+$node->init();
+$node->append_conf(
+	'postgresql.conf', qq(
+logging_collector = on
+lc_messages = 'C'
+log_destination = 'jsonlog'
+));
+
+
+$node->start();
+
+# Verify that log output gets to the file
+
+$node->psql('postgres', 'SELECT 1/0');
+
+my $current_logfiles = slurp_file($node->data_dir . '/current_logfiles');
+
+note "current_logfiles = $current_logfiles";
+
+like(
+	$current_logfiles,
+	qr|^jsonlog log/postgresql-.*json$|,
+	'current_logfiles is sane');
+
+my $lfname = $current_logfiles;
+$lfname =~ s/^jsonlog //;
+chomp $lfname;
+
+# might need to retry if logging collector process is slow...
+my $max_attempts = 100;
+
+my $first_logfile;
+for (my $attempts = 0; $attempts < $max_attempts; $attempts++)
+{
+	$first_logfile = slurp_file($node->data_dir . '/' . $lfname);
+	last if $first_logfile =~ m/division by zero/;
+	usleep(100_000);
+}
+
+like(
+	$first_logfile,
+	qr/"statement":"SELECT 1\/0",.*"message":"division by zero/,
+	'found expected log file content');
+
+# Sleep 2 seconds and ask for log rotation; this should result in
+# output into a different log file name.
+sleep(2);
+$node->logrotate();
+
+# pg_ctl logrotate doesn't wait for rotation request to be completed.
+# Allow a bit of time for it to happen.
+my $new_current_logfiles;
+for (my $attempts = 0; $attempts < $max_attempts; $attempts++)
+{
+	$new_current_logfiles = slurp_file($node->data_dir . '/current_logfiles');
+	last if $new_current_logfiles ne $current_logfiles;
+	usleep(100_000);
+}
+
+note "now current_logfiles = $new_current_logfiles";
+
+like(
+	$new_current_logfiles,
+	qr|^jsonlog log/postgresql-.*json$|,
+	'new current_logfiles is sane');
+
+$lfname = $new_current_logfiles;
+$lfname =~ s/^jsonlog //;
+chomp $lfname;
+
+# Verify that log output gets to this file, too
+
+$node->psql('postgres', 'fee fi fo fum');
+
+my $second_logfile;
+for (my $attempts = 0; $attempts < $max_attempts; $attempts++)
+{
+	$second_logfile = slurp_file($node->data_dir . '/' . $lfname);
+	last if $second_logfile =~ m/syntax error/;
+	usleep(100_000);
+}
+
+like(
+	$second_logfile,
+	qr/"statement":"fee fi fo fum",.*"message":"syntax error/,
+	'found expected log file content in new log file');
+
+$node->stop();
diff --git a/src/include/utils/elog-internal.h b/src/include/utils/elog-internal.h
index ac08b6f12f..13c217a29d 100644
--- a/src/include/utils/elog-internal.h
+++ b/src/include/utils/elog-internal.h
@@ -23,6 +23,7 @@ const char * error_severity(int elevel);
 void write_syslogger(char *data, int len, int dest);
 
 void write_csvlog(ErrorData *edata);
+void write_jsonlog(ErrorData *edata);
 
 /*
  * is_log_level_output -- is elevel logically >= log_min_level?
diff --git a/src/include/utils/elog.h b/src/include/utils/elog.h
index f53607e12e..c0c699f485 100644
--- a/src/include/utils/elog.h
+++ b/src/include/utils/elog.h
@@ -436,6 +436,7 @@ extern bool syslog_split_messages;
 #define LOG_DESTINATION_SYSLOG	 2
 #define LOG_DESTINATION_EVENTLOG 4
 #define LOG_DESTINATION_CSVLOG	 8
+#define LOG_DESTINATION_JSONLOG	16
 
 /* Other exported functions */
 extern void DebugFileOpen(void);
-- 
2.17.1

Reply via email to