Hi,

This patch adds a new log_destination, "jsonlog", that writes log entries
as lines of JSON. It was originally started by David Fetter using
the jsonlog module by Michael Paquier (
https://github.com/michaelpq/pg_plugins/blob/master/jsonlog/jsonlog.c) as a
basis for how to serialize the log messages. Thanks to both of them because
this wouldn't be possible without that starting point.

The first commit splits out the destination in log pipe messages into its
own field. Previously it would piggyback on the "is_last" field. This adds
an int to the message size but makes the rest of the code easier to follow.

The second commit adds a TAP test for log_destination "csvlog". This was
done both to confirm that the previous change didn't break anything and to
serve as a skeleton for the test in the next commit.

The third commit adds the new log_destination "jsonlog". The output format
is one line per entry with the top level output being a JSON object keyed
with the log fields. Newlines in the output fields are escaped as \n so the
output file has exactly one line per log entry. It also includes a new test
for verifying the JSON output with some basic regex checks (similar to the
csvlog test).

Here's a sample of what the log entries look like:

{"timestamp":"2021-08-31 10:15:25.129
EDT","user":"sehrope","dbname":"postgres","pid":12012,"remote_host":"[local]","session_id":"612e397d.2eec","line_num":1,"ps":"idle","session_start":"2021-08-31
10:15:25
EDT","vxid":"3/2","txid":"0","error_severity":"LOG","application_name":"
006_jsonlog.pl","message":"statement: SELECT 1/0"}

It builds and passes "make check-world" on Linux. It also includes code to
handle Windows, but I have not actually tried building it there.

Regards,
-- Sehrope Sarkuni
Founder & CEO | JackDB, Inc. | https://www.jackdb.com/
From d5b3f5fe44e91d35aefdd570758d5b2a9e9c1a36 Mon Sep 17 00:00:00 2001
From: Sehrope Sarkuni <sehr...@jackdb.com>
Date: Wed, 10 Jul 2019 10:02:31 -0400
Subject: [PATCH 1/3] Adds separate dest field to log protocol PipeProtoHeader

Adds a separate dest field to PipeProtoHeader to store the log destination
requested by the sending process. Also changes the is_last field to only
store whether the chunk is the last one for a message rather than also
including whether the destination is csvlog.
---
 src/backend/postmaster/syslogger.c | 15 ++++++---------
 src/backend/utils/error/elog.c     |  4 +++-
 src/include/postmaster/syslogger.h |  4 ++--
 3 files changed, 11 insertions(+), 12 deletions(-)

diff --git a/src/backend/postmaster/syslogger.c b/src/backend/postmaster/syslogger.c
index cad43bdef2..edd8f33204 100644
--- a/src/backend/postmaster/syslogger.c
+++ b/src/backend/postmaster/syslogger.c
@@ -878,7 +878,6 @@ process_pipe_input(char *logbuffer, int *bytes_in_logbuffer)
 {
 	char	   *cursor = logbuffer;
 	int			count = *bytes_in_logbuffer;
-	int			dest = LOG_DESTINATION_STDERR;
 
 	/* While we have enough for a header, process data... */
 	while (count >= (int) (offsetof(PipeProtoHeader, data) + 1))
@@ -891,8 +890,9 @@ process_pipe_input(char *logbuffer, int *bytes_in_logbuffer)
 		if (p.nuls[0] == '\0' && p.nuls[1] == '\0' &&
 			p.len > 0 && p.len <= PIPE_MAX_PAYLOAD &&
 			p.pid != 0 &&
-			(p.is_last == 't' || p.is_last == 'f' ||
-			 p.is_last == 'T' || p.is_last == 'F'))
+			(p.is_last == 't' || p.is_last == 'f') &&
+			(p.dest == LOG_DESTINATION_CSVLOG ||
+			 p.dest == LOG_DESTINATION_STDERR))
 		{
 			List	   *buffer_list;
 			ListCell   *cell;
@@ -906,9 +906,6 @@ process_pipe_input(char *logbuffer, int *bytes_in_logbuffer)
 			if (count < chunklen)
 				break;
 
-			dest = (p.is_last == 'T' || p.is_last == 'F') ?
-				LOG_DESTINATION_CSVLOG : LOG_DESTINATION_STDERR;
-
 			/* Locate any existing buffer for this source pid */
 			buffer_list = buffer_lists[p.pid % NBUFFER_LISTS];
 			foreach(cell, buffer_list)
@@ -924,7 +921,7 @@ process_pipe_input(char *logbuffer, int *bytes_in_logbuffer)
 					free_slot = buf;
 			}
 
-			if (p.is_last == 'f' || p.is_last == 'F')
+			if (p.is_last == 'f')
 			{
 				/*
 				 * Save a complete non-final chunk in a per-pid buffer
@@ -970,7 +967,7 @@ process_pipe_input(char *logbuffer, int *bytes_in_logbuffer)
 					appendBinaryStringInfo(str,
 										   cursor + PIPE_HEADER_SIZE,
 										   p.len);
-					write_syslogger_file(str->data, str->len, dest);
+					write_syslogger_file(str->data, str->len, p.dest);
 					/* Mark the buffer unused, and reclaim string storage */
 					existing_slot->pid = 0;
 					pfree(str->data);
@@ -979,7 +976,7 @@ process_pipe_input(char *logbuffer, int *bytes_in_logbuffer)
 				{
 					/* The whole message was one chunk, evidently. */
 					write_syslogger_file(cursor + PIPE_HEADER_SIZE, p.len,
-										 dest);
+										 p.dest);
 				}
 			}
 
diff --git a/src/backend/utils/error/elog.c b/src/backend/utils/error/elog.c
index a3e1c59a82..cd13111708 100644
--- a/src/backend/utils/error/elog.c
+++ b/src/backend/utils/error/elog.c
@@ -3250,6 +3250,8 @@ write_pipe_chunks(char *data, int len, int dest)
 
 	p.proto.nuls[0] = p.proto.nuls[1] = '\0';
 	p.proto.pid = MyProcPid;
+	p.proto.dest = (int32) dest;
+	p.proto.is_last = 'f';
 
 	/* write all but the last chunk */
 	while (len > PIPE_MAX_PAYLOAD)
@@ -3264,7 +3266,7 @@ write_pipe_chunks(char *data, int len, int dest)
 	}
 
 	/* write the last chunk */
-	p.proto.is_last = (dest == LOG_DESTINATION_CSVLOG ? 'T' : 't');
+	p.proto.is_last = 't';
 	p.proto.len = len;
 	memcpy(p.proto.data, data, len);
 	rc = write(fd, &p, PIPE_HEADER_SIZE + len);
diff --git a/src/include/postmaster/syslogger.h b/src/include/postmaster/syslogger.h
index 1491eecb0f..41d026a474 100644
--- a/src/include/postmaster/syslogger.h
+++ b/src/include/postmaster/syslogger.h
@@ -46,8 +46,8 @@ typedef struct
 	char		nuls[2];		/* always \0\0 */
 	uint16		len;			/* size of this chunk (counts data only) */
 	int32		pid;			/* writer's pid */
-	char		is_last;		/* last chunk of message? 't' or 'f' ('T' or
-								 * 'F' for CSV case) */
+	int32		dest;			/* log destination */
+	char		is_last;        /* last chunk of message? 't' or 'f' */
 	char		data[FLEXIBLE_ARRAY_MEMBER];	/* data payload starts here */
 } PipeProtoHeader;
 
-- 
2.17.1

From dfb17c0b1804b9e54a287e6a058d02dd1be27ffb Mon Sep 17 00:00:00 2001
From: Sehrope Sarkuni <sehr...@jackdb.com>
Date: Tue, 31 Aug 2021 10:00:54 -0400
Subject: [PATCH 2/3] Add TAP test for csvlog

---
 src/bin/pg_ctl/t/005_csvlog.pl | 118 +++++++++++++++++++++++++++++++++
 1 file changed, 118 insertions(+)
 create mode 100644 src/bin/pg_ctl/t/005_csvlog.pl

diff --git a/src/bin/pg_ctl/t/005_csvlog.pl b/src/bin/pg_ctl/t/005_csvlog.pl
new file mode 100644
index 0000000000..c6ab0ddbcc
--- /dev/null
+++ b/src/bin/pg_ctl/t/005_csvlog.pl
@@ -0,0 +1,118 @@
+use strict;
+use warnings;
+
+use PostgresNode;
+use TestLib;
+use Test::More tests => 4;
+use Time::HiRes qw(usleep);
+
+# Set up node with logging collector
+my $node = PostgresNode->new('primary');
+$node->init();
+$node->append_conf(
+	'postgresql.conf', qq(
+logging_collector = on
+lc_messages = 'C'
+log_destination = 'csvlog'
+));
+
+$node->start();
+
+note "Before sleep";
+usleep(100_000);
+note "Before rotate";
+$node->logrotate();
+note "After rotate";
+usleep(100_000);
+note "After rotate sleep";
+
+# Verify that log output gets to the file
+
+$node->psql('postgres', 'SELECT 1/0');
+
+my $current_logfiles = slurp_file($node->data_dir . '/current_logfiles');
+
+for(my $tmp=0;$tmp < 10;$tmp++) {
+   my $current_logfiles = slurp_file($node->data_dir . '/current_logfiles');
+   note "current_logfiles = $current_logfiles";
+   usleep(100_000);
+}
+
+
+like(
+	$current_logfiles,
+	qr|^csvlog log/postgresql-.*$|,
+	'current_logfiles is sane');
+
+my $lfname = $current_logfiles;
+$lfname =~ s/^csvlog //;
+chomp $lfname;
+
+note "current_logfiles = $current_logfiles";
+note "lfname = $lfname";
+
+# might need to retry if logging collector process is slow...
+my $max_attempts = 180 * 10;
+
+my $first_logfile;
+for (my $attempts = 0; $attempts < $max_attempts; $attempts++)
+{
+    my $foo = $node->data_dir . '/' . $lfname;
+    note "will slurp: $foo";
+    $first_logfile = slurp_file($node->data_dir . '/' . $lfname);
+	last if $first_logfile =~ m/division by zero/;
+	usleep(100_000);
+}
+
+note "first_logfile = $first_logfile";
+# Our log entry should contain the error message and errant SQL
+like(
+	$first_logfile,
+	qr/division by zero.*"SELECT 1\/0"/,
+	'found expected log file content');
+
+# Sleep 2 seconds and ask for log rotation; this should result in
+# output into a different log file name.
+sleep(2);
+$node->logrotate();
+
+# pg_ctl logrotate doesn't wait for rotation request to be completed.
+# Allow a bit of time for it to happen.
+my $new_current_logfiles;
+for (my $attempts = 0; $attempts < $max_attempts; $attempts++)
+{
+	$new_current_logfiles = slurp_file($node->data_dir . '/current_logfiles');
+	last if $new_current_logfiles ne $current_logfiles;
+	usleep(100_000);
+}
+
+note "now current_logfiles = $new_current_logfiles";
+
+like(
+	$new_current_logfiles,
+	qr|^csvlog log/postgresql-.*$|,
+	'new current_logfiles is sane');
+
+$lfname = $new_current_logfiles;
+$lfname =~ s/^csvlog //;
+chomp $lfname;
+
+# Verify that log output gets to this file, too
+
+$node->psql('postgres', 'fee fi fo fum');
+
+my $second_logfile;
+for (my $attempts = 0; $attempts < $max_attempts; $attempts++)
+{
+	$second_logfile = slurp_file($node->data_dir . '/' . $lfname);
+	last if $second_logfile =~ m/syntax error/;
+	usleep(100_000);
+}
+
+like(
+	$second_logfile,
+	# Our log entry should have our bad string wrapped in quotes after the error
+	qr/syntax error.*,"fee fi fo fum"/,
+	'found expected log file content in new log file');
+
+$node->stop();
-- 
2.17.1

From fef57e34ab84f07f92b9c16ac3709a8070b5591b Mon Sep 17 00:00:00 2001
From: Sehrope Sarkuni <sehr...@jackdb.com>
Date: Tue, 31 Aug 2021 10:01:34 -0400
Subject: [PATCH 3/3] Add jsonlog log_destination for JSON server logs

Adds option to write server log files as JSON. Each log entry is written as its
own line with any internal newlines escaped as \n. Other non-ASCII and special
characters are also escaped using standard JSON escape sequences.

JSON logging is enabled by setting the GUC log_destination to "jsonlog" and
defaults to a log file with a ".json" extension.
---
 doc/src/sgml/config.sgml           |  67 ++++++-
 src/backend/postmaster/syslogger.c | 177 +++++++++++++++--
 src/backend/utils/adt/misc.c       |   5 +-
 src/backend/utils/error/elog.c     | 296 ++++++++++++++++++++++++++++-
 src/backend/utils/misc/guc.c       |   2 +
 src/bin/pg_ctl/t/006_jsonlog.pl    |  98 ++++++++++
 src/include/utils/elog.h           |   1 +
 7 files changed, 624 insertions(+), 22 deletions(-)
 create mode 100644 src/bin/pg_ctl/t/006_jsonlog.pl

diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml
index 2c31c35a6b..fe4bcd61ef 100644
--- a/doc/src/sgml/config.sgml
+++ b/doc/src/sgml/config.sgml
@@ -5926,7 +5926,8 @@ SELECT * FROM parent WHERE key = 2400;
        <para>
         <productname>PostgreSQL</productname> supports several methods
          for logging server messages, including
-         <systemitem>stderr</systemitem>, <systemitem>csvlog</systemitem> and
+         <systemitem>stderr</systemitem>, <systemitem>csvlog</systemitem>,
+         <systemitem>jsonlog</systemitem>, and
          <systemitem>syslog</systemitem>. On Windows,
          <systemitem>eventlog</systemitem> is also supported. Set this
          parameter to a list of desired log destinations separated by
@@ -5944,6 +5945,14 @@ SELECT * FROM parent WHERE key = 2400;
         <xref linkend="guc-logging-collector"/> must be enabled to generate
         CSV-format log output.
        </para>
+       <para>
+        If <systemitem>jsonlog</systemitem> is included in <varname>log_destination</varname>,
+        log entries are output in <acronym>JSON</acronym> format, which is convenient for
+        loading logs into analysis programs.
+        See <xref linkend="runtime-config-logging-jsonlog"/> for details.
+        <xref linkend="guc-logging-collector"/> must be enabled to generate
+        JSON-format log output.
+       </para>
        <para>
         When either <systemitem>stderr</systemitem> or
         <systemitem>csvlog</systemitem> are included, the file
@@ -5955,13 +5964,14 @@ SELECT * FROM parent WHERE key = 2400;
 <programlisting>
 stderr log/postgresql.log
 csvlog log/postgresql.csv
+jsonlog log/postgresql.json
 </programlisting>
 
         <filename>current_logfiles</filename> is recreated when a new log file
         is created as an effect of rotation, and
         when <varname>log_destination</varname> is reloaded.  It is removed when
-        neither <systemitem>stderr</systemitem>
-        nor <systemitem>csvlog</systemitem> are included
+        none of <systemitem>stderr</systemitem>,
+        <systemitem>csvlog</systemitem>, or <systemitem>jsonlog</systemitem> are included
         in <varname>log_destination</varname>, and when the logging collector is
         disabled.
        </para>
@@ -6101,6 +6111,13 @@ local0.*    /var/log/postgresql
         (If <varname>log_filename</varname> ends in <literal>.log</literal>, the suffix is
         replaced instead.)
        </para>
+       <para>
+        If JSON-format output is enabled in <varname>log_destination</varname>,
+        <literal>.json</literal> will be appended to the timestamped
+        log file name to create the file name for JSON-format output.
+        (If <varname>log_filename</varname> ends in <literal>.log</literal>, the suffix is
+        replaced instead.)
+       </para>
        <para>
         This parameter can only be set in the <filename>postgresql.conf</filename>
         file or on the server command line.
@@ -7433,6 +7450,50 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
         </orderedlist>
       </para>
     </sect2>
+     <sect2 id="runtime-config-logging-jsonlog">
+     <title>Using JSON-Format Log Output</title>
+
+       <para>
+        Including <literal>jsonlog</literal> in the <varname>log_destination</varname> list
+        provides a convenient way to import log files into many different programs.
+        This option emits log lines in <acronym>JSON</acronym> format.
+        Each log line is serialized as a JSON object with the following fields:
+<programlisting>
+        {
+            "timestamp": time stamp with milliseconds (string),
+            "user": user name (string),
+            "dbname": database name (string),
+            "pid": process ID (number),
+            "remote_host": client host (string),
+            "remote_port": port number (string),
+            "session_id": session ID (string),
+            "line_num": per-session line number (number),
+            "ps": current ps display (string),
+            "session_start": session start time (string),
+            "vxid": virtual transaction ID (string),
+            "txid": regular transaction ID (string),
+            "error_severity": error severity (string),
+            "state_code": SQLSTATE code (string),
+            "detail": error message detail (string),
+            "hint": hint (string),
+            "internal_query": internal query that led to the error (string),
+            "internal_position": cursor index into internal query (number),
+            "context": error context (string),
+            "statement": client supplied query string (string),
+            "cursor_position": cursor index into query string (string),
+            "func_name": error location function name (string),
+            "file_name": error location file name (string),
+            "file_line_num": error location file line number (number),
+            "application_name": client application name (string),
+            "message": error message (string)
+        }
+</programlisting>
+        String fields with null values are excluded from output.
+        Additional fields may be added in the future. User applications that process jsonlog
+        output should ignore unknown fields.
+       </para>
+
+    </sect2>
 
    <sect2>
     <title>Process Title</title>
diff --git a/src/backend/postmaster/syslogger.c b/src/backend/postmaster/syslogger.c
index edd8f33204..c8f6ef56fc 100644
--- a/src/backend/postmaster/syslogger.c
+++ b/src/backend/postmaster/syslogger.c
@@ -85,9 +85,11 @@ static bool pipe_eof_seen = false;
 static bool rotation_disabled = false;
 static FILE *syslogFile = NULL;
 static FILE *csvlogFile = NULL;
+static FILE *jsonlogFile = NULL;
 NON_EXEC_STATIC pg_time_t first_syslogger_file_time = 0;
 static char *last_file_name = NULL;
 static char *last_csv_file_name = NULL;
+static char *last_json_file_name = NULL;
 
 /*
  * Buffers for saving partial messages from different backends.
@@ -274,6 +276,8 @@ SysLoggerMain(int argc, char *argv[])
 	last_file_name = logfile_getname(first_syslogger_file_time, NULL);
 	if (csvlogFile != NULL)
 		last_csv_file_name = logfile_getname(first_syslogger_file_time, ".csv");
+	if (jsonlogFile != NULL)
+		last_json_file_name = logfile_getname(first_syslogger_file_time, ".json");
 
 	/* remember active logfile parameters */
 	currentLogDir = pstrdup(Log_directory);
@@ -360,6 +364,14 @@ SysLoggerMain(int argc, char *argv[])
 				(csvlogFile != NULL))
 				rotation_requested = true;
 
+			/*
+			 * Force a rotation if JSONLOG output was just turned on or off and
+			 * we need to open or close jsonlogFile accordingly.
+			 */
+			if (((Log_destination & LOG_DESTINATION_JSONLOG) != 0) !=
+				(jsonlogFile != NULL))
+				rotation_requested = true;
+
 			/*
 			 * If rotation time parameter changed, reset next rotation time,
 			 * but don't immediately force a rotation.
@@ -410,6 +422,12 @@ SysLoggerMain(int argc, char *argv[])
 				rotation_requested = true;
 				size_rotation_for |= LOG_DESTINATION_CSVLOG;
 			}
+			if (jsonlogFile != NULL &&
+				ftell(jsonlogFile) >= Log_RotationSize * 1024L)
+			{
+				rotation_requested = true;
+				size_rotation_for |= LOG_DESTINATION_JSONLOG;
+			}
 		}
 
 		if (rotation_requested)
@@ -419,7 +437,7 @@ SysLoggerMain(int argc, char *argv[])
 			 * was sent by pg_rotate_logfile() or "pg_ctl logrotate".
 			 */
 			if (!time_based_rotation && size_rotation_for == 0)
-				size_rotation_for = LOG_DESTINATION_STDERR | LOG_DESTINATION_CSVLOG;
+				size_rotation_for = LOG_DESTINATION_STDERR | LOG_DESTINATION_CSVLOG | LOG_DESTINATION_JSONLOG;
 			logfile_rotate(time_based_rotation, size_rotation_for);
 		}
 
@@ -625,6 +643,20 @@ SysLogger_Start(void)
 		pfree(filename);
 	}
 
+	/*
+	 * Likewise for the initial JSON log file, if that's enabled.  (Note that
+	 * we open syslogFile even when only JSON output is nominally enabled,
+	 * since some code paths will write to syslogFile anyway.)
+	 */
+	if (Log_destination & LOG_DESTINATION_JSONLOG)
+	{
+		filename = logfile_getname(first_syslogger_file_time, ".json");
+
+		jsonlogFile = logfile_open(filename, "a", false);
+
+		pfree(filename);
+	}
+
 #ifdef EXEC_BACKEND
 	switch ((sysloggerPid = syslogger_forkexec()))
 #else
@@ -722,6 +754,11 @@ SysLogger_Start(void)
 				fclose(csvlogFile);
 				csvlogFile = NULL;
 			}
+			if (jsonlogFile != NULL)
+			{
+				fclose(jsonlogFile);
+				jsonlogFile = NULL;
+			}
 			return (int) sysloggerPid;
 	}
 
@@ -744,6 +781,7 @@ syslogger_forkexec(void)
 	int			ac = 0;
 	char		filenobuf[32];
 	char		csvfilenobuf[32];
+	char		jsonfilenobuf[32];
 
 	av[ac++] = "postgres";
 	av[ac++] = "--forklog";
@@ -771,14 +809,25 @@ syslogger_forkexec(void)
 				 fileno(csvlogFile));
 	else
 		strcpy(csvfilenobuf, "-1");
+	if (jsonlogFile != NULL)
+		snprintf(jsonfilenobuf, sizeof(jsonfilenobuf), "%d",
+				 fileno(jsonlogFile));
+	else
+		strcpy(jsonfilenobuf, "-1");
 #else							/* WIN32 */
 	if (csvlogFile != NULL)
 		snprintf(csvfilenobuf, sizeof(csvfilenobuf), "%ld",
 				 (long) _get_osfhandle(_fileno(csvlogFile)));
 	else
 		strcpy(csvfilenobuf, "0");
+	if (jsonlogFile != NULL)
+		snprintf(jsonfilenobuf, sizeof(jsonfilenobuf), "%ld",
+				 (long) _get_osfhandle(_fileno(jsonlogFile)));
+	else
+		strcpy(jsonfilenobuf, "0");
 #endif							/* WIN32 */
 	av[ac++] = csvfilenobuf;
+	av[ac++] = jsonfilenobuf;
 
 	av[ac] = NULL;
 	Assert(ac < lengthof(av));
@@ -796,8 +845,8 @@ syslogger_parseArgs(int argc, char *argv[])
 {
 	int			fd;
 
-	Assert(argc == 5);
-	argv += 3;
+	Assert(argc == 6);
+	argv += 4;
 
 	/*
 	 * Re-open the error output files that were opened by SysLogger_Start().
@@ -819,6 +868,12 @@ syslogger_parseArgs(int argc, char *argv[])
 		csvlogFile = fdopen(fd, "a");
 		setvbuf(csvlogFile, NULL, PG_IOLBF, 0);
 	}
+	fd = atoi(*argv++);
+	if (fd != -1)
+	{
+		jsonlogFile = fdopen(fd, "a");
+		setvbuf(jsonlogFile, NULL, PG_IOLBF, 0);
+	}
 #else							/* WIN32 */
 	fd = atoi(*argv++);
 	if (fd != 0)
@@ -840,6 +895,16 @@ syslogger_parseArgs(int argc, char *argv[])
 			setvbuf(csvlogFile, NULL, PG_IOLBF, 0);
 		}
 	}
+	fd = atoi(*argv++);
+	if (fd != 0)
+	{
+		fd = _open_osfhandle(fd, _O_APPEND | _O_TEXT);
+		if (fd > 0)
+		{
+			jsonlogFile = fdopen(fd, "a");
+			setvbuf(jsonlogFile, NULL, PG_IOLBF, 0);
+		}
+	}
 #endif							/* WIN32 */
 }
 #endif							/* EXEC_BACKEND */
@@ -892,6 +957,7 @@ process_pipe_input(char *logbuffer, int *bytes_in_logbuffer)
 			p.pid != 0 &&
 			(p.is_last == 't' || p.is_last == 'f') &&
 			(p.dest == LOG_DESTINATION_CSVLOG ||
+			 p.dest == LOG_DESTINATION_JSONLOG ||
 			 p.dest == LOG_DESTINATION_STDERR))
 		{
 			List	   *buffer_list;
@@ -1079,19 +1145,23 @@ write_syslogger_file(const char *buffer, int count, int destination)
 	FILE	   *logfile;
 
 	/*
-	 * If we're told to write to csvlogFile, but it's not open, dump the data
-	 * to syslogFile (which is always open) instead.  This can happen if CSV
+	 * If we're told to write to a structured log file, but it's not open, dump the data
+	 * to syslogFile (which is always open) instead.  This can happen if structured
 	 * output is enabled after postmaster start and we've been unable to open
-	 * csvlogFile.  There are also race conditions during a parameter change
-	 * whereby backends might send us CSV output before we open csvlogFile or
-	 * after we close it.  Writing CSV-formatted output to the regular log
+	 * logFile.  There are also race conditions during a parameter change
+	 * whereby backends might send us structured output before we open the logFile or
+	 * after we close it.  Writing formatted output to the regular log
 	 * file isn't great, but it beats dropping log output on the floor.
 	 *
-	 * Think not to improve this by trying to open csvlogFile on-the-fly.  Any
+	 * Think not to improve this by trying to open logFile on-the-fly.  Any
 	 * failure in that would lead to recursion.
 	 */
-	logfile = (destination == LOG_DESTINATION_CSVLOG &&
-			   csvlogFile != NULL) ? csvlogFile : syslogFile;
+	if ((destination & LOG_DESTINATION_CSVLOG) && csvlogFile != NULL)
+		logfile = csvlogFile;
+	else if ((destination & LOG_DESTINATION_JSONLOG) && jsonlogFile != NULL)
+		logfile = jsonlogFile;
+	else
+		logfile = syslogFile;
 
 	rc = fwrite(buffer, 1, count, logfile);
 
@@ -1162,7 +1232,8 @@ pipeThread(void *arg)
 		if (Log_RotationSize > 0)
 		{
 			if (ftell(syslogFile) >= Log_RotationSize * 1024L ||
-				(csvlogFile != NULL && ftell(csvlogFile) >= Log_RotationSize * 1024L))
+				(csvlogFile != NULL && ftell(csvlogFile) >= Log_RotationSize * 1024L) ||
+				(jsonlogFile != NULL && ftell(jsonlogFile) >= Log_RotationSize * 1024L))
 				SetLatch(MyLatch);
 		}
 		LeaveCriticalSection(&sysloggerSection);
@@ -1235,6 +1306,7 @@ logfile_rotate(bool time_based_rotation, int size_rotation_for)
 {
 	char	   *filename;
 	char	   *csvfilename = NULL;
+	char	   *jsonfilename = NULL;
 	pg_time_t	fntime;
 	FILE	   *fh;
 
@@ -1252,6 +1324,9 @@ logfile_rotate(bool time_based_rotation, int size_rotation_for)
 	filename = logfile_getname(fntime, NULL);
 	if (Log_destination & LOG_DESTINATION_CSVLOG)
 		csvfilename = logfile_getname(fntime, ".csv");
+	if (Log_destination & LOG_DESTINATION_JSONLOG)
+		jsonfilename = logfile_getname(fntime, ".json");
+
 
 	/*
 	 * Decide whether to overwrite or append.  We can overwrite if (a)
@@ -1289,6 +1364,8 @@ logfile_rotate(bool time_based_rotation, int size_rotation_for)
 				pfree(filename);
 			if (csvfilename)
 				pfree(csvfilename);
+			if (jsonfilename)
+				pfree(jsonfilename);
 			return;
 		}
 
@@ -1303,10 +1380,10 @@ logfile_rotate(bool time_based_rotation, int size_rotation_for)
 	}
 
 	/*
-	 * Same as above, but for csv file.  Note that if LOG_DESTINATION_CSVLOG
-	 * was just turned on, we might have to open csvlogFile here though it was
+	 * Same as above, but for the CSV and JSON log files.  Note that if one of
+	 * those destinations was just turned on, we might have to open its file here though it was
 	 * not open before.  In such a case we'll append not overwrite (since
-	 * last_csv_file_name will be NULL); that is consistent with the normal
+	 * last_*_file_name will be NULL); that is consistent with the normal
 	 * rules since it's not a time-based rotation.
 	 */
 	if ((Log_destination & LOG_DESTINATION_CSVLOG) &&
@@ -1362,11 +1439,66 @@ logfile_rotate(bool time_based_rotation, int size_rotation_for)
 			pfree(last_csv_file_name);
 		last_csv_file_name = NULL;
 	}
+	if ((Log_destination & LOG_DESTINATION_JSONLOG) &&
+		(jsonlogFile == NULL ||
+		 time_based_rotation || (size_rotation_for & LOG_DESTINATION_JSONLOG)))
+	{
+		if (Log_truncate_on_rotation && time_based_rotation &&
+			last_json_file_name != NULL &&
+			strcmp(jsonfilename, last_json_file_name) != 0)
+			fh = logfile_open(jsonfilename, "w", true);
+		else
+			fh = logfile_open(jsonfilename, "a", true);
+
+		if (!fh)
+		{
+			/*
+			 * ENFILE/EMFILE are not too surprising on a busy system; just
+			 * keep using the old file till we manage to get a new one.
+			 * Otherwise, assume something's wrong with Log_directory and stop
+			 * trying to create files.
+			 */
+			if (errno != ENFILE && errno != EMFILE)
+			{
+				ereport(LOG,
+						(errmsg("disabling automatic rotation (use SIGHUP to re-enable)")));
+				rotation_disabled = true;
+			}
+
+			if (filename)
+				pfree(filename);
+			if (jsonfilename)
+				pfree(jsonfilename);
+			return;
+		}
+
+		if (jsonlogFile != NULL)
+			fclose(jsonlogFile);
+		jsonlogFile = fh;
+
+		/* instead of pfree'ing filename, remember it for next time */
+		if (last_json_file_name != NULL)
+			pfree(last_json_file_name);
+		last_json_file_name = jsonfilename;
+		jsonfilename = NULL;
+	}
+	else if (!(Log_destination & LOG_DESTINATION_JSONLOG) &&
+			 jsonlogFile != NULL)
+	{
+		/* JSONLOG was just turned off, so close the old file */
+		fclose(jsonlogFile);
+		jsonlogFile = NULL;
+		if (last_json_file_name != NULL)
+			pfree(last_json_file_name);
+		last_json_file_name = NULL;
+	}
 
 	if (filename)
 		pfree(filename);
 	if (csvfilename)
 		pfree(csvfilename);
+	if (jsonfilename)
+		pfree(jsonfilename);
 
 	update_metainfo_datafile();
 
@@ -1454,7 +1586,8 @@ update_metainfo_datafile(void)
 	mode_t		oumask;
 
 	if (!(Log_destination & LOG_DESTINATION_STDERR) &&
-		!(Log_destination & LOG_DESTINATION_CSVLOG))
+		!(Log_destination & LOG_DESTINATION_CSVLOG) &&
+		!(Log_destination & LOG_DESTINATION_JSONLOG))
 	{
 		if (unlink(LOG_METAINFO_DATAFILE) < 0 && errno != ENOENT)
 			ereport(LOG,
@@ -1512,6 +1645,18 @@ update_metainfo_datafile(void)
 			return;
 		}
 	}
+	if (last_json_file_name && (Log_destination & LOG_DESTINATION_JSONLOG))
+	{
+		if (fprintf(fh, "jsonlog %s\n", last_json_file_name) < 0)
+		{
+			ereport(LOG,
+					(errcode_for_file_access(),
+					 errmsg("could not write file \"%s\": %m",
+							LOG_METAINFO_DATAFILE_TMP)));
+			fclose(fh);
+			return;
+		}
+	}
 	fclose(fh);
 
 	if (rename(LOG_METAINFO_DATAFILE_TMP, LOG_METAINFO_DATAFILE) != 0)
diff --git a/src/backend/utils/adt/misc.c b/src/backend/utils/adt/misc.c
index 88faf4dfd7..4931859627 100644
--- a/src/backend/utils/adt/misc.c
+++ b/src/backend/utils/adt/misc.c
@@ -843,11 +843,12 @@ pg_current_logfile(PG_FUNCTION_ARGS)
 	{
 		logfmt = text_to_cstring(PG_GETARG_TEXT_PP(0));
 
-		if (strcmp(logfmt, "stderr") != 0 && strcmp(logfmt, "csvlog") != 0)
+		if (strcmp(logfmt, "stderr") != 0 && strcmp(logfmt, "csvlog") != 0 &&
+				strcmp(logfmt, "jsonlog") != 0)
 			ereport(ERROR,
 					(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
 					 errmsg("log format \"%s\" is not supported", logfmt),
-					 errhint("The supported log formats are \"stderr\" and \"csvlog\".")));
+					 errhint("The supported log formats are \"stderr\", \"csvlog\", and \"jsonlog\".")));
 	}
 
 	fd = AllocateFile(LOG_METAINFO_DATAFILE, "r");
diff --git a/src/backend/utils/error/elog.c b/src/backend/utils/error/elog.c
index cd13111708..2689e37be9 100644
--- a/src/backend/utils/error/elog.c
+++ b/src/backend/utils/error/elog.c
@@ -80,6 +80,7 @@
 #include "storage/proc.h"
 #include "tcop/tcopprot.h"
 #include "utils/guc.h"
+#include "utils/json.h"
 #include "utils/memutils.h"
 #include "utils/ps_status.h"
 
@@ -180,6 +181,7 @@ static void setup_formatted_start_time(void);
 static const char *process_log_prefix_padding(const char *p, int *padding);
 static void log_line_prefix(StringInfo buf, ErrorData *edata);
 static void write_csvlog(ErrorData *edata);
+static void write_jsonlog(ErrorData *edata);
 static void send_message_to_server_log(ErrorData *edata);
 static void write_pipe_chunks(char *data, int len, int dest);
 static void send_message_to_frontend(ErrorData *edata);
@@ -2980,6 +2982,274 @@ write_csvlog(ErrorData *edata)
 	pfree(buf.data);
 }
 
+/*
+ * appendJSONKeyValue
+ * Append a comma followed by a JSON key/value pair to the given StringInfo.
+ * Both are escaped as JSON string literals; a NULL value makes it a no-op.
+ */
+static void
+appendJSONKeyValue(StringInfo buf, const char *key, const char *value)
+{
+	if (value == NULL)
+		return;
+	appendStringInfoChar(buf, ',');
+	escape_json(buf, key);
+	appendStringInfoChar(buf, ':');
+	escape_json(buf, value);
+}
+
+/*
+ * appendJSONKeyValueFmt
+ * Format the fmt string with the supplied varargs, then append the key
+ * and the formatted value via appendJSONKeyValue (which escapes both).
+ * errno is restored before each format attempt so %m expands correctly.
+ */
+static void
+appendJSONKeyValueFmt(StringInfo buf, const char *key, const char *fmt,...)
+{
+	int			save_errno = errno;
+	size_t		len = 128;		/* initial assumption about buffer size */
+	char	    *value;
+
+	for (;;)
+	{
+		va_list		args;
+		size_t		newlen;
+
+		/*
+		 * Allocate result buffer.  palloc ereports on out-of-memory, so
+		 * no NULL check is needed here.
+		 */
+		value = (char *) palloc(len);
+
+		/* Try to format the data. */
+		errno = save_errno;
+		va_start(args, fmt);
+		newlen = pvsnprintf(value, len, fmt, args);
+		va_end(args);
+
+		if (newlen < len)
+			break; /* success */
+
+		/* Release buffer and loop around to try again with larger len. */
+		pfree(value);
+		len = newlen;
+	}
+	appendJSONKeyValue(buf, key, value);
+	/* Clean up */
+	pfree(value);
+}
+
+/*
+ * appendJSONKeyValueAsInt
+ * Append a comma, the escaped JSON key, and the value formatted as a
+ * signed integer (an unquoted JSON number) to the given StringInfo.
+ */
+static void
+appendJSONKeyValueAsInt(StringInfo buf, const char *key, int value)
+{
+	appendStringInfoChar(buf, ',');
+	escape_json(buf, key);
+	appendStringInfoChar(buf, ':');
+	appendStringInfo(buf, "%d", value);
+}
+
+/*
+ * appendJSONKeyValueAsUInt
+ * Append to given StringInfo a comma followed by a JSON key and value with
+ * value being formatted as an unsigned integer (a JSON number).
+ */
+static void
+appendJSONKeyValueAsUInt(StringInfo buf, const char *key, unsigned int value)
+{
+	appendStringInfoChar(buf, ',');
+	escape_json(buf, key);
+	appendStringInfoChar(buf, ':');
+	appendStringInfo(buf, "%u", value);
+}
+
+/*
+ * appendJSONKeyValueAsLong
+ * Append to given StringInfo a comma followed by a JSON key and value with
+ * value being formatted as a long (a JSON number).
+ */
+static void
+appendJSONKeyValueAsLong(StringInfo buf, const char *key, long value)
+{
+	appendStringInfoChar(buf, ',');
+	escape_json(buf, key);
+	appendStringInfoChar(buf, ':');
+	appendStringInfo(buf, "%ld", value);
+}
+
+/*
+ * Write a log entry as a single line of JSON (one object per line).
+ */
+static void
+write_jsonlog(ErrorData *edata)
+{
+	StringInfoData	buf;
+
+	/* static counter for line numbers */
+	static long	log_line_number = 0;
+
+	/* Has the counter been reset in the current process? */
+	static int	log_my_pid = 0;
+
+	if (log_my_pid != MyProcPid)
+	{
+		log_line_number = 0;
+		log_my_pid = MyProcPid;
+		formatted_start_time[0] = '\0';
+	}
+	log_line_number++;
+
+	initStringInfo(&buf);
+
+	/* Initialize string */
+	appendStringInfoChar(&buf, '{');
+
+	/*
+	 * timestamp with milliseconds
+	 *
+	 * Check if the timestamp is already calculated for the syslog message,
+	 * and use it if so.  Otherwise, get the current timestamp.  This is done
+	 * to put same timestamp in both syslog and jsonlog messages.
+	 */
+	if (formatted_log_time[0] == '\0')
+		setup_formatted_log_time();
+
+	/* First field is written directly since it must not get a leading comma */
+	escape_json(&buf, "timestamp");
+	appendStringInfoChar(&buf, ':');
+	escape_json(&buf, formatted_log_time);
+
+	/* username */
+	if (MyProcPort)
+		appendJSONKeyValue(&buf, "user", MyProcPort->user_name);
+
+	/* database name */
+	if (MyProcPort)
+		appendJSONKeyValue(&buf, "dbname", MyProcPort->database_name);
+
+	/* Process ID */
+	if (MyProcPid != 0)
+		appendJSONKeyValueAsInt(&buf, "pid", MyProcPid);
+
+	/* Remote host and port */
+	if (MyProcPort && MyProcPort->remote_host)
+	{
+		appendJSONKeyValue(&buf, "remote_host", MyProcPort->remote_host);
+		if (MyProcPort->remote_port && MyProcPort->remote_port[0] != '\0')
+			appendJSONKeyValue(&buf, "remote_port", MyProcPort->remote_port);
+	}
+
+	/* Session id */
+	appendJSONKeyValueFmt(&buf, "session_id", "%lx.%x", (long) MyStartTime, MyProcPid);
+
+	/* Line number */
+	appendJSONKeyValueAsLong(&buf, "line_num", log_line_number);
+
+	/* PS display */
+	if (MyProcPort)
+	{
+		StringInfoData	msgbuf;
+		const char	   *psdisp;
+		int				displen;
+
+		initStringInfo(&msgbuf);
+		psdisp = get_ps_display(&displen);
+		appendBinaryStringInfo(&msgbuf, psdisp, displen);
+		appendJSONKeyValue(&buf, "ps", msgbuf.data);
+
+		pfree(msgbuf.data);
+	}
+
+	/* session start timestamp */
+	if (formatted_start_time[0] == '\0')
+		setup_formatted_start_time();
+	appendJSONKeyValue(&buf, "session_start", formatted_start_time);
+
+	/* Virtual transaction id */
+	/* keep VXID format in sync with lockfuncs.c */
+	if (MyProc != NULL && MyProc->backendId != InvalidBackendId)
+		appendJSONKeyValueFmt(&buf, "vxid", "%d/%u", MyProc->backendId, MyProc->lxid);
+
+	/* Transaction id */
+	appendJSONKeyValueFmt(&buf, "txid", "%u", GetTopTransactionIdIfAny());
+
+	/* Error severity */
+	if (edata->elevel)
+		appendJSONKeyValue(&buf, "error_severity", (char *) error_severity(edata->elevel));
+
+	/* SQL state code */
+	if (edata->sqlerrcode)
+		appendJSONKeyValue(&buf, "state_code", unpack_sql_state(edata->sqlerrcode));
+
+	/* errdetail or error_detail log */
+	if (edata->detail_log)
+		appendJSONKeyValue(&buf, "detail", edata->detail_log);
+	else if (edata->detail)
+		appendJSONKeyValue(&buf, "detail", edata->detail);
+
+	/* errhint */
+	if (edata->hint)
+		appendJSONKeyValue(&buf, "hint", edata->hint);
+
+	/* Internal query */
+	if (edata->internalquery)
+		appendJSONKeyValue(&buf, "internal_query", edata->internalquery);
+
+	/* If the internal query got printed, print internal pos, too */
+	if (edata->internalpos > 0 && edata->internalquery != NULL)
+		appendJSONKeyValueAsUInt(&buf, "internal_position", edata->internalpos);
+
+	/* errcontext */
+	if (edata->context && !edata->hide_ctx)
+		appendJSONKeyValue(&buf, "context", edata->context);
+
+	/* user query --- only reported if not disabled by the caller */
+	if (is_log_level_output(edata->elevel, log_min_error_statement) &&
+		debug_query_string != NULL &&
+		!edata->hide_stmt)
+	{
+		appendJSONKeyValue(&buf, "statement", debug_query_string);
+		if (edata->cursorpos > 0)
+			appendJSONKeyValueAsInt(&buf, "cursor_position", edata->cursorpos);
+	}
+
+	/* file error location */
+	if (Log_error_verbosity >= PGERROR_VERBOSE)
+	{
+		if (edata->funcname)
+			appendJSONKeyValue(&buf, "func_name", edata->funcname);
+		if (edata->filename)
+		{
+			appendJSONKeyValue(&buf, "file_name", edata->filename);
+			appendJSONKeyValueAsInt(&buf, "file_line_num", edata->lineno);
+		}
+	}
+
+	/* Application name */
+	if (application_name && application_name[0] != '\0')
+		appendJSONKeyValue(&buf, "application_name", application_name);
+
+	/* Error message */
+	appendJSONKeyValue(&buf, "message", edata->message);
+
+	/* Finish string */
+	appendStringInfoChar(&buf, '}');
+	appendStringInfoChar(&buf, '\n');
+
+	/* If in the syslogger process, try to write messages direct to file */
+	if (MyBackendType == B_LOGGER)
+		write_syslogger_file(buf.data, buf.len, LOG_DESTINATION_JSONLOG);
+	else
+		write_pipe_chunks(buf.data, buf.len, LOG_DESTINATION_JSONLOG);
+
+	pfree(buf.data);
+}
+
 /*
  * Unpack MAKE_SQLSTATE code. Note that this returns a pointer to a
  * static buffer.
@@ -3213,6 +3483,30 @@ send_message_to_server_log(ErrorData *edata)
 			pfree(buf.data);
 		}
 	}
+	/* NOTE(review): "else if" skips jsonlog when csvlog is also enabled — confirm intended */
+	else if (Log_destination & LOG_DESTINATION_JSONLOG)
+	{
+		if (redirection_done || MyBackendType == B_LOGGER)
+		{
+			/*
+			 * send JSON data if it's safe to do so (syslogger doesn't need the
+			 * pipe). First get back the space in the message buffer.
+			 */
+			pfree(buf.data);
+			write_jsonlog(edata);
+		}
+		else
+		{
+			/*
+			 * syslogger not up (yet), so just dump the message to stderr,
+			 * unless we already did so above.
+			 */
+			if (!(Log_destination & LOG_DESTINATION_STDERR) &&
+				whereToSendOutput != DestDebug)
+				write_console(buf.data, buf.len);
+			pfree(buf.data);
+		}
+	}
 	else
 	{
 		pfree(buf.data);
@@ -3256,7 +3550,7 @@ write_pipe_chunks(char *data, int len, int dest)
 	/* write all but the last chunk */
 	while (len > PIPE_MAX_PAYLOAD)
 	{
-		p.proto.is_last = (dest == LOG_DESTINATION_CSVLOG ? 'F' : 'f');
+		p.proto.is_last = ((dest == LOG_DESTINATION_CSVLOG || dest == LOG_DESTINATION_JSONLOG) ? 'F' : 'f');
 		p.proto.len = PIPE_MAX_PAYLOAD;
 		memcpy(p.proto.data, data, PIPE_MAX_PAYLOAD);
 		rc = write(fd, &p, PIPE_HEADER_SIZE + PIPE_MAX_PAYLOAD);
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index 467b0fd6fe..ea4fa0c5c1 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -11691,6 +11691,8 @@ check_log_destination(char **newval, void **extra, GucSource source)
 			newlogdest |= LOG_DESTINATION_STDERR;
 		else if (pg_strcasecmp(tok, "csvlog") == 0)
 			newlogdest |= LOG_DESTINATION_CSVLOG;
+		else if (pg_strcasecmp(tok, "jsonlog") == 0)
+			newlogdest |= LOG_DESTINATION_JSONLOG;
 #ifdef HAVE_SYSLOG
 		else if (pg_strcasecmp(tok, "syslog") == 0)
 			newlogdest |= LOG_DESTINATION_SYSLOG;
diff --git a/src/bin/pg_ctl/t/006_jsonlog.pl b/src/bin/pg_ctl/t/006_jsonlog.pl
new file mode 100644
index 0000000000..e6b9979fd7
--- /dev/null
+++ b/src/bin/pg_ctl/t/006_jsonlog.pl
@@ -0,0 +1,98 @@
+use strict;
+use warnings;
+
+use PostgresNode;
+use TestLib;
+use Test::More tests => 4;
+use Time::HiRes qw(usleep);
+
+# Set up a node with the logging collector on and jsonlog as the destination
+my $node = PostgresNode->new('primary');
+$node->init();
+$node->append_conf(
+	'postgresql.conf', qq(
+logging_collector = on
+lc_messages = 'C'
+log_destination = 'jsonlog'
+));
+
+
+$node->start();
+
+# Verify that output from a failing statement reaches the JSON log file
+
+$node->psql('postgres', 'SELECT 1/0');
+
+my $current_logfiles = slurp_file($node->data_dir . '/current_logfiles');
+
+note "current_logfiles = $current_logfiles";
+
+like(
+	$current_logfiles,
+	qr|^jsonlog log/postgresql-.*json$|,
+	'current_logfiles is sane');
+
+my $lfname = $current_logfiles;
+$lfname =~ s/^jsonlog //;
+chomp $lfname;
+
+# might need to retry if logging collector process is slow...
+my $max_attempts = 100;
+
+my $first_logfile;
+for (my $attempts = 0; $attempts < $max_attempts; $attempts++)
+{
+	$first_logfile = slurp_file($node->data_dir . '/' . $lfname);
+	last if $first_logfile =~ m/division by zero/;
+	usleep(100_000);
+}
+
+like(
+	$first_logfile,
+	qr/"statement":"SELECT 1\/0",.*"message":"division by zero/,
+	'found expected log file content');
+
+# Sleep 2 seconds and ask for log rotation; this should result in
+# output into a different log file name.
+sleep(2);
+$node->logrotate();
+
+# pg_ctl logrotate doesn't wait for rotation request to be completed.
+# Allow a bit of time for it to happen.
+my $new_current_logfiles;
+for (my $attempts = 0; $attempts < $max_attempts; $attempts++)
+{
+	$new_current_logfiles = slurp_file($node->data_dir . '/current_logfiles');
+	last if $new_current_logfiles ne $current_logfiles;
+	usleep(100_000);
+}
+
+note "now current_logfiles = $new_current_logfiles";
+
+like(
+	$new_current_logfiles,
+	qr|^jsonlog log/postgresql-.*json$|,
+	'new current_logfiles is sane');
+
+$lfname = $new_current_logfiles;
+$lfname =~ s/^jsonlog //;
+chomp $lfname;
+
+# Verify that log output reaches the rotated file, too
+
+$node->psql('postgres', 'fee fi fo fum');
+
+my $second_logfile;
+for (my $attempts = 0; $attempts < $max_attempts; $attempts++)
+{
+	$second_logfile = slurp_file($node->data_dir . '/' . $lfname);
+	last if $second_logfile =~ m/syntax error/;
+	usleep(100_000);
+}
+
+like(
+	$second_logfile,
+	qr/"statement":"fee fi fo fum",.*"message":"syntax error/,
+	'found expected log file content in new log file');
+
+$node->stop();
diff --git a/src/include/utils/elog.h b/src/include/utils/elog.h
index f53607e12e..c0c699f485 100644
--- a/src/include/utils/elog.h
+++ b/src/include/utils/elog.h
@@ -436,6 +436,7 @@ extern bool syslog_split_messages;
 #define LOG_DESTINATION_SYSLOG	 2
 #define LOG_DESTINATION_EVENTLOG 4
 #define LOG_DESTINATION_CSVLOG	 8
+#define LOG_DESTINATION_JSONLOG	16
 
 /* Other exported functions */
 extern void DebugFileOpen(void);
-- 
2.17.1

Reply via email to