diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml
index ac339fb..ed833e0 100644
--- a/doc/src/sgml/config.sgml
+++ b/doc/src/sgml/config.sgml
@@ -5732,7 +5732,8 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
         platforms.  You can use the <xref linkend="pgtesttiming"> tool to
         measure the overhead of timing on your system.
         I/O timing information is
-        displayed in <xref linkend="pg-stat-database-view">, in the output of
+        displayed in <xref linkend="pg-stat-database-view"> and
+        <xref linkend="pg-stat-walwrites-view">, in the output of
         <xref linkend="sql-explain"> when the <literal>BUFFERS</> option is
         used, and by <xref linkend="pgstatstatements">.  Only superusers can
         change this setting.
diff --git a/doc/src/sgml/monitoring.sgml b/doc/src/sgml/monitoring.sgml
index 9856968..b5ed688 100644
--- a/doc/src/sgml/monitoring.sgml
+++ b/doc/src/sgml/monitoring.sgml
@@ -357,6 +357,14 @@ postgres   27093  0.0  0.0  30096  2752 ?        Ss   11:34   0:00 postgres: ser
      </row>
 
      <row>
+      <entry><structname>pg_stat_walwrites</><indexterm><primary>pg_stat_walwrites</primary></indexterm></entry>
+      <entry>One row only, showing statistics about
+       WAL write activity. See
+       <xref linkend="pg-stat-walwrites-view"> for details.
+      </entry>
+     </row>
+
+     <row>
       <entry><structname>pg_stat_database</><indexterm><primary>pg_stat_database</primary></indexterm></entry>
       <entry>One row per database, showing database-wide statistics. See
        <xref linkend="pg-stat-database-view"> for details.
@@ -2207,6 +2215,108 @@ SELECT pid, wait_event_type, wait_event FROM pg_stat_activity WHERE wait_event i
    single row, containing global data for the cluster.
   </para>
 
+  <table id="pg-stat-walwrites-view" xreflabel="pg_stat_walwrites">
+   <title><structname>pg_stat_walwrites</structname> View</title>
+
+   <tgroup cols="3">
+    <thead>
+     <row>
+      <entry>Column</entry>
+      <entry>Type</entry>
+      <entry>Description</entry>
+     </row>
+    </thead>
+
+    <tbody>
+     <row>
+      <entry><structfield>writes</></entry>
+      <entry><type>bigint</type></entry>
+      <entry>Number of WAL writes carried out by background processes and
+       workers</entry>
+     </row>
+     <row>
+      <entry><structfield>backend_writes</></entry>
+      <entry><type>bigint</type></entry>
+      <entry>Number of WAL writes carried out by backend processes</entry>
+     </row>
+     <row>
+      <entry><structfield>dirty_writes</></entry>
+      <entry><type>bigint</type></entry>
+      <entry>Number of dirty WAL buffer writes carried out by background
+       processes and workers when the <xref linkend="guc-wal-buffers"> are
+       full</entry>
+     </row>
+     <row>
+      <entry><structfield>backend_dirty_writes</></entry>
+      <entry><type>bigint</type></entry>
+      <entry>Number of dirty WAL buffer writes carried out by backend
+       processes when the <xref linkend="guc-wal-buffers"> are full</entry>
+     </row>
+     <row>
+      <entry><structfield>write_blocks</></entry>
+      <entry><type>bigint</type></entry>
+      <entry>Number of WAL pages written to disk by background processes and
+       workers</entry>
+     </row>
+     <row>
+      <entry><structfield>backend_write_blocks</></entry>
+      <entry><type>bigint</type></entry>
+      <entry>Number of WAL pages written to disk by backend processes</entry>
+     </row>
+     <row>
+      <entry><structfield>write_time</></entry>
+      <entry><type>double precision</type></entry>
+      <entry>
+       Total amount of time spent writing WAL to disk by background processes
+       and workers, in milliseconds.
+       This field is populated only when <xref linkend="guc-track-io-timing">
+       is enabled.
+      </entry>
+     </row>
+     <row>
+      <entry><structfield>backend_write_time</></entry>
+      <entry><type>double precision</type></entry>
+      <entry>
+       Total amount of time spent writing WAL to disk by backend processes,
+       in milliseconds.
+       This field is populated only when <xref linkend="guc-track-io-timing">
+       is enabled.
+      </entry>
+     </row>
+     <row>
+      <entry><structfield>sync_time</></entry>
+      <entry><type>double precision</type></entry>
+      <entry>
+       Total amount of time spent synchronizing WAL to disk by background
+       processes and workers, in milliseconds.
+       This field is populated only when <xref linkend="guc-track-io-timing">
+       is enabled.
+      </entry>
+     </row>
+     <row>
+      <entry><structfield>backend_sync_time</></entry>
+      <entry><type>double precision</type></entry>
+      <entry>
+       Total amount of time spent synchronizing WAL to disk by backend
+       processes, in milliseconds.
+       This field is populated only when <xref linkend="guc-track-io-timing">
+       is enabled.
+      </entry>
+     </row>
+     <row>
+      <entry><structfield>stats_reset</></entry>
+      <entry><type>timestamp with time zone</type></entry>
+      <entry>Time at which these statistics were last reset</entry>
+     </row>
+    </tbody>
+   </tgroup>
+  </table>
+
+  <para>
+   The <structname>pg_stat_walwrites</structname> view will always have a
+   single row, containing data about the WAL write activity of the cluster.
+  </para>
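+
+  <para>
+   For example, the following query shows how much of the WAL write activity
+   is performed by ordinary backends rather than by the background processes
+   and workers:
+<programlisting>
+SELECT writes, backend_writes, dirty_writes, backend_dirty_writes
+    FROM pg_stat_walwrites;
+</programlisting>
+  </para>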
+
   <table id="pg-stat-database-view" xreflabel="pg_stat_database">
    <title><structname>pg_stat_database</structname> View</title>
    <tgroup cols="3">
@@ -2986,6 +3096,8 @@ SELECT pid, wait_event_type, wait_event FROM pg_stat_activity WHERE wait_event i
        counters shown in the <structname>pg_stat_bgwriter</> view.
        Calling <literal>pg_stat_reset_shared('archiver')</> will zero all the
        counters shown in the <structname>pg_stat_archiver</> view.
+       Calling <literal>pg_stat_reset_shared('walwrites')</> will zero all the
+       counters shown in the <structname>pg_stat_walwrites</> view.
       </entry>
      </row>
 
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index 61ca81d..1f474ae 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -860,6 +860,7 @@ static XLogRecPtr XLogGetReplicationSlotMinimumLSN(void);
 
 static void AdvanceXLInsertBuffer(XLogRecPtr upto, bool opportunistic);
 static bool XLogCheckpointNeeded(XLogSegNo new_segno);
+static bool am_background_process(void);
 static void XLogWrite(XLogwrtRqst WriteRqst, bool flexible);
 static bool InstallXLogFileSegment(XLogSegNo *segno, char *tmppath,
 					   bool find_free, XLogSegNo max_segno,
@@ -2109,6 +2110,10 @@ AdvanceXLInsertBuffer(XLogRecPtr upto, bool opportunistic)
 					WriteRqst.Write = OldPageRqstPtr;
 					WriteRqst.Flush = 0;
 					XLogWrite(WriteRqst, false);
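+					/* Count a write forced by full WAL buffers */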
+					if (am_background_process())
+						LocalWalWritesStats.m_dirty_writes++;
+					else
+						LocalWalWritesStats.m_backend_dirty_writes++;
 					LWLockRelease(WALWriteLock);
 					TRACE_POSTGRESQL_WAL_BUFFER_WRITE_DIRTY_DONE();
 				}
@@ -2319,6 +2324,34 @@ XLogCheckpointNeeded(XLogSegNo new_segno)
 }
 
 /*
+ * Check whether the current process is a normal backend or not.
+ * Only background processes that perform some WAL write activity
+ * are checked for; other background processes are not considered.
+ * All background workers are treated as WAL write activity workers.
+ *
+ * Returns FALSE - when the current process is a normal backend
+ *		   TRUE  - when the current process is a background process/worker
+ */
+static bool
+am_background_process(void)
+{
+	/* Is the current process a background process or worker? */
+	if (AmBackgroundWriterProcess() ||
+		AmWalWriterProcess() ||
+		AmCheckpointerProcess() ||
+		AmStartupProcess() ||
+		IsBackgroundWorker ||
+		am_walsender ||
+		am_autovacuum_worker)
+	{
+		return true;
+	}
+
+	return false;
+}
+
+/*
  * Write and/or fsync the log at least as far as WriteRqst indicates.
  *
  * If flexible == TRUE, we don't have to write as far as WriteRqst, but
@@ -2341,6 +2374,9 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible)
 	int			npages;
 	int			startidx;
 	uint32		startoffset;
+	instr_time	io_start,
+				io_time;
+	bool		is_background_process = am_background_process();
 
 	/* We should always be inside a critical section here */
 	Assert(CritSectionCount > 0);
@@ -2458,6 +2494,11 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible)
 			/* OK to write the page(s) */
 			from = XLogCtl->pages + startidx * (Size) XLOG_BLCKSZ;
 			nbytes = npages * (Size) XLOG_BLCKSZ;
+
+			/* Note the start time of the WAL write */
+			if (track_io_timing)
+				INSTR_TIME_SET_CURRENT(io_start);
+
 			nleft = nbytes;
 			do
 			{
@@ -2480,6 +2521,34 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible)
 				from += written;
 			} while (nleft > 0);
 
+			/* Accumulate the time spent writing WAL */
+			if (track_io_timing)
+			{
+				INSTR_TIME_SET_CURRENT(io_time);
+				INSTR_TIME_SUBTRACT(io_time, io_start);
+
+				if (is_background_process)
+					LocalWalWritesStats.m_total_write_time += INSTR_TIME_GET_MILLISEC(io_time);
+				else
+					LocalWalWritesStats.m_backend_total_write_time += INSTR_TIME_GET_MILLISEC(io_time);
+			}
+			else
+			{
+				LocalWalWritesStats.m_total_write_time = 0;
+				LocalWalWritesStats.m_backend_total_write_time = 0;
+			}
+
+			/* Account for the WAL write in the appropriate counters */
+			if (is_background_process)
+			{
+				LocalWalWritesStats.m_writes++;
+				LocalWalWritesStats.m_write_blocks += npages;
+			}
+			else
+			{
+				LocalWalWritesStats.m_backend_writes++;
+				LocalWalWritesStats.m_backend_write_blocks += npages;
+			}
+
 			/* Update state for write */
 			openLogOff += nbytes;
 			npages = 0;
@@ -2499,8 +2568,29 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible)
 			 */
 			if (finishing_seg)
 			{
+				/* Note the start time of the WAL sync */
+				if (track_io_timing)
+					INSTR_TIME_SET_CURRENT(io_start);
+
 				issue_xlog_fsync(openLogFile, openLogSegNo);
 
+				/* Accumulate the time spent syncing WAL */
+				if (track_io_timing)
+				{
+					INSTR_TIME_SET_CURRENT(io_time);
+					INSTR_TIME_SUBTRACT(io_time, io_start);
+
+					if (is_background_process)
+						LocalWalWritesStats.m_total_sync_time += INSTR_TIME_GET_MILLISEC(io_time);
+					else
+						LocalWalWritesStats.m_backend_total_sync_time += INSTR_TIME_GET_MILLISEC(io_time);
+				}
+				else
+				{
+					LocalWalWritesStats.m_total_sync_time = 0;
+					LocalWalWritesStats.m_backend_total_sync_time = 0;
+				}
+
 				/* signal that we need to wakeup walsenders later */
 				WalSndWakeupRequest();
 
@@ -2568,7 +2658,28 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible)
 				openLogOff = 0;
 			}
 
+			/* Note the start time of the WAL sync */
+			if (track_io_timing)
+				INSTR_TIME_SET_CURRENT(io_start);
+
 			issue_xlog_fsync(openLogFile, openLogSegNo);
+
+			/* Accumulate the time spent syncing WAL */
+			if (track_io_timing)
+			{
+				INSTR_TIME_SET_CURRENT(io_time);
+				INSTR_TIME_SUBTRACT(io_time, io_start);
+
+				if (is_background_process)
+					LocalWalWritesStats.m_total_sync_time += INSTR_TIME_GET_MILLISEC(io_time);
+				else
+					LocalWalWritesStats.m_backend_total_sync_time += INSTR_TIME_GET_MILLISEC(io_time);
+			}
+			else
+			{
+				LocalWalWritesStats.m_total_sync_time = 0;
+				LocalWalWritesStats.m_backend_total_sync_time = 0;
+			}
 		}
 
 		/* signal that we need to wakeup walsenders later */
@@ -7642,6 +7753,9 @@ StartupXLOG(void)
 		}
 	}
 
+	/* Report WAL write activity now */
+	pgstat_send_walwrites();
+
 	/*
 	 * Preallocate additional log files, if wanted.
 	 */
diff --git a/src/backend/catalog/system_views.sql b/src/backend/catalog/system_views.sql
index d357c8b..f424c9d 100644
--- a/src/backend/catalog/system_views.sql
+++ b/src/backend/catalog/system_views.sql
@@ -891,6 +891,9 @@ CREATE VIEW pg_stat_bgwriter AS
         pg_stat_get_buf_alloc() AS buffers_alloc,
         pg_stat_get_bgwriter_stat_reset_time() AS stats_reset;
 
+CREATE VIEW pg_stat_walwrites AS
+   SELECT * FROM pg_stat_get_walwrites() AS A;
+
 CREATE VIEW pg_stat_progress_vacuum AS
 	SELECT
 		S.pid AS pid, S.datid AS datid, D.datname AS datname,
diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c
index 5b43a66..0f3b007 100644
--- a/src/backend/commands/vacuumlazy.c
+++ b/src/backend/commands/vacuumlazy.c
@@ -344,6 +344,8 @@ lazy_vacuum_rel(Relation onerel, int options, VacuumParams *params,
 						 onerel->rd_rel->relisshared,
 						 new_live_tuples,
 						 vacrelstats->new_dead_tuples);
+	/* Send any WAL write activity that occurred during this vacuum. */
+	pgstat_send_walwrites();
 	pgstat_progress_end_command();
 
 	/* and log the action if appropriate */
diff --git a/src/backend/postmaster/autovacuum.c b/src/backend/postmaster/autovacuum.c
index 33ca749..5a42c69 100644
--- a/src/backend/postmaster/autovacuum.c
+++ b/src/backend/postmaster/autovacuum.c
@@ -132,7 +132,6 @@ int			Log_autovacuum_min_duration = -1;
 
 /* Flags to tell if we are in an autovacuum process */
 static bool am_autovacuum_launcher = false;
-static bool am_autovacuum_worker = false;
 
 /* Flags set by signal handlers */
 static volatile sig_atomic_t got_SIGHUP = false;
diff --git a/src/backend/postmaster/bgwriter.c b/src/backend/postmaster/bgwriter.c
index dcb4cf2..1cd623d 100644
--- a/src/backend/postmaster/bgwriter.c
+++ b/src/backend/postmaster/bgwriter.c
@@ -284,6 +284,7 @@ BackgroundWriterMain(void)
 		 * Send off activity statistics to the stats collector
 		 */
 		pgstat_send_bgwriter();
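+		/* Also send any accumulated WAL write statistics */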
+		pgstat_send_walwrites();
 
 		if (FirstCallSinceLastCheckpoint())
 		{
diff --git a/src/backend/postmaster/checkpointer.c b/src/backend/postmaster/checkpointer.c
index fe9041f..8549f8f 100644
--- a/src/backend/postmaster/checkpointer.c
+++ b/src/backend/postmaster/checkpointer.c
@@ -538,6 +538,7 @@ CheckpointerMain(void)
 		 * stats message types.)
 		 */
 		pgstat_send_bgwriter();
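+		/* Likewise, send any accumulated WAL write statistics */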
+		pgstat_send_walwrites();
 
 		/*
 		 * Sleep until we are signaled or it's time for another checkpoint or
diff --git a/src/backend/postmaster/pgstat.c b/src/backend/postmaster/pgstat.c
index 56a8bf2..5400757 100644
--- a/src/backend/postmaster/pgstat.c
+++ b/src/backend/postmaster/pgstat.c
@@ -139,6 +139,15 @@ char	   *pgstat_stat_tmpname = NULL;
  */
 PgStat_MsgBgWriter BgWriterStats;
 
+/*
+ * WAL write local statistics counters.
+ * These counters are populated in XLogWrite().
+ * Stored directly in a stats message structure so they can be sent
+ * to the stats collector without needing to copy things around.
+ * We assume this inits to zeroes.
+ */
+PgStat_MsgWalWrites LocalWalWritesStats;
+
 /* ----------
  * Local data
  * ----------
@@ -253,6 +262,7 @@ static int	localNumBackends = 0;
  */
 static PgStat_ArchiverStats archiverStats;
 static PgStat_GlobalStats globalStats;
+static PgStat_WalWritesStats walwritesStats;
 
 /*
  * List of OIDs of databases we need to write out.  If an entry is InvalidOid,
@@ -333,6 +343,7 @@ static void pgstat_recv_funcpurge(PgStat_MsgFuncpurge *msg, int len);
 static void pgstat_recv_recoveryconflict(PgStat_MsgRecoveryConflict *msg, int len);
 static void pgstat_recv_deadlock(PgStat_MsgDeadlock *msg, int len);
 static void pgstat_recv_tempfile(PgStat_MsgTempFile *msg, int len);
+static void pgstat_recv_walwrites(PgStat_MsgWalWrites *msg, int len);
 
 /* ------------------------------------------------------------
  * Public functions called from postmaster follow
@@ -874,6 +885,9 @@ pgstat_report_stat(bool force)
 
 	/* Now, send function statistics */
 	pgstat_send_funcstats();
+
+	/* Now, send WAL write statistics */
+	pgstat_send_walwrites();
 }
 
 /*
@@ -1300,11 +1314,13 @@ pgstat_reset_shared_counters(const char *target)
 		msg.m_resettarget = RESET_ARCHIVER;
 	else if (strcmp(target, "bgwriter") == 0)
 		msg.m_resettarget = RESET_BGWRITER;
+	else if (strcmp(target, "walwrites") == 0)
+		msg.m_resettarget = RESET_WALWRITES;
 	else
 		ereport(ERROR,
 				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
 				 errmsg("unrecognized reset target: \"%s\"", target),
-				 errhint("Target must be \"archiver\" or \"bgwriter\".")));
+				 errhint("Target must be \"archiver\", \"bgwriter\", or \"walwrites\".")));
 
 	pgstat_setheader(&msg.m_hdr, PGSTAT_MTYPE_RESETSHAREDCOUNTER);
 	pgstat_send(&msg, sizeof(msg));
@@ -2543,6 +2559,21 @@ pgstat_fetch_global(void)
 	return &globalStats;
 }
 
+/*
+ * ---------
+ * pgstat_fetch_stat_walwrites() -
+ *
+ *	Support function for the SQL-callable pgstat* functions. Returns
+ *	a pointer to the walwrites statistics struct.
+ * ---------
+ */
+PgStat_WalWritesStats *
+pgstat_fetch_stat_walwrites(void)
+{
+	backend_read_statsfile();
+
+	return &walwritesStats;
+}
 
 /* ------------------------------------------------------------
  * Functions for management of the shared-memory PgBackendStatus array
@@ -4123,6 +4154,39 @@ pgstat_send_bgwriter(void)
 	MemSet(&BgWriterStats, 0, sizeof(BgWriterStats));
 }
 
+/* ----------
+ * pgstat_send_walwrites() -
+ *
+ *		Send WAL write statistics to the collector
+ * ----------
+ */
+void
+pgstat_send_walwrites(void)
+{
+	/* We assume this initializes to zeroes */
+	static const PgStat_MsgWalWrites all_zeroes;
+
+	/*
+	 * This function can be called even if nothing at all has happened. In
+	 * this case, avoid sending a completely empty message to the stats
+	 * collector.
+	 */
+	if (memcmp(&LocalWalWritesStats, &all_zeroes, sizeof(PgStat_MsgWalWrites)) == 0)
+		return;
+
+	/*
+	 * Prepare and send the message
+	 */
+	pgstat_setheader(&LocalWalWritesStats.m_hdr, PGSTAT_MTYPE_WALWRITES);
+	pgstat_send(&LocalWalWritesStats, sizeof(LocalWalWritesStats));
+
+	/*
+	 * Clear out the statistics buffer, so it can be re-used.
+	 */
+	MemSet(&LocalWalWritesStats, 0, sizeof(LocalWalWritesStats));
+}
+
 
 /* ----------
  * PgstatCollectorMain() -
@@ -4339,6 +4403,10 @@ PgstatCollectorMain(int argc, char *argv[])
 					pgstat_recv_tempfile((PgStat_MsgTempFile *) &msg, len);
 					break;
 
+				case PGSTAT_MTYPE_WALWRITES:
+					pgstat_recv_walwrites((PgStat_MsgWalWrites *) &msg, len);
+					break;
+
 				default:
 					break;
 			}
@@ -4605,6 +4673,12 @@ pgstat_write_statsfiles(bool permanent, bool allDbs)
 	(void) rc;					/* we'll check for error with ferror */
 
 	/*
+	 * Write wal writes stats struct
+	 */
+	rc = fwrite(&walwritesStats, sizeof(walwritesStats), 1, fpout);
+	(void) rc;					/* we'll check for error with ferror */
+
+	/*
 	 * Walk through the database table.
 	 */
 	hash_seq_init(&hstat, pgStatDBHash);
@@ -4861,6 +4935,7 @@ pgstat_read_statsfiles(Oid onlydb, bool permanent, bool deep)
 	 */
 	memset(&globalStats, 0, sizeof(globalStats));
 	memset(&archiverStats, 0, sizeof(archiverStats));
+	memset(&walwritesStats, 0, sizeof(walwritesStats));
 
 	/*
 	 * Set the current timestamp (will be kept only in case we can't load an
@@ -4868,6 +4943,7 @@ pgstat_read_statsfiles(Oid onlydb, bool permanent, bool deep)
 	 */
 	globalStats.stat_reset_timestamp = GetCurrentTimestamp();
 	archiverStats.stat_reset_timestamp = globalStats.stat_reset_timestamp;
+	walwritesStats.stat_reset_timestamp = globalStats.stat_reset_timestamp;
 
 	/*
 	 * Try to open the stats file. If it doesn't exist, the backends simply
@@ -4920,6 +4996,16 @@ pgstat_read_statsfiles(Oid onlydb, bool permanent, bool deep)
 	}
 
 	/*
+	 * Read wal writes stats struct
+	 */
+	if (fread(&walwritesStats, 1, sizeof(walwritesStats), fpin) != sizeof(walwritesStats))
+	{
+		ereport(pgStatRunningInCollector ? LOG : WARNING,
+				(errmsg("corrupted statistics file \"%s\"", statfile)));
+		goto done;
+	}
+
+	/*
 	 * We found an existing collector stats file. Read it and put all the
 	 * hashtable entries into place.
 	 */
@@ -5208,6 +5294,7 @@ pgstat_read_db_statsfile_timestamp(Oid databaseid, bool permanent,
 	PgStat_StatDBEntry dbentry;
 	PgStat_GlobalStats myGlobalStats;
 	PgStat_ArchiverStats myArchiverStats;
+	PgStat_WalWritesStats myWalwritesStats;
 	FILE	   *fpin;
 	int32		format_id;
 	const char *statfile = permanent ? PGSTAT_STAT_PERMANENT_FILENAME : pgstat_stat_filename;
@@ -5262,6 +5349,18 @@ pgstat_read_db_statsfile_timestamp(Oid databaseid, bool permanent,
 		return false;
 	}
 
+	/*
+	 * Read wal writes stats struct
+	 */
+	if (fread(&myWalwritesStats, 1, sizeof(myWalwritesStats),
+			  fpin) != sizeof(myWalwritesStats))
+	{
+		ereport(pgStatRunningInCollector ? LOG : WARNING,
+				(errmsg("corrupted statistics file \"%s\"", statfile)));
+		FreeFile(fpin);
+		return false;
+	}
+
 	/* By default, we're going to return the timestamp of the global file. */
 	*ts = myGlobalStats.stats_timestamp;
 
@@ -5824,6 +5923,12 @@ pgstat_recv_resetsharedcounter(PgStat_MsgResetsharedcounter *msg, int len)
 		memset(&archiverStats, 0, sizeof(archiverStats));
 		archiverStats.stat_reset_timestamp = GetCurrentTimestamp();
 	}
+	else if (msg->m_resettarget == RESET_WALWRITES)
+	{
+		/* Reset the WAL write statistics for the cluster. */
+		memset(&walwritesStats, 0, sizeof(walwritesStats));
+		walwritesStats.stat_reset_timestamp = GetCurrentTimestamp();
+	}
 
 	/*
 	 * Presumably the sender of this message validated the target, don't
@@ -6004,6 +6109,27 @@ pgstat_recv_bgwriter(PgStat_MsgBgWriter *msg, int len)
 }
 
 /* ----------
+ * pgstat_recv_walwrites() -
+ *
+ *	Process a WALWRITES message.
+ * ----------
+ */
+static void
+pgstat_recv_walwrites(PgStat_MsgWalWrites *msg, int len)
+{
+	walwritesStats.writes += msg->m_writes;
+	walwritesStats.backend_writes += msg->m_backend_writes;
+	walwritesStats.dirty_writes += msg->m_dirty_writes;
+	walwritesStats.backend_dirty_writes += msg->m_backend_dirty_writes;
+	walwritesStats.write_blocks += msg->m_write_blocks;
+	walwritesStats.backend_write_blocks += msg->m_backend_write_blocks;
+	walwritesStats.total_write_time += msg->m_total_write_time;
+	walwritesStats.backend_total_write_time += msg->m_backend_total_write_time;
+	walwritesStats.total_sync_time += msg->m_total_sync_time;
+	walwritesStats.backend_total_sync_time += msg->m_backend_total_sync_time;
+}
+
+/* ----------
  * pgstat_recv_recoveryconflict() -
  *
  *	Process a RECOVERYCONFLICT message.
diff --git a/src/backend/postmaster/walwriter.c b/src/backend/postmaster/walwriter.c
index a575d8f..cf0c279 100644
--- a/src/backend/postmaster/walwriter.c
+++ b/src/backend/postmaster/walwriter.c
@@ -280,6 +280,8 @@ WalWriterMain(void)
 		else if (left_till_hibernate > 0)
 			left_till_hibernate--;
 
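+		/* Send WAL write statistics to the stats collector */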
+		pgstat_send_walwrites();
+
 		/*
 		 * Sleep until we are signaled or WalWriterDelay has elapsed.  If we
 		 * haven't done anything useful for quite some time, lengthen the
diff --git a/src/backend/replication/walsender.c b/src/backend/replication/walsender.c
index cfc3fba..677473e 100644
--- a/src/backend/replication/walsender.c
+++ b/src/backend/replication/walsender.c
@@ -2106,6 +2106,9 @@ WalSndLoop(WalSndSendDataCallback send_data)
 		/* Send keepalive if the time has come */
 		WalSndKeepaliveIfNecessary(now);
 
+		/* Send any WAL write activity performed by this walsender */
+		pgstat_send_walwrites();
+
 		/*
 		 * We don't block if not caught up, unless there is unsent data
 		 * pending in which case we'd better block until the socket is
diff --git a/src/backend/utils/adt/pgstatfuncs.c b/src/backend/utils/adt/pgstatfuncs.c
index dd2b924..f2d8d50 100644
--- a/src/backend/utils/adt/pgstatfuncs.c
+++ b/src/backend/utils/adt/pgstatfuncs.c
@@ -1859,3 +1859,42 @@ pg_stat_get_archiver(PG_FUNCTION_ARGS)
 	PG_RETURN_DATUM(HeapTupleGetDatum(
 								   heap_form_tuple(tupdesc, values, nulls)));
 }
+
+#define NUM_PG_STAT_WALWRITE_ATTS 11
+
+Datum
+pg_stat_get_walwrites(PG_FUNCTION_ARGS)
+{
+	TupleDesc	tupdesc;
+	Datum		values[NUM_PG_STAT_WALWRITE_ATTS];
+	bool		nulls[NUM_PG_STAT_WALWRITE_ATTS];
+	PgStat_WalWritesStats *walwrite_stats;
+
+	/* Initialize values and NULL flags arrays */
+	MemSet(values, 0, sizeof(values));
+	MemSet(nulls, 0, sizeof(nulls));
+
+	/* Build a tuple descriptor for our result type */
+	if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
+		elog(ERROR, "return type must be a row type");
+
+	/* Get statistics about WAL write activity */
+	walwrite_stats = pgstat_fetch_stat_walwrites();
+
+	/* Fill values and NULLs */
+	values[0] = Int64GetDatum(walwrite_stats->writes);
+	values[1] = Int64GetDatum(walwrite_stats->backend_writes);
+	values[2] = Int64GetDatum(walwrite_stats->dirty_writes);
+	values[3] = Int64GetDatum(walwrite_stats->backend_dirty_writes);
+	values[4] = Int64GetDatum(walwrite_stats->write_blocks);
+	values[5] = Int64GetDatum(walwrite_stats->backend_write_blocks);
+	values[6] = Float8GetDatum(walwrite_stats->total_write_time);
+	values[7] = Float8GetDatum(walwrite_stats->backend_total_write_time);
+	values[8] = Float8GetDatum(walwrite_stats->total_sync_time);
+	values[9] = Float8GetDatum(walwrite_stats->backend_total_sync_time);
+	values[10] = TimestampTzGetDatum(walwrite_stats->stat_reset_timestamp);
+
+	/* Returns the record as Datum */
+	PG_RETURN_DATUM(HeapTupleGetDatum(
+								   heap_form_tuple(tupdesc, values, nulls)));
+}
diff --git a/src/backend/utils/init/globals.c b/src/backend/utils/init/globals.c
index 08b6030..4931559 100644
--- a/src/backend/utils/init/globals.c
+++ b/src/backend/utils/init/globals.c
@@ -125,6 +125,8 @@ int			max_worker_processes = 8;
 int			max_parallel_workers = 8;
 int			MaxBackends = 0;
 
+bool 		am_autovacuum_worker = false;
+
 int			VacuumCostPageHit = 1;		/* GUC parameters for vacuum */
 int			VacuumCostPageMiss = 10;
 int			VacuumCostPageDirty = 20;
diff --git a/src/include/catalog/pg_proc.h b/src/include/catalog/pg_proc.h
index 79f9b90..2a00d5b 100644
--- a/src/include/catalog/pg_proc.h
+++ b/src/include/catalog/pg_proc.h
@@ -2914,6 +2914,9 @@ DESCR("statistics: number of backend buffer writes that did their own fsync");
 DATA(insert OID = 2859 ( pg_stat_get_buf_alloc			PGNSP PGUID 12 1 0 0 0 f f f f t f s r 0 0 20 "" _null_ _null_ _null_ _null_ _null_ pg_stat_get_buf_alloc _null_ _null_ _null_ ));
 DESCR("statistics: number of buffer allocations");
 
+DATA(insert OID = 3373 (  pg_stat_get_walwrites		PGNSP PGUID 12 1 0 0 0 f f f f f f s r 0 0 2249 "" "{20,20,20,20,20,20,701,701,701,701,1184}" "{o,o,o,o,o,o,o,o,o,o,o}" "{writes,backend_writes,dirty_writes,backend_dirty_writes,write_blocks,backend_write_blocks,write_time,backend_write_time,sync_time,backend_sync_time,stats_reset}" _null_ _null_ pg_stat_get_walwrites _null_ _null_ _null_ ));
+DESCR("statistics: information about WAL writes activity");
+
 DATA(insert OID = 2978 (  pg_stat_get_function_calls		PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 20 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_function_calls _null_ _null_ _null_ ));
 DESCR("statistics: number of function calls");
 DATA(insert OID = 2979 (  pg_stat_get_function_total_time	PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 701 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_function_total_time _null_ _null_ _null_ ));
diff --git a/src/include/miscadmin.h b/src/include/miscadmin.h
index 4c607b2..386ed06 100644
--- a/src/include/miscadmin.h
+++ b/src/include/miscadmin.h
@@ -242,6 +242,8 @@ extern PGDLLIMPORT int work_mem;
 extern PGDLLIMPORT int maintenance_work_mem;
 extern PGDLLIMPORT int replacement_sort_tuples;
 
+extern bool	am_autovacuum_worker;
+
 extern int	VacuumCostPageHit;
 extern int	VacuumCostPageMiss;
 extern int	VacuumCostPageDirty;
diff --git a/src/include/pgstat.h b/src/include/pgstat.h
index e29397f..6c025ff 100644
--- a/src/include/pgstat.h
+++ b/src/include/pgstat.h
@@ -64,7 +64,8 @@ typedef enum StatMsgType
 	PGSTAT_MTYPE_FUNCPURGE,
 	PGSTAT_MTYPE_RECOVERYCONFLICT,
 	PGSTAT_MTYPE_TEMPFILE,
-	PGSTAT_MTYPE_DEADLOCK
+	PGSTAT_MTYPE_DEADLOCK,
+	PGSTAT_MTYPE_WALWRITES
 } StatMsgType;
 
 /* ----------
@@ -119,7 +120,8 @@ typedef struct PgStat_TableCounts
 typedef enum PgStat_Shared_Reset_Target
 {
 	RESET_ARCHIVER,
-	RESET_BGWRITER
+	RESET_BGWRITER,
+	RESET_WALWRITES
 } PgStat_Shared_Reset_Target;
 
 /* Possible object types for resetting single counters */
@@ -423,6 +425,38 @@ typedef struct PgStat_MsgBgWriter
 } PgStat_MsgBgWriter;
 
 /* ----------
+ * PgStat_MsgWalWrites			Sent by backends and all background processes/workers
+ *								that perform WAL write operations.
+ * ----------
+ */
+typedef struct PgStat_MsgWalWrites
+{
+	PgStat_MsgHdr m_hdr;
+
+	PgStat_Counter m_writes;	/* No of writes by background
+								 * processes/workers */
+	PgStat_Counter m_backend_writes;	/* No of writes by backend */
+	PgStat_Counter m_dirty_writes;		/* No of dirty writes by background
+										 * processes/workers when WAL buffers
+										 * full */
+	PgStat_Counter m_backend_dirty_writes;		/* No of dirty writes by
+												 * backend when WAL buffers
+												 * full */
+	PgStat_Counter m_write_blocks;		/* Total no of pages written by
+										 * background processes/workers */
+	PgStat_Counter m_backend_write_blocks;		/* Total no of pages written
+												 * by backend */
+	PgStat_Counter m_total_write_time;	/* Total write time in milliseconds by
+										 * background processes/workers */
+	PgStat_Counter m_backend_total_write_time;	/* Total write time in
+												 * milliseconds by backend */
+	PgStat_Counter m_total_sync_time;	/* Total sync time in milliseconds by
+										 * background processes/workers */
+	PgStat_Counter m_backend_total_sync_time;	/* Total sync time in
+												 * milliseconds by backend */
+} PgStat_MsgWalWrites;
+
+/* ----------
  * PgStat_MsgRecoveryConflict	Sent by the backend upon recovery conflict
  * ----------
  */
@@ -555,6 +589,7 @@ typedef union PgStat_Msg
 	PgStat_MsgFuncpurge msg_funcpurge;
 	PgStat_MsgRecoveryConflict msg_recoveryconflict;
 	PgStat_MsgDeadlock msg_deadlock;
+	PgStat_MsgWalWrites msg_walwrites;
 } PgStat_Msg;
 
 
@@ -694,6 +729,33 @@ typedef struct PgStat_GlobalStats
 	TimestampTz stat_reset_timestamp;
 } PgStat_GlobalStats;
 
+/*
+ * WAL write statistics kept in the stats collector
+ */
+typedef struct PgStat_WalWritesStats
+{
+	PgStat_Counter writes;		/* No of writes by background
+								 * processes/workers */
+	PgStat_Counter backend_writes;		/* No of writes by backend */
+	PgStat_Counter dirty_writes;/* No of dirty writes by background
+								 * processes/workers when WAL buffers full */
+	PgStat_Counter backend_dirty_writes;		/* No of dirty writes by
+												 * backend when WAL buffers
+												 * full */
+	PgStat_Counter write_blocks;/* Total no of pages written by background
+								 * processes/workers */
+	PgStat_Counter backend_write_blocks;		/* Total no of pages written
+												 * by backend */
+	PgStat_Counter total_write_time;	/* Total write time in milliseconds by
+										 * background processes/workers */
+	PgStat_Counter backend_total_write_time;	/* Total write time in
+												 * milliseconds by backend */
+	PgStat_Counter total_sync_time;		/* Total sync time in milliseconds by
+										 * background processes/workers */
+	PgStat_Counter backend_total_sync_time;		/* Total sync time in
+												 * milliseconds by backend */
+	TimestampTz stat_reset_timestamp;	/* Last time these stats were reset */
+} PgStat_WalWritesStats;
 
 /* ----------
  * Backend types
@@ -1115,6 +1177,11 @@ extern char *pgstat_stat_filename;
 extern PgStat_MsgBgWriter BgWriterStats;
 
 /*
+ * WAL write statistics updated in XLogWrite()
+ */
+extern PgStat_MsgWalWrites LocalWalWritesStats;
+
+/*
  * Updated by pgstat_count_buffer_*_time macros
  */
 extern PgStat_Counter pgStatBlockReadTime;
@@ -1308,6 +1375,7 @@ extern void pgstat_twophase_postabort(TransactionId xid, uint16 info,
 
 extern void pgstat_send_archiver(const char *xlog, bool failed);
 extern void pgstat_send_bgwriter(void);
+extern void pgstat_send_walwrites(void);
 
 /* ----------
  * Support functions for the SQL-callable functions to
@@ -1322,5 +1390,7 @@ extern PgStat_StatFuncEntry *pgstat_fetch_stat_funcentry(Oid funcid);
 extern int	pgstat_fetch_stat_numbackends(void);
 extern PgStat_ArchiverStats *pgstat_fetch_stat_archiver(void);
 extern PgStat_GlobalStats *pgstat_fetch_global(void);
+extern PgStat_WalWritesStats *pgstat_fetch_stat_walwrites(void);
 
 #endif   /* PGSTAT_H */
diff --git a/src/test/regress/expected/rules.out b/src/test/regress/expected/rules.out
index d706f42..4c0a55b 100644
--- a/src/test/regress/expected/rules.out
+++ b/src/test/regress/expected/rules.out
@@ -1973,6 +1973,18 @@ pg_stat_wal_receiver| SELECT s.pid,
     s.conninfo
    FROM pg_stat_get_wal_receiver() s(pid, status, receive_start_lsn, receive_start_tli, received_lsn, received_tli, last_msg_send_time, last_msg_receipt_time, latest_end_lsn, latest_end_time, slot_name, conninfo)
   WHERE (s.pid IS NOT NULL);
+pg_stat_walwrites| SELECT a.writes,
+    a.backend_writes,
+    a.dirty_writes,
+    a.backend_dirty_writes,
+    a.write_blocks,
+    a.backend_write_blocks,
+    a.write_time,
+    a.backend_write_time,
+    a.sync_time,
+    a.backend_sync_time,
+    a.stats_reset
+   FROM pg_stat_get_walwrites() a(writes, backend_writes, dirty_writes, backend_dirty_writes, write_blocks, backend_write_blocks, write_time, backend_write_time, sync_time, backend_sync_time, stats_reset);
 pg_stat_xact_all_tables| SELECT c.oid AS relid,
     n.nspname AS schemaname,
     c.relname,
diff --git a/src/test/regress/expected/sysviews.out b/src/test/regress/expected/sysviews.out
index 568b783..8448cac 100644
--- a/src/test/regress/expected/sysviews.out
+++ b/src/test/regress/expected/sysviews.out
@@ -67,6 +67,13 @@ select count(*) >= 0 as ok from pg_prepared_xacts;
  t
 (1 row)
 
+-- This view should always contain exactly one row
+select count(*) = 1 as ok from pg_stat_walwrites;
+ ok 
+----
+ t
+(1 row)
+
 -- This is to record the prevailing planner enable_foo settings during
 -- a regression test run.
 select name, setting from pg_settings where name like 'enable%';
diff --git a/src/test/regress/sql/sysviews.sql b/src/test/regress/sql/sysviews.sql
index 28e412b..3769880 100644
--- a/src/test/regress/sql/sysviews.sql
+++ b/src/test/regress/sql/sysviews.sql
@@ -32,6 +32,9 @@ select count(*) = 0 as ok from pg_prepared_statements;
 -- See also prepared_xacts.sql
 select count(*) >= 0 as ok from pg_prepared_xacts;
 
+-- This view should always contain exactly one row
+select count(*) = 1 as ok from pg_stat_walwrites;
+
 -- This is to record the prevailing planner enable_foo settings during
 -- a regression test run.
 select name, setting from pg_settings where name like 'enable%';
