diff --git a/src/backend/postmaster/bgwriter.c b/src/backend/postmaster/bgwriter.c
index 780ee3b..ae4237d 100644
--- a/src/backend/postmaster/bgwriter.c
+++ b/src/backend/postmaster/bgwriter.c
@@ -67,12 +67,6 @@
 int			BgWriterDelay = 200;
 
 /*
- * Multiplier to apply to BgWriterDelay when we decide to hibernate.
- * (Perhaps this needs to be configurable?)
- */
-#define HIBERNATE_FACTOR			50
-
-/*
  * Interval in which standby snapshots are logged into the WAL stream, in
  * milliseconds.
  */
@@ -111,7 +105,6 @@ BackgroundWriterMain(void)
 {
 	sigjmp_buf	local_sigjmp_buf;
 	MemoryContext bgwriter_context;
-	bool		prev_hibernate;
 
 	/*
 	 * If possible, make this process a group leader, so that the postmaster
@@ -246,19 +239,15 @@ BackgroundWriterMain(void)
 	 */
 	PG_SETMASK(&UnBlockSig);
 
-	/*
-	 * Reset hibernation state after any error.
-	 */
-	prev_hibernate = false;
+	/* Register our latch so backends can wake us when the freelist runs low. */
+	StrategyInitBgWriterLatch(&MyProc->procLatch);
 
 	/*
 	 * Loop forever
 	 */
 	for (;;)
 	{
-		bool		can_hibernate;
 		int			rc;
 
 		/* Clear any already-pending wakeups */
 		ResetLatch(&MyProc->procLatch);
 
@@ -279,9 +268,25 @@ BackgroundWriterMain(void)
 		}
 
 		/*
-		 * Do one cycle of dirty-buffer writing.
+		 * Sleep until signaled by a backend or until
+		 * LOG_SNAPSHOT_INTERVAL_MS has elapsed.
+		 *
+		 * A backend will signal the bgwriter when the number of buffers
+		 * on the freelist falls below the freelist low threshold.  We
+		 * also need to wake after LOG_SNAPSHOT_INTERVAL_MS so that the
+		 * bgwriter can log an xl_running_xacts record.
 		 */
-		can_hibernate = BgBufferSync();
+		if (XLogStandbyInfoActive() && !RecoveryInProgress())
+			rc = WaitLatch(&MyProc->procLatch,
+						   WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
+						   LOG_SNAPSHOT_INTERVAL_MS);
+		else
+			rc = WaitLatch(&MyProc->procLatch,
+						   WL_LATCH_SET | WL_POSTMASTER_DEATH,
+						   -1);
+
+		if (rc & WL_LATCH_SET)
+			BgBufferSyncAndMoveBuffersToFreelist();
 
 		/*
 		 * Send off activity statistics to the stats collector
@@ -318,7 +323,9 @@ BackgroundWriterMain(void)
 		 * Checkpointer, when active, is barely ever in its mainloop and thus
 		 * makes it hard to log regularly.
 		 */
-		if (XLogStandbyInfoActive() && !RecoveryInProgress())
+		if ((rc & (WL_TIMEOUT | WL_LATCH_SET)) &&
+			XLogStandbyInfoActive() &&
+			!RecoveryInProgress())
 		{
 			TimestampTz timeout = 0;
 			TimestampTz now = GetCurrentTimestamp();
@@ -339,57 +346,11 @@ BackgroundWriterMain(void)
 		}
 
 		/*
-		 * Sleep until we are signaled or BgWriterDelay has elapsed.
-		 *
-		 * Note: the feedback control loop in BgBufferSync() expects that we
-		 * will call it every BgWriterDelay msec.  While it's not critical for
-		 * correctness that that be exact, the feedback loop might misbehave
-		 * if we stray too far from that.  Hence, avoid loading this process
-		 * down with latch events that are likely to happen frequently during
-		 * normal operation.
-		 */
-		rc = WaitLatch(&MyProc->procLatch,
-					   WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
-					   BgWriterDelay /* ms */ );
-
-		/*
-		 * If no latch event and BgBufferSync says nothing's happening, extend
-		 * the sleep in "hibernation" mode, where we sleep for much longer
-		 * than bgwriter_delay says.  Fewer wakeups save electricity.  When a
-		 * backend starts using buffers again, it will wake us up by setting
-		 * our latch.  Because the extra sleep will persist only as long as no
-		 * buffer allocations happen, this should not distort the behavior of
-		 * BgBufferSync's control loop too badly; essentially, it will think
-		 * that the system-wide idle interval didn't exist.
-		 *
-		 * There is a race condition here, in that a backend might allocate a
-		 * buffer between the time BgBufferSync saw the alloc count as zero
-		 * and the time we call StrategyNotifyBgWriter.  While it's not
-		 * critical that we not hibernate anyway, we try to reduce the odds of
-		 * that by only hibernating when BgBufferSync says nothing's happening
-		 * for two consecutive cycles.  Also, we mitigate any possible
-		 * consequences of a missed wakeup by not hibernating forever.
-		 */
-		if (rc == WL_TIMEOUT && can_hibernate && prev_hibernate)
-		{
-			/* Ask for notification at next buffer allocation */
-			StrategyNotifyBgWriter(&MyProc->procLatch);
-			/* Sleep ... */
-			rc = WaitLatch(&MyProc->procLatch,
-						   WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
-						   BgWriterDelay * HIBERNATE_FACTOR);
-			/* Reset the notification request in case we timed out */
-			StrategyNotifyBgWriter(NULL);
-		}
-
-		/*
 		 * Emergency bailout if postmaster has died.  This is to avoid the
 		 * necessity for manual cleanup of all postmaster children.
 		 */
 		if (rc & WL_POSTMASTER_DEATH)
 			exit(1);
-
-		prev_hibernate = can_hibernate;
 	}
 }
 
diff --git a/src/backend/postmaster/pgstat.c b/src/backend/postmaster/pgstat.c
index 3ab1428..d82667b 100644
--- a/src/backend/postmaster/pgstat.c
+++ b/src/backend/postmaster/pgstat.c
@@ -5020,6 +5020,7 @@ pgstat_recv_bgwriter(PgStat_MsgBgWriter *msg, int len)
 	globalStats.buf_written_backend += msg->m_buf_written_backend;
 	globalStats.buf_fsync_backend += msg->m_buf_fsync_backend;
 	globalStats.buf_alloc += msg->m_buf_alloc;
+	globalStats.buf_freelist += msg->m_buf_freelist;
 }
 
 /* ----------
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index 07ea665..5b8975b 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -1637,10 +1637,75 @@ BgBufferSync(void)
 }
 
 /*
+ * Write out some dirty buffers in the pool and try to maintain the
+ * number of buffers on the freelist at the freelist high threshold,
+ * so that backends don't need to perform the clock sweep as often.
+ *
+ * This is called by the background writer process when the number of
+ * buffers on the freelist falls below the freelist low threshold.
+ */
+void
+BgBufferSyncAndMoveBuffersToFreelist(void)
+{
+	volatile uint32	next_to_clean;
+	uint32	num_to_free;
+	uint32	tmp_num_to_free;
+	uint32	save_next_to_clean;
+	uint32	recent_alloc;
+	int		num_written;
+	int		num_scanned;
+	int		num_freelist;
+	volatile BufferDesc *bufHdr;
+
+	num_freelist = StrategySyncStartAndEnd(&save_next_to_clean,
+										   &num_to_free,
+										   &recent_alloc);
+
+	/* Report buffer alloc and buffer freelist counts to pgstat */
+	BgWriterStats.m_buf_alloc += recent_alloc;
+	BgWriterStats.m_buf_freelist += num_freelist;
+
+	/* Make sure we can handle the pin inside SyncOneBuffer */
+	ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
+
+	num_written = 0;
+	num_scanned = 0;
+	tmp_num_to_free = num_to_free;
+	next_to_clean = save_next_to_clean;
+
+	/*
+	 * Execute the LRU scan.  Bound the scan at NBuffers so that we can't
+	 * loop indefinitely when no reusable buffers are found.
+	 */
+	while (tmp_num_to_free > 0 && num_scanned < NBuffers)
+	{
+		int			buffer_state = SyncOneBuffer(next_to_clean, true);
+
+		bufHdr = &BufferDescriptors[next_to_clean];
+
+		/* Choose the next victim buffer to clean. */
+		StrategySyncNextVictimBuffer(&next_to_clean);
+		num_scanned++;
+
+		if (buffer_state & BUF_WRITTEN)
+			++num_written;
+		if (buffer_state & BUF_REUSABLE)
+		{
+			if (StrategyMoveBufferToFreeListEnd(bufHdr))
+				tmp_num_to_free--;
+		}
+	}
+
+	BgWriterStats.m_buf_written_clean += num_written;
+
+#ifdef BGW_DEBUG
+	elog(DEBUG1, "bgwriter: recent_alloc=%u num_freelist=%d next_to_clean=%u wrote=%d num_freed=%u",
+		 recent_alloc, num_freelist, save_next_to_clean, num_written,
+		 num_to_free - tmp_num_to_free);
+#endif
+}
+
+/*
  * SyncOneBuffer -- process a single buffer during syncing.
  *
- * If skip_recently_used is true, we don't write currently-pinned buffers, nor
- * buffers marked recently used, as these are not replacement candidates.
+ * If skip_recently_used is true, we don't write currently-pinned buffers,
+ * nor buffers marked recently used, as these are not replacement
+ * candidates; instead we decrement the usage count of such buffers so
+ * that repeated cycles will eventually find them reusable.
  *
  * Returns a bitmask containing the following flag bits:
  *	BUF_WRITTEN: we wrote the buffer.
@@ -1673,7 +1738,13 @@ SyncOneBuffer(int buf_id, bool skip_recently_used)
 		result |= BUF_REUSABLE;
 	else if (skip_recently_used)
 	{
-		/* Caller told us not to write recently-used buffers */
+		/*
+		 * Caller told us not to write recently-used buffers; instead,
+		 * decrement the usage count so that the buffer can become
+		 * reusable in a later cycle.
+		 */
+		if (bufHdr->refcount == 0 && bufHdr->usage_count > 0)
+			bufHdr->usage_count--;
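+
+		/*
+		 * For example, an unpinned buffer with usage_count = 3 becomes a
+		 * replacement candidate only after three such passes have
+		 * decremented its count to zero.
+		 */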
 		UnlockBufHdr(bufHdr);
 		return result;
 	}
diff --git a/src/backend/storage/buffer/freelist.c b/src/backend/storage/buffer/freelist.c
index 4befab0..358f35c 100644
--- a/src/backend/storage/buffer/freelist.c
+++ b/src/backend/storage/buffer/freelist.c
@@ -29,6 +29,7 @@ typedef struct
 
 	int			firstFreeBuffer;	/* Head of list of unused buffers */
 	int			lastFreeBuffer; /* Tail of list of unused buffers */
+	int			numFreeListBuffers; /* number of buffers on freelist */
 
 	/*
 	 * NOTE: lastFreeBuffer is undefined when firstFreeBuffer is -1 (that is,
@@ -43,7 +44,21 @@ typedef struct
 	uint32		numBufferAllocs;	/* Buffers allocated since last reset */
 
 	/*
-	 * Notification latch, or NULL if none.  See StrategyNotifyBgWriter.
+	 * Protects the freelist variables (firstFreeBuffer, lastFreeBuffer,
+	 * numFreeListBuffers, BufferDesc->freeNext).
+	 */
+	slock_t	     freelist_lck;
+
+	/*
+	 * Protects nextVictimBuffer.  We use a separate lock so that one
+	 * backend's clock sweep doesn't contend with another backend that
+	 * is taking a buffer from the freelist.
+	 */
+	slock_t	     victimbuf_lck;
+
+	/*
+	 * Latch used to wake the bgwriter when the freelist runs low.
 	 */
 	Latch	   *bgwriterLatch;
 } BufferStrategyControl;
@@ -112,7 +127,6 @@ volatile BufferDesc *
 StrategyGetBuffer(BufferAccessStrategy strategy, bool *lock_held)
 {
 	volatile BufferDesc *buf;
-	Latch	   *bgwriterLatch;
 	int			trycounter;
 
 	/*
@@ -129,76 +143,92 @@ StrategyGetBuffer(BufferAccessStrategy strategy, bool *lock_held)
 		}
 	}
 
-	/* Nope, so lock the freelist */
-	*lock_held = true;
-	LWLockAcquire(BufFreelistLock, LW_EXCLUSIVE);
-
 	/*
-	 * We count buffer allocation requests so that the bgwriter can estimate
-	 * the rate of buffer consumption.  Note that buffers recycled by a
-	 * strategy object are intentionally not counted here.
+	 * We count buffer allocation requests so that the bgwriter can know
+	 * the rate of buffer consumption and report it as stats.  Note that
+	 * buffers recycled by a strategy object are intentionally not counted
+	 * here.
 	 */
 	StrategyControl->numBufferAllocs++;
+	*lock_held = false;
 
 	/*
-	 * If bgwriterLatch is set, we need to waken the bgwriter, but we should
-	 * not do so while holding BufFreelistLock; so release and re-grab.  This
-	 * is annoyingly tedious, but it happens at most once per bgwriter cycle,
-	 * so the performance hit is minimal.
+	 * Ideally numFreeListBuffers should be read under the freelist
+	 * spinlock; however, we only use it here to decide whether to wake
+	 * the bgwriter, so a slightly stale value is acceptable.
+	 * bgwriterLatch is initialized early during bgwriter startup, but we
+	 * still check it before use in case we get here before it has been
+	 * initialized.
 	 */
-	bgwriterLatch = StrategyControl->bgwriterLatch;
-	if (bgwriterLatch)
-	{
-		StrategyControl->bgwriterLatch = NULL;
-		LWLockRelease(BufFreelistLock);
-		SetLatch(bgwriterLatch);
-		LWLockAcquire(BufFreelistLock, LW_EXCLUSIVE);
-	}
+	if (StrategyControl->numFreeListBuffers < freelistLowThreshold &&
+		StrategyControl->bgwriterLatch)
+		SetLatch(StrategyControl->bgwriterLatch);
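+
+	/*
+	 * For example, with NBuffers > 100000 the freelist low threshold is
+	 * 200, so the bgwriter is woken as soon as fewer than 200 buffers
+	 * remain on the freelist.
+	 */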
 
 	/*
 	 * Try to get a buffer from the freelist.  Note that the freeNext fields
-	 * are considered to be protected by the BufFreelistLock not the
+	 * are considered to be protected by freelist_lck, not the
 	 * individual buffer spinlocks, so it's OK to manipulate them without
-	 * holding the spinlock.
+	 * holding the buffer spinlock.
 	 */
-	while (StrategyControl->firstFreeBuffer >= 0)
+	for(;;)
 	{
-		buf = &BufferDescriptors[StrategyControl->firstFreeBuffer];
-		Assert(buf->freeNext != FREENEXT_NOT_IN_LIST);
+		SpinLockAcquire(&StrategyControl->freelist_lck);
 
-		/* Unconditionally remove buffer from freelist */
-		StrategyControl->firstFreeBuffer = buf->freeNext;
-		buf->freeNext = FREENEXT_NOT_IN_LIST;
+		if (StrategyControl->firstFreeBuffer >= 0)
+		{
+			buf = &BufferDescriptors[StrategyControl->firstFreeBuffer];
+			Assert(buf->freeNext != FREENEXT_NOT_IN_LIST);
 
-		/*
-		 * If the buffer is pinned or has a nonzero usage_count, we cannot use
-		 * it; discard it and retry.  (This can only happen if VACUUM put a
-		 * valid buffer in the freelist and then someone else used it before
-		 * we got to it.  It's probably impossible altogether as of 8.3, but
-		 * we'd better check anyway.)
-		 */
-		LockBufHdr(buf);
-		if (buf->refcount == 0 && buf->usage_count == 0)
+			/* Unconditionally remove buffer from freelist */
+			StrategyControl->firstFreeBuffer = buf->freeNext;
+			buf->freeNext = FREENEXT_NOT_IN_LIST;
+			--StrategyControl->numFreeListBuffers;
+
+			SpinLockRelease(&StrategyControl->freelist_lck);
+
+			/*
+			 * If the buffer is pinned or has a nonzero usage_count, we cannot use
+			 * it; discard it and retry.  (This can only happen if VACUUM put a
+			 * valid buffer in the freelist and then someone else used it before
+			 * we got to it.  It's probably impossible altogether as of 8.3, but
+			 * we'd better check anyway.)
+			 */
+			LockBufHdr(buf);
+			if (buf->refcount == 0 && buf->usage_count == 0)
+			{
+				if (strategy != NULL)
+					AddBufferToRing(strategy, buf);
+				return buf;
+			}
+			UnlockBufHdr(buf);
+		}
+		else
 		{
-			if (strategy != NULL)
-				AddBufferToRing(strategy, buf);
-			return buf;
+			SpinLockRelease(&StrategyControl->freelist_lck);
+			break;
 		}
-		UnlockBufHdr(buf);
 	}
 
 	/* Nothing on the freelist, so run the "clock sweep" algorithm */
 	trycounter = NBuffers;
 	for (;;)
 	{
+		SpinLockAcquire(&StrategyControl->victimbuf_lck);
+
 		buf = &BufferDescriptors[StrategyControl->nextVictimBuffer];
 
 		if (++StrategyControl->nextVictimBuffer >= NBuffers)
 		{
 			StrategyControl->nextVictimBuffer = 0;
 			StrategyControl->completePasses++;
 		}
 
+		SpinLockRelease(&StrategyControl->victimbuf_lck);
+
 		/*
 		 * If the buffer is pinned or has a nonzero usage_count, we cannot use
 		 * it; decrement the usage_count (unless pinned) and keep scanning.
@@ -241,7 +271,7 @@ StrategyGetBuffer(BufferAccessStrategy strategy, bool *lock_held)
 void
 StrategyFreeBuffer(volatile BufferDesc *buf)
 {
-	LWLockAcquire(BufFreelistLock, LW_EXCLUSIVE);
+	SpinLockAcquire(&StrategyControl->freelist_lck);
 
 	/*
 	 * It is possible that we are told to put something in the freelist that
@@ -253,11 +283,50 @@ StrategyFreeBuffer(volatile BufferDesc *buf)
 		if (buf->freeNext < 0)
 			StrategyControl->lastFreeBuffer = buf->buf_id;
 		StrategyControl->firstFreeBuffer = buf->buf_id;
+		++StrategyControl->numFreeListBuffers;
 	}
 
-	LWLockRelease(BufFreelistLock);
+	SpinLockRelease(&StrategyControl->freelist_lck);
+}
+
+/*
+ * StrategyMoveBufferToFreeListEnd: append a buffer to the end of the
+ * freelist.  Returns true if the buffer was added, false if it was
+ * already on the freelist.
+ */
+bool
+StrategyMoveBufferToFreeListEnd(volatile BufferDesc *buf)
+{
+	bool		freed = false;
+
+	SpinLockAcquire(&StrategyControl->freelist_lck);
+
+	/*
+	 * It is possible that we are told to put something in the freelist that
+	 * is already in it; don't screw up the list if so.
+	 */
+	if (buf->freeNext == FREENEXT_NOT_IN_LIST)
+	{
+		++StrategyControl->numFreeListBuffers;
+		freed = true;
+
+		/*
+		 * Put the buffer at the end of the list.  If the list is empty,
+		 * this buffer becomes both the first and the last free buffer.
+		 */
+		buf->freeNext = FREENEXT_END_OF_LIST;
+		if (StrategyControl->firstFreeBuffer < 0)
+			StrategyControl->firstFreeBuffer = buf->buf_id;
+		else
+			BufferDescriptors[StrategyControl->lastFreeBuffer].freeNext = buf->buf_id;
+		StrategyControl->lastFreeBuffer = buf->buf_id;
+	}
+	SpinLockRelease(&StrategyControl->freelist_lck);
+
+	return freed;
 }
 
 /*
  * StrategySyncStart -- tell BufferSync where to start syncing
  *
@@ -274,8 +343,10 @@ StrategySyncStart(uint32 *complete_passes, uint32 *num_buf_alloc)
 {
 	int			result;
 
-	LWLockAcquire(BufFreelistLock, LW_EXCLUSIVE);
+	SpinLockAcquire(&StrategyControl->victimbuf_lck);
 	result = StrategyControl->nextVictimBuffer;
+	SpinLockRelease(&StrategyControl->victimbuf_lck);
+
 	if (complete_passes)
 		*complete_passes = StrategyControl->completePasses;
 	if (num_buf_alloc)
@@ -283,11 +354,69 @@ StrategySyncStart(uint32 *complete_passes, uint32 *num_buf_alloc)
 		*num_buf_alloc = StrategyControl->numBufferAllocs;
 		StrategyControl->numBufferAllocs = 0;
 	}
-	LWLockRelease(BufFreelistLock);
 	return result;
 }
 
 /*
+ * StrategySyncStartAndEnd -- tell the bgwriter where to start looking
+ * for unused buffers.
+ *
+ * Via the output parameters we return the buffer index at which to
+ * start the scan (*start), the number of buffers that need to be moved
+ * to the freelist (*end), and the count of recent buffer allocations
+ * (*num_buf_alloc).
+ *
+ * The function result is the current number of buffers on the freelist.
+ */
+int
+StrategySyncStartAndEnd(uint32 *start, uint32 *end, uint32 *num_buf_alloc)
+{
+	int			curfreebuffers;
+
+	SpinLockAcquire(&StrategyControl->victimbuf_lck);
+	*start = StrategyControl->nextVictimBuffer;
+	SpinLockRelease(&StrategyControl->victimbuf_lck);
+
+	/*
+	 * Ideally numFreeListBuffers should be read under the freelist
+	 * spinlock; however, we only need an approximate count here to
+	 * estimate how many buffers must be moved to the freelist, so a
+	 * slightly stale value is acceptable.
+	 */
+	curfreebuffers = StrategyControl->numFreeListBuffers;
+	if (curfreebuffers < freelistHighThreshold)
+		*end = freelistHighThreshold - curfreebuffers;
+	else
+		*end = 0;
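+
+	/*
+	 * For example, with freelistHighThreshold = 2000 and 1500 buffers
+	 * currently on the freelist, *end is set to 500, i.e. the bgwriter
+	 * is asked to move 500 more buffers onto the freelist.
+	 */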
+
+	/*
+	 * numBufferAllocs is needed only for statistics, so reading and
+	 * resetting it without a lock is acceptable.
+	 */
+	if (num_buf_alloc)
+	{
+		*num_buf_alloc = StrategyControl->numBufferAllocs;
+		StrategyControl->numBufferAllocs = 0;
+	}
+
+	return curfreebuffers;
+}
+
+/*
+ * StrategySyncNextVictimBuffer -- advance the clock-sweep hand and
+ * report the next buffer for the bgwriter to examine.
+ */
+void
+StrategySyncNextVictimBuffer(volatile uint32 *next_victim_buffer)
+{
+	SpinLockAcquire(&StrategyControl->victimbuf_lck);
+	if (++StrategyControl->nextVictimBuffer >= NBuffers)
+	{
+		StrategyControl->nextVictimBuffer = 0;
+		StrategyControl->completePasses++;
+	}
+	*next_victim_buffer = StrategyControl->nextVictimBuffer;
+	SpinLockRelease(&StrategyControl->victimbuf_lck);
+}
+
+/*
  * StrategyNotifyBgWriter -- set or clear allocation notification latch
  *
  * If bgwriterLatch isn't NULL, the next invocation of StrategyGetBuffer will
@@ -309,6 +438,12 @@ StrategyNotifyBgWriter(Latch *bgwriterLatch)
 }
 
 
+/*
+ * StrategyInitBgWriterLatch -- register the bgwriter's latch so that
+ * backends can wake it when the freelist runs low.
+ */
+void
+StrategyInitBgWriterLatch(Latch *bgwriterLatch)
+{
+	StrategyControl->bgwriterLatch = bgwriterLatch;
+}
+
 /*
  * StrategyShmemSize
  *
@@ -376,6 +511,7 @@ StrategyInitialize(bool init)
 		 */
 		StrategyControl->firstFreeBuffer = 0;
 		StrategyControl->lastFreeBuffer = NBuffers - 1;
+		StrategyControl->numFreeListBuffers = NBuffers;
 
 		/* Initialize the clock sweep pointer */
 		StrategyControl->nextVictimBuffer = 0;
@@ -386,9 +522,43 @@ StrategyInitialize(bool init)
 
 		/* No pending notification */
 		StrategyControl->bgwriterLatch = NULL;
+		SpinLockInit(&StrategyControl->freelist_lck);
+		SpinLockInit(&StrategyControl->victimbuf_lck);
 	}
 	else
 		Assert(!init);
+
+	/*
+	 * Initialize the low and high freelist thresholds.  These are used
+	 * to keep enough buffers on the freelist that backends seldom need
+	 * to run the clock sweep themselves.
+	 */
+	if (NBuffers > 100000)
+	{
+		freelistLowThreshold = 200;
+		freelistHighThreshold = 2000;
+	}
+	else if (NBuffers > 10000)
+	{
+		freelistLowThreshold = 100;
+		freelistHighThreshold = 1000;
+	}
+	else if (NBuffers > 1000)
+	{
+		freelistLowThreshold = 50;
+		freelistHighThreshold = 200;
+	}
+	else if (NBuffers > 100)
+	{
+		freelistLowThreshold = 30;
+		freelistHighThreshold = 75;
+	}
+	else
+	{
+		freelistLowThreshold = 5;
+		freelistHighThreshold = 15;
+	}
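+
+	/*
+	 * For example, with shared_buffers = 1GB (NBuffers = 131072, which
+	 * is > 100000), the bgwriter is woken once fewer than 200 buffers
+	 * remain on the freelist and it refills the list up to 2000 buffers.
+	 */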
 }
 
 
diff --git a/src/include/pgstat.h b/src/include/pgstat.h
index 0892533..2b55bca 100644
--- a/src/include/pgstat.h
+++ b/src/include/pgstat.h
@@ -397,6 +397,7 @@ typedef struct PgStat_MsgBgWriter
 	PgStat_Counter m_buf_written_backend;
 	PgStat_Counter m_buf_fsync_backend;
 	PgStat_Counter m_buf_alloc;
+	PgStat_Counter m_buf_freelist;
 	PgStat_Counter m_checkpoint_write_time;		/* times in milliseconds */
 	PgStat_Counter m_checkpoint_sync_time;
 } PgStat_MsgBgWriter;
@@ -545,7 +546,7 @@ typedef union PgStat_Msg
  * ------------------------------------------------------------
  */
 
-#define PGSTAT_FILE_FORMAT_ID	0x01A5BC9C
+#define PGSTAT_FILE_FORMAT_ID	0x01A5BC9D
 
 /* ----------
  * PgStat_StatDBEntry			The collector's data per database
@@ -670,6 +671,7 @@ typedef struct PgStat_GlobalStats
 	PgStat_Counter buf_written_backend;
 	PgStat_Counter buf_fsync_backend;
 	PgStat_Counter buf_alloc;
+	PgStat_Counter buf_freelist;
 	TimestampTz stat_reset_timestamp;
 } PgStat_GlobalStats;
 
diff --git a/src/include/storage/buf_internals.h b/src/include/storage/buf_internals.h
index c019013..54a8b8f 100644
--- a/src/include/storage/buf_internals.h
+++ b/src/include/storage/buf_internals.h
@@ -161,6 +161,16 @@ typedef struct sbufdesc
 #define FREENEXT_NOT_IN_LIST	(-2)
 
 /*
+ * Threshold indicators for maintaining buffers on the freelist.  When
+ * the number of buffers on the freelist drops below the low threshold,
+ * the allocating backend sets the bgwriter's latch; the bgwriter wakes
+ * up, adds buffers to the freelist until it reaches the high threshold,
+ * and then goes back to sleep.
+ */
+int freelistLowThreshold;
+int freelistHighThreshold;
+
+/*
  * Macros for acquiring/releasing a shared buffer header's spinlock.
  * Do not apply these to local buffers!
  *
@@ -188,11 +198,16 @@ extern BufferDesc *LocalBufferDescriptors;
 extern volatile BufferDesc *StrategyGetBuffer(BufferAccessStrategy strategy,
 				  bool *lock_held);
 extern void StrategyFreeBuffer(volatile BufferDesc *buf);
+extern bool StrategyMoveBufferToFreeListEnd(volatile BufferDesc *buf);
 extern bool StrategyRejectBuffer(BufferAccessStrategy strategy,
 					 volatile BufferDesc *buf);
 
 extern int	StrategySyncStart(uint32 *complete_passes, uint32 *num_buf_alloc);
+extern int  StrategySyncStartAndEnd(uint32 *start, uint32 *end,
+									uint32 *num_buf_alloc);
+extern void StrategySyncNextVictimBuffer(volatile uint32 *next_victim_buffer);
 extern void StrategyNotifyBgWriter(Latch *bgwriterLatch);
+extern void StrategyInitBgWriterLatch(Latch *bgwriterLatch);
 
 extern Size StrategyShmemSize(void);
 extern void StrategyInitialize(bool init);
diff --git a/src/include/storage/bufmgr.h b/src/include/storage/bufmgr.h
index 89447d0..b0e5598 100644
--- a/src/include/storage/bufmgr.h
+++ b/src/include/storage/bufmgr.h
@@ -219,6 +219,7 @@ extern void AbortBufferIO(void);
 
 extern void BufmgrCommit(void);
 extern bool BgBufferSync(void);
+extern void BgBufferSyncAndMoveBuffersToFreelist(void);
 
 extern void AtProcExit_LocalBuffers(void);
 
diff --git a/src/include/storage/lwlock.h b/src/include/storage/lwlock.h
index d588b14..cd26ff0 100644
--- a/src/include/storage/lwlock.h
+++ b/src/include/storage/lwlock.h
@@ -136,7 +136,7 @@ extern PGDLLIMPORT LWLockPadded *MainLWLockArray;
  */
 
 /* Number of partitions of the shared buffer mapping hashtable */
-#define NUM_BUFFER_PARTITIONS  16
+#define NUM_BUFFER_PARTITIONS  128
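+
+/*
+ * A larger number of partitions reduces contention on the buffer
+ * mapping locks when many backends look up or evict buffers
+ * concurrently, at the cost of a few more lwlocks in the main array.
+ */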
 
 /* Number of partitions the shared lock tables are divided into */
 #define LOG2_NUM_LOCK_PARTITIONS  4
