diff --git a/src/backend/utils/hash/dynahash.c b/src/backend/utils/hash/dynahash.c
index 2b99e4b..64f25bf 100644
--- a/src/backend/utils/hash/dynahash.c
+++ b/src/backend/utils/hash/dynahash.c
@@ -71,6 +71,7 @@
 #include <limits.h>
 
 #include "access/xact.h"
+#include "port/atomics.h"
 #include "storage/shmem.h"
 #include "storage/spin.h"
 #include "utils/dynahash.h"
@@ -387,6 +388,8 @@ hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
 
 	if (flags & HASH_PARTITION)
 	{
+		Size		elementSize;
+
 		/* Doesn't make sense to partition a local hash table */
 		Assert(flags & HASH_SHARED_MEM);
 
@@ -398,6 +400,14 @@ hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
 		Assert(info->num_partitions == next_pow2_int(info->num_partitions));
 
 		hctl->num_partitions = info->num_partitions;
+
+		/*
+		 * Allocate a permanent dummy head element for the freelist; the
+		 * atomic freelist operations below assume it is always present.
+		 */
+		elementSize = MAXALIGN(sizeof(HASHELEMENT)) + MAXALIGN(hctl->entrysize);
+		hctl->freeList = (HASHELEMENT *) hashp->alloc(elementSize);
+		hctl->freeList->link = NULL;	/* freelist starts out empty */
 	}
 
 	if (flags & HASH_SEGMENT)
@@ -912,22 +921,59 @@ hash_search_with_hash_value(HTAB *hashp,
 				/* use volatile pointer to prevent code rearrangement */
 				volatile HASHHDR *hctlv = hctl;
 
-				/* if partitioned, must lock to touch nentries and freeList */
+				/*
+				 * if partitioned, must use atomic ops to touch nentries
+				 * and freeList
+				 */
 				if (IS_PARTITIONED(hctlv))
-					SpinLockAcquire(&hctlv->mutex);
-
-				Assert(hctlv->nentries > 0);
-				hctlv->nentries--;
+				{
+					uint64		freelistval;
+
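+					/* atomically decrement the shared entry count */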
+					pg_atomic_sub_fetch_u64((volatile pg_atomic_uint64 *) &hctlv->nentries, 1);
+
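+					/* atomically unlink the record from its hash bucket's chain */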
+					pg_atomic_write_u64((volatile pg_atomic_uint64 *) prevBucketPtr,
+										*(uint64 *) currBucket);
+
+					/*
+					 * To push an entry onto the freelist: save the current
+					 * head value, link the entry in front of it, and then
+					 * publish the entry with compare-and-exchange.  If some
+					 * other backend has changed the head in the meantime,
+					 * the CAS fails and the whole sequence must be retried
+					 * in a loop.  Note that freed elements are always added
+					 * at the second slot of the freelist, behind the
+					 * permanent dummy head, as there is no way to replace
+					 * the head itself using the currently supported atomic
+					 * operations.
+					 */
+					while (true)
+					{
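+						/* snapshot the first free entry (the dummy head's link) */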
+						freelistval = pg_atomic_read_u64((volatile pg_atomic_uint64 *) hctlv->freeList);
+
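+						/* link the freed entry in front of the saved first entry */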
+						pg_atomic_write_u64((volatile pg_atomic_uint64 *) currBucket,
+											freelistval);
+						if (pg_atomic_compare_exchange_u64((volatile pg_atomic_uint64 *) hctlv->freeList,
+														   &freelistval,
+														   (uint64) currBucket))
+							break;
+					}
+				}
+				else
+				{
+					Assert(hctlv->nentries > 0);
+					hctlv->nentries--;
 
-				/* remove record from hash bucket's chain. */
-				*prevBucketPtr = currBucket->link;
+					/* remove record from hash bucket's chain. */
+					*prevBucketPtr = currBucket->link;
 
-				/* add the record to the freelist for this table.  */
-				currBucket->link = hctlv->freeList;
-				hctlv->freeList = currBucket;
-
-				if (IS_PARTITIONED(hctlv))
-					SpinLockRelease(&hctlv->mutex);
+					/* add the record to the freelist for this table.  */
+					currBucket->link = hctlv->freeList;
+					hctlv->freeList = currBucket;
+				}
 
 				/*
 				 * better hope the caller is synchronizing access to this
@@ -1152,36 +1191,70 @@ get_hash_entry(HTAB *hashp)
 	volatile HASHHDR *hctlv = hashp->hctl;
 	HASHBUCKET	newElement;
 
-	for (;;)
+	if (IS_PARTITIONED(hctlv))
 	{
-		/* if partitioned, must lock to touch nentries and freeList */
-		if (IS_PARTITIONED(hctlv))
-			SpinLockAcquire(&hctlv->mutex);
+		uint64		newElementVal;
 
-		/* try to get an entry from the freelist */
-		newElement = hctlv->freeList;
-		if (newElement != NULL)
-			break;
-
-		/* no free elements.  allocate another chunk of buckets */
-		if (IS_PARTITIONED(hctlv))
-			SpinLockRelease(&hctlv->mutex);
-
-		if (!element_alloc(hashp, hctlv->nelem_alloc))
+		/* must use atomic ops to touch nentries and freeList */
+		for (;;)
 		{
-			/* out of memory */
-			return NULL;
+			/*
+			 * To pop an entry from the freelist: save the current head
+			 * value, then detach that entry with compare-and-exchange.
+			 * If some other backend has changed the head in the meantime,
+			 * the CAS fails and the operation must be retried in a loop.
+			 * Note that entries are always removed from the second slot of
+			 * the freelist, behind the permanent dummy head, as there is
+			 * no way to pop the head itself using the currently supported
+			 * atomic operations.
+			 */
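+			/* snapshot the first free entry (the dummy head's link) */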
+			newElementVal = pg_atomic_read_u64((volatile pg_atomic_uint64 *) hctlv->freeList);
+			newElement = (HASHELEMENT *) newElementVal;
+			if (newElement != NULL)
+			{
+				/* remove entry from freelist */
+				if (pg_atomic_compare_exchange_u64((volatile pg_atomic_uint64 *) hctlv->freeList,
+												   &newElementVal,
+												   *(uint64 *) newElement))
+					break;
+			}
+			else
+			{
+				/* no free elements.  allocate another chunk of buckets */
+				if (!element_alloc(hashp, hctlv->nelem_alloc))
+				{
+					/* out of memory */
+					return NULL;
+				}
+			}
 		}
+
+		/* bump nentries */
+		pg_atomic_add_fetch_u64((volatile pg_atomic_uint64 *) &hctlv->nentries, 1);
 	}
+	else
+	{
+		for (;;)
+		{
+			/* try to get an entry from the freelist */
+			newElement = hctlv->freeList;
+			if (newElement != NULL)
+				break;
 
-	/* remove entry from freelist, bump nentries */
-	hctlv->freeList = newElement->link;
-	hctlv->nentries++;
+			if (!element_alloc(hashp, hctlv->nelem_alloc))
+			{
+				/* out of memory */
+				return NULL;
+			}
+		}
 
-	if (IS_PARTITIONED(hctlv))
-		SpinLockRelease(&hctlv->mutex);
+		/* remove entry from freelist, bump nentries */
+		hctlv->freeList = newElement->link;
+		hctlv->nentries++;
+	}
 
 	return newElement;
 }
 
 /*
@@ -1536,14 +1608,39 @@ element_alloc(HTAB *hashp, int nelem)
 
-	/* if partitioned, must lock to touch freeList */
+	/* if partitioned, must use atomic ops to touch freeList */
 	if (IS_PARTITIONED(hctlv))
-		SpinLockAcquire(&hctlv->mutex);
-
-	/* freelist could be nonempty if two backends did this concurrently */
-	firstElement->link = hctlv->freeList;
-	hctlv->freeList = prevElement;
+	{
+		uint64		freelistval;
+
+		/*
+		 * To publish the prepared chain, point the freelist head's link
+		 * at it with compare-and-exchange.  If some other backend has
+		 * changed the head in the meantime, the CAS fails and the
+		 * operation must be retried in a loop.  Note that the chain is
+		 * always spliced in at the second slot of the freelist, behind
+		 * the permanent dummy head, as there is no way to replace the
+		 * head itself using the currently supported atomic operations;
+		 * the dummy head itself is allocated once in hash_create.
+		 */
+		while (true)
+		{
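+			/* snapshot the first free entry (the dummy head's link) */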
+			freelistval = pg_atomic_read_u64((volatile pg_atomic_uint64 *) hctlv->freeList);
 
-	if (IS_PARTITIONED(hctlv))
-		SpinLockRelease(&hctlv->mutex);
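+			/* attach the old free entries after the new chunk's tail */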
+			pg_atomic_write_u64((volatile pg_atomic_uint64 *) firstElement,
+								freelistval);
+			if (pg_atomic_compare_exchange_u64((volatile pg_atomic_uint64 *) hctlv->freeList,
+											   &freelistval,
+											   (uint64) prevElement))
+				break;
+		}
+	}
+	else
+	{
+		/* freelist could be nonempty if two backends did this concurrently */
+		firstElement->link = hctlv->freeList;
+		hctlv->freeList = prevElement;
+	}
 
 	return true;
 }
