diff --git a/src/backend/utils/hash/dynahash.c b/src/backend/utils/hash/dynahash.c
index 6d0e0f5..210e1f2 100644
--- a/src/backend/utils/hash/dynahash.c
+++ b/src/backend/utils/hash/dynahash.c
@@ -195,6 +195,9 @@ static HASHSEGMENT seg_alloc(HTAB *hashp);
 static bool element_alloc(HTAB *hashp, int nelem);
 static bool dir_realloc(HTAB *hashp);
 static bool expand_table(HTAB *hashp);
+static HASHBUCKET get_matched_bucket(HTAB *hashp,
+				   const void *keyPtr, uint32 hashvalue,
+				   HASHBUCKET **prevBucketPtr);
 static HASHBUCKET get_hash_entry(HTAB *hashp);
 static void hdefault(HTAB *hashp);
 static int	choose_nelem_alloc(Size entrysize);
@@ -806,54 +809,15 @@ hash_search_with_hash_value(HTAB *hashp,
 							bool *foundPtr)
 {
 	HASHHDR    *hctl = hashp->hctl;
-	Size		keysize;
-	uint32		bucket;
-	long		segment_num;
-	long		segment_ndx;
-	HASHSEGMENT segp;
 	HASHBUCKET	currBucket;
 	HASHBUCKET *prevBucketPtr;
-	HashCompareFunc match;
 
 #if HASH_STATISTICS
 	hash_accesses++;
 	hctl->accesses++;
 #endif
 
-	/*
-	 * Do the initial lookup
-	 */
-	bucket = calc_bucket(hctl, hashvalue);
-
-	segment_num = bucket >> hashp->sshift;
-	segment_ndx = MOD(bucket, hashp->ssize);
-
-	segp = hashp->dir[segment_num];
-
-	if (segp == NULL)
-		hash_corrupted(hashp);
-
-	prevBucketPtr = &segp[segment_ndx];
-	currBucket = *prevBucketPtr;
-
-	/*
-	 * Follow collision chain looking for matching key
-	 */
-	match = hashp->match;		/* save one fetch in inner loop */
-	keysize = hashp->keysize;	/* ditto */
-
-	while (currBucket != NULL)
-	{
-		if (currBucket->hashvalue == hashvalue &&
-			match(ELEMENTKEY(currBucket), keyPtr, keysize) == 0)
-			break;
-		prevBucketPtr = &(currBucket->link);
-		currBucket = *prevBucketPtr;
-#if HASH_STATISTICS
-		hash_collisions++;
-		hctl->collisions++;
-#endif
-	}
+	currBucket = get_matched_bucket(hashp, keyPtr, hashvalue, &prevBucketPtr);
 
 	if (foundPtr)
 		*foundPtr = (bool) (currBucket != NULL);
@@ -915,6 +879,31 @@ hash_search_with_hash_value(HTAB *hashp,
 				elog(ERROR, "cannot insert into frozen hashtable \"%s\"",
 					 hashp->tabname);
 
+			/*
+			 * Check if it is time to split a bucket.  Can't split if running
+			 * in partitioned mode, nor if table is the subject of any active
+			 * hash_seq_search scans.  Strange order of these tests is to try
+			 * to check cheaper conditions first.
+			 */
+			if (!IS_PARTITIONED(hctl) &&
+			(hctl->nentries + 1) / (long) (hctl->max_bucket + 1) >= hctl->ffactor &&
+				!has_seq_scans(hashp))
+			{
+			/*
+			 * Do this before allocating the hash entry: if expand_table
+			 * errored out after an entry had been allocated but not yet
+			 * filled in, long-lived hash tables such as the relcache could
+			 * run into trouble during transaction abort cleanup.  Failure to
+			 * expand is not fatal; we just run at a higher fill factor.
+			 */
+				expand_table(hashp);
+
+			/*
+			 * Recompute the insertion point; expand_table may have moved it.
+			 */
+				get_matched_bucket(hashp, keyPtr, hashvalue, &prevBucketPtr);
+			}
+
 			currBucket = get_hash_entry(hashp);
 			if (currBucket == NULL)
 			{
@@ -938,27 +927,10 @@ hash_search_with_hash_value(HTAB *hashp,
 
 			/* copy key into record */
 			currBucket->hashvalue = hashvalue;
-			hashp->keycopy(ELEMENTKEY(currBucket), keyPtr, keysize);
+			hashp->keycopy(ELEMENTKEY(currBucket), keyPtr, hashp->keysize);
 
 			/* caller is expected to fill the data field on return */
 
-			/*
-			 * Check if it is time to split a bucket.  Can't split if running
-			 * in partitioned mode, nor if table is the subject of any active
-			 * hash_seq_search scans.  Strange order of these tests is to try
-			 * to check cheaper conditions first.
-			 */
-			if (!IS_PARTITIONED(hctl) &&
-			hctl->nentries / (long) (hctl->max_bucket + 1) >= hctl->ffactor &&
-				!has_seq_scans(hashp))
-			{
-				/*
-				 * NOTE: failure to expand table is not a fatal error, it just
-				 * means we have to run at higher fill factor than we wanted.
-				 */
-				expand_table(hashp);
-			}
-
 			return (void *) ELEMENTKEY(currBucket);
 	}
 
@@ -968,6 +940,60 @@ hash_search_with_hash_value(HTAB *hashp,
 }
 
 /*
+ * Find the entry matching the given key and hash value; returns NULL if
+ * none.  *prevBucketPtr receives the address of the link referencing the
+ * result (the chain's NULL terminator when there is no match).
+ */
+static HASHBUCKET
+get_matched_bucket(HTAB *hashp, const void *keyPtr, uint32 hashvalue,
+				   HASHBUCKET **prevBucketPtr)
+{
+	HASHHDR	   *hctl = hashp->hctl;
+	Size		keysize;
+	uint32		bucket;
+	long		segment_num;
+	long		segment_ndx;
+	HASHSEGMENT	segp;
+	HASHBUCKET	currBucket;
+	HashCompareFunc match;
+
+	/*
+	 * Do the initial lookup
+	 */
+	bucket = calc_bucket(hctl, hashvalue);
+
+	segment_num = bucket >> hashp->sshift;
+	segment_ndx = MOD(bucket, hashp->ssize);
+
+	segp = hashp->dir[segment_num];
+
+	if (segp == NULL)
+		hash_corrupted(hashp);
+
+	*prevBucketPtr = &segp[segment_ndx];
+	currBucket = **prevBucketPtr;
+
+	/*
+	 * Follow collision chain looking for matching key
+	 */
+	match = hashp->match;		/* save one fetch in inner loop */
+	keysize = hashp->keysize;	/* ditto */
+
+	while (currBucket != NULL)
+	{
+		if (currBucket->hashvalue == hashvalue &&
+			match(ELEMENTKEY(currBucket), keyPtr, keysize) == 0)
+			break;
+		*prevBucketPtr = &(currBucket->link);
+		currBucket = **prevBucketPtr;
+#if HASH_STATISTICS
+		hash_collisions++;
+		hctl->collisions++;
+#endif
+	}
+
+	return currBucket;
+}
+
+/*
  * create a new entry if possible
  */
 static HASHBUCKET

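For reference (not part of the patch), here is a minimal sketch of a caller that
exercises the changed HASH_ENTER path.  The table name and the DemoKey/DemoEntry
structs and demo_insert function are hypothetical; the dynahash calls
(hash_create, get_hash_value, hash_search_with_hash_value) are the existing API.

	#include "postgres.h"
	#include "utils/hsearch.h"

	typedef struct DemoKey
	{
		Oid			relid;
	} DemoKey;

	typedef struct DemoEntry
	{
		DemoKey		key;			/* hash key; must be first */
		int			refcount;
	} DemoEntry;

	static HTAB *demo_table = NULL;

	static DemoEntry *
	demo_insert(Oid relid)
	{
		DemoKey		key;
		DemoEntry  *entry;
		bool		found;
		uint32		hashvalue;

		if (demo_table == NULL)
		{
			HASHCTL		ctl;

			memset(&ctl, 0, sizeof(ctl));
			ctl.keysize = sizeof(DemoKey);
			ctl.entrysize = sizeof(DemoEntry);
			demo_table = hash_create("demo table", 128, &ctl,
									 HASH_ELEM | HASH_BLOBS);
		}

		key.relid = relid;
		hashvalue = get_hash_value(demo_table, &key);

		/*
		 * With the patch, a needed bucket split happens before the new
		 * entry is allocated and linked into its (possibly new) bucket.
		 */
		entry = (DemoEntry *) hash_search_with_hash_value(demo_table, &key,
														   hashvalue,
														   HASH_ENTER, &found);
		if (!found)
			entry->refcount = 0;
		entry->refcount++;

		return entry;
	}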