diff --git a/src/backend/access/hash/README b/src/backend/access/hash/README
index cd4e058..4082581 100644
--- a/src/backend/access/hash/README
+++ b/src/backend/access/hash/README
@@ -132,15 +132,6 @@ long-term locking since there is a (small) risk of deadlock, which we must
 be able to detect.  Buffer context locks are used for short-term access
 control to individual pages of the index.
 
-We define the following lmgr locks for a hash index:
-
-LockPage(rel, 0) represents the right to modify the hash-code-to-bucket
-mapping.  A process attempting to enlarge the hash table by splitting a
-bucket must exclusive-lock this lock before modifying the metapage data
-representing the mapping.  Processes intending to access a particular
-bucket must share-lock this lock until they have acquired lock on the
-correct target bucket.
-
 LockPage(rel, page), where page is the page number of a hash bucket page,
 represents the right to split or compact an individual bucket.  A process
 splitting a bucket must exclusive-lock both old and new halves of the
@@ -150,7 +141,10 @@ insertions must share-lock the bucket they are scanning or inserting into.
 (It is okay to allow concurrent scans and insertions.)
 
 The lmgr lock IDs corresponding to overflow pages are currently unused.
-These are available for possible future refinements.
+These are available for possible future refinements.  LockPage(rel, 0)
+is also currently undefined (it was previously used to represent the right
+to modify the hash-code-to-bucket mapping, but it is no longer needed for
+that purpose).
 
 Note that these lock definitions are conceptually distinct from any sort
 of lock on the pages whose numbers they share.  A process must also obtain
@@ -165,9 +159,7 @@ hash index code, since a process holding one of these locks could block
 waiting for an unrelated lock held by another process.  If that process
 then does something that requires exclusive lock on the bucket, we have
 deadlock.  Therefore the bucket locks must be lmgr locks so that deadlock
-can be detected and recovered from.  This also forces the page-zero lock
-to be an lmgr lock, because as we'll see below it is held while attempting
-to acquire a bucket lock, and so it could also participate in a deadlock.
+can be detected and recovered from.
 
 Processes must obtain read (share) buffer context lock on any hash index
 page while reading it, and write (exclusive) lock while modifying it.
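
The code changes later in this patch repeatedly toggle the metapage's buffer
content lock on and off while keeping the buffer pinned, via
_hash_chgbufaccess().  Roughly, that helper behaves as in the following sketch
(an approximation only; the real definition is in hashpage.c, and
HASH_READ/HASH_WRITE/HASH_NOLOCK come from access/hash.h):

#include "postgres.h"
#include "access/hash.h"
#include "storage/bufmgr.h"

/*
 * Sketch of _hash_chgbufaccess(): change the content lock held on an
 * already-pinned buffer from 'from_access' to 'to_access'.  HASH_NOLOCK
 * means no content lock; HASH_READ and HASH_WRITE correspond to shared and
 * exclusive buffer content locks.
 */
static void
chgbufaccess_sketch(Buffer buf, int from_access, int to_access)
{
	if (from_access == HASH_WRITE)
		MarkBufferDirty(buf);	/* the page may have been modified */
	if (from_access != HASH_NOLOCK)
		LockBuffer(buf, BUFFER_LOCK_UNLOCK);
	if (to_access != HASH_NOLOCK)
		LockBuffer(buf, to_access == HASH_WRITE ?
				   BUFFER_LOCK_EXCLUSIVE : BUFFER_LOCK_SHARE);
}

The point that matters for this patch is that dropping and retaking the
content lock this way never releases the pin, so the metapage pointer obtained
from the buffer remains valid across the retry loops introduced below.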
@@ -195,12 +187,14 @@ track of available overflow pages.
 
 The reader algorithm is:
 
-	share-lock page 0 (to prevent active split)
 	read/sharelock meta page
-	compute bucket number for target hash key
-	release meta page
-	share-lock bucket page (to prevent split/compact of this bucket)
-	release page 0 share-lock
+	loop:
+		compute bucket number for target hash key
+		release meta page
+		if (correct bucket page is already locked)
+			break
+		release any existing bucket page lock (if a concurrent split happened)
+		share-lock bucket page
+		re-read/sharelock meta page
 -- then, per read request:
 	read/sharelock current page of bucket
 		step to next page if necessary (no chaining of locks)
@@ -209,10 +203,12 @@ The reader algorithm is:
 -- at scan shutdown:
 	release bucket share-lock
 
-By holding the page-zero lock until lock on the target bucket is obtained,
-the reader ensures that the target bucket calculation is valid (otherwise
-the bucket might be split before the reader arrives at it, and the target
-entries might go into the new bucket).  Holding the bucket sharelock for
+We can't hold the metapage buffer content lock (an lwlock) while waiting for
+the heavyweight lock on the target bucket, because that might result in an
+undetected deadlock (lwlocks do not participate in deadlock detection).
+Instead, we relock the metapage after acquiring the bucket page lock and check
+whether the bucket has been split.  If not, we're done.  If so, we release our
+previously-acquired lock and repeat the process using the new bucket number.
+Holding the bucket sharelock for
 the remainder of the scan prevents the reader's current-tuple pointer from
 being invalidated by splits or compactions.  Notice that the reader's lock
 does not prevent other buckets from being split or compacted.
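
The recheck works because a split changes the metapage fields that drive the
key-to-bucket mapping.  _hash_hashkey2bucket(), called by the code changes
below, computes roughly the following (a sketch; the real function lives in
hashutil.c):

#include "postgres.h"
#include "access/hash.h"

/*
 * Sketch of the hash-value-to-bucket mapping.  maxbucket, highmask and
 * lowmask are read from the metapage while its content lock is held.
 */
static Bucket
hashkey2bucket_sketch(uint32 hashkey, uint32 maxbucket,
					  uint32 highmask, uint32 lowmask)
{
	Bucket		bucket = hashkey & highmask;

	if (bucket > maxbucket)
		bucket &= lowmask;		/* that bucket doesn't exist yet; wrap down */
	return bucket;
}

A split increments maxbucket (and occasionally advances the masks), so a key
that mapped to the bucket being split can map to the newly-created bucket
afterwards.  That is exactly the case the loops below detect by comparing the
freshly computed block number against the one already locked.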
@@ -229,12 +225,14 @@ as it was before.
 
 The insertion algorithm is rather similar:
 
-	share-lock page 0 (to prevent active split)
 	read/sharelock meta page
-	compute bucket number for target hash key
-	release meta page
-	share-lock bucket page (to prevent split/compact of this bucket)
-	release page 0 share-lock
+	loop:
+		compute bucket number for target hash key
+		release meta page
+		if (correct bucket page is already locked)
+			break
+		release any existing bucket page lock (if a concurrent split happened)
+		share-lock bucket page
+		re-read/sharelock meta page
 -- (so far same as reader)
 	read/exclusive-lock current page of bucket
 	if full, release, read/exclusive-lock next page; repeat as needed
@@ -285,10 +283,9 @@ existing bucket in two, thereby lowering the fill ratio:
 	>> see below about acquiring needed extra space
 	Release X-locks of old and new buckets
 
-Note the page zero and metapage locks are not held while the actual tuple
-rearrangement is performed, so accesses to other buckets can proceed in
-parallel; in fact, it's possible for multiple bucket splits to proceed
-in parallel.
+Note the metapage lock is not held while the actual tuple rearrangement is
+performed, so accesses to other buckets can proceed in parallel; in fact,
+it's possible for multiple bucket splits to proceed in parallel.
 
 Split's attempt to X-lock the old bucket number could fail if another
 process holds S-lock on it.  We do not want to wait if that happens, first
diff --git a/src/backend/access/hash/hashinsert.c b/src/backend/access/hash/hashinsert.c
index 66084f4..c534372 100644
--- a/src/backend/access/hash/hashinsert.c
+++ b/src/backend/access/hash/hashinsert.c
@@ -32,6 +32,8 @@ _hash_doinsert(Relation rel, IndexTuple itup)
 	Buffer		metabuf;
 	HashMetaPage metap;
 	BlockNumber blkno;
+	BlockNumber oldblkno = InvalidBlockNumber;
+	bool		retry = false;
 	Page		page;
 	HashPageOpaque pageopaque;
 	Size		itemsz;
@@ -49,12 +51,6 @@ _hash_doinsert(Relation rel, IndexTuple itup)
 	itemsz = MAXALIGN(itemsz);	/* be safe, PageAddItem will do this but we
 								 * need to be consistent */
 
-	/*
-	 * Acquire shared split lock so we can compute the target bucket safely
-	 * (see README).
-	 */
-	_hash_getlock(rel, 0, HASH_SHARE);
-
 	/* Read the metapage */
 	metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ, LH_META_PAGE);
 	metap = HashPageGetMeta(BufferGetPage(metabuf));
@@ -75,24 +71,44 @@ _hash_doinsert(Relation rel, IndexTuple itup)
 			errhint("Values larger than a buffer page cannot be indexed.")));
 
 	/*
-	 * Compute the target bucket number, and convert to block number.
+	 * Loop until we get a lock on the correct target bucket.
 	 */
-	bucket = _hash_hashkey2bucket(hashkey,
-								  metap->hashm_maxbucket,
-								  metap->hashm_highmask,
-								  metap->hashm_lowmask);
+	for (;;)
+	{
+		/*
+		 * Compute the target bucket number, and convert to block number.
+		 */
+		bucket = _hash_hashkey2bucket(hashkey,
+									  metap->hashm_maxbucket,
+									  metap->hashm_highmask,
+									  metap->hashm_lowmask);
 
-	blkno = BUCKET_TO_BLKNO(metap, bucket);
+		blkno = BUCKET_TO_BLKNO(metap, bucket);
 
-	/* release lock on metapage, but keep pin since we'll need it again */
-	_hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
+		/* Release metapage lock, but keep pin. */
+		_hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
 
-	/*
-	 * Acquire share lock on target bucket; then we can release split lock.
-	 */
-	_hash_getlock(rel, blkno, HASH_SHARE);
+		/*
+		 * If the previous iteration of this loop locked what is still the
+		 * correct target bucket, we are done.  Otherwise, drop any old lock
+		 * and lock what now appears to be the correct bucket.
+		 */
+		if (retry)
+		{
+			if (oldblkno == blkno)
+				break;
+			_hash_droplock(rel, oldblkno, HASH_SHARE);
+		}
+		_hash_getlock(rel, blkno, HASH_SHARE);
 
-	_hash_droplock(rel, 0, HASH_SHARE);
+		/*
+		 * Reacquire metapage lock and check that no bucket split has taken
+		 * place while we were awaiting the bucket lock.
+		 */
+		_hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_READ);
+		oldblkno = blkno;
+		retry = true;
+	}
 
 	/* Fetch the primary bucket page for the bucket */
 	buf = _hash_getbuf(rel, blkno, HASH_WRITE, LH_BUCKET_PAGE);
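
To make the retry path concrete, here is a hypothetical standalone demo (plain
C, not backend code) applying the same mapping rule before and after a split
of bucket 0 in a four-bucket index, for which the metapage would hold
maxbucket = 3, highmask = 7, lowmask = 3 under the usual initialization:

#include <stdio.h>
#include <stdint.h>

/* Same mapping rule as _hash_hashkey2bucket(), on plain integers. */
static uint32_t
key_to_bucket(uint32_t hashkey, uint32_t maxbucket,
			  uint32_t highmask, uint32_t lowmask)
{
	uint32_t	bucket = hashkey & highmask;

	if (bucket > maxbucket)
		bucket &= lowmask;
	return bucket;
}

int
main(void)
{
	uint32_t	hashkey = 0x2C;	/* low bits 101100, so hashkey & 7 == 4 */

	/* Before the split: buckets 0..3, highmask = 7, lowmask = 3. */
	printf("before: bucket %u\n", (unsigned) key_to_bucket(hashkey, 3, 7, 3));

	/* After bucket 0 is split, creating bucket 4: maxbucket becomes 4. */
	printf("after:  bucket %u\n", (unsigned) key_to_bucket(hashkey, 4, 7, 3));

	return 0;					/* prints bucket 0, then bucket 4 */
}

An insert that computed bucket 0 before such a split, but only obtained the
bucket lock afterwards, recomputes bucket 4 on the next loop iteration, sees
oldblkno != blkno, and moves its lock to the correct bucket.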
diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c
index 6b647a8..c0b6eb0 100644
--- a/src/backend/access/hash/hashpage.c
+++ b/src/backend/access/hash/hashpage.c
@@ -57,9 +57,9 @@ static void _hash_splitbucket(Relation rel, Buffer metabuf,
 /*
  * _hash_getlock() -- Acquire an lmgr lock.
  *
- * 'whichlock' should be zero to acquire the split-control lock, or the
- * block number of a bucket's primary bucket page to acquire the per-bucket
- * lock.  (See README for details of the use of these locks.)
+ * 'whichlock' should be the block number of a bucket's primary bucket page to
+ * acquire the per-bucket lock.  (See README for details of the use of these
+ * locks.)
  *
  * 'access' must be HASH_SHARE or HASH_EXCLUSIVE.
  */
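
For reference, _hash_getlock(), _hash_try_getlock() and _hash_droplock() (all
used by the retry loops) are thin shims over the lmgr page-lock API.  A rough
sketch, assuming the usual USELOCKING() test from access/hash.h and the
LockPage/ConditionalLockPage/UnlockPage calls from storage/lmgr.h (see
hashpage.c for the real bodies):

#include "postgres.h"
#include "access/hash.h"
#include "storage/lmgr.h"

/* Blocking acquire of the per-bucket heavyweight lock. */
static void
getlock_sketch(Relation rel, BlockNumber whichlock, int access)
{
	if (USELOCKING(rel))		/* backend-local (e.g. temporary) relations skip locking */
		LockPage(rel, whichlock, access);
}

/* Non-blocking variant: returns false instead of sleeping. */
static bool
try_getlock_sketch(Relation rel, BlockNumber whichlock, int access)
{
	if (USELOCKING(rel))
		return ConditionalLockPage(rel, whichlock, access);
	return true;
}

/* Release a previously acquired per-bucket lock. */
static void
droplock_sketch(Relation rel, BlockNumber whichlock, int access)
{
	if (USELOCKING(rel))
		UnlockPage(rel, whichlock, access);
}

Because ConditionalLockPage() never sleeps, _hash_expandtable() below can call
_hash_try_getlock() while still holding the metapage content lock, which is
what the new comments in that function point out.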
@@ -507,21 +507,9 @@ _hash_expandtable(Relation rel, Buffer metabuf)
 	uint32		lowmask;
 
 	/*
-	 * Obtain the page-zero lock to assert the right to begin a split (see
-	 * README).
-	 *
-	 * Note: deadlock should be impossible here. Our own backend could only be
-	 * holding bucket sharelocks due to stopped indexscans; those will not
-	 * block other holders of the page-zero lock, who are only interested in
-	 * acquiring bucket sharelocks themselves.	Exclusive bucket locks are
-	 * only taken here and in hashbulkdelete, and neither of these operations
-	 * needs any additional locks to complete.	(If, due to some flaw in this
-	 * reasoning, we manage to deadlock anyway, it's okay to error out; the
-	 * index will be left in a consistent state.)
+	 * Write-lock the meta page.  It used to be necessary to acquire a
+	 * heavyweight lock to begin a split, but that is no longer required.
 	 */
-	_hash_getlock(rel, 0, HASH_EXCLUSIVE);
-
-	/* Write-lock the meta page */
 	_hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_WRITE);
 
 	_hash_checkpage(rel, metabuf, LH_META_PAGE);
@@ -571,6 +559,12 @@ _hash_expandtable(Relation rel, Buffer metabuf)
 	if (_hash_has_active_scan(rel, old_bucket))
 		goto fail;
 
+	/*
+	 * It's normally a bad idea to grab a heavyweight lock while holding
+	 * a buffer content lock, both because of deadlock risk and because
+	 * content locks should be held only briefly.  But since we are only
+	 * trylocking here it should be OK.
+	 */
 	if (!_hash_try_getlock(rel, start_oblkno, HASH_EXCLUSIVE))
 		goto fail;
 
@@ -587,6 +581,12 @@ _hash_expandtable(Relation rel, Buffer metabuf)
 	if (_hash_has_active_scan(rel, new_bucket))
 		elog(ERROR, "scan in progress on supposedly new bucket");
 
+	/*
+	 * It's normally a bad idea to grab a heavyweight lock while holding
+	 * a buffer content lock, both because of deadlock risk and because
+	 * content locks should be held only briefly.  But since we are only
+	 * trylocking here it should be OK.
+	 */
 	if (!_hash_try_getlock(rel, start_nblkno, HASH_EXCLUSIVE))
 		elog(ERROR, "could not get lock on supposedly new bucket");
 
@@ -663,9 +663,6 @@ _hash_expandtable(Relation rel, Buffer metabuf)
 	/* Write out the metapage and drop lock, but keep pin */
 	_hash_chgbufaccess(rel, metabuf, HASH_WRITE, HASH_NOLOCK);
 
-	/* Release split lock; okay for other splits to occur now */
-	_hash_droplock(rel, 0, HASH_EXCLUSIVE);
-
 	/* Relocate records to the new bucket */
 	_hash_splitbucket(rel, metabuf, old_bucket, new_bucket,
 					  start_oblkno, start_nblkno,
@@ -682,9 +679,6 @@ fail:
 
 	/* We didn't write the metapage, so just drop lock */
 	_hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
-
-	/* Release split lock */
-	_hash_droplock(rel, 0, HASH_EXCLUSIVE);
 }
 
 
diff --git a/src/backend/access/hash/hashsearch.c b/src/backend/access/hash/hashsearch.c
index 88a2ad1..b8abc6a 100644
--- a/src/backend/access/hash/hashsearch.c
+++ b/src/backend/access/hash/hashsearch.c
@@ -125,6 +125,8 @@ _hash_first(IndexScanDesc scan, ScanDirection dir)
 	uint32		hashkey;
 	Bucket		bucket;
 	BlockNumber blkno;
+	BlockNumber oldblkno = InvalidBlockNumber;
+	bool		retry = false;
 	Buffer		buf;
 	Buffer		metabuf;
 	Page		page;
@@ -184,35 +186,52 @@ _hash_first(IndexScanDesc scan, ScanDirection dir)
 
 	so->hashso_sk_hash = hashkey;
 
-	/*
-	 * Acquire shared split lock so we can compute the target bucket safely
-	 * (see README).
-	 */
-	_hash_getlock(rel, 0, HASH_SHARE);
-
 	/* Read the metapage */
 	metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ, LH_META_PAGE);
 	metap = HashPageGetMeta(BufferGetPage(metabuf));
 
 	/*
-	 * Compute the target bucket number, and convert to block number.
+	 * Loop until we get a lock on the correct target bucket.
 	 */
-	bucket = _hash_hashkey2bucket(hashkey,
-								  metap->hashm_maxbucket,
-								  metap->hashm_highmask,
-								  metap->hashm_lowmask);
-
-	blkno = BUCKET_TO_BLKNO(metap, bucket);
+	for (;;)
+	{
+		/*
+		 * Compute the target bucket number, and convert to block number.
+		 */
+		bucket = _hash_hashkey2bucket(hashkey,
+									  metap->hashm_maxbucket,
+									  metap->hashm_highmask,
+									  metap->hashm_lowmask);
+
+		blkno = BUCKET_TO_BLKNO(metap, bucket);
+
+		/* Release metapage lock, but keep pin. */
+		_hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
+
+		/*
+		 * If the previous iteration of this loop locked what is still the
+		 * correct target bucket, we are done.  Otherwise, drop any old lock
+		 * and lock what now appears to be the correct bucket.
+		 */
+		if (retry)
+		{
+			if (oldblkno == blkno)
+				break;
+			_hash_droplock(rel, oldblkno, HASH_SHARE);
+		}
+		_hash_getlock(rel, blkno, HASH_SHARE);
+
+		/*
+		 * Reacquire metapage lock and check that no bucket split has taken
+		 * place while we were awaiting the bucket lock.
+		 */
+		_hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_READ);
+		oldblkno = blkno;
+		retry = true;
+	}
 
 	/* done with the metapage */
-	_hash_relbuf(rel, metabuf);
-
-	/*
-	 * Acquire share lock on target bucket; then we can release split lock.
-	 */
-	_hash_getlock(rel, blkno, HASH_SHARE);
-
-	_hash_droplock(rel, 0, HASH_SHARE);
+	_hash_dropbuf(rel, metabuf);
 
 	/* Update scan opaque state to show we have lock on the bucket */
 	so->hashso_bucket = bucket;
