diff --git a/src/backend/nodes/tidbitmap.c b/src/backend/nodes/tidbitmap.c
index 8e91564..970635e 100644
--- a/src/backend/nodes/tidbitmap.c
+++ b/src/backend/nodes/tidbitmap.c
@@ -363,13 +363,16 @@ void
 tbm_free_shared_area(dsa_area *dsa, dsa_pointer dp)
 {
 	TBMSharedIteratorState *istate = dsa_get_address(dsa, dp);
-	PTEntryArray *ptbase = dsa_get_address(dsa, istate->pagetable);
+	PTEntryArray *ptbase;
 	PTIterationArray *ptpages;
 	PTIterationArray *ptchunks;
 
-	if (pg_atomic_sub_fetch_u32(&ptbase->refcount, 1) == 0)
-		dsa_free(dsa, istate->pagetable);
-
+	if (istate->pagetable)
+	{
+		ptbase = dsa_get_address(dsa, istate->pagetable);
+		if (pg_atomic_sub_fetch_u32(&ptbase->refcount, 1) == 0)
+			dsa_free(dsa, istate->pagetable);
+	}
 	if (istate->spages)
 	{
 		ptpages = dsa_get_address(dsa, istate->spages);
@@ -856,7 +859,7 @@ tbm_prepare_shared_iterate(TIDBitmap *tbm)
 			Assert(npages == tbm->npages);
 			Assert(nchunks == tbm->nchunks);
 		}
-		else
+		else if (tbm->status == TBM_ONE_PAGE)
 		{
 			/*
 			 * In one page mode allocate the space for one pagetable entry and
@@ -868,8 +871,8 @@ tbm_prepare_shared_iterate(TIDBitmap *tbm)
 			ptpages->index[0] = 0;
 		}
 
-		pg_atomic_init_u32(&ptbase->refcount, 0);
-
+		if (ptbase)
+			pg_atomic_init_u32(&ptbase->refcount, 0);
 		if (npages > 1)
 			qsort_arg((void *) (ptpages->index), npages, sizeof(int),
 					  tbm_shared_comparator, (void *) ptbase->ptentry);
@@ -899,7 +902,8 @@ tbm_prepare_shared_iterate(TIDBitmap *tbm)
 	 * increase the refcount by 1 so that while freeing the shared iterator
 	 * we don't free pagetable and iterator array until its refcount becomes 0.
 	 */
-	pg_atomic_add_fetch_u32(&ptbase->refcount, 1);
+	if (ptbase)
+		pg_atomic_add_fetch_u32(&ptbase->refcount, 1);
 	if (ptpages)
 		pg_atomic_add_fetch_u32(&ptpages->refcount, 1);
 	if (ptchunks)

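For context on the pattern the patch applies: it treats the shared page table as optional. For a TBM_EMPTY bitmap no page table is ever allocated in DSA, so istate->pagetable is invalid and every refcount operation on it (the init, the leader's increment, and the decrement-and-free in tbm_free_shared_area) has to be skipped, while the one-page setup path now runs only when tbm->status is TBM_ONE_PAGE. Below is a minimal standalone sketch of that guarded-refcount idiom, using C11 atomics and plain malloc/free as stand-ins for the pg_atomic_* and dsa_* APIs; the type and function names are illustrative, not part of the patch or of PostgreSQL.

/*
 * Sketch only: a shared state whose page table may be absent (the
 * TBM_EMPTY case), so every refcount operation is guarded by a
 * validity check.  Names here are hypothetical stand-ins.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct PageTable
{
	atomic_uint	refcount;
	/* ... page table entries would follow ... */
} PageTable;

typedef struct SharedIteratorState
{
	PageTable  *pagetable;		/* NULL when the bitmap is empty */
} SharedIteratorState;

/* Publish the state; the creating backend takes the first reference. */
static void
prepare_shared_iterate(SharedIteratorState *istate, bool empty)
{
	istate->pagetable = NULL;
	if (!empty)
	{
		istate->pagetable = malloc(sizeof(PageTable));
		atomic_init(&istate->pagetable->refcount, 0);
	}
	/* Guarded increment, mirroring the patched code path. */
	if (istate->pagetable)
		atomic_fetch_add(&istate->pagetable->refcount, 1);
}

/* Each attaching worker takes a reference the same guarded way. */
static void
attach_shared_iterate(SharedIteratorState *istate)
{
	if (istate->pagetable)
		atomic_fetch_add(&istate->pagetable->refcount, 1);
}

/* Last backend out frees the table; an empty bitmap has nothing to free. */
static void
free_shared_area(SharedIteratorState *istate)
{
	if (istate->pagetable &&
		atomic_fetch_sub(&istate->pagetable->refcount, 1) == 1)
	{
		free(istate->pagetable);
		istate->pagetable = NULL;
	}
}

int
main(void)
{
	SharedIteratorState istate;

	/* Empty bitmap: both calls are safe no-ops on the page table. */
	prepare_shared_iterate(&istate, true);
	free_shared_area(&istate);

	/* Non-empty bitmap: creator plus one worker, freed on last release. */
	prepare_shared_iterate(&istate, false);
	attach_shared_iterate(&istate);
	free_shared_area(&istate);	/* refcount 2 -> 1, not freed */
	free_shared_area(&istate);	/* refcount 1 -> 0, freed */

	printf("done\n");
	return 0;
}

Note that the patch's pg_atomic_sub_fetch_u32(&ptbase->refcount, 1) == 0 test corresponds to atomic_fetch_sub(..., 1) == 1 in the sketch, since C11 fetch_sub returns the value the counter held before the decrement.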