This is an automated email from the ASF dual-hosted git repository.

chenjinbao1989 pushed a commit to branch cbdb-postgres-merge
in repository https://gitbox.apache.org/repos/asf/cloudberry.git


The following commit(s) were added to refs/heads/cbdb-postgres-merge by this push:
     new 3c12bca960d Fix conflict for access/heap access/spgist
3c12bca960d is described below

commit 3c12bca960da3f2756e5871a64629c02af2c34ef
Author: Jinbao Chen <chenjinbao1...@gmail.com>
AuthorDate: Wed Jul 16 09:51:34 2025 +0800

    Fix conflict for access/heap access/spgist
---
 src/backend/access/heap/heapam.c            |  270 ++-----
 src/backend/access/heap/heapam_visibility.c |   20 -
 src/backend/access/heap/pruneheap.c         |   21 -
 src/backend/access/heap/vacuumlazy.c        | 1009 +--------------------------
 src/backend/access/spgist/spginsert.c       |   36 +-
 src/backend/access/table/table.c            |    9 +-
 6 files changed, 50 insertions(+), 1315 deletions(-)

diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 968323bfb96..883936f481e 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -82,25 +82,19 @@
 #include "utils/faultinjector.h"
 
 
-static TM_Result heap_update_internal(Relation relation, ItemPointer otid, HeapTuple newtup,
-										  CommandId cid, Snapshot crosscheck, bool wait,
-										  TM_FailureData *tmfd, LockTupleMode *lockmode, bool simple);
-
 static HeapTuple heap_prepare_insert(Relation relation, HeapTuple tup,
										 TransactionId xid, CommandId cid, int options);
 static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf,
								  Buffer newbuf, HeapTuple oldtup,
								  HeapTuple newtup, HeapTuple old_key_tuple,
								  bool all_visible_cleared, bool new_all_visible_cleared);
-<<<<<<< HEAD
-=======
+
 #ifdef USE_ASSERT_CHECKING
 static void check_lock_if_inplace_updateable_rel(Relation relation,
												 ItemPointer otid,
												 HeapTuple newtup);
 static void check_inplace_rel_lock(HeapTuple oldtup);
 #endif
->>>>>>> REL_16_9
 static Bitmapset *HeapDetermineColumnsInfo(Relation relation,
										   Bitmapset *interesting_cols,
										   Bitmapset *external_cols,
@@ -134,11 +128,7 @@ static bool ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status
 static void index_delete_sort(TM_IndexDeleteOp *delstate);
 static int     bottomup_sort_and_shrink(TM_IndexDeleteOp *delstate);
 static XLogRecPtr log_heap_new_cid(Relation relation, HeapTuple tup);
-<<<<<<< HEAD
-static HeapTuple ExtractReplicaIdentity(Relation rel, HeapTuple tup, bool key_required,
-=======
 static HeapTuple ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool key_required,
->>>>>>> REL_16_9
										bool *copy);
 
 
@@ -486,29 +476,25 @@ heapgetpage(TableScanDesc sscan, BlockNumber block)
 
        for (lineoff = FirstOffsetNumber; lineoff <= lines; lineoff++)
        {
-<<<<<<< HEAD
-               if (ItemIdIsNormal(lpp))
-               {
-                       HeapTupleData loctup;
-                       bool            valid;
-                       HeapTupleHeader theader = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
-=======
                ItemId          lpp = PageGetItemId(page, lineoff);
                HeapTupleData loctup;
                bool            valid;
->>>>>>> REL_16_9
+               HeapTupleHeader theader = (HeapTupleHeader) PageGetItem((Page) page, lpp);
 
                if (!ItemIdIsNormal(lpp))
                        continue;
 
-<<<<<<< HEAD
-                       if (all_visible)
-                       {
-                               valid = true;
-                       }
-                       else
-                       {
-                               /*
+
+               loctup.t_tableOid = RelationGetRelid(scan->rs_base.rs_rd);
+               loctup.t_data = (HeapTupleHeader) PageGetItem(page, lpp);
+               loctup.t_len = ItemIdGetLength(lpp);
+               ItemPointerSet(&(loctup.t_self), block, lineoff);
+
+               if (all_visible)
+                       valid = true;
+               else
+               {
+                       /*
                                 * GPDB: We have a one-item cache for the common case that a
                                 * lot of tuples have the same visibility info. Don't use the
                                 * cache, if the tuple was ever deleted, though (i.e. if xmax
@@ -517,43 +503,32 @@ heapgetpage(TableScanDesc sscan, BlockNumber block)
                                 * with locked-only tuples and multi-XIDs, so it seems better
                                 * to just give up early.
                                 */
-                               bool            use_cache;
+                       bool            use_cache;
 
-                               if ((theader->t_infomask & HEAP_XMAX_INVALID) != 0 ||
-                                       HEAP_XMAX_IS_LOCKED_ONLY(theader->t_infomask))
-                                       use_cache = true;
-                               else
-                                       use_cache = false;
+                       if ((theader->t_infomask & HEAP_XMAX_INVALID) != 0 ||
+                               HEAP_XMAX_IS_LOCKED_ONLY(theader->t_infomask))
+                               use_cache = true;
+                       else
+                               use_cache = false;
 
-                               if (use_cache &&
-                                       t_xmin == HeapTupleHeaderGetXmin(theader) &&
-                                       t_cid == HeapTupleHeaderGetRawCommandId(theader))
-                               {
-                                       valid = true;
-                               }
-                               else
-                               {
-                                       valid = HeapTupleSatisfiesVisibility(scan->rs_base.rs_rd,
-                                                                                                &loctup, snapshot, buffer);
+                       if (use_cache &&
+                               t_xmin == HeapTupleHeaderGetXmin(theader) &&
+                               t_cid == HeapTupleHeaderGetRawCommandId(theader))
+                       {
+                               valid = true;
+                       }
+                       else
+                       {
+                               valid = HeapTupleSatisfiesVisibility(scan->rs_base.rs_rd,
+                                                                                        &loctup, snapshot, buffer);
 
-                                       if (valid && use_cache)
-                                       {
-                                               t_xmin = HeapTupleHeaderGetXmin(loctup.t_data);
-                                               t_cid = HeapTupleHeaderGetRawCommandId(loctup.t_data);
-                                       }
+                               if (valid && use_cache)
+                               {
+                                       t_xmin = HeapTupleHeaderGetXmin(loctup.t_data);
+                                       t_cid = HeapTupleHeaderGetRawCommandId(loctup.t_data);
                                }
                        }
-=======
-               loctup.t_tableOid = RelationGetRelid(scan->rs_base.rs_rd);
-               loctup.t_data = (HeapTupleHeader) PageGetItem(page, lpp);
-               loctup.t_len = ItemIdGetLength(lpp);
-               ItemPointerSet(&(loctup.t_self), block, lineoff);
->>>>>>> REL_16_9
-
-               if (all_visible)
-                       valid = true;
-               else
-                       valid = HeapTupleSatisfiesVisibility(&loctup, snapshot, buffer);
+               }
 
                HeapCheckForSerializableConflictOut(valid, scan->rs_base.rs_rd,
                                                                                        &loctup, buffer, snapshot);
@@ -845,14 +820,12 @@ heapgettup(HeapScanDesc scan,
         */
        while (block != InvalidBlockNumber)
        {
-<<<<<<< HEAD
         CHECK_FOR_INTERRUPTS();
-=======
+
                heapgetpage((TableScanDesc) scan, block);
                LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
                page = heapgettup_start_page(scan, dir, &linesleft, &lineoff);
 continue_page:
->>>>>>> REL_16_9
 
                /*
                 * Only continue scanning the page while we have lines left.
@@ -869,19 +842,9 @@ continue_page:
                        if (!ItemIdIsNormal(lpp))
                                continue;
 
-<<<<<<< HEAD
-                               /*
-                                * if current tuple qualifies, return it.
-                                */
-                               valid = HeapTupleSatisfiesVisibility(scan->rs_base.rs_rd,
-                                                                                        tuple,
-                                                                                        snapshot,
-                                                                                        scan->rs_cbuf);
-=======
                        tuple->t_data = (HeapTupleHeader) PageGetItem(page, lpp);
                        tuple->t_len = ItemIdGetLength(lpp);
                        ItemPointerSet(&(tuple->t_self), block, lineoff);
->>>>>>> REL_16_9
 
                        visible = HeapTupleSatisfiesVisibility(tuple,
                                                                                           scan->rs_base.rs_snapshot,
@@ -981,11 +944,9 @@ heapgettup_pagemode(HeapScanDesc scan,
         */
        while (block != InvalidBlockNumber)
        {
-<<<<<<< HEAD
         CHECK_FOR_INTERRUPTS();
 
-               while (linesleft > 0)
-=======
+
                heapgetpage((TableScanDesc) scan, block);
                page = BufferGetPage(scan->rs_cbuf);
                TestForOldSnapshot(scan->rs_base.rs_snapshot, scan->rs_base.rs_rd, page);
@@ -996,7 +957,6 @@ heapgettup_pagemode(HeapScanDesc scan,
 continue_page:
 
                for (; linesleft > 0; linesleft--, lineindex += dir)
->>>>>>> REL_16_9
                {
                        ItemId          lpp;
                        OffsetNumber lineoff;
@@ -1439,18 +1399,11 @@ heap_scan_flags(Relation relation)
  * then tuple->t_data is set to NULL, *userbuf is set to InvalidBuffer,
  * and false is returned.
  *
-<<<<<<< HEAD
- * If the tuple is found but fails the time qual check, then false is returned
- * and *userbuf is set to InvalidBuffer, but tuple->t_data is left pointing
- * to the tuple.  (Note that it is unsafe to dereference tuple->t_data in
- * this case, but callers might choose to test it for NULL-ness.)
-=======
  * If the tuple is found but fails the time qual check, then the behavior
  * depends on the keep_buf parameter.  If keep_buf is false, the results
  * are the same as for the tuple-not-found case.  If keep_buf is true,
  * then tuple->t_data and *userbuf are returned as for the success case,
  * and again the caller must unpin the buffer; but false is returned.
->>>>>>> REL_16_9
  *
  * heap_fetch does not follow HOT chains: only the exact TID requested will
  * be fetched.
@@ -1578,10 +1531,7 @@ heap_fetch_extended(Relation relation,
        {
                ReleaseBuffer(buffer);
                *userbuf = InvalidBuffer;
-<<<<<<< HEAD
-=======
                tuple->t_data = NULL;
->>>>>>> REL_16_9
        }
 
        return false;
@@ -2759,7 +2709,6 @@ heap_delete(Relation relation, ItemPointer tid,
        tp.t_self = *tid;
 
 l1:
-
        /*
         * If we didn't pin the visibility map page and the page has become all
         * visible while we were busy locking the buffer, we'll have to unlock and
@@ -2773,7 +2722,6 @@ l1:
                LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
        }
 
-<<<<<<< HEAD
        lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
        Assert(ItemIdIsNormal(lp));
 
@@ -2784,9 +2732,6 @@ l1:
 
 l1:
        result = HeapTupleSatisfiesUpdate(relation, &tp, cid, buffer);
-=======
-       result = HeapTupleSatisfiesUpdate(&tp, cid, buffer);
->>>>>>> REL_16_9
 
        if (result == TM_Invisible)
        {
@@ -2909,7 +2854,6 @@ l1:
                        result = TM_Deleted;
        }
 
-<<<<<<< HEAD
        if (crosscheck != InvalidSnapshot && result == TM_Ok)
        {
                /* Perform additional check for transaction-snapshot mode RI updates */
@@ -2917,9 +2861,7 @@ l1:
                        result = TM_Updated;
        }
 
-=======
        /* sanity check the result HeapTupleSatisfiesUpdate() and the logic above */
->>>>>>> REL_16_9
        if (result != TM_Ok)
        {
                Assert(result == TM_SelfModified ||
@@ -3203,14 +3145,10 @@ simple_heap_delete(Relation relation, ItemPointer tid)
  * generated by another transaction).
  */
 static TM_Result
-heap_update_internal(Relation relation, ItemPointer otid, HeapTuple newtup,
+heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
                        CommandId cid, Snapshot crosscheck, bool wait,
-<<<<<<< HEAD
-                       TM_FailureData *tmfd, LockTupleMode *lockmode, bool simple)
-=======
                        TM_FailureData *tmfd, LockTupleMode *lockmode,
                        TU_UpdateIndexes *update_indexes)
->>>>>>> REL_16_9
 {
        TM_Result       result;
        TransactionId xid = GetCurrentTransactionId();
@@ -3320,8 +3258,6 @@ heap_update_internal(Relation relation, ItemPointer otid, HeapTuple newtup,
        lp = PageGetItemId(page, ItemPointerGetOffsetNumber(otid));
 
        /*
-<<<<<<< HEAD
-=======
         * Usually, a buffer pin and/or snapshot blocks pruning of otid, ensuring
         * we see LP_NORMAL here.  When the otid origin is a syscache, we may have
         * neither a pin nor a snapshot.  Hence, we may see other LP_ states, each
@@ -3367,7 +3303,6 @@ heap_update_internal(Relation relation, ItemPointer otid, HeapTuple newtup,
        }
 
        /*
->>>>>>> REL_16_9
         * Fill in enough data in oldtup for HeapDetermineColumnsInfo to work
         * properly.
         */
@@ -3625,20 +3560,6 @@ l2:
                        result = TM_Deleted;
        }
 
-<<<<<<< HEAD
-       if (crosscheck != InvalidSnapshot && result == TM_Ok)
-       {
-               /* Perform additional check for transaction-snapshot mode RI updates */
-               if (!HeapTupleSatisfiesVisibility(relation, &oldtup, crosscheck, buffer))
-               {
-                       result = TM_Updated;
-                       Assert(!ItemPointerEquals(&oldtup.t_self, &oldtup.t_data->t_ctid));
-               }
-       }
-
-=======
-       /* Sanity check the result HeapTupleSatisfiesUpdate() and the logic above */
->>>>>>> REL_16_9
        if (result != TM_Ok)
        {
                Assert(result == TM_SelfModified ||
@@ -4166,11 +4087,7 @@ l2:
        if (have_tuple_lock)
                UnlockTupleTuplock(relation, &(oldtup.t_self), *lockmode);
 
-<<<<<<< HEAD
        pgstat_count_heap_update(relation, false);
-=======
-       pgstat_count_heap_update(relation, use_hot_update, newbuf != buffer);
->>>>>>> REL_16_9
 
        /*
         * If heaptup is a private copy, release it.  Don't forget to copy t_self
@@ -4211,27 +4128,6 @@ l2:
        return TM_Ok;
 }
 
-<<<<<<< HEAD
-TM_Result
-heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
-                       CommandId cid, Snapshot crosscheck, bool wait,
-                       TM_FailureData *tmfd, LockTupleMode *lockmode)
-{
-       return heap_update_internal(relation, otid, newtup,
-                                                               cid, crosscheck, wait,
-                                                               tmfd, lockmode,
-                                                               /* simple */ false);
-}
-
-/*
- * Check if the specified attribute's values are the same.  Subroutine for
- * HeapDetermineColumnsInfo.
- */
-static bool
-heap_attr_equals(TupleDesc tupdesc, int attrnum, Datum value1, Datum value2,
-                                bool isnull1, bool isnull2)
-{
-=======
 #ifdef USE_ASSERT_CHECKING
 /*
  * Confirm adequate lock held during heap_update(), per rules from
@@ -4362,7 +4258,6 @@ static bool
 heap_attr_equals(TupleDesc tupdesc, int attrnum, Datum value1, Datum value2,
                                 bool isnull1, bool isnull2)
 {
->>>>>>> REL_16_9
        Form_pg_attribute att;
 
        /*
@@ -4409,12 +4304,9 @@ heap_attr_equals(TupleDesc tupdesc, int attrnum, Datum value1, Datum value2,
  * has_external indicates if any of the unmodified attributes (from those
  * listed as interesting) of the old tuple is a member of external_cols and is
  * stored externally.
-<<<<<<< HEAD
  *
  * The input interesting_cols bitmapset is destructively modified; that is OK
  * since this is invoked at most once in heap_update.
-=======
->>>>>>> REL_16_9
  */
 static Bitmapset *
 HeapDetermineColumnsInfo(Relation relation,
@@ -4423,14 +4315,6 @@ HeapDetermineColumnsInfo(Relation relation,
                                                 HeapTuple oldtup, HeapTuple newtup,
                                                 bool *has_external)
 {
-<<<<<<< HEAD
-       int                     attrnum;
-       Bitmapset  *modified = NULL;
-       TupleDesc       tupdesc = RelationGetDescr(relation);
-
-       while ((attrnum = bms_first_member(interesting_cols)) >= 0)
-       {
-=======
        int                     attidx;
        Bitmapset  *modified = NULL;
        TupleDesc       tupdesc = RelationGetDescr(relation);
@@ -4440,17 +4324,11 @@ HeapDetermineColumnsInfo(Relation relation,
        {
                /* attidx is zero-based, attrnum is the normal attribute number */
                AttrNumber      attrnum = attidx + FirstLowInvalidHeapAttributeNumber;
->>>>>>> REL_16_9
                Datum           value1,
                                        value2;
                bool            isnull1,
                                        isnull2;
 
-<<<<<<< HEAD
-               attrnum += FirstLowInvalidHeapAttributeNumber;
-
-=======
->>>>>>> REL_16_9
                /*
                 * If it's a whole-tuple reference, say "not equal".  It's not really
                 * worth supporting this case, since it could only succeed after a
@@ -4458,13 +4336,7 @@ HeapDetermineColumnsInfo(Relation relation,
                 */
                if (attrnum == 0)
                {
-<<<<<<< HEAD
-                       modified = bms_add_member(modified,
-                                                                         attrnum -
-                                                                         FirstLowInvalidHeapAttributeNumber);
-=======
                        modified = bms_add_member(modified, attidx);
->>>>>>> REL_16_9
                        continue;
                }
 
@@ -4477,13 +4349,7 @@ HeapDetermineColumnsInfo(Relation relation,
                {
                        if (attrnum != TableOidAttributeNumber)
                        {
-<<<<<<< HEAD
-                               modified = bms_add_member(modified,
-                                                                                 attrnum -
-                                                                                 FirstLowInvalidHeapAttributeNumber);
-=======
                                modified = bms_add_member(modified, attidx);
->>>>>>> REL_16_9
                                continue;
                        }
                }
@@ -4500,13 +4366,7 @@ HeapDetermineColumnsInfo(Relation relation,
                if (!heap_attr_equals(tupdesc, attrnum, value1,
                                                          value2, isnull1, isnull2))
                {
-<<<<<<< HEAD
-                       modified = bms_add_member(modified,
-                                                                         attrnum -
-                                                                         FirstLowInvalidHeapAttributeNumber);
-=======
                        modified = bms_add_member(modified, attidx);
->>>>>>> REL_16_9
                        continue;
                }
 
@@ -4523,12 +4383,7 @@ HeapDetermineColumnsInfo(Relation relation,
                 * member of external_cols.
                 */
                if (VARATT_IS_EXTERNAL((struct varlena *) DatumGetPointer(value1)) &&
-<<<<<<< HEAD
-                       bms_is_member(attrnum - FirstLowInvalidHeapAttributeNumber,
-                                                 external_cols))
-=======
                        bms_is_member(attidx, external_cols))
->>>>>>> REL_16_9
                        *has_external = true;
        }
 
@@ -4551,15 +4406,10 @@ simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup,
        TM_FailureData tmfd;
        LockTupleMode lockmode;
 
-       result = heap_update_internal(relation, otid, tup,
+       result = heap_update(relation, otid, tup,
                                                 GetCurrentCommandId(true), InvalidSnapshot,
                                                 true /* wait for commit */ ,
-<<<<<<< HEAD
-                                                &tmfd, &lockmode,
-                                                /* simple */ true);
-=======
                                                 &tmfd, &lockmode, update_indexes);
->>>>>>> REL_16_9
        switch (result)
        {
                case TM_SelfModified:
@@ -7155,16 +7005,9 @@ heap_prepare_freeze_tuple(HeapTupleHeader tuple,
                /* Will set freeze_xmin flags in freeze plan below */
                freeze_xmin = TransactionIdPrecedes(xid, cutoffs->OldestXmin);
 
-<<<<<<< HEAD
-                       frz->t_infomask |= HEAP_XMIN_FROZEN;
-                       changed = true;
-                       xmin_frozen = true;
-               }
-=======
                /* Verify that xmin committed if and when freeze plan is executed */
                if (freeze_xmin)
                        frz->checkflags |= HEAP_FREEZE_CHECK_XMIN_COMMITTED;
->>>>>>> REL_16_9
        }
 
        /*
@@ -8675,10 +8518,6 @@ heap_index_delete_tuples(Relation rel, TM_IndexDeleteOp *delstate)
                        ItemId          lp;
                        HeapTupleHeader htup;
 
-<<<<<<< HEAD
-                       /* Some sanity checks */
-                       if (offnum < FirstOffsetNumber || offnum > maxoff)
-=======
                        /* Sanity check (pure paranoia) */
                        if (offnum < FirstOffsetNumber)
                                break;
@@ -8688,7 +8527,6 @@ heap_index_delete_tuples(Relation rel, TM_IndexDeleteOp *delstate)
                         * when the array was truncated
                         */
                        if (offnum > maxoff)
->>>>>>> REL_16_9
                                break;
 
                        lp = PageGetItemId(page, offnum);
@@ -9864,34 +9702,8 @@ heap_xlog_visible(XLogReaderState *record)
                reln = CreateFakeRelcacheEntry(rlocator);
                visibilitymap_pin(reln, blkno, &vmbuffer);
 
-<<<<<<< HEAD
-               /*
-                * Don't set the bit if replay has already passed this point.
-                *
-                * It might be safe to do this unconditionally; if replay has passed
-                * this point, we'll replay at least as far this time as we did
-                * before, and if this bit needs to be cleared, the record responsible
-                * for doing so should be again replayed, and clear it.  For right
-                * now, out of an abundance of conservatism, we use the same test here
-                * we did for the heap page.  If this results in a dropped bit, no
-                * real harm is done; and the next VACUUM will fix it.
-                */
-
-               /*
-                * CDB: don't use PageGetLSN here, GPDB PageGetLSN checks the buffer
-                * is locked. But here vmbuffer is in function visibilitymap_set().
-                *
-                * if (lsn > PageGetLSN(vmpage))
-                *              visibilitymap_set(reln, blkno, InvalidBuffer, lsn, vmbuffer,
-                *                              xlrec->cutoff_xid);
-                */
-               if (lsn > PageXLogRecPtrGet(((PageHeader) vmpage)->pd_lsn))
-                       visibilitymap_set(reln, blkno, InvalidBuffer, lsn, vmbuffer,
-                                                         xlrec->cutoff_xid, xlrec->flags);
-=======
                visibilitymap_set(reln, blkno, InvalidBuffer, lsn, vmbuffer,
                                                  xlrec->snapshotConflictHorizon, vmbits);
->>>>>>> REL_16_9
 
                ReleaseBuffer(vmbuffer);
                FreeFakeRelcacheEntry(reln);
@@ -10730,14 +10542,10 @@ heap_xlog_lock(XLogReaderState *record)
                                                   BufferGetBlockNumber(buffer),
                                                   offnum);
                }
-<<<<<<< HEAD
-               HeapTupleHeaderSetXmax(htup, xlrec->locking_xid);
+               HeapTupleHeaderSetXmax(htup, xlrec->xmax);
 #ifdef SERVERLESS
                HeapTupleHeaderSetCmax(htup, xlrec->t_cid, false);
 #else
-=======
-               HeapTupleHeaderSetXmax(htup, xlrec->xmax);
->>>>>>> REL_16_9
                HeapTupleHeaderSetCmax(htup, FirstCommandId, false);
 #endif
 
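A rough illustration of the one-item visibility cache that the heapgetpage() hunks above preserve, as a minimal, self-contained C sketch with simplified stand-in types. Nothing below is Cloudberry code; full_visibility_check() is a hypothetical placeholder for HeapTupleSatisfiesVisibility().

#include <stdbool.h>
#include <stdint.h>

/* Simplified stand-ins for PostgreSQL's TransactionId/CommandId types. */
typedef uint32_t TransactionId;
typedef uint32_t CommandId;

typedef struct
{
	TransactionId xmin;            /* inserting transaction */
	CommandId     cid;             /* inserting command */
	bool          xmax_invalid;    /* HEAP_XMAX_INVALID set? */
	bool          xmax_lock_only;  /* HEAP_XMAX_IS_LOCKED_ONLY()? */
} TupleHdrSketch;

/* Hypothetical placeholder for the full HeapTupleSatisfiesVisibility() check. */
extern bool full_visibility_check(const TupleHdrSketch *tup);

/*
 * One-item cache: if the previous tuple on the page had the same xmin/cid
 * and passed the full check, reuse that verdict.  The cache is bypassed
 * whenever xmax could matter (tuple was deleted, or carries a multi-XID),
 * mirroring the "give up early" rule in the retained GPDB comment.
 */
bool
tuple_visible_cached(const TupleHdrSketch *tup,
                     TransactionId *t_xmin, CommandId *t_cid)
{
	bool		use_cache = tup->xmax_invalid || tup->xmax_lock_only;

	if (use_cache && *t_xmin == tup->xmin && *t_cid == tup->cid)
		return true;			/* cache hit: same verdict as the last tuple */

	if (full_visibility_check(tup))
	{
		if (use_cache)
		{
			*t_xmin = tup->xmin;	/* remember this tuple for the next one */
			*t_cid = tup->cid;
		}
		return true;
	}
	return false;
}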
diff --git a/src/backend/access/heap/heapam_visibility.c b/src/backend/access/heap/heapam_visibility.c
index bdf311a6d7f..0ba9853e4d9 100644
--- a/src/backend/access/heap/heapam_visibility.c
+++ b/src/backend/access/heap/heapam_visibility.c
@@ -1909,16 +1909,11 @@ HeapTupleSatisfiesHistoricMVCC(Relation relation, HeapTuple htup, Snapshot snaps
  *     if so, the indicated buffer is marked dirty.
  */
 bool
-<<<<<<< HEAD
 HeapTupleSatisfiesVisibility(Relation relation, HeapTuple tup, Snapshot snapshot, Buffer buffer)
-=======
-HeapTupleSatisfiesVisibility(HeapTuple htup, Snapshot snapshot, Buffer buffer)
->>>>>>> REL_16_9
 {
        switch (snapshot->snapshot_type)
        {
                case SNAPSHOT_MVCC:
-<<<<<<< HEAD
                        return HeapTupleSatisfiesMVCC(relation, tup, snapshot, buffer);
                        break;
                case SNAPSHOT_SELF:
@@ -1939,21 +1934,6 @@ HeapTupleSatisfiesVisibility(HeapTuple htup, Snapshot snapshot, Buffer buffer)
                case SNAPSHOT_NON_VACUUMABLE:
                        return HeapTupleSatisfiesNonVacuumable(relation, tup, snapshot, buffer);
                        break;
-=======
-                       return HeapTupleSatisfiesMVCC(htup, snapshot, buffer);
-               case SNAPSHOT_SELF:
-                       return HeapTupleSatisfiesSelf(htup, snapshot, buffer);
-               case SNAPSHOT_ANY:
-                       return HeapTupleSatisfiesAny(htup, snapshot, buffer);
-               case SNAPSHOT_TOAST:
-                       return HeapTupleSatisfiesToast(htup, snapshot, buffer);
-               case SNAPSHOT_DIRTY:
-                       return HeapTupleSatisfiesDirty(htup, snapshot, buffer);
-               case SNAPSHOT_HISTORIC_MVCC:
-                       return HeapTupleSatisfiesHistoricMVCC(htup, snapshot, buffer);
-               case SNAPSHOT_NON_VACUUMABLE:
-                       return HeapTupleSatisfiesNonVacuumable(htup, snapshot, buffer);
->>>>>>> REL_16_9
        }
 
        return false;                           /* keep compiler quiet */
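The heapam_visibility.c resolution above keeps Cloudberry's HeapTupleSatisfiesVisibility() signature, which threads the extra Relation argument into every snapshot-specific check. A minimal sketch of that dispatch shape, under the same caveats as the sketch above (simplified types; check_mvcc() and friends are hypothetical stand-ins for the HeapTupleSatisfies* family):

#include <stdbool.h>

/* Mirrors the SnapshotType values used in the switch above. */
typedef enum
{
	SNAPSHOT_MVCC,
	SNAPSHOT_SELF,
	SNAPSHOT_ANY,
	SNAPSHOT_TOAST,
	SNAPSHOT_DIRTY,
	SNAPSHOT_HISTORIC_MVCC,
	SNAPSHOT_NON_VACUUMABLE
} SnapshotType;

typedef struct { SnapshotType snapshot_type; } Snapshot;
typedef struct Relation Relation;	/* opaque, as in the real code */
typedef struct Tuple Tuple;

/* Hypothetical stand-ins for the snapshot-specific visibility routines. */
extern bool check_mvcc(Relation *rel, Tuple *tup, Snapshot *snap);
extern bool check_self(Relation *rel, Tuple *tup, Snapshot *snap);
extern bool check_non_vacuumable(Relation *rel, Tuple *tup, Snapshot *snap);

bool
tuple_satisfies(Relation *rel, Tuple *tup, Snapshot *snap)
{
	/* Every arm receives the relation, matching the retained signature. */
	switch (snap->snapshot_type)
	{
		case SNAPSHOT_MVCC:
			return check_mvcc(rel, tup, snap);
		case SNAPSHOT_SELF:
			return check_self(rel, tup, snap);
		case SNAPSHOT_NON_VACUUMABLE:
			return check_non_vacuumable(rel, tup, snap);
		default:
			/* SNAPSHOT_ANY, _TOAST, _DIRTY, _HISTORIC_MVCC elided here */
			break;
	}
	return false;				/* keep compiler quiet, as above */
}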
diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c
index eefcd5f7315..3fc524ca88a 100644
--- a/src/backend/access/heap/pruneheap.c
+++ b/src/backend/access/heap/pruneheap.c
@@ -69,15 +69,9 @@ typedef struct
 
        /*
         * Tuple visibility is only computed once for each tuple, for correctness
-<<<<<<< HEAD
-        * and efficiency reasons; see comment in heap_page_prune() for
-        * details. This is of type int8[,] intead of HTSV_Result[], so we can use
-        * -1 to indicate no visibility has been computed, e.g. for LP_DEAD items.
-=======
         * and efficiency reasons; see comment in heap_page_prune() for details.
         * This is of type int8[], instead of HTSV_Result[], so we can use -1 to
         * indicate no visibility has been computed, e.g. for LP_DEAD items.
->>>>>>> REL_16_9
         *
         * Same indexing as ->marked.
         */
@@ -85,11 +79,7 @@ typedef struct
 } PruneState;
 
 /* Local functions */
-<<<<<<< HEAD
 static HTSV_Result heap_prune_satisfies_vacuum(Relation relation, PruneState *prstate,
-=======
-static HTSV_Result heap_prune_satisfies_vacuum(PruneState *prstate,
->>>>>>> REL_16_9
                                                                                           HeapTuple tup,
                                                                                           Buffer buffer);
 static int     heap_prune_chain(Buffer buffer,
@@ -355,11 +345,7 @@ heap_page_prune(Relation relation, Buffer buffer,
                htup = (HeapTupleHeader) PageGetItem(page, itemid);
                tup.t_data = htup;
                tup.t_len = ItemIdGetLength(itemid);
-<<<<<<< HEAD
-               ItemPointerSet(&(tup.t_self), BufferGetBlockNumber(buffer), offnum);
-=======
                ItemPointerSet(&(tup.t_self), blockno, offnum);
->>>>>>> REL_16_9
 
                /*
                 * Set the offset number so that we can display it along with any
@@ -368,11 +354,7 @@ heap_page_prune(Relation relation, Buffer buffer,
                if (off_loc)
                        *off_loc = offnum;
 
-<<<<<<< HEAD
                prstate.htsv[offnum] = heap_prune_satisfies_vacuum(relation, &prstate, &tup,
-=======
-               prstate.htsv[offnum] = heap_prune_satisfies_vacuum(&prstate, &tup,
->>>>>>> REL_16_9
                                                                                                   buffer);
        }
 
@@ -609,8 +591,6 @@ heap_prune_satisfies_vacuum(Relation relation,PruneState *prstate, HeapTuple tup
  * chain.  We also prune any RECENTLY_DEAD tuples preceding a DEAD tuple.
  * This is OK because a RECENTLY_DEAD tuple preceding a DEAD tuple is really
  * DEAD, our visibility test is just too coarse to detect it.
-<<<<<<< HEAD
-=======
  *
  * In general, pruning must never leave behind a DEAD tuple that still has
  * tuple storage.  VACUUM isn't prepared to deal with that case.  That's why
@@ -618,7 +598,6 @@ heap_prune_satisfies_vacuum(Relation relation,PruneState *prstate, HeapTuple tup
  * in the interim) when it sees a newly DEAD tuple that we initially saw as
  * in-progress.  Retrying pruning like this can only happen when an inserting
  * transaction concurrently aborts.
->>>>>>> REL_16_9
  *
  * The root line pointer is redirected to the tuple immediately after the
  * latest DEAD tuple.  If all tuples in the chain are DEAD, the root line
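The pruneheap.c hunks keep the memoization the retained comment describes: one visibility verdict per line pointer in an int8 array, computed at most once, with -1 meaning "not yet computed" (e.g. for LP_DEAD items). A rough self-contained sketch; MAX_TUPLES_PER_PAGE and compute_htsv() are assumed stand-ins, not the real definitions:

#include <stdint.h>
#include <string.h>

#define MAX_TUPLES_PER_PAGE 291	/* assumed; the real limit depends on BLCKSZ */

typedef struct
{
	/*
	 * int8_t rather than an HTSV_Result enum so that -1 fits as a
	 * "no verdict computed" sentinel; indexed by offset number.
	 */
	int8_t		htsv[MAX_TUPLES_PER_PAGE + 1];
} PruneStateSketch;

void
prune_state_init(PruneStateSketch *ps)
{
	memset(ps->htsv, -1, sizeof(ps->htsv));	/* nothing computed yet */
}

/* Hypothetical stand-in for heap_prune_satisfies_vacuum(). */
extern int8_t compute_htsv(int offnum);

int8_t
htsv_get(PruneStateSketch *ps, int offnum)
{
	if (ps->htsv[offnum] == -1)
		ps->htsv[offnum] = compute_htsv(offnum);	/* at most once per tuple */
	return ps->htsv[offnum];
}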
diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index 63c44cef664..f32a8bfd743 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -662,13 +662,9 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
                        BufferUsage bufferusage;
                        StringInfoData buf;
                        char       *msgfmt;
-<<<<<<< HEAD
-                       BlockNumber orig_rel_pages;
-=======
                        int32           diff;
                        double          read_rate = 0,
                                                write_rate = 0;
->>>>>>> REL_16_9
 
                        TimestampDifference(starttime, endtime, &secs_dur, &usecs_dur);
                        memset(&walusage, 0, sizeof(WalUsage));
@@ -721,35 +717,6 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
                                                         _("tuples: %lld 
removed, %lld remain, %lld are dead but not yet removable\n"),
                                                         (long long) 
vacrel->tuples_deleted,
                                                         (long long) 
vacrel->new_rel_tuples,
-<<<<<<< HEAD
-                                                        (long long) vacrel->new_dead_tuples,
-                                                        OldestXmin);
-                       orig_rel_pages = vacrel->rel_pages + vacrel->pages_removed;
-                       if (orig_rel_pages > 0)
-                       {
-                               if (vacrel->do_index_vacuuming)
-                               {
-                                       if (vacrel->nindexes == 0 || vacrel->num_index_scans == 0)
-                                               appendStringInfoString(&buf, _("index scan not needed: "));
-                                       else
-                                               appendStringInfoString(&buf, _("index scan needed: "));
-
-                                       msgfmt = _("%u pages from table (%.2f%% of total) had %lld dead item identifiers removed\n");
-                               }
-                               else
-                               {
-                                       if (!vacrel->failsafe_active)
-                                               appendStringInfoString(&buf, _("index scan bypassed: "));
-                                       else
-                                               appendStringInfoString(&buf, _("index scan bypassed by failsafe: "));
-
-                                       msgfmt = _("%u pages from table (%.2f%% of total) have %lld dead item identifiers\n");
-                               }
-                               appendStringInfo(&buf, msgfmt,
-                                                                vacrel->lpdead_item_pages,
-                                                                100.0 * vacrel->lpdead_item_pages / orig_rel_pages,
-                                                                (long long) vacrel->lpdead_items);
-=======
                                                         (long long) vacrel->recently_dead_tuples);
                        if (vacrel->missed_dead_tuples > 0)
                                appendStringInfo(&buf,
@@ -768,7 +735,6 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
                                appendStringInfo(&buf,
                                                                 _("new 
relfrozenxid: %u, which is %d XIDs ahead of previous value\n"),
                                                                 
vacrel->NewRelfrozenXid, diff);
->>>>>>> REL_16_9
                        }
                        if (minmulti_updated)
                        {
@@ -829,8 +795,6 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
                                appendStringInfo(&buf, _("I/O timings: read: %.3f ms, write: %.3f ms\n"),
                                                                 read_ms, write_ms);
                        }
-<<<<<<< HEAD
-=======
                        if (secs_dur > 0 || usecs_dur > 0)
                        {
                                read_rate = (double) BLCKSZ * (bufferusage.shared_blks_read + bufferusage.local_blks_read) /
@@ -838,20 +802,13 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
                                write_rate = (double) BLCKSZ * (bufferusage.shared_blks_dirtied + bufferusage.local_blks_dirtied) /
                                        (1024 * 1024) / (secs_dur + usecs_dur / 1000000.0);
                        }
->>>>>>> REL_16_9
                        appendStringInfo(&buf, _("avg read rate: %.3f MB/s, avg write rate: %.3f MB/s\n"),
                                                         read_rate, write_rate);
                        appendStringInfo(&buf,
                                                         _("buffer usage: %lld 
hits, %lld misses, %lld dirtied\n"),
-<<<<<<< HEAD
-                                                        (long long) VacuumPageHit,
-                                                        (long long) VacuumPageMiss,
-                                                        (long long) VacuumPageDirty);
-=======
                                                         (long long) (bufferusage.shared_blks_hit + bufferusage.local_blks_hit),
                                                         (long long) (bufferusage.shared_blks_read + bufferusage.local_blks_read),
                                                         (long long) (bufferusage.shared_blks_dirtied + bufferusage.local_blks_dirtied));
->>>>>>> REL_16_9
                        appendStringInfo(&buf,
                                                         _("WAL usage: %lld 
records, %lld full page images, %llu bytes\n"),
                                                         (long long) 
walusage.wal_records,
@@ -1449,62 +1406,6 @@ lazy_scan_skip(LVRelState *vacrel, Buffer *vmbuffer, BlockNumber next_block,
        }
 
        /*
-<<<<<<< HEAD
-        * Free resources managed by lazy_space_alloc().  (We must end parallel
-        * mode/free shared memory before updating index statistics.  We cannot
-        * write while in parallel mode.)
-        */
-       lazy_space_free(vacrel);
-
-       /* Update index statistics */
-       if (vacrel->nindexes > 0 && vacrel->do_index_cleanup)
-               update_index_statistics(vacrel);
-
-       /*
-        * When the table has no indexes (i.e. in the one-pass strategy case),
-        * make log report that lazy_vacuum_heap_rel would've made had there been
-        * indexes.  (As in the two-pass strategy case, only make this report when
-        * there were LP_DEAD line pointers vacuumed in lazy_vacuum_heap_page.)
-        */
-       if (vacrel->nindexes == 0 && vacrel->lpdead_item_pages > 0)
-               ereport(elevel,
-                               (errmsg("table \"%s\": removed %lld dead item identifiers in %u pages",
-                                               vacrel->relname, (long long) vacrel->lpdead_items,
-                                               vacrel->lpdead_item_pages)));
-
-       /*
-        * Make a log report summarizing pruning and freezing.
-        *
-        * The autovacuum specific logging in heap_vacuum_rel summarizes an entire
-        * VACUUM operation, whereas each VACUUM VERBOSE log report generally
-        * summarizes a single round of index/heap vacuuming (or rel truncation).
-        * It wouldn't make sense to report on pruning or freezing while following
-        * that convention, though.  You can think of this log report as a summary
-        * of our first pass over the heap.
-        */
-       initStringInfo(&buf);
-       appendStringInfo(&buf,
-                                        _("%lld dead row versions cannot be removed yet, oldest xmin: %u\n"),
-                                        (long long) vacrel->new_dead_tuples, vacrel->OldestXmin);
-       appendStringInfo(&buf, ngettext("Skipped %u page due to buffer pins, ",
-                                                                       "Skipped %u pages due to buffer pins, ",
-                                                                       vacrel->pinskipped_pages),
-                                        vacrel->pinskipped_pages);
-       appendStringInfo(&buf, ngettext("%u frozen page.\n",
-                                                                       "%u frozen pages.\n",
-                                                                       vacrel->frozenskipped_pages),
-                                        vacrel->frozenskipped_pages);
-       appendStringInfo(&buf, _("%s."), pg_rusage_show(&ru0));
-
-       ereport(elevel,
-                       (errmsg("table \"%s\": found %lld removable, %lld nonremovable row versions in %u out of %u pages",
-                                       vacrel->relname,
-                                       (long long) vacrel->tuples_deleted,
-                                       (long long) vacrel->num_tuples, vacrel->scanned_pages,
-                                       nblocks),
-                        errdetail_internal("%s", buf.data)));
-       pfree(buf.data);
-=======
         * We only skip a range with at least SKIP_PAGES_THRESHOLD consecutive
         * pages.  Since we're reading sequentially, the OS should be doing
         * readahead for us, so there's no gain in skipping a page now and then.
@@ -1655,7 +1556,6 @@ lazy_scan_new_or_empty(LVRelState *vacrel, Buffer buf, BlockNumber blkno,
 
        /* page isn't new or empty -- keep lock and pin */
        return false;
->>>>>>> REL_16_9
 }
 
 /*
@@ -1806,12 +1706,8 @@ retry:
                 * since heap_page_prune() looked.  Handle that here by restarting.
                 * (See comments at the top of function for a full explanation.)
                 */
-<<<<<<< HEAD
-               res = HeapTupleSatisfiesVacuum(rel, &tuple, vacrel->OldestXmin, buf);
-=======
-               res = HeapTupleSatisfiesVacuum(&tuple, vacrel->cutoffs.OldestXmin,
+               res = HeapTupleSatisfiesVacuum(rel, &tuple, vacrel->cutoffs.OldestXmin,
                                                                           buf);
->>>>>>> REL_16_9
 
                if (unlikely(res == HEAPTUPLE_DEAD))
                        goto retry;
@@ -2426,14 +2322,6 @@ lazy_vacuum(LVRelState *vacrel)
                 * calls.)
                 */
                vacrel->do_index_vacuuming = false;
-<<<<<<< HEAD
-               ereport(elevel,
-                               (errmsg("table \"%s\": index scan bypassed: %u pages from table (%.2f%% of total) have %lld dead item identifiers",
-                                               vacrel->relname, vacrel->lpdead_item_pages,
-                                               100.0 * vacrel->lpdead_item_pages / vacrel->rel_pages,
-                                               (long long) vacrel->lpdead_items)));
-=======
->>>>>>> REL_16_9
        }
        else if (lazy_vacuum_all_indexes(vacrel))
        {
@@ -2633,25 +2521,14 @@ lazy_vacuum_heap_rel(LVRelState *vacrel)
         * We set all LP_DEAD items from the first heap pass to LP_UNUSED during
         * the second heap pass.  No more, no less.
         */
-<<<<<<< HEAD
-       Assert(tupindex > 0);
-=======
        Assert(index > 0);
->>>>>>> REL_16_9
        Assert(vacrel->num_index_scans > 1 ||
                   (index == vacrel->lpdead_items &&
                        vacuumed_pages == vacrel->lpdead_item_pages));
 
-<<<<<<< HEAD
-       ereport(elevel,
-                       (errmsg("table \"%s\": removed %lld dead item identifiers in %u pages",
-                                       vacrel->relname, (long long ) tupindex, vacuumed_pages),
-                        errdetail_internal("%s", pg_rusage_show(&ru0))));
-=======
        ereport(DEBUG2,
                        (errmsg("table \"%s\": removed %lld dead item identifiers in %u pages",
                                        vacrel->relname, (long long) index, vacuumed_pages)));
->>>>>>> REL_16_9
 
        /* Revert to the previous phase information for error traceback */
        restore_vacuum_error_info(vacrel, &saved_err_info);
@@ -2828,354 +2705,6 @@ lazy_check_wraparound_failsafe(LVRelState *vacrel)
 }
 
 /*
-<<<<<<< HEAD
- * Perform lazy_vacuum_all_indexes() steps in parallel
- */
-static void
-do_parallel_lazy_vacuum_all_indexes(LVRelState *vacrel)
-{
-       /* Tell parallel workers to do index vacuuming */
-       vacrel->lps->lvshared->for_cleanup = false;
-       vacrel->lps->lvshared->first_time = false;
-
-       /*
-        * We can only provide an approximate value of num_heap_tuples, at least
-        * for now.  Matches serial VACUUM case.
-        */
-       vacrel->lps->lvshared->reltuples = vacrel->old_live_tuples;
-       vacrel->lps->lvshared->estimated_count = true;
-
-       do_parallel_vacuum_or_cleanup(vacrel,
-                                                                 vacrel->lps->nindexes_parallel_bulkdel);
-}
-
-/*
- * Perform lazy_cleanup_all_indexes() steps in parallel
- */
-static void
-do_parallel_lazy_cleanup_all_indexes(LVRelState *vacrel)
-{
-       int                     nworkers;
-
-       /*
-        * If parallel vacuum is active we perform index cleanup with parallel
-        * workers.
-        *
-        * Tell parallel workers to do index cleanup.
-        */
-       vacrel->lps->lvshared->for_cleanup = true;
-       vacrel->lps->lvshared->first_time = (vacrel->num_index_scans == 0);
-
-       /*
-        * Now we can provide a better estimate of total number of surviving
-        * tuples (we assume indexes are more interested in that than in the
-        * number of nominally live tuples).
-        */
-       vacrel->lps->lvshared->reltuples = vacrel->new_rel_tuples;
-       vacrel->lps->lvshared->estimated_count =
-               (vacrel->tupcount_pages < vacrel->rel_pages);
-
-       /* Determine the number of parallel workers to launch */
-       if (vacrel->lps->lvshared->first_time)
-               nworkers = vacrel->lps->nindexes_parallel_cleanup +
-                       vacrel->lps->nindexes_parallel_condcleanup;
-       else
-               nworkers = vacrel->lps->nindexes_parallel_cleanup;
-
-       do_parallel_vacuum_or_cleanup(vacrel, nworkers);
-}
-
-/*
- * Perform index vacuum or index cleanup with parallel workers.  This function
- * must be used by the parallel vacuum leader process.  The caller must set
- * lps->lvshared->for_cleanup to indicate whether to perform vacuum or
- * cleanup.
- */
-static void
-do_parallel_vacuum_or_cleanup(LVRelState *vacrel, int nworkers)
-{
-       LVParallelState *lps = vacrel->lps;
-
-       Assert(!IsParallelWorker());
-       Assert(ParallelVacuumIsActive(vacrel));
-       Assert(vacrel->nindexes > 0);
-
-       /* The leader process will participate */
-       nworkers--;
-
-       /*
-        * It is possible that parallel context is initialized with fewer workers
-        * than the number of indexes that need a separate worker in the current
-        * phase, so we need to consider it.  See compute_parallel_vacuum_workers.
-        */
-       nworkers = Min(nworkers, lps->pcxt->nworkers);
-
-       /* Setup the shared cost-based vacuum delay and launch workers */
-       if (nworkers > 0)
-       {
-               if (vacrel->num_index_scans > 0)
-               {
-                       /* Reset the parallel index processing counter */
-                       pg_atomic_write_u32(&(lps->lvshared->idx), 0);
-
-                       /* Reinitialize the parallel context to relaunch parallel workers */
-                       ReinitializeParallelDSM(lps->pcxt);
-               }
-
-               /*
-                * Set up shared cost balance and the number of active workers for
-                * vacuum delay.  We need to do this before launching workers as
-                * otherwise, they might not see the updated values for these
-                * parameters.
-                */
-               pg_atomic_write_u32(&(lps->lvshared->cost_balance), VacuumCostBalance);
-               pg_atomic_write_u32(&(lps->lvshared->active_nworkers), 0);
-
-               /*
-                * The number of workers can vary between bulkdelete and cleanup
-                * phase.
-                */
-               ReinitializeParallelWorkers(lps->pcxt, nworkers);
-
-               LaunchParallelWorkers(lps->pcxt);
-
-               if (lps->pcxt->nworkers_launched > 0)
-               {
-                       /*
-                        * Reset the local cost values for leader backend as we have
-                        * already accumulated the remaining balance of heap.
-                        */
-                       VacuumCostBalance = 0;
-                       VacuumCostBalanceLocal = 0;
-
-                       /* Enable shared cost balance for leader backend */
-                       VacuumSharedCostBalance = &(lps->lvshared->cost_balance);
-                       VacuumActiveNWorkers = &(lps->lvshared->active_nworkers);
-               }
-
-               if (lps->lvshared->for_cleanup)
-                       ereport(elevel,
-                                       (errmsg(ngettext("launched %d parallel vacuum worker for index cleanup (planned: %d)",
-                                                                        "launched %d parallel vacuum workers for index cleanup (planned: %d)",
-                                                                        lps->pcxt->nworkers_launched),
-                                                       lps->pcxt->nworkers_launched, nworkers)));
-               else
-                       ereport(elevel,
-                                       (errmsg(ngettext("launched %d parallel vacuum worker for index vacuuming (planned: %d)",
-                                                                        "launched %d parallel vacuum workers for index vacuuming (planned: %d)",
-                                                                        lps->pcxt->nworkers_launched),
-                                                       lps->pcxt->nworkers_launched, nworkers)));
-       }
-
-       /* Process the indexes that can be processed by only leader process */
-       do_serial_processing_for_unsafe_indexes(vacrel, lps->lvshared);
-
-       /*
-        * Join as a parallel worker.  The leader process alone processes all 
the
-        * indexes in the case where no workers are launched.
-        */
-       do_parallel_processing(vacrel, lps->lvshared);
-
-       /*
-        * Next, accumulate buffer and WAL usage.  (This must wait for the workers
-        * to finish, or we might get incomplete data.)
-        */
-       if (nworkers > 0)
-       {
-               /* Wait for all vacuum workers to finish */
-               WaitForParallelWorkersToFinish(lps->pcxt);
-
-               for (int i = 0; i < lps->pcxt->nworkers_launched; i++)
-                       InstrAccumParallelQuery(&lps->buffer_usage[i], &lps->wal_usage[i]);
-       }
-
-       /*
-        * Carry the shared balance value to heap scan and disable shared costing
-        */
-       if (VacuumSharedCostBalance)
-       {
-               VacuumCostBalance = pg_atomic_read_u32(VacuumSharedCostBalance);
-               VacuumSharedCostBalance = NULL;
-               VacuumActiveNWorkers = NULL;
-       }
-}
-
-/*
- * Index vacuum/cleanup routine used by the leader process and parallel
- * vacuum worker processes to process the indexes in parallel.
- */
-static void
-do_parallel_processing(LVRelState *vacrel, LVShared *lvshared)
-{
-       /*
-        * Increment the active worker count if we are able to launch any worker.
-        */
-       if (VacuumActiveNWorkers)
-               pg_atomic_add_fetch_u32(VacuumActiveNWorkers, 1);
-
-       /* Loop until all indexes are vacuumed */
-       for (;;)
-       {
-               int                     idx;
-               LVSharedIndStats *shared_istat;
-               Relation        indrel;
-               IndexBulkDeleteResult *istat;
-
-               /* Get an index number to process */
-               idx = pg_atomic_fetch_add_u32(&(lvshared->idx), 1);
-
-               /* Done for all indexes? */
-               if (idx >= vacrel->nindexes)
-                       break;
-
-               /* Get the index statistics space from DSM, if any */
-               shared_istat = parallel_stats_for_idx(lvshared, idx);
-
-               /* Skip indexes not participating in parallelism */
-               if (shared_istat == NULL)
-                       continue;
-
-               indrel = vacrel->indrels[idx];
-
-               /*
-                * Skip processing indexes that are unsafe for workers (these are
-                * processed in do_serial_processing_for_unsafe_indexes() by leader)
-                */
-               if (!parallel_processing_is_safe(indrel, lvshared))
-                       continue;
-
-               /* Do vacuum or cleanup of the index */
-               istat = (vacrel->indstats[idx]);
-               vacrel->indstats[idx] = parallel_process_one_index(indrel, istat,
-                                                                                                                  lvshared,
-                                                                                                                  shared_istat,
-                                                                                                                  vacrel);
-       }
-
-       /*
-        * We have completed the index vacuum so decrement the active worker
-        * count.
-        */
-       if (VacuumActiveNWorkers)
-               pg_atomic_sub_fetch_u32(VacuumActiveNWorkers, 1);
-}
-
-/*
- * Perform parallel processing of indexes in leader process.
- *
- * Handles index vacuuming (or index cleanup) for indexes that are not
- * parallel safe.  It's possible that this will vary for a given index, based
- * on details like whether we're performing for_cleanup processing right now.
- *
- * Also performs processing of smaller indexes that fell under the size cutoff
- * enforced by compute_parallel_vacuum_workers().  These indexes never get a
- * slot for statistics in DSM.
- */
-static void
-do_serial_processing_for_unsafe_indexes(LVRelState *vacrel, LVShared *lvshared)
-{
-       Assert(!IsParallelWorker());
-
-       /*
-        * Increment the active worker count if we are able to launch any worker.
-        */
-       if (VacuumActiveNWorkers)
-               pg_atomic_add_fetch_u32(VacuumActiveNWorkers, 1);
-
-       for (int idx = 0; idx < vacrel->nindexes; idx++)
-       {
-               LVSharedIndStats *shared_istat;
-               Relation        indrel;
-               IndexBulkDeleteResult *istat;
-
-               shared_istat = parallel_stats_for_idx(lvshared, idx);
-               indrel = vacrel->indrels[idx];
-
-               /*
-                * We're only here for the indexes that parallel workers won't
-                * process.  Note that the shared_istat test ensures that we process
-                * indexes that fell under initial size cutoff.
-                */
-               if (shared_istat != NULL &&
-                       parallel_processing_is_safe(indrel, lvshared))
-                       continue;
-
-               /* Do vacuum or cleanup of the index */
-               istat = (vacrel->indstats[idx]);
-               vacrel->indstats[idx] = parallel_process_one_index(indrel, istat,
-                                                                                                                  lvshared,
-                                                                                                                  shared_istat,
-                                                                                                                  vacrel);
-       }
-
-       /*
-        * We have completed the index vacuum so decrement the active worker
-        * count.
-        */
-       if (VacuumActiveNWorkers)
-               pg_atomic_sub_fetch_u32(VacuumActiveNWorkers, 1);
-}
-
-/*
- * Vacuum or cleanup index either by leader process or by one of the worker
- * process.  After processing the index this function copies the index
- * statistics returned from ambulkdelete and amvacuumcleanup to the DSM
- * segment.
- */
-static IndexBulkDeleteResult *
-parallel_process_one_index(Relation indrel,
-                                                  IndexBulkDeleteResult *istat,
-                                                  LVShared *lvshared,
-                                                  LVSharedIndStats *shared_istat,
-                                                  LVRelState *vacrel)
-{
-       IndexBulkDeleteResult *istat_res;
-
-       /*
-        * Update the pointer to the corresponding bulk-deletion result if someone
-        * has already updated it
-        */
-       if (shared_istat && shared_istat->updated && istat == NULL)
-               istat = &shared_istat->istat;
-
-       /* Do vacuum or cleanup of the index */
-       if (lvshared->for_cleanup)
-               istat_res = lazy_cleanup_one_index(indrel, istat, lvshared->reltuples,
-                                                                                  lvshared->estimated_count, vacrel);
-       else
-               istat_res = lazy_vacuum_one_index(indrel, istat, lvshared->reltuples,
-                                                                                 vacrel);
-
-       /*
-        * Copy the index bulk-deletion result returned from ambulkdelete and
-        * amvacuumcleanup to the DSM segment if it's the first cycle because they
-        * allocate locally and it's possible that an index will be vacuumed by a
-        * different vacuum process the next cycle.  Copying the result normally
-        * happens only the first time an index is vacuumed.  For any additional
-        * vacuum pass, we directly point to the result on the DSM segment and
-        * pass it to vacuum index APIs so that workers can update it directly.
-        *
-        * Since all vacuum workers write the bulk-deletion result at different
-        * slots we can write them without locking.
-        */
-       if (shared_istat && !shared_istat->updated && istat_res != NULL)
-       {
-               memcpy(&shared_istat->istat, istat_res, sizeof(IndexBulkDeleteResult));
-               shared_istat->updated = true;
-
-               /* Free the locally-allocated bulk-deletion result */
-               pfree(istat_res);
-
-               /* return the pointer to the result from shared memory */
-               return &shared_istat->istat;
-       }
-
-       return istat_res;
-}
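
A note on the handoff the removed function implements: on the first vacuum
cycle the locally allocated IndexBulkDeleteResult is copied into the index's
DSM slot, and every later cycle works on the shared copy directly.  A
minimal sketch of that pattern, with SharedSlot standing in for
LVSharedIndStats -- the names here are illustrative, not the Cloudberry
sources:

typedef struct SharedSlot
{
    bool        updated;            /* slot already filled by some worker? */
    IndexBulkDeleteResult istat;    /* per-index shared statistics */
} SharedSlot;

static IndexBulkDeleteResult *
publish_result(SharedSlot *slot, IndexBulkDeleteResult *local)
{
    /* First cycle: copy the locally allocated result into shared memory */
    if (slot && !slot->updated && local != NULL)
    {
        memcpy(&slot->istat, local, sizeof(IndexBulkDeleteResult));
        slot->updated = true;
        pfree(local);               /* from now on, use the DSM copy */
        return &slot->istat;
    }
    return local;
}

Since every index owns its own slot, results can be published without any
locking, which is exactly what the comment above relies on.
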
-
-/*
-=======
->>>>>>> REL_16_9
  *     lazy_cleanup_all_indexes() -- cleanup all indexes of relation.
  */
 static void
@@ -3476,11 +3005,7 @@ lazy_truncate_heap(LVRelState *vacrel)
                vacrel->removed_pages += orig_rel_pages - new_rel_pages;
                vacrel->rel_pages = new_rel_pages;
 
-<<<<<<< HEAD
-               ereport(elevel,
-=======
                ereport(vacrel->verbose ? INFO : DEBUG2,
->>>>>>> REL_16_9
                                (errmsg("table \"%s\": truncated %u to %u 
pages",
                                                vacrel->relname,
                                                orig_rel_pages, 
new_rel_pages)));
@@ -3543,11 +3068,7 @@ count_nondeletable_pages(LVRelState *vacrel, bool *lock_waiter_detected)
                        {
                                if (LockHasWaitersRelation(vacrel->rel, AccessExclusiveLock))
                                {
-<<<<<<< HEAD
-                                       ereport(elevel,
-=======
                                        ereport(vacrel->verbose ? INFO : DEBUG2,
->>>>>>> REL_16_9
                                                        (errmsg("table \"%s\": 
suspending truncate due to conflicting lock request",
                                                                        
vacrel->relname)));
 
@@ -3718,19 +3239,13 @@ dead_items_alloc(LVRelState *vacrel, int nworkers)
                                                        vacrel->relname)));
 #if 0
                else
-<<<<<<< HEAD
-                       vacrel->lps = begin_parallel_vacuum(vacrel, nblocks, nworkers);
-#endif
-               /* If parallel mode started, we're done */
-=======
                        vacrel->pvs = parallel_vacuum_init(vacrel->rel, vacrel->indrels,
                                                                                           vacrel->nindexes, nworkers,
                                                                                           max_items,
                                                                                           vacrel->verbose ? INFO : DEBUG2,
                                                                                           vacrel->bstrategy);
-
+#endif
                /* If parallel mode started, dead_items space is allocated in DSM */
->>>>>>> REL_16_9
                if (ParallelVacuumIsActive(vacrel))
                {
                        vacrel->dead_items = parallel_vacuum_get_dead_items(vacrel->pvs);
@@ -3826,12 +3341,8 @@ heap_page_is_all_visible(LVRelState *vacrel, Buffer buf,
                tuple.t_len = ItemIdGetLength(itemid);
                tuple.t_tableOid = RelationGetRelid(vacrel->rel);
 
-<<<<<<< HEAD
-               switch (HeapTupleSatisfiesVacuum(vacrel->rel, &tuple, vacrel->OldestXmin, buf))
-=======
-               switch (HeapTupleSatisfiesVacuum(&tuple, vacrel->cutoffs.OldestXmin,
+               switch (HeapTupleSatisfiesVacuum(vacrel->rel, &tuple, vacrel->cutoffs.OldestXmin,
                                                                                 buf))
->>>>>>> REL_16_9
                {
                        case HEAPTUPLE_LIVE:
                                {
@@ -3891,82 +3402,7 @@ heap_page_is_all_visible(LVRelState *vacrel, Buffer buf,
        return all_visible;
 }
 
-#if 0
-/*
-<<<<<<< HEAD
- * Compute the number of parallel worker processes to request.  Both index
- * vacuum and index cleanup can be executed with parallel workers.  The index
- * is eligible for parallel vacuum iff its size is greater than
- * min_parallel_index_scan_size as invoking workers for very small indexes
- * can hurt performance.
- *
- * nrequested is the number of parallel workers that user requested.  If
- * nrequested is 0, we compute the parallel degree based on nindexes, that is
- * the number of indexes that support parallel vacuum.  This function also
- * sets will_parallel_vacuum to remember indexes that participate in parallel
- * vacuum.
- */
-static int
-compute_parallel_vacuum_workers(LVRelState *vacrel, int nrequested,
-                                                               bool *will_parallel_vacuum)
-{
-       int                     nindexes_parallel = 0;
-       int                     nindexes_parallel_bulkdel = 0;
-       int                     nindexes_parallel_cleanup = 0;
-       int                     parallel_workers;
-
-       /*
-        * We don't allow performing parallel operation in standalone backend or
-        * when parallelism is disabled.
-        */
-       if (!IsUnderPostmaster || max_parallel_maintenance_workers == 0)
-               return 0;
-
-       /*
-        * Compute the number of indexes that can participate in parallel vacuum.
-        */
-       for (int idx = 0; idx < vacrel->nindexes; idx++)
-       {
-               Relation        indrel = vacrel->indrels[idx];
-               uint8           vacoptions = indrel->rd_indam->amparallelvacuumoptions;
-
-               if (vacoptions == VACUUM_OPTION_NO_PARALLEL ||
-                       RelationGetNumberOfBlocks(indrel) < min_parallel_index_scan_size)
-                       continue;
-
-               will_parallel_vacuum[idx] = true;
-
-               if ((vacoptions & VACUUM_OPTION_PARALLEL_BULKDEL) != 0)
-                       nindexes_parallel_bulkdel++;
-               if (((vacoptions & VACUUM_OPTION_PARALLEL_CLEANUP) != 0) ||
-                       ((vacoptions & VACUUM_OPTION_PARALLEL_COND_CLEANUP) != 0))
-                       nindexes_parallel_cleanup++;
-       }
-
-       nindexes_parallel = Max(nindexes_parallel_bulkdel,
-                                                       nindexes_parallel_cleanup);
-
-       /* The leader process takes one index */
-       nindexes_parallel--;
-
-       /* No index supports parallel vacuum */
-       if (nindexes_parallel <= 0)
-               return 0;
-
-       /* Compute the parallel degree */
-       parallel_workers = (nrequested > 0) ?
-               Min(nrequested, nindexes_parallel) : nindexes_parallel;
-
-       /* Cap by max_parallel_maintenance_workers */
-       parallel_workers = Min(parallel_workers, max_parallel_maintenance_workers);
-
-       return parallel_workers;
-}
-#endif
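
For reference, the worker-count arithmetic in the #if 0'd
compute_parallel_vacuum_workers() above boils down to a few lines.  A
sketch (Min is the usual macro from c.h; sketch_parallel_degree is a
hypothetical name):

static int
sketch_parallel_degree(int nindexes_parallel, int nrequested)
{
    int         parallel_workers;

    /* The leader process takes one index itself */
    nindexes_parallel--;
    if (nindexes_parallel <= 0)
        return 0;               /* nothing left to farm out to workers */

    /* Honor the user's request, if any, then cap by the GUC */
    parallel_workers = (nrequested > 0) ?
        Min(nrequested, nindexes_parallel) : nindexes_parallel;
    return Min(parallel_workers, max_parallel_maintenance_workers);
}
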
-
 /*
-=======
->>>>>>> REL_16_9
  * Update index statistics in pg_class if the statistics are accurate.
  */
 static void
@@ -3994,452 +3430,15 @@ update_relstats_all_indexes(LVRelState *vacrel)
                                                        false,
                                                        InvalidTransactionId,
                                                        InvalidMultiXactId,
-<<<<<<< HEAD
-                                                       false,
-                                                       true /* isvacuum */);
-=======
-                                                       NULL, NULL, false);
->>>>>>> REL_16_9
-       }
-}
-
-#if 0
-/*
-<<<<<<< HEAD
- * This function prepares and returns parallel vacuum state if we can launch
- * even one worker.  This function is responsible for entering parallel mode,
- * create a parallel context, and then initialize the DSM segment.
- */
-static LVParallelState *
-begin_parallel_vacuum(LVRelState *vacrel, BlockNumber nblocks,
-                                         int nrequested)
-{
-       LVParallelState *lps = NULL;
-       Relation   *indrels = vacrel->indrels;
-       int                     nindexes = vacrel->nindexes;
-       ParallelContext *pcxt;
-       LVShared   *shared;
-       LVDeadTuples *dead_tuples;
-       BufferUsage *buffer_usage;
-       WalUsage   *wal_usage;
-       bool       *will_parallel_vacuum;
-       long            maxtuples;
-       Size            est_shared;
-       Size            est_deadtuples;
-       int                     nindexes_mwm = 0;
-       int                     parallel_workers = 0;
-       int                     querylen;
-
-       /*
-        * A parallel vacuum must be requested and there must be indexes on the
-        * relation
-        */
-       Assert(nrequested >= 0);
-       Assert(nindexes > 0);
-
-       /*
-        * Compute the number of parallel vacuum workers to launch
-        */
-       will_parallel_vacuum = (bool *) palloc0(sizeof(bool) * nindexes);
-       parallel_workers = compute_parallel_vacuum_workers(vacrel,
-                                                                                                          nrequested,
-                                                                                                          will_parallel_vacuum);
-
-       /* Can't perform vacuum in parallel */
-       if (parallel_workers <= 0)
-       {
-               pfree(will_parallel_vacuum);
-               return lps;
-       }
-
-       lps = (LVParallelState *) palloc0(sizeof(LVParallelState));
-
-       EnterParallelMode();
-       pcxt = CreateParallelContext("postgres", "parallel_vacuum_main",
-                                                                parallel_workers);
-       Assert(pcxt->nworkers > 0);
-       lps->pcxt = pcxt;
-
-       /* Estimate size for shared information -- PARALLEL_VACUUM_KEY_SHARED */
-       est_shared = MAXALIGN(add_size(SizeOfLVShared, BITMAPLEN(nindexes)));
-       for (int idx = 0; idx < nindexes; idx++)
-       {
-               Relation        indrel = indrels[idx];
-               uint8           vacoptions = indrel->rd_indam->amparallelvacuumoptions;
-
-               /*
-                * Cleanup option should be either disabled, always performing in
-                * parallel or conditionally performing in parallel.
-                */
-               Assert(((vacoptions & VACUUM_OPTION_PARALLEL_CLEANUP) == 0) ||
-                          ((vacoptions & VACUUM_OPTION_PARALLEL_COND_CLEANUP) == 0));
-               Assert(vacoptions <= VACUUM_OPTION_MAX_VALID_VALUE);
-
-               /* Skip indexes that don't participate in parallel vacuum */
-               if (!will_parallel_vacuum[idx])
-                       continue;
-
-               if (indrel->rd_indam->amusemaintenanceworkmem)
-                       nindexes_mwm++;
-
-               est_shared = add_size(est_shared, sizeof(LVSharedIndStats));
-
-               /*
-                * Remember the number of indexes that support parallel operation for
-                * each phase.
-                */
-               if ((vacoptions & VACUUM_OPTION_PARALLEL_BULKDEL) != 0)
-                       lps->nindexes_parallel_bulkdel++;
-               if ((vacoptions & VACUUM_OPTION_PARALLEL_CLEANUP) != 0)
-                       lps->nindexes_parallel_cleanup++;
-               if ((vacoptions & VACUUM_OPTION_PARALLEL_COND_CLEANUP) != 0)
-                       lps->nindexes_parallel_condcleanup++;
-       }
-       shm_toc_estimate_chunk(&pcxt->estimator, est_shared);
-       shm_toc_estimate_keys(&pcxt->estimator, 1);
-
-       /* Estimate size for dead tuples -- PARALLEL_VACUUM_KEY_DEAD_TUPLES */
-       maxtuples = compute_max_dead_tuples(nblocks, true);
-       est_deadtuples = MAXALIGN(SizeOfDeadTuples(maxtuples));
-       shm_toc_estimate_chunk(&pcxt->estimator, est_deadtuples);
-       shm_toc_estimate_keys(&pcxt->estimator, 1);
-
-       /*
-        * Estimate space for BufferUsage and WalUsage --
-        * PARALLEL_VACUUM_KEY_BUFFER_USAGE and PARALLEL_VACUUM_KEY_WAL_USAGE.
-        *
-        * If there are no extensions loaded that care, we could skip this.  We
-        * have no way of knowing whether anyone's looking at pgBufferUsage or
-        * pgWalUsage, so do it unconditionally.
-        */
-       shm_toc_estimate_chunk(&pcxt->estimator,
-                                                  mul_size(sizeof(BufferUsage), pcxt->nworkers));
-       shm_toc_estimate_keys(&pcxt->estimator, 1);
-       shm_toc_estimate_chunk(&pcxt->estimator,
-                                                  mul_size(sizeof(WalUsage), pcxt->nworkers));
-       shm_toc_estimate_keys(&pcxt->estimator, 1);
-
-       /* Finally, estimate PARALLEL_VACUUM_KEY_QUERY_TEXT space */
-       if (debug_query_string)
-       {
-               querylen = strlen(debug_query_string);
-               shm_toc_estimate_chunk(&pcxt->estimator, querylen + 1);
-               shm_toc_estimate_keys(&pcxt->estimator, 1);
-       }
-       else
-               querylen = 0;                   /* keep compiler quiet */
-
-       InitializeParallelDSM(pcxt);
-
-       /* Prepare shared information */
-       shared = (LVShared *) shm_toc_allocate(pcxt->toc, est_shared);
-       MemSet(shared, 0, est_shared);
-       shared->relid = RelationGetRelid(vacrel->rel);
-       shared->elevel = elevel;
-       shared->maintenance_work_mem_worker =
-               (nindexes_mwm > 0) ?
-               maintenance_work_mem / Min(parallel_workers, nindexes_mwm) :
-               maintenance_work_mem;
-
-       pg_atomic_init_u32(&(shared->cost_balance), 0);
-       pg_atomic_init_u32(&(shared->active_nworkers), 0);
-       pg_atomic_init_u32(&(shared->idx), 0);
-       shared->offset = MAXALIGN(add_size(SizeOfLVShared, BITMAPLEN(nindexes)));
-
-       /*
-        * Initialize variables for shared index statistics, set NULL bitmap and
-        * the size of stats for each index.
-        */
-       memset(shared->bitmap, 0x00, BITMAPLEN(nindexes));
-       for (int idx = 0; idx < nindexes; idx++)
-       {
-               if (!will_parallel_vacuum[idx])
-                       continue;
-
-               /* Set NOT NULL as this index does support parallelism */
-               shared->bitmap[idx >> 3] |= 1 << (idx & 0x07);
-       }
-
-       shm_toc_insert(pcxt->toc, PARALLEL_VACUUM_KEY_SHARED, shared);
-       lps->lvshared = shared;
-
-       /* Prepare the dead tuple space */
-       dead_tuples = (LVDeadTuples *) shm_toc_allocate(pcxt->toc, est_deadtuples);
-       dead_tuples->max_tuples = maxtuples;
-       dead_tuples->num_tuples = 0;
-       MemSet(dead_tuples->itemptrs, 0, sizeof(ItemPointerData) * maxtuples);
-       shm_toc_insert(pcxt->toc, PARALLEL_VACUUM_KEY_DEAD_TUPLES, dead_tuples);
-       vacrel->dead_tuples = dead_tuples;
-
-       /*
-        * Allocate space for each worker's BufferUsage and WalUsage; no need to
-        * initialize
-        */
-       buffer_usage = shm_toc_allocate(pcxt->toc,
-                                                                       mul_size(sizeof(BufferUsage), pcxt->nworkers));
-       shm_toc_insert(pcxt->toc, PARALLEL_VACUUM_KEY_BUFFER_USAGE, buffer_usage);
-       lps->buffer_usage = buffer_usage;
-       wal_usage = shm_toc_allocate(pcxt->toc,
-                                                                mul_size(sizeof(WalUsage), pcxt->nworkers));
-       shm_toc_insert(pcxt->toc, PARALLEL_VACUUM_KEY_WAL_USAGE, wal_usage);
-       lps->wal_usage = wal_usage;
-
-       /* Store query string for workers */
-       if (debug_query_string)
-       {
-               char       *sharedquery;
-
-               sharedquery = (char *) shm_toc_allocate(pcxt->toc, querylen + 1);
-               memcpy(sharedquery, debug_query_string, querylen + 1);
-               sharedquery[querylen] = '\0';
-               shm_toc_insert(pcxt->toc,
-                                          PARALLEL_VACUUM_KEY_QUERY_TEXT, sharedquery);
+                                                       NULL, NULL, false, true /* isvacuum */);
        }
-
-       pfree(will_parallel_vacuum);
-       return lps;
-}
-#endif
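
The DSM setup in the removed begin_parallel_vacuum() follows the usual
shm_toc discipline: estimate every chunk and key before
InitializeParallelDSM(), then allocate each chunk inside the segment and
register it under a key that workers can look up.  A compressed sketch,
assuming a ParallelContext *pcxt already obtained from
CreateParallelContext(); MyShared and KEY_MYSHARED are placeholders:

typedef struct MyShared
{
    int         nindexes;       /* whatever state the workers must see */
} MyShared;

#define KEY_MYSHARED    UINT64CONST(0xF0000001)

static MyShared *
setup_shared_area(ParallelContext *pcxt)
{
    MyShared   *shared;

    /* Estimation must be complete before the segment is created */
    shm_toc_estimate_chunk(&pcxt->estimator, sizeof(MyShared));
    shm_toc_estimate_keys(&pcxt->estimator, 1);
    InitializeParallelDSM(pcxt);

    /* Allocate inside the segment and register it for workers */
    shared = (MyShared *) shm_toc_allocate(pcxt->toc, sizeof(MyShared));
    shared->nindexes = 0;
    shm_toc_insert(pcxt->toc, KEY_MYSHARED, shared);
    return shared;
}

A worker then recovers the pointer with shm_toc_lookup(toc, KEY_MYSHARED,
false), just as parallel_vacuum_main() below does for its keys.
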
-
-/*
- * Destroy the parallel context, and end parallel mode.
- *
- * Since writes are not allowed during parallel mode, copy the
- * updated index statistics from DSM into local memory and then later use that
- * to update the index statistics.  One might think that we can exit from
- * parallel mode, update the index statistics and then destroy parallel
- * context, but that won't be safe (see ExitParallelMode).
- */
-static void
-end_parallel_vacuum(LVRelState *vacrel)
-{
-       IndexBulkDeleteResult **indstats = vacrel->indstats;
-       LVParallelState *lps = vacrel->lps;
-       int                     nindexes = vacrel->nindexes;
-
-       Assert(!IsParallelWorker());
-
-       /* Copy the updated statistics */
-       for (int idx = 0; idx < nindexes; idx++)
-       {
-               LVSharedIndStats *shared_istat;
-
-               shared_istat = parallel_stats_for_idx(lps->lvshared, idx);
-
-               /*
-                * Skip index -- it must have been processed by the leader, from
-                * inside do_serial_processing_for_unsafe_indexes()
-                */
-               if (shared_istat == NULL)
-                       continue;
-
-               if (shared_istat->updated)
-               {
-                       indstats[idx] = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
-                       memcpy(indstats[idx], &(shared_istat->istat), sizeof(IndexBulkDeleteResult));
-               }
-               else
-                       indstats[idx] = NULL;
-       }
-
-       DestroyParallelContext(lps->pcxt);
-       ExitParallelMode();
-
-       /* Deactivate parallel vacuum */
-       pfree(lps);
-       vacrel->lps = NULL;
-}
-
-/*
- * Return shared memory statistics for index at offset 'getidx', if any
- *
- * Returning NULL indicates that compute_parallel_vacuum_workers() determined
- * that the index is a totally unsuitable target for all parallel processing
- * up front.  For example, the index could be < min_parallel_index_scan_size
- * cutoff.
- */
-static LVSharedIndStats *
-parallel_stats_for_idx(LVShared *lvshared, int getidx)
-{
-       char       *p;
-
-       if (IndStatsIsNull(lvshared, getidx))
-               return NULL;
-
-       p = (char *) GetSharedIndStats(lvshared);
-       for (int idx = 0; idx < getidx; idx++)
-       {
-               if (IndStatsIsNull(lvshared, idx))
-                       continue;
-
-               p += sizeof(LVSharedIndStats);
-       }
-
-       return (LVSharedIndStats *) p;
-}
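
Note how the removed lookup works: the stats slots are packed, so the
address of index 'getidx' is the array base plus one LVSharedIndStats for
every participating index ahead of it.  The same walk in isolation, where
has_slot stands in for the IndStatsIsNull() bitmap test (illustrative
only):

static char *
nth_packed_slot(char *base, const bool *has_slot, int getidx, Size slotsz)
{
    char       *p = base;

    if (!has_slot[getidx])
        return NULL;            /* this index never got a DSM slot */

    /* Skip one slot for every participating index ahead of ours */
    for (int i = 0; i < getidx; i++)
    {
        if (has_slot[i])
            p += slotsz;
    }
    return p;
}
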
-
-/*
- * Returns false, if the given index can't participate in parallel index
- * vacuum or parallel index cleanup
- */
-static bool
-parallel_processing_is_safe(Relation indrel, LVShared *lvshared)
-{
-       uint8           vacoptions = indrel->rd_indam->amparallelvacuumoptions;
-
-       /* first_time must be true only if for_cleanup is true */
-       Assert(lvshared->for_cleanup || !lvshared->first_time);
-
-       if (lvshared->for_cleanup)
-       {
-               /* Skip, if the index does not support parallel cleanup */
-               if (((vacoptions & VACUUM_OPTION_PARALLEL_CLEANUP) == 0) &&
-                       ((vacoptions & VACUUM_OPTION_PARALLEL_COND_CLEANUP) == 0))
-                       return false;
-
-               /*
-                * Skip, if the index supports parallel cleanup conditionally, but we
-                * have already processed the index (for bulkdelete).  See the
-                * comments for option VACUUM_OPTION_PARALLEL_COND_CLEANUP to know
-                * when indexes support parallel cleanup conditionally.
-                */
-               if (!lvshared->first_time &&
-                       ((vacoptions & VACUUM_OPTION_PARALLEL_COND_CLEANUP) != 0))
-                       return false;
-       }
-       else if ((vacoptions & VACUUM_OPTION_PARALLEL_BULKDEL) == 0)
-       {
-               /* Skip if the index does not support parallel bulk deletion */
-               return false;
-       }
-
-       return true;
-}
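
The safety test above is plain bit-testing against the flags the index AM
advertises in amparallelvacuumoptions.  The bulkdelete case alone, as a
sketch (supports_parallel_bulkdel is a hypothetical helper):

static bool
supports_parallel_bulkdel(Relation indrel)
{
    uint8       vacoptions = indrel->rd_indam->amparallelvacuumoptions;

    /* Flags are set by the index AM; see VACUUM_OPTION_* in vacuum.h */
    return (vacoptions & VACUUM_OPTION_PARALLEL_BULKDEL) != 0;
}
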
-
-/*
- * Perform work within a launched parallel process.
- *
- * Since parallel vacuum workers perform only index vacuum or index cleanup,
- * we don't need to report progress information.
- */
-void
-parallel_vacuum_main(dsm_segment *seg, shm_toc *toc)
-{
-       Relation        rel;
-       Relation   *indrels;
-       LVShared   *lvshared;
-       LVDeadTuples *dead_tuples;
-       BufferUsage *buffer_usage;
-       WalUsage   *wal_usage;
-       int                     nindexes;
-       char       *sharedquery;
-       LVRelState      vacrel;
-       ErrorContextCallback errcallback;
-
-       /*
-        * A parallel vacuum worker must have only PROC_IN_VACUUM flag since we
-        * don't support parallel vacuum for autovacuum as of now.
-        */
-       Assert(MyProc->statusFlags == PROC_IN_VACUUM);
-
-       lvshared = (LVShared *) shm_toc_lookup(toc, PARALLEL_VACUUM_KEY_SHARED,
-                                                                                  false);
-       elevel = lvshared->elevel;
-
-       if (lvshared->for_cleanup)
-               elog(DEBUG1, "starting parallel vacuum worker for cleanup");
-       else
-               elog(DEBUG1, "starting parallel vacuum worker for bulk delete");
-
-       /* Set debug_query_string for individual workers */
-       sharedquery = shm_toc_lookup(toc, PARALLEL_VACUUM_KEY_QUERY_TEXT, true);
-       debug_query_string = sharedquery;
-       pgstat_report_activity(STATE_RUNNING, debug_query_string);
-
-       /*
-        * Open table.  The lock mode is the same as the leader process.  It's
-        * okay because the lock mode does not conflict among the parallel
-        * workers.
-        */
-       rel = table_open(lvshared->relid, ShareUpdateExclusiveLock);
-
-       /*
-        * Open all indexes. indrels are sorted in order by OID, which should be
-        * matched to the leader's one.
-        */
-       vac_open_indexes(rel, RowExclusiveLock, &nindexes, &indrels);
-       Assert(nindexes > 0);
-
-       /* Set dead tuple space */
-       dead_tuples = (LVDeadTuples *) shm_toc_lookup(toc,
-                                                                                                 PARALLEL_VACUUM_KEY_DEAD_TUPLES,
-                                                                                                 false);
-
-       /* Set cost-based vacuum delay */
-       VacuumCostActive = (VacuumCostDelay > 0);
-       VacuumCostBalance = 0;
-       VacuumPageHit = 0;
-       VacuumPageMiss = 0;
-       VacuumPageDirty = 0;
-       VacuumCostBalanceLocal = 0;
-       VacuumSharedCostBalance = &(lvshared->cost_balance);
-       VacuumActiveNWorkers = &(lvshared->active_nworkers);
-
-       vacrel.rel = rel;
-       vacrel.indrels = indrels;
-       vacrel.nindexes = nindexes;
-       /* Each parallel VACUUM worker gets its own access strategy */
-       vacrel.bstrategy = GetAccessStrategy(BAS_VACUUM);
-       vacrel.indstats = (IndexBulkDeleteResult **)
-               palloc0(nindexes * sizeof(IndexBulkDeleteResult *));
-
-       if (lvshared->maintenance_work_mem_worker > 0)
-               maintenance_work_mem = lvshared->maintenance_work_mem_worker;
-
-       /*
-        * Initialize vacrel for use as error callback arg by parallel worker.
-        */
-       vacrel.relnamespace = get_namespace_name(RelationGetNamespace(rel));
-       vacrel.relname = pstrdup(RelationGetRelationName(rel));
-       vacrel.indname = NULL;
-       vacrel.phase = VACUUM_ERRCB_PHASE_UNKNOWN;      /* Not yet processing */
-       vacrel.dead_tuples = dead_tuples;
-
-       /* Setup error traceback support for ereport() */
-       errcallback.callback = vacuum_error_callback;
-       errcallback.arg = &vacrel;
-       errcallback.previous = error_context_stack;
-       error_context_stack = &errcallback;
-
-       /* Prepare to track buffer usage during parallel execution */
-       InstrStartParallelQuery();
-
-       /* Process indexes to perform vacuum/cleanup */
-       do_parallel_processing(&vacrel, lvshared);
-
-       /* Report buffer/WAL usage during parallel execution */
-       buffer_usage = shm_toc_lookup(toc, PARALLEL_VACUUM_KEY_BUFFER_USAGE, false);
-       wal_usage = shm_toc_lookup(toc, PARALLEL_VACUUM_KEY_WAL_USAGE, false);
-       InstrEndParallelQuery(&buffer_usage[ParallelWorkerNumber],
-                                                 &wal_usage[ParallelWorkerNumber]);
-
-       /* Pop the error context stack */
-       error_context_stack = errcallback.previous;
-
-       vac_close_indexes(nindexes, indrels, RowExclusiveLock);
-       table_close(rel, ShareUpdateExclusiveLock);
-       FreeAccessStrategy(vacrel.bstrategy);
-       pfree(vacrel.indstats);
 }
 
 /*
- * Error context callback for errors occurring during vacuum.
-=======
  * Error context callback for errors occurring during vacuum.  The error
  * context messages for index phases should match the messages set in parallel
  * vacuum.  If you change this function for those phases, change
  * parallel_vacuum_error_callback() as well.
->>>>>>> REL_16_9
  */
 static void
 vacuum_error_callback(void *arg)
diff --git a/src/backend/access/spgist/spginsert.c b/src/backend/access/spgist/spginsert.c
index 789a64ae0e7..e6c9d7b6191 100644
--- a/src/backend/access/spgist/spginsert.c
+++ b/src/backend/access/spgist/spginsert.c
@@ -162,59 +162,35 @@ spgbuildempty(Relation index)
        /*
         * Initialize the meta page and root pages
         */
-<<<<<<< HEAD
-       PageEncryptInplace(page, INIT_FORKNUM,
-                                          SPGIST_METAPAGE_BLKNO);
-       PageSetChecksumInplace(page, SPGIST_METAPAGE_BLKNO);
-       smgrwrite(index->rd_smgr, INIT_FORKNUM, SPGIST_METAPAGE_BLKNO,
-                         (char *) page, true);
-       log_newpage(&index->rd_smgr->smgr_rnode.node, INIT_FORKNUM,
-                               SPGIST_METAPAGE_BLKNO, page, true);
-=======
        metabuffer = ReadBufferExtended(index, INIT_FORKNUM, P_NEW, RBM_NORMAL, NULL);
        LockBuffer(metabuffer, BUFFER_LOCK_EXCLUSIVE);
        rootbuffer = ReadBufferExtended(index, INIT_FORKNUM, P_NEW, RBM_NORMAL, NULL);
        LockBuffer(rootbuffer, BUFFER_LOCK_EXCLUSIVE);
        nullbuffer = ReadBufferExtended(index, INIT_FORKNUM, P_NEW, RBM_NORMAL, NULL);
        LockBuffer(nullbuffer, BUFFER_LOCK_EXCLUSIVE);
->>>>>>> REL_16_9
 
        Assert(BufferGetBlockNumber(metabuffer) == SPGIST_METAPAGE_BLKNO);
        Assert(BufferGetBlockNumber(rootbuffer) == SPGIST_ROOT_BLKNO);
        Assert(BufferGetBlockNumber(nullbuffer) == SPGIST_NULL_BLKNO);
 
-<<<<<<< HEAD
-       PageEncryptInplace(page, INIT_FORKNUM,
-                                          SPGIST_ROOT_BLKNO);
-       PageSetChecksumInplace(page, SPGIST_ROOT_BLKNO);
-       smgrwrite(index->rd_smgr, INIT_FORKNUM, SPGIST_ROOT_BLKNO,
-                         (char *) page, true);
-       log_newpage(&index->rd_smgr->smgr_rnode.node, INIT_FORKNUM,
-                               SPGIST_ROOT_BLKNO, page, true);
-=======
        START_CRIT_SECTION();
->>>>>>> REL_16_9
 
        SpGistInitMetapage(BufferGetPage(metabuffer));
+       PageEncryptInplace(BufferGetPage(metabuffer), INIT_FORKNUM,
+                                          SPGIST_METAPAGE_BLKNO);
        MarkBufferDirty(metabuffer);
        SpGistInitBuffer(rootbuffer, SPGIST_LEAF);
+       PageEncryptInplace(BufferGetPage(rootbuffer), INIT_FORKNUM,
+                                          SPGIST_ROOT_BLKNO);
        MarkBufferDirty(rootbuffer);
        SpGistInitBuffer(nullbuffer, SPGIST_LEAF | SPGIST_NULLS);
+       PageEncryptInplace(BufferGetPage(nullbuffer), INIT_FORKNUM,
+                                          SPGIST_NULL_BLKNO);
        MarkBufferDirty(nullbuffer);
 
-<<<<<<< HEAD
-       PageEncryptInplace(page, INIT_FORKNUM,
-                                          SPGIST_NULL_BLKNO);
-       PageSetChecksumInplace(page, SPGIST_NULL_BLKNO);
-       smgrwrite(index->rd_smgr, INIT_FORKNUM, SPGIST_NULL_BLKNO,
-                         (char *) page, true);
-       log_newpage(&index->rd_smgr->smgr_rnode.node, INIT_FORKNUM,
-                               SPGIST_NULL_BLKNO, page, true);
-=======
        log_newpage_buffer(metabuffer, true);
        log_newpage_buffer(rootbuffer, true);
        log_newpage_buffer(nullbuffer, true);
->>>>>>> REL_16_9
 
        END_CRIT_SECTION();
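
With this change spgbuildempty() no longer writes scratch pages with
smgrwrite(); each init-fork page goes through shared buffers and is
WAL-logged with log_newpage_buffer() inside the critical section, with
Cloudberry's PageEncryptInplace() applied before the buffer is marked
dirty.  The per-page shape, reduced to a sketch (PageInit stands in for
the SpGistInit* calls, and the buffer release that follows the hunk is
shown explicitly):

static void
build_one_init_page(Relation index, BlockNumber expected_blkno)
{
    Buffer      buf;

    buf = ReadBufferExtended(index, INIT_FORKNUM, P_NEW, RBM_NORMAL, NULL);
    LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
    Assert(BufferGetBlockNumber(buf) == expected_blkno);

    START_CRIT_SECTION();
    PageInit(BufferGetPage(buf), BLCKSZ, 0);    /* stand-in initializer */
    MarkBufferDirty(buf);
    log_newpage_buffer(buf, true);              /* WAL-log the whole page */
    END_CRIT_SECTION();

    UnlockReleaseBuffer(buf);
}
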
 
diff --git a/src/backend/access/table/table.c b/src/backend/access/table/table.c
index 0d9aa1cc37f..bd274149505 100644
--- a/src/backend/access/table/table.c
+++ b/src/backend/access/table/table.c
@@ -25,15 +25,12 @@
 #include "access/table.h"
 #include "storage/lmgr.h"
 
-<<<<<<< HEAD
 #include "catalog/namespace.h"
 #include "cdb/cdbvars.h"
 #include "utils/faultinjector.h"
 #include "utils/guc.h"
 
-=======
 static inline void validate_relation_kind(Relation r);
->>>>>>> REL_16_9
 
 /* ----------------
  *             table_open - open a table relation by relation OID
@@ -136,9 +133,6 @@ table_close(Relation relation, LOCKMODE lockmode)
        relation_close(relation, lockmode);
 }
 
-<<<<<<< HEAD
-
-
 /*
  * CdbTryOpenTable -- Opens a table with a specified lock mode.
  *
@@ -253,7 +247,7 @@ CdbOpenTable(Oid relid, LOCKMODE reqmode, bool *lockUpgraded)
        return rel;
 
 }                                       /* CdbOpenTable */
-=======
+
 /* ----------------
  *             validate_relation_kind - check the relation's kind
  *
@@ -272,4 +266,3 @@ validate_relation_kind(Relation r)
                                                RelationGetRelationName(r)),
                                 errdetail_relkind_not_supported(r->rd_rel->relkind)));
 }
->>>>>>> REL_16_9


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@cloudberry.apache.org
For additional commands, e-mail: commits-h...@cloudberry.apache.org
