This is an automated email from the ASF dual-hosted git repository.

chenjinbao1989 pushed a commit to branch cbdb-postgres-merge
in repository https://gitbox.apache.org/repos/asf/cloudberry.git


The following commit(s) were added to refs/heads/cbdb-postgres-merge by this push:
     new 88b256ba47a Fix all the compile error for executor
88b256ba47a is described below

commit 88b256ba47aa07d052cbf9f59b2e3bbd0efb8cd7
Author: Jinbao Chen <chenjinbao1...@gmail.com>
AuthorDate: Sun Sep 28 14:35:28 2025 +0800

    Fix all the compile error for executor
---
 src/backend/executor/execDynamicIndexes.c        |  2 +-
 src/backend/executor/nodeDynamicBitmapHeapscan.c |  3 +-
 src/backend/executor/nodeDynamicForeignscan.c    |  3 +-
 src/backend/executor/nodeDynamicIndexOnlyscan.c  |  1 +
 src/backend/executor/nodeDynamicIndexscan.c      |  1 +
 src/backend/executor/nodeDynamicSeqscan.c        | 11 ++--
 src/backend/executor/nodeModifyTable.c           | 50 ++++++-----------
 src/backend/executor/nodeMotion.c                |  5 +-
 src/backend/executor/nodeSeqscan.c               |  2 +-
 src/backend/executor/nodeSubplan.c               |  5 +-
 src/backend/executor/nodeWindowAgg.c             | 70 +++---------------------
 src/backend/executor/spi.c                       |  3 +-
 12 files changed, 44 insertions(+), 112 deletions(-)

diff --git a/src/backend/executor/execDynamicIndexes.c b/src/backend/executor/execDynamicIndexes.c
index 45f9a395905..744cfc6b17b 100644
--- a/src/backend/executor/execDynamicIndexes.c
+++ b/src/backend/executor/execDynamicIndexes.c
@@ -61,7 +61,7 @@ GetColumnMapping(Oid oldOid, Oid newOid)
        TupleDesc       oldTupDesc = oldRel->rd_att;
        TupleDesc       newTupDesc = newRel->rd_att;
 
-       attMap = build_attrmap_by_name_if_req(oldTupDesc, newTupDesc);
+       attMap = build_attrmap_by_name_if_req(oldTupDesc, newTupDesc, false);
 
        heap_close(oldRel, AccessShareLock);
        heap_close(newRel, AccessShareLock);
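
[Editor's note] The one-line change above (and the identical ones in the other dynamic
scan nodes below) adapts the call to the newer upstream signature of
build_attrmap_by_name_if_req(), which now takes a third boolean argument. A minimal
sketch of the assumed post-merge call form follows; the parameter name missing_ok is
taken from upstream PostgreSQL and is an assumption, not something shown in this diff.

    /* Sketch only: assumes the PG16-style prototype
     *   AttrMap *build_attrmap_by_name_if_req(TupleDesc indesc,
     *                                         TupleDesc outdesc,
     *                                         bool missing_ok);
     * Passing false keeps the old behaviour: a column present in one
     * descriptor but not the other raises an error. A NULL result means
     * the descriptors already match and no varattno remapping is needed. */
    AttrMap *attMap = build_attrmap_by_name_if_req(oldTupDesc, newTupDesc,
                                                   false /* missing_ok (assumed name) */);
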
diff --git a/src/backend/executor/nodeDynamicBitmapHeapscan.c b/src/backend/executor/nodeDynamicBitmapHeapscan.c
index 95dc85d940f..3411c59e5bf 100644
--- a/src/backend/executor/nodeDynamicBitmapHeapscan.c
+++ b/src/backend/executor/nodeDynamicBitmapHeapscan.c
@@ -162,7 +162,7 @@ initNextTableToScan(DynamicBitmapHeapScanState *node)
         * FIXME: should we use execute_attr_map_tuple instead? Seems like a
         * higher level abstraction that fits the bill
         */
-       attMap = build_attrmap_by_name_if_req(partTupDesc, lastTupDesc);
+       attMap = build_attrmap_by_name_if_req(partTupDesc, lastTupDesc, false);
        table_close(lastScannedRel, AccessShareLock);
 
        /* If attribute remapping is not necessary, then do not change the varattno */
@@ -204,6 +204,7 @@ ExecDynamicBitmapHeapScan(PlanState *pstate)
                node->did_pruning = true;
                node->as_valid_subplans =
                                ExecFindMatchingSubPlans(node->as_prune_state,
+                                                                                false,
                                                                                 node->ss.ps.state,
                                                                                 list_length(plan->partOids),
                                                                                 plan->join_prune_paramids);
diff --git a/src/backend/executor/nodeDynamicForeignscan.c b/src/backend/executor/nodeDynamicForeignscan.c
index b73cf4fc702..a75f2f1904d 100644
--- a/src/backend/executor/nodeDynamicForeignscan.c
+++ b/src/backend/executor/nodeDynamicForeignscan.c
@@ -162,7 +162,7 @@ initNextTableToScan(DynamicForeignScanState *node)
         * FIXME: should we use execute_attr_map_tuple instead? Seems like a
         * higher level abstraction that fits the bill
         */
-       attMap = build_attrmap_by_name_if_req(partTupDesc, lastTupDesc);
+       attMap = build_attrmap_by_name_if_req(partTupDesc, lastTupDesc, false);
        table_close(lastScannedRel, AccessShareLock);
 
        /* If attribute remapping is not necessary, then do not change the varattno */
@@ -201,6 +201,7 @@ ExecDynamicForeignScan(PlanState *pstate)
                node->did_pruning = true;
                node->as_valid_subplans =
                        ExecFindMatchingSubPlans(node->as_prune_state,
+                                                                        false,
                                                                         node->ss.ps.state,
                                                                         list_length(plan->partOids),
                                                                         plan->join_prune_paramids);
diff --git a/src/backend/executor/nodeDynamicIndexOnlyscan.c b/src/backend/executor/nodeDynamicIndexOnlyscan.c
index 2908672132a..915257a0266 100644
--- a/src/backend/executor/nodeDynamicIndexOnlyscan.c
+++ b/src/backend/executor/nodeDynamicIndexOnlyscan.c
@@ -109,6 +109,7 @@ ExecDynamicIndexOnlyScan(PlanState *pstate)
                node->did_pruning = true;
                node->as_valid_subplans =
                        ExecFindMatchingSubPlans(node->as_prune_state,
+                                                                        false,
                                                                         node->ss.ps.state,
                                                                         list_length(plan->partOids),
                                                                         plan->join_prune_paramids);
diff --git a/src/backend/executor/nodeDynamicIndexscan.c b/src/backend/executor/nodeDynamicIndexscan.c
index 4ceff686cef..fa3bf6e8192 100644
--- a/src/backend/executor/nodeDynamicIndexscan.c
+++ b/src/backend/executor/nodeDynamicIndexscan.c
@@ -114,6 +114,7 @@ ExecDynamicIndexScan(PlanState *pstate)
                node->did_pruning = true;
                node->as_valid_subplans =
                        ExecFindMatchingSubPlans(node->as_prune_state,
+                                                                        false,
                                                                         node->ss.ps.state,
                                                                         list_length(plan->partOids),
                                                                         plan->join_prune_paramids);
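
[Editor's note] Every dynamic scan node in this commit now passes an extra boolean as
the second argument to ExecFindMatchingSubPlans(). In upstream PostgreSQL that slot is
the initial_prune flag (initial vs. executor-time pruning); the trailing arguments are
Cloudberry-specific. The annotated call below restates the call sites from this diff;
the parameter names in the comments are assumptions, not confirmed by the patch.

    /* Assumed shape of the merged call, as used by the Dynamic*Scan nodes: */
    node->as_valid_subplans =
        ExecFindMatchingSubPlans(node->as_prune_state,
                                 false,                        /* initial_prune (assumed name) */
                                 node->ss.ps.state,            /* executor state */
                                 list_length(plan->partOids),  /* number of partition OIDs */
                                 plan->join_prune_paramids);   /* GPDB join-pruning param ids */
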
diff --git a/src/backend/executor/nodeDynamicSeqscan.c b/src/backend/executor/nodeDynamicSeqscan.c
index c13be0a1f1b..2e652e33b5d 100644
--- a/src/backend/executor/nodeDynamicSeqscan.c
+++ b/src/backend/executor/nodeDynamicSeqscan.c
@@ -58,9 +58,9 @@ ExecInitDynamicSeqScan(DynamicSeqScan *node, EState *estate, int eflags)
 
        /* Initialize child expressions. This is needed to find subplans. */
        state->ss.ps.qual =
-               ExecInitQual(node->seqscan.plan.qual, (PlanState *) state);
+               ExecInitQual(node->seqscan.scan.plan.qual, (PlanState *) state);
 
-       Relation scanRel = ExecOpenScanRelation(estate, node->seqscan.scanrelid, eflags);
+       Relation scanRel = ExecOpenScanRelation(estate, node->seqscan.scan.scanrelid, eflags);
        ExecInitScanTupleSlot(estate, &state->ss, RelationGetDescr(scanRel), table_slot_callbacks(scanRel));
 
        /* Dynamic table/index/bitmap scan can't tell the ops of tupleslot */
@@ -80,13 +80,13 @@ ExecInitDynamicSeqScan(DynamicSeqScan *node, EState *estate, int eflags)
                state->partOids[i] = lfirst_oid(lc);
        state->whichPart = -1;
 
-       reloid = exec_rt_fetch(node->seqscan.scanrelid, estate)->relid;
+       reloid = exec_rt_fetch(node->seqscan.scan.scanrelid, estate)->relid;
        Assert(OidIsValid(reloid));
 
        /* lastRelOid is used to remap varattno for heterogeneous partitions */
        state->lastRelOid = reloid;
 
-       state->scanrelid = node->seqscan.scanrelid;
+       state->scanrelid = node->seqscan.scan.scanrelid;
 
        state->as_prune_state = NULL;
 
@@ -151,7 +151,7 @@ initNextTableToScan(DynamicSeqScanState *node)
         * FIXME: should we use execute_attr_map_tuple instead? Seems like a
         * higher level abstraction that fits the bill
         */
-       attMap = build_attrmap_by_name_if_req(partTupDesc, lastTupDesc);
+       attMap = build_attrmap_by_name_if_req(partTupDesc, lastTupDesc, false);
        table_close(lastScannedRel, AccessShareLock);
 
        /* If attribute remapping is not necessary, then do not change the varattno */
@@ -187,6 +187,7 @@ ExecDynamicSeqScan(PlanState *pstate)
                node->did_pruning = true;
                node->as_valid_subplans =
                        ExecFindMatchingSubPlans(node->as_prune_state,
+                                                                        false,
                                                                         node->ss.ps.state,
                                                                         list_length(plan->partOids),
                                                                         plan->join_prune_paramids);
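
[Editor's note] The node->seqscan.plan.* and node->seqscan.scanrelid accesses above
became node->seqscan.scan.plan.* and node->seqscan.scan.scanrelid because upstream
PostgreSQL 15 changed SeqScan to hold its Scan node in a named field rather than
inlining the fields. A rough sketch of the layout this code now assumes (the
DynamicSeqScan member name seqscan is inferred from the call sites in this diff):

    /* Upstream-style node layout assumed by the new accessors: */
    typedef struct SeqScan
    {
        Scan        scan;               /* plan, scanrelid, ... live in here now */
    } SeqScan;

    /* so the dynamic scan reaches the range-table index via: */
    Index rti = node->seqscan.scan.scanrelid;
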
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c
index 99597dcdbcf..675e5bf47ad 100644
--- a/src/backend/executor/nodeModifyTable.c
+++ b/src/backend/executor/nodeModifyTable.c
@@ -825,11 +825,9 @@ static TupleTableSlot *
 ExecInsert(ModifyTableContext *context,
                   ResultRelInfo *resultRelInfo,
                   TupleTableSlot *slot,
-                  TupleTableSlot *planSlot,
-                  EState *estate,
                   bool canSetTag,
                   TupleTableSlot **inserted_tuple,
-                  ResultRelInfo **insert_destrel
+                  ResultRelInfo **insert_destrel,
                   bool splitUpdate)
 {
        ModifyTableState *mtstate = context->mtstate;
@@ -1875,9 +1873,9 @@ ldelete:
        if (resultRelationDesc->rd_rel->relispartition)
        {
 
-               mtstate->mt_leaf_relids_deleted =
-                       bms_add_member(mtstate->mt_leaf_relids_deleted, RelationGetRelid(resultRelationDesc));
-               mtstate->has_leaf_changed = true;
+               context->mtstate->mt_leaf_relids_deleted =
+                       bms_add_member(context->mtstate->mt_leaf_relids_deleted, RelationGetRelid(resultRelationDesc));
+               context->mtstate->has_leaf_changed = true;
        }
 
        /* Tell caller that the delete actually happened. */
@@ -2126,20 +2124,6 @@ ExecUpdatePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
        if (result)
                *result = TM_Ok;
 
-       /*
-        * Sanity check the distribution of the tuple to prevent
-        * potential data corruption in case users manipulate data
-        * incorrectly (e.g. insert data on incorrect segment through
-        * utility mode) or there is bug in code, etc.
-        */
-       if (segid != GpIdentity.segindex)
-               elog(ERROR,
-                        "distribution key of the tuple (%u, %u) doesn't belong to "
-                        "current segment (actually from seg%d)",
-                        BlockIdGetBlockNumber(&(tupleid->ip_blkid)),
-                        tupleid->ip_posid,
-                        segid);
-
        ExecMaterializeSlot(slot);
 
        /*
@@ -2272,7 +2256,7 @@ lreplace:
                 * if the tuple has been concurrently updated, a retry is needed.
                 */
                if (ExecCrossPartitionUpdate(context, resultRelInfo,
-                                                                        tupleid, oldtuple, slot, segid
+                                                                        tupleid, oldtuple, slot, segid,
                                                                         canSetTag, updateCxt,
                                                                         &result,
                                                                         &retry_slot,
@@ -2742,9 +2726,9 @@ redo_act:
 
        if (resultRelationDesc->rd_rel->relispartition)
        {
-               mtstate->mt_leaf_relids_updated =
-                       bms_add_member(mtstate->mt_leaf_relids_updated, RelationGetRelid(resultRelationDesc));
-               mtstate->has_leaf_changed = true;
+               context->mtstate->mt_leaf_relids_updated =
+                       bms_add_member(context->mtstate->mt_leaf_relids_updated, RelationGetRelid(resultRelationDesc));
+               context->mtstate->has_leaf_changed = true;
        }
 
        ExecUpdateEpilogue(context, &updateCxt, resultRelInfo, tupleid, oldtuple,
@@ -3195,7 +3179,7 @@ lmerge_matched:
                                        break;          /* concurrent update/delete */
                                }
                                result = ExecUpdateAct(context, resultRelInfo, tupleid, NULL,
-                                                                          newslot, canSetTag, &updateCxt);
+                                                                          newslot, canSetTag, &updateCxt, GpIdentity.segindex);
 
                                /*
                                 * As in ExecUpdate(), if ExecUpdateAct() reports that a
@@ -3221,7 +3205,7 @@ lmerge_matched:
                        case CMD_DELETE:
                                context->relaction = relaction;
                                if (!ExecDeletePrologue(context, resultRelInfo, tupleid,
-                                                                               NULL, NULL, &result))
+                                                                               NULL, NULL, &result, false))
                                {
                                        if (result == TM_Ok)
                                                goto out;       /* "do nothing" */
@@ -3231,7 +3215,7 @@ lmerge_matched:
                                if (result == TM_Ok)
                                {
                                        ExecDeleteEpilogue(context, resultRelInfo, tupleid, NULL,
-                                                                          false);
+                                                                          false, false);
                                        mtstate->mt_merge_deleted += 1;
                                }
                                break;
@@ -3528,7 +3512,7 @@ ExecMergeNotMatched(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
                                context->relaction = action;
 
                                (void) ExecInsert(context, mtstate->rootResultRelInfo, newslot,
-                                                                 canSetTag, NULL, NULL);
+                                                                 canSetTag, NULL, NULL, false);
                                mtstate->mt_merge_inserted += 1;
                                break;
                        case CMD_NOTHING:
@@ -3911,7 +3895,9 @@ ExecModifyTable(PlanState *pstate)
        HeapTupleData oldtupdata;
        HeapTuple       oldtuple;
        ItemPointer tupleid;
-       bool            tuplock;
+       List       *relinfos = NIL;
+       ListCell   *lc;
+       PartitionTupleRouting *proute = node->mt_partition_tuple_routing;
 
        CHECK_FOR_INTERRUPTS();
 
@@ -4265,7 +4251,7 @@ ExecModifyTable(PlanState *pstate)
                                                                                                           oldSlot))
                                                        elog(ERROR, "failed to fetch tuple being updated");
                                        }
-                                       slot = ExecGetUpdateNewTuple(resultRelInfo, planSlot,
+                                       slot = ExecGetUpdateNewTuple(resultRelInfo, context.planSlot,
                                                                                                 oldSlot);
 
                                        /* Now apply the update. */
@@ -4280,8 +4266,8 @@ ExecModifyTable(PlanState *pstate)
                                        if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
                                                ExecInitInsertProjection(node, resultRelInfo);
                                        slot = ExecGetInsertNewTuple(resultRelInfo, context.planSlot);
-                                       slot = ExecInsert(&context, resultRelInfo, slot, context.planSlot,
-                                                                         estate, node->canSetTag, NULL, NULL, true/* splitUpdate */);
+                                       slot = ExecInsert(&context, resultRelInfo, slot,
+                                                                         node->canSetTag, NULL, NULL, true/* splitUpdate */);
                                        resultRelInfo = old;
                                }
                                else if (action == DML_DELETE)
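
[Editor's note] The hunks above drop the planSlot and estate parameters from
ExecInsert() (both are reachable through the ModifyTableContext) and move the
GPDB-specific splitUpdate flag to the end of the argument list, fixing the missing
comma after insert_destrel. Reconstructed from the call sites in this diff, the
prototype would look roughly like this (a sketch, not the authoritative declaration):

    static TupleTableSlot *
    ExecInsert(ModifyTableContext *context,      /* carries mtstate, estate, planSlot */
               ResultRelInfo *resultRelInfo,
               TupleTableSlot *slot,
               bool canSetTag,
               TupleTableSlot **inserted_tuple,
               ResultRelInfo **insert_destrel,
               bool splitUpdate);                /* true only for the split-update path */

Callers such as ExecMergeNotMatched() pass false for splitUpdate, while the
split-update branch of ExecModifyTable() passes true, as the hunks above show.
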
diff --git a/src/backend/executor/nodeMotion.c b/src/backend/executor/nodeMotion.c
index b6514f2a0ce..957efd12c9d 100644
--- a/src/backend/executor/nodeMotion.c
+++ b/src/backend/executor/nodeMotion.c
@@ -480,9 +480,8 @@ execMotionSortedReceiver(MotionState *node)
         */
        if (!node->tupleheapReady)
        {
-               MinimalTuple inputTuple;
-               binaryheap *hp = node->tupleheap;
-               Motion     *motion = (Motion *) node->ps.plan;
+               hp = node->tupleheap;
+               motion = (Motion *) node->ps.plan;
                int                     iSegIdx;
                ListCell   *lcProcess;
                ExecSlice  *sendSlice = &node->ps.state->es_sliceTable->slices[motion->motionID];
diff --git a/src/backend/executor/nodeSeqscan.c b/src/backend/executor/nodeSeqscan.c
index 6bdb9fb13d2..769b05078b5 100644
--- a/src/backend/executor/nodeSeqscan.c
+++ b/src/backend/executor/nodeSeqscan.c
@@ -164,7 +164,7 @@ ExecInitSeqScan(SeqScan *node, EState *estate, int eflags)
         * get the relation object id from the relid'th entry in the range table,
         * open that relation and acquire appropriate lock on it.
         */
-       currentRelation = ExecOpenScanRelation(estate, node->scanrelid, eflags);
+       currentRelation = ExecOpenScanRelation(estate, node->scan.scanrelid, eflags);
 
        return ExecInitSeqScanForPartition(node, estate, currentRelation);
 }
diff --git a/src/backend/executor/nodeSubplan.c b/src/backend/executor/nodeSubplan.c
index a19d7875283..53df2d270ec 100644
--- a/src/backend/executor/nodeSubplan.c
+++ b/src/backend/executor/nodeSubplan.c
@@ -1141,6 +1141,7 @@ ExecSetParamPlan(SubPlanState *node, ExprContext *econtext, QueryDesc *queryDesc
        TupleTableSlot *slot;
        ListCell   *l;
        bool            found = false;
+       ListCell   *pvar;
        ArrayBuildState *astate pg_attribute_unused() = NULL;
        Size            savepeakspace = MemoryContextGetPeakSpace(planstate->state->es_query_cxt);
 
@@ -1427,7 +1428,6 @@ PG_TRY();
                ErrorData *qeError = NULL;
                CdbDispatchResults *pr = NULL;
                CdbDispatcherState *ds = queryDesc->estate->dispatcherState;
-               int     primaryWriterSliceIndex = PrimaryWriterSliceIndex(queryDesc->estate);
 
                cdbdisp_checkDispatchResult(ds, DISPATCH_WAIT_NONE);
                pr = cdbdisp_getDispatchResults(ds, &qeError);
@@ -1439,9 +1439,6 @@ PG_TRY();
                        ThrowErrorData(qeError);
                }
 
-               /* collect pgstat from QEs for current transaction level */
-               pgstat_combine_from_qe(pr, primaryWriterSliceIndex);
-
                /* If EXPLAIN ANALYZE, collect execution stats from qExecs. */
                if (planstate->instrument && planstate->instrument->need_cdb)
                {
diff --git a/src/backend/executor/nodeWindowAgg.c b/src/backend/executor/nodeWindowAgg.c
index af988032f31..9ceb1fb5377 100644
--- a/src/backend/executor/nodeWindowAgg.c
+++ b/src/backend/executor/nodeWindowAgg.c
@@ -711,7 +711,7 @@ perform_distinct_windowaggregate(WindowAggState *winstate,
 #endif
 
        /* load the first tuple from spool */
-       if (tuplesort_getdatum(peraggstate->distinctSortState, true,
+       if (tuplesort_getdatum(peraggstate->distinctSortState, true, false,
                                                   &fcinfo->args[1].value, &fcinfo->args[1].isnull, NULL))
        {
                call_transfunc(winstate, perfuncstate, peraggstate, fcinfo);
@@ -719,7 +719,7 @@ perform_distinct_windowaggregate(WindowAggState *winstate,
                prevNull = fcinfo->args[1].isnull;
 
                /* continue loading more tuples */
-               while (tuplesort_getdatum(peraggstate->distinctSortState, true,
+               while (tuplesort_getdatum(peraggstate->distinctSortState, true, false,
                                                                  &fcinfo->args[1].value, &fcinfo->args[1].isnull, NULL))
                {
                        int             cmp;
@@ -2448,66 +2448,7 @@ ExecWindowAgg(PlanState *pstate)
        /* We need to loop as the runCondition or qual may filter out tuples */
        for (;;)
        {
-               /* Initialize for first partition and set current row = 0 */
-               begin_partition(winstate);
-               /* If there are no input rows, we'll detect that and exit below */
-       }
-       else
-       {
-               /* Advance current row within partition */
-               winstate->currentpos++;
-               /* This might mean that the frame moves, too */
-               winstate->framehead_valid = false;
-               winstate->frametail_valid = false;
-               /* we don't need to invalidate grouptail here; see below */
-
-               if (!winstate->start_offset_var_free)
-                       winstate->start_offset_valid = false;
-               if (!winstate->end_offset_var_free)
-                       winstate->end_offset_valid = false;
-       }
-
-       /*
-        * Spool all tuples up to and including the current row, if we haven't
-        * already
-        */
-       spool_tuples(winstate, winstate->currentpos);
-
-#ifdef FAULT_INJECTOR
-       /*
-        * This routine is used for testing if we have allocated enough memory
-        * for the tuplestore (winstate->buffer) in begin_partition(). If all
-        * tuples of the current partition can be fitted in the memory, we
-        * emit a notice saying 'fitted in memory'. If they cannot be fitted in
-        * the memory, we emit a notice saying 'spilled to disk'. If there're
-        * no input rows, we emit a notice saying 'no input rows'.
-        *
-        * NOTE: The fault-injector only triggers once, we emit the notice when
-        * we finishes spooling all the tuples of the first partition.
-        */
-       if (winstate->partition_spooled &&
-               winstate->currentpos >= winstate->spooled_rows &&
-               SIMPLE_FAULT_INJECTOR("winagg_after_spool_tuples") == FaultInjectorTypeSkip)
-       {
-               if (winstate->buffer)
-               {
-                       if (tuplestore_in_memory(winstate->buffer))
-                               ereport(NOTICE, (errmsg("winagg: tuplestore fitted in memory")));
-                       else
-                               ereport(NOTICE, (errmsg("winagg: tuplestore spilled to disk")));
-               }
-               else
-                       ereport(NOTICE, (errmsg("winagg: no input rows")));
-       }
-#endif
-
-       /* Move to the next partition if we reached the end of this partition */
-       if (winstate->partition_spooled &&
-               winstate->currentpos >= winstate->spooled_rows)
-       {
-               release_partition(winstate);
-
-               if (winstate->more_partitions)
+               if (winstate->buffer == NULL)
                {
                        /* Initialize for first partition and set current row = 0 */
                        begin_partition(winstate);
@@ -2521,6 +2462,11 @@ ExecWindowAgg(PlanState *pstate)
                        winstate->framehead_valid = false;
                        winstate->frametail_valid = false;
                        /* we don't need to invalidate grouptail here; see below */
+
+                       if (!winstate->start_offset_var_free)
+                               winstate->start_offset_valid = false;
+                       if (!winstate->end_offset_var_free)
+                               winstate->end_offset_valid = false;
                }
 
                /*
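
[Editor's note] The two tuplesort_getdatum() calls in
perform_distinct_windowaggregate() gain a third boolean. In upstream PostgreSQL 16
this is the copy flag (false means the returned Datum may point into the sort's own
memory and must be consumed before the next fetch); the name is an assumption here,
not confirmed by this diff. A sketch of the assumed call form:

    /* Assumed PG16-style prototype:
     *   bool tuplesort_getdatum(Tuplesortstate *state, bool forward, bool copy,
     *                           Datum *val, bool *isNull, Datum *abbrev);
     * copy = false skips a per-value datumCopy() while draining the
     * distinct-sort for the window aggregate. */
    while (tuplesort_getdatum(peraggstate->distinctSortState,
                              true,   /* forward */
                              false,  /* copy (assumed name) */
                              &fcinfo->args[1].value,
                              &fcinfo->args[1].isnull,
                              NULL))  /* no abbreviated key wanted */
    {
        /* compare with the previous value and call the transition function */
    }
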
diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c
index 8ffce8f477b..3e8e2154840 100644
--- a/src/backend/executor/spi.c
+++ b/src/backend/executor/spi.c
@@ -444,6 +444,7 @@ SPI_rollback_and_chain(void)
        _SPI_rollback(true);
 }
 
+/*
  * Clean up SPI state at transaction commit or abort.
  */
 void
@@ -1768,8 +1769,6 @@ SPI_cursor_open_internal(const char *name, SPIPlanPtr plan,
         */
        if (read_only)
        {
-               ListCell   *lc;
-
                foreach(lc, stmt_list)
                {
                        PlannedStmt *pstmt = lfirst_node(PlannedStmt, lc);

