This is an automated email from the ASF dual-hosted git repository.

yiguolei pushed a commit to branch branch-2.1
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-2.1 by this push:
     new 746d83647e4 [fix](mtmv) Fix wrongly compensating union when the direct query is an empty relation (#51700) (#51899)
746d83647e4 is described below

commit 746d83647e43b28ba75227fb832c3c461840a98d
Author: seawinde <[email protected]>
AuthorDate: Fri Jun 20 13:53:06 2025 +0800

    [fix](mtmv) Fix wrongly compensating union when the direct query is an empty relation (#51700) (#51899)
    
    commitId: 85b7231c
    pr: https://github.com/apache/doris/pull/51700
---
 .../doris/common/profile/SummaryProfile.java       | 22 ++++++++++-
 .../org/apache/doris/mtmv/MTMVRelationManager.java |  3 +-
 .../org/apache/doris/nereids/CascadesContext.java  |  5 ---
 .../org/apache/doris/nereids/NereidsPlanner.java   | 18 ---------
 .../jobs/executor/TablePartitionCollector.java     | 46 ----------------------
 .../mv/AbstractMaterializedViewRule.java           | 16 +++++++-
 .../mv/InitMaterializationContextHook.java         | 21 ++++++++--
 .../exploration/mv/MaterializedViewUtils.java      | 10 +++++
 .../rules/exploration/mv/PartitionCompensator.java | 28 +++++++++----
 .../nereids/rules/exploration/mv/StructInfo.java   | 33 +++++++++++-----
 .../rules/rewrite/QueryPartitionCollector.java     | 26 +++---------
 .../exploration/mv/PartitionCompensatorTest.java   | 13 ++++--
 .../org/apache/doris/nereids/util/PlanChecker.java |  3 +-
 13 files changed, 129 insertions(+), 115 deletions(-)

diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/common/profile/SummaryProfile.java 
b/fe/fe-core/src/main/java/org/apache/doris/common/profile/SummaryProfile.java
index 7fe120d4dc1..20761b8c415 100644
--- 
a/fe/fe-core/src/main/java/org/apache/doris/common/profile/SummaryProfile.java
+++ 
b/fe/fe-core/src/main/java/org/apache/doris/common/profile/SummaryProfile.java
@@ -19,6 +19,8 @@ package org.apache.doris.common.profile;
 
 import org.apache.doris.common.util.RuntimeProfile;
 import org.apache.doris.common.util.TimeUtils;
+import org.apache.doris.qe.ConnectContext;
+import org.apache.doris.qe.StmtExecutor;
 import org.apache.doris.thrift.TNetworkAddress;
 import org.apache.doris.thrift.TUnit;
 import org.apache.doris.transaction.TransactionType;
@@ -202,6 +204,7 @@ public class SummaryProfile {
     private long nereidsLockTableFinishTime = -1;
 
     private long nereidsCollectTablePartitionFinishTime = -1;
+    private long nereidsCollectTablePartitionTime = 0;
     private long nereidsAnalysisFinishTime = -1;
     private long nereidsRewriteFinishTime = -1;
     private long nereidsOptimizeFinishTime = -1;
@@ -416,6 +419,10 @@ public class SummaryProfile {
         this.nereidsCollectTablePartitionFinishTime = 
TimeUtils.getStartTimeMs();
     }
 
+    public void addCollectTablePartitionTime(long elapsed) {
+        nereidsCollectTablePartitionTime += elapsed;
+    }
+
     public void setNereidsAnalysisTime() {
         this.nereidsAnalysisFinishTime = TimeUtils.getStartTimeMs();
     }
@@ -675,7 +682,9 @@ public class SummaryProfile {
 
 
     public String getPrettyNereidsCollectTablePartitionTime() {
-        return getPrettyTime(nereidsCollectTablePartitionFinishTime, 
nereidsRewriteFinishTime, TUnit.TIME_MS);
+        long totalTime = nereidsCollectTablePartitionFinishTime
+                - nereidsRewriteFinishTime + nereidsCollectTablePartitionTime;
+        return RuntimeProfile.printCounter(totalTime, TUnit.TIME_MS);
     }
 
     public String getPrettyNereidsOptimizeTime() {
@@ -803,4 +812,15 @@ public class SummaryProfile {
     public void setExecutedByFrontend(boolean executedByFrontend) {
         executionSummaryProfile.addInfoString(EXECUTED_BY_FRONTEND, 
String.valueOf(executedByFrontend));
     }
+
+    public static SummaryProfile getSummaryProfile(ConnectContext 
connectContext) {
+        ConnectContext ctx = connectContext == null ? ConnectContext.get() : 
connectContext;
+        if (ctx != null) {
+            StmtExecutor executor = ctx.getExecutor();
+            if (executor != null) {
+                return executor.getSummaryProfile();
+            }
+        }
+        return null;
+    }
 }
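
The timing helpers added above are meant to be used as a pair: measure the elapsed time around a partition-collection call and, if a profile is available for the current statement, add it to that profile. A minimal sketch of the pattern, assuming a ConnectContext is in scope; the helper name and the Runnable parameter are illustrative, not part of this patch:

    import org.apache.doris.common.profile.SummaryProfile;
    import org.apache.doris.common.util.TimeUtils;
    import org.apache.doris.qe.ConnectContext;

    // Illustrative helper, not part of this patch.
    static void timedCollect(ConnectContext connectContext, Runnable collectWork) {
        long startTimeMs = TimeUtils.getStartTimeMs();
        try {
            collectWork.run();
        } finally {
            // getSummaryProfile returns null when there is no active executor,
            // in which case the timing is simply dropped.
            SummaryProfile summaryProfile = SummaryProfile.getSummaryProfile(connectContext);
            if (summaryProfile != null) {
                summaryProfile.addCollectTablePartitionTime(TimeUtils.getElapsedTimeMs(startTimeMs));
            }
        }
    }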
diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/mtmv/MTMVRelationManager.java 
b/fe/fe-core/src/main/java/org/apache/doris/mtmv/MTMVRelationManager.java
index bd9244af61e..c45558ec8cf 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/mtmv/MTMVRelationManager.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/mtmv/MTMVRelationManager.java
@@ -45,6 +45,7 @@ import org.apache.commons.collections.CollectionUtils;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 
+import java.util.BitSet;
 import java.util.Collection;
 import java.util.List;
 import java.util.Map;
@@ -86,7 +87,7 @@ public class MTMVRelationManager implements MTMVHookService {
         Set<MTMV> res = Sets.newLinkedHashSet();
         Set<BaseTableInfo> mvInfos = getMTMVInfos(tableInfos);
         Map<List<String>, Set<String>> queryUsedPartitions = 
PartitionCompensator.getQueryUsedPartitions(
-                ctx.getStatementContext());
+                ctx.getStatementContext(), new BitSet());
 
         for (BaseTableInfo tableInfo : mvInfos) {
             try {
diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/nereids/CascadesContext.java 
b/fe/fe-core/src/main/java/org/apache/doris/nereids/CascadesContext.java
index d431fc54539..1486f03e269 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/nereids/CascadesContext.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/CascadesContext.java
@@ -24,7 +24,6 @@ import org.apache.doris.nereids.jobs.Job;
 import org.apache.doris.nereids.jobs.JobContext;
 import org.apache.doris.nereids.jobs.executor.Analyzer;
 import org.apache.doris.nereids.jobs.executor.TableCollectAndHookInitializer;
-import org.apache.doris.nereids.jobs.executor.TablePartitionCollector;
 import org.apache.doris.nereids.jobs.rewrite.RewriteBottomUpJob;
 import org.apache.doris.nereids.jobs.rewrite.RewriteTopDownJob;
 import 
org.apache.doris.nereids.jobs.rewrite.RootPlanTreeRewriteJob.RootRewriteJobContext;
@@ -229,10 +228,6 @@ public class CascadesContext implements ScheduleContext {
         return new TableCollectAndHookInitializer(this);
     }
 
-    public TablePartitionCollector newTablePartitionCollector() {
-        return new TablePartitionCollector(this);
-    }
-
     public Analyzer newAnalyzer() {
         return new Analyzer(this);
     }
diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/nereids/NereidsPlanner.java 
b/fe/fe-core/src/main/java/org/apache/doris/nereids/NereidsPlanner.java
index 9e9765ec139..69ec05893a3 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/nereids/NereidsPlanner.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/NereidsPlanner.java
@@ -344,21 +344,6 @@ public class NereidsPlanner extends Planner {
         }
     }
 
-    protected void collectTableUsedPartitions(boolean showPlanProcess) {
-        if (LOG.isDebugEnabled()) {
-            LOG.debug("Start to collect table used partition");
-        }
-        keepOrShowPlanProcess(showPlanProcess, () -> 
cascadesContext.newTablePartitionCollector().execute());
-        NereidsTracer.logImportantTime("EndCollectTablePartitions");
-        if (LOG.isDebugEnabled()) {
-            LOG.debug("Start to collect table used partition");
-        }
-        if (statementContext.getConnectContext().getExecutor() != null) {
-            
statementContext.getConnectContext().getExecutor().getSummaryProfile()
-                    .setNereidsCollectTablePartitionFinishTime();
-        }
-    }
-
     protected void analyze(boolean showPlanProcess) {
         if (LOG.isDebugEnabled()) {
             LOG.debug("Start analyze plan");
@@ -391,9 +376,6 @@ public class NereidsPlanner extends Planner {
         if (statementContext.getConnectContext().getExecutor() != null) {
             
statementContext.getConnectContext().getExecutor().getSummaryProfile().setNereidsRewriteTime();
         }
-        // collect partitions table used, this is for query rewrite by 
materialized view
-        // this is needed before init hook
-        collectTableUsedPartitions(showPlanProcess);
         cascadesContext.getStatementContext().getPlannerHooks().forEach(hook 
-> hook.afterRewrite(this));
     }
 
diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/nereids/jobs/executor/TablePartitionCollector.java
 
b/fe/fe-core/src/main/java/org/apache/doris/nereids/jobs/executor/TablePartitionCollector.java
deleted file mode 100644
index e67b94d1314..00000000000
--- 
a/fe/fe-core/src/main/java/org/apache/doris/nereids/jobs/executor/TablePartitionCollector.java
+++ /dev/null
@@ -1,46 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package org.apache.doris.nereids.jobs.executor;
-
-import org.apache.doris.nereids.CascadesContext;
-import org.apache.doris.nereids.jobs.rewrite.RewriteJob;
-import org.apache.doris.nereids.rules.RuleType;
-import org.apache.doris.nereids.rules.rewrite.QueryPartitionCollector;
-
-import java.util.List;
-
-/**
- * Collect partitions which query used, this is useful for optimizing get 
available mvs,
- * should collect after RBO
- */
-public class TablePartitionCollector extends AbstractBatchJobExecutor {
-    public TablePartitionCollector(CascadesContext cascadesContext) {
-        super(cascadesContext);
-    }
-
-    @Override
-    public List<RewriteJob> getJobs() {
-        return buildCollectorJobs();
-    }
-
-    private static List<RewriteJob> buildCollectorJobs() {
-        return jobs(
-                custom(RuleType.COLLECT_PARTITIONS, 
QueryPartitionCollector::new)
-        );
-    }
-}
diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/exploration/mv/AbstractMaterializedViewRule.java
 
b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/exploration/mv/AbstractMaterializedViewRule.java
index b079f0d1235..71d53099090 100644
--- 
a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/exploration/mv/AbstractMaterializedViewRule.java
+++ 
b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/exploration/mv/AbstractMaterializedViewRule.java
@@ -22,6 +22,8 @@ import org.apache.doris.catalog.constraint.TableIdentifier;
 import org.apache.doris.common.AnalysisException;
 import org.apache.doris.common.Id;
 import org.apache.doris.common.Pair;
+import org.apache.doris.common.profile.SummaryProfile;
+import org.apache.doris.common.util.TimeUtils;
 import org.apache.doris.mtmv.BaseTableInfo;
 import org.apache.doris.nereids.CascadesContext;
 import org.apache.doris.nereids.StatementContext;
@@ -291,7 +293,7 @@ public abstract class AbstractMaterializedViewRule 
implements ExplorationRuleFac
                 MTMV mtmv = ((AsyncMaterializationContext) 
materializationContext).getMtmv();
                 BaseTableInfo relatedTableInfo = 
mtmv.getMvPartitionInfo().getRelatedTableInfo();
                 Map<List<String>, Set<String>> queryUsedPartitions = 
PartitionCompensator.getQueryUsedPartitions(
-                        
cascadesContext.getConnectContext().getStatementContext());
+                        cascadesContext.getStatementContext(), 
queryStructInfo.getRelationIdBitSet());
                 Set<String> relateTableUsedPartitions = 
queryUsedPartitions.get(relatedTableInfo.toList());
                 if (relateTableUsedPartitions == null) {
                     materializationContext.recordFailReason(queryStructInfo,
@@ -413,6 +415,18 @@ public abstract class AbstractMaterializedViewRule 
implements ExplorationRuleFac
                                 logicalProperties, 
queryPlan.getLogicalProperties()));
                 continue;
             }
+            // Need to collect the table used partitions again, because the rewritten plan may contain new
+            // relations and will take part in later rewrites, which need the table used partition info.
+            long startTimeMs = TimeUtils.getStartTimeMs();
+            try {
+                
MaterializedViewUtils.collectTableUsedPartitions(rewrittenPlan, 
cascadesContext);
+            } finally {
+                SummaryProfile summaryProfile = 
SummaryProfile.getSummaryProfile(cascadesContext.getConnectContext());
+                if (summaryProfile != null) {
+                    
summaryProfile.addCollectTablePartitionTime(TimeUtils.getElapsedTimeMs(startTimeMs));
+                }
+            }
             trySetStatistics(materializationContext, cascadesContext);
             rewriteResults.add(rewrittenPlan);
             recordIfRewritten(queryStructInfo.getOriginalPlan(), 
materializationContext, cascadesContext);
diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/exploration/mv/InitMaterializationContextHook.java
 
b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/exploration/mv/InitMaterializationContextHook.java
index 205350e3b37..914ae3a5195 100644
--- 
a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/exploration/mv/InitMaterializationContextHook.java
+++ 
b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/exploration/mv/InitMaterializationContextHook.java
@@ -26,6 +26,7 @@ import org.apache.doris.mtmv.MTMVUtil;
 import org.apache.doris.nereids.CascadesContext;
 import org.apache.doris.nereids.NereidsPlanner;
 import org.apache.doris.nereids.PlannerHook;
+import org.apache.doris.nereids.StatementContext;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.ImmutableList;
@@ -50,6 +51,15 @@ public class InitMaterializationContextHook implements 
PlannerHook {
 
     @Override
     public void afterRewrite(NereidsPlanner planner) {
+        CascadesContext cascadesContext = planner.getCascadesContext();
+        // Collect the partitions used by each table; this is needed for query rewrite by materialized view.
+        // It must run before initMaterializationContext, because the partition version comparison there uses it.
+        
MaterializedViewUtils.collectTableUsedPartitions(cascadesContext.getRewritePlan(),
 cascadesContext);
+        StatementContext statementContext = 
cascadesContext.getStatementContext();
+        if (statementContext.getConnectContext().getExecutor() != null) {
+            
statementContext.getConnectContext().getExecutor().getSummaryProfile()
+                    .setNereidsCollectTablePartitionFinishTime();
+        }
         initMaterializationContext(planner.getCascadesContext());
     }
 
@@ -121,12 +131,17 @@ public class InitMaterializationContextHook implements 
PlannerHook {
                 // so regenerate the struct info table bitset
                 StructInfo mvStructInfo = mtmvCache.getStructInfo();
                 BitSet tableBitSetInCurrentCascadesContext = new BitSet();
-                mvStructInfo.getRelations().forEach(relation -> 
tableBitSetInCurrentCascadesContext.set(
-                        
cascadesContext.getStatementContext().getTableId(relation.getTable()).asInt()));
+                BitSet relationIdBitSetInCurrentCascadesContext = new BitSet();
+                mvStructInfo.getRelations().forEach(relation -> {
+                    tableBitSetInCurrentCascadesContext.set(
+                            
cascadesContext.getStatementContext().getTableId(relation.getTable()).asInt());
+                    
relationIdBitSetInCurrentCascadesContext.set(relation.getRelationId().asInt());
+                });
                 asyncMaterializationContext.add(new 
AsyncMaterializationContext(materializedView,
                         mtmvCache.getLogicalPlan(), 
mtmvCache.getOriginalPlan(), ImmutableList.of(),
                         ImmutableList.of(), cascadesContext,
-                        
mtmvCache.getStructInfo().withTableBitSet(tableBitSetInCurrentCascadesContext)));
+                        
mtmvCache.getStructInfo().withTableBitSet(tableBitSetInCurrentCascadesContext,
+                                relationIdBitSetInCurrentCascadesContext)));
             } catch (Exception e) {
                 LOG.warn(String.format("MaterializationContext init mv cache 
generate fail, current queryId is %s",
                         
cascadesContext.getConnectContext().getQueryIdentifier()), e);
diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/exploration/mv/MaterializedViewUtils.java
 
b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/exploration/mv/MaterializedViewUtils.java
index 65208f3aeb9..00b81a9532f 100644
--- 
a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/exploration/mv/MaterializedViewUtils.java
+++ 
b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/exploration/mv/MaterializedViewUtils.java
@@ -40,6 +40,7 @@ import org.apache.doris.nereids.rules.analysis.BindRelation;
 import org.apache.doris.nereids.rules.expression.ExpressionNormalization;
 import org.apache.doris.nereids.rules.expression.ExpressionRewriteContext;
 import org.apache.doris.nereids.rules.rewrite.EliminateSort;
+import org.apache.doris.nereids.rules.rewrite.QueryPartitionCollector;
 import org.apache.doris.nereids.trees.expressions.Alias;
 import org.apache.doris.nereids.trees.expressions.ExprId;
 import org.apache.doris.nereids.trees.expressions.Expression;
@@ -311,6 +312,15 @@ public class MaterializedViewUtils {
         return nondeterministicFunctions;
     }
 
+    /**
+     * Collect the partitions used by each table; this is used for the mv rewrite partition union.
+     * The result can not be cumulative; if this is called multiple times, the previous result should be cleaned first.
+     */
+    public static void collectTableUsedPartitions(Plan plan, CascadesContext 
cascadesContext) {
+        // the recorded partitions are tracked per relation id
+        plan.accept(new QueryPartitionCollector(), cascadesContext);
+    }
+
     /**
      * createMTMVCache from querySql
      */
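
With the job-based TablePartitionCollector removed, callers now collect partitions directly from a plan through this entry point. A rough sketch of its use after rewrite, mirroring InitMaterializationContextHook.afterRewrite and the updated tests; the cascadesContext variable is assumed to hold the already rewritten plan:

    import org.apache.doris.nereids.CascadesContext;
    import org.apache.doris.nereids.rules.exploration.mv.MaterializedViewUtils;
    import org.apache.doris.nereids.trees.plans.Plan;

    // Illustrative fragment, not part of this patch.
    Plan rewrittenPlan = cascadesContext.getRewritePlan();
    // Walks the plan with QueryPartitionCollector and records the used partitions,
    // per relation id, into the statement context.
    MaterializedViewUtils.collectTableUsedPartitions(rewrittenPlan, cascadesContext);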
diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/exploration/mv/PartitionCompensator.java
 
b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/exploration/mv/PartitionCompensator.java
index 98629f86028..0ee88ce82af 100644
--- 
a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/exploration/mv/PartitionCompensator.java
+++ 
b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/exploration/mv/PartitionCompensator.java
@@ -40,6 +40,7 @@ import com.google.common.collect.Sets;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 
+import java.util.BitSet;
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -164,8 +165,12 @@ public class PartitionCompensator {
     /**
      * Get query used partitions
      * this is calculated from tableUsedPartitionNameMap and tables in 
statementContext
-     * */
-    public static Map<List<String>, Set<String>> 
getQueryUsedPartitions(StatementContext statementContext) {
+     *
+     * @param customRelationIdSet if union compensation occurs, the partitions used by the new query change,
+     *         so the used partitions need to be looked up by the given relation id set
+     */
+    public static Map<List<String>, Set<String>> 
getQueryUsedPartitions(StatementContext statementContext,
+            BitSet customRelationIdSet) {
         // get table used partitions
         // if table is not in statementContext().getTables() which means the 
table is partition prune as empty relation
         Multimap<List<String>, Pair<RelationId, Set<String>>> 
tableUsedPartitionNameMap = statementContext
@@ -174,7 +179,7 @@ public class PartitionCompensator {
         // if value is null, means query all partitions
         // if value is not empty, means query some partitions
         Map<List<String>, Set<String>> queryUsedRelatedTablePartitionsMap = 
new HashMap<>();
-        outer:
+        tableLoop:
         for (Map.Entry<List<String>, TableIf> queryUsedTableEntry : 
statementContext.getTables().entrySet()) {
             Set<String> usedPartitionSet = new HashSet<>();
             Collection<Pair<RelationId, Set<String>>> tableUsedPartitions =
@@ -185,11 +190,20 @@ public class PartitionCompensator {
                     continue;
                 }
                 for (Pair<RelationId, Set<String>> partitionPair : 
tableUsedPartitions) {
-                    if (ALL_PARTITIONS.equals(partitionPair)) {
-                        
queryUsedRelatedTablePartitionsMap.put(queryUsedTableEntry.getKey(), null);
-                        continue outer;
+                    if (!customRelationIdSet.isEmpty()) {
+                        if (ALL_PARTITIONS.equals(partitionPair)) {
+                            continue;
+                        }
+                        if 
(customRelationIdSet.get(partitionPair.key().asInt())) {
+                            usedPartitionSet.addAll(partitionPair.value());
+                        }
+                    } else {
+                        if (ALL_PARTITIONS.equals(partitionPair)) {
+                            
queryUsedRelatedTablePartitionsMap.put(queryUsedTableEntry.getKey(), null);
+                            continue tableLoop;
+                        }
+                        usedPartitionSet.addAll(partitionPair.value());
                     }
-                    usedPartitionSet.addAll(partitionPair.value());
                 }
             }
             
queryUsedRelatedTablePartitionsMap.put(queryUsedTableEntry.getKey(), 
usedPartitionSet);
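
The new BitSet parameter selects between two lookup modes: an empty BitSet keeps the previous behavior and aggregates over every relation of each table (with ALL_PARTITIONS short-circuiting the value to null, meaning all partitions), while a non-empty relation id set restricts the result to those relations only. A sketch of both calls, with illustrative variable names:

    import java.util.BitSet;
    import java.util.List;
    import java.util.Map;
    import java.util.Set;

    // Empty BitSet: previous behavior, consider every relation of each table in the statement context.
    Map<List<String>, Set<String>> allRelations =
            PartitionCompensator.getQueryUsedPartitions(statementContext, new BitSet());

    // Relation id set: only count partitions read by the relations of the current query StructInfo,
    // which is what AbstractMaterializedViewRule now passes during union compensation.
    Map<List<String>, Set<String>> queryRelationsOnly =
            PartitionCompensator.getQueryUsedPartitions(statementContext, queryStructInfo.getRelationIdBitSet());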
diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/exploration/mv/StructInfo.java
 
b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/exploration/mv/StructInfo.java
index 2ffde75fe5b..4fdf36b855b 100644
--- 
a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/exploration/mv/StructInfo.java
+++ 
b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/exploration/mv/StructInfo.java
@@ -100,6 +100,7 @@ public class StructInfo {
     // So if the cascadesContext currently is different form the 
cascadesContext which generated it.
     // Should regenerate the tableBitSet by current cascadesContext and call 
withTableBitSet method
     private final BitSet tableBitSet;
+    private final BitSet relationIdBitSet;
     // this is for LogicalCompatibilityContext later
     private final Map<RelationId, StructInfoNode> relationIdStructInfoNodeMap;
     // this recorde the predicates which can pull up, not shuttled
@@ -137,6 +138,7 @@ public class StructInfo {
                     shuttledExpressionsToExpressionsMap,
             Map<ExpressionPosition, Map<Expression, Expression>> 
expressionToShuttledExpressionToMap,
             BitSet tableIdSet,
+            BitSet relationIdSet,
             SplitPredicate splitPredicate,
             EquivalenceClass equivalenceClass,
             List<? extends Expression> planOutputShuttledExpressions) {
@@ -148,6 +150,7 @@ public class StructInfo {
         this.bottomPlan = bottomPlan;
         this.relations = relations;
         this.tableBitSet = tableIdSet;
+        this.relationIdBitSet = relationIdSet;
         this.relationIdStructInfoNodeMap = relationIdStructInfoNodeMap;
         this.predicates = predicates;
         this.splitPredicate = splitPredicate;
@@ -164,17 +167,19 @@ public class StructInfo {
         return new StructInfo(this.originalPlan, this.originalPlanId, 
this.hyperGraph, this.valid, this.topPlan,
                 this.bottomPlan, this.relations, 
this.relationIdStructInfoNodeMap, predicates,
                 this.shuttledExpressionsToExpressionsMap, 
this.expressionToShuttledExpressionToMap,
-                this.tableBitSet, null, null, 
this.planOutputShuttledExpressions);
+                this.tableBitSet, this.relationIdBitSet, null, null,
+                this.planOutputShuttledExpressions);
     }
 
     /**
      * Construct StructInfo with new tableBitSet
      */
-    public StructInfo withTableBitSet(BitSet tableBitSet) {
+    public StructInfo withTableBitSet(BitSet tableBitSet, BitSet 
relationIdBitSet) {
         return new StructInfo(this.originalPlan, this.originalPlanId, 
this.hyperGraph, this.valid, this.topPlan,
                 this.bottomPlan, this.relations, 
this.relationIdStructInfoNodeMap, this.predicates,
                 this.shuttledExpressionsToExpressionsMap, 
this.expressionToShuttledExpressionToMap,
-                tableBitSet, this.splitPredicate, this.equivalenceClass, 
this.planOutputShuttledExpressions);
+                tableBitSet, relationIdBitSet, this.splitPredicate, 
this.equivalenceClass,
+                this.planOutputShuttledExpressions);
     }
 
     private static boolean collectStructInfoFromGraph(HyperGraph hyperGraph,
@@ -185,6 +190,7 @@ public class StructInfo {
             List<CatalogRelation> relations,
             Map<RelationId, StructInfoNode> relationIdStructInfoNodeMap,
             BitSet hyperTableBitSet,
+            BitSet relationBitSet,
             CascadesContext cascadesContext) {
 
         // Collect relations from hyper graph which in the bottom plan firstly
@@ -194,8 +200,11 @@ public class StructInfo {
             List<CatalogRelation> nodeRelations = new ArrayList<>();
             nodePlan.accept(RELATION_COLLECTOR, nodeRelations);
             relations.addAll(nodeRelations);
-            nodeRelations.forEach(relation -> hyperTableBitSet.set(
-                    
cascadesContext.getStatementContext().getTableId(relation.getTable()).asInt()));
+            nodeRelations.forEach(relation -> {
+                hyperTableBitSet.set(
+                        
cascadesContext.getStatementContext().getTableId(relation.getTable()).asInt());
+                relationBitSet.set(relation.getRelationId().asInt());
+            });
             // plan relation collector and set to map
             StructInfoNode structInfoNode = (StructInfoNode) node;
             // record expressions in node
@@ -314,12 +323,14 @@ public class StructInfo {
         Map<ExpressionPosition, Multimap<Expression, Pair<Expression, 
HyperElement>>>
                 shuttledHashConjunctsToConjunctsMap = new LinkedHashMap<>();
         BitSet tableBitSet = new BitSet();
+        BitSet relationBitSet = new BitSet();
         Map<ExpressionPosition, Map<Expression, Expression>> 
expressionToShuttledExpressionToMap = new HashMap<>();
         boolean valid = collectStructInfoFromGraph(hyperGraph, topPlan, 
shuttledHashConjunctsToConjunctsMap,
                 expressionToShuttledExpressionToMap,
                 relationList,
                 relationIdStructInfoNodeMap,
                 tableBitSet,
+                relationBitSet,
                 cascadesContext);
         valid = valid
                 && hyperGraph.getNodes().stream().allMatch(n -> 
((StructInfoNode) n).getExpressions() != null);
@@ -338,8 +349,7 @@ public class StructInfo {
         return new StructInfo(originalPlan, originalPlanId, hyperGraph, valid, 
topPlan, bottomPlan,
                 relationList, relationIdStructInfoNodeMap, predicates, 
shuttledHashConjunctsToConjunctsMap,
                 expressionToShuttledExpressionToMap,
-                tableBitSet, null, null,
-                planOutputShuttledExpressions);
+                tableBitSet, relationBitSet, null, null, 
planOutputShuttledExpressions);
     }
 
     public List<CatalogRelation> getRelations() {
@@ -443,6 +453,10 @@ public class StructInfo {
         return tableBitSet;
     }
 
+    public BitSet getRelationIdBitSet() {
+        return relationIdBitSet;
+    }
+
     public List<? extends Expression> getPlanOutputShuttledExpressions() {
         return planOutputShuttledExpressions;
     }
@@ -755,10 +769,11 @@ public class StructInfo {
         queryPlanWithUnionFilter = new LogicalPlanDeepCopier().deepCopy(
                 (LogicalPlan) queryPlanWithUnionFilter, new 
DeepCopierContext());
         // rbo rewrite after adding filter on origin plan
-        return 
Pair.of(MaterializedViewUtils.rewriteByRules(parentCascadesContext, context -> {
+        Plan filterAddedPlan = 
MaterializedViewUtils.rewriteByRules(parentCascadesContext, context -> {
             Rewriter.getWholeTreeRewriter(context).execute();
             return context.getRewritePlan();
-        }, queryPlanWithUnionFilter, queryPlan), true);
+        }, queryPlanWithUnionFilter, queryPlan);
+        return Pair.of(filterAddedPlan, true);
     }
 
     /**
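
When a cached mv StructInfo is reused in a different CascadesContext, both bitsets are regenerated from its relations: table ids are re-resolved through the current statement context, while relation ids are taken from the relations themselves. A sketch following the hook code above, with illustrative variable names:

    import java.util.BitSet;

    // Illustrative fragment, not part of this patch.
    BitSet tableBitSet = new BitSet();
    BitSet relationIdBitSet = new BitSet();
    mvStructInfo.getRelations().forEach(relation -> {
        tableBitSet.set(cascadesContext.getStatementContext().getTableId(relation.getTable()).asInt());
        relationIdBitSet.set(relation.getRelationId().asInt());
    });
    StructInfo structInfoForCurrentContext = mvStructInfo.withTableBitSet(tableBitSet, relationIdBitSet);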
diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/QueryPartitionCollector.java
 
b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/QueryPartitionCollector.java
index 2ad993b361d..cfe5d6863c3 100644
--- 
a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/QueryPartitionCollector.java
+++ 
b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/QueryPartitionCollector.java
@@ -20,17 +20,14 @@ package org.apache.doris.nereids.rules.rewrite;
 import org.apache.doris.catalog.TableIf;
 import org.apache.doris.common.Pair;
 import org.apache.doris.datasource.ExternalTable;
-import org.apache.doris.nereids.jobs.JobContext;
+import org.apache.doris.nereids.CascadesContext;
 import org.apache.doris.nereids.rules.exploration.mv.PartitionCompensator;
-import org.apache.doris.nereids.trees.plans.Plan;
 import org.apache.doris.nereids.trees.plans.RelationId;
 import org.apache.doris.nereids.trees.plans.logical.LogicalCatalogRelation;
 import org.apache.doris.nereids.trees.plans.logical.LogicalFileScan;
 import 
org.apache.doris.nereids.trees.plans.logical.LogicalFileScan.SelectedPartitions;
 import org.apache.doris.nereids.trees.plans.logical.LogicalOlapScan;
-import org.apache.doris.nereids.trees.plans.visitor.CustomRewriter;
-import org.apache.doris.nereids.trees.plans.visitor.DefaultPlanRewriter;
-import org.apache.doris.qe.ConnectContext;
+import org.apache.doris.nereids.trees.plans.visitor.DefaultPlanVisitor;
 
 import com.google.common.collect.Multimap;
 import org.apache.logging.log4j.LogManager;
@@ -43,29 +40,18 @@ import java.util.Set;
 /**
  * Used to collect query partitions, only collect once
  * */
-public class QueryPartitionCollector extends 
DefaultPlanRewriter<ConnectContext> implements CustomRewriter {
+public class QueryPartitionCollector extends DefaultPlanVisitor<Void, 
CascadesContext> {
 
     public static final Logger LOG = 
LogManager.getLogger(QueryPartitionCollector.class);
 
     @Override
-    public Plan rewriteRoot(Plan plan, JobContext jobContext) {
-
-        ConnectContext connectContext = ConnectContext.get();
-        if (connectContext != null && 
connectContext.getSessionVariable().internalSession) {
-            return plan;
-        }
-        plan.accept(this, connectContext);
-        return plan;
-    }
-
-    @Override
-    public Plan visitLogicalCatalogRelation(LogicalCatalogRelation 
catalogRelation, ConnectContext context) {
+    public Void visitLogicalCatalogRelation(LogicalCatalogRelation 
catalogRelation, CascadesContext context) {
 
         TableIf table = catalogRelation.getTable();
         if (table.getDatabase() == null) {
             LOG.error("QueryPartitionCollector visitLogicalCatalogRelation 
database is null, table is "
                     + table.getName());
-            return catalogRelation;
+            return null;
         }
         Multimap<List<String>, Pair<RelationId, Set<String>>> 
tableUsedPartitionNameMap = context.getStatementContext()
                 .getTableUsedPartitionNameMap();
@@ -90,6 +76,6 @@ public class QueryPartitionCollector extends 
DefaultPlanRewriter<ConnectContext>
             // not support get partition scene, we consider query all 
partitions from table
             tableUsedPartitionNameMap.put(table.getFullQualifiers(), 
PartitionCompensator.ALL_PARTITIONS);
         }
-        return catalogRelation;
+        return null;
     }
 }
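
QueryPartitionCollector is now a plain DefaultPlanVisitor rather than a CustomRewriter, so it is driven through Plan.accept instead of a rewrite job. A minimal sketch of invoking it directly, which is what MaterializedViewUtils.collectTableUsedPartitions does internally:

    import org.apache.doris.nereids.rules.rewrite.QueryPartitionCollector;

    // The visitor fills statementContext.getTableUsedPartitionNameMap() with
    // (relation id, used partition names) pairs per table qualifier.
    plan.accept(new QueryPartitionCollector(), cascadesContext);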
diff --git 
a/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/exploration/mv/PartitionCompensatorTest.java
 
b/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/exploration/mv/PartitionCompensatorTest.java
index 6588d4abd86..9168d111f66 100644
--- 
a/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/exploration/mv/PartitionCompensatorTest.java
+++ 
b/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/exploration/mv/PartitionCompensatorTest.java
@@ -28,6 +28,7 @@ import com.google.common.collect.Multimap;
 import org.junit.jupiter.api.Assertions;
 import org.junit.jupiter.api.Test;
 
+import java.util.BitSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -107,9 +108,11 @@ public class PartitionCompensatorTest extends 
TestWithFeService {
                                 + "left outer join orders_list_partition\n"
                                 + "on l1.l_shipdate = o_orderdate\n",
                         nereidsPlanner -> {
+                            
MaterializedViewUtils.collectTableUsedPartitions(nereidsPlanner.getRewrittenPlan(),
+                                    nereidsPlanner.getCascadesContext());
                             Map<List<String>, Set<String>> queryUsedPartitions
                                     = 
PartitionCompensator.getQueryUsedPartitions(
-                                            
nereidsPlanner.getCascadesContext().getStatementContext());
+                                            
nereidsPlanner.getCascadesContext().getStatementContext(), new BitSet());
 
                             List<String> itmeQualifier = ImmutableList.of(
                                     "internal", "partition_compensate_test", 
"lineitem_list_partition");
@@ -131,6 +134,8 @@ public class PartitionCompensatorTest extends 
TestWithFeService {
                                 + "left outer join orders_list_partition\n"
                                 + "on l1.l_shipdate = o_orderdate\n",
                         nereidsPlanner -> {
+                            
MaterializedViewUtils.collectTableUsedPartitions(nereidsPlanner.getRewrittenPlan(),
+                                    nereidsPlanner.getCascadesContext());
                             List<String> qualifier = ImmutableList.of(
                                     "internal", "partition_compensate_test", 
"lineitem_list_partition");
 
@@ -140,7 +145,7 @@ public class PartitionCompensatorTest extends 
TestWithFeService {
 
                             Map<List<String>, Set<String>> queryUsedPartitions
                                     = 
PartitionCompensator.getQueryUsedPartitions(
-                                            
nereidsPlanner.getCascadesContext().getStatementContext());
+                                    
nereidsPlanner.getCascadesContext().getStatementContext(), new BitSet());
                             Set<String> queryTableUsedPartition = 
queryUsedPartitions.get(qualifier);
                             // if tableUsedPartitionNameMap contain any 
PartitionCompensator.ALL_PARTITIONS
                             // consider query all partitions from table
@@ -161,6 +166,8 @@ public class PartitionCompensatorTest extends 
TestWithFeService {
                                 + "left outer join orders_list_partition\n"
                                 + "on l1.l_shipdate = o_orderdate\n",
                         nereidsPlanner -> {
+                            
MaterializedViewUtils.collectTableUsedPartitions(nereidsPlanner.getRewrittenPlan(),
+                                    nereidsPlanner.getCascadesContext());
                             List<String> qualifier = ImmutableList.of(
                                     "internal", "partition_compensate_test", 
"lineitem_list_partition");
 
@@ -171,7 +178,7 @@ public class PartitionCompensatorTest extends 
TestWithFeService {
 
                             Map<List<String>, Set<String>> queryUsedPartitions
                                     = 
PartitionCompensator.getQueryUsedPartitions(
-                                            
nereidsPlanner.getCascadesContext().getStatementContext());
+                                            
nereidsPlanner.getCascadesContext().getStatementContext(), new BitSet());
                             Set<String> queryTableUsedPartition = 
queryUsedPartitions.get(qualifier);
                             // if tableUsedPartitionNameMap contain only 
PartitionCompensator.ALL_PARTITIONS
                             // consider query all partitions from table
diff --git 
a/fe/fe-core/src/test/java/org/apache/doris/nereids/util/PlanChecker.java 
b/fe/fe-core/src/test/java/org/apache/doris/nereids/util/PlanChecker.java
index e900937d2ba..e535e3a8ac5 100644
--- a/fe/fe-core/src/test/java/org/apache/doris/nereids/util/PlanChecker.java
+++ b/fe/fe-core/src/test/java/org/apache/doris/nereids/util/PlanChecker.java
@@ -50,6 +50,7 @@ import org.apache.doris.nereids.rules.RuleFactory;
 import org.apache.doris.nereids.rules.RuleSet;
 import org.apache.doris.nereids.rules.RuleType;
 import 
org.apache.doris.nereids.rules.exploration.mv.InitMaterializationContextHook;
+import org.apache.doris.nereids.rules.exploration.mv.MaterializedViewUtils;
 import org.apache.doris.nereids.rules.rewrite.OneRewriteRuleFactory;
 import org.apache.doris.nereids.trees.plans.GroupPlan;
 import org.apache.doris.nereids.trees.plans.Plan;
@@ -244,7 +245,7 @@ public class PlanChecker {
 
     public PlanChecker rewrite() {
         Rewriter.getWholeTreeRewriter(cascadesContext).execute();
-        cascadesContext.newTablePartitionCollector().execute();
+        
MaterializedViewUtils.collectTableUsedPartitions(cascadesContext.getRewritePlan(),
 cascadesContext);
         
InitMaterializationContextHook.INSTANCE.initMaterializationContext(this.cascadesContext);
         cascadesContext.toMemo();
         return this;

