morrySnow commented on code in PR #14827:
URL: https://github.com/apache/doris/pull/14827#discussion_r1045570822


##########
fe/fe-core/src/main/java/org/apache/doris/analysis/AggregateInfo.java:
##########
@@ -391,6 +394,10 @@ public ArrayList<FunctionCallExpr> getMaterializedAggregateExprs() {
         return result;
     }
 
+    public List<String> getMaterializedAggregateExprLabels() {
+        return Lists.newArrayList(materializedSlotLabels);
+    }

Review Comment:
   why not put it into AggregateInfoBase?
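   A rough sketch of the suggestion (assuming `materializedSlotLabels` is declared in, or moved to, `AggregateInfoBase`):
   ```java
   // In AggregateInfoBase (sketch): expose the labels next to the field itself,
   // so AggregateInfo and other subclasses can share the same accessor.
   public List<String> getMaterializedAggregateExprLabels() {
       return Lists.newArrayList(materializedSlotLabels);
   }
   ```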



##########
fe/fe-core/src/main/java/org/apache/doris/planner/OlapScanNode.java:
##########
@@ -1216,4 +1219,19 @@ public String getReasonOfPreAggregation() {
     public String getSelectedIndexName() {
         return olapTable.getIndexNameById(selectedIndexId);
     }
+
+    public void finalizeForNerieds() {
+        computeNumNodes();
+        computeStatsForNerieds();

Review Comment:
   could we compute them in Nereids itself?



##########
fe/fe-core/src/main/java/org/apache/doris/nereids/glue/translator/PhysicalPlanTranslator.java:
##########
@@ -1055,6 +1112,18 @@ public PlanFragment visitPhysicalLimit(PhysicalLimit<? extends Plan> physicalLim
     public PlanFragment visitPhysicalDistribute(PhysicalDistribute<? extends Plan> distribute,
             PlanTranslatorContext context) {
         PlanFragment childFragment = distribute.child().accept(this, context);
+
+        if (childFragment.getPlanRoot() instanceof AggregationNode
+                && distribute.child() instanceof PhysicalHashAggregate
+                && context.getFirstAggregateInFragment(childFragment) == distribute.child()) {
+            PhysicalHashAggregate<Plan> hashAggregate = (PhysicalHashAggregate) distribute.child();
+            if (hashAggregate.getAggPhase() == AggPhase.LOCAL
+                    && hashAggregate.getAggMode() == AggMode.INPUT_TO_BUFFER) {

Review Comment:
   ditto



##########
fe/fe-core/src/main/java/org/apache/doris/nereids/properties/RequestPropertyDeriver.java:
##########
@@ -183,22 +147,32 @@ private void addRequestPropertyToChildren(PhysicalProperties... physicalProperti
         requestPropertyToChildren.add(Lists.newArrayList(physicalProperties));
     }
 
-    private List<ExprId> extractFromDistinctFunction(List<NamedExpression> outputExpression) {
+    private void addRequestPropertyToChildren(List<PhysicalProperties> physicalProperties) {
+        requestPropertyToChildren.add(physicalProperties);
+    }
+
+    private List<ExprId> extractExprIdFromDistinctFunction(List<NamedExpression> outputExpression) {
+        Set<AggregateFunction> distinctAggregateFunctions = ExpressionUtils.collect(outputExpression, expr ->
+                expr instanceof AggregateFunction && ((AggregateFunction) expr).isDistinct()
+        );
         List<ExprId> exprIds = Lists.newArrayList();
-        for (NamedExpression originOutputExpr : outputExpression) {
-            Set<AggregateFunction> aggregateFunctions
-                    = originOutputExpr.collect(AggregateFunction.class::isInstance);
-            for (AggregateFunction aggregateFunction : aggregateFunctions) {
-                if (aggregateFunction.isDistinct()) {
-                    for (Expression expr : aggregateFunction.children()) {
-                        Preconditions.checkState(expr instanceof SlotReference, "normalize aggregate failed to"
-                                + " normalize aggregate function " + aggregateFunction.toSql());
-                        exprIds.add(((SlotReference) expr).getExprId());
-                    }
-                }
+        for (AggregateFunction aggregateFunction : distinctAggregateFunctions) {
+            for (Expression expr : aggregateFunction.children()) {
+                Preconditions.checkState(expr instanceof SlotReference, "normalize aggregate failed to"
+                        + " normalize aggregate function " + aggregateFunction.toSql());
+                exprIds.add(((SlotReference) expr).getExprId());
             }
         }
         return exprIds;
     }
+
+    private void addRequestHashDistribution(List<Expression> hashColumns, ShuffleType shuffleType) {

Review Comment:
   not used?



##########
fe/fe-core/src/main/java/org/apache/doris/nereids/properties/PhysicalProperties.java:
##########
@@ -99,6 +120,15 @@ public int hashCode() {
 
     @Override
     public String toString() {
+        if (this.equals(ANY)) {
+            return "ANY";
+        }
+        if (this.equals(REPLICATED)) {
+            return "REPLICATED";
+        }
+        if (this.equals(GATHER)) {
+            return "GATHER";
+        }

Review Comment:
   distribution and order are two parts of the physical properties, so we cannot just return the distribution string
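   A minimal sketch of what that could look like (the `distributionSpec` / `orderSpec` field names here are assumptions):
   ```java
   @Override
   public String toString() {
       // sketch: always show both components of the physical properties
       return "PhysicalProperties(distribution=" + distributionSpec
               + ", order=" + orderSpec + ")";
   }
   ```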



##########
fe/fe-core/src/main/java/org/apache/doris/nereids/memo/Memo.java:
##########
@@ -667,7 +668,8 @@ public String toString() {
             builder.append(group).append("\n");
             builder.append("  
stats=").append(group.getStatistics()).append("\n");
             StatsDeriveResult stats = group.getStatistics();
-            if (stats != null && 
group.getLogicalExpressions().get(0).getPlan() instanceof LogicalOlapScan) {
+            if (stats != null && !group.getLogicalExpressions().isEmpty()

Review Comment:
   is this because some groups have no logical expressions?



##########
fe/fe-core/src/main/java/org/apache/doris/nereids/properties/ChildOutputPropertyDeriver.java:
##########
@@ -218,13 +222,27 @@ public PhysicalProperties visitPhysicalNestedLoopJoin(
 
     @Override
     public PhysicalProperties visitPhysicalOlapScan(PhysicalOlapScan olapScan, PlanContext context) {
-        if (olapScan.getDistributionSpec() instanceof DistributionSpecHash) {
+        if (!olapScan.getTable().isColocateTable() && olapScan.getScanTabletNum() == 1) {
+            return PhysicalProperties.GATHER;

Review Comment:
   add a TODO: let's find a better way to handle both tablet num == 1 and colocate tables together in the future
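   For example (sketch of the requested TODO on the quoted condition):
   ```java
   // TODO: tablet num == 1 and colocate tables are handled separately here;
   //       find a unified way to derive the output property for both cases.
   if (!olapScan.getTable().isColocateTable() && olapScan.getScanTabletNum() == 1) {
       return PhysicalProperties.GATHER;
   }
   ```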



##########
fe/fe-core/src/main/java/org/apache/doris/nereids/NereidsPlanner.java:
##########
@@ -314,6 +320,11 @@ public CascadesContext getCascadesContext() {
         return cascadesContext;
     }
 
+    public static PhysicalProperties buildInitRequireProperties(Plan initPlan) {
+        boolean isQuery = !(initPlan instanceof Command) || (initPlan instanceof ExplainCommand);

Review Comment:
   why add `(initPlan instanceof ExplainCommand)`?



##########
fe/fe-core/src/main/java/org/apache/doris/qe/SessionVariable.java:
##########
@@ -1233,6 +1240,16 @@ public void setEnableNereidsPlanner(boolean enableNereidsPlanner) {
         this.enableNereidsPlanner = enableNereidsPlanner;
     }
 
+    public Set<String> getDisableNereidsRules() {
+        return Arrays.stream(disableNereidsRules.split(",[\\s]*"))
+                .map(rule -> rule.toUpperCase(Locale.ROOT))
+                .collect(ImmutableSet.toImmutableSet());

Review Comment:
   nit: we could do the split and legality check when the user runs SET; see `org.apache.doris.qe.VariableMgr.VarAttr#checker` for details
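   A rough sketch of that idea (the method and `KNOWN_RULE_NAMES` placeholder below are assumptions, not the existing checker API):
   ```java
   // Hypothetical validation hook run when the user executes SET disable_nereids_rules = '...':
   // split once, normalize case, and reject unknown rule names early.
   public void checkDisableNereidsRules(String value) {
       for (String rule : value.split(",[\\s]*")) {
           String name = rule.trim().toUpperCase(Locale.ROOT);
           if (!name.isEmpty() && !KNOWN_RULE_NAMES.contains(name)) {
               throw new IllegalArgumentException("Unknown Nereids rule: " + rule);
           }
       }
   }
   ```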



##########
fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/algebra/OlapScan.java:
##########
@@ -0,0 +1,58 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.nereids.trees.plans.algebra;
+
+import org.apache.doris.catalog.OlapTable;
+
+import java.util.List;
+
+/** OlapScan */
+public interface OlapScan extends Scan {
+    OlapTable getTable();
+
+    long getSelectedIndexId();
+
+    List<Long> getSelectedPartitionIds();
+
+    List<Long> getSelectedTabletIds();
+
+    /** getScanTabletNum */
+    default int getScanTabletNum() {

Review Comment:
   after pruning, selecting 0 tablets is possible
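   A defensive variant of the default method could look like this (hypothetical body; the quoted hunk is truncated before the real implementation):
   ```java
   /** getScanTabletNum */
   default int getScanTabletNum() {
       // after partition/tablet pruning the scan may select zero tablets,
       // so do not assume at least one tablet or partition is present
       List<Long> selectedTabletIds = getSelectedTabletIds();
       return selectedTabletIds == null ? 0 : selectedTabletIds.size();
   }
   ```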



##########
fe/fe-core/src/main/java/org/apache/doris/nereids/trees/expressions/functions/FunctionBuilder.java:
##########
@@ -57,8 +59,12 @@ public boolean canApply(List<? extends Object> arguments) {
         }
         for (int i = 0; i < arguments.size(); i++) {
             Class constructorArgumentType = getConstructorArgumentType(i);
-            if (!constructorArgumentType.isInstance(arguments.get(i))) {
-                return false;
+            Object argument = arguments.get(i);
+            if (!constructorArgumentType.isInstance(argument)) {
+                Optional<Class> primitiveType = ReflectionUtils.getPrimitiveType(argument.getClass());

Review Comment:
   does this mean that, currently, we can only build functions whose constructor arguments are primitive types?
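   For reference, one way the truncated branch might continue is a boxed-to-primitive fallback like the sketch below (an assumption, not the PR's exact code), which would let an `Integer` argument match an `int` constructor parameter:
   ```java
   Optional<Class> primitiveType = ReflectionUtils.getPrimitiveType(argument.getClass());
   // fall back to the unboxed type before rejecting the argument
   if (!primitiveType.isPresent() || !primitiveType.get().equals(constructorArgumentType)) {
       return false;
   }
   ```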



##########
fe/fe-core/src/main/java/org/apache/doris/nereids/properties/ChildrenPropertiesRegulator.java:
##########
@@ -72,12 +70,7 @@ public Double visit(Plan plan, Void context) {
     }
 
     @Override
-    public Double visitPhysicalAggregate(PhysicalAggregate<? extends Plan> agg, Void context) {
-        if (agg.isFinalPhase()
-                && agg.getAggPhase() == AggPhase.LOCAL
-                && children.get(0).getPlan() instanceof PhysicalDistribute) {
-            return -1.0;
-        }
+    public Double visitPhysicalHashAggregate(PhysicalHashAggregate<? extends Plan> agg, Void context) {
         return 0.0;
     }
 

Review Comment:
   just remove this function



##########
fe/fe-core/src/main/java/org/apache/doris/nereids/jobs/cascades/DeriveStatsJob.java:
##########
@@ -45,31 +47,42 @@ public class DeriveStatsJob extends Job {
      * @param context context of current job
      */
     public DeriveStatsJob(GroupExpression groupExpression, JobContext context) {
-        super(JobType.DERIVE_STATS, context);
-        this.groupExpression = groupExpression;
-        this.deriveChildren = false;
+        this(groupExpression, false, context);
     }
 
-    /**
-     * Copy constructor for DeriveStatsJob.
-     *
-     * @param other DeriveStatsJob copied from
-     */
-    public DeriveStatsJob(DeriveStatsJob other) {
-        super(JobType.DERIVE_STATS, other.context);
-        this.groupExpression = other.groupExpression;
-        this.deriveChildren = other.deriveChildren;
+    private DeriveStatsJob(GroupExpression groupExpression, boolean deriveChildren, JobContext context) {
+        super(JobType.DERIVE_STATS, context);
+        this.groupExpression = groupExpression;
+        this.deriveChildren = deriveChildren;
     }
 
     @Override
     public void execute() {
         countJobExecutionTimesOfGroupExpressions(groupExpression);
-        if (!deriveChildren) {
-            deriveChildren = true;
-            pushJob(new DeriveStatsJob(this));
-            for (Group child : groupExpression.children()) {
-                if (!child.getLogicalExpressions().isEmpty()) {
-                    pushJob(new DeriveStatsJob(child.getLogicalExpressions().get(0), context));
+        if (groupExpression.isStatDerived()) {
+            return;
+        }
+        if (!deriveChildren && groupExpression.arity() > 0) {
+            pushJob(new DeriveStatsJob(groupExpression, true, context));
+
+            List<Group> children = groupExpression.children();
+            for (int i = children.size() - 1; i >= 0; i--) {
+                Group childGroup = children.get(i);
+
+                List<GroupExpression> logicalExpressions = childGroup.getLogicalExpressions();
+                for (int j = logicalExpressions.size() - 1; j >= 0; j--) {

Review Comment:
   we need to add a comment explaining why we use all group expressions to derive stats



##########
fe/fe-core/src/main/java/org/apache/doris/nereids/properties/RequestPropertyDeriver.java:
##########
@@ -183,22 +147,32 @@ private void addRequestPropertyToChildren(PhysicalProperties... physicalProperti
         requestPropertyToChildren.add(Lists.newArrayList(physicalProperties));
     }
 
-    private List<ExprId> extractFromDistinctFunction(List<NamedExpression> outputExpression) {
+    private void addRequestPropertyToChildren(List<PhysicalProperties> physicalProperties) {
+        requestPropertyToChildren.add(physicalProperties);
+    }
+
+    private List<ExprId> extractExprIdFromDistinctFunction(List<NamedExpression> outputExpression) {

Review Comment:
   not used?



##########
fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/logical/LogicalAggregate.java:
##########
@@ -82,8 +67,8 @@ public LogicalAggregate(
             List<Expression> groupByExpressions,
             List<NamedExpression> outputExpressions,
             CHILD_TYPE child) {
-        this(groupByExpressions, outputExpressions, Optional.empty(), false,
-                false, true, AggPhase.LOCAL, Optional.empty(), child);
+        this(groupByExpressions, outputExpressions,
+                false, Optional.empty(), child);

Review Comment:
   nit: could be on one line



##########
fe/fe-core/src/main/java/org/apache/doris/nereids/glue/translator/PhysicalPlanTranslator.java:
##########
@@ -1160,10 +1230,11 @@ private PlanFragment exchangeToMergeFragment(PlanFragment inputFragment, PlanTra
         Preconditions.checkState(inputFragment.isPartitioned());
 
         // exchange node clones the behavior of its input, aside from the conjuncts
-        ExchangeNode mergePlan =
-                new ExchangeNode(context.nextPlanNodeId(), inputFragment.getPlanRoot(), false);
+        ExchangeNode mergePlan = new ExchangeNode(context.nextPlanNodeId(),
+                inputFragment.getPlanRoot(), false);
+        DataPartition dataPartition = DataPartition.UNPARTITIONED;

Review Comment:
   why add this local variable?



##########
fe/fe-core/src/main/java/org/apache/doris/nereids/glue/translator/PhysicalPlanTranslator.java:
##########
@@ -1055,6 +1112,18 @@ public PlanFragment visitPhysicalLimit(PhysicalLimit<? extends Plan> physicalLim
     public PlanFragment visitPhysicalDistribute(PhysicalDistribute<? extends Plan> distribute,
             PlanTranslatorContext context) {
         PlanFragment childFragment = distribute.child().accept(this, context);
+
+        if (childFragment.getPlanRoot() instanceof AggregationNode
+                && distribute.child() instanceof PhysicalHashAggregate

Review Comment:
   why do we need the double check?



##########
fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/logical/DistinctToGroupBy.java:
##########
@@ -0,0 +1,32 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.nereids.rules.rewrite.logical;
+
+import org.apache.doris.nereids.rules.Rule;
+import org.apache.doris.nereids.rules.RuleType;
+import org.apache.doris.nereids.rules.rewrite.OneRewriteRuleFactory;
+
+/** AggGroupByToDistinct */
+public class DistinctToGroupBy extends OneRewriteRuleFactory {

Review Comment:
   what's this?



##########
fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/BindSlotReference.java:
##########
@@ -480,7 +478,7 @@ private boolean handleNamePartsTwoOrThree(Slot boundSlot, List<String> nameParts
     }
 
     /** BoundStar is used to wrap list of slots for temporary. */
-    private class BoundStar extends NamedExpression implements PropagateNullable {
+    public static class BoundStar extends NamedExpression implements PropagateNullable {

Review Comment:
   move it out of the BindSlotReference class?



##########
fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/logical/NormalizeAggregate.java:
##########
@@ -56,100 +52,86 @@
  * After rule:
  * Project(k1#1, Alias(SR#9)#4, Alias(k1#1 + 1)#5, Alias(SR#10))#6, Alias(SR#11))#7, Alias(SR#10 + 1)#8)
  * +-- Aggregate(keys:[k1#1, SR#9], outputs:[k1#1, SR#9, Alias(SUM(v1#3))#10, Alias(SUM(v1#3 + 1))#11])
- * +-- Project(k1#1, Alias(K2#2 + 1)#9, v1#3)
+ *   +-- Project(k1#1, Alias(K2#2 + 1)#9, v1#3)
  * <p>
  * More example could get from UT {NormalizeAggregateTest}
  */
-public class NormalizeAggregate extends OneRewriteRuleFactory {
+public class NormalizeAggregate extends OneRewriteRuleFactory implements NormalizeToSlot {
     @Override
     public Rule build() {
         return logicalAggregate().whenNot(LogicalAggregate::isNormalized).then(aggregate -> {
-            // substitution map used to substitute expression in aggregate's output to use it as top projections
-            Map<Expression, Expression> substitutionMap = Maps.newHashMap();
-            List<Expression> keys = aggregate.getGroupByExpressions();
-            List<NamedExpression> newOutputs = Lists.newArrayList();
-
-            // keys
-            Map<Boolean, List<Expression>> partitionedKeys = keys.stream()
-                    .collect(Collectors.groupingBy(SlotReference.class::isInstance));
-            List<Expression> newKeys = Lists.newArrayList();
-            List<NamedExpression> bottomProjections = Lists.newArrayList();
-            if (partitionedKeys.containsKey(false)) {
-                // process non-SlotReference keys
-                newKeys.addAll(partitionedKeys.get(false).stream()
-                        .map(e -> new Alias(e, e.toSql()))
-                        .peek(a -> substitutionMap.put(a.child(), a.toSlot()))
-                        .peek(bottomProjections::add)
-                        .map(Alias::toSlot)
-                        .collect(Collectors.toList()));
-            }
-            if (partitionedKeys.containsKey(true)) {
-                // process SlotReference keys
-                partitionedKeys.get(true).stream()
-                        .map(SlotReference.class::cast)
-                        .peek(s -> substitutionMap.put(s, s))
-                        .peek(bottomProjections::add)
-                        .forEach(newKeys::add);
-            }
-            // add all necessary key to output
-            substitutionMap.entrySet().stream()
-                    .filter(kv -> aggregate.getOutputExpressions().stream()
-                            .anyMatch(e -> e.anyMatch(kv.getKey()::equals)))
-                    .map(Entry::getValue)
-                    .map(NamedExpression.class::cast)
-                    .forEach(newOutputs::add);
-
-            // if we generate bottom, we need to generate to project too.
-            // output
-            List<NamedExpression> outputs = aggregate.getOutputExpressions();
-            Map<Boolean, List<NamedExpression>> partitionedOutputs = outputs.stream()
-                    .collect(Collectors.groupingBy(e -> e.anyMatch(AggregateFunction.class::isInstance)));
-
-            boolean needBottomProjects = partitionedKeys.containsKey(false);
-            if (partitionedOutputs.containsKey(true)) {
-                // process expressions that contain aggregate function
-                Set<AggregateFunction> aggregateFunctions = partitionedOutputs.get(true).stream()
-                        .flatMap(e -> e.<Set<AggregateFunction>>collect(AggregateFunction.class::isInstance).stream())
-                        .collect(Collectors.toSet());
-
-                // replace all non-slot expression in aggregate functions children.
-                for (AggregateFunction aggregateFunction : aggregateFunctions) {
-                    List<Expression> newChildren = Lists.newArrayList();
-                    for (Expression child : aggregateFunction.getArguments()) {
-                        if (child instanceof SlotReference || child instanceof Literal) {
-                            newChildren.add(child);
-                            if (child instanceof SlotReference) {
-                                bottomProjections.add((SlotReference) child);
-                            }
-                        } else {
-                            needBottomProjects = true;
-                            Alias alias = new Alias(child, child.toSql());
-                            bottomProjections.add(alias);
-                            newChildren.add(alias.toSlot());
-                        }
-                    }
-                    AggregateFunction newFunction = (AggregateFunction) aggregateFunction.withChildren(newChildren);
-                    Alias alias = new Alias(newFunction, newFunction.toSql());
-                    newOutputs.add(alias);
-                    substitutionMap.put(aggregateFunction, alias.toSlot());
-                }
-            }
-
-            // assemble
-            LogicalPlan root = aggregate.child();
-            if (needBottomProjects) {
-                root = new LogicalProject<>(bottomProjections, root);
-            }
-            root = new LogicalAggregate<>(newKeys, newOutputs, aggregate.isDisassembled(),
-                    true, aggregate.isFinalPhase(), aggregate.getAggPhase(),
-                    aggregate.getSourceRepeat(), root);
-            List<NamedExpression> projections = outputs.stream()
-                    .map(e -> ExpressionUtils.replace(e, substitutionMap))
-                    .map(NamedExpression.class::cast)
-                    .collect(Collectors.toList());
-            root = new LogicalProject<>(projections, root);
-
-            return root;
+            // push expression to bottom project
+            Set<Alias> existsAliases = ExpressionUtils.collect(

Review Comment:
   does it solve the data explosion problem for SQL like the following?
   ```sql
   SELECT a + 1, a + 2, a + 3, SUM(b) FROM t GROUP BY a + 1, a + 2, a + 3;
   SELECT a, SUM(b + 1), SUM(b + 2), SUM(b + 3) FROM t GROUP BY a;
   ```
   



##########
fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/logical/NormalizeToSlot.java:
##########
@@ -0,0 +1,133 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.nereids.rules.rewrite.logical;
+
+import org.apache.doris.nereids.trees.expressions.Alias;
+import org.apache.doris.nereids.trees.expressions.Expression;
+import org.apache.doris.nereids.trees.expressions.NamedExpression;
+import org.apache.doris.nereids.trees.expressions.Slot;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Maps;
+
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.function.BiFunction;
+import javax.annotation.Nullable;
+
+/** NormalizeToSlot */
+public interface NormalizeToSlot {
+
+    /** NormalizeSlotContext */
+    class NormalizeToSlotContext {
+        private final Map<Expression, NormalizeToSlotTriplet> normalizeToSlotMap;
+
+        public NormalizeToSlotContext(Map<Expression, NormalizeToSlotTriplet> normalizeToSlotMap) {
+            this.normalizeToSlotMap = normalizeToSlotMap;
+        }
+
+        /** buildContext */
+        public static NormalizeToSlotContext buildContext(
+                Set<Alias> existsAliases, Set<? extends Expression> sourceExpressions) {
+            Map<Expression, NormalizeToSlotTriplet> normalizeToSlotMap = Maps.newLinkedHashMap();
+
+            Map<Expression, Alias> existsAliasMap = Maps.newLinkedHashMap();
+            for (Alias existsAlias : existsAliases) {
+                existsAliasMap.put(existsAlias.child(), existsAlias);
+            }
+
+            for (Expression expression : sourceExpressions) {
+                if (normalizeToSlotMap.containsKey(expression)) {
+                    continue;
+                }
+                NormalizeToSlotTriplet normalizeToSlotTriplet =
+                        NormalizeToSlotTriplet.toTriplet(expression, existsAliasMap.get(expression));
+                normalizeToSlotMap.put(expression, normalizeToSlotTriplet);
+            }
+            return new NormalizeToSlotContext(normalizeToSlotMap);
+        }
+
+        /** normalizeToUseSlotRef, no custom normalize */
+        public <E extends Expression> List<E> normalizeToUseSlotRef(List<E> expressions) {
+            return normalizeToUseSlotRef(expressions, (context, expr) -> expr);
+        }
+
+        /** normalizeToUseSlotRef */
+        public <E extends Expression> List<E> normalizeToUseSlotRef(List<E> expressions,
+                BiFunction<NormalizeToSlotContext, Expression, Expression> customNormalize) {
+            return expressions.stream()
+                    .map(expr -> (E) expr.rewriteDownShortCircuit(child -> {
+                        Expression newChild = customNormalize.apply(this, child);
+                        if (newChild != null && newChild != child) {
+                            return newChild;
+                        }
+                        NormalizeToSlotTriplet normalizeToSlotTriplet = normalizeToSlotMap.get(child);
+                        return normalizeToSlotTriplet == null ? child : normalizeToSlotTriplet.remainExpr;
+                    })).collect(ImmutableList.toImmutableList());
+        }
+
+        /**
+         * generate bottom projections with groupByExpressions.
+         * eg:
+         * groupByExpressions: k1#0, k2#1 + 1;
+         * bottom: k1#0, (k2#1 + 1) AS (k2 + 1)#2;
+         */
+        public Set<NamedExpression> pushDownToNamedExpression(Collection<? extends Expression> needToPushExpressions) {
+            return needToPushExpressions.stream()
+                    .map(expr -> {
+                        NormalizeToSlotTriplet normalizeToSlotTriplet = normalizeToSlotMap.get(expr);
+                        return normalizeToSlotTriplet == null
+                                ? (NamedExpression) expr
+                                : normalizeToSlotTriplet.pushedExpr;
+                    }).collect(ImmutableSet.toImmutableSet());
+        }
+    }
+
+    /** NormalizeToSlotTriplet */
+    class NormalizeToSlotTriplet {
+        public final Expression originExpr;
+        public final Slot remainExpr;
+        public final NamedExpression pushedExpr;

Review Comment:
   nit: add an example to explain them
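   For instance, reusing the Javadoc example above (`groupByExpressions: k1#0, k2#1 + 1`), the triplet for the second key would roughly be:
   ```java
   // originExpr: (k2#1 + 1)                       -- the expression as it appears in the plan
   // pushedExpr: Alias(k2#1 + 1) AS `(k2 + 1)`#2  -- what gets pushed into the bottom project
   // remainExpr: SlotReference `(k2 + 1)`#2       -- the slot that replaces it upstream
   ```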



##########
fe/fe-core/src/main/java/org/apache/doris/nereids/rules/expression/rewrite/rules/FoldConstantRuleOnFE.java:
##########
@@ -59,143 +61,148 @@ public class FoldConstantRuleOnFE extends AbstractExpressionRewriteRule {
 
     @Override
     public Expression rewrite(Expression expr, ExpressionRewriteContext ctx) {
-        return process(expr, ctx);
-    }
+        if (expr instanceof AggregateFunction && ((AggregateFunction) expr).isDistinct()) {
+            return expr;
+        } else if (expr instanceof AggregateExpression && ((AggregateExpression) expr).getFunction().isDistinct()) {
+            return expr;

Review Comment:
   why can't their children be folded?
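   A rough sketch of folding only the children (assuming `Expression#children()` / `withChildren(List)` behave as elsewhere in Nereids; this is not the PR's code):
   ```java
   if (expr instanceof AggregateFunction && ((AggregateFunction) expr).isDistinct()) {
       // keep the distinct aggregate itself, but still try to fold its arguments
       List<Expression> foldedChildren = expr.children().stream()
               .map(child -> rewrite(child, ctx))
               .collect(ImmutableList.toImmutableList());
       return expr.withChildren(foldedChildren);
   }
   ```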



##########
fe/fe-core/src/main/java/org/apache/doris/nereids/rules/rewrite/AggregateStrategies.java:
##########
@@ -0,0 +1,1234 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.nereids.rules.rewrite;
+
+import org.apache.doris.catalog.Column;
+import org.apache.doris.catalog.KeysType;
+import org.apache.doris.catalog.PrimitiveType;
+import org.apache.doris.common.Pair;
+import org.apache.doris.nereids.CascadesContext;
+import org.apache.doris.nereids.annotation.DependsRules;
+import org.apache.doris.nereids.pattern.PatternDescriptor;
+import org.apache.doris.nereids.properties.DistributionSpecHash.ShuffleType;
+import org.apache.doris.nereids.properties.PhysicalProperties;
+import org.apache.doris.nereids.properties.RequireProperties;
+import org.apache.doris.nereids.rules.Rule;
+import org.apache.doris.nereids.rules.RuleType;
+import 
org.apache.doris.nereids.rules.expression.rewrite.ExpressionRewriteContext;
+import 
org.apache.doris.nereids.rules.expression.rewrite.rules.FoldConstantRuleOnFE;
+import org.apache.doris.nereids.rules.expression.rewrite.rules.TypeCoercion;
+import org.apache.doris.nereids.rules.implementation.ImplementationRuleFactory;
+import 
org.apache.doris.nereids.rules.implementation.LogicalOlapScanToPhysicalOlapScan;
+import org.apache.doris.nereids.rules.rewrite.logical.NormalizeAggregate;
+import org.apache.doris.nereids.trees.expressions.AggregateExpression;
+import org.apache.doris.nereids.trees.expressions.Alias;
+import org.apache.doris.nereids.trees.expressions.Cast;
+import org.apache.doris.nereids.trees.expressions.Expression;
+import org.apache.doris.nereids.trees.expressions.IsNull;
+import org.apache.doris.nereids.trees.expressions.NamedExpression;
+import org.apache.doris.nereids.trees.expressions.SlotReference;
+import 
org.apache.doris.nereids.trees.expressions.functions.agg.AggregateFunction;
+import org.apache.doris.nereids.trees.expressions.functions.agg.AggregateParam;
+import org.apache.doris.nereids.trees.expressions.functions.agg.Count;
+import 
org.apache.doris.nereids.trees.expressions.functions.agg.MultiDistinctCount;
+import 
org.apache.doris.nereids.trees.expressions.functions.agg.MultiDistinctSum;
+import org.apache.doris.nereids.trees.expressions.functions.agg.Sum;
+import org.apache.doris.nereids.trees.expressions.functions.scalar.If;
+import org.apache.doris.nereids.trees.expressions.literal.Literal;
+import org.apache.doris.nereids.trees.expressions.literal.NullLiteral;
+import org.apache.doris.nereids.trees.plans.AggMode;
+import org.apache.doris.nereids.trees.plans.AggPhase;
+import org.apache.doris.nereids.trees.plans.GroupPlan;
+import org.apache.doris.nereids.trees.plans.Plan;
+import org.apache.doris.nereids.trees.plans.algebra.Project;
+import org.apache.doris.nereids.trees.plans.logical.LogicalAggregate;
+import org.apache.doris.nereids.trees.plans.logical.LogicalOlapScan;
+import org.apache.doris.nereids.trees.plans.logical.LogicalProject;
+import org.apache.doris.nereids.trees.plans.physical.PhysicalHashAggregate;
+import org.apache.doris.nereids.trees.plans.physical.PhysicalOlapScan;
+import 
org.apache.doris.nereids.trees.plans.physical.PhysicalStorageLayerAggregate;
+import 
org.apache.doris.nereids.trees.plans.physical.PhysicalStorageLayerAggregate.PushDownAggOp;
+import org.apache.doris.nereids.util.ExpressionUtils;
+import org.apache.doris.qe.ConnectContext;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Lists;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.stream.Collectors;
+import javax.annotation.Nullable;
+
+/** AggregateStrategies */
+@DependsRules({
+    NormalizeAggregate.class,
+    FoldConstantRuleOnFE.class
+})
+public class AggregateStrategies implements ImplementationRuleFactory {
+
+    @Override
+    public List<Rule> buildRules() {
+        PatternDescriptor<LogicalAggregate<GroupPlan>> basePattern = 
logicalAggregate()
+                .when(LogicalAggregate::isNormalized);
+
+        return ImmutableList.of(
+            RuleType.STORAGE_LAYER_AGGREGATE_WITHOUT_PROJECT.build(
+                logicalAggregate(
+                    logicalOlapScan()
+                )
+                .when(agg -> agg.isNormalized() && enablePushDownNoGroupAgg())
+                .thenApply(ctx -> storageLayerAggregate(ctx.root, null, 
ctx.root.child(), ctx.cascadesContext))
+            ),
+            RuleType.STORAGE_LAYER_AGGREGATE_WITH_PROJECT.build(
+                logicalAggregate(
+                    logicalProject(
+                        logicalOlapScan()
+                    )
+                )
+                .when(agg -> agg.isNormalized() && enablePushDownNoGroupAgg())
+                .thenApply(ctx -> {
+                    LogicalAggregate<LogicalProject<LogicalOlapScan>> agg = 
ctx.root;
+                    LogicalProject<LogicalOlapScan> project = agg.child();
+                    LogicalOlapScan olapScan = project.child();
+                    return storageLayerAggregate(agg, project, olapScan, 
ctx.cascadesContext);
+                })
+            ),
+            RuleType.ONE_PHASE_AGGREGATE_WITHOUT_DISTINCT.build(
+                basePattern
+                    .when(agg -> agg.getDistinctArguments().size() == 0)
+                    .thenApplyMulti(ctx -> 
onePhaseAggregateWithoutDistinct(ctx.root, ctx.connectContext))
+            ),
+            RuleType.TWO_PHASE_AGGREGATE_WITHOUT_DISTINCT.build(
+                basePattern
+                    .when(agg -> agg.getDistinctArguments().size() == 0)
+                    .thenApplyMulti(ctx -> 
twoPhaseAggregateWithoutDistinct(ctx.root, ctx.connectContext))
+            ),
+            RuleType.TWO_PHASE_AGGREGATE_WITH_COUNT_DISTINCT_MULTI.build(
+                basePattern
+                    .when(this::containsCountDistinctMultiExpr)
+                    .thenApplyMulti(ctx -> 
twoPhaseAggregateWithCountDistinctMulti(ctx.root, ctx.connectContext))
+            ),
+            RuleType.THREE_PHASE_AGGREGATE_WITH_COUNT_DISTINCT_MULTI.build(
+                basePattern
+                    .when(this::containsCountDistinctMultiExpr)
+                    .thenApplyMulti(ctx -> 
threePhaseAggregateWithCountDistinctMulti(ctx.root, ctx.connectContext))
+            ),
+            RuleType.TWO_PHASE_AGGREGATE_WITH_DISTINCT.build(
+                basePattern
+                    .when(agg -> agg.getDistinctArguments().size() == 1)
+                    .thenApplyMulti(ctx -> 
twoPhaseAggregateWithDistinct(ctx.root, ctx.connectContext))
+            ),
+            RuleType.ONE_PHASE_AGGREGATE_SINGLE_DISTINCT_TO_MULTI.build(
+                basePattern
+                    .when(agg -> agg.getDistinctArguments().size() == 1 && 
enableSingleDistinctColumnOpt())
+                    .thenApplyMulti(ctx -> 
onePhaseAggregateWithMultiDistinct(ctx.root, ctx.connectContext))
+            ),
+            RuleType.TWO_PHASE_AGGREGATE_SINGLE_DISTINCT_TO_MULTI.build(
+                basePattern
+                    .when(agg -> agg.getDistinctArguments().size() == 1 && 
enableSingleDistinctColumnOpt())
+                    .thenApplyMulti(ctx -> 
twoPhaseAggregateWithMultiDistinct(ctx.root, ctx.connectContext))
+            ),
+            RuleType.THREE_PHASE_AGGREGATE_WITH_DISTINCT.build(
+                basePattern
+                    .when(agg -> agg.getDistinctArguments().size() == 1)
+                    .thenApplyMulti(ctx -> 
threePhaseAggregateWithDistinct(ctx.root, ctx.connectContext))
+            ),
+            RuleType.TWO_PHASE_AGGREGATE_WITH_MULTI_DISTINCT.build(
+                basePattern
+                    .when(agg -> agg.getDistinctArguments().size() > 1 && 
!containsCountDistinctMultiExpr(agg))
+                    .thenApplyMulti(ctx -> 
twoPhaseAggregateWithMultiDistinct(ctx.root, ctx.connectContext))
+            )
+        );
+    }
+
+    /**
+     * sql: select count(*) from tbl
+     *
+     * before:
+     *
+     *               LogicalAggregate(groupBy=[], output=[count(*)])
+     *                                |
+     *                       LogicalOlapScan(table=tbl)
+     *
+     * after:
+     *
+     *               LogicalAggregate(groupBy=[], output=[count(*)])
+     *                                |
+     *        PhysicalStorageLayerAggregate(pushAggOp=COUNT, 
table=PhysicalOlapScan(table=tbl))
+     *
+     */
+    private LogicalAggregate<? extends Plan> storageLayerAggregate(
+            LogicalAggregate<? extends Plan> aggregate,
+            @Nullable LogicalProject<? extends Plan> project,
+            LogicalOlapScan olapScan, CascadesContext cascadesContext) {
+        final LogicalAggregate<? extends Plan> canNotPush = aggregate;
+
+        KeysType keysType = olapScan.getTable().getKeysType();
+        if (keysType != KeysType.AGG_KEYS && keysType != KeysType.DUP_KEYS) {
+            return canNotPush;
+        }
+
+        List<Expression> groupByExpressions = 
aggregate.getGroupByExpressions();
+        if (!groupByExpressions.isEmpty() || 
!aggregate.getDistinctArguments().isEmpty()) {
+            return canNotPush;
+        }
+
+        Set<AggregateFunction> aggregateFunctions = 
aggregate.getAggregateFunctions();
+        Set<Class<? extends AggregateFunction>> functionClasses = 
aggregateFunctions
+                .stream()
+                .map(AggregateFunction::getClass)
+                .collect(Collectors.toSet());
+
+        Map<Class, PushDownAggOp> supportedAgg = 
PushDownAggOp.supportedFunctions();
+        if (!supportedAgg.keySet().containsAll(functionClasses)) {
+            return canNotPush;
+        }
+        if (functionClasses.contains(Count.class) && keysType != 
KeysType.DUP_KEYS) {
+            return canNotPush;
+        }
+        if (aggregateFunctions.stream().anyMatch(fun -> fun.arity() > 1)) {
+            return canNotPush;
+        }
+
+        // we already normalize the arguments to slotReference
+        List<Expression> argumentsOfAggregateFunction = 
aggregateFunctions.stream()
+                .flatMap(aggregateFunction -> 
aggregateFunction.getArguments().stream())
+                .collect(ImmutableList.toImmutableList());
+
+        if (project != null) {
+            argumentsOfAggregateFunction = Project.findProject(
+                        (List<SlotReference>) (List) 
argumentsOfAggregateFunction, project.getProjects())
+                    .stream()
+                    .map(p -> p instanceof Alias ? p.child(0) : p)
+                    .collect(ImmutableList.toImmutableList());
+        }
+
+        boolean onlyContainsSlotOrNumericCastSlot = 
argumentsOfAggregateFunction
+                .stream()
+                .allMatch(argument -> {
+                    if (argument instanceof SlotReference) {
+                        return true;
+                    }
+                    if (argument instanceof Cast) {
+                        return argument.child(0) instanceof SlotReference
+                                && argument.getDataType().isNumericType()
+                                && 
argument.child(0).getDataType().isNumericType();
+                    }
+                    return false;
+                });
+        if (!onlyContainsSlotOrNumericCastSlot) {
+            return canNotPush;
+        }
+
+        Set<PushDownAggOp> pushDownAggOps = functionClasses.stream()
+                .map(supportedAgg::get)
+                .collect(Collectors.toSet());
+
+        PushDownAggOp mergeOp = pushDownAggOps.size() == 1
+                ? pushDownAggOps.iterator().next()
+                : PushDownAggOp.MIX;
+
+        Set<SlotReference> aggUsedSlots =
+                ExpressionUtils.collect(argumentsOfAggregateFunction, 
SlotReference.class::isInstance);
+
+        List<SlotReference> usedSlotInTable = (List<SlotReference>) (List) 
Project.findProject(aggUsedSlots,
+                (List<NamedExpression>) (List) olapScan.getOutput());
+
+        for (SlotReference slot : usedSlotInTable) {
+            Column column = slot.getColumn().get();
+            if (keysType == KeysType.AGG_KEYS && !column.isKey()) {
+                return canNotPush;
+            }
+            // The zone map max length of CharFamily is 512, do not
+            // over the length: https://github.com/apache/doris/pull/6293
+            if (mergeOp == PushDownAggOp.MIN_MAX || mergeOp == 
PushDownAggOp.MIX) {
+                PrimitiveType colType = column.getType().getPrimitiveType();
+                if (colType.isArrayType() || colType.isComplexType() || 
colType == PrimitiveType.STRING) {
+                    return canNotPush;
+                }
+                if (colType.isCharFamily() && mergeOp != PushDownAggOp.COUNT 
&& column.getType().getLength() > 512) {
+                    return canNotPush;
+                }
+            }
+            if (mergeOp == PushDownAggOp.COUNT || mergeOp == 
PushDownAggOp.MIX) {
+                // NULL value behavior in `count` function is zero, so
+                // we should not use row_count to speed up query. the col
+                // must be not null
+                if (column.isAllowNull()) {
+                    return canNotPush;
+                }
+            }
+        }
+
+        PhysicalOlapScan physicalOlapScan = (PhysicalOlapScan) new 
LogicalOlapScanToPhysicalOlapScan()
+                .build()
+                .transform(olapScan, cascadesContext)
+                .get(0);
+
+        return aggregate.withChildren(ImmutableList.of(
+            new PhysicalStorageLayerAggregate(physicalOlapScan, mergeOp)
+        ));
+    }
+
+    /**
+     * sql: select count(*) from tbl group by id
+     *
+     * before:
+     *
+     *          LogicalAggregate(groupBy=[id], output=[count(*)])
+     *                       |
+     *               LogicalOlapScan(table=tbl)
+     *
+     * after:
+     *
+     *  single node aggregate:
+     *
+     *             PhysicalHashAggregate(groupBy=[id], output=[count(*)])
+     *                              |
+     *                 PhysicalDistribute(distributionSpec=GATHER)
+     *                             |
+     *                     LogicalOlapScan(table=tbl)
+     *
+     *  distribute node aggregate:
+     *
+     *            PhysicalHashAggregate(groupBy=[id], output=[count(*)])
+     *                                    |
+     *           LogicalOlapScan(table=tbl, **already distribute by id**)
+     *
+     */
+    private List<PhysicalHashAggregate<Plan>> onePhaseAggregateWithoutDistinct(
+            LogicalAggregate<? extends Plan> logicalAgg, ConnectContext connectContext) {
+        RequireProperties requireGather = RequireProperties.of(PhysicalProperties.GATHER);

Review Comment:
   the gather requirement could be a static member of RequireProperties
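   e.g. (sketch):
   ```java
   // In RequireProperties (sketch): cache the common gather requirement as a constant
   public static final RequireProperties GATHER = RequireProperties.of(PhysicalProperties.GATHER);
   ```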


