huaxingao commented on code in PR #37195:
URL: https://github.com/apache/spark/pull/37195#discussion_r924064288
##########
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/V2ScanRelationPushDown.scala:
##########
@@ -92,189 +93,199 @@ object V2ScanRelationPushDown extends Rule[LogicalPlan]
with PredicateHelper wit
def pushDownAggregates(plan: LogicalPlan): LogicalPlan = plan.transform {
// update the scan builder with agg pushdown and return a new plan with
agg pushed
- case aggNode @ Aggregate(groupingExpressions, resultExpressions, child) =>
- child match {
- case ScanOperation(project, filters, sHolder: ScanBuilderHolder)
- if filters.isEmpty && CollapseProject.canCollapseExpressions(
- resultExpressions, project, alwaysInline = true) =>
- sHolder.builder match {
- case r: SupportsPushDownAggregates =>
- val aliasMap = getAliasMap(project)
- val actualResultExprs =
resultExpressions.map(replaceAliasButKeepName(_, aliasMap))
- val actualGroupExprs = groupingExpressions.map(replaceAlias(_,
aliasMap))
-
- val aggExprToOutputOrdinal = mutable.HashMap.empty[Expression,
Int]
- val aggregates = collectAggregates(actualResultExprs,
aggExprToOutputOrdinal)
- val normalizedAggregates = DataSourceStrategy.normalizeExprs(
- aggregates,
sHolder.relation.output).asInstanceOf[Seq[AggregateExpression]]
- val normalizedGroupingExpressions =
DataSourceStrategy.normalizeExprs(
- actualGroupExprs, sHolder.relation.output)
- val translatedAggregates =
DataSourceStrategy.translateAggregation(
- normalizedAggregates, normalizedGroupingExpressions)
- val (finalResultExpressions, finalAggregates,
finalTranslatedAggregates) = {
- if (translatedAggregates.isEmpty ||
- r.supportCompletePushDown(translatedAggregates.get) ||
-
translatedAggregates.get.aggregateExpressions().forall(!_.isInstanceOf[Avg])) {
- (actualResultExprs, aggregates, translatedAggregates)
- } else {
- // scalastyle:off
- // The data source doesn't support the complete push-down of
this aggregation.
- // Here we translate `AVG` to `SUM / COUNT`, so that it's
more likely to be
- // pushed, completely or partially.
- // e.g. TABLE t (c1 INT, c2 INT, c3 INT)
- // SELECT avg(c1) FROM t GROUP BY c2;
- // The original logical plan is
- // Aggregate [c2#10],[avg(c1#9) AS avg(c1)#19]
- // +- ScanOperation[...]
- //
- // After convert avg(c1#9) to sum(c1#9)/count(c1#9)
- // we have the following
- // Aggregate [c2#10],[sum(c1#9)/count(c1#9) AS avg(c1)#19]
- // +- ScanOperation[...]
- // scalastyle:on
- val newResultExpressions = actualResultExprs.map { expr =>
- expr.transform {
- case AggregateExpression(avg: aggregate.Average, _,
isDistinct, _, _) =>
- val sum =
aggregate.Sum(avg.child).toAggregateExpression(isDistinct)
- val count =
aggregate.Count(avg.child).toAggregateExpression(isDistinct)
- avg.evaluateExpression transform {
- case a: Attribute if a.semanticEquals(avg.sum) =>
- addCastIfNeeded(sum, avg.sum.dataType)
- case a: Attribute if a.semanticEquals(avg.count) =>
- addCastIfNeeded(count, avg.count.dataType)
- }
- }
- }.asInstanceOf[Seq[NamedExpression]]
- // Because aggregate expressions changed, translate them
again.
- aggExprToOutputOrdinal.clear()
- val newAggregates =
- collectAggregates(newResultExpressions,
aggExprToOutputOrdinal)
- val newNormalizedAggregates =
DataSourceStrategy.normalizeExprs(
- newAggregates,
sHolder.relation.output).asInstanceOf[Seq[AggregateExpression]]
- (newResultExpressions, newAggregates,
DataSourceStrategy.translateAggregation(
- newNormalizedAggregates, normalizedGroupingExpressions))
+ case agg: Aggregate => rewriteAggregate(agg)
+ }
+
+ private def rewriteAggregate(agg: Aggregate): LogicalPlan = agg.child match {
+ case ScanOperation(project, Nil, holder @ ScanBuilderHolder(_, _,
+ r: SupportsPushDownAggregates)) if
CollapseProject.canCollapseExpressions(
+ agg.aggregateExpressions, project, alwaysInline = true) =>
+ val aliasMap = getAliasMap(project)
+ val actualResultExprs =
agg.aggregateExpressions.map(replaceAliasButKeepName(_, aliasMap))
+ val actualGroupExprs = agg.groupingExpressions.map(replaceAlias(_,
aliasMap))
+
+ val aggExprToOutputOrdinal = mutable.HashMap.empty[Expression, Int]
+ val aggregates = collectAggregates(actualResultExprs,
aggExprToOutputOrdinal)
+ val normalizedAggExprs = DataSourceStrategy.normalizeExprs(
+ aggregates,
holder.relation.output).asInstanceOf[Seq[AggregateExpression]]
+ val normalizedGroupingExpr = DataSourceStrategy.normalizeExprs(
+ actualGroupExprs, holder.relation.output)
+ val translatedAggOpt = DataSourceStrategy.translateAggregation(
+ normalizedAggExprs, normalizedGroupingExpr)
+ if (translatedAggOpt.isEmpty) {
+ // Cannot translate the catalyst aggregate, return the query plan
unchanged.
+ return agg
+ }
+
+ val (finalResultExprs, finalAggExprs, translatedAgg,
canCompletePushDown) = {
+ if (r.supportCompletePushDown(translatedAggOpt.get)) {
+ (actualResultExprs, normalizedAggExprs, translatedAggOpt.get, true)
+ } else if
(!translatedAggOpt.get.aggregateExpressions().exists(_.isInstanceOf[Avg])) {
+ (actualResultExprs, normalizedAggExprs, translatedAggOpt.get, false)
+ } else {
+ // scalastyle:off
+ // The data source doesn't support the complete push-down of this
aggregation.
+ // Here we translate `AVG` to `SUM / COUNT`, so that it's more
likely to be
+ // pushed, completely or partially.
+ // e.g. TABLE t (c1 INT, c2 INT, c3 INT)
+ // SELECT avg(c1) FROM t GROUP BY c2;
+ // The original logical plan is
+ // Aggregate [c2#10],[avg(c1#9) AS avg(c1)#19]
+ // +- ScanOperation[...]
+ //
+      // After converting avg(c1#9) to sum(c1#9)/count(c1#9)
+ // we have the following
+ // Aggregate [c2#10],[sum(c1#9)/count(c1#9) AS avg(c1)#19]
+ // +- ScanOperation[...]
+ // scalastyle:on
+ val newResultExpressions = actualResultExprs.map { expr =>
+ expr.transform {
+ case AggregateExpression(avg: aggregate.Average, _, isDistinct,
_, _) =>
+ val sum =
aggregate.Sum(avg.child).toAggregateExpression(isDistinct)
+ val count =
aggregate.Count(avg.child).toAggregateExpression(isDistinct)
+ avg.evaluateExpression transform {
+ case a: Attribute if a.semanticEquals(avg.sum) =>
+ addCastIfNeeded(sum, avg.sum.dataType)
+ case a: Attribute if a.semanticEquals(avg.count) =>
+ addCastIfNeeded(count, avg.count.dataType)
}
- }
+ }
+ }.asInstanceOf[Seq[NamedExpression]]
+ // Because aggregate expressions changed, translate them again.
+ aggExprToOutputOrdinal.clear()
+ val newAggregates =
+ collectAggregates(newResultExpressions, aggExprToOutputOrdinal)
+ val newNormalizedAggExprs = DataSourceStrategy.normalizeExprs(
+ newAggregates,
holder.relation.output).asInstanceOf[Seq[AggregateExpression]]
+ val newTranslatedAggOpt = DataSourceStrategy.translateAggregation(
+ newNormalizedAggExprs, normalizedGroupingExpr)
+ if (newTranslatedAggOpt.isEmpty) {
+            // Ideally we should never reach here. But if we end up unable
to translate the
+            // new aggregate with AVG replaced by SUM/COUNT, revert to the
original one.
+ (actualResultExprs, normalizedAggExprs, translatedAggOpt.get,
false)
+ } else {
+ (newResultExpressions, newNormalizedAggExprs,
newTranslatedAggOpt.get,
+ r.supportCompletePushDown(newTranslatedAggOpt.get))
+ }
+ }
+ }
- if (finalTranslatedAggregates.isEmpty) {
- aggNode // return original plan node
- } else if
(!r.supportCompletePushDown(finalTranslatedAggregates.get) &&
- !supportPartialAggPushDown(finalTranslatedAggregates.get)) {
- aggNode // return original plan node
- } else {
- val pushedAggregates =
finalTranslatedAggregates.filter(r.pushAggregation)
- if (pushedAggregates.isEmpty) {
- aggNode // return original plan node
- } else {
- // No need to do column pruning because only the aggregate
columns are used as
- // DataSourceV2ScanRelation output columns. All the other
columns are not
- // included in the output.
- val scan = sHolder.builder.build()
-
- // scalastyle:off
- // use the group by columns and aggregate columns as the
output columns
- // e.g. TABLE t (c1 INT, c2 INT, c3 INT)
- // SELECT min(c1), max(c1) FROM t GROUP BY c2;
- // Use c2, min(c1), max(c1) as output for
DataSourceV2ScanRelation
- // We want to have the following logical plan:
- // == Optimized Logical Plan ==
- // Aggregate [c2#10], [min(min(c1)#21) AS min(c1)#17,
max(max(c1)#22) AS max(c1)#18]
- // +- RelationV2[c2#10, min(c1)#21, max(c1)#22]
- // scalastyle:on
- val newOutput = scan.readSchema().toAttributes
- assert(newOutput.length == groupingExpressions.length +
finalAggregates.length)
- val groupByExprToOutputOrdinal =
mutable.HashMap.empty[Expression, Int]
- val groupAttrs =
normalizedGroupingExpressions.zip(newOutput).zipWithIndex.map {
- case ((a: Attribute, b: Attribute), _) =>
b.withExprId(a.exprId)
- case ((expr, attr), ordinal) =>
- if
(!groupByExprToOutputOrdinal.contains(expr.canonicalized)) {
- groupByExprToOutputOrdinal(expr.canonicalized) =
ordinal
- }
- attr
- }
- val aggOutput = newOutput.drop(groupAttrs.length)
- val output = groupAttrs ++ aggOutput
-
- logInfo(
- s"""
- |Pushing operators to ${sHolder.relation.name}
- |Pushed Aggregate Functions:
- |
${pushedAggregates.get.aggregateExpressions.mkString(", ")}
- |Pushed Group by:
- | ${pushedAggregates.get.groupByExpressions.mkString(",
")}
- |Output: ${output.mkString(", ")}
- """.stripMargin)
-
- val wrappedScan = getWrappedScan(scan, sHolder,
pushedAggregates)
- val scanRelation =
- DataSourceV2ScanRelation(sHolder.relation, wrappedScan,
output)
- if (r.supportCompletePushDown(pushedAggregates.get)) {
- val projectExpressions = finalResultExpressions.map { expr
=>
- expr.transformDown {
- case agg: AggregateExpression =>
- val ordinal =
aggExprToOutputOrdinal(agg.canonicalized)
- val child =
- addCastIfNeeded(aggOutput(ordinal),
agg.resultAttribute.dataType)
- Alias(child,
agg.resultAttribute.name)(agg.resultAttribute.exprId)
- case expr if
groupByExprToOutputOrdinal.contains(expr.canonicalized) =>
- val ordinal =
groupByExprToOutputOrdinal(expr.canonicalized)
- addCastIfNeeded(groupAttrs(ordinal), expr.dataType)
- }
- }.asInstanceOf[Seq[NamedExpression]]
- Project(projectExpressions, scanRelation)
+ if (!canCompletePushDown && !supportPartialAggPushDown(translatedAgg)) {
+ return agg
+ }
+ if (!r.pushAggregation(translatedAgg)) {
+ return agg
+ }
+
+ // scalastyle:off
+ // use the group by columns and aggregate columns as the output columns
+ // e.g. TABLE t (c1 INT, c2 INT, c3 INT)
+ // SELECT min(c1), max(c1) FROM t GROUP BY c2;
+ // Use c2, min(c1), max(c1) as output for DataSourceV2ScanRelation
+ // We want to have the following logical plan:
+ // == Optimized Logical Plan ==
Review Comment:
I think this example is not accurate any more. Do we need to update this?
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]