cloud-fan commented on a change in pull request #35130:
URL: https://github.com/apache/spark/pull/35130#discussion_r786033834
##########
File path:
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/V2ScanRelationPushDown.scala
##########
@@ -88,25 +88,65 @@ object V2ScanRelationPushDown extends Rule[LogicalPlan]
with PredicateHelper {
sHolder.builder match {
case r: SupportsPushDownAggregates =>
val aggExprToOutputOrdinal = mutable.HashMap.empty[Expression,
Int]
- var ordinal = 0
- val aggregates = resultExpressions.flatMap { expr =>
- expr.collect {
- // Do not push down duplicated aggregate expressions. For
example,
- // `SELECT max(a) + 1, max(a) + 2 FROM ...`, we should only
push down one
- // `max(a)` to the data source.
- case agg: AggregateExpression
- if !aggExprToOutputOrdinal.contains(agg.canonicalized) =>
- aggExprToOutputOrdinal(agg.canonicalized) = ordinal
- ordinal += 1
- agg
- }
- }
+ val aggregates = collectAggregates(resultExpressions,
aggExprToOutputOrdinal)
val normalizedAggregates = DataSourceStrategy.normalizeExprs(
aggregates,
sHolder.relation.output).asInstanceOf[Seq[AggregateExpression]]
val normalizedGroupingExpressions =
DataSourceStrategy.normalizeExprs(
groupingExpressions, sHolder.relation.output)
- val pushedAggregates = PushDownUtils.pushAggregates(
- r, normalizedAggregates, normalizedGroupingExpressions)
+ val translatedAggregates =
DataSourceStrategy.translateAggregation(
+ normalizedAggregates, normalizedGroupingExpressions)
+ val (finalResultExpressions, finalAggregates,
finalTranslatedAggregates) = {
+ if (translatedAggregates.isEmpty ||
+ r.supportCompletePushDown(translatedAggregates.get) ||
+
translatedAggregates.get.aggregateExpressions().forall(!_.isInstanceOf[Avg])) {
+ (resultExpressions, aggregates, translatedAggregates)
+ } else {
+ // scalastyle:off
+ // The data source doesn't support the complete push-down of
this aggregation.
+ // Here we translate `AVG` to `SUM / COUNT`, so that it's
more likely to be
+ // pushed, completely or partially.
+ // e.g. TABLE t (c1 INT, c2 INT, c3 INT)
+ // SELECT avg(c1) FROM t GROUP BY c2;
+ // The original logical plan is
+ // Aggregate [c2#10],[avg(c1#9) AS avg(c1)#19]
+ // +- ScanOperation[...]
+ //
+ // After convert avg(c1#9) to sum(c1#9)/count(c1#9)
+ // we have the following
+ // Aggregate [c2#10],[sum(c1#9)/count(c1#9) AS avg(c1)#19]
+ // +- ScanOperation[...]
+ // scalastyle:on
+ val newResultExpressions = resultExpressions.map { expr =>
+ expr.transform {
+ case AggregateExpression(avg: aggregate.Average, _,
isDistinct, _, _) =>
+ val sum =
aggregate.Sum(avg.child).toAggregateExpression(isDistinct)
Review comment:
To make the `Sum` work properly, we should cast the input to a wider type:
`aggregate.Sum(addCastIfNeeded(avg.child, avg.sumDataType)).toAgg...`
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]