cloud-fan commented on a change in pull request #35130:
URL: https://github.com/apache/spark/pull/35130#discussion_r783130063
##########
File path: sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/V2ScanRelationPushDown.scala
##########
@@ -88,25 +88,40 @@ object V2ScanRelationPushDown extends Rule[LogicalPlan]
with PredicateHelper {
sHolder.builder match {
case r: SupportsPushDownAggregates =>
val aggExprToOutputOrdinal = mutable.HashMap.empty[Expression, Int]
- var ordinal = 0
- val aggregates = resultExpressions.flatMap { expr =>
- expr.collect {
-         // Do not push down duplicated aggregate expressions. For example,
-         // `SELECT max(a) + 1, max(a) + 2 FROM ...`, we should only push down one
-         // `max(a)` to the data source.
- case agg: AggregateExpression
- if !aggExprToOutputOrdinal.contains(agg.canonicalized) =>
- aggExprToOutputOrdinal(agg.canonicalized) = ordinal
- ordinal += 1
- agg
- }
- }
+       val aggregates = collectAggregates(resultExpressions, aggExprToOutputOrdinal)
val normalizedAggregates = DataSourceStrategy.normalizeExprs(
  aggregates, sHolder.relation.output).asInstanceOf[Seq[AggregateExpression]]
val normalizedGroupingExpressions = DataSourceStrategy.normalizeExprs(
  groupingExpressions, sHolder.relation.output)
- val pushedAggregates = PushDownUtils.pushAggregates(
- r, normalizedAggregates, normalizedGroupingExpressions)
+       val translatedAggregates = DataSourceStrategy.translateAggregation(
+         normalizedAggregates, normalizedGroupingExpressions)
+       val (finalResultExpressions, finalAggregates, finalTranslatedAggregates) = {
+ if (translatedAggregates.isEmpty ||
+ r.supportCompletePushDown(translatedAggregates.get)) {
+ (resultExpressions, aggregates, translatedAggregates)
+ } else {
+ val newResultExpressions = resultExpressions.map { expr =>
+ expr.transform {
+             case AggregateExpression(avg: aggregate.Average, _, isDistinct, _, _) =>
+ val left = addCastIfNeeded(aggregate.Sum(avg.child)
+ .toAggregateExpression(isDistinct), avg.dataType)
Review comment:
I don't think this approach can retain AVG's original data type.
Please check the implementation of Spark's AVG (`aggregate.Average`).
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]