xuanyuanking commented on a change in pull request #28501:
URL: https://github.com/apache/spark/pull/28501#discussion_r425808687
##########
File path:
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
##########
@@ -2163,22 +2232,33 @@ class Analyzer(
alias.toAttribute
}
}
-
- // Push the aggregate expressions into the aggregate (if any).
if (aggregateExpressions.nonEmpty) {
- Project(agg.output,
- Filter(transformedAggregateFilter,
-      agg.copy(aggregateExpressions = agg.aggregateExpressions ++ aggregateExpressions)))
+ Some(aggregateExpressions, transformedAggregateFilter)
} else {
- filter
+ None
}
} else {
- filter
+ None
}
} catch {
// Attempting to resolve in the aggregate can result in ambiguity. When this happens,
// just return the original plan.
Review comment:
Thanks, done in 1de0c75
##########
File path:
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
##########
@@ -595,8 +601,71 @@ class Analyzer(
}
}
-    // This require transformUp to replace grouping()/grouping_id() in resolved Filter/Sort
- def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperatorsUp {
+ private def tryResolveHavingCondition(
+ a: UnresolvedHaving, agg: LogicalPlan): LogicalPlan = {
Review comment:
Copy that, done in 1de0c75.
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]