maropu commented on a change in pull request #28490:
URL: https://github.com/apache/spark/pull/28490#discussion_r435029537
##########
File path: sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
##########
@@ -506,31 +506,55 @@ class Analyzer(
         aggregations: Seq[NamedExpression],
         groupByAliases: Seq[Alias],
         groupingAttrs: Seq[Expression],
-        gid: Attribute): Seq[NamedExpression] = aggregations.map {
-      // collect all the found AggregateExpression, so we can check an expression is part of
-      // any AggregateExpression or not.
-      val aggsBuffer = ArrayBuffer[Expression]()
-      // Returns whether the expression belongs to any expressions in `aggsBuffer` or not.
-      def isPartOfAggregation(e: Expression): Boolean = {
-        aggsBuffer.exists(a => a.find(_ eq e).isDefined)
+        gid: Attribute): Seq[NamedExpression] = {
+      val resolvedGroupByAliases = groupByAliases.map(_.transformDown {
Review comment:
> The shown query plan above seems before ResolveGroupingAnalytics? So without CUBE is it possible to encounter similar issue? @maropu
Yea, all the cases have the same issue:
```
scala> spark.range(1).selectExpr("'x' AS a", "1 AS b", "array(named_struct('row_id', 1, 'json_string', 'y')) AS c").createOrReplaceTempView("t")

// ROLLUP
scala> sql("""
     | select a, coalesce(get_json_object(each.json_string,'$.iType'),'-127') as iType, sum(b)
     | from t
     | LATERAL VIEW explode(c) x AS each
     | group by a, get_json_object(each.json_string,'$.iType')
     | with rollup
     | """).show()
org.apache.spark.sql.AnalysisException: expression 'x.`each`' is neither present in the group by, nor is it an aggregate function. Add to group by or wrap in first() (or first_value) if you don't care which value you get.;;
Aggregate [a#17, get_json_object(each#9.json_string AS json_string#10, $.iType)#18, spark_grouping_id#16L], [a#17, coalesce(get_json_object(each#9.json_string, $.iType), -127) AS iType#8, sum(cast(b#3 as bigint)) AS sum(b)#13L]
+- Expand [ArrayBuffer(a#2, b#3, c#4, each#9, a#14, get_json_object(each#9.json_string AS json_string#10, $.iType)#15, 0), ArrayBuffer(a#2, b#3, c#4, each#9, a#14, null, 1), ArrayBuffer(a#2, b#3, c#4, each#9, null, null, 3)], [a#2, b#3, c#4, each#9, a#17, get_json_object(each#9.json_string AS json_string#10, $.iType)#18, spark_grouping_id#16L]
   +- Project [a#2, b#3, c#4, each#9, a#2 AS a#14, get_json_object(each#9.json_string, $.iType) AS get_json_object(each#9.json_string AS json_string#10, $.iType)#15]
      +- Generate explode(c#4), false, x, [each#9]
         +- SubqueryAlias t
            +- Project [x AS a#2, 1 AS b#3, array(named_struct(row_id, 1, json_string, y)) AS c#4]
               +- Range (0, 1, step=1, splits=Some(4))

// GROUPING SETS
scala> sql("""
     | select a, coalesce(get_json_object(each.json_string,'$.iType'),'-127') as iType, sum(b)
     | from t
     | LATERAL VIEW explode(c) x AS each
     | group by grouping sets((a, get_json_object(each.json_string,'$.iType')))
     | """).show()
org.apache.spark.sql.AnalysisException: expression 'x.`each`' is neither present in the group by, nor is it an aggregate function. Add to group by or wrap in first() (or first_value) if you don't care which value you get.;;
Aggregate [a#28, get_json_object(each#20.json_string AS json_string#21, $.iType)#29, spark_grouping_id#27L], [a#28, coalesce(get_json_object(each#20.json_string, $.iType), -127) AS iType#19, sum(cast(b#3 as bigint)) AS sum(b)#24L]
+- Expand [ArrayBuffer(a#2, b#3, c#4, each#20, a#25, get_json_object(each#20.json_string AS json_string#21, $.iType)#26, 0)], [a#2, b#3, c#4, each#20, a#28, get_json_object(each#20.json_string AS json_string#21, $.iType)#29, spark_grouping_id#27L]
   +- Project [a#2, b#3, c#4, each#20, a#2 AS a#25, get_json_object(each#20.json_string, $.iType) AS get_json_object(each#20.json_string AS json_string#21, $.iType)#26]
      +- Generate explode(c#4), false, x, [each#20]
         +- SubqueryAlias t
            +- Project [x AS a#2, 1 AS b#3, array(named_struct(row_id, 1, json_string, y)) AS c#4]
               +- Range (0, 1, step=1, splits=Some(4))
```
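FWIW, a minimal workaround sketch until this is fixed (not part of this PR, untested here): materialize the JSON extraction in a subquery first, so that the rollup grouping only references a plain column rather than an expression over the generator output `each`:
```scala
// Hedged workaround sketch (not from this PR): compute the extraction in a
// subquery, then group by the resulting plain column. This sidesteps grouping
// on an expression over the generator output `each` under CUBE/ROLLUP.
sql("""
  select a, coalesce(iType, '-127') as iType, sum(b)
  from (
    select a, get_json_object(each.json_string, '$.iType') as iType, b
    from t
    LATERAL VIEW explode(c) x AS each
  )
  group by a, iType
  with rollup
""").show()
```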