maropu commented on a change in pull request #29087:
URL: https://github.com/apache/spark/pull/29087#discussion_r551647083



##########
File path: 
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
##########
@@ -1519,12 +1519,7 @@ class Analyzer(override val catalogManager: 
CatalogManager)
         }
       // If the script transformation input contains Stars, expand it.
       case t: ScriptTransformation if containsStar(t.input) =>
-        t.copy(
-          input = t.input.flatMap {
-            case s: Star => s.expand(t.child, resolver)
-            case o => o :: Nil
-          }
-        )
+        t.copy(input = t.child.output)

Review comment:
       Could you add tests for `UnresolvedRegex` 
(`spark.sql.parser.quotedRegexColumnNames`=true case)?

##########
File path: sql/core/src/test/resources/sql-tests/inputs/pivot.sql
##########
@@ -46,7 +46,7 @@ SELECT * FROM (
   SELECT course, earnings FROM courseSales
 )
 PIVOT (
-  sum(earnings)
+  sum(earnings)LATERAL VIEW

Review comment:
       Is this update related to this PR? It seems this change causes the test 
failure in GA.

##########
File path: sql/core/src/test/resources/sql-tests/inputs/transform.sql
##########
@@ -183,3 +189,114 @@ SELECT a, b, decode(c, 'UTF-8'), d, e, f, g, h, i, j, k, 
l FROM (
     NULL DEFINED AS 'NULL'
   FROM t
 ) tmp;
+
+SELECT TRANSFORM (b, a, CAST(c AS STRING))
+USING 'cat' AS (a, b, c)
+FROM script_trans
+WHERE a <= 4;
+
+SELECT TRANSFORM (1, 2, 3)
+USING 'cat' AS (a, b, c)
+FROM script_trans
+WHERE a <= 4;
+
+SELECT TRANSFORM(1, 2)
+USING 'cat' AS (a INT, b INT)
+FROM script_trans
+LIMIT 1;
+
+SELECT TRANSFORM (
+b AS d5, a,

Review comment:
       nit: add indents.

##########
File path: sql/core/src/test/resources/sql-tests/inputs/transform.sql
##########
@@ -183,3 +189,114 @@ SELECT a, b, decode(c, 'UTF-8'), d, e, f, g, h, i, j, k, 
l FROM (
     NULL DEFINED AS 'NULL'
   FROM t
 ) tmp;
+
+SELECT TRANSFORM (b, a, CAST(c AS STRING))
+USING 'cat' AS (a, b, c)
+FROM script_trans
+WHERE a <= 4;
+
+SELECT TRANSFORM (1, 2, 3)
+USING 'cat' AS (a, b, c)
+FROM script_trans
+WHERE a <= 4;
+
+SELECT TRANSFORM(1, 2)
+USING 'cat' AS (a INT, b INT)
+FROM script_trans
+LIMIT 1;
+
+SELECT TRANSFORM (
+b AS d5, a,
+CASE
+  WHEN c > 100 THEN 1
+  WHEN c < 100 THEN 2
+ELSE 3 END)
+USING 'cat' AS (a, b,  c)
+FROM script_trans
+WHERE a <= 4;
+
+SELECT TRANSFORM (b, a, c + 1)
+USING 'cat' AS (a, b, c)
+FROM script_trans
+WHERE a <= 4;
+
+SELECT TRANSFORM (*)
+USING 'cat' AS (a, b, c)
+FROM script_trans
+WHERE a <= 4;
+
+SELECT TRANSFORM (b AS d, MAX(a) as max_a, CAST(SUM(c) AS STRING))
+USING 'cat' AS (a, b, c)
+FROM script_trans
+WHERE a <= 4
+GROUP BY b;
+
+SELECT TRANSFORM (b AS d, MAX(a) FILTER (WHERE a > 3) AS max_a, CAST(SUM(c) AS 
STRING))
+USING 'cat' AS (a,b,c)
+FROM script_trans
+WHERE a <= 4
+GROUP BY b;
+
+SELECT TRANSFORM (b, MAX(a) as max_a, CAST(sum(c) AS STRING))
+USING 'cat' AS (a, b, c)
+FROM script_trans
+WHERE a <= 2
+GROUP BY b;
+
+SELECT TRANSFORM (b, MAX(a) as max_a, CAST(SUM(c) AS STRING))
+USING 'cat' AS (a, b, c)
+FROM script_trans
+WHERE a <= 4
+GROUP BY b
+HAVING max_a > 0;
+
+SELECT TRANSFORM (b, MAX(a) as max_a, CAST(SUM(c) AS STRING))
+USING 'cat' AS (a, b, c)
+FROM script_trans
+WHERE a <= 4
+GROUP BY b
+HAVING max(a) > 1;
+
+SELECT TRANSFORM (b, MAX(a) OVER w as max_a, CAST(SUM(c) OVER w AS STRING))
+USING 'cat' AS (a, b, c)
+FROM script_trans
+WHERE a <= 4
+WINDOW w AS (PARTITION BY b ORDER BY a);
+
+set spark.sql.legacy.bucketedTableScan.outputOrdering=true;

Review comment:
       Why do we need to set this config here?

##########
File path: 
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
##########
@@ -1519,12 +1519,7 @@ class Analyzer(override val catalogManager: 
CatalogManager)
         }
       // If the script transformation input contains Stars, expand it.

Review comment:
       Please update the comment here (describe why it's okay just to pass 
`t.child.output` into `ScriptTransformation.input`).

##########
File path: sql/core/src/test/resources/sql-tests/inputs/transform.sql
##########
@@ -183,3 +189,114 @@ SELECT a, b, decode(c, 'UTF-8'), d, e, f, g, h, i, j, k, 
l FROM (
     NULL DEFINED AS 'NULL'
   FROM t
 ) tmp;
+
+SELECT TRANSFORM (b, a, CAST(c AS STRING))
+USING 'cat' AS (a, b, c)
+FROM script_trans
+WHERE a <= 4;
+
+SELECT TRANSFORM (1, 2, 3)
+USING 'cat' AS (a, b, c)
+FROM script_trans
+WHERE a <= 4;
+
+SELECT TRANSFORM(1, 2)
+USING 'cat' AS (a INT, b INT)
+FROM script_trans
+LIMIT 1;
+
+SELECT TRANSFORM (
+b AS d5, a,
+CASE
+  WHEN c > 100 THEN 1
+  WHEN c < 100 THEN 2
+ELSE 3 END)
+USING 'cat' AS (a, b,  c)
+FROM script_trans
+WHERE a <= 4;
+
+SELECT TRANSFORM (b, a, c + 1)
+USING 'cat' AS (a, b, c)
+FROM script_trans
+WHERE a <= 4;
+
+SELECT TRANSFORM (*)
+USING 'cat' AS (a, b, c)
+FROM script_trans
+WHERE a <= 4;
+
+SELECT TRANSFORM (b AS d, MAX(a) as max_a, CAST(SUM(c) AS STRING))
+USING 'cat' AS (a, b, c)
+FROM script_trans
+WHERE a <= 4
+GROUP BY b;
+
+SELECT TRANSFORM (b AS d, MAX(a) FILTER (WHERE a > 3) AS max_a, CAST(SUM(c) AS 
STRING))
+USING 'cat' AS (a,b,c)
+FROM script_trans
+WHERE a <= 4
+GROUP BY b;
+
+SELECT TRANSFORM (b, MAX(a) as max_a, CAST(sum(c) AS STRING))
+USING 'cat' AS (a, b, c)
+FROM script_trans
+WHERE a <= 2
+GROUP BY b;
+
+SELECT TRANSFORM (b, MAX(a) as max_a, CAST(SUM(c) AS STRING))
+USING 'cat' AS (a, b, c)
+FROM script_trans
+WHERE a <= 4
+GROUP BY b
+HAVING max_a > 0;
+
+SELECT TRANSFORM (b, MAX(a) as max_a, CAST(SUM(c) AS STRING))
+USING 'cat' AS (a, b, c)
+FROM script_trans
+WHERE a <= 4
+GROUP BY b
+HAVING max(a) > 1;
+
+SELECT TRANSFORM (b, MAX(a) OVER w as max_a, CAST(SUM(c) OVER w AS STRING))
+USING 'cat' AS (a, b, c)
+FROM script_trans
+WHERE a <= 4
+WINDOW w AS (PARTITION BY b ORDER BY a);
+
+set spark.sql.legacy.bucketedTableScan.outputOrdering=true;
+
+SELECT TRANSFORM (MAX(a) as max_a, CAST(SUM(c) AS STRING))
+USING 'cat' AS (a, b)
+FROM script_trans
+WHERE a <= 4
+HAVING max(a) > 1;
+
+SELECT TRANSFORM (b, MAX(a) as max_a, CAST(SUM(c) AS STRING), myCol, myCol2)
+USING 'cat' AS (a, b, c, d, e)
+FROM script_trans
+LATERAL VIEW explode(array(array(1,2,3))) myTable AS myCol
+LATERAL VIEW explode(myTable.myCol) myTable2 AS myCol2
+WHERE a <= 4
+GROUP BY b, myCol, myCol2
+HAVING max(a) > 1;
+
+
+FROM
+(FROM script_trans SELECT TRANSFORM(a, b) USING 'cat' AS (`a` INT, b STRING)) t

Review comment:
       nit: add indents.

##########
File path: sql/core/src/test/resources/sql-tests/inputs/transform.sql
##########
@@ -183,3 +189,114 @@ SELECT a, b, decode(c, 'UTF-8'), d, e, f, g, h, i, j, k, 
l FROM (
     NULL DEFINED AS 'NULL'
   FROM t
 ) tmp;
+
+SELECT TRANSFORM (b, a, CAST(c AS STRING))

Review comment:
       nit: follow the other formats: `TRANSFORM (` -> `TRANSFORM(`

##########
File path: 
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala
##########
@@ -659,12 +670,21 @@ class AstBuilder extends SqlBaseBaseVisitor[AnyRef] with 
SQLConfHelper with Logg
         AttributeReference("value", StringType)()), true)
     }
 
+    val plan = visitCommonSelectQueryClausePlan(relation,
+      lateralView,
+      transformClause.namedExpressionSeq,
+      whereClause,
+      aggregationClause,
+      havingClause,
+      windowClause,
+      isDistinct = false)

Review comment:
       In Hive, is it not possible to use `distinct` in TRANSFORM? e.g., `SELECT 
TRANSFORM(distinct a, b)`?

##########
File path: sql/core/src/test/resources/sql-tests/inputs/transform.sql
##########
@@ -183,3 +189,114 @@ SELECT a, b, decode(c, 'UTF-8'), d, e, f, g, h, i, j, k, 
l FROM (
     NULL DEFINED AS 'NULL'
   FROM t
 ) tmp;
+
+SELECT TRANSFORM (b, a, CAST(c AS STRING))
+USING 'cat' AS (a, b, c)
+FROM script_trans
+WHERE a <= 4;
+
+SELECT TRANSFORM (1, 2, 3)
+USING 'cat' AS (a, b, c)
+FROM script_trans
+WHERE a <= 4;
+
+SELECT TRANSFORM(1, 2)
+USING 'cat' AS (a INT, b INT)
+FROM script_trans
+LIMIT 1;
+
+SELECT TRANSFORM (
+b AS d5, a,
+CASE
+  WHEN c > 100 THEN 1
+  WHEN c < 100 THEN 2
+ELSE 3 END)
+USING 'cat' AS (a, b,  c)
+FROM script_trans
+WHERE a <= 4;
+
+SELECT TRANSFORM (b, a, c + 1)
+USING 'cat' AS (a, b, c)
+FROM script_trans
+WHERE a <= 4;
+
+SELECT TRANSFORM (*)
+USING 'cat' AS (a, b, c)
+FROM script_trans
+WHERE a <= 4;
+
+SELECT TRANSFORM (b AS d, MAX(a) as max_a, CAST(SUM(c) AS STRING))
+USING 'cat' AS (a, b, c)
+FROM script_trans
+WHERE a <= 4
+GROUP BY b;
+
+SELECT TRANSFORM (b AS d, MAX(a) FILTER (WHERE a > 3) AS max_a, CAST(SUM(c) AS 
STRING))
+USING 'cat' AS (a,b,c)
+FROM script_trans
+WHERE a <= 4
+GROUP BY b;
+
+SELECT TRANSFORM (b, MAX(a) as max_a, CAST(sum(c) AS STRING))
+USING 'cat' AS (a, b, c)
+FROM script_trans
+WHERE a <= 2
+GROUP BY b;
+
+SELECT TRANSFORM (b, MAX(a) as max_a, CAST(SUM(c) AS STRING))
+USING 'cat' AS (a, b, c)
+FROM script_trans
+WHERE a <= 4
+GROUP BY b
+HAVING max_a > 0;
+
+SELECT TRANSFORM (b, MAX(a) as max_a, CAST(SUM(c) AS STRING))
+USING 'cat' AS (a, b, c)
+FROM script_trans
+WHERE a <= 4
+GROUP BY b
+HAVING max(a) > 1;
+
+SELECT TRANSFORM (b, MAX(a) OVER w as max_a, CAST(SUM(c) OVER w AS STRING))
+USING 'cat' AS (a, b, c)
+FROM script_trans
+WHERE a <= 4
+WINDOW w AS (PARTITION BY b ORDER BY a);
+
+set spark.sql.legacy.bucketedTableScan.outputOrdering=true;

Review comment:
       nit: `set` -> `SET`

##########
File path: sql/core/src/test/resources/sql-tests/inputs/transform.sql
##########
@@ -183,3 +189,114 @@ SELECT a, b, decode(c, 'UTF-8'), d, e, f, g, h, i, j, k, 
l FROM (
     NULL DEFINED AS 'NULL'
   FROM t
 ) tmp;
+
+SELECT TRANSFORM (b, a, CAST(c AS STRING))
+USING 'cat' AS (a, b, c)
+FROM script_trans
+WHERE a <= 4;
+
+SELECT TRANSFORM (1, 2, 3)
+USING 'cat' AS (a, b, c)
+FROM script_trans
+WHERE a <= 4;
+
+SELECT TRANSFORM(1, 2)
+USING 'cat' AS (a INT, b INT)
+FROM script_trans
+LIMIT 1;
+
+SELECT TRANSFORM (
+b AS d5, a,
+CASE
+  WHEN c > 100 THEN 1
+  WHEN c < 100 THEN 2
+ELSE 3 END)
+USING 'cat' AS (a, b,  c)
+FROM script_trans
+WHERE a <= 4;
+
+SELECT TRANSFORM (b, a, c + 1)
+USING 'cat' AS (a, b, c)
+FROM script_trans
+WHERE a <= 4;
+
+SELECT TRANSFORM (*)
+USING 'cat' AS (a, b, c)
+FROM script_trans
+WHERE a <= 4;
+
+SELECT TRANSFORM (b AS d, MAX(a) as max_a, CAST(SUM(c) AS STRING))
+USING 'cat' AS (a, b, c)
+FROM script_trans
+WHERE a <= 4
+GROUP BY b;
+
+SELECT TRANSFORM (b AS d, MAX(a) FILTER (WHERE a > 3) AS max_a, CAST(SUM(c) AS 
STRING))
+USING 'cat' AS (a,b,c)
+FROM script_trans
+WHERE a <= 4
+GROUP BY b;
+
+SELECT TRANSFORM (b, MAX(a) as max_a, CAST(sum(c) AS STRING))
+USING 'cat' AS (a, b, c)
+FROM script_trans
+WHERE a <= 2
+GROUP BY b;
+
+SELECT TRANSFORM (b, MAX(a) as max_a, CAST(SUM(c) AS STRING))
+USING 'cat' AS (a, b, c)
+FROM script_trans
+WHERE a <= 4
+GROUP BY b
+HAVING max_a > 0;
+
+SELECT TRANSFORM (b, MAX(a) as max_a, CAST(SUM(c) AS STRING))
+USING 'cat' AS (a, b, c)
+FROM script_trans
+WHERE a <= 4
+GROUP BY b
+HAVING max(a) > 1;
+
+SELECT TRANSFORM (b, MAX(a) OVER w as max_a, CAST(SUM(c) OVER w AS STRING))
+USING 'cat' AS (a, b, c)
+FROM script_trans
+WHERE a <= 4
+WINDOW w AS (PARTITION BY b ORDER BY a);
+
+set spark.sql.legacy.bucketedTableScan.outputOrdering=true;
+
+SELECT TRANSFORM (MAX(a) as max_a, CAST(SUM(c) AS STRING))
+USING 'cat' AS (a, b)
+FROM script_trans
+WHERE a <= 4
+HAVING max(a) > 1;
+
+SELECT TRANSFORM (b, MAX(a) as max_a, CAST(SUM(c) AS STRING), myCol, myCol2)
+USING 'cat' AS (a, b, c, d, e)
+FROM script_trans
+LATERAL VIEW explode(array(array(1,2,3))) myTable AS myCol
+LATERAL VIEW explode(myTable.myCol) myTable2 AS myCol2
+WHERE a <= 4
+GROUP BY b, myCol, myCol2
+HAVING max(a) > 1;
+
+
+FROM
+(FROM script_trans SELECT TRANSFORM(a, b) USING 'cat' AS (`a` INT, b STRING)) t
+SELECT a + 1;
+
+FROM
+(SELECT TRANSFORM(a, SUM(b) b)
+USING 'cat' AS (`a` INT, b STRING)

Review comment:
       nit: add indents.




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]



---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to