Github user cloud-fan commented on a diff in the pull request:

    https://github.com/apache/spark/pull/9343#discussion_r43622494
  
    --- Diff: sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala 
---
    @@ -1921,4 +1921,89 @@ class SQLQuerySuite extends QueryTest with 
SharedSQLContext {
           assert(sampled.count() == sampledOdd.count() + sampledEven.count())
         }
       }
    +
    +  test("Struct Star Expansion") {
    +    val structDf = testData2.select("a", "b").as("record")
    +
    +    checkAnswer(
    +      structDf.select($"record.a", $"record.b"),
    +      Row(1, 1) :: Row(1, 2) :: Row(2, 1) :: Row(2, 2) :: Row(3, 1) :: 
Row(3, 2) :: Nil)
    +
    +    checkAnswer(
    +      structDf.select($"record.*"),
    +      Row(1, 1) :: Row(1, 2) :: Row(2, 1) :: Row(2, 2) :: Row(3, 1) :: 
Row(3, 2) :: Nil)
    +
    +    checkAnswer(
    +      structDf.select($"record.*", $"record.*"),
    +      Row(1, 1, 1, 1) :: Row(1, 2, 1, 2) :: Row(2, 1, 2, 1) :: Row(2, 2, 
2, 2) ::
    +        Row(3, 1, 3, 1) :: Row(3, 2, 3, 2) :: Nil)
    +
    +    checkAnswer(
    +      sql("select struct(a, b) as r1, struct(b, a) as r2 from 
testData2").select($"r1.*", $"r2.*"),
    +      Row(1, 1, 1, 1) :: Row(1, 2, 2, 1) :: Row(2, 1, 1, 2) :: Row(2, 2, 
2, 2) ::
    +        Row(3, 1, 1, 3) :: Row(3, 2, 2, 3) :: Nil)
    +
    +    checkAnswer(sql(
    +      """
    +        | SELECT min(struct(record.*)) FROM
    +        |   (select struct(a,b) as record from testData2) tmp
    +      """.stripMargin),
    +      Row(Row(1, 1)) :: Nil)
    +
    +    // Try with an alias on the select list
    +    checkAnswer(sql(
    +      """
    +        | SELECT max(struct(record.*)) as r FROM
    +        |   (select struct(a,b) as record from testData2) tmp
    +      """.stripMargin).select($"r.*"),
    +      Row(3, 2) :: Nil)
    +
    +    // With GROUP BY
    +    checkAnswer(sql(
    +      """
    +        | SELECT min(struct(record.*)) FROM
    +        |   (select a as a, struct(a,b) as record from testData2) tmp
    +        | GROUP BY a
    +      """.stripMargin),
    +      Row(Row(1, 1)) :: Row(Row(2, 1)) :: Row(Row(3, 1)) :: Nil)
    +
    +    // With GROUP BY and alias
    +    checkAnswer(sql(
    +      """
    +        | SELECT max(struct(record.*)) as r FROM
    +        |   (select a as a, struct(a,b) as record from testData2) tmp
    +        | GROUP BY a
    +      """.stripMargin).select($"r.*"),
    +      Row(1, 2) :: Row(2, 2) :: Row(3, 2) :: Nil)
    +
    +    // With GROUP BY and alias and additional fields in the struct
    +    checkAnswer(sql(
    +      """
    +        | SELECT max(struct(a, record.*, b)) as r FROM
    +        |   (select a as a, b as b, struct(a,b) as record from testData2) 
tmp
    +        | GROUP BY a
    +      """.stripMargin).select($"r.*"),
    +      Row(1, 1, 2, 2) :: Row(2, 2, 2, 2) :: Row(3, 3, 2, 2) :: Nil)
    +
    +    // Create a data set that contains nested structs.
    +    val nestedStructData = sql(
    +      """
    +        | SELECT struct(r1, r2) as record FROM
    +        |   (SELECT struct(a, b) as r1, struct(b, a) as r2 FROM testData2) 
tmp
    +      """.stripMargin)
    +
    +    checkAnswer(nestedStructData.select($"record.*"),
    +      Row(Row(1, 1), Row(1, 1)) :: Row(Row(1, 2), Row(2, 1)) :: Row(Row(2, 
1), Row(1, 2)) ::
    +        Row(Row(2, 2), Row(2, 2)) :: Row(Row(3, 1), Row(1, 3)) :: 
Row(Row(3, 2), Row(2, 3)) :: Nil)
    +    checkAnswer(nestedStructData.select($"record.r1"),
    +      Row(Row(1, 1)) :: Row(Row(1, 2)) :: Row(Row(2, 1)) :: Row(Row(2, 2)) 
::
    +        Row(Row(3, 1)) :: Row(Row(3, 2)) :: Nil)
    +    checkAnswer(
    +      nestedStructData.select($"record.r1.*"),
    --- End diff --
    
    can you also try a pure SQL test case like `sql("select record.r1.* from 
xxx")`? AFAIK, in the DataFrame API we have 
[rule](https://github.com/apache/spark/blob/master/sql/core/src/main/scala/org/apache/spark/sql/Column.scala#L62-L63):
    ```
    case "*" => UnresolvedStar(None)
    case _ if name.endsWith(".*") => UnresolvedStar(Some(name.substring(0, 
name.length - 2)))
    ```
    So the `UnresolvedStar.target` will be `record.r1`, and it works well.
    
    However, for pure SQL, we only have 
[rule](https://github.com/apache/spark/blob/master/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/SqlParser.scala#L468-L469):
    ```
    ( "*" ^^^ UnresolvedStar(None)
    | ident <~ "." ~ "*" ^^ { case tableName => 
UnresolvedStar(Option(tableName)) }
    ```
    which cannot parse `record.r1.*`.
    
    A possible solution: `(ident <~ ".").+ <~ "*" ^^ { case target => 
UnresolvedStar(Option(target.mkString("."))) }`


---
If your project is set up for it, you can reply to this email and have your
reply appear on GitHub as well. If your project does not have this feature
enabled and wishes so, or if the feature is enabled but not working, please
contact infrastructure at [email protected] or file a JIRA ticket
with INFRA.
---

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to