Github user yhuai commented on a diff in the pull request:

    https://github.com/apache/spark/pull/9404#discussion_r43660128
  
    --- Diff: sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala ---
    @@ -353,4 +354,44 @@ class CachedTableSuite extends QueryTest with SharedSQLContext {
         assert(sparkPlan.collect { case e: InMemoryColumnarTableScan => e }.size === 3)
         assert(sparkPlan.collect { case e: PhysicalRDD => e }.size === 0)
       }
    +
    +  /**
    +   * Verifies that the plan for `df` contains the `expected` number of Exchange operators.
    +   */
    +  private def verifyNumExchanges(df: DataFrame, expected: Int): Unit = {
    +    assert(df.queryExecution.executedPlan.collect { case e: Exchange => e }.size == expected)
    +  }
    +
    +  test("A cached table preserves the partitioning and ordering of its 
cached SparkPlan") {
    +    val table3x = testData.unionAll(testData).unionAll(testData)
    +    table3x.registerTempTable("testData3x")
    +
    +    sql("SELECT key, value FROM testData3x ORDER BY 
key").registerTempTable("orderedTable")
    +    sqlContext.cacheTable("orderedTable")
    +    assertCached(sqlContext.table("orderedTable"))
    +    // Should not have an exchange as the query is already sorted on the 
group by key.
    +    verifyNumExchanges(sql("SELECT key, count(*) FROM orderedTable GROUP 
BY key"), 0)
    +    checkAnswer(
    +      sql("SELECT key, count(*) FROM orderedTable GROUP BY key ORDER BY 
key"),
    +      sql("SELECT key, count(*) FROM testData3x GROUP BY key ORDER BY 
key").collect())
    +    sqlContext.uncacheTable("orderedTable")
    +
    +    // Set up two tables distributed in the same way.
    +    testData.distributeBy(Column("key") :: Nil, 5).registerTempTable("t1")
    +    testData2.distributeBy(Column("a") :: Nil, 5).registerTempTable("t2")
    +    sqlContext.cacheTable("t1")
    +    sqlContext.cacheTable("t2")
    +
    +    // Joining them should result in no exchanges.
    +    verifyNumExchanges(sql("SELECT * FROM t1 t1 JOIN t2 t2 ON t1.key = 
t2.a"), 0)
    +
    +    // Grouping on the partition key should result in no exchanges.
    +    verifyNumExchanges(sql("SELECT count(*) FROM t1 GROUP BY key"), 0)
    +
    +    // TODO: this is an issue with self joins. The number of exchanges should be 0.
    +    verifyNumExchanges(sql("SELECT * FROM t1 t1 JOIN t1 t2 ON t1.key = t2.key"), 1)
    +
    +    sqlContext.uncacheTable("t1")
    +    sqlContext.uncacheTable("t2")
    +  }
    --- End diff ---
    
    How about we also check the results?
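    
    For instance (just a sketch, untested, and assuming `testData` and `testData2` are also registered as temp tables, as elsewhere in this suite), the join and aggregation cases could be paired with result checks like:
    
        // Compare the join on the cached, pre-partitioned tables against the same join on the base tables.
        checkAnswer(
          sql("SELECT * FROM t1 t1 JOIN t2 t2 ON t1.key = t2.a"),
          sql("SELECT * FROM testData JOIN testData2 ON key = a").collect())
    
        // Likewise for the aggregation on the partitioning key.
        checkAnswer(
          sql("SELECT count(*) FROM t1 GROUP BY key"),
          sql("SELECT count(*) FROM testData GROUP BY key").collect())
    
    Comparing against the same query over the uncached base tables keeps the expected answer independent of the caching and partitioning under test.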

