Github user cloud-fan commented on a diff in the pull request:

    https://github.com/apache/spark/pull/21564#discussion_r196298655
  
    --- Diff: 
sql/core/src/test/scala/org/apache/spark/sql/execution/PlannerSuite.scala ---
    @@ -686,6 +686,67 @@ class PlannerSuite extends SharedSQLContext {
           Range(1, 2, 1, 1)))
         df.queryExecution.executedPlan.execute()
       }
    +
    +  test("SPARK-24556: always rewrite output partitioning in 
ReusedExchangeExec " +
    +    "and InMemoryTableScanExec") {
    +    def checkOutputPartitioningRewrite(
    +        plans: Seq[SparkPlan],
    +        expectedPartitioningClass: Class[_]): Unit = {
    +      plans.foreach { plan =>
    +        val partitioning = plan.outputPartitioning
    +        assert(partitioning.getClass == expectedPartitioningClass)
    +        val partitionedAttrs = 
partitioning.asInstanceOf[Expression].references
    +        assert(partitionedAttrs.subsetOf(plan.outputSet))
    +      }
    +    }
    +
    +    def checkReusedExchangeOutputPartitioningRewrite(
    +        df: DataFrame,
    +        expectedPartitioningClass: Class[_]): Unit = {
    +      val reusedExchange = df.queryExecution.executedPlan.collect {
    +        case r: ReusedExchangeExec => r
    +      }
    +      assert(reusedExchange.size == 1)
    +      checkOutputPartitioningRewrite(reusedExchange, 
expectedPartitioningClass)
    +    }
    +
    +    def checkInMemoryTableScanOutputPartitioningRewrite(
    +        df: DataFrame,
    +        expectedPartitioningClass: Class[_]): Unit = {
    +      val inMemoryScan = df.queryExecution.executedPlan.collect {
    +        case m: InMemoryTableScanExec => m
    +      }
    +      assert(inMemoryScan.size == 1)
    +      checkOutputPartitioningRewrite(inMemoryScan, 
expectedPartitioningClass)
    +    }
    +
    +    // ReusedExchange is HashPartitioning
    +    val df1 = Seq(1 -> "a").toDF("i", "j").repartition($"i")
    +    val df2 = Seq(1 -> "a").toDF("i", "j").repartition($"i")
    +    checkReusedExchangeOutputPartitioningRewrite(df1.union(df2), 
classOf[HashPartitioning])
    +
    +    // ReusedExchange is RangePartitioning
    +    val df3 = Seq(1 -> "a").toDF("i", "j").orderBy($"i")
    +    val df4 = Seq(1 -> "a").toDF("i", "j").orderBy($"i")
    +    checkReusedExchangeOutputPartitioningRewrite(df3.union(df4), 
classOf[RangePartitioning])
    +
    +    // InMemoryTableScan is HashPartitioning
    +    df1.persist()
    --- End diff --
    
    I feel it's better not to reuse the dataframes that were used to test 
ReuseExchange


---

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to