This is an automated email from the ASF dual-hosted git repository.

gurwls223 pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/branch-3.0 by this push:
     new 2fbf969  Revert "[SPARK-37779][SQL] Make ColumnarToRowExec plan 
canonicalizable after (de)serialization"
2fbf969 is described below

commit 2fbf969ff1cbbb54dcd46b463a51fc4e47fd327d
Author: Hyukjin Kwon <gurwls223@apache.org>
AuthorDate: Wed Jan 5 17:51:30 2022 +0900

    Revert "[SPARK-37779][SQL] Make ColumnarToRowExec plan canonicalizable 
after (de)serialization"
    
    This reverts commit 8ab7cd3ca7e828e114218ae811a9afebb5c9bcc7.
---
 .../org/apache/spark/sql/execution/Columnar.scala     |  3 +--
 .../apache/spark/sql/execution/SparkPlanSuite.scala   | 19 -------------------
 2 files changed, 1 insertion(+), 21 deletions(-)

diff --git 
a/sql/core/src/main/scala/org/apache/spark/sql/execution/Columnar.scala 
b/sql/core/src/main/scala/org/apache/spark/sql/execution/Columnar.scala
index 2fe046b..e01cd85 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/Columnar.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/Columnar.scala
@@ -58,8 +58,7 @@ class ColumnarRule {
  * [[MapPartitionsInRWithArrowExec]]. Eventually this should replace those 
implementations.
  */
 case class ColumnarToRowExec(child: SparkPlan) extends UnaryExecNode with 
CodegenSupport {
-  // supportsColumnar requires to be only called on driver side, see also 
SPARK-37779.
-  assert(TaskContext.get != null || child.supportsColumnar)
+  assert(child.supportsColumnar)
 
   override def output: Seq[Attribute] = child.output
 
diff --git 
a/sql/core/src/test/scala/org/apache/spark/sql/execution/SparkPlanSuite.scala 
b/sql/core/src/test/scala/org/apache/spark/sql/execution/SparkPlanSuite.scala
index dacf8fe..56fff11 100644
--- 
a/sql/core/src/test/scala/org/apache/spark/sql/execution/SparkPlanSuite.scala
+++ 
b/sql/core/src/test/scala/org/apache/spark/sql/execution/SparkPlanSuite.scala
@@ -88,23 +88,4 @@ class SparkPlanSuite extends QueryTest with 
SharedSparkSession {
   test("SPARK-30780 empty LocalTableScan should use RDD without partitions") {
     assert(LocalTableScanExec(Nil, Nil).execute().getNumPartitions == 0)
   }
-
-  test("SPARK-37779: ColumnarToRowExec should be canonicalizable after being 
(de)serialized") {
-    withSQLConf(SQLConf.USE_V1_SOURCE_LIST.key -> "parquet") {
-      withTempPath { path =>
-        spark.range(1).write.parquet(path.getAbsolutePath)
-        val df = spark.read.parquet(path.getAbsolutePath)
-        val columnarToRowExec =
-          df.queryExecution.executedPlan.collectFirst { case p: 
ColumnarToRowExec => p }.get
-        try {
-          spark.range(1).foreach { _ =>
-            columnarToRowExec.canonicalized
-            ()
-          }
-        } catch {
-          case e: Throwable => fail("ColumnarToRowExec was not 
canonicalizable", e)
-        }
-      }
-    }
-  }
 }

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@spark.apache.org
For additional commands, e-mail: commits-help@spark.apache.org

Reply via email to