Repository: spark
Updated Branches:
  refs/heads/branch-1.6 b6f607569 -> f445cce94


Revert "[SPARK-16664][SQL] Fix persist call on Data frames with more than 200…"

This reverts commit 15abbf9d26fd80ae44d6aaee4b435ec4dc08aa95.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/f445cce9
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/f445cce9
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/f445cce9

Branch: refs/heads/branch-1.6
Commit: f445cce94e525fd8005948d69c40c81e31140b1c
Parents: b6f6075
Author: Sean Owen <[email protected]>
Authored: Fri Jul 29 05:40:58 2016 -0700
Committer: Sean Owen <[email protected]>
Committed: Fri Jul 29 05:40:58 2016 -0700

----------------------------------------------------------------------
 .../sql/execution/columnar/GenerateColumnAccessor.scala      | 4 ++--
 .../src/test/scala/org/apache/spark/sql/DataFrameSuite.scala | 8 --------
 .../sql/execution/columnar/InMemoryColumnarQuerySuite.scala  | 3 +--
 3 files changed, 3 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/f445cce9/sql/core/src/main/scala/org/apache/spark/sql/execution/columnar/GenerateColumnAccessor.scala
----------------------------------------------------------------------
diff --git 
a/sql/core/src/main/scala/org/apache/spark/sql/execution/columnar/GenerateColumnAccessor.scala
 
b/sql/core/src/main/scala/org/apache/spark/sql/execution/columnar/GenerateColumnAccessor.scala
index a040208..4f6f58b 100644
--- 
a/sql/core/src/main/scala/org/apache/spark/sql/execution/columnar/GenerateColumnAccessor.scala
+++ 
b/sql/core/src/main/scala/org/apache/spark/sql/execution/columnar/GenerateColumnAccessor.scala
@@ -129,7 +129,7 @@ object GenerateColumnAccessor extends 
CodeGenerator[Seq[DataType], ColumnarItera
         val groupedAccessorsItr = 
initializeAccessors.grouped(numberOfStatementsThreshold)
         val groupedExtractorsItr = 
extractors.grouped(numberOfStatementsThreshold)
         var groupedAccessorsLength = 0
-        groupedAccessorsItr.zipWithIndex.foreach { case (body, i) =>
+        groupedAccessorsItr.zipWithIndex.map { case (body, i) =>
           groupedAccessorsLength += 1
           val funcName = s"accessors$i"
           val funcCode = s"""
@@ -139,7 +139,7 @@ object GenerateColumnAccessor extends 
CodeGenerator[Seq[DataType], ColumnarItera
            """.stripMargin
           ctx.addNewFunction(funcName, funcCode)
         }
-        groupedExtractorsItr.zipWithIndex.foreach { case (body, i) =>
+        groupedExtractorsItr.zipWithIndex.map { case (body, i) =>
           val funcName = s"extractors$i"
           val funcCode = s"""
              |private void $funcName() {

http://git-wip-us.apache.org/repos/asf/spark/blob/f445cce9/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala 
b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala
index bf2280a..8994556 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala
@@ -1186,12 +1186,4 @@ class DataFrameSuite extends QueryTest with 
SharedSQLContext {
       Seq(1 -> "a").toDF("i", "j").filter($"i".cast(StringType) === "1"),
       Row(1, "a"))
   }
-
-  test("SPARK-16664: persist with more than 200 columns") {
-    val size = 201L
-    val rdd = sparkContext.makeRDD(Seq(Row.fromSeq(Seq.range(0, size))))
-    val schemas = List.range(0, size).map(a => StructField("name" + a, 
LongType, true))
-    val df = spark.createDataFrame(rdd, StructType(schemas), false)
-    assert(df.persist.take(1).apply(0).toSeq(100).asInstanceOf[Long] == 100)
-  }
 }

http://git-wip-us.apache.org/repos/asf/spark/blob/f445cce9/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/InMemoryColumnarQuerySuite.scala
----------------------------------------------------------------------
diff --git 
a/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/InMemoryColumnarQuerySuite.scala
 
b/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/InMemoryColumnarQuerySuite.scala
index 0029b87..557415b 100644
--- 
a/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/InMemoryColumnarQuerySuite.scala
+++ 
b/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/InMemoryColumnarQuerySuite.scala
@@ -225,8 +225,7 @@ class InMemoryColumnarQuerySuite extends QueryTest with 
SharedSQLContext {
     val columnTypes1 = List.fill(length1)(IntegerType)
     val columnarIterator1 = GenerateColumnAccessor.generate(columnTypes1)
 
-    // SPARK-16664: the limit of janino is 8117
-    val length2 = 8117
+    val length2 = 10000
     val columnTypes2 = List.fill(length2)(IntegerType)
     val columnarIterator2 = GenerateColumnAccessor.generate(columnTypes2)
   }


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to