Repository: spark
Updated Branches:
  refs/heads/branch-1.0 cdcd46717 -> 84bbfbd84


[SPARK-2561][SQL] Fix apply schema

We need to use the analyzed attributes otherwise we end up with a tree that 
will never resolve.

Author: Michael Armbrust <[email protected]>

Closes #1470 from marmbrus/fixApplySchema and squashes the following commits:

f968195 [Michael Armbrust] Use analyzed attributes when applying the schema.
4969015 [Michael Armbrust] Add test case.

(cherry picked from commit 511a7314037219c23e824ea5363bf7f1df55bab3)
Signed-off-by: Michael Armbrust <[email protected]>


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/84bbfbd8
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/84bbfbd8
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/84bbfbd8

Branch: refs/heads/branch-1.0
Commit: 84bbfbd843a5c73ff4998efe8a2dabc2574f033a
Parents: cdcd467
Author: Michael Armbrust <[email protected]>
Authored: Mon Jul 21 18:18:17 2014 -0700
Committer: Michael Armbrust <[email protected]>
Committed: Mon Jul 21 18:18:35 2014 -0700

----------------------------------------------------------------------
 sql/core/src/main/scala/org/apache/spark/sql/SchemaRDD.scala   | 2 +-
 .../src/test/scala/org/apache/spark/sql/DslQuerySuite.scala    | 6 ++++++
 2 files changed, 7 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/84bbfbd8/sql/core/src/main/scala/org/apache/spark/sql/SchemaRDD.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SchemaRDD.scala b/sql/core/src/main/scala/org/apache/spark/sql/SchemaRDD.scala
index d5214a3..d0d2db2 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/SchemaRDD.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/SchemaRDD.scala
@@ -410,7 +410,7 @@ class SchemaRDD(
    * @group schema
    */
   private def applySchema(rdd: RDD[Row]): SchemaRDD = {
-    new SchemaRDD(sqlContext, SparkLogicalPlan(ExistingRdd(logicalPlan.output, rdd)))
+    new SchemaRDD(sqlContext, SparkLogicalPlan(ExistingRdd(queryExecution.analyzed.output, rdd)))
   }
 
   // =======================================================================

http://git-wip-us.apache.org/repos/asf/spark/blob/84bbfbd8/sql/core/src/test/scala/org/apache/spark/sql/DslQuerySuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DslQuerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DslQuerySuite.scala
index 05aac66..0a69cbc 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/DslQuerySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/DslQuerySuite.scala
@@ -34,6 +34,12 @@ class DslQuerySuite extends QueryTest {
       testData.collect().toSeq)
   }
 
+  test("repartition") {
+    checkAnswer(
+      testData.select('key).repartition(10).select('key),
+      testData.select('key).collect().toSeq)
+  }
+
   test("agg") {
     checkAnswer(
       testData2.groupBy('a)('a, Sum('b)),

Reply via email to