Repository: spark
Updated Branches:
  refs/heads/branch-1.0 b6ba54651 -> f9734e270


[SPARK-1570] Fix classloading in JavaSQLContext.applySchema

I hit a class loading issue when running the JavaSparkSQL example using
spark-submit in local mode.
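
For context: Class.forName expects a class's *binary* name, in which nested
classes are delimited by '$', whereas getCanonicalName returns the
source-style dotted name. A minimal sketch of the failure mode, using a
hypothetical nested bean class (Outer/Inner are illustrative, not from the
Spark code base):

    class Outer {
      class Inner  // hypothetical nested bean class, for illustration only
    }

    object NameDemo extends App {
      val c = classOf[Outer#Inner]
      println(c.getName)           // "Outer$Inner" -- the binary name
      println(c.getCanonicalName)  // "Outer.Inner" -- the source-style name
      // Class.forName accepts only the binary name:
      Class.forName(c.getName)              // resolves the class
      // Class.forName(c.getCanonicalName)  // would throw ClassNotFoundException
    }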

Author: Kan Zhang <[email protected]>

Closes #484 from kanzhang/SPARK-1570 and squashes the following commits:

feaaeba [Kan Zhang] [SPARK-1570] Fix classloading in JavaSQLContext.applySchema
(cherry picked from commit ea8cea82a02099bb66f1e77b757e4d96cc31d6e2)

Signed-off-by: Patrick Wendell <[email protected]>


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/f9734e27
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/f9734e27
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/f9734e27

Branch: refs/heads/branch-1.0
Commit: f9734e270d5a39bc7569aeb3e07cce526de74be4
Parents: b6ba546
Author: Kan Zhang <[email protected]>
Authored: Tue Apr 22 15:05:12 2014 -0700
Committer: Patrick Wendell <[email protected]>
Committed: Tue Apr 22 15:05:23 2014 -0700

----------------------------------------------------------------------
 .../scala/org/apache/spark/sql/api/java/JavaSQLContext.scala   | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/f9734e27/sql/core/src/main/scala/org/apache/spark/sql/api/java/JavaSQLContext.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/api/java/JavaSQLContext.scala b/sql/core/src/main/scala/org/apache/spark/sql/api/java/JavaSQLContext.scala
index 26922f7..a734708 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/api/java/JavaSQLContext.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/api/java/JavaSQLContext.scala
@@ -28,6 +28,7 @@ import org.apache.spark.sql.catalyst.expressions.{AttributeReference, GenericRow
 import org.apache.spark.sql.catalyst.types._
 import org.apache.spark.sql.parquet.ParquetRelation
 import org.apache.spark.sql.execution.{ExistingRdd, SparkLogicalPlan}
+import org.apache.spark.util.Utils
 
 /**
  * The entry point for executing Spark SQL queries from a Java program.
@@ -84,10 +85,11 @@ class JavaSQLContext(sparkContext: JavaSparkContext) {
    */
   def applySchema(rdd: JavaRDD[_], beanClass: Class[_]): JavaSchemaRDD = {
     val schema = getSchema(beanClass)
-    val className = beanClass.getCanonicalName
+    val className = beanClass.getName
     val rowRdd = rdd.rdd.mapPartitions { iter =>
      // BeanInfo is not serializable so we must rediscover it remotely for each partition.
-      val localBeanInfo = Introspector.getBeanInfo(Class.forName(className))
+      val localBeanInfo = Introspector.getBeanInfo(
+        Class.forName(className, true, Utils.getContextOrSparkClassLoader))
       val extractors =
        localBeanInfo.getPropertyDescriptors.filterNot(_.getName == "class").map(_.getReadMethod)
 

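The second half of the fix matters under spark-submit: the application jar is
loaded into a classloader that is installed as the thread's context
classloader, while Spark core itself sits in a parent loader. The one-argument
Class.forName(className) resolves against the caller's (Spark's) defining
loader and therefore cannot see user-defined bean classes. A simplified sketch
of the lookup Utils.getContextOrSparkClassLoader performs (the real
implementation lives in org.apache.spark.util.Utils and may differ in detail):

    // Simplified sketch, not the exact Spark source: prefer the thread
    // context classloader (set by spark-submit to include the user jar),
    // falling back to the loader that loaded Spark itself.
    def getContextOrSparkClassLoader: ClassLoader =
      Option(Thread.currentThread().getContextClassLoader)
        .getOrElse(getClass.getClassLoader)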