[
https://issues.apache.org/jira/browse/SPARK-3284?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
]
Pravesh Jain updated SPARK-3284:
--------------------------------
Description:
object parquet {
case class Person(name: String, age: Int)
def main(args: Array[String]) {
val sparkConf = new
SparkConf().setMaster("local").setAppName("HdfsWordCount")
val sc = new SparkContext(sparkConf)
val sqlContext = new org.apache.spark.sql.SQLContext(sc)
// createSchemaRDD is used to implicitly convert an RDD to a SchemaRDD.
import sqlContext.createSchemaRDD
val people =
sc.textFile("C:/Users/pravesh.jain/Desktop/people/people.txt").map(_.split(",")).map(p
=> Person(p(0), p(1).trim.toInt))
people.saveAsParquetFile("C:/Users/pravesh.jain/Desktop/people/people.parquet")
val parquetFile =
sqlContext.parquetFile("C:/Users/pravesh.jain/Desktop/people/people.parquet")
}
}
gives the error
Exception in thread "main" java.lang.NullPointerException at
org.apache.spark.parquet$.main(parquet.scala:16)
which corresponds to the saveAsParquetFile call.
This works fine on Linux, but running it from Eclipse on Windows gives the error.
was:
object parquet {
case class Person(name: String, age: Int)
def main(args: Array[String]) {
val sparkConf = new
SparkConf().setMaster("local").setAppName("HdfsWordCount")
val sc = new SparkContext(sparkConf)
val sqlContext = new org.apache.spark.sql.SQLContext(sc)
// createSchemaRDD is used to implicitly convert an RDD to a SchemaRDD.
import sqlContext.createSchemaRDD
val people =
sc.textFile("C:/Users/pravesh.jain/Desktop/people/people.txt").map(_.split(",")).map(p
=> Person(p(0), p(1).trim.toInt))
people.saveAsParquetFile("C:/Users/pravesh.jain/Desktop/people/people.parquet")
val parquetFile =
sqlContext.parquetFile("C:/Users/pravesh.jain/Desktop/people/people.parquet")
}
}
gives the error
Exception in thread "main" java.lang.NullPointerException at
org.apache.spark.parquet$.main(parquet.scala:16)
which corresponds to the saveAsParquetFile call.
> saveAsParquetFile not working on windows
> ----------------------------------------
>
> Key: SPARK-3284
> URL: https://issues.apache.org/jira/browse/SPARK-3284
> Project: Spark
> Issue Type: Bug
> Affects Versions: 1.0.2
> Environment: Windows
> Reporter: Pravesh Jain
> Priority: Minor
>
> object parquet {
> case class Person(name: String, age: Int)
> def main(args: Array[String]) {
> val sparkConf = new
> SparkConf().setMaster("local").setAppName("HdfsWordCount")
> val sc = new SparkContext(sparkConf)
> val sqlContext = new org.apache.spark.sql.SQLContext(sc)
> // createSchemaRDD is used to implicitly convert an RDD to a SchemaRDD.
> import sqlContext.createSchemaRDD
> val people =
> sc.textFile("C:/Users/pravesh.jain/Desktop/people/people.txt").map(_.split(",")).map(p
> => Person(p(0), p(1).trim.toInt))
>
> people.saveAsParquetFile("C:/Users/pravesh.jain/Desktop/people/people.parquet")
> val parquetFile =
> sqlContext.parquetFile("C:/Users/pravesh.jain/Desktop/people/people.parquet")
> }
> }
> gives the error
> Exception in thread "main" java.lang.NullPointerException at
> org.apache.spark.parquet$.main(parquet.scala:16)
> which corresponds to the saveAsParquetFile call.
> This works fine on Linux, but running it from Eclipse on Windows gives the error.
--
This message was sent by Atlassian JIRA
(v6.2#6252)
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]