[ 
https://issues.apache.org/jira/browse/SPARK-9985?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14698348#comment-14698348
 ] 

Richard Garris commented on SPARK-9985:
---------------------------------------

val postgresqlDNS = "ec2-54-187-213-99.us-west-2.compute.amazonaws.com"
val url = "jdbc:postgresql://" + postgresqlDNS + "/streamingtest"


import java.util.Properties

val prop : Properties = new Properties()

Map(
  "driver" -> "org.postgresql.Driver",
  "user" -> "streamingtest",
  "password" -> "password"
).toList.map(x => prop.put(x._1,x._2))


val pp1 = sqlContext.read.format("com.databricks.spark.csv").option("header",
"true").option("delimiter", "\t").option("inferSchema", "true").option("quote",
" ").load("dbfs:/databricks-datasets/power-plant/data/Sheet2.tsv").selectExpr("DOUBLE(AT) AT", "DOUBLE(V) V", "DOUBLE(AP) AP", "DOUBLE(RH) RH", "DOUBLE(PE) PE")

// doesn't work -- but does work if you run Cell 2 first
pp1.write.format("jdbc")
  .mode("overwrite")
  .option("driver", "org.postgresql.Driver")
  .option("url", url)
  .jdbc(url, "power_plant_new", prop)



// Note that the above does NOT work... but if you run the code below then the
// code above it does work ..


// This works and then loads the driver ...
val pgTableList = sqlContext.load("jdbc", Map(
  "url" ->  url,
  "dbtable" -> "INFORMATION_SCHEMA.TABLES",
  "driver" -> "org.postgresql.Driver",
  "user" -> "streamingtest",
  "password" -> "password"
))

> DataFrameWriter jdbc method ignore options that have been set
> -------------------------------------------------------------
>
>                 Key: SPARK-9985
>                 URL: https://issues.apache.org/jira/browse/SPARK-9985
>             Project: Spark
>          Issue Type: Bug
>            Reporter: Richard Garris
>            Assignee: Shixiong Zhu
>
> I am working on an RDBMS to DataFrame conversion using Postgres and am 
> hitting a wall where every time I try to use the Postgresql JDBC driver I get 
> a java.sql.SQLException: No suitable driver found error
> Here is the stack trace:
> {code}
> at java.sql.DriverManager.getConnection(DriverManager.java:596)
> at java.sql.DriverManager.getConnection(DriverManager.java:187)
> at 
> org.apache.spark.sql.jdbc.package$JDBCWriteDetails$.savePartition(jdbc.scala:67)
> at 
> org.apache.spark.sql.jdbc.package$JDBCWriteDetails$$anonfun$saveTable$1.apply(jdbc.scala:189)
> at 
> org.apache.spark.sql.jdbc.package$JDBCWriteDetails$$anonfun$saveTable$1.apply(jdbc.scala:188)
> at 
> org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1$$anonfun$apply$29.apply(RDD.scala:878)
> at 
> org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1$$anonfun$apply$29.apply(RDD.scala:878)
> at 
> org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:1765)
> at 
> org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:1765)
> at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:63)
> at org.apache.spark.scheduler.Task.run(Task.scala:70)
> at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:213)
> at 
> java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
> at 
> java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
> at java.lang.Thread.run(Thread.java:745)
> {code}
> It appears that DataFrameWriter and DataFrameReader ignore options that we 
> set before invoking {{jdbc}}.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to