GitHub user vanzin commented on a diff in the pull request:
https://github.com/apache/spark/pull/18630#discussion_r128648079
--- Diff: core/src/main/scala/org/apache/spark/deploy/worker/DriverWrapper.scala ---
@@ -66,4 +77,68 @@ object DriverWrapper {
         System.exit(-1)
     }
   }
+
+  // R and Python are not supported in cluster mode, so download the jars to the driver side
+  private def setupDependencies(loader: MutableURLClassLoader, userJar: String): Unit = {
+    val packagesExclusions = sys.props.get("spark.jars.excludes").orNull
+    val packages = sys.props.get("spark.jars.packages").orNull
+    val repositories = sys.props.get("spark.jars.repositories").orNull
+    val hadoopConf = new HadoopConfiguration()
+    val childClasspath = new ArrayBuffer[String]()
+    val ivyRepoPath = sys.props.get("spark.jars.ivy").orNull
+    var jars = sys.props.get("spark.jars").orNull
+
+    val exclusions: Seq[String] =
+      if (!StringUtils.isBlank(packagesExclusions)) {
+        packagesExclusions.split(",")
+      } else {
+        Nil
+      }
+
+    // Create the IvySettings, either load from file or build defaults
+    val ivySettings = sys.props.get("spark.jars.ivySettings").map { ivySettingsFile =>
+      SparkSubmitUtils.loadIvySettings(ivySettingsFile, Option(repositories),
+        Option(ivyRepoPath))
+    }.getOrElse {
+      SparkSubmitUtils.buildIvySettings(Option(repositories), Option(ivyRepoPath))
+    }
+
+    val resolvedMavenCoordinates = SparkSubmitUtils.resolveMavenCoordinates(packages,
+      ivySettings, exclusions = exclusions)
+
+    if (!StringUtils.isBlank(resolvedMavenCoordinates)) {
+      jars = SparkSubmit.mergeFileLists(jars, resolvedMavenCoordinates)
+    }
+
+    val targetDir = Files.createTempDirectory("tmp").toFile
+    // scalastyle:off runtimeaddshutdownhook
+    Runtime.getRuntime.addShutdownHook(new Thread() {
+      override def run(): Unit = {
+        FileUtils.deleteQuietly(targetDir)
+      }
+    })
+    // scalastyle:on runtimeaddshutdownhook
+
+    val sparkProperties = new mutable.HashMap[String, String]()
+    val securityProperties = List("spark.ssl.fs.trustStore", "spark.ssl.trustStore",
+      "spark.ssl.fs.trustStorePassword", "spark.ssl.trustStorePassword",
+      "spark.ssl.fs.protocol", "spark.ssl.protocol")
+
+    // Copy any SSL-related properties that are set into sparkProperties.
+    securityProperties.foreach { pName =>
+      sys.props.get(pName).foreach { pValue => sparkProperties.put(pName, pValue) }
+    }
+
+    jars = Option(jars).map(SparkSubmit.resolveGlobPaths(_, hadoopConf)).orNull
+
+    // Filter out the user jar; go through Option so an unset spark.jars doesn't NPE on split().
+    jars = Option(jars)
+      .map(_.split(",").filterNot(_.contains(userJar.split("/").last)).mkString(","))
+      .orNull
+    jars = Option(jars)
+      .map(SparkSubmit.downloadFileList(_, targetDir, sparkProperties, hadoopConf)).orNull
+
+    if (jars != null) {
+      childClasspath ++= jars.split(",")
+    }
+
+    for (jar <- childClasspath) {
+      SparkSubmit.addJarToClasspath(jar, loader)
--- End diff ---
So now all the stuff in `spark.jars` that actually exists on the node will
be automatically added to the driver's classpath, which is a change in behavior
from before. It's probably ok, but please call out this kind of thing in the
commit message.
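
To make the change concrete (a hypothetical example; the jar path and class name are made up), after this patch user code running through `DriverWrapper` can load classes straight from a jar listed in `spark.jars`:

```scala
// Submitted in standalone cluster mode with, hypothetically:
//   --conf spark.jars=/opt/libs/extra-utils.jar
object ClasspathCheck {
  def main(args: Array[String]): Unit = {
    val loader = Thread.currentThread().getContextClassLoader
    // Before this patch the lookup below would throw ClassNotFoundException,
    // since DriverWrapper never added spark.jars entries to its classloader.
    // With setupDependencies() in place, the lookup succeeds.
    val clazz = loader.loadClass("com.example.ExtraUtils")
    println(s"Loaded ${clazz.getName} from the driver classpath")
  }
}
```

Fine to keep, just worth a line in the commit message so people know where to look if it changes class resolution for existing apps.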