HyukjinKwon commented on a change in pull request #32397:
URL: https://github.com/apache/spark/pull/32397#discussion_r625747983
##########
File path: core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala
##########
@@ -308,21 +308,23 @@ private[spark] class SparkSubmit extends Logging {
args.repositories, args.ivyRepoPath, args.ivySettingsPath)
if (resolvedMavenCoordinates.nonEmpty) {
- // In K8s client mode, when in the driver, add resolved jars early as we might need
- // them at the submit time for artifact downloading.
- // For example we might use the dependencies for downloading
- // files from a Hadoop Compatible fs e.g. S3. In this case the user might pass:
- // --packages com.amazonaws:aws-java-sdk:1.7.4:org.apache.hadoop:hadoop-aws:2.7.6
- if (isKubernetesClusterModeDriver) {
- val loader = getSubmitClassLoader(sparkConf)
- for (jar <- resolvedMavenCoordinates) {
- addJarToClasspath(jar, loader)
- }
- } else if (isKubernetesCluster) {
+ if (isKubernetesCluster) {
// We need this in K8s cluster mode so that we can upload local deps
// via the k8s application, like in cluster mode driver
childClasspath ++= resolvedMavenCoordinates
} else {
+ // In K8s client mode, when in the driver, resolved jars are added in the driver.
+ //
+ // For example we might use the dependencies for downloading
Review comment:
```suggestion
// For example, we might use the dependencies for downloading
```
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]