Github user mccheah commented on a diff in the pull request:
https://github.com/apache/spark/pull/21092#discussion_r186244157
--- Diff:
resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/KubernetesConf.scala
---
@@ -101,17 +112,29 @@ private[spark] object KubernetesConf {
appId: String,
mainAppResource: Option[MainAppResource],
mainClass: String,
-      appArgs: Array[String]): KubernetesConf[KubernetesDriverSpecificConf] = {
+      appArgs: Array[String],
+      maybePyFiles: Option[String]): KubernetesConf[KubernetesDriverSpecificConf] = {
val sparkConfWithMainAppJar = sparkConf.clone()
+ val additionalFiles = mutable.ArrayBuffer.empty[String]
mainAppResource.foreach {
- case JavaMainAppResource(res) =>
- val previousJars = sparkConf
- .getOption("spark.jars")
- .map(_.split(","))
- .getOrElse(Array.empty)
- if (!previousJars.contains(res)) {
- sparkConfWithMainAppJar.setJars(previousJars ++ Seq(res))
- }
+ case JavaMainAppResource(res) =>
+ val previousJars = sparkConf
+ .getOption("spark.jars")
+ .map(_.split(","))
+ .getOrElse(Array.empty)
+ if (!previousJars.contains(res)) {
+ sparkConfWithMainAppJar.setJars(previousJars ++ Seq(res))
+ }
+ case nonJVM: NonJVMResource =>
--- End diff --
Why can't we just match `PythonMainAppResource` immediately here - why the
two layers of matching?
---
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]