dongjoon-hyun commented on a change in pull request #32874:
URL: https://github.com/apache/spark/pull/32874#discussion_r649665934



##########
File path: resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/submit/KubernetesClientApplication.scala
##########
@@ -100,45 +97,56 @@ private[spark] class Client(
     conf: KubernetesDriverConf,
     builder: KubernetesDriverBuilder,
     kubernetesClient: KubernetesClient,
-    watcher: LoggingPodStatusWatcher) extends Logging {
+    watcher: LoggingPodStatusWatcher)
+    extends Logging {
 
   def run(): Unit = {
     val resolvedDriverSpec = builder.buildFromFeatures(conf, kubernetesClient)
     val configMapName = KubernetesClientUtils.configMapNameDriver
-    val confFilesMap = KubernetesClientUtils.buildSparkConfDirFilesMap(configMapName,
-      conf.sparkConf, resolvedDriverSpec.systemProperties)
+    val confFilesMap = KubernetesClientUtils.buildSparkConfDirFilesMap(
+      configMapName,
+      conf.sparkConf,
+      resolvedDriverSpec.systemProperties)
     val configMap = KubernetesClientUtils.buildConfigMap(configMapName, confFilesMap)
 
     // The include of the ENV_VAR for "SPARK_CONF_DIR" is to allow for the
     // Spark command builder to pickup on the Java Options present in the ConfigMap
     val resolvedDriverContainer = new ContainerBuilder(resolvedDriverSpec.pod.container)
       .addNewEnv()
-        .withName(ENV_SPARK_CONF_DIR)
-        .withValue(SPARK_CONF_DIR_INTERNAL)
-        .endEnv()
+      .withName(ENV_SPARK_CONF_DIR)
+      .withValue(SPARK_CONF_DIR_INTERNAL)
+      .endEnv()
       .addNewVolumeMount()
-        .withName(SPARK_CONF_VOLUME_DRIVER)
-        .withMountPath(SPARK_CONF_DIR_INTERNAL)
-        .endVolumeMount()
+      .withName(SPARK_CONF_VOLUME_DRIVER)
+      .withMountPath(SPARK_CONF_DIR_INTERNAL)
+      .endVolumeMount()

Review comment:
       Could you revert all whitespace changes? We usually don't mix a 
style-only change and a new feature in a single PR.

##########
File path: resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/submit/KubernetesClientApplication.scala
##########
@@ -100,45 +97,56 @@ private[spark] class Client(
     conf: KubernetesDriverConf,
     builder: KubernetesDriverBuilder,
     kubernetesClient: KubernetesClient,
-    watcher: LoggingPodStatusWatcher) extends Logging {
+    watcher: LoggingPodStatusWatcher)
+    extends Logging {
 
   def run(): Unit = {
     val resolvedDriverSpec = builder.buildFromFeatures(conf, kubernetesClient)
     val configMapName = KubernetesClientUtils.configMapNameDriver
-    val confFilesMap = KubernetesClientUtils.buildSparkConfDirFilesMap(configMapName,
-      conf.sparkConf, resolvedDriverSpec.systemProperties)
+    val confFilesMap = KubernetesClientUtils.buildSparkConfDirFilesMap(
+      configMapName,
+      conf.sparkConf,
+      resolvedDriverSpec.systemProperties)
     val configMap = KubernetesClientUtils.buildConfigMap(configMapName, confFilesMap)
 
     // The include of the ENV_VAR for "SPARK_CONF_DIR" is to allow for the
     // Spark command builder to pickup on the Java Options present in the ConfigMap
     val resolvedDriverContainer = new ContainerBuilder(resolvedDriverSpec.pod.container)
       .addNewEnv()
-        .withName(ENV_SPARK_CONF_DIR)
-        .withValue(SPARK_CONF_DIR_INTERNAL)
-        .endEnv()
+      .withName(ENV_SPARK_CONF_DIR)
+      .withValue(SPARK_CONF_DIR_INTERNAL)
+      .endEnv()
       .addNewVolumeMount()
-        .withName(SPARK_CONF_VOLUME_DRIVER)
-        .withMountPath(SPARK_CONF_DIR_INTERNAL)
-        .endVolumeMount()
+      .withName(SPARK_CONF_VOLUME_DRIVER)
+      .withMountPath(SPARK_CONF_DIR_INTERNAL)
+      .endVolumeMount()
       .build()
     val resolvedDriverPod = new PodBuilder(resolvedDriverSpec.pod.pod)
       .editSpec()
-        .addToContainers(resolvedDriverContainer)
-        .addNewVolume()
-          .withName(SPARK_CONF_VOLUME_DRIVER)
-          .withNewConfigMap()
-            .withItems(KubernetesClientUtils.buildKeyToPathObjects(confFilesMap).asJava)
-            .withName(configMapName)
-            .endConfigMap()
-          .endVolume()
-        .endSpec()
+      .addToContainers(resolvedDriverContainer)
+      .addNewVolume()
+      .withName(SPARK_CONF_VOLUME_DRIVER)
+      .withNewConfigMap()
+      .withItems(KubernetesClientUtils.buildKeyToPathObjects(confFilesMap).asJava)
+      .withName(configMapName)
+      .endConfigMap()
+      .endVolume()
+      .endSpec()

Review comment:
       Also, this new style doesn't look better.
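
       The original nested indentation mirrors the builder's scopes, which is
easier to follow, e.g. (quoting the removed lines above):
   ```
   .addNewEnv()
     .withName(ENV_SPARK_CONF_DIR)
     .withValue(SPARK_CONF_DIR_INTERNAL)
     .endEnv()
   ```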

##########
File path: resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/submit/KubernetesClientApplication.scala
##########
@@ -77,12 +77,9 @@ private[spark] object ClientArguments {
 
     require(mainClass.isDefined, "Main class must be specified via --main-class")
 
-    ClientArguments(
-      mainAppResource,
-      mainClass.get,
-      driverArgs.toArray,
-      proxyUser)
+    ClientArguments(mainAppResource, mainClass.get, driverArgs.toArray, proxyUser)

Review comment:
       Why did you change this, @pingsutw?

##########
File path: resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/submit/KubernetesClientApplication.scala
##########
@@ -77,12 +77,9 @@ private[spark] object ClientArguments {
 
     require(mainClass.isDefined, "Main class must be specified via --main-class")
 
-    ClientArguments(
-      mainAppResource,
-      mainClass.get,
-      driverArgs.toArray,
-      proxyUser)
+    ClientArguments(mainAppResource, mainClass.get, driverArgs.toArray, proxyUser)

Review comment:
       Why did you change this, @pingsutw? This is unrelated to changing an error message.

##########
File path: resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/submit/KubernetesClientApplication.scala
##########
@@ -77,12 +77,9 @@ private[spark] object ClientArguments {
 
     require(mainClass.isDefined, "Main class must be specified via --main-class")
 
-    ClientArguments(
-      mainAppResource,
-      mainClass.get,
-      driverArgs.toArray,
-      proxyUser)
+    ClientArguments(mainAppResource, mainClass.get, driverArgs.toArray, proxyUser)
   }
+

Review comment:
       ditto.

##########
File path: resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/submit/KubernetesClientApplication.scala
##########
@@ -100,45 +97,56 @@ private[spark] class Client(
     conf: KubernetesDriverConf,
     builder: KubernetesDriverBuilder,
     kubernetesClient: KubernetesClient,
-    watcher: LoggingPodStatusWatcher) extends Logging {
+    watcher: LoggingPodStatusWatcher)
+    extends Logging {

Review comment:
       The new indentation is wrong. We use 2 spaces for `extends`.
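
       For reference, a minimal sketch of the expected layout (assuming the
usual Spark convention: 4-space indent for constructor parameters, 2-space
indent for `extends`):
   ```
   private[spark] class Client(
       conf: KubernetesDriverConf,
       builder: KubernetesDriverBuilder,
       kubernetesClient: KubernetesClient,
       watcher: LoggingPodStatusWatcher)
     extends Logging {
     // ... body unchanged ...
   }
   ```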

##########
File path: resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/submit/KubernetesClientApplication.scala
##########
@@ -100,45 +97,56 @@ private[spark] class Client(
     conf: KubernetesDriverConf,
     builder: KubernetesDriverBuilder,
     kubernetesClient: KubernetesClient,
-    watcher: LoggingPodStatusWatcher) extends Logging {
+    watcher: LoggingPodStatusWatcher)
+    extends Logging {
 
   def run(): Unit = {
     val resolvedDriverSpec = builder.buildFromFeatures(conf, kubernetesClient)
     val configMapName = KubernetesClientUtils.configMapNameDriver
-    val confFilesMap = KubernetesClientUtils.buildSparkConfDirFilesMap(configMapName,
-      conf.sparkConf, resolvedDriverSpec.systemProperties)
+    val confFilesMap = KubernetesClientUtils.buildSparkConfDirFilesMap(
+      configMapName,
+      conf.sparkConf,
+      resolvedDriverSpec.systemProperties)

Review comment:
       Please revert this irrelevant change.

##########
File path: resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/submit/KubernetesClientApplication.scala
##########
@@ -136,9 +136,17 @@ private[spark] class Client(
     val driverPodName = resolvedDriverPod.getMetadata.getName
 
     var watch: Watch = null
-    val createdDriverPod = kubernetesClient.pods().create(resolvedDriverPod)
+    var createdDriverPod: Pod = null
     try {
-      val otherKubernetesResources = resolvedDriverSpec.driverKubernetesResources ++ Seq(configMap)
+      createdDriverPod = kubernetesClient.pods().create(resolvedDriverPod)
+    } catch {
+      case NonFatal(e) =>
+        logError("Fail to create driver pod, you may use wrong master URL.")
+        throw e
+    }
+    try {
+      val otherKubernetesResources =
+        resolvedDriverSpec.driverKubernetesResources ++ Seq(configMap)

Review comment:
       Could you keep this as the original single line?

##########
File path: resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/submit/KubernetesClientApplication.scala
##########
@@ -136,9 +136,17 @@ private[spark] class Client(
     val driverPodName = resolvedDriverPod.getMetadata.getName
 
     var watch: Watch = null
-    val createdDriverPod = kubernetesClient.pods().create(resolvedDriverPod)
+    var createdDriverPod: Pod = null
     try {
-      val otherKubernetesResources = resolvedDriverSpec.driverKubernetesResources ++ Seq(configMap)
+      createdDriverPod = kubernetesClient.pods().create(resolvedDriverPod)
+    } catch {
+      case NonFatal(e) =>
+        logError("Fail to create driver pod, you may use wrong master URL.")

Review comment:
       BTW, the error message looks misleading. The following can fail for
many reasons, and many user failures are due to namespace permissions rather
than a wrong master URL.
   ```
   kubernetesClient.pods().create(resolvedDriverPod)
   ```
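
       A broader message would be more accurate, e.g. (a minimal sketch; the
exact wording is only an illustration, not a required fix):
   ```
   try {
     createdDriverPod = kubernetesClient.pods().create(resolvedDriverPod)
   } catch {
     case NonFatal(e) =>
       // Pod creation can fail for many reasons (wrong master URL, missing
       // namespace permissions, quotas, ...), so avoid blaming one cause.
       logError(s"Failed to create the driver pod '$driverPodName'. " +
         "Please check the master URL and the namespace permissions " +
         "(e.g. `kubectl auth can-i create pods`).")
       throw e
   }
   ```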



