Github user baluchicken commented on a diff in the pull request:

    https://github.com/apache/spark/pull/21067#discussion_r194774780
  
    --- Diff: resource-managers/kubernetes/core/src/main/scala/org/apache/spark/scheduler/cluster/k8s/KubernetesClusterSchedulerBackend.scala ---
    @@ -59,16 +59,18 @@ private[spark] class KubernetesClusterSchedulerBackend(
     
       private val kubernetesNamespace = conf.get(KUBERNETES_NAMESPACE)
     
    -  private val kubernetesDriverPodName = conf
    -    .get(KUBERNETES_DRIVER_POD_NAME)
    -    .getOrElse(throw new SparkException("Must specify the driver pod name"))
    +  private val kubernetesDriverJobName = conf
    +    .get(KUBERNETES_DRIVER_JOB_NAME)
    +    .getOrElse(throw new SparkException("Must specify the driver job name"))
       private implicit val requestExecutorContext = ExecutionContext.fromExecutorService(
         requestExecutorsService)
     
    -  private val driverPod = kubernetesClient.pods()
    -    .inNamespace(kubernetesNamespace)
    -    .withName(kubernetesDriverPodName)
    -    .get()
    +  private val driverPod: Pod = {
    +    val pods = kubernetesClient.pods()
    +      .inNamespace(kubernetesNamespace).withLabel("job-name", kubernetesDriverJobName).list()
    --- End diff ---
    
    Okay, I will update the filter with an additional check that the found pod is in the Running state.
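    
    For reference, a minimal sketch of what that filtered lookup could look like with the fabric8 Kubernetes client; the resolveDriverPod helper name and the error message are illustrative assumptions, not the final patch:
    
        import io.fabric8.kubernetes.api.model.Pod
        import io.fabric8.kubernetes.client.KubernetesClient
        
        import scala.collection.JavaConverters._
        
        import org.apache.spark.SparkException
        
        // Hypothetical helper: resolve the driver pod created by the driver Job,
        // keeping only a pod whose phase is "Running" so that completed or failed
        // pods left behind by the Job controller are ignored.
        private def resolveDriverPod(
            kubernetesClient: KubernetesClient,
            kubernetesNamespace: String,
            kubernetesDriverJobName: String): Pod = {
          kubernetesClient.pods()
            .inNamespace(kubernetesNamespace)
            .withLabel("job-name", kubernetesDriverJobName)
            .list()
            .getItems
            .asScala
            .find(p => Option(p.getStatus).map(_.getPhase).contains("Running"))
            .getOrElse(throw new SparkException(
              s"No running driver pod found for job $kubernetesDriverJobName"))
        }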


---
