Github user ifilonenko commented on a diff in the pull request:

    https://github.com/apache/spark/pull/20669#discussion_r174348342
  
    --- Diff: 
resource-managers/kubernetes/core/src/test/scala/org/apache/spark/deploy/k8s/submit/ClientSuite.scala
 ---
    @@ -108,62 +111,42 @@ class ClientSuite extends SparkFunSuite with 
BeforeAndAfter {
           SecondTestConfigurationStep.containerName)
       }
     
    -  test("The client should create the secondary Kubernetes resources.") {
    +  test("The client should create Kubernetes resources") {
         val submissionClient = new Client(
           submissionSteps,
           new SparkConf(false),
           kubernetesClient,
           false,
           "spark",
    -      loggingPodStatusWatcher)
    +      loggingPodStatusWatcher,
    +      KUBERNETES_RESOURCE_PREFIX)
         submissionClient.run()
         val createdPod = createdPodArgumentCaptor.getValue
         val otherCreatedResources = createdResourcesArgumentCaptor.getAllValues
    -    assert(otherCreatedResources.size === 1)
    -    val createdResource = 
Iterables.getOnlyElement(otherCreatedResources).asInstanceOf[Secret]
    -    assert(createdResource.getMetadata.getName === 
FirstTestConfigurationStep.secretName)
    -    assert(createdResource.getData.asScala ===
    -      Map(FirstTestConfigurationStep.secretKey -> 
FirstTestConfigurationStep.secretData))
    -    val ownerReference = 
Iterables.getOnlyElement(createdResource.getMetadata.getOwnerReferences)
    -    assert(ownerReference.getName === createdPod.getMetadata.getName)
    -    assert(ownerReference.getKind === DRIVER_POD_KIND)
    -    assert(ownerReference.getUid === DRIVER_POD_UID)
    -    assert(ownerReference.getApiVersion === DRIVER_POD_API_VERSION)
    -  }
    -
    -  test("The client should attach the driver container with the appropriate 
JVM options.") {
    -    val sparkConf = new SparkConf(false)
    -      .set("spark.logConf", "true")
    -      .set(
    -        org.apache.spark.internal.config.DRIVER_JAVA_OPTIONS,
    -          "-XX:+HeapDumpOnOutOfMemoryError -XX:+PrintGCDetails")
    -    val submissionClient = new Client(
    -      submissionSteps,
    -      sparkConf,
    -      kubernetesClient,
    -      false,
    -      "spark",
    -      loggingPodStatusWatcher)
    -    submissionClient.run()
    -    val createdPod = createdPodArgumentCaptor.getValue
    +    assert(otherCreatedResources.size === 2)
    +    otherCreatedResources.toArray.foreach{
    --- End diff --
    
    I thought it would be easier to go through all the resources in a 
single loop. Do you think this approach is better?


---

---------------------------------------------------------------------
To unsubscribe, e-mail: reviews-unsubscr...@spark.apache.org
For additional commands, e-mail: reviews-h...@spark.apache.org

Reply via email to