This is an automated email from the ASF dual-hosted git repository.
dongjoon pushed a commit to branch branch-3.4
in repository https://gitbox.apache.org/repos/asf/spark.git
The following commit(s) were added to refs/heads/branch-3.4 by this push:
new 66624410b24 [SPARK-43342][K8S] Revert SPARK-39006 Show a directional error message for executor PVC dynamic allocation failure
66624410b24 is described below
commit 66624410b2462768e618cc43cd13e1117d931e80
Author: Qian.Sun <[email protected]>
AuthorDate: Sat May 6 18:26:49 2023 -0700
[SPARK-43342][K8S] Revert SPARK-39006 Show a directional error message for executor PVC dynamic allocation failure
### What changes were proposed in this pull request?
This reverts commit b065c945fe27dd5869b39bfeaad8e2b23a8835b5.
### Why are the changes needed?
To remove the regression from SPARK-39006.
### Does this PR introduce _any_ user-facing change?
No.
### How was this patch tested?
Pass the CIs.
Closes #41057
Closes #41069 from dongjoon-hyun/SPARK-43342.
Authored-by: Qian.Sun <[email protected]>
Signed-off-by: Dongjoon Hyun <[email protected]>
(cherry picked from commit 3ba1fa3678a4fcc0aaba8abb0d4312e8fb42efba)
Signed-off-by: Dongjoon Hyun <[email protected]>
---
.../k8s/features/MountVolumesFeatureStep.scala | 16 ---------
.../features/MountVolumesFeatureStepSuite.scala | 39 +---------------------
2 files changed, 1 insertion(+), 54 deletions(-)
diff --git a/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/features/MountVolumesFeatureStep.scala b/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/features/MountVolumesFeatureStep.scala
index d47024ca9fe..78dd6ec21ed 100644
--- a/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/features/MountVolumesFeatureStep.scala
+++ b/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/features/MountVolumesFeatureStep.scala
@@ -23,7 +23,6 @@ import io.fabric8.kubernetes.api.model._
import org.apache.spark.deploy.k8s._
import org.apache.spark.deploy.k8s.Constants.{ENV_EXECUTOR_ID, SPARK_APP_ID_LABEL}
-import org.apache.spark.internal.config.EXECUTOR_INSTANCES
private[spark] class MountVolumesFeatureStep(conf: KubernetesConf)
extends KubernetesFeatureConfigStep {
@@ -72,7 +71,6 @@ private[spark] class MountVolumesFeatureStep(conf: KubernetesConf)
case KubernetesPVCVolumeConf(claimNameTemplate, storageClass, size) =>
val claimName = conf match {
case c: KubernetesExecutorConf =>
- checkPVCClaimName(claimNameTemplate)
claimNameTemplate
.replaceAll(PVC_ON_DEMAND,
s"${conf.resourceNamePrefix}-exec-${c.executorId}$PVC_POSTFIX-$i")
@@ -122,20 +120,6 @@ private[spark] class MountVolumesFeatureStep(conf: KubernetesConf)
override def getAdditionalKubernetesResources(): Seq[HasMetadata] = {
additionalResources.toSeq
}
-
- private def checkPVCClaimName(claimName: String): Unit = {
- val executorInstances = conf.get(EXECUTOR_INSTANCES)
- if (executorInstances.isDefined && executorInstances.get > 1) {
- // PVC ClaimName should contain OnDemand or SPARK_EXECUTOR_ID
- // when requiring multiple executors.
- // Else, spark continues to try to create the executor pod.
- if (!claimName.contains(PVC_ON_DEMAND) && !claimName.contains(ENV_EXECUTOR_ID)) {
- throw new IllegalArgumentException(s"PVC ClaimName: $claimName " +
- s"should contain $PVC_ON_DEMAND or $ENV_EXECUTOR_ID " +
- "when requiring multiple executors")
- }
- }
- }
}
private[spark] object MountVolumesFeatureStep {
diff --git a/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/deploy/k8s/features/MountVolumesFeatureStepSuite.scala b/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/deploy/k8s/features/MountVolumesFeatureStepSuite.scala
index e428e54d661..468d1dde9fb 100644
--- a/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/deploy/k8s/features/MountVolumesFeatureStepSuite.scala
+++ b/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/deploy/k8s/features/MountVolumesFeatureStepSuite.scala
@@ -16,13 +16,10 @@
*/
package org.apache.spark.deploy.k8s.features
-import java.util.UUID
-
import scala.collection.JavaConverters._
-import org.apache.spark.{SparkConf, SparkFunSuite}
+import org.apache.spark.SparkFunSuite
import org.apache.spark.deploy.k8s._
-import org.apache.spark.internal.config.EXECUTOR_INSTANCES
class MountVolumesFeatureStepSuite extends SparkFunSuite {
test("Mounts hostPath volumes") {
@@ -151,40 +148,6 @@ class MountVolumesFeatureStepSuite extends SparkFunSuite {
assert(executorPVC.getClaimName.endsWith("-exec-1-pvc-0"))
}
- test("SPARK-39006: Check PVC ClaimName") {
- val claimName = s"pvc-${UUID.randomUUID().toString}"
- val volumeConf = KubernetesVolumeSpec(
- "testVolume",
- "/tmp",
- "",
- mountReadOnly = true,
- KubernetesPVCVolumeConf(claimName)
- )
- // Create pvc without specified claimName unsuccessfully when requiring multiple executors
- val conf = new SparkConf().set(EXECUTOR_INSTANCES, 2)
- var executorConf =
- KubernetesTestConf.createExecutorConf(sparkConf = conf, volumes = Seq(volumeConf))
- var executorStep = new MountVolumesFeatureStep(executorConf)
- assertThrows[IllegalArgumentException] {
- executorStep.configurePod(SparkPod.initialPod())
- }
- assert(intercept[IllegalArgumentException] {
- executorStep.configurePod(SparkPod.initialPod())
- }.getMessage.equals(s"PVC ClaimName: $claimName " +
- "should contain OnDemand or SPARK_EXECUTOR_ID when requiring multiple executors"))
-
- // Create and mount pvc with any claimName successfully when requiring one executor
- conf.set(EXECUTOR_INSTANCES, 1)
- executorConf =
- KubernetesTestConf.createExecutorConf(sparkConf = conf, volumes = Seq(volumeConf))
- executorStep = new MountVolumesFeatureStep(executorConf)
- val executorPod = executorStep.configurePod(SparkPod.initialPod())
-
- assert(executorPod.pod.getSpec.getVolumes.size() === 1)
- val executorPVC = executorPod.pod.getSpec.getVolumes.get(0).getPersistentVolumeClaim
- assert(executorPVC.getClaimName.equals(claimName))
- }
-
test("Mounts emptyDir") {
val volumeConf = KubernetesVolumeSpec(
"testVolume",
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]