This is an automated email from the ASF dual-hosted git repository.
dongjoon pushed a commit to branch branch-2.4
in repository https://gitbox.apache.org/repos/asf/spark.git
The following commit(s) were added to refs/heads/branch-2.4 by this push:
new fa78e68 [SPARK-34407][K8S] KubernetesClusterSchedulerBackend.stop
should clean up K8s resources
fa78e68 is described below
commit fa78e68c2f28c5bac056ad0402cba110b3faf50c
Author: Dongjoon Hyun <[email protected]>
AuthorDate: Mon Feb 8 21:47:23 2021 -0800
[SPARK-34407][K8S] KubernetesClusterSchedulerBackend.stop should clean up
K8s resources
This PR aims to fix `KubernetesClusterSchedulerBackend.stop` to wrap
`super.stop` with `Utils.tryLogNonFatalError`.
[CoarseGrainedSchedulerBackend.stop](https://github.com/apache/spark/blob/master/core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedSchedulerBackend.scala#L559)
may throw `SparkException` and this causes K8s resource (pod and configmap)
leakage.
No. This is a bug fix.
Pass the CI with the newly added test case.
Closes #31533 from dongjoon-hyun/SPARK-34407.
Authored-by: Dongjoon Hyun <[email protected]>
Signed-off-by: Dongjoon Hyun <[email protected]>
(cherry picked from commit ea339c38b43c59931257386efdd490507f7de64d)
Signed-off-by: Dongjoon Hyun <[email protected]>
---
.../cluster/k8s/KubernetesClusterSchedulerBackend.scala | 6 +++++-
.../cluster/k8s/KubernetesClusterSchedulerBackendSuite.scala | 11 ++++++++++-
2 files changed, 15 insertions(+), 2 deletions(-)
diff --git a/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/scheduler/cluster/k8s/KubernetesClusterSchedulerBackend.scala b/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/scheduler/cluster/k8s/KubernetesClusterSchedulerBackend.scala
index bdd4134..bc89002 100644
--- a/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/scheduler/cluster/k8s/KubernetesClusterSchedulerBackend.scala
+++ b/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/scheduler/cluster/k8s/KubernetesClusterSchedulerBackend.scala
@@ -81,7 +81,11 @@ private[spark] class KubernetesClusterSchedulerBackend(
}
override def stop(): Unit = {
- super.stop()
+ // When `CoarseGrainedSchedulerBackend.stop` throws `SparkException`,
+ // K8s cluster scheduler should log and proceed in order to delete the K8s cluster resources.
+ Utils.tryLogNonFatalError {
+ super.stop()
+ }
Utils.tryLogNonFatalError {
snapshotsStore.stop()
diff --git a/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/scheduler/cluster/k8s/KubernetesClusterSchedulerBackendSuite.scala b/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/scheduler/cluster/k8s/KubernetesClusterSchedulerBackendSuite.scala
index fbff1d7..90dfc0c 100644
--- a/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/scheduler/cluster/k8s/KubernetesClusterSchedulerBackendSuite.scala
+++ b/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/scheduler/cluster/k8s/KubernetesClusterSchedulerBackendSuite.scala
@@ -28,7 +28,7 @@ import org.apache.spark.deploy.k8s.Constants._
import org.apache.spark.deploy.k8s.Fabric8Aliases._
import org.apache.spark.rpc.{RpcEndpoint, RpcEndpointRef, RpcEnv}
import org.apache.spark.scheduler.{ExecutorKilled, TaskSchedulerImpl}
-import org.apache.spark.scheduler.cluster.CoarseGrainedClusterMessages.RemoveExecutor
+import org.apache.spark.scheduler.cluster.CoarseGrainedClusterMessages.{RemoveExecutor, StopDriver}
import org.apache.spark.scheduler.cluster.CoarseGrainedSchedulerBackend
import org.apache.spark.scheduler.cluster.k8s.ExecutorLifecycleTestUtils.TEST_SPARK_APP_ID
@@ -147,4 +147,13 @@ class KubernetesClusterSchedulerBackendSuite extends SparkFunSuite with BeforeAn
verify(podAllocator).setTotalExpectedExecutors(5)
}
+ test("SPARK-34407: CoarseGrainedSchedulerBackend.stop may throw SparkException") {
+ schedulerBackendUnderTest.start()
+
+ when(driverEndpointRef.askSync[Boolean](StopDriver)).thenThrow(new RuntimeException)
+ schedulerBackendUnderTest.stop()
+
+ // Verify the last operation of `schedulerBackendUnderTest.stop`.
+ verify(kubernetesClient).close()
+ }
}
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]