This is an automated email from the ASF dual-hosted git repository.

dongjoon pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 2a70103db6b5 [SPARK-55485][K8S] Add `Constants.POD_DELETION_COST` for reuse
2a70103db6b5 is described below

commit 2a70103db6b59ef2798ce9605fbb6acd219e1a3e
Author: Dongjoon Hyun <[email protected]>
AuthorDate: Wed Feb 11 10:08:22 2026 -0800

    [SPARK-55485][K8S] Add `Constants.POD_DELETION_COST` for reuse
    
    ### What changes were proposed in this pull request?
    
    This PR aims to add `Constants.POD_DELETION_COST` for reuse.
    
    ### Why are the changes needed?
    
    To allow downstream projects like the Apache Spark K8s Operator to reuse this constant.
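
    As an illustration only (not part of this patch), a downstream consumer could reference the shared key instead of redefining the string literal. The sketch below assumes the fabric8 `PodBuilder` API; the object name, helper name, and package placement are hypothetical and do not exist in this repository.

    ```scala
    // Hypothetical package placement so the sketch can see the constant.
    package org.apache.spark.deploy.k8s

    import io.fabric8.kubernetes.api.model.{Pod, PodBuilder}

    object DeletionCostSketch {
      // Copy the given pod and attach the KEP 2255 deletion-cost annotation,
      // reusing the shared constant rather than a duplicated string literal.
      def withDeletionCost(pod: Pod, cost: Int): Pod =
        new PodBuilder(pod)
          .editOrNewMetadata()
            .addToAnnotations(Constants.POD_DELETION_COST, cost.toString)
          .endMetadata()
          .build()
    }
    ```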
    
    ### Does this PR introduce _any_ user-facing change?
    
    No.
    
    ### How was this patch tested?
    
    Pass the CIs.
    
    ### Was this patch authored or co-authored using generative AI tooling?
    
    Generated-by: `Gemini 3 Pro (High)` on `Antigravity`
    
    Closes #54270 from dongjoon-hyun/SPARK-55485.
    
    Authored-by: Dongjoon Hyun <[email protected]>
    Signed-off-by: Dongjoon Hyun <[email protected]>
---
 .../src/main/scala/org/apache/spark/deploy/k8s/Constants.scala    | 1 +
 .../scheduler/cluster/k8s/KubernetesClusterSchedulerBackend.scala | 8 +++-----
 2 files changed, 4 insertions(+), 5 deletions(-)

diff --git a/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/Constants.scala b/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/Constants.scala
index 218ddbac70dd..d9b3c3df945a 100644
--- a/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/Constants.scala
+++ b/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/Constants.scala
@@ -108,6 +108,7 @@ object Constants {
   val NON_JVM_MEMORY_OVERHEAD_FACTOR = 0.4d
   val CONNECT_GRPC_BINDING_PORT = "spark.connect.grpc.binding.port"
   val EXIT_EXCEPTION_ANNOTATION = "spark.exit-exception"
+  val POD_DELETION_COST = "controller.kubernetes.io/pod-deletion-cost"
 
   // Hadoop Configuration
   val HADOOP_CONF_VOLUME = "hadoop-properties"
diff --git a/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/scheduler/cluster/k8s/KubernetesClusterSchedulerBackend.scala b/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/scheduler/cluster/k8s/KubernetesClusterSchedulerBackend.scala
index 606a59f88269..381cd0f788c4 100644
--- a/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/scheduler/cluster/k8s/KubernetesClusterSchedulerBackend.scala
+++ b/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/scheduler/cluster/k8s/KubernetesClusterSchedulerBackend.scala
@@ -76,10 +76,6 @@ private[spark] class KubernetesClusterSchedulerBackend(
 
   private val PATCH_CONTEXT = PatchContext.of(PatchType.STRATEGIC_MERGE)
 
-  // KEP 2255: When a Deployment or Replicaset is scaled down, the pods will be deleted in the
-  // order of the value of this annotation, ascending.
-  private val podDeletionCostAnnotation = "controller.kubernetes.io/pod-deletion-cost"
-
   // Allow removeExecutor to be accessible by ExecutorPodsLifecycleManager
   private[k8s] def doRemoveExecutor(executorId: String, reason: ExecutorLossReason): Unit = {
     removeExecutor(executorId, reason)
@@ -202,6 +198,8 @@ private[spark] class KubernetesClusterSchedulerBackend(
     super.getExecutorIds()
   }
 
+  // KEP 2255: When a Deployment or Replicaset is scaled down, the pods will be deleted in the
+  // order of the value of this annotation, ascending.
   private def annotateExecutorDeletionCost(execIds: Seq[String]): Unit = {
     conf.get(KUBERNETES_EXECUTOR_POD_DELETION_COST).foreach { cost =>
       logInfo(s"Annotating executor pod(s) ${execIds.mkString(",")} with 
deletion cost $cost")
@@ -217,7 +215,7 @@ private[spark] class KubernetesClusterSchedulerBackend(
             .forEach { podResource =>
               podResource.patch(PATCH_CONTEXT, new PodBuilder()
                 .withNewMetadata()
-                .addToAnnotations(podDeletionCostAnnotation, cost.toString)
+                .addToAnnotations(POD_DELETION_COST, cost.toString)
                 .endMetadata()
                 .build())
             }
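
For context, here is a minimal standalone sketch of the KEP 2255 patch pattern shown in the hunk above, assuming a fabric8 `KubernetesClientBuilder` is available; the object name, package placement, namespace `spark`, pod name `spark-exec-1`, and cost `100` are placeholder values for illustration, not taken from this commit.

```scala
// Hypothetical package placement so the sketch can see the constant.
package org.apache.spark.deploy.k8s

import io.fabric8.kubernetes.api.model.PodBuilder
import io.fabric8.kubernetes.client.KubernetesClientBuilder
import io.fabric8.kubernetes.client.dsl.base.{PatchContext, PatchType}

object PodDeletionCostPatchSketch {
  def main(args: Array[String]): Unit = {
    val client = new KubernetesClientBuilder().build()
    try {
      // Strategic-merge patch that only touches metadata.annotations, so the
      // rest of the pod spec is left untouched.
      val patch = new PodBuilder()
        .withNewMetadata()
          .addToAnnotations(Constants.POD_DELETION_COST, "100")
        .endMetadata()
        .build()
      // Per KEP 2255, pods with a lower deletion cost are preferred for
      // removal when a ReplicaSet is scaled down.
      client.pods()
        .inNamespace("spark")
        .withName("spark-exec-1")
        .patch(PatchContext.of(PatchType.STRATEGIC_MERGE), patch)
    } finally {
      client.close()
    }
  }
}
```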


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
