This is an automated email from the ASF dual-hosted git repository.

chengpan pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/kyuubi.git

commit 9ff46a3c633534c2266ad8e6316b9fddaa024a6c
Author: zhouyifan279 <[email protected]>
AuthorDate: Thu Jun 8 20:16:39 2023 +0800

    [KYUUBI #4935] More than target num of executors may survive after FinalStageResourceManager did kill
    
    ### _Why are the changes needed?_
    When FinalStageResourceManager chooses executors to kill, it may add
    already-dead executors to the kill list. Each dead executor consumes a
    kill slot without freeing a live one, so more than the target number of
    executors survive and resources are wasted.
    
    ### _How was this patch tested?_
    - [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
    
    - [ ] Add screenshots for manual tests if appropriate
    
    - [x] [Run test](https://kyuubi.readthedocs.io/en/master/develop_tools/testing.html#running-tests) locally before making a pull request
    
    Closes #4936 from zhouyifan279/kill-executor.
    
    Closes #4936
    
    2aaa84cb1 [zhouyifan279] [KYUUBI#4935][Improvement] More than target num of executors may survive after FinalStageResourceManager did kill
    
    Authored-by: zhouyifan279 <[email protected]>
    Signed-off-by: Cheng Pan <[email protected]>
---
 .../src/main/scala/org/apache/spark/sql/FinalStageResourceManager.scala | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/extensions/spark/kyuubi-extension-spark-3-3/src/main/scala/org/apache/spark/sql/FinalStageResourceManager.scala b/extensions/spark/kyuubi-extension-spark-3-3/src/main/scala/org/apache/spark/sql/FinalStageResourceManager.scala
index 7a0ae1592..dc573f838 100644
--- a/extensions/spark/kyuubi-extension-spark-3-3/src/main/scala/org/apache/spark/sql/FinalStageResourceManager.scala
+++ b/extensions/spark/kyuubi-extension-spark-3-3/src/main/scala/org/apache/spark/sql/FinalStageResourceManager.scala
@@ -170,7 +170,7 @@ case class FinalStageResourceManager(session: SparkSession)
 
     // Evict the rest executors according to the shuffle block size
     executorToBlockSize.toSeq.sortBy(_._2).foreach { case (id, _) =>
-      if (executorIdsToKill.length < expectedNumExecutorToKill) {
+      if (executorIdsToKill.length < expectedNumExecutorToKill && existedExecutors.contains(id)) {
         executorIdsToKill.append(id)
       }
     }
