This is an automated email from the ASF dual-hosted git repository.

ruifengz pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 55eafb4eb3b [SPARK-43184][YARN] Resume using enumeration to compare `NodeState.DECOMMISSIONING` state
55eafb4eb3b is described below

commit 55eafb4eb3b1dfa1ff0683307610472bc0351d6b
Author: yangjie01 <yangji...@baidu.com>
AuthorDate: Thu Apr 20 17:37:10 2023 +0800

    [SPARK-43184][YARN] Resume using enumeration to compare `NodeState.DECOMMISSIONING` state
    
    ### What changes were proposed in this pull request?
    https://github.com/apache/spark/pull/36917 changed this to a String comparison
    of `NodeState.DECOMMISSIONING` for compatibility with Hadoop 2.7. After
    SPARK-42452, Spark no longer supports building and testing with Hadoop 2, so
    this PR resumes using the enumeration to compare `NodeState.DECOMMISSIONING`.
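
    For illustration, a minimal sketch of the two comparison styles (the object
    and method names below are hypothetical and not part of this patch; a
    Hadoop 3 `hadoop-yarn-api` dependency is assumed):

    ```scala
    import org.apache.hadoop.yarn.api.records.NodeState

    object DecommissioningCheck {
      // Hadoop 2.7-era workaround (SPARK-39491): compare by name so the code
      // compiles even though the 2.7 enum has no DECOMMISSIONING constant.
      def isDecommissioningByString(state: NodeState): Boolean =
        state.toString.equals("DECOMMISSIONING")

      // Hadoop 3 style restored by this PR: compare the enum value directly.
      def isDecommissioning(state: NodeState): Boolean =
        state == NodeState.DECOMMISSIONING

      def main(args: Array[String]): Unit = {
        // Both checks agree on a Hadoop 3 classpath.
        println(isDecommissioningByString(NodeState.DECOMMISSIONING)) // true
        println(isDecommissioning(NodeState.RUNNING))                 // false
      }
    }
    ```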
    
    ### Why are the changes needed?
    Compatibility with Hadoop 2 is no longer required.
    
    ### Does this PR introduce _any_ user-facing change?
    No
    
    ### How was this patch tested?
    Pass GitHub Actions
    
    Closes #40846 from LuciferYang/SPARK-43184.
    
    Authored-by: yangjie01 <yangji...@baidu.com>
    Signed-off-by: Ruifeng Zheng <ruife...@apache.org>
---
 .../src/main/scala/org/apache/spark/deploy/yarn/YarnAllocator.scala | 6 +-----
 .../scala/org/apache/spark/deploy/yarn/YarnAllocatorSuite.scala     | 5 ++---
 2 files changed, 3 insertions(+), 8 deletions(-)

diff --git a/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocator.scala b/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocator.scala
index 75d7dad6d31..4b55d48cda3 100644
--- a/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocator.scala
+++ b/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocator.scala
@@ -458,11 +458,7 @@ private[yarn] class YarnAllocator(
     // resources on those nodes for earlier allocateResource calls, so notifying driver
     // to put those executors in decommissioning state
     allocateResponse.getUpdatedNodes.asScala.filter (node =>
-      // SPARK-39491: Hadoop 2.7 does not support `NodeState.DECOMMISSIONING`,
-      // there change to use string comparison instead for compilation.
-      // Should revert to `node.getNodeState == NodeState.DECOMMISSIONING` when
-      // Hadoop 2.7 is no longer supported.
-      node.getNodeState.toString.equals("DECOMMISSIONING") &&
+      node.getNodeState == NodeState.DECOMMISSIONING &&
         !decommissioningNodesCache.containsKey(getHostAddress(node)))
       .foreach { node =>
         val host = getHostAddress(node)
diff --git a/resource-managers/yarn/src/test/scala/org/apache/spark/deploy/yarn/YarnAllocatorSuite.scala b/resource-managers/yarn/src/test/scala/org/apache/spark/deploy/yarn/YarnAllocatorSuite.scala
index a5ca382fb46..59b1e57aa5e 100644
--- a/resource-managers/yarn/src/test/scala/org/apache/spark/deploy/yarn/YarnAllocatorSuite.scala
+++ b/resource-managers/yarn/src/test/scala/org/apache/spark/deploy/yarn/YarnAllocatorSuite.scala
@@ -47,7 +47,7 @@ import org.apache.spark.resource.TestResourceIDs._
 import org.apache.spark.rpc.RpcEndpointRef
 import org.apache.spark.scheduler.SplitInfo
 import org.apache.spark.scheduler.cluster.CoarseGrainedClusterMessages.DecommissionExecutorsOnHost
-import org.apache.spark.util.{ManualClock, VersionUtils}
+import org.apache.spark.util.ManualClock
 
 class MockResolver extends SparkRackResolver(SparkHadoopUtil.get.conf) {
 
@@ -767,7 +767,6 @@ class YarnAllocatorSuite extends SparkFunSuite with Matchers {
   }
 
   test("Test YARN container decommissioning") {
-    assume(VersionUtils.isHadoop3)
     val rmClient: AMRMClient[ContainerRequest] = AMRMClient.createAMRMClient()
     val rmClientSpy = spy(rmClient)
     val allocateResponse = mock(classOf[AllocateResponse])
@@ -816,7 +815,7 @@ class YarnAllocatorSuite extends SparkFunSuite with Matchers {
 
     // host1 is now in DECOMMISSIONING state
     val httpAddress1 = "host1:420"
-    when(nodeReport.getNodeState).thenReturn(NodeState.valueOf("DECOMMISSIONING"))
+    when(nodeReport.getNodeState).thenReturn(NodeState.DECOMMISSIONING)
     when(nodeReport.getNodeId).thenReturn(nodeId)
     when(nodeId.getHost).thenReturn("host1")
     when(allocateResponse.getUpdatedNodes).thenReturn(nodeReportList)


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org
