attilapiros commented on a change in pull request #29226:
URL: https://github.com/apache/spark/pull/29226#discussion_r460505054



##########
File path: core/src/test/scala/org/apache/spark/storage/BlockManagerDecommissionIntegrationSuite.scala
##########
@@ -107,6 +115,21 @@ class BlockManagerDecommissionIntegrationSuite extends SparkFunSuite with LocalS
      }
 
      override def onBlockUpdated(blockUpdated: SparkListenerBlockUpdated): Unit = {
+        if (blockUpdated.blockUpdatedInfo.blockId.isRDD && persist) {
+          // Persisted RDD blocks are a bit weirder than shuffle blocks: Even though
+          // the tasks are run say on executors (0, 1, 2), the RDD blocks might end up only
+          // on executors 0 and 1. So we cannot just indiscriminately decommission any executor.
+          // Instead we must decommission an executor that actually has an RDD block.
+          // Fortunately, this isn't the case for shuffle blocks which are indeed present on all
+          // executors and thus any executor can be decommissioned when `persist` is false.
+          val candidateExecToDecom = blockUpdated.blockUpdatedInfo.blockManagerId.executorId
+          if (execToDecommission.compareAndSet(null, candidateExecToDecom)) {
+            val decomContext = s"Decommissioning executor ${candidateExecToDecom} for persist"
+            logInfo(decomContext)
+            sched.decommissionExecutor(candidateExecToDecom,
+              ExecutorDecommissionInfo(decomContext, false))

Review comment:
       duplicated code
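
       The comment flags duplication: the log-and-decommission pattern above presumably also appears elsewhere in this suite. A minimal sketch of one way to factor it out, assuming `sched` and `ExecutorDecommissionInfo` are already in scope as in the diff; the helper name `decommissionWithLogging` is hypothetical, not part of the PR:

           // Hypothetical helper (illustrative only): one place for the
           // log-and-decommission pattern used in onBlockUpdated above.
           private def decommissionWithLogging(execId: String, context: String): Unit = {
             val decomContext = s"Decommissioning executor ${execId} ${context}"
             logInfo(decomContext)
             // Same call as in the diff; `sched` is the suite's scheduler backend.
             sched.decommissionExecutor(execId, ExecutorDecommissionInfo(decomContext, false))
           }

           // The branch in onBlockUpdated would then reduce to:
           //   if (execToDecommission.compareAndSet(null, candidateExecToDecom)) {
           //     decommissionWithLogging(candidateExecToDecom, "for persist")
           //   }

       Keeping the formatted message in one place also keeps the log line and the ExecutorDecommissionInfo message consistent wherever the call is repeated.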



