holdenk commented on a change in pull request #34650:
URL: https://github.com/apache/spark/pull/34650#discussion_r768111424



##########
File path: 
core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedSchedulerBackend.scala
##########
@@ -776,16 +801,55 @@ class CoarseGrainedSchedulerBackend(scheduler: 
TaskSchedulerImpl, val rpcEnv: Rp
     val resourceProfileToNumExecutors = resourceProfileIdToNumExecutors.map { 
case (rpid, num) =>
       (scheduler.sc.resourceProfileManager.resourceProfileFromId(rpid), num)
     }
+    val oldResourceProfileToNumExecutors = 
requestedTotalExecutorsPerResourceProfile.map {
+      case (rp, num) =>
+        (rp.id, num)
+    }.toMap

Review comment:
       Good point

##########
File path: 
core/src/main/scala/org/apache/spark/scheduler/cluster/ExecutorInfo.scala
##########
@@ -72,6 +82,6 @@ class ExecutorInfo(
   override def hashCode(): Int = {
     val state = Seq(executorHost, totalCores, logUrlMap, attributes, 
resourcesInfo,
       resourceProfileId)
-    state.map(_.hashCode()).foldLeft(0)((a, b) => 31 * a + b)
+    state.filter(_ != null).map(_.hashCode()).foldLeft(0)((a, b) => 31 * a + b)

Review comment:
       This is semi-related; it's changed so that we can compare ExecutorInfo 
objects during testing, which may have components that are null.

##########
File path: 
core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedSchedulerBackend.scala
##########
@@ -257,9 +263,27 @@ class CoarseGrainedSchedulerBackend(scheduler: 
TaskSchedulerImpl, val rpcEnv: Rp
               
.resourceProfileFromId(resourceProfileId).getNumSlotsPerAddress(rName, conf)
             (info.name, new ExecutorResourceInfo(info.name, info.addresses, 
numParts))
           }
+          // If we've requested the executor figure out when we did.
+          val reqTs: Option[Long] = {
+            execRequestTimes.get(resourceProfileId).flatMap {

Review comment:
       Yeah good point

##########
File path: 
core/src/test/scala/org/apache/spark/ExecutorAllocationManagerSuite.scala
##########
@@ -1744,7 +1744,8 @@ class ExecutorAllocationManagerSuite extends 
SparkFunSuite {
       id: String,
       rp: ResourceProfile): Unit = {
     val cores = rp.getExecutorCores.getOrElse(1)
-    val execInfo = new ExecutorInfo("host1", cores, Map.empty, Map.empty, 
Map.empty, rp.id)
+    val execInfo = new ExecutorInfo("host1", cores, Map.empty, Map.empty, 
Map.empty, rp.id,
+      None, None)

Review comment:
       That's a good point — let me back out the unrelated test changes (that 
will also ensure the constructor change captured what was desired).




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]



---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to