squito commented on a change in pull request #23951: [SPARK-27038][CORE][YARN] 
Re-implement RackResolver to reduce resolving time
URL: https://github.com/apache/spark/pull/23951#discussion_r263455284
 
 

 ##########
 File path: 
core/src/test/scala/org/apache/spark/scheduler/TaskSetManagerSuite.scala
 ##########
 @@ -1602,4 +1637,27 @@ class TaskSetManagerSuite extends SparkFunSuite with 
LocalSparkContext with Logg
     verify(sched.dagScheduler).taskEnded(manager.tasks(3), Success, 
result.value(),
       result.accumUpdates, info3)
   }
+
+  test("SPARK-27038: Verify the rack resolving time has been reduced") {
+    sc = new SparkContext("local", "test")
+    for (i <- 1 to 100) {
+      FakeRackUtil.assignHostToRack("host" + i, "rack" + i)
+    }
+    sched = new FakeTaskScheduler(sc,
+      ("execA", "host1"), ("execB", "host2"), ("execC", "host3"))
+    sched.slowRackResolve = true
+    val locations = new ArrayBuffer[Seq[TaskLocation]]()
+    for (i <- 1 to 100) {
+      locations += Seq(TaskLocation("host" + i))
+    }
+    val taskSet = FakeTask.createTaskSet(100, locations: _*)
+    val clock = new ManualClock
+    val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock 
= clock)
+    var total = 0
+    for (i <- 1 to 100) {
+      total += manager.getPendingTasksForRack("rack" + i).length
 
 Review comment:
   it would be worth adding a check that the rack assignment is still done 
correctly here, e.g. `assert(manager.getPendingTasksForRack("rack" + i).length == 
1)`.  Though maybe we should update so that multiple hosts are assigned to the 
same rack, e.g. above you would call `FakeRackUtil.assignHostToRack("host" + i, 
"rack" + (i % 20))`

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
[email protected]


With regards,
Apache Git Services

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to