Ngone51 commented on a change in pull request #23377: [SPARK-26439][CORE][WIP] 
Introduce WorkerOffer reservation mechanism for Barrier TaskSet
URL: https://github.com/apache/spark/pull/23377#discussion_r243914021
 
 

 ##########
 File path: core/src/main/scala/org/apache/spark/internal/config/package.scala
 ##########
 @@ -715,6 +715,24 @@ package object config {
       .checkValue(v => v > 0, "The max failures should be a positive value.")
       .createWithDefault(40)
 
+  private[spark] val BARRIER_NO_SUFFICIENT_RESOURCE_TIMEOUT =
+    ConfigBuilder("spark.scheduler.barrier.noSufficientResource.timeout")
 +      .doc("Time in minutes to wait for a barrier TaskSet to get sufficient 
resources " +
 +        "to launch tasks. Abort the barrier TaskSet once the timeout expires, to 
avoid the job " +
 +        "hanging indefinitely.")
+      .timeConf(TimeUnit.MINUTES)
+      .checkValue(v => v > 0, "Time value should be a positive value.")
+      .createWithDefault(5)
+
+  private[spark] val BARRIER_MAX_CONSECUTIVE_NO_BARRIER_TASKSET_LAUNCH_TIMES =
+    
ConfigBuilder("spark.scheduler.barrier.maxConsecutiveNoBarrierTaskSetLaunchTimes")
 +      .doc("Maximum number of consecutive resourceOffers rounds in which no 
barrier taskSet is " +
 +        "launched. TaskScheduler will ask barrier taskSets to release their 
reserved " +
 +        "WorkerOffers once this limit is reached.")
+      .intConf
 +      .checkValue(v => v > 0, "The maxConsecutiveNoBarrierTaskSetLaunchTimes 
should be a positive value.")
+      .createWithDefault(5)
 
 Review comment:
   ditto

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
[email protected]


With regards,
Apache Git Services

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to