ivoson commented on code in PR #36716:
URL: https://github.com/apache/spark/pull/36716#discussion_r902793990


##########
core/src/main/scala/org/apache/spark/resource/ResourceProfile.scala:
##########
@@ -336,9 +340,23 @@ object ResourceProfile extends Logging {
 
   private def getDefaultExecutorResources(conf: SparkConf): Map[String, 
ExecutorResourceRequest] = {
     val ereqs = new ExecutorResourceRequests()
-    val cores = conf.get(EXECUTOR_CORES)
-    ereqs.cores(cores)
-    val memory = conf.get(EXECUTOR_MEMORY)
+
+    val isStandalone = 
conf.getOption("spark.master").exists(_.startsWith("spark://"))
+    val isLocalCluster = 
conf.getOption("spark.master").exists(_.startsWith("local-cluster"))
+    // By default, standalone executors take all available cores, so they do 
not have a specific value.
+    val cores = if (isStandalone || isLocalCluster) {
+      conf.getOption(EXECUTOR_CORES.key).map(_.toInt)
+    } else {
+      Some(conf.get(EXECUTOR_CORES))
+    }
+    cores.foreach(ereqs.cores)
+
+    // Set all resources here; each cluster manager will take only the 
resources it respects.

Review Comment:
   Sorry for the confusion. I am trying to say that, besides executor cores and 
memory, we may also set memory overhead / off-heap memory in the Spark conf, and 
these resources will eventually end up in the default resource profile. But 
cluster managers like standalone only use executor cores and memory; they will 
ignore the other built-in resources.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to