This is an automated email from the ASF dual-hosted git repository.

dongjoon pushed a commit to branch branch-3.5
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/branch-3.5 by this push:
     new bf29ab9eae79 [SPARK-50421][CORE][3.5] Fix executor related memory config incorrect when multiple resource profiles worked
bf29ab9eae79 is described below

commit bf29ab9eae79e73c6844881b0cd3a5e284960618
Author: Terry Wang <[email protected]>
AuthorDate: Fri Dec 6 09:29:57 2024 -0800

    [SPARK-50421][CORE][3.5] Fix executor related memory config incorrect when multiple resource profiles worked
    
    ### What changes were proposed in this pull request?
    
    Reset the executor's env memory related config when the resource profile is not the default resource profile.
    
    ### Why are the changes needed?
    When multiple resource profiles exist in the same Spark application, the executor's memory related config is currently not overridden by the resource profile's memory size, which causes maxOffHeap in `UnifiedMemoryManager` to be incorrect.
    See https://issues.apache.org/jira/browse/SPARK-50421 for more details
    
    ### Does this PR introduce _any_ user-facing change?
    No
    
    ### How was this patch tested?
    Tests in our inner spark version and jobs.
    
    ### Was this patch authored or co-authored using generative AI tooling?
    No
    
    This is a backport of https://github.com/apache/spark/pull/48963 to branch-3.5
    
    Closes #49090 from zjuwangg/m35_fixConfig.
    
    Authored-by: Terry Wang <[email protected]>
    Signed-off-by: Dongjoon Hyun <[email protected]>
---
 .../executor/CoarseGrainedExecutorBackend.scala     | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)

diff --git a/core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala b/core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala
index 537522326fc7..fe90895cacb5 100644
--- a/core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala
+++ b/core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala
@@ -474,6 +474,27 @@ private[spark] object CoarseGrainedExecutorBackend extends Logging {
       }
 
       driverConf.set(EXECUTOR_ID, arguments.executorId)
+      // Set executor memory related config here according to resource profile
+      if (cfg.resourceProfile.id != ResourceProfile.DEFAULT_RESOURCE_PROFILE_ID) {
+        cfg.resourceProfile
+          .executorResources
+          .foreach {
+            case (ResourceProfile.OFFHEAP_MEM, request) =>
+              driverConf.set(MEMORY_OFFHEAP_SIZE.key, request.amount.toString + "m")
+              logInfo(s"Set executor off-heap memory to $request")
+            case (ResourceProfile.MEMORY, request) =>
+              driverConf.set(EXECUTOR_MEMORY.key, request.amount.toString + "m")
+              logInfo(s"Set executor memory to $request")
+            case (ResourceProfile.OVERHEAD_MEM, request) =>
+              // Maybe don't need to set this since it's nearly used by tasks.
+              driverConf.set(EXECUTOR_MEMORY_OVERHEAD.key, request.amount.toString + "m")
+              logInfo(s"Set executor memory_overhead to $request")
+            case (ResourceProfile.CORES, request) =>
+              driverConf.set(EXECUTOR_CORES.key, request.amount.toString)
+              logInfo(s"Set executor cores to $request")
+            case _ =>
+          }
+      }
       val env = SparkEnv.createExecutorEnv(driverConf, arguments.executorId, arguments.bindAddress,
         arguments.hostname, arguments.cores, cfg.ioEncryptionKey, isLocal = false)
       // Set the application attemptId in the BlockStoreClient if available.


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to