wypoon commented on a change in pull request #23767: [SPARK-26329][CORE][WIP] Faster polling of executor memory metrics.
URL: https://github.com/apache/spark/pull/23767#discussion_r262704536
 
 

 ##########
 File path: core/src/main/scala/org/apache/spark/executor/Executor.scala
 ##########
 @@ -840,8 +952,25 @@ private[spark] class Executor(
     val accumUpdates = new ArrayBuffer[(Long, Seq[AccumulatorV2[_, _]])]()
     val curGCTime = computeTotalGcTime()
 
-    // get executor level memory metrics
-    val executorUpdates = heartbeater.getCurrentMetrics()
+    // if not polling in a separate poller thread, poll here
+    if (poller == null) {
+      poll()
+    }
+
+    // build the executor level memory metrics
+    val executorUpdates = new HashMap[StageKey, ExecutorMetrics]
+
+    def peaksForStage(k: StageKey, v: AtomicLong): (StageKey, AtomicLongArray) =
+      if (v.get() > 0) (k, stageMetricPeaks.get(k)) else null
+
+    def addPeaks(nested: (StageKey, AtomicLongArray)): Unit = {
+      val (k, v) = nested
+      executorUpdates.put(k, new ExecutorMetrics(v))
+      // at the same time, reset the peaks in stageMetricPeaks
+      stageMetricPeaks.put(k, new AtomicLongArray(ExecutorMetricType.numMetrics))
+    }
+
+    activeStages.forEach[(StageKey, AtomicLongArray)](LONG_MAX_VALUE, peaksForStage, addPeaks)
 
 Review comment:
   Let me look into adding the task metrics in the failure handling.
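
  For context on the pattern in the diff above, here is a minimal, self-contained sketch (not the PR's actual code) of the per-stage snapshot-and-reset pass: walk a ConcurrentHashMap of active stages, copy the peak metric values for stages that still have running tasks, and swap in fresh AtomicLongArrays for the next reporting interval. StageKey, numMetrics, activeStages, stageMetricPeaks, and snapshotAndResetPeaks are re-declared or invented here purely for illustration, and Long.MaxValue stands in for the diff's LONG_MAX_VALUE parallelism threshold.

  import java.util.concurrent.ConcurrentHashMap
  import java.util.concurrent.atomic.{AtomicLong, AtomicLongArray}
  import scala.collection.mutable.HashMap

  object StagePeaksSketch {
    // Hypothetical stand-ins for the PR's types and sizes.
    type StageKey = (Int, Int)   // (stageId, stageAttemptId)
    val numMetrics = 4           // stand-in for ExecutorMetricType.numMetrics

    // stage -> count of running tasks; stage -> peak metric values seen so far
    val activeStages = new ConcurrentHashMap[StageKey, AtomicLong]
    val stageMetricPeaks = new ConcurrentHashMap[StageKey, AtomicLongArray]

    def snapshotAndResetPeaks(): HashMap[StageKey, Array[Long]] = {
      val updates = new HashMap[StageKey, Array[Long]]

      // Transformer: only stages with running tasks contribute a snapshot
      // (the peak array is assumed to be registered when the stage starts);
      // returning null tells forEach to skip the entry entirely.
      def peaksForStage(k: StageKey, v: AtomicLong): (StageKey, AtomicLongArray) =
        if (v.get() > 0) (k, stageMetricPeaks.get(k)) else null

      // Action: record a copy of the peaks and reset them in the same pass.
      def addPeaks(entry: (StageKey, AtomicLongArray)): Unit = {
        val (k, peaks) = entry
        updates.put(k, Array.tabulate(numMetrics)(i => peaks.get(i)))
        stageMetricPeaks.put(k, new AtomicLongArray(numMetrics))
      }

      // Long.MaxValue as the parallelism threshold keeps the traversal sequential.
      activeStages.forEach[(StageKey, AtomicLongArray)](Long.MaxValue, peaksForStage, addPeaks)
      updates
    }
  }

  Because forEach only invokes the action for non-null transformer results, stages without running tasks are skipped, and the sequential traversal (threshold Long.MaxValue) means each peak array is read and replaced without another reset racing within the same pass.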
