kevin85421 commented on code in PR #37411:
URL: https://github.com/apache/spark/pull/37411#discussion_r947199549


##########
core/src/main/scala/org/apache/spark/HeartbeatReceiver.scala:
##########
@@ -199,41 +241,120 @@ private[spark] class HeartbeatReceiver(sc: SparkContext, clock: Clock)
     removeExecutor(executorRemoved.executorId)
   }
 
+  private def killExecutor(executorId: String, timeout: Long): Unit = {
+    logWarning(s"Removing executor $executorId with no recent heartbeats: " +
+      s"${timeout} ms exceeds timeout $executorTimeoutMs ms")
+    killExecutorThread.submit(new Runnable {
+      override def run(): Unit = Utils.tryLogNonFatalError {
+        // Note: we want to get an executor back after expiring this one,
+        // so do not simply call `sc.killExecutor` here (SPARK-8119)
+        sc.killAndReplaceExecutor(executorId)
+        // SPARK-27348: in case of the executors which are not gracefully shut down,
+        // we should remove lost executors from CoarseGrainedSchedulerBackend manually
+        // here to guarantee two things:
+        // 1) explicitly remove executor information from CoarseGrainedSchedulerBackend for
+        //    a lost executor instead of waiting for disconnect message
+        // 2) call scheduler.executorLost() underlying to fail any tasks assigned to
+        //    those executors to avoid app hang
+        sc.schedulerBackend match {
+          case backend: CoarseGrainedSchedulerBackend =>
+            // TODO (SPARK-39984): Update causedByApp when we have a hanging task detector
+            backend.driverEndpoint.send(RemoveExecutor(executorId,
+              ExecutorProcessLost(
+                s"Executor heartbeat timed out after ${timeout} ms")))
+          // LocalSchedulerBackend is used locally and only has one single executor
+          case _: LocalSchedulerBackend =>
+
+          case other => throw new UnsupportedOperationException(
+            s"Unknown scheduler backend: ${other.getClass}")
+        }
+      }
+    })
+  }
+
+  private def isStandalone(): Boolean = {
+    sc.schedulerBackend match {
+      case _: StandaloneSchedulerBackend => true
+      case _ => false
+    }
+  }
+
+  private def removeExecutorFromExpiryCandidates(executorId: String): Unit = {
+    if (checkWorkerLastHeartbeat && isStandalone()) {
+      executorExpiryCandidates.remove(executorId)
+    }
+  }
+
+  /**
+   * [SPARK-39984]
+   * The driver's HeartbeatReceiver will expire an executor if it does not receive any
+   * heartbeat from the executor for `executorTimeoutMs` (default 120s). However, lowering
+   * this timeout has other challenges. For example, when an executor is performing a full
+   * GC, it cannot send or reply to any message for tens of seconds (depending on the
+   * environment). Hence, HeartbeatReceiver cannot determine whether the heartbeat loss is
+   * caused by network issues or by other reasons (e.g. full GC). To address this, we
+   * designed a new HeartbeatReceiver mechanism for standalone deployments.
+   *
+   * For standalone deployments:
+   * If the driver does not receive any heartbeat from an executor for `executorTimeoutMs`
+   * seconds, HeartbeatReceiver sends a request to the master to ask for the latest
+   * heartbeat from the worker that the executor runs on. HeartbeatReceiver can then
+   * determine whether the heartbeat loss is caused by network issues or by other issues
+   * (e.g. GC). If the heartbeat loss is not caused by network issues, HeartbeatReceiver
+   * puts the executor into `executorExpiryCandidates` rather than expiring it immediately.
+   */
   private def expireDeadHosts(): Unit = {
     logTrace("Checking for hosts with no recent heartbeats in HeartbeatReceiver.")
+    logWarning("Keep `expiryCandidatesTimeout` larger than `HEARTBEAT_MILLIS` in " +
+      "deploy/worker/Worker.scala to know whether the master lost any heartbeat from the " +
+      "worker or not.")
     val now = clock.getTimeMillis()
-    for ((executorId, lastSeenMs) <- executorLastSeen) {
-      if (now - lastSeenMs > executorTimeoutMs) {
-        logWarning(s"Removing executor $executorId with no recent heartbeats: 
" +
-          s"${now - lastSeenMs} ms exceeds timeout $executorTimeoutMs ms")
-        // Asynchronously kill the executor to avoid blocking the current 
thread
-        killExecutorThread.submit(new Runnable {
-          override def run(): Unit = Utils.tryLogNonFatalError {
-            // Note: we want to get an executor back after expiring this one,
-            // so do not simply call `sc.killExecutor` here (SPARK-8119)
-            sc.killAndReplaceExecutor(executorId)
-            // SPARK-27348: in case of the executors which are not gracefully 
shut down,
-            // we should remove lost executors from 
CoarseGrainedSchedulerBackend manually
-            // here to guarantee two things:
-            // 1) explicitly remove executor information from 
CoarseGrainedSchedulerBackend for
-            //    a lost executor instead of waiting for disconnect message
-            // 2) call scheduler.executorLost() underlying to fail any tasks 
assigned to
-            //    those executors to avoid app hang
-            sc.schedulerBackend match {
-              case backend: CoarseGrainedSchedulerBackend =>
-                backend.driverEndpoint.send(RemoveExecutor(executorId,
-                  ExecutorProcessLost(
-                    s"Executor heartbeat timed out after ${now - lastSeenMs} 
ms")))
-
-              // LocalSchedulerBackend is used locally and only has one single 
executor
-              case _: LocalSchedulerBackend =>
-
-              case other => throw new UnsupportedOperationException(
-                s"Unknown scheduler backend: ${other.getClass}")
-            }
+    if (!checkWorkerLastHeartbeat || !isStandalone()) {
+      for ((executorId, lastSeenMs) <- executorLastSeen) {
+        if (now - lastSeenMs > executorTimeoutMs) {
+          killExecutor(executorId, now - lastSeenMs)
+          executorLastSeen.remove(executorId)
+        }
+      }
+    } else {
+      for ((executorId, workerLastHeartbeat) <- executorExpiryCandidates) {
+        if (now - workerLastHeartbeat > expiryCandidatesTimeout) {
+          killExecutor(executorId, now - workerLastHeartbeat)
+          executorExpiryCandidates.remove(executorId)
+          executorLastSeen.remove(executorId)
+        }
+      }

Review Comment:
   Updated https://github.com/apache/spark/pull/37411/commits/e9fe8d1db2bc5307a437e903d0991c3f4c7d1949
   
   There is a slight difference when `backend.client.workerLastHeartbeat` matches the `None` case: I set `executorExpiryCandidates(executorId)` to `lastSeenMs` instead of `Long.MaxValue`.
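   
   A minimal sketch of that `None` handling, for illustration only (assumptions: `workerLastHeartbeat` takes the executor id and returns an `Option[Long]`, and `lastSeenMs` comes from `executorLastSeen`; this is not the exact committed code):
   
   ```scala
   sc.schedulerBackend match {
     case backend: StandaloneSchedulerBackend =>
       // Ask the master for the latest heartbeat of the worker hosting this
       // executor (signature assumed here: Option[Long]).
       backend.client.workerLastHeartbeat(executorId) match {
         case Some(workerHeartbeat) =>
           // The master still hears from the worker, so the loss is likely not
           // a network issue: defer expiry and track the worker heartbeat time.
           executorExpiryCandidates(executorId) = workerHeartbeat
         case None =>
           // No worker heartbeat is available: fall back to the executor's own
           // lastSeenMs rather than Long.MaxValue, so the candidate can still
           // expire once `expiryCandidatesTimeout` elapses.
           executorExpiryCandidates(executorId) = lastSeenMs
       }
     case _ => // non-standalone backends keep the plain executorTimeoutMs path
   }
   ```
   
   Using `lastSeenMs` keeps the candidate's expiry anchored to a real timestamp, whereas `Long.MaxValue` would make `now - workerLastHeartbeat` negative and the entry would never expire.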


