HyukjinKwon commented on a change in pull request #30547:
URL: https://github.com/apache/spark/pull/30547#discussion_r533239491
##########
File path: core/src/main/scala/org/apache/spark/HeartbeatReceiver.scala
##########
@@ -80,7 +80,7 @@ private[spark] class HeartbeatReceiver(sc: SparkContext,
clock: Clock)
// executor ID -> timestamp of when the last heartbeat from this executor was received
private val executorLastSeen = new HashMap[String, Long]
- private val executorTimeoutMs = sc.conf.get(config.STORAGE_BLOCKMANAGER_HEARTBEAT_TIMEOUT)
+ private val executorTimeoutMs = Utils.blockManagerHeartbeatTimeoutAsMs(sc.conf)
Review comment:
Could we maybe do something like this?
```diff
diff --git a/core/src/main/scala/org/apache/spark/HeartbeatReceiver.scala b/core/src/main/scala/org/apache/spark/HeartbeatReceiver.scala
index 233ad884a72..9aabe46e721 100644
--- a/core/src/main/scala/org/apache/spark/HeartbeatReceiver.scala
+++ b/core/src/main/scala/org/apache/spark/HeartbeatReceiver.scala
@@ -80,7 +80,9 @@ private[spark] class HeartbeatReceiver(sc: SparkContext,
clock: Clock)
// executor ID -> timestamp of when the last heartbeat from this executor was received
private val executorLastSeen = new HashMap[String, Long]
- private val executorTimeoutMs = sc.conf.get(config.STORAGE_BLOCKMANAGER_HEARTBEAT_TIMEOUT)
+ private val executorTimeoutMs = sc.conf.get(
+   config.STORAGE_BLOCKMANAGER_HEARTBEAT_TIMEOUT
+ ).getOrElse(sc.conf.getTimeAsMs(Network.NETWORK_TIMEOUT.key))
private val checkTimeoutIntervalMs = sc.conf.get(Network.NETWORK_TIMEOUT_INTERVAL)
diff --git a/core/src/main/scala/org/apache/spark/internal/config/package.scala b/core/src/main/scala/org/apache/spark/internal/config/package.scala
index 093a0ecf58d..f42388e24ce 100644
--- a/core/src/main/scala/org/apache/spark/internal/config/package.scala
+++ b/core/src/main/scala/org/apache/spark/internal/config/package.scala
@@ -504,7 +504,7 @@ package object config {
.version("0.7.0")
.withAlternative("spark.storage.blockManagerSlaveTimeoutMs")
.timeConf(TimeUnit.MILLISECONDS)
- .createWithDefaultString(Network.NETWORK_TIMEOUT.defaultValueString)
+ .createOptional
private[spark] val STORAGE_CLEANUP_FILES_AFTER_EXECUTOR_EXIT =
ConfigBuilder("spark.storage.cleanupFilesAfterExecutorExit")
diff --git a/resource-managers/mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackend.scala b/resource-managers/mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackend.scala
index efcef09132f..88fc8b29d35 100644
--- a/resource-managers/mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackend.scala
+++ b/resource-managers/mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackend.scala
@@ -651,7 +651,9 @@ private[spark] class MesosCoarseGrainedSchedulerBackend(
.registerDriverWithShuffleService(
agent.hostname,
externalShufflePort,
- sc.conf.get(config.STORAGE_BLOCKMANAGER_HEARTBEAT_TIMEOUT),
+ sc.conf.get(
+ config.STORAGE_BLOCKMANAGER_HEARTBEAT_TIMEOUT
+ ).getOrElse(sc.conf.getTimeAsMs(Network.NETWORK_TIMEOUT.key)),
sc.conf.get(config.EXECUTOR_HEARTBEAT_INTERVAL))
agent.shuffleRegistered = true
}
```
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]