holdenk commented on a change in pull request #33563:
URL: https://github.com/apache/spark/pull/33563#discussion_r678745532
##########
File path: resource-managers/kubernetes/core/src/main/scala/org/apache/spark/scheduler/cluster/k8s/ExecutorPodsPollingSnapshotSource.scala
##########
@@ -55,14 +56,17 @@ private[spark] class ExecutorPodsPollingSnapshotSource(
   private class PollRunnable(applicationId: String) extends Runnable {
     override def run(): Unit = Utils.tryLogNonFatalError {
       logDebug(s"Resynchronizing full executor pod state from Kubernetes.")
-      snapshotsStore.replaceSnapshot(kubernetesClient
+      val pods = kubernetesClient
         .pods()
         .withLabel(SPARK_APP_ID_LABEL, applicationId)
         .withLabel(SPARK_ROLE_LABEL, SPARK_POD_EXECUTOR_ROLE)
         .withoutLabel(SPARK_EXECUTOR_INACTIVE_LABEL, "true")
-        .list()
-        .getItems
-        .asScala.toSeq)
+      val list = if (conf.get(KUBERNETES_EXECUTOR_API_POLLING_WITH_RESOURCE_VERSION)) {
+        pods.list(new ListOptionsBuilder().withResourceVersion("0").build())
+      } else {
+        pods.list()
+      }
+      snapshotsStore.replaceSnapshot(list.getItems.asScala.toSeq)
Review comment:
Would it make sense to check the resource version returned with the snapshot and
only replace if it's newer than the previous snapshot? Since in an HA fail-over we
could go back in time, which might be confusing to Spark.
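
Something like this rough, untested sketch is what I have in mind (the
`lastSeenResourceVersion` field and `isNotStale` helper are hypothetical names,
not anything already in the class). Note that the Kubernetes API documents
resourceVersion as an opaque string, so the numeric comparison below is an
assumption that only really holds for etcd-backed clusters:

```scala
// Sketch only: remember the resourceVersion of the last snapshot we applied and
// skip the replace when the freshly listed state is older. Parsing to Long assumes
// etcd-style monotonically increasing resource versions.
@volatile private var lastSeenResourceVersion: Option[Long] = None

private def isNotStale(listResourceVersion: String): Boolean = {
  val current = scala.util.Try(listResourceVersion.toLong).toOption
  (current, lastSeenResourceVersion) match {
    case (Some(c), Some(previous)) => c >= previous
    case _ => true // no history or unparsable version: fall back to replacing
  }
}

// inside PollRunnable.run(), after building `list`:
val listVersion = list.getMetadata.getResourceVersion
if (isNotStale(listVersion)) {
  scala.util.Try(listVersion.toLong).toOption.foreach(v => lastSeenResourceVersion = Some(v))
  snapshotsStore.replaceSnapshot(list.getItems.asScala.toSeq)
} else {
  logDebug(s"Skipping stale executor pod snapshot at resourceVersion $listVersion " +
    s"(last applied: ${lastSeenResourceVersion.getOrElse("none")})")
}
```

If the opaque-string caveat makes the ordering check too fragile, a weaker
alternative would be to only skip when the returned version is identical to the
one we last applied.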
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]