rmatharu commented on a change in pull request #1104: SAMZA-2266: Introduce a backoff when there are repeated failures for host-affinity allocations
URL: https://github.com/apache/samza/pull/1104#discussion_r309809323
##########
File path: samza-core/src/main/java/org/apache/samza/clustermanager/ContainerProcessManager.java
##########
@@ -540,6 +476,114 @@ boolean getJobFailureCriteriaMet() {
     return processorFailures;
   }
+  /**
+   * Called within {@link #onResourceCompleted(SamzaResourceStatus)} for unknown exit statuses. These exit statuses
+   * correspond to container completion other than container run-to-completion, abort or preemption, or disk failure
+   * (e.g., detected by YARN's NM healthchecks).
+   * @param resourceStatus reported resource status.
+   * @param containerId container ID
+   * @param processorId processor ID (aka. logical container ID)
+   * @param exitStatus exit status from the {@link #onResourceCompleted(SamzaResourceStatus)} callback.
+   */
+  @VisibleForTesting
+  void onResourceCompletedWithUnknownStatus(SamzaResourceStatus resourceStatus, String containerId, String processorId,
+      int exitStatus) {
+    LOG.info("Container ID: {} for Processor ID: {} failed with exit code: {}.", containerId, processorId, exitStatus);
+    Instant now = Instant.now();
+    state.failedContainers.incrementAndGet();
+    state.failedContainersStatus.put(containerId, resourceStatus);
+    state.jobHealthy.set(false);
+
+    state.neededProcessors.incrementAndGet();
+    // Find out previously running container location
+    String lastSeenOn = state.jobModelManager.jobModel().getContainerToHostValue(processorId, SetContainerHostMapping.HOST_KEY);
+    if (!hostAffinityEnabled || lastSeenOn == null) {
+      lastSeenOn = ResourceRequestState.ANY_HOST;
+    }
+    LOG.info("Container ID: {} for Processor ID: {} was last seen on host {}.", containerId, processorId, lastSeenOn);
+    // A container failed for an unknown reason. Let's check to see if
+    // we need to shutdown the whole app master if too many container
+    // failures have happened. The rules for failing are that the
+    // failure count for a task group id must be > the configured retry
+    // count, and the last failure (the one prior to this one) must have
+    // happened less than retry window ms ago. If retry count is set to
+    // 0, the app master will fail on any container failure. If the
+    // retry count is set to a number < 0, a container failure will
+    // never trigger an app master failure.
+    int retryCount = clusterManagerConfig.getContainerRetryCount();
+    int retryWindowMs = clusterManagerConfig.getContainerRetryWindowMs();
+    int currentFailCount;
+
+    if (retryCount == 0) {
+      LOG.error("Processor ID: {} (current Container ID: {}) failed, and retry count is set to 0, " +
+          "so shutting down the application master and marking the job as failed.", processorId, containerId);
+
+      jobFailureCriteriaMet = true;
+    } else if (retryCount > 0) {
+      long durationSinceLastRetryMs;
+      if (processorFailures.containsKey(processorId)) {
+        ProcessorFailure failure = processorFailures.get(processorId);
+        currentFailCount = failure.getCount() + 1;
+        Duration lastRetryDelay =
Review comment:
   Nit: Could this `Duration lastRetryDelay = processorFailures.containsKey(processorId)
       ? processorFailures.get(processorId).getLastRetryDelay()
       : Duration.ZERO;` be extracted into a method, since the same expression is also used below on line 574?
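   For illustration, a minimal stand-alone sketch of the suggested extraction. The helper name `getLastRetryDelayOrZero` and the enclosing class are hypothetical, and `ProcessorFailure` is stubbed with only the two getters the diff already uses (`getCount()`, `getLastRetryDelay()`) so the snippet compiles on its own:

   ```java
   import java.time.Duration;
   import java.util.HashMap;
   import java.util.Map;

   // Stub of ProcessorFailure, exposing only the getters seen in the diff;
   // the real class lives in org.apache.samza.clustermanager.
   class ProcessorFailure {
     private final int count;
     private final Duration lastRetryDelay;

     ProcessorFailure(int count, Duration lastRetryDelay) {
       this.count = count;
       this.lastRetryDelay = lastRetryDelay;
     }

     int getCount() {
       return count;
     }

     Duration getLastRetryDelay() {
       return lastRetryDelay;
     }
   }

   class RetryDelaySketch {
     private final Map<String, ProcessorFailure> processorFailures = new HashMap<>();

     // Hypothetical helper replacing the duplicated ternary: returns the delay
     // used for the processor's last retry, or Duration.ZERO if the processor
     // has no recorded failures yet.
     Duration getLastRetryDelayOrZero(String processorId) {
       ProcessorFailure failure = processorFailures.get(processorId);
       return failure == null ? Duration.ZERO : failure.getLastRetryDelay();
     }
   }
   ```

   Both call sites (the one shown in the diff and the one on line 574) could then call `getLastRetryDelayOrZero(processorId)`, keeping the absent-key handling in a single place.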