StefanRRichter commented on a change in pull request #7009: [FLINK-10712] Support to restore state when using RestartPipelinedRegionStrategy
URL: https://github.com/apache/flink/pull/7009#discussion_r236285508
##########
File path: flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/StateAssignmentOperation.java
##########
@@ -105,13 +106,70 @@ public boolean assignStates() throws Exception {
 				continue;
 			}
-			assignAttemptState(task.getValue(), operatorStates);
+			Set<Integer> executionVertexIndices = new HashSet<>();
+			for (ExecutionVertex executionVertex : task.getValue().getTaskVertices()) {
+				executionVertexIndices.add(executionVertex.getParallelSubtaskIndex());
+			}
+			assignAttemptState(task.getValue(), operatorStates, executionVertexIndices);
+		}
+
+		return true;
+	}
+
+	/**
+	 * Assign states to given execution vertices.
+	 */
+	public boolean assignStates(List<ExecutionVertex> executionVertices) throws Exception {
+		Map<OperatorID, OperatorState> localOperators = new HashMap<>(operatorStates);
+		Map<JobVertexID, ExecutionJobVertex> localTasks = this.tasks;
+
+		checkStateMappingCompleteness(allowNonRestoredState, operatorStates, tasks);
+
+		// get job vertex and its subTaskIndex from given executionVertices.
+		Map<JobVertexID, Set<Integer>> jobVertexIDSetMap = new HashMap<>();
+		for (ExecutionVertex executionVertex : executionVertices) {
+			JobVertexID jobvertexId = executionVertex.getJobvertexId();
+			jobVertexIDSetMap.putIfAbsent(jobvertexId, new HashSet<>());
+			jobVertexIDSetMap.get(jobvertexId).add(executionVertex.getParallelSubtaskIndex());
+		}
+
+		for (Map.Entry<JobVertexID, ExecutionJobVertex> task : localTasks.entrySet()) {
+			final ExecutionJobVertex executionJobVertex = task.getValue();
+
+			// find the states of all operators belonging to this task
+			List<OperatorID> operatorIDs = executionJobVertex.getOperatorIDs();
+			List<OperatorID> altOperatorIDs = executionJobVertex.getUserDefinedOperatorIDs();
+			List<OperatorState> operatorStates = new ArrayList<>();
+			boolean statelessTask = true;
+			for (int x = 0; x < operatorIDs.size(); x++) {
+				OperatorID operatorID = altOperatorIDs.get(x) == null
+					? operatorIDs.get(x)
+					: altOperatorIDs.get(x);
+
+				OperatorState operatorState = localOperators.remove(operatorID);
+				if (operatorState == null) {
+					operatorState = new OperatorState(
+						operatorID,
+						executionJobVertex.getParallelism(),
+						executionJobVertex.getMaxParallelism());
+				} else {
+					statelessTask = false;
+				}
+				operatorStates.add(operatorState);
+			}
+			if (statelessTask) { // skip tasks where no operator has any state
+				continue;
+			}
+
+			if (jobVertexIDSetMap.containsKey(executionJobVertex.getJobVertexId())) {
+				assignAttemptState(executionJobVertex, operatorStates, jobVertexIDSetMap.get(executionJobVertex.getJobVertexId()));
+			}
 		}
 		return true;
 	}
-	private void assignAttemptState(ExecutionJobVertex executionJobVertex, List<OperatorState> operatorStates) {
+	private void assignAttemptState(ExecutionJobVertex executionJobVertex, List<OperatorState> operatorStates, Set<Integer> subTaskIndices) {
Review comment:
I doubt that `Set<Integer>` is the best representation of subtask indexes. At least at the interface level, an `Iterable<Integer>` could do the job if we rewrite the loop as I suggested. Furthermore, we could back this with a more memory-friendly implementation, for example a `boolean[]` or a `BitSet`.
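
For illustration only, a minimal sketch of that idea under my assumptions: a small helper class (the name `SubtaskIndexSet` and its methods are hypothetical, not part of this PR or of Flink) that stores the indexes in a `BitSet` for compact storage but exposes them to callers as a plain `Iterable<Integer>`:

```java
import java.util.BitSet;
import java.util.Iterator;

/**
 * Hypothetical helper (not from the PR): keeps subtask indexes in a BitSet
 * so that only one bit per possible index is stored, while callers only
 * ever see an Iterable<Integer>.
 */
public class SubtaskIndexSet implements Iterable<Integer> {

	private final BitSet indexes = new BitSet();

	/** Records the given subtask index. */
	public void add(int subtaskIndex) {
		indexes.set(subtaskIndex);
	}

	/** Returns true if the given subtask index was recorded. */
	public boolean contains(int subtaskIndex) {
		return indexes.get(subtaskIndex);
	}

	@Override
	public Iterator<Integer> iterator() {
		// BitSet.stream() yields the indexes of all set bits in ascending
		// order; IntStream.iterator() adapts that to an Iterator<Integer>.
		return indexes.stream().iterator();
	}
}
```

With something like that, the signature could become `assignAttemptState(ExecutionJobVertex, List<OperatorState>, Iterable<Integer>)`, and the loop would only visit the indexes that are actually set instead of keeping a boxed `Integer` per subtask in a `HashSet`.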