reswqa commented on code in PR #21388:
URL: https://github.com/apache/flink/pull/21388#discussion_r1054098702


##########
flink-runtime/src/main/java/org/apache/flink/runtime/executiongraph/EdgeManagerBuildUtil.java:
##########
@@ -201,10 +206,25 @@ private static ConsumedPartitionGroup createAndRegisterConsumedPartitionGroupToE
         ConsumedPartitionGroup consumedPartitionGroup =
                 ConsumedPartitionGroup.fromMultiplePartitions(
                         numConsumers, consumedPartitions, intermediateResult.getResultType());
+        finishAllDataProducedPartitions(
+                intermediateResult, consumedPartitions, consumedPartitionGroup);
         registerConsumedPartitionGroupToEdgeManager(consumedPartitionGroup, intermediateResult);
         return consumedPartitionGroup;
     }
 
+    private static void finishAllDataProducedPartitions(
+            IntermediateResult intermediateResult,
+            List<IntermediateResultPartitionID> consumedPartitionIds,
+            ConsumedPartitionGroup consumedPartitionGroup) {
+        for (IntermediateResultPartitionID consumedPartitionId : consumedPartitionIds) {
+            // This is for the dynamic graph case, as the ConsumedPartitionGroup has not
+            // been created yet when the partition is finished.

Review Comment:
   fixed.
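
   For readers of the archived thread: the hunk above ends at the comment line, so the body of the loop is not shown. A minimal sketch of what finishAllDataProducedPartitions is assumed to do for dynamic graphs, where the ConsumedPartitionGroup is created only after some producer partitions may already have finished, could look like the following; getPartitionById, hasDataAllProduced and partitionFinished are assumed names here, not confirmed by the hunk.

       for (IntermediateResultPartitionID consumedPartitionId : consumedPartitionIds) {
           // For dynamic graphs the ConsumedPartitionGroup is created lazily, so a
           // partition that has already produced all of its data must be counted as
           // finished on the freshly created group.
           IntermediateResultPartition partition =
                   intermediateResult.getPartitionById(consumedPartitionId); // assumed lookup
           if (partition.hasDataAllProduced()) { // assumed accessor
               consumedPartitionGroup.partitionFinished(); // assumed to decrement the unfinished counter
           }
       }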



##########
flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/DefaultExecutionGraphConstructionTest.java:
##########
@@ -337,6 +337,159 @@ void testRegisterConsumedPartitionGroupToEdgeManager() throws Exception {
                         partition1.getPartitionId(), partition2.getPartitionId());
     }
 
+    @Test
+    void testPointWiseConsumedPartitionGroupPartitionFinished() throws Exception {
+        JobVertex v1 = new JobVertex("source");
+        JobVertex v2 = new JobVertex("sink");
+
+        v1.setParallelism(4);
+        v2.setParallelism(2);
+
+        v2.connectNewDataSetAsInput(
+                v1, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING);
+
+        List<JobVertex> ordered = new ArrayList<>(Arrays.asList(v1, v2));
+        ExecutionGraph eg = createDefaultExecutionGraph(ordered);
+        eg.attachJobGraph(ordered);
+
+        IntermediateResult result =
+                Objects.requireNonNull(eg.getJobVertex(v1.getID())).getProducedDataSets()[0];
+
+        IntermediateResultPartition partition1 = result.getPartitions()[0];
+        IntermediateResultPartition partition2 = result.getPartitions()[1];
+        IntermediateResultPartition partition3 = result.getPartitions()[2];
+        IntermediateResultPartition partition4 = result.getPartitions()[3];
+
+        ConsumedPartitionGroup consumedPartitionGroup1 =
+                partition1.getConsumedPartitionGroups().get(0);
+
+        ConsumedPartitionGroup consumedPartitionGroup2 =
+                partition4.getConsumedPartitionGroups().get(0);
+
+        assertThat(consumedPartitionGroup1.getNumberOfUnfinishedPartitions()).isEqualTo(2);
+        assertThat(consumedPartitionGroup2.getNumberOfUnfinishedPartitions()).isEqualTo(2);
+        partition1.markFinished();
+        partition2.markFinished();
+        assertThat(consumedPartitionGroup1.getNumberOfUnfinishedPartitions()).isZero();
+        partition3.markFinished();
+        partition4.markFinished();
+        assertThat(consumedPartitionGroup1.getNumberOfUnfinishedPartitions()).isZero();

Review Comment:
   fixed.
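
   For context on the pointwise wiring in this test: with DistributionPattern.POINTWISE and parallelism 4 -> 2, partition1/partition2 feed the first consumer's ConsumedPartitionGroup and partition3/partition4 feed the second, so after partition3 and partition4 are marked finished the natural check is on consumedPartitionGroup2. A sketch of that assertion, assuming this is what the "fixed" reply refers to:

       // Once partition3 and partition4 have finished, the second group should also
       // report zero unfinished partitions.
       assertThat(consumedPartitionGroup2.getNumberOfUnfinishedPartitions()).isZero();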


