zentol commented on a change in pull request #10362: 
[FLINK-14792][coordination] Implement TE cluster partition release
URL: https://github.com/apache/flink/pull/10362#discussion_r397693621
 
 

 ##########
 File path: flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/TaskExecutorPartitionTrackerImpl.java
 ##########
 @@ -76,39 +78,58 @@ public void promoteJobPartitions(Collection<ResultPartitionID> partitionsToPromo
 
                final Collection<PartitionTrackerEntry<JobID, TaskExecutorPartitionInfo>> partitionTrackerEntries = stopTrackingPartitions(partitionsToPromote);
 
-               final Map<TaskExecutorPartitionInfo, Set<ResultPartitionID>> newClusterPartitions = partitionTrackerEntries.stream()
-                       .collect(Collectors.groupingBy(
-                               PartitionTrackerEntry::getMetaInfo,
-                               Collectors.mapping(PartitionTrackerEntry::getResultPartitionId, Collectors.toSet())));
-
-               newClusterPartitions.forEach(
-                       (dataSetMetaInfo, newPartitionEntries) -> clusterPartitions.compute(dataSetMetaInfo, (ignored, existingPartitions) -> {
-                               if (existingPartitions == null) {
-                                       return newPartitionEntries;
+               partitionTrackerEntries.forEach(
+                       partitionTrackerEntry -> clusterPartitions.compute(partitionTrackerEntry.getMetaInfo().getIntermediateDataSetId(), (key, existingEntry) -> {
+                               if (existingEntry == null) {
+                                       final Set<ResultPartitionID> newSet = new HashSet<>();
+                                       newSet.add(partitionTrackerEntry.getResultPartitionId());
+                                       return new PartitionEntry(newSet, partitionTrackerEntry.getMetaInfo().getNumberOfPartitions());
                                } else {
-                                       existingPartitions.addAll(newPartitionEntries);
-                                       return existingPartitions;
+                                       existingEntry.addPartition(partitionTrackerEntry.getResultPartitionId());
+                                       return existingEntry;
                                }
-                       }));
+                       })
+               );
        }
 
        @Override
        public void stopTrackingAndReleaseAllClusterPartitions() {
-               clusterPartitions.values().forEach(shuffleEnvironment::releasePartitionsLocally);
+               clusterPartitions.values().stream().map(PartitionEntry::getPartitionIds).forEach(shuffleEnvironment::releasePartitionsLocally);
                clusterPartitions.clear();
        }
 
        @Override
        public ClusterPartitionReport createClusterPartitionReport() {
-               List<ClusterPartitionReport.ClusterPartitionReportEntry> collect = clusterPartitions.entrySet().stream().map(entry -> {
-                       TaskExecutorPartitionInfo dataSetMetaInfo = entry.getKey();
-                       Set<ResultPartitionID> partitionsIds = entry.getValue();
-                       return new ClusterPartitionReport.ClusterPartitionReportEntry(
-                               dataSetMetaInfo.getIntermediateDataSetId(),
-                               partitionsIds,
-                               dataSetMetaInfo.getNumberOfPartitions());
-               }).collect(Collectors.toList());
-
-               return new ClusterPartitionReport(collect);
+               List<ClusterPartitionReport.ClusterPartitionReportEntry> reportEntries = clusterPartitions.entrySet().stream().map(entry ->
+                       new ClusterPartitionReport.ClusterPartitionReportEntry(
+                               entry.getKey(),
+                               entry.getValue().getPartitionIds(),
+                               entry.getValue().getTotalNumberOfPartitions()))
+                       .collect(Collectors.toList());
+
+               return new ClusterPartitionReport(reportEntries);
+       }
+
+       private static class PartitionEntry {
 Review comment:
   Renaming to `DataSetEntry`, which is more closely aligned with the `IntermediateDataSetID` key of the containing map.
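   For reference, a rough sketch of what the renamed helper might look like, reconstructed only from the usage visible in the hunk above (the constructor, `addPartition`, `getPartitionIds`, and `getTotalNumberOfPartitions` calls); the actual class in the PR may differ:

   ```java
   // Illustrative sketch only; assumes the imports already present in
   // TaskExecutorPartitionTrackerImpl (java.util.Set, ResultPartitionID).
   private static class DataSetEntry {

           private final Set<ResultPartitionID> partitionIds;
           private final int totalNumberOfPartitions;

           private DataSetEntry(Set<ResultPartitionID> partitionIds, int totalNumberOfPartitions) {
                   this.partitionIds = partitionIds;
                   this.totalNumberOfPartitions = totalNumberOfPartitions;
           }

           // Called from the compute() lambda when an entry for the data set already exists.
           void addPartition(ResultPartitionID resultPartitionId) {
                   partitionIds.add(resultPartitionId);
           }

           Set<ResultPartitionID> getPartitionIds() {
                   return partitionIds;
           }

           int getTotalNumberOfPartitions() {
                   return totalNumberOfPartitions;
           }
   }
   ```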
