zhuanshenbsj1 commented on code in PR #7159:
URL: https://github.com/apache/hudi/pull/7159#discussion_r1115286599
##########
hudi-client/hudi-flink-client/src/main/java/org/apache/hudi/client/clustering/plan/strategy/FlinkSizeBasedClusteringPlanStrategy.java:
##########
@@ -63,27 +63,41 @@ protected Stream<HoodieClusteringGroup> buildClusteringGroupsForPartition(String
     List<Pair<List<FileSlice>, Integer>> fileSliceGroups = new ArrayList<>();
     List<FileSlice> currentGroup = new ArrayList<>();
+
+    // Sort fileSlices before dividing, which makes dividing more compact
+    List<FileSlice> sortedFileSlices = new ArrayList<>(fileSlices);
+    sortedFileSlices.sort((o1, o2) -> (int)
+        ((o2.getBaseFile().isPresent() ? o2.getBaseFile().get().getFileSize() : writeConfig.getParquetMaxFileSize())
+            - (o1.getBaseFile().isPresent() ? o1.getBaseFile().get().getFileSize() : writeConfig.getParquetMaxFileSize())));
+
     long totalSizeSoFar = 0;
-    for (FileSlice currentSlice : fileSlices) {
+    for (FileSlice currentSlice : sortedFileSlices) {
+      long currentSize = currentSlice.getBaseFile().isPresent()
+          ? currentSlice.getBaseFile().get().getFileSize() : writeConfig.getParquetMaxFileSize();
       // check if max size is reached and create new group, if needed.
-      // in now, every clustering group out put is 1 file group.
-      if (totalSizeSoFar >= writeConfig.getClusteringTargetFileMaxBytes() && !currentGroup.isEmpty()) {
+      if (totalSizeSoFar + currentSize > writeConfig.getClusteringMaxBytesInGroup() && !currentGroup.isEmpty()) {
+        int numOutputGroups = getNumberOfOutputFileGroups(totalSizeSoFar, writeConfig.getClusteringTargetFileMaxBytes());
         LOG.info("Adding one clustering group " + totalSizeSoFar + " max bytes: "
-            + writeConfig.getClusteringMaxBytesInGroup() + " num input slices: " + currentGroup.size());
-        fileSliceGroups.add(Pair.of(currentGroup, 1));
+            + writeConfig.getClusteringMaxBytesInGroup() + " num input slices: " + currentGroup.size() + " output groups: " + numOutputGroups);
+        fileSliceGroups.add(Pair.of(currentGroup, numOutputGroups));
         currentGroup = new ArrayList<>();
         totalSizeSoFar = 0;
       }
       // Add to the current file-group
       currentGroup.add(currentSlice);
       // assume each file group size is ~= parquet.max.file.size
-      totalSizeSoFar += currentSlice.getBaseFile().isPresent()
-          ? currentSlice.getBaseFile().get().getFileSize() : writeConfig.getParquetMaxFileSize();
+      totalSizeSoFar += currentSize;
     }
     if (!currentGroup.isEmpty()) {
-      fileSliceGroups.add(Pair.of(currentGroup, 1));
+      if (currentGroup.size() == 1 && !writeConfig.shouldClusteringSingleGroup()) {
+        return Stream.empty();
+      }
Review Comment:
Resolved.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]