rdblue commented on code in PR #15006:
URL: https://github.com/apache/iceberg/pull/15006#discussion_r2719142559
##########
core/src/main/java/org/apache/iceberg/MergingSnapshotProducer.java:
##########
@@ -1073,6 +1088,125 @@ private List<ManifestFile> newDeleteFilesAsManifests() {
return cachedNewDeleteManifests;
}
+ // Merges duplicates; internally updates newDeleteFilesBySpec to remove the
+ // duplicates and add the newly merged DV
+ private void mergeDVsAndWrite() {
+ Map<String, DeleteFileSet> dataFilesWithDuplicateDVs =
+ dvsByReferencedFile.entrySet().stream()
+ .filter(entry -> entry.getValue().size() > 1)
+ .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
+
+ List<MergedDVContent> mergedDVs = Collections.synchronizedList(Lists.newArrayList());
+ Tasks.foreach(dataFilesWithDuplicateDVs.entrySet())
+ .executeWith(ThreadPools.getDeleteWorkerPool())
+ .stopOnFailure()
+ .throwFailureWhenFinished()
+ .run(
+ entry -> {
+ String referencedLocation = entry.getKey();
+ DeleteFileSet duplicateDVs = entry.getValue();
+ mergedDVs.add(mergePositions(referencedLocation, duplicateDVs));
+ });
+
+ // Update newDeleteFilesBySpec to remove all the duplicates
+ mergedDVs.forEach(
+ mergedDV -> newDeleteFilesBySpec.get(mergedDV.specId).removeAll(mergedDV.duplicateDVs));
+
+ writeMergedDVs(mergedDVs);
+ }
+
+ // Produces a Puffin per partition spec containing the merged DVs for that spec
+ private void writeMergedDVs(List<MergedDVContent> mergedDVs) {
+ try (DVFileWriter dvFileWriter =
+ new BaseDVFileWriter(
+ // Use an unpartitioned spec for the location provider for the puffin containing
+ // all the merged DVs
+ OutputFileFactory.builderFor(ops(), PartitionSpec.unpartitioned(), FileFormat.PUFFIN, 1, 1)
+ .build(),
+ path -> null)) {
+
+ for (MergedDVContent mergedDV : mergedDVs) {
+ LOG.warn(
+ "Merged {} duplicate deletion vectors for data file {} in table
{}. The duplicate DVs are orphaned, and writers should merge DVs per file
before committing",
+ mergedDV.duplicateDVs.size(),
+ mergedDV.referencedLocation,
+ tableName);
+ dvFileWriter.delete(
+ mergedDV.referencedLocation,
+ mergedDV.mergedPositions,
+ spec(mergedDV.specId),
+ mergedDV.partition);
+ }
+
+ dvFileWriter.close();
+ DeleteWriteResult result = dvFileWriter.result();
+ result.deleteFiles().forEach(this::addPendingDelete);
+ } catch (IOException e) {
+ throw new UncheckedIOException(e);
+ }
+ }
+
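+ // Registers a newly written merged DV as a pending delete under its spec, keeping its data sequence number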
+ private void addPendingDelete(DeleteFile file) {
+ newDeleteFilesBySpec
+ .get(file.specId())
+ .add(Delegates.pendingDeleteFile(file, file.dataSequenceNumber()));
+ }
+
+ // Data class for the referenced file, the duplicate DVs, the merged position delete index,
+ // the partition spec ID and the partition tuple
+ private static class MergedDVContent {
Review Comment:
This seems a bit heavy to me. I'd combine this with whatever cache mechanism
you end up using.
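For illustration only, a rough sketch of the direction I mean, assuming the pending DVs are cached per referenced data file location and the merged positions can be carried as a `PositionDeleteIndex`; the names here (`PendingDVCache`, `Entry`) are hypothetical and not part of this PR:

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

import org.apache.iceberg.DeleteFile;
import org.apache.iceberg.StructLike;
import org.apache.iceberg.deletes.PositionDeleteIndex;
import org.apache.iceberg.util.DeleteFileSet;

// Hypothetical sketch: fold the merged-DV state into the per-file cache entry
// instead of carrying a separate MergedDVContent data class.
class PendingDVCache {
  // One entry per referenced data file location
  private final Map<String, Entry> entriesByDataFile = new ConcurrentHashMap<>();

  static class Entry {
    final DeleteFileSet dvs = DeleteFileSet.create(); // pending DVs for this data file
    int specId;
    StructLike partition;
    PositionDeleteIndex mergedPositions; // filled in lazily when duplicates are merged

    boolean hasDuplicates() {
      return dvs.size() > 1;
    }
  }

  void add(String referencedDataFile, DeleteFile dv) {
    Entry entry = entriesByDataFile.computeIfAbsent(referencedDataFile, ignored -> new Entry());
    entry.dvs.add(dv);
    entry.specId = dv.specId();
    entry.partition = dv.partition();
  }
}
```

That way the merge step only fills in `mergedPositions` on the entry it already has, instead of building a parallel list of content objects.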
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]