the-other-tim-brown commented on code in PR #661:
URL: https://github.com/apache/incubator-xtable/pull/661#discussion_r1986350997
##########
xtable-api/src/main/java/org/apache/xtable/model/storage/InternalDeletionVector.java:
##########
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.xtable.model.storage;
+
+import java.util.Iterator;
+import java.util.function.Supplier;
+
+import lombok.AccessLevel;
+import lombok.EqualsAndHashCode;
+import lombok.Getter;
+import lombok.NonNull;
+import lombok.ToString;
+import lombok.experimental.Accessors;
+import lombok.experimental.FieldDefaults;
+import lombok.experimental.SuperBuilder;
+
+@Accessors(fluent = true)
+@SuperBuilder(toBuilder = true)
+@FieldDefaults(makeFinal = true, level = lombok.AccessLevel.PRIVATE)
+@Getter
+@ToString(callSuper = true)
+@EqualsAndHashCode(callSuper = true)
+public class InternalDeletionVector extends InternalFile {
+  // path (absolute with scheme) of data file to which this deletion vector belongs
+  @NonNull String dataFilePath;
+
+  // super.getFileSizeBytes() is the size of the deletion vector file
+  // super.getPhysicalPath() is the absolute path (with scheme) of the deletion vector file
+  // super.getRecordCount() is the count of records in the deletion vector file
+
+  // offset of deletion vector start in a deletion vector file
+  int offset;
+
+  /**
+   * binary representation of the deletion vector. The consumer can use the {@link
+   * #ordinalsIterator()} to extract the ordinals represented in the binary format.
+   */
+  byte[] binaryRepresentation;

Review Comment:
   Currently, when this field is set to a non-null value, the `ordinalsIterator` is also set. I think it may be cleaner to remove this field and rely directly on the `ordinalsIterator`. Is there something in the future, though, where this may be used directly? My main worry is that future developers implementing support for deletion vectors may eagerly parse the data into this field.
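For illustration, a minimal standalone sketch of the iterator-only shape this comment is suggesting (class, field, and path names here are invented for the example, not taken from the PR): no byte[] is ever stored on the model, so a consumer has no raw data to parse eagerly and can only pull ordinals on demand.

    import java.util.Arrays;
    import java.util.Iterator;
    import java.util.function.Supplier;

    // Hypothetical sketch: the model holds no binary copy of the vector.
    public class LazyDeletionVectorSketch {
      private final String dataFilePath; // data file the deletion vector applies to
      private final int offset; // start of the vector within its file
      private final Supplier<Iterator<Long>> ordinalsSupplier; // parses lazily

      public LazyDeletionVectorSketch(
          String dataFilePath, int offset, Supplier<Iterator<Long>> ordinalsSupplier) {
        this.dataFilePath = dataFilePath;
        this.offset = offset;
        this.ordinalsSupplier = ordinalsSupplier;
      }

      // Each call re-invokes the supplier, which would re-read the vector file.
      public Iterator<Long> ordinalsIterator() {
        return ordinalsSupplier.get();
      }

      public static void main(String[] args) {
        LazyDeletionVectorSketch dv =
            new LazyDeletionVectorSketch(
                "s3://bucket/table/data_file.parquet", // hypothetical path
                634,
                () -> Arrays.asList(45L, 78L, 98L).iterator());
        dv.ordinalsIterator().forEachRemaining(System.out::println); // prints 45, 78, 98
      }
    }

The trade-off is that any consumer needing the raw bytes (e.g. a target format that could copy the vector verbatim) would have to re-serialize from the ordinals, which is the open question raised above.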
##########
xtable-core/src/test/java/org/apache/xtable/delta/TestDeltaActionsConverter.java:
##########
@@ -18,50 +18,176 @@ package org.apache.xtable.delta;
 
-import java.net.URISyntaxException;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.when;
+
 import java.util.Arrays;
+import java.util.Iterator;
+import java.util.UUID;
+
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
-import org.junit.jupiter.api.Assertions;
 import org.junit.jupiter.api.Test;
 import org.mockito.Mockito;
 
 import org.apache.spark.sql.delta.DeltaLog;
 import org.apache.spark.sql.delta.Snapshot;
 import org.apache.spark.sql.delta.actions.AddFile;
 import org.apache.spark.sql.delta.actions.DeletionVectorDescriptor;
+import org.apache.spark.sql.delta.deletionvectors.RoaringBitmapArray;
+import org.apache.spark.sql.delta.deletionvectors.RoaringBitmapArrayFormat;
 import scala.Option;
 
+import org.apache.xtable.model.storage.InternalDeletionVector;
+
 class TestDeltaActionsConverter {
 
+  private final String basePath = "https://container.blob.core.windows.net/tablepath/";
+  private final int size = 372;
+  private final long time = 376;
+  private final boolean dataChange = true;
+  private final String stats = "";
+  private final int cardinality = 42;
+  private final int offset = 634;
+
   @Test
-  void extractDeletionVector() throws URISyntaxException {
+  void extractMissingDeletionVector() {
     DeltaActionsConverter actionsConverter = DeltaActionsConverter.getInstance();
 
-    int size = 123;
-    long time = 234L;
-    boolean dataChange = true;
-    String stats = "";
-    String filePath = "https://container.blob.core.windows.net/tablepath/file_path";
+    String filePath = basePath + "file_path";
     Snapshot snapshot = Mockito.mock(Snapshot.class);
-    DeltaLog deltaLog = Mockito.mock(DeltaLog.class);
 
     DeletionVectorDescriptor deletionVector = null;
     AddFile addFileAction =
         new AddFile(filePath, null, size, time, dataChange, stats, null, deletionVector);
-    Assertions.assertNull(actionsConverter.extractDeletionVectorFile(snapshot, addFileAction));
+    InternalDeletionVector internaldeletionVector =
+        actionsConverter.extractDeletionVector(snapshot, addFileAction);
+    assertNull(internaldeletionVector);
+  }
 
-    deletionVector =
+  @Test
+  void extractDeletionVectorInFileAbsolutePath() {
+    DeltaActionsConverter actionsConverter = spy(DeltaActionsConverter.getInstance());
+
+    String dataFilePath = "data_file";
+    String deleteFilePath = "https://container.blob.core.windows.net/tablepath/delete_file";
+    Snapshot snapshot = Mockito.mock(Snapshot.class);
+
+    DeletionVectorDescriptor deletionVector =
         DeletionVectorDescriptor.onDiskWithAbsolutePath(
-            filePath, size, 42, Option.empty(), Option.empty());
+            deleteFilePath, size, cardinality, Option.apply(offset), Option.empty());
 
-    addFileAction =
-        new AddFile(filePath, null, size, time, dataChange, stats, null, deletionVector);
+    AddFile addFileAction =
+        new AddFile(dataFilePath, null, size, time, dataChange, stats, null, deletionVector);
+
+    Configuration conf = new Configuration();
+    DeltaLog deltaLog = Mockito.mock(DeltaLog.class);
+    when(snapshot.deltaLog()).thenReturn(deltaLog);
+    when(deltaLog.dataPath()).thenReturn(new Path(basePath));
+    when(deltaLog.newDeltaHadoopConf()).thenReturn(conf);
+
+    long[] ordinals = {45, 78, 98};
+    Mockito.doReturn(ordinals)
+        .when(actionsConverter)
+        .parseOrdinalFile(conf, new Path(deleteFilePath), size, offset);

Review Comment:
   Can you pull the common testing setup into a helper method? Similarly, the assertions below can be added to a common method so there are fewer places to update if the assertions need to change due to a new field or something like that.
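As a rough sketch of that refactor (helper names are invented for this example; it assumes the imports already present in this test class plus java.util.List and java.util.ArrayList):

      // Hypothetical helper: one place to build the snapshot/DeltaLog mocks.
      private Snapshot mockSnapshotWithDeltaLog(Configuration conf, String tableBasePath) {
        Snapshot snapshot = Mockito.mock(Snapshot.class);
        DeltaLog deltaLog = Mockito.mock(DeltaLog.class);
        when(snapshot.deltaLog()).thenReturn(deltaLog);
        when(deltaLog.dataPath()).thenReturn(new Path(tableBasePath));
        when(deltaLog.newDeltaHadoopConf()).thenReturn(conf);
        return snapshot;
      }

      // Hypothetical helper: one place to update if InternalDeletionVector gains a field.
      private void assertDeletionVectorMatches(
          InternalDeletionVector actual, String expectedDataFilePath, long[] expectedOrdinals) {
        assertNotNull(actual);
        assertEquals(expectedDataFilePath, actual.dataFilePath());
        List<Long> ordinals = new ArrayList<>();
        actual.ordinalsIterator().forEachRemaining(ordinals::add);
        assertArrayEquals(
            expectedOrdinals, ordinals.stream().mapToLong(Long::longValue).toArray());
      }

Each test would then reduce to building the AddFile, calling extractDeletionVector, and invoking the shared assertion helper.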
##########
xtable-core/src/test/java/org/apache/xtable/delta/ITDeltaDeleteVectorConvert.java:
##########
@@ -133,40 +159,48 @@ public void testInsertsUpsertsAndDeletes() {
         .collect(Collectors.toList());
     rowsToDelete.addAll(rows.subList(35, 45));
     testSparkDeltaTable.deleteRows(rowsToDelete);
-    allActiveFiles.add(testSparkDeltaTable.getAllActiveFiles());
+    tableFiles = collectActiveFilesAfterCommit(testSparkDeltaTable);
+    testTableStates.add(new TableState(tableFiles, rowsToDelete));
+    validateDeletedRecordCount(testSparkDeltaTable.getDeltaLog(), 2, 15);
     assertEquals(135L, testSparkDeltaTable.getNumRows());
-    validateDeletedRecordCount(testSparkDeltaTable.getDeltaLog(), allActiveFiles.size() + 1, 2, 15);
 
     testSparkDeltaTable.insertRows(50);
-    allActiveFiles.add(testSparkDeltaTable.getAllActiveFiles());
+    tableFiles = collectActiveFilesAfterCommit(testSparkDeltaTable);
+    testTableStates.add(new TableState(tableFiles));
+    validateDeletedRecordCount(testSparkDeltaTable.getDeltaLog(), 2, 15);
     assertEquals(185L, testSparkDeltaTable.getNumRows());
 
     // delete a few rows from a file which already has a deletion vector, this should generate a
     // merged deletion vector file. Some rows were already deleted in the previous delete step.
     // This deletion step intentionally deletes the same rows again to test the merge.
     rowsToDelete = rows1.subList(5, 15);
     testSparkDeltaTable.deleteRows(rowsToDelete);
-    allActiveFiles.add(testSparkDeltaTable.getAllActiveFiles());
+    tableFiles = collectActiveFilesAfterCommit(testSparkDeltaTable);
+    testTableStates.add(new TableState(tableFiles, rowsToDelete));
+    validateDeletedRecordCount(testSparkDeltaTable.getDeltaLog(), 2, 22);
     assertEquals(178L, testSparkDeltaTable.getNumRows());
-    validateDeletedRecordCount(testSparkDeltaTable.getDeltaLog(), allActiveFiles.size() + 1, 2, 22);
 
     testSparkDeltaTable.insertRows(50);
-    allActiveFiles.add(testSparkDeltaTable.getAllActiveFiles());
+    tableFiles = collectActiveFilesAfterCommit(testSparkDeltaTable);
+    testTableStates.add(new TableState(tableFiles));
+    validateDeletedRecordCount(testSparkDeltaTable.getDeltaLog(), 2, 22);
     assertEquals(228L, testSparkDeltaTable.getNumRows());
 
+    String tableBasePath = testSparkDeltaTable.getBasePath();
     SourceTable tableConfig =
         SourceTable.builder()
             .name(testSparkDeltaTable.getTableName())
-            .basePath(testSparkDeltaTable.getBasePath())
+            .basePath(tableBasePath)
             .formatName(TableFormat.DELTA)
             .build();
     DeltaConversionSource conversionSource =
         conversionSourceProvider.getConversionSourceInstance(tableConfig);
     InternalSnapshot internalSnapshot = conversionSource.getCurrentSnapshot();
 
     //    validateDeltaPartitioning(internalSnapshot);

Review Comment:
   Can you remove this comment?



##########
xtable-core/src/test/java/org/apache/xtable/delta/ITDeltaDeleteVectorConvert.java:
##########
@@ -91,11 +99,24 @@ void setUp() {
     conversionSourceProvider.init(hadoopConf);
  }
 
+  private static class TableState {
+    Map<String, AddFile> activeFiles;
+    List<Row> rowsToDelete;

Review Comment:
   This list looks like it is unused; is that intentional?
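If it really is dead code, a trimmed holder (sketch only) drops the field and the constructor argument that feeds it; the call sites using `new TableState(tableFiles, rowsToDelete)` would then switch to the one-argument form. The alternative is to actually use the list for ordinal validation, as sketched further down this thread.

      private static class TableState {
        final Map<String, AddFile> activeFiles;

        TableState(Map<String, AddFile> activeFiles) {
          this.activeFiles = activeFiles;
        }
      }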
##########
xtable-core/src/main/java/org/apache/xtable/delta/DeltaConversionSource.java:
##########
@@ -151,7 +153,7 @@ public TableChange getTableChangeForCommit(Long versionNumber) {
       // entry which is replaced by a new entry, AddFile with delete vector information. Since the
       // same data file is removed and added, we need to remove it from the added and removed file
       // maps which are used to track actual added and removed data files.
-      for (String deletionVector : deletionVectors) {
+      for (String deletionVector : deletionVectors.keySet()) {

Review Comment:
   nitpick: the name `deletionVector` is no longer representative of the actual string. Something like `dataFileForDeletionVector` would be clearer.
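For concreteness, the renamed loop (sketch; the surrounding bookkeeping is elided) would read:

      // Keys of deletionVectors are the paths of the data files the vectors apply to.
      for (String dataFileForDeletionVector : deletionVectors.keySet()) {
        // ... drop the data file from the added/removed maps ...
      }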
##########
xtable-core/src/test/java/org/apache/xtable/delta/ITDeltaDeleteVectorConvert.java:
##########
@@ -179,13 +213,126 @@ public void testInsertsUpsertsAndDeletes() {
       TableChange tableChange = conversionSource.getTableChangeForCommit(version);
       allTableChanges.add(tableChange);
     }
-    ValidationTestHelper.validateTableChanges(allActiveFiles, allTableChanges);
+
+    List<List<String>> allActiveDataFilePaths =
+        testTableStates.stream()
+            .map(s -> s.activeFiles)
+            .map(Map::keySet)
+            .map(ArrayList::new)
+            .collect(Collectors.toList());
+    ValidationTestHelper.validateTableChanges(allActiveDataFilePaths, allTableChanges);
+
+    validateDeletionInfo(testTableStates, allTableChanges);
+  }
+
+  // collects active files in the current snapshot as a map and adds it to the list
+  private Map<String, AddFile> collectActiveFilesAfterCommit(
+      TestSparkDeltaTable testSparkDeltaTable) {
+    Map<String, AddFile> allFiles =
+        testSparkDeltaTable.getAllActiveFilesInfo().stream()
+            .collect(
+                Collectors.toMap(
+                    file -> getAddFileAbsolutePath(file, testSparkDeltaTable.getBasePath()),
+                    file -> file));
+    return allFiles;
+  }
+
+  private void validateDeletionInfo(
+      List<TableState> testTableStates, List<TableChange> allTableChanges) {
+    if (allTableChanges.isEmpty() && testTableStates.size() <= 1) {
+      return;
+    }
+
+    assertEquals(
+        allTableChanges.size(),
+        testTableStates.size() - 1,
+        "Number of table changes should be equal to number of commits - 1");
+
+    for (int i = 0; i < allTableChanges.size() - 1; i++) {
+      Map<String, AddFile> activeFileAfterCommit = testTableStates.get(i + 1).activeFiles;
+      Map<String, AddFile> activeFileBeforeCommit = testTableStates.get(i).activeFiles;
+
+      Map<String, AddFile> activeFilesWithUpdatedDeleteInfo =
+          activeFileAfterCommit.entrySet().stream()
+              .filter(e -> e.getValue().deletionVector() != null)
+              .filter(
+                  entry -> {
+                    if (activeFileBeforeCommit.get(entry.getKey()) == null) {
+                      return true;
+                    }
+                    if (activeFileBeforeCommit.get(entry.getKey()).deletionVector() == null) {
+                      return true;
+                    }
+                    DeletionVectorDescriptor deletionVectorDescriptor =
+                        activeFileBeforeCommit.get(entry.getKey()).deletionVector();
+                    return !deletionVectorDescriptor.equals(entry.getValue().deletionVector());
+                  })
+              .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
+
+      if (activeFilesWithUpdatedDeleteInfo.isEmpty()) {
+        continue;
+      }
+
+      // validate all new delete vectors are correctly detected
+      validateDeletionInfoForCommit(
+          testTableStates.get(i + 1), activeFilesWithUpdatedDeleteInfo, allTableChanges.get(i));
+    }
+  }
+
+  private void validateDeletionInfoForCommit(
+      TableState tableState,
+      Map<String, AddFile> activeFilesAfterCommit,
+      TableChange changeDetectedForCommit) {
+    Map<String, InternalDeletionVector> detectedDeleteInfos =
+        changeDetectedForCommit.getFilesDiff().getFilesAdded().stream()
+            .filter(file -> file instanceof InternalDeletionVector)
+            .map(file -> (InternalDeletionVector) file)
+            .collect(Collectors.toMap(InternalDeletionVector::dataFilePath, file -> file));
+
+    Map<String, AddFile> filesWithDeleteVectors =
+        activeFilesAfterCommit.entrySet().stream()
+            .filter(file -> file.getValue().deletionVector() != null)
+            .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
+
+    assertEquals(filesWithDeleteVectors.size(), detectedDeleteInfos.size());
+
+    for (Map.Entry<String, AddFile> fileWithDeleteVector : filesWithDeleteVectors.entrySet()) {
+      InternalDeletionVector deleteInfo = detectedDeleteInfos.get(fileWithDeleteVector.getKey());
+      assertNotNull(deleteInfo);
+      DeletionVectorDescriptor deletionVectorDescriptor =
+          fileWithDeleteVector.getValue().deletionVector();
+      assertEquals(deletionVectorDescriptor.cardinality(), deleteInfo.getRecordCount());
+      assertEquals(deletionVectorDescriptor.sizeInBytes(), deleteInfo.getFileSizeBytes());
+      assertEquals(deletionVectorDescriptor.offset().get(), deleteInfo.offset());
+
+      String deletionFilePath =
+          deletionVectorDescriptor
+              .absolutePath(new org.apache.hadoop.fs.Path(testSparkDeltaTable.getBasePath()))
+              .toString();
+      assertEquals(deletionFilePath, deleteInfo.getPhysicalPath());
+
+      Iterator<Long> iterator = deleteInfo.ordinalsIterator();
+      List<Long> deletes = new ArrayList<>();
+      iterator.forEachRemaining(deletes::add);
+      assertEquals(deletes.size(), deleteInfo.getRecordCount());

Review Comment:
   Should we also validate that the ordinals are correct here?
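A sketch of what that could look like, inserted after the record-count assertion above. The invariant checks are safe as-is (assuming a static import of assertTrue); expectedOrdinalsFor is a hypothetical helper the test would need to supply, e.g. by mapping tableState.rowsToDelete back to row positions in the data file:

      // Ordinals from a roaring bitmap iterate in ascending order, so they
      // should be non-negative and strictly increasing.
      for (int j = 0; j < deletes.size(); j++) {
        assertTrue(deletes.get(j) >= 0);
        if (j > 0) {
          assertTrue(deletes.get(j - 1) < deletes.get(j));
        }
      }
      // Hypothetical full check against the rows the test actually deleted,
      // sorted ascending to match the iteration order.
      List<Long> expectedOrdinals =
          expectedOrdinalsFor(tableState, fileWithDeleteVector.getKey());
      assertEquals(expectedOrdinals, deletes);

This would also give the currently unused tableState parameter (and TableState.rowsToDelete field, flagged elsewhere in this thread) a purpose.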
##########
xtable-core/src/test/java/org/apache/xtable/delta/ITDeltaDeleteVectorConvert.java:
##########
@@ -179,13 +213,126 @@ public void testInsertsUpsertsAndDeletes() {
       TableChange tableChange = conversionSource.getTableChangeForCommit(version);
       allTableChanges.add(tableChange);
     }
-    ValidationTestHelper.validateTableChanges(allActiveFiles, allTableChanges);
+
+    List<List<String>> allActiveDataFilePaths =
+        testTableStates.stream()
+            .map(s -> s.activeFiles)
+            .map(Map::keySet)
+            .map(ArrayList::new)
+            .collect(Collectors.toList());
+    ValidationTestHelper.validateTableChanges(allActiveDataFilePaths, allTableChanges);
+
+    validateDeletionInfo(testTableStates, allTableChanges);
+  }
+
+  // collects active files in the current snapshot as a map and adds it to the list
+  private Map<String, AddFile> collectActiveFilesAfterCommit(
+      TestSparkDeltaTable testSparkDeltaTable) {
+    Map<String, AddFile> allFiles =
+        testSparkDeltaTable.getAllActiveFilesInfo().stream()
+            .collect(
+                Collectors.toMap(
+                    file -> getAddFileAbsolutePath(file, testSparkDeltaTable.getBasePath()),
+                    file -> file));
+    return allFiles;
+  }
+
+  private void validateDeletionInfo(
+      List<TableState> testTableStates, List<TableChange> allTableChanges) {
+    if (allTableChanges.isEmpty() && testTableStates.size() <= 1) {
+      return;
+    }
+
+    assertEquals(
+        allTableChanges.size(),
+        testTableStates.size() - 1,
+        "Number of table changes should be equal to number of commits - 1");
+
+    for (int i = 0; i < allTableChanges.size() - 1; i++) {
+      Map<String, AddFile> activeFileAfterCommit = testTableStates.get(i + 1).activeFiles;
+      Map<String, AddFile> activeFileBeforeCommit = testTableStates.get(i).activeFiles;
+
+      Map<String, AddFile> activeFilesWithUpdatedDeleteInfo =
+          activeFileAfterCommit.entrySet().stream()
+              .filter(e -> e.getValue().deletionVector() != null)
+              .filter(
+                  entry -> {
+                    if (activeFileBeforeCommit.get(entry.getKey()) == null) {
+                      return true;
+                    }
+                    if (activeFileBeforeCommit.get(entry.getKey()).deletionVector() == null) {
+                      return true;
+                    }
+                    DeletionVectorDescriptor deletionVectorDescriptor =
+                        activeFileBeforeCommit.get(entry.getKey()).deletionVector();
+                    return !deletionVectorDescriptor.equals(entry.getValue().deletionVector());
+                  })
+              .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
+
+      if (activeFilesWithUpdatedDeleteInfo.isEmpty()) {
+        continue;
+      }
+
+      // validate all new delete vectors are correctly detected
+      validateDeletionInfoForCommit(
+          testTableStates.get(i + 1), activeFilesWithUpdatedDeleteInfo, allTableChanges.get(i));
+    }
+  }
+
+  private void validateDeletionInfoForCommit(
+      TableState tableState,

Review Comment:
   This is unused in the method; is that intentional?


--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]
