hemantk-12 commented on code in PR #4678: URL: https://github.com/apache/ozone/pull/4678#discussion_r1220130423
########## hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/SnapshotTestUtils.java: ########## @@ -0,0 +1,177 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * <p> + * http://www.apache.org/licenses/LICENSE-2.0 + * <p> + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.om.snapshot; + +import org.apache.hadoop.util.ClosableIterator; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Comparator; +import java.util.Iterator; +import java.util.Map; +import java.util.Set; +import java.util.TreeMap; +import java.util.TreeSet; + +/** + * Util classes for Snapshot Persistent DataStructures for tests. + */ +public class SnapshotTestUtils { + + private static <K> String getStringKey(K key) { Review Comment: I don't think it is needed. 
########## hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java: ########## @@ -972,12 +962,10 @@ private void validateEstimatedKeyChangesAreInLimits( } } - private long generateDiffReport( - final String jobId, + long generateDiffReport(final String jobId, final PersistentSet<byte[]> objectIDsToCheck, final PersistentMap<byte[], byte[]> oldObjIdToKeyMap, - final PersistentMap<byte[], byte[]> newObjIdToKeyMap - ) { + final PersistentMap<byte[], byte[]> newObjIdToKeyMap) { Review Comment: Alignment is not correct. ########## hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java: ########## @@ -0,0 +1,499 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * <p> + * http://www.apache.org/licenses/LICENSE-2.0 + * <p> + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.ozone.om.snapshot; + +import com.google.common.cache.CacheBuilder; +import com.google.common.cache.CacheLoader; +import com.google.common.cache.LoadingCache; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; +import org.apache.commons.lang3.RandomStringUtils; +import org.apache.hadoop.hdds.client.ECReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.NativeLibraryNotLoadedException; +import org.apache.hadoop.hdds.utils.db.CodecRegistry; +import org.apache.hadoop.hdds.utils.db.DBStore; +import org.apache.hadoop.hdds.utils.db.IntegerCodec; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions; +import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; +import org.apache.hadoop.hdfs.DFSUtilClient; +import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.OmSnapshot; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.helpers.WithObjectID; +import org.apache.hadoop.ozone.snapshot.SnapshotDiffReportOzone; +import org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse; +import org.apache.ozone.rocksdb.util.ManagedSstFileReader; +import org.apache.ozone.rocksdb.util.RdbUtil; +import org.apache.ozone.rocksdiff.DifferSnapshotInfo; +import org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer; +import org.apache.ozone.rocksdiff.RocksDiffUtils; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import 
org.junit.jupiter.api.extension.ExtendWith; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvSource; +import org.junit.jupiter.params.provider.ValueSource; +import org.mockito.Matchers; +import org.mockito.Mock; +import org.mockito.MockedConstruction; +import org.mockito.MockedStatic; +import org.mockito.Mockito; +import org.mockito.junit.jupiter.MockitoExtension; +import org.mockito.junit.jupiter.MockitoSettings; +import org.mockito.quality.Strictness; +import org.mockito.stubbing.Answer; +import org.rocksdb.ColumnFamilyHandle; +import org.rocksdb.RocksDB; +import org.rocksdb.RocksDBException; +import org.rocksdb.RocksIterator; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ExecutionException; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import java.util.stream.LongStream; + +/** + * Test class for SnapshotDiffManager Class. + */ +@ExtendWith(MockitoExtension.class) +@MockitoSettings(strictness = Strictness.LENIENT) +public class TestSnapshotDiffManager { + + @Mock + private ManagedRocksDB snapdiffDB; + + @Mock + private RocksDBCheckpointDiffer differ; + + @Mock + private OzoneManager ozoneManager; + + private LoadingCache<String, OmSnapshot> snapshotCache; + + @Mock + private ColumnFamilyHandle snapdiffJobCFH; + + @Mock + private ColumnFamilyHandle snapdiffReportCFH; + + @Mock + private ManagedColumnFamilyOptions columnFamilyOptions; + + @Mock + private RocksDB rocksDB; + + @Mock + private RocksIterator jobTableIterator; + + private static CodecRegistry codecRegistry; + + @BeforeAll + public static void initCodecRegistry() { + // Integers are used for indexing persistent list. 
+ codecRegistry = CodecRegistry.newBuilder() + .addCodec(Integer.class, new IntegerCodec()) + .addCodec(SnapshotDiffReportOzone.DiffReportEntry.class, + SnapshotDiffReportOzone.getDiffReportEntryCodec()) + .addCodec(SnapshotDiffJob.class, + new SnapshotDiffJob.SnapshotDiffJobCodec()).build(); + } + + private DBStore getMockedDBStore(String dbStorePath) { + DBStore dbStore = Mockito.mock(DBStore.class); + Mockito.when(dbStore.getDbLocation()).thenReturn(new File(dbStorePath)); + return dbStore; + } + + private OmSnapshot getMockedOmSnapshot(String snapshot) { + OmSnapshot omSnapshot = Mockito.mock(OmSnapshot.class); + OMMetadataManager metadataManager = Mockito.mock(OMMetadataManager.class); + DBStore dbStore = getMockedDBStore(snapshot); + Mockito.when(omSnapshot.getName()).thenReturn(snapshot); + Mockito.when(omSnapshot.getMetadataManager()).thenReturn(metadataManager); + Mockito.when(metadataManager.getStore()).thenReturn(dbStore); + return omSnapshot; + } + + private SnapshotDiffManager getMockedSnapshotDiffManager(int cacheSize) { + + Mockito.when(snapdiffDB.get()).thenReturn(rocksDB); + Mockito.when(rocksDB.newIterator(snapdiffJobCFH)) + .thenReturn(jobTableIterator); + CacheLoader<String, OmSnapshot> loader = + new CacheLoader<String, OmSnapshot>() { + @Override + public OmSnapshot load(String key) { + return getMockedOmSnapshot(key); + } + }; + snapshotCache = CacheBuilder.newBuilder() + .maximumSize(cacheSize) + .build(loader); + Mockito.when(ozoneManager.getConfiguration()) + .thenReturn(new OzoneConfiguration()); + SnapshotDiffManager snapshotDiffManager = Mockito.spy( + new SnapshotDiffManager(snapdiffDB, differ, ozoneManager, snapshotCache, + snapdiffJobCFH, snapdiffReportCFH, columnFamilyOptions, + codecRegistry)); + return snapshotDiffManager; + } + + private SnapshotInfo getMockedSnapshotInfo(String snapshot) { + SnapshotInfo snapshotInfo = Mockito.mock(SnapshotInfo.class); + Mockito.when(snapshotInfo.getSnapshotID()).thenReturn(snapshot); + return 
snapshotInfo; + } + + @ParameterizedTest + @ValueSource(ints = {1, 2, 5, 10, 100, 1000, 10000}) + public void testGetDeltaFilesWithDag(int numberOfFiles) + throws ExecutionException, RocksDBException, IOException { + + SnapshotDiffManager snapshotDiffManager = getMockedSnapshotDiffManager(10); + String snap1 = "snap1"; + String snap2 = "snap2"; + + String diffDir = Files.createTempDirectory("snapdiff_dir").toString(); + Set<String> randomStrings = IntStream.range(0, numberOfFiles) + .mapToObj(i -> RandomStringUtils.randomAlphabetic(10)) + .collect(Collectors.toSet()); + Mockito.when(differ.getSSTDiffListWithFullPath(Mockito.any(), + Mockito.any(), Mockito.eq(diffDir))) + .thenReturn(Lists.newArrayList(randomStrings)); + SnapshotInfo fromSnapshotInfo = getMockedSnapshotInfo(snap1); + SnapshotInfo toSnapshotInfo = getMockedSnapshotInfo(snap1); + Mockito.when(jobTableIterator.isValid()).thenReturn(false); + Set<String> deltaFiles = snapshotDiffManager.getDeltaFiles( + snapshotCache.get(snap1), snapshotCache.get(snap2), + Arrays.asList("cf1", "cf2"), fromSnapshotInfo, toSnapshotInfo, false, + Collections.EMPTY_MAP, diffDir); + Assertions.assertEquals(randomStrings, deltaFiles); + } + + @ParameterizedTest + @CsvSource({"0,true", "1,true", "2,true", "5,true", "10,true", "100,true", + "1000,true", "10000,true", "0,false", "1,false", "2,false", "5,false", + "10,false", "100,false", "1000,false", "10000,false"}) + public void testGetDeltaFilesWithFullDiff(int numberOfFiles, + boolean useFullDiff) + throws ExecutionException, RocksDBException, IOException { + try (MockedStatic<RdbUtil> mockedRdbUtil = + Mockito.mockStatic(RdbUtil.class); + MockedStatic<RocksDiffUtils> mockedRocksDiffUtils = + Mockito.mockStatic(RocksDiffUtils.class)) { + Set<String> deltaStrings = new HashSet<>(); + mockedRdbUtil.when( + () -> RdbUtil.getSSTFilesForComparison(Matchers.anyString(), + Matchers.anyList())) + .thenAnswer((Answer<Set<String>>) invocation -> { + Set<String> retVal = 
IntStream.range(0, numberOfFiles) + .mapToObj(i -> RandomStringUtils.randomAlphabetic(10)) + .collect(Collectors.toSet()); + deltaStrings.addAll(retVal); + return retVal; + }); + mockedRocksDiffUtils.when(() -> RocksDiffUtils.filterRelevantSstFiles( + Matchers.anySet(), Matchers.anyMap())) + .thenAnswer((Answer<Void>) invocationOnMock -> { + invocationOnMock.getArgument(0, Set.class).stream() + .findAny().ifPresent(val -> { + Assertions.assertTrue(deltaStrings.contains(val)); + invocationOnMock.getArgument(0, Set.class).remove(val); + deltaStrings.remove(val); + }); + return null; + }); + SnapshotDiffManager snapshotDiffManager = + getMockedSnapshotDiffManager(10); + String snap1 = "snap1"; + String snap2 = "snap2"; + if (!useFullDiff) { + Set<String> randomStrings = Collections.emptySet(); + Mockito.when(differ.getSSTDiffListWithFullPath( + Mockito.any(DifferSnapshotInfo.class), + Mockito.any(DifferSnapshotInfo.class), + Matchers.anyString())) + .thenReturn(Lists.newArrayList(randomStrings)); + } + SnapshotInfo fromSnapshotInfo = getMockedSnapshotInfo(snap1); + SnapshotInfo toSnapshotInfo = getMockedSnapshotInfo(snap1); + Mockito.when(jobTableIterator.isValid()).thenReturn(false); + Set<String> deltaFiles = snapshotDiffManager.getDeltaFiles( + snapshotCache.get(snap1), snapshotCache.get(snap2), + Arrays.asList("cf1", "cf2"), fromSnapshotInfo, toSnapshotInfo, false, + Collections.EMPTY_MAP, Files.createTempDirectory("snapdiff_dir") + .toAbsolutePath().toString()); + Assertions.assertEquals(deltaStrings, deltaFiles); + } + } + + private Table<String, ? extends WithObjectID> getMockedTable( + Map<String, WithObjectID> map, String tableName) + throws IOException { + Table<String, ? 
extends WithObjectID> mocked = Mockito.mock(Table.class); + Mockito.when(mocked.get(Matchers.any())) + .thenAnswer(invocation -> map.get(invocation.getArgument(0))); + Mockito.when(mocked.getName()).thenReturn(tableName); + return mocked; + } + + private WithObjectID getObjectID(int objectId, int updateId, + String snapshotTableName) { + String name = "key" + objectId; + if (snapshotTableName.equals(OmMetadataManagerImpl.DIRECTORY_TABLE)) { + return OmDirectoryInfo.newBuilder() + .setObjectID(objectId).setName(name).build(); + } + return new OmKeyInfo.Builder().setObjectID(objectId) + .setVolumeName("vol").setBucketName("bucket").setUpdateID(updateId) + .setReplicationConfig(new ECReplicationConfig(3, 2)) + .setKeyName(name).build(); + } + + /** + * Test mocks the SSTFileReader to return object Ids from 0-50 + * when not reading tombstones & Object Ids 0-100 when reading tombstones. + * Creating a mock snapshot table where the from Snapshot Table contains + * Object Ids in the range 0-25 & 50-100 and to Snaphshot Table contains data + * with object Ids in the range 0-50. + * Function should return 25-50 in the new Persistent map. 
+ * In the case of reading tombstones old Snapshot Persistent map should have + * object Ids in the range 50-100 & should be empty otherwise + * + * @param nativeLibraryLoaded + * @param snapshotTableName + * @throws NativeLibraryNotLoadedException + * @throws IOException + */ + @SuppressFBWarnings({"DLS_DEAD_LOCAL_STORE", + "RV_RETURN_VALUE_IGNORED_NO_SIDE_EFFECT"}) + @ParameterizedTest + @CsvSource({"false," + OmMetadataManagerImpl.DIRECTORY_TABLE, + "true," + OmMetadataManagerImpl.DIRECTORY_TABLE, + "false," + OmMetadataManagerImpl.FILE_TABLE, + "true," + OmMetadataManagerImpl.FILE_TABLE, + "false," + OmMetadataManagerImpl.KEY_TABLE, + "true," + OmMetadataManagerImpl.KEY_TABLE}) + public void testObjectIdMapWithTombstoneEntries(boolean nativeLibraryLoaded, + String snapshotTableName) + throws NativeLibraryNotLoadedException, IOException, RocksDBException { + Set<String> keysWithTombstones = IntStream.range(0, 100) + .boxed().map(i -> "key" + i).collect(Collectors.toSet()); + Set<String> keys = IntStream.range(0, 50).boxed() + .map(i -> "key" + i).collect(Collectors.toSet()); + try (MockedConstruction<ManagedSstFileReader> mocked = + Mockito.mockConstruction(ManagedSstFileReader.class, + (mock, context) -> { + Mockito.when(mock.getKeyStreamWithTombstone(Matchers.any())) + .thenReturn(keysWithTombstones.stream()); + Mockito.when(mock.getKeyStream()) + .thenReturn(keys.stream()); + })) { + Map<String, WithObjectID> toSnapshotTableMap = + IntStream.concat(IntStream.range(0, 25), IntStream.range(50, 100)) + .boxed().collect(Collectors.toMap(i -> "key" + i, + i -> getObjectID(i, i, snapshotTableName))); + Table<String, ? extends WithObjectID> toSnapshotTable = + getMockedTable(toSnapshotTableMap, snapshotTableName); + Map<String, WithObjectID> fromSnapshotTableMap = + IntStream.range(0, 50) + .boxed().collect(Collectors.toMap(i -> "key" + i, + i -> getObjectID(i, i, snapshotTableName))); + Table<String, ? 
extends WithObjectID> fromSnapshotTable = + getMockedTable(fromSnapshotTableMap, snapshotTableName); + SnapshotDiffManager snapshotDiffManager = + getMockedSnapshotDiffManager(10); + Mockito.doAnswer((Answer<Boolean>) invocationOnMock -> + Integer.parseInt(invocationOnMock.getArgument(0, String.class) + .substring(3)) % 2 == 0).when(snapshotDiffManager) + .isKeyInBucket(Matchers.anyString(), Matchers.anyMap(), + Matchers.anyString()); + PersistentMap<byte[], byte[]> oldObjectIdKeyMap = + new SnapshotTestUtils.HashPersistentMap<>(); + PersistentMap<byte[], byte[]> newObjectIdKeyMap = + new SnapshotTestUtils.HashPersistentMap<>(); + PersistentSet<byte[]> objectIdsToCheck = + new SnapshotTestUtils.HashPersistentSet<>(); + snapshotDiffManager.addToObjectIdMap(toSnapshotTable, + fromSnapshotTable, Mockito.mock(Set.class), nativeLibraryLoaded, + oldObjectIdKeyMap, newObjectIdKeyMap, objectIdsToCheck, + Maps.newHashMap()); + + Iterator<Map.Entry<byte[], byte[]>> oldObjectIdIter = + oldObjectIdKeyMap.iterator(); + int oldObjectIdCnt = 0; + while (oldObjectIdIter.hasNext()) { + Map.Entry<byte[], byte[]> v = oldObjectIdIter.next(); + long objectId = this.codecRegistry.asObject(v.getKey(), Long.class); + Assertions.assertTrue(objectId % 2 == 0); + Assertions.assertTrue(objectId >= 50); + Assertions.assertTrue(objectId < 100); + oldObjectIdCnt += 1; + } + Assertions.assertEquals(nativeLibraryLoaded ? 
25 : 0, oldObjectIdCnt); + Iterator<Map.Entry<byte[], byte[]>> newObjectIdIter = + newObjectIdKeyMap.iterator(); + int newObjectIdCnt = 0; + while (newObjectIdIter.hasNext()) { + Map.Entry<byte[], byte[]> v = newObjectIdIter.next(); + long objectId = this.codecRegistry.asObject(v.getKey(), Long.class); + Assertions.assertTrue(objectId % 2 == 0); + Assertions.assertTrue(objectId >= 26); + Assertions.assertTrue(objectId < 50); + newObjectIdCnt += 1; + } + Assertions.assertEquals(12, newObjectIdCnt); + + Iterator<byte[]> objectIdsToCheckIter = objectIdsToCheck.iterator(); + int objectIdCnt = 0; + while (objectIdsToCheckIter.hasNext()) { + byte[] v = objectIdsToCheckIter.next(); + long objectId = this.codecRegistry.asObject(v, Long.class); + Assertions.assertTrue(objectId % 2 == 0); + Assertions.assertTrue(objectId >= 26); + Assertions.assertTrue(objectId < (nativeLibraryLoaded ? 100 : 50)); + objectIdCnt += 1; + } + Assertions.assertEquals(nativeLibraryLoaded ? 37 : 12, objectIdCnt); + } + } + + @Test + public void testGenerateDiffReport() throws IOException { + try ( + MockedConstruction<RocksDbPersistentMap> mockedRocksDbPersistentMap = Review Comment: Tests for `RocksDbPersistentMap` are independent of this. I think of it as a check to make sure that `RocksDbPersistentMap` object creation is correct. ########## hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java: ########## @@ -481,7 +479,7 @@ private SnapshotDiffReportOzone createPageResponse( private void checkReportsIntegrity(final SnapshotDiffJob diffJob, final int totalDiffEntries) throws IOException { - if (diffJob.getTotalDiffEntries() != totalDiffEntries) { + if (diffJob.getTotalDiffEntries() > totalDiffEntries) { Review Comment: `!=` was correct because it should be neither lower nor higher. It has to be exactly equal. -- This is an automated message from the Apache Git Service. 
To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: [email protected] For queries about this service, please contact Infrastructure at: [email protected] --------------------------------------------------------------------- To unsubscribe, e-mail: [email protected] For additional commands, e-mail: [email protected]
