hemantk-12 commented on code in PR #4678:
URL: https://github.com/apache/ozone/pull/4678#discussion_r1190309367
##########
hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeLibraryLoader.java:
##########
@@ -98,6 +98,7 @@ public synchronized boolean loadLibrary(final String libraryName) {
if (isLibraryLoaded(libraryName)) {
return true;
}
+ LOG.info("Loading Library: {}", libraryName);
Review Comment:
Did you add this for debugging purposes, or does it have an actual use?
##########
hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdb/util/TestManagedSstFileReader.java:
##########
@@ -0,0 +1,150 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ozone.rocksdb.util;
+
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.apache.commons.lang3.tuple.Pair;
+import org.apache.hadoop.hdds.utils.NativeLibraryNotLoadedException;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedEnvOptions;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedSSTDumpTool;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileWriter;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.ValueSource;
+import org.rocksdb.RocksDBException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.SynchronousQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+import java.util.stream.Stream;
+
+/**
+ * ManagedSstFileReader tests.
+ */
+public class TestManagedSstFileReader {
+
+ private static final Logger LOG =
+ LoggerFactory.getLogger(TestManagedSstFileReader.class);
+
+ private static final String KEY_PREFIX = IntStream.range(0, 256).boxed()
+ .map(i -> String.format("%c", i))
+ .collect(Collectors.joining(""));
+
+ private String createRandomSSTFile(TreeMap<String, Integer> keys)
+ throws IOException, RocksDBException {
+ File file = File.createTempFile("tmp_sst_file", ".sst");
+ file.deleteOnExit();
+
+ try (ManagedOptions managedOptions = new ManagedOptions();
+ ManagedEnvOptions managedEnvOptions = new ManagedEnvOptions();
+ ManagedSstFileWriter sstFileWriter = new ManagedSstFileWriter(
+ managedEnvOptions, managedOptions)) {
+ sstFileWriter.open(file.getAbsolutePath());
+ for (Map.Entry<String, Integer> entry : keys.entrySet()) {
+ if (entry.getValue() == 0) {
+ sstFileWriter.delete(entry.getKey().getBytes(StandardCharsets.UTF_8));
Review Comment:
nit: you can extract `entry.getKey().getBytes(StandardCharsets.UTF_8)` into a
local variable and reuse it in the other places. Also consider using
`StringUtils.string2Bytes()`.
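A minimal sketch of the suggested refactor, assuming `StringUtils` here refers
to `org.apache.hadoop.hdds.StringUtils` (which encodes with UTF-8):
```java
// Sketch only: extract the encoded key once and reuse it in both branches.
for (Map.Entry<String, Integer> entry : keys.entrySet()) {
  byte[] keyBytes = StringUtils.string2Bytes(entry.getKey());
  if (entry.getValue() == 0) {
    sstFileWriter.delete(keyBytes);
  } else {
    sstFileWriter.put(keyBytes, keyBytes);
  }
}
```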
##########
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshot.java:
##########
@@ -115,6 +116,8 @@ public class TestOmSnapshot {
private static OzoneBucket ozoneBucket;
+ private static AtomicInteger counter;
Review Comment:
Is there any particular reason to use `AtomicInteger` instead of
`RandomStringUtils`?
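For illustration, the random-string alternative could look something like this
(hypothetical call site, since the uses of `counter` are not shown in this hunk):
```java
// Hypothetical: a random suffix instead of an incrementing counter.
String bucketName = "bucket-" + RandomStringUtils.randomNumeric(5);
```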
##########
hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java:
##########
@@ -0,0 +1,500 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.snapshot;
+
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.CacheLoader;
+import com.google.common.cache.LoadingCache;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.hdds.client.ECReplicationConfig;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.utils.NativeLibraryNotLoadedException;
+import org.apache.hadoop.hdds.utils.db.CodecRegistry;
+import org.apache.hadoop.hdds.utils.db.DBStore;
+import org.apache.hadoop.hdds.utils.db.IntegerCodec;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB;
+import org.apache.hadoop.hdfs.DFSUtilClient;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
+import org.apache.hadoop.ozone.om.OmSnapshot;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;
+import org.apache.hadoop.ozone.om.helpers.WithObjectID;
+import org.apache.hadoop.ozone.snapshot.SnapshotDiffReportOzone;
+import org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse;
+import org.apache.ozone.rocksdb.util.ManagedSstFileReader;
+import org.apache.ozone.rocksdb.util.RdbUtil;
+import org.apache.ozone.rocksdiff.DifferSnapshotInfo;
+import org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer;
+import org.apache.ozone.rocksdiff.RocksDiffUtils;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.ExtendWith;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.CsvSource;
+import org.junit.jupiter.params.provider.ValueSource;
+import org.mockito.Matchers;
+import org.mockito.Mock;
+import org.mockito.MockedConstruction;
+import org.mockito.MockedStatic;
+import org.mockito.Mockito;
+import org.mockito.junit.jupiter.MockitoExtension;
+import org.mockito.junit.jupiter.MockitoSettings;
+import org.mockito.quality.Strictness;
+import org.mockito.stubbing.Answer;
+import org.rocksdb.ColumnFamilyHandle;
+import org.rocksdb.RocksDB;
+import org.rocksdb.RocksDBException;
+import org.rocksdb.RocksIterator;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ExecutionException;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+import java.util.stream.LongStream;
+
+
+/**
+ * Test class for SnapshotDiffManager Class.
+ */
+@ExtendWith(MockitoExtension.class)
+@MockitoSettings(strictness = Strictness.LENIENT)
+public class TestSnapshotDiffManager {
+
+ @Mock
+ private ManagedRocksDB snapdiffDB;
+
+ @Mock
+ private RocksDBCheckpointDiffer differ;
+
+ @Mock
+ private OzoneManager ozoneManager;
+
+ private LoadingCache<String, OmSnapshot> snapshotCache;
+
+ @Mock
+ private ColumnFamilyHandle snapdiffJobCFH;
+
+ @Mock
+ private ColumnFamilyHandle snapdiffReportCFH;
+
+ @Mock
+ private ManagedColumnFamilyOptions columnFamilyOptions;
+
+ @Mock
+ private RocksDB rocksDB;
+
+ @Mock
+ private RocksIterator jobTableIterator;
+
+ private static CodecRegistry codecRegistry;
+
+ @BeforeAll
+ public static void initCodecRegistry() {
+ // Integers are used for indexing persistent list.
+ codecRegistry = CodecRegistry.newBuilder()
+ .addCodec(Integer.class, new IntegerCodec())
+ .addCodec(SnapshotDiffReportOzone.DiffReportEntry.class,
+ SnapshotDiffReportOzone.getDiffReportEntryCodec())
+ .addCodec(SnapshotDiffJob.class,
+ new SnapshotDiffJob.SnapshotDiffJobCodec()).build();
+ }
+
+ private DBStore getMockedDBStore(String dbStorePath) {
+ DBStore dbStore = Mockito.mock(DBStore.class);
+ Mockito.when(dbStore.getDbLocation()).thenReturn(new File(dbStorePath));
+ return dbStore;
+ }
+
+ private OmSnapshot getMockedOmSnapshot(String snapshot) {
+ OmSnapshot omSnapshot = Mockito.mock(OmSnapshot.class);
+ OMMetadataManager metadataManager = Mockito.mock(OMMetadataManager.class);
+ DBStore dbStore = getMockedDBStore(snapshot);
+ Mockito.when(omSnapshot.getName()).thenReturn(snapshot);
+ Mockito.when(omSnapshot.getMetadataManager()).thenReturn(metadataManager);
+ Mockito.when(metadataManager.getStore()).thenReturn(dbStore);
+ return omSnapshot;
+ }
+
+ private SnapshotDiffManager getMockedSnapshotDiffManager(int cacheSize) {
+
+ Mockito.when(snapdiffDB.get()).thenReturn(rocksDB);
+ Mockito.when(rocksDB.newIterator(snapdiffJobCFH))
+ .thenReturn(jobTableIterator);
+ CacheLoader<String, OmSnapshot> loader =
+ new CacheLoader<String, OmSnapshot>() {
+ @Override
+ public OmSnapshot load(String key) {
+ return getMockedOmSnapshot(key);
+ }
+ };
+ snapshotCache = CacheBuilder.newBuilder()
+ .maximumSize(cacheSize)
+ .build(loader);
+ Mockito.when(ozoneManager.getConfiguration())
+ .thenReturn(new OzoneConfiguration());
+ SnapshotDiffManager snapshotDiffManager = Mockito.spy(
+ new SnapshotDiffManager(snapdiffDB, differ, ozoneManager, snapshotCache,
+ snapdiffJobCFH, snapdiffReportCFH, columnFamilyOptions,
+ codecRegistry));
+ return snapshotDiffManager;
+ }
+
+ private SnapshotInfo getMockedSnapshotInfo(String snapshot) {
+ SnapshotInfo snapshotInfo = Mockito.mock(SnapshotInfo.class);
+ Mockito.when(snapshotInfo.getSnapshotID()).thenReturn(snapshot);
+ return snapshotInfo;
+ }
+
+ @ParameterizedTest
+ @ValueSource(ints = {1, 2, 5, 10, 100, 1000, 10000})
+ public void testGetDeltaFilesWithDag(int numberOfFiles)
+ throws ExecutionException, RocksDBException, IOException {
+
+ SnapshotDiffManager snapshotDiffManager = getMockedSnapshotDiffManager(10);
+ String snap1 = "snap1";
+ String snap2 = "snap2";
+
+ Set<String> randomStrings = IntStream.range(0, numberOfFiles)
+ .mapToObj(i -> RandomStringUtils.randomAlphabetic(10))
+ .collect(Collectors.toSet());
+ Mockito.when(differ.getSSTDiffListWithFullPath(Mockito.any(),
+ Mockito.any(), Mockito.anyString()))
+ .thenReturn(Lists.newArrayList(randomStrings));
Review Comment:
I think you should verify a few of the parameters if possible, e.g.
```
when(differ.getSSTDiffListWithFullPath(any(), any(), eq("snapdiff_dir")))
    .thenReturn(Lists.newArrayList(randomStrings));
```
##########
hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdb/util/TestManagedSstFileReader.java:
##########
@@ -0,0 +1,150 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ozone.rocksdb.util;
+
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.apache.commons.lang3.tuple.Pair;
+import org.apache.hadoop.hdds.utils.NativeLibraryNotLoadedException;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedEnvOptions;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedSSTDumpTool;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileWriter;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.ValueSource;
+import org.rocksdb.RocksDBException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.SynchronousQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+import java.util.stream.Stream;
+
+/**
+ * ManagedSstFileReader tests.
+ */
+public class TestManagedSstFileReader {
+
+ private static final Logger LOG =
+ LoggerFactory.getLogger(TestManagedSstFileReader.class);
+
+ private static final String KEY_PREFIX = IntStream.range(0, 256).boxed()
+ .map(i -> String.format("%c", i))
+ .collect(Collectors.joining(""));
+
+ private String createRandomSSTFile(TreeMap<String, Integer> keys)
+ throws IOException, RocksDBException {
+ File file = File.createTempFile("tmp_sst_file", ".sst");
+ file.deleteOnExit();
+
+ try (ManagedOptions managedOptions = new ManagedOptions();
+ ManagedEnvOptions managedEnvOptions = new ManagedEnvOptions();
+ ManagedSstFileWriter sstFileWriter = new ManagedSstFileWriter(
+ managedEnvOptions, managedOptions)) {
+ sstFileWriter.open(file.getAbsolutePath());
+ for (Map.Entry<String, Integer> entry : keys.entrySet()) {
+ if (entry.getValue() == 0) {
+ sstFileWriter.delete(entry.getKey().getBytes(StandardCharsets.UTF_8));
+ } else {
+ sstFileWriter.put(entry.getKey().getBytes(StandardCharsets.UTF_8),
+ entry.getKey().getBytes(StandardCharsets.UTF_8));
+ }
+ }
+ sstFileWriter.finish();
+ }
+ return file.getAbsolutePath();
+ }
+
+ private Map<String, Integer> createKeys(int startRange, int endRange) {
+ return IntStream.range(startRange, endRange).boxed()
+ .collect(Collectors.toMap(i -> KEY_PREFIX + i,
+ i -> i % 2));
+ }
+
+ private Pair<Map<String, Integer>, List<String>> createDummyData(
+ int numberOfFiles) throws RocksDBException, IOException {
+ List<String> files = new ArrayList<>();
+ int numberOfKeysPerFile = 1000;
+ Map<String, Integer> keys = new HashMap<>();
+ int cnt = 0;
+ for (int i = 0; i < numberOfFiles; i++) {
+ TreeMap<String, Integer> fileKeys = new TreeMap<>(createKeys(cnt,
+ cnt + numberOfKeysPerFile));
+ cnt += fileKeys.size();
+ String tmpSSTFile = createRandomSSTFile(fileKeys);
+ files.add(tmpSSTFile);
+ keys.putAll(fileKeys);
+ }
+ return Pair.of(keys, files);
+ }
+
+
Review Comment:
nit: please remove the extra blank line.
##########
hadoop-ozone/dev-support/checks/junit.sh:
##########
@@ -19,6 +19,7 @@ set -u -o pipefail
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
cd "$DIR/../../.." || exit 1
+: ${CANCEL_NATIVE_VERSION_CHECK:="false"}
Review Comment:
Is this meant to cancel or to skip the native version check? If it is to skip,
a better name would be `SKIP_NATIVE_VERSION_CHECK`.
##########
hadoop-ozone/ozone-manager/pom.xml:
##########
@@ -258,6 +258,18 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
<groupId>org.apache.ozone</groupId>
<artifactId>hdds-rocks-native</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.mockito</groupId>
+ <artifactId>mockito-junit-jupiter</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.powermock</groupId>
+ <artifactId>powermock-module-junit4</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.powermock</groupId>
+ <artifactId>powermock-api-mockito2</artifactId>
+ </dependency>
Review Comment:
I don't see any use of these. Please correct me if I'm wrong.
##########
hadoop-ozone/dev-support/checks/junit.sh:
##########
@@ -41,6 +42,13 @@ else
MAVEN_OPTIONS="${MAVEN_OPTIONS} --fail-at-end"
fi
+if [[ "${CANCEL_NATIVE_VERSION_CHECK}" != "true" ]]; then
Review Comment:
Is it possible to avoid the double negation? e.g. `if [[
"${CANCEL_NATIVE_VERSION_CHECK}" == "true" ]]`, then do something; otherwise
fall back to the default.
##########
hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdb/util/TestManagedSstFileReader.java:
##########
@@ -0,0 +1,150 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ozone.rocksdb.util;
+
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.apache.commons.lang3.tuple.Pair;
+import org.apache.hadoop.hdds.utils.NativeLibraryNotLoadedException;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedEnvOptions;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedSSTDumpTool;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileWriter;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.ValueSource;
+import org.rocksdb.RocksDBException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.SynchronousQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+import java.util.stream.Stream;
+
+/**
+ * ManagedSstFileReader tests.
+ */
+public class TestManagedSstFileReader {
+
+ private static final Logger LOG =
+ LoggerFactory.getLogger(TestManagedSstFileReader.class);
+
+ private static final String KEY_PREFIX = IntStream.range(0, 256).boxed()
Review Comment:
Is it supposed to be any random key prefix, or does it have to be specifically
this one? If it is the former, you can use `RandomStringUtils`.
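If any prefix would do, a sketch of the alternative (a hypothetical replacement
for the 0-255 character ramp built above):
```java
// Hypothetical: random printable prefix instead of the full 0..255 character range.
private static final String KEY_PREFIX = RandomStringUtils.randomAlphanumeric(16);
```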
##########
hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdb/util/TestManagedSstFileReader.java:
##########
@@ -0,0 +1,150 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ozone.rocksdb.util;
+
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.apache.commons.lang3.tuple.Pair;
+import org.apache.hadoop.hdds.utils.NativeLibraryNotLoadedException;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedEnvOptions;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedSSTDumpTool;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileWriter;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.ValueSource;
+import org.rocksdb.RocksDBException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.SynchronousQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+import java.util.stream.Stream;
+
+/**
+ * ManagedSstFileReader tests.
+ */
+public class TestManagedSstFileReader {
+
+ private static final Logger LOG =
+ LoggerFactory.getLogger(TestManagedSstFileReader.class);
+
+ private static final String KEY_PREFIX = IntStream.range(0, 256).boxed()
+ .map(i -> String.format("%c", i))
+ .collect(Collectors.joining(""));
+
+ private String createRandomSSTFile(TreeMap<String, Integer> keys)
+ throws IOException, RocksDBException {
+ File file = File.createTempFile("tmp_sst_file", ".sst");
+ file.deleteOnExit();
+
+ try (ManagedOptions managedOptions = new ManagedOptions();
+ ManagedEnvOptions managedEnvOptions = new ManagedEnvOptions();
+ ManagedSstFileWriter sstFileWriter = new ManagedSstFileWriter(
+ managedEnvOptions, managedOptions)) {
+ sstFileWriter.open(file.getAbsolutePath());
+ for (Map.Entry<String, Integer> entry : keys.entrySet()) {
+ if (entry.getValue() == 0) {
+ sstFileWriter.delete(entry.getKey().getBytes(StandardCharsets.UTF_8));
+ } else {
+ sstFileWriter.put(entry.getKey().getBytes(StandardCharsets.UTF_8),
+ entry.getKey().getBytes(StandardCharsets.UTF_8));
+ }
+ }
+ sstFileWriter.finish();
+ }
+ return file.getAbsolutePath();
+ }
+
+ private Map<String, Integer> createKeys(int startRange, int endRange) {
+ return IntStream.range(startRange, endRange).boxed()
+ .collect(Collectors.toMap(i -> KEY_PREFIX + i,
+ i -> i % 2));
+ }
+
+ private Pair<Map<String, Integer>, List<String>> createDummyData(
+ int numberOfFiles) throws RocksDBException, IOException {
+ List<String> files = new ArrayList<>();
+ int numberOfKeysPerFile = 1000;
+ Map<String, Integer> keys = new HashMap<>();
+ int cnt = 0;
+ for (int i = 0; i < numberOfFiles; i++) {
+ TreeMap<String, Integer> fileKeys = new TreeMap<>(createKeys(cnt,
+ cnt + numberOfKeysPerFile));
+ cnt += fileKeys.size();
+ String tmpSSTFile = createRandomSSTFile(fileKeys);
+ files.add(tmpSSTFile);
+ keys.putAll(fileKeys);
+ }
+ return Pair.of(keys, files);
+ }
+
+
+ @ParameterizedTest
+ @ValueSource(ints = {0, 1, 2, 3, 7, 10})
+ public void testGetKeyStream(int numberOfFiles)
+ throws RocksDBException, IOException, NativeLibraryNotLoadedException {
+ Pair<Map<String, Integer>, List<String>> data =
+ createDummyData(numberOfFiles);
+ List<String> files = data.getRight();
+ Map<String, Integer> keys = data.getLeft();
+ try (Stream<String> keyStream =
+ new ManagedSstFileReader(files).getKeyStream()) {
+ keyStream.forEach(key -> {
+ Assertions.assertEquals(keys.get(key), 1);
+ keys.remove(key);
+ });
+ keys.values().forEach(val -> Assertions.assertEquals(0, val));
+ }
+ }
+
+ @ParameterizedTest
+ @ValueSource(ints = {0, 1, 2, 3, 7, 10})
+ public void testGetKeyStreamWithTombstone(int numberOfFiles)
+ throws RocksDBException, IOException, NativeLibraryNotLoadedException {
+ Pair<Map<String, Integer>, List<String>> data =
+ createDummyData(numberOfFiles);
+ List<String> files = data.getRight();
+ Map<String, Integer> keys = data.getLeft();
+ ExecutorService executorService = new ThreadPoolExecutor(0,
+ 1, 60, TimeUnit.SECONDS,
+ new SynchronousQueue<>(), new ThreadFactoryBuilder()
+ .setNameFormat("snapshot-diff-manager-sst-dump-tool-TID-%d")
+ .build(), new ThreadPoolExecutor.DiscardPolicy());
+ ManagedSSTDumpTool sstDumpTool =
+ new ManagedSSTDumpTool(executorService, 256);
+
+ try (Stream<String> keyStream = new ManagedSstFileReader(files)
+ .getKeyStreamWithTombstone(sstDumpTool)) {
+ keyStream.forEach(keys::remove);
+ Assertions.assertEquals(0, keys.size());
+ }
Review Comment:
nit: move `executorService.shutdown();` to a finally block as a good coding
practice.
```suggestion
} finally {
executorService.shutdown();
}
```
##########
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java:
##########
@@ -831,7 +835,7 @@ private void getDeltaFilesAndDiffKeysToObjectIdToKeyMap(
}
@SuppressWarnings("checkstyle:ParameterNumber")
- private void addToObjectIdMap(Table<String, ? extends WithObjectID> fsTable,
+ void addToObjectIdMap(Table<String, ? extends WithObjectID> fsTable,
Table<String, ? extends WithObjectID> tsTable,
Set<String> deltaFiles,
boolean nativeRocksToolsLoaded,
Review Comment:
nit: the alignment is a bit off.
##########
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java:
##########
@@ -899,7 +903,7 @@ private String getKeyOrDirectoryName(boolean isDirectory,
}
@SuppressWarnings("checkstyle:ParameterNumber")
- private Set<String> getDeltaFiles(OmSnapshot fromSnapshot,
+ Set<String> getDeltaFiles(OmSnapshot fromSnapshot,
Review Comment:
Same as above.
##########
hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java:
##########
@@ -0,0 +1,500 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.snapshot;
+
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.CacheLoader;
+import com.google.common.cache.LoadingCache;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.hdds.client.ECReplicationConfig;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.utils.NativeLibraryNotLoadedException;
+import org.apache.hadoop.hdds.utils.db.CodecRegistry;
+import org.apache.hadoop.hdds.utils.db.DBStore;
+import org.apache.hadoop.hdds.utils.db.IntegerCodec;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB;
+import org.apache.hadoop.hdfs.DFSUtilClient;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
+import org.apache.hadoop.ozone.om.OmSnapshot;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;
+import org.apache.hadoop.ozone.om.helpers.WithObjectID;
+import org.apache.hadoop.ozone.snapshot.SnapshotDiffReportOzone;
+import org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse;
+import org.apache.ozone.rocksdb.util.ManagedSstFileReader;
+import org.apache.ozone.rocksdb.util.RdbUtil;
+import org.apache.ozone.rocksdiff.DifferSnapshotInfo;
+import org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer;
+import org.apache.ozone.rocksdiff.RocksDiffUtils;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.ExtendWith;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.CsvSource;
+import org.junit.jupiter.params.provider.ValueSource;
+import org.mockito.Matchers;
+import org.mockito.Mock;
+import org.mockito.MockedConstruction;
+import org.mockito.MockedStatic;
+import org.mockito.Mockito;
+import org.mockito.junit.jupiter.MockitoExtension;
+import org.mockito.junit.jupiter.MockitoSettings;
+import org.mockito.quality.Strictness;
+import org.mockito.stubbing.Answer;
+import org.rocksdb.ColumnFamilyHandle;
+import org.rocksdb.RocksDB;
+import org.rocksdb.RocksDBException;
+import org.rocksdb.RocksIterator;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ExecutionException;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+import java.util.stream.LongStream;
+
+
Review Comment:
nit: please remove the extra blank line.
##########
hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/SnapshotTestUtils.java:
##########
@@ -0,0 +1,190 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.snapshot;
+
+import org.apache.hadoop.util.ClosableIterator;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+
+/**
+ * Util classes for mocking Snapshot Persistent DataStructures.
Review Comment:
I don't think you are mocking it here. It is more like a test implementation of
the persistent data structures.
##########
hadoop-ozone/s3gateway/pom.xml:
##########
@@ -177,6 +177,10 @@
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.junit.jupiter</groupId>
+ <artifactId>junit-jupiter-params</artifactId>
+ </dependency>
Review Comment:
Is it needed for HDDS-8477?
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]