This is an automated email from the ASF dual-hosted git repository.
sammichen pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new 48230b6fe0 HDDS-7158. ldb cli command supports to scan container V3. (#3705)
48230b6fe0 is described below
commit 48230b6fe09128024148e04b3deca01ceedf105a
Author: Sammi Chen <[email protected]>
AuthorDate: Fri Aug 26 12:45:40 2022 +0800
HDDS-7158. ldb cli command supports to scan container V3. (#3705)
---
.../metadata/DatanodeSchemaThreeDBDefinition.java | 2 +-
.../utils/db/managed/ManagedRocksObjectUtils.java | 2 +-
...agedRocksObjectUtils.java => ManagedSlice.java} | 32 ++----
.../org/apache/ozone/test/GenericTestUtils.java | 42 +++++++
.../om/{TestOmLDBCli.java => TestLDBCli.java} | 124 ++++++++++++++++++++-
.../hadoop/ozone/debug/DBDefinitionFactory.java | 3 +-
.../org/apache/hadoop/ozone/debug/DBScanner.java | 61 +++++++++-
7 files changed, 233 insertions(+), 33 deletions(-)
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaThreeDBDefinition.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaThreeDBDefinition.java
index 049db773b0..f69484f2a4 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaThreeDBDefinition.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaThreeDBDefinition.java
@@ -148,7 +148,7 @@ public class DatanodeSchemaThreeDBDefinition
+ separator;
}
- private static int getContainerKeyPrefixLength() {
+ public static int getContainerKeyPrefixLength() {
return FixedLengthStringUtils.string2Bytes(
getContainerKeyPrefix(0L)).length;
}
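Making getContainerKeyPrefixLength() public lets the scanner split a schema V3
key into its container prefix and local block ID. A rough sketch of that
decoding, assuming the prefix begins with the container ID as a big-endian
long (the class and method names in this sketch are illustrative, not part of
the patch):

    import com.google.common.primitives.Longs;
    import java.nio.charset.StandardCharsets;
    import java.util.Arrays;

    // Sketch: render a schema V3 block key as "containerId: localId".
    // prefixLength stands in for getContainerKeyPrefixLength(); the key is
    // assumed to start with Longs.toByteArray(containerId) plus a separator.
    public final class SchemaV3KeySketch {
      private SchemaV3KeySketch() {
      }

      static String describe(byte[] key, int prefixLength) {
        // The first 8 bytes carry the container ID.
        long containerId = Longs.fromByteArray(
            Arrays.copyOf(key, Long.BYTES));
        // Everything after the fixed-length prefix is the local block ID.
        String localId = new String(key, prefixLength,
            key.length - prefixLength, StandardCharsets.UTF_8);
        return containerId + ": " + localId;
      }
    }

This mirrors the "containerId: blockId" form the DBScanner change below
prints for V3 keys.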
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java
index 3a8f0e698c..026044e279 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java
@@ -29,7 +29,7 @@ public final class ManagedRocksObjectUtils {
private ManagedRocksObjectUtils() {
}
- private static final Logger LOG =
+ public static final Logger LOG =
LoggerFactory.getLogger(ManagedRocksObjectUtils.class);
static void assertClosed(RocksObject rocksObject) {
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSlice.java
similarity index 54%
copy from hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java
copy to hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSlice.java
index 3a8f0e698c..05c98ac709 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSlice.java
@@ -18,35 +18,25 @@
*/
package org.apache.hadoop.hdds.utils.db.managed;
-import org.rocksdb.RocksObject;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+import org.rocksdb.Slice;
/**
- * Utilities to help assert RocksObject closures.
+ * Managed Slice.
*/
-public final class ManagedRocksObjectUtils {
- private ManagedRocksObjectUtils() {
- }
-
- private static final Logger LOG =
- LoggerFactory.getLogger(ManagedRocksObjectUtils.class);
+public class ManagedSlice extends Slice {
- static void assertClosed(RocksObject rocksObject) {
- ManagedRocksObjectMetrics.INSTANCE.increaseManagedObject();
- if (rocksObject.isOwningHandle()) {
- ManagedRocksObjectMetrics.INSTANCE.increaseLeakObject();
- LOG.warn("{} is not closed properly",
- rocksObject.getClass().getSimpleName());
- }
+ public ManagedSlice(byte[] var1) {
+ super(var1);
}
- static void assertClosed(RocksObject rocksObject, Throwable stack) {
+ @Override
+ protected void finalize() throws Throwable {
ManagedRocksObjectMetrics.INSTANCE.increaseManagedObject();
- if (rocksObject.isOwningHandle()) {
+ if (this.isOwningHandle()) {
ManagedRocksObjectMetrics.INSTANCE.increaseLeakObject();
- LOG.warn("{} is not closed properly",
- rocksObject.getClass().getSimpleName(), stack);
+ ManagedRocksObjectUtils.LOG.warn("{} is not closed properly",
+ this.getClass().getSimpleName());
}
+ super.finalize();
}
}
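ManagedSlice wraps org.rocksdb.Slice so the upper-bound slice handed to read
options gets the same finalize-time leak warning as other managed RocksDB
objects. A minimal sketch of the bounded-scan pattern it supports, written
against plain RocksDB types (the Managed* wrappers in this patch add only the
leak tracking on top of these):

    import java.nio.charset.StandardCharsets;
    import org.rocksdb.ReadOptions;
    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksIterator;
    import org.rocksdb.Slice;

    // Sketch: visit only keys in [lowerPrefix, upperPrefix) by seeking to
    // the lower prefix and capping the iterator at the upper prefix.
    public final class BoundedScanSketch {
      private BoundedScanSketch() {
      }

      static void scanRange(RocksDB db, byte[] lowerPrefix,
          byte[] upperPrefix) {
        // try-with-resources closes the iterator before the slice, so the
        // upper bound stays alive for the iterator's whole lifetime.
        try (Slice upper = new Slice(upperPrefix);
             ReadOptions options = new ReadOptions()
                 .setIterateUpperBound(upper);
             RocksIterator it = db.newIterator(options)) {
          for (it.seek(lowerPrefix); it.isValid(); it.next()) {
            // Keys at or past upperPrefix are never returned.
            System.out.println(new String(it.key(),
                StandardCharsets.UTF_8));
          }
        }
      }
    }

The DBScanner change below applies exactly this pattern, with the requested
container's key prefix as the lower bound and the prefix of containerId + 1
as the upper bound.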
diff --git a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java
index ec357b3742..46f371e6ce 100644
--- a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java
+++ b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java
@@ -337,6 +337,48 @@ public abstract class GenericTestUtils {
}
}
+ /**
+ * Capture output printed to {@link System#out}.
+ * <p>
+ * Usage:
+ * <pre>
+ * try (SystemOutCapturer capture = new SystemOutCapturer()) {
+ * ...
+ * // Call capture.getOutput() to get the output string
+ * }
+ * </pre>
+ * <p>
+ * TODO: Add lambda support once Java 8 is common.
+ * <pre>
+ * SystemOutCapturer.withCapture(capture -> {
+ * ...
+ * })
+ * </pre>
+ */
+ public static class SystemOutCapturer implements AutoCloseable {
+ private final ByteArrayOutputStream bytes;
+ private final PrintStream bytesPrintStream;
+ private final PrintStream oldOut;
+
+ public SystemOutCapturer() throws
+ UnsupportedEncodingException {
+ bytes = new ByteArrayOutputStream();
+ bytesPrintStream = new PrintStream(bytes, false, UTF_8.name());
+ oldOut = System.out;
+ System.setOut(new TeePrintStream(oldOut, bytesPrintStream));
+ }
+
+ public String getOutput() throws UnsupportedEncodingException {
+ return bytes.toString(UTF_8.name());
+ }
+
+ @Override
+ public void close() throws Exception {
+ IOUtils.closeQuietly(bytesPrintStream);
+ System.setOut(oldOut);
+ }
+ }
+
/**
* Prints output to one {@link PrintStream} while copying to the other.
* <p>
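A minimal usage sketch for the new capturer in a JUnit 4 test, matching the
tests below (the test class and assertion content here are illustrative):

    import org.apache.ozone.test.GenericTestUtils;
    import org.junit.Assert;
    import org.junit.Test;

    public class SystemOutCapturerExample {
      @Test
      public void capturesStdout() throws Exception {
        try (GenericTestUtils.SystemOutCapturer capture =
                 new GenericTestUtils.SystemOutCapturer()) {
          System.out.println("hello");
          // TeePrintStream keeps echoing to the real stdout while the
          // capturer records a copy for assertions.
          Assert.assertTrue(capture.getOutput().contains("hello"));
        }
      }
    }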
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmLDBCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestLDBCli.java
similarity index 54%
rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmLDBCli.java
rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestLDBCli.java
index a670cf69d4..aa401d5f56 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmLDBCli.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestLDBCli.java
@@ -18,16 +18,25 @@ package org.apache.hadoop.ozone.om;
import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.hdds.StringUtils;
+import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.utils.db.DBStore;
import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
+import org.apache.hadoop.hdds.utils.db.FixedLengthStringUtils;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.ozone.ClientVersion;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
+import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
+import org.apache.hadoop.ozone.container.metadata.DatanodeSchemaThreeDBDefinition;
import org.apache.hadoop.ozone.debug.DBScanner;
import org.apache.hadoop.ozone.debug.RDBParser;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
+import org.apache.ozone.test.GenericTestUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
@@ -47,9 +56,9 @@ import static java.nio.charset.StandardCharsets.UTF_8;
/**
- * This class tests the Debug LDB CLI that reads from an om.db file.
+ * This class tests the Debug LDB CLI that reads from a RocksDB file.
*/
-public class TestOmLDBCli {
+public class TestLDBCli {
private OzoneConfiguration conf;
private RDBParser rdbParser;
@@ -73,6 +82,10 @@ public class TestOmLDBCli {
if (dbStore != null) {
dbStore.close();
}
+ // Restore the static fields in DBScanner
+ DBScanner.setContainerId(-1);
+ DBScanner.setDnDBSchemaVersion("V2");
+ DBScanner.setWithKey(false);
}
@Test
@@ -92,7 +105,7 @@ public class TestOmLDBCli {
OmKeyInfo value = OMRequestTestUtils.createOmKeyInfo("sampleVol",
"sampleBuck", "key" + (i + 1),
HddsProtos.ReplicationType.STAND_ALONE,
HddsProtos.ReplicationFactor.ONE);
- String key = "key" + (i);
+ String key = "key" + (i + 1);
Table<byte[], byte[]> keyTable = dbStore.getTable("keyTable");
byte[] arr = value
.getProtobuf(ClientVersion.CURRENT_VERSION).toByteArray();
@@ -184,4 +197,109 @@ public class TestOmLDBCli {
}
return keyNames;
}
+
+ @Test
+ public void testDNDBSchemaV3() throws Exception {
+ File newFolder = folder.newFolder();
+ if (!newFolder.exists()) {
+ Assert.assertTrue(newFolder.mkdirs());
+ }
+
+ conf.setBoolean(DatanodeConfiguration.CONTAINER_SCHEMA_V3_ENABLED, true);
+ dbStore = BlockUtils.getUncachedDatanodeStore(newFolder.getAbsolutePath() +
+ "/" + OzoneConsts.CONTAINER_DB_NAME, OzoneConsts.SCHEMA_V3, conf,
+ false).getStore();
+
+ // insert 2 containers, each with 2 blocks
+ final int containerCount = 2;
+ final int blockCount = 2;
+ int blockId = 1;
+ Table<byte[], byte[]> blockTable = dbStore.getTable("block_data");
+ for (int i = 1; i <= containerCount; i++) {
+ for (int j = 1; j <= blockCount; j++, blockId++) {
+ String key =
+ DatanodeSchemaThreeDBDefinition.getContainerKeyPrefix(i) + blockId;
+ BlockData blockData = new BlockData(new BlockID(i, blockId));
+ blockTable.put(FixedLengthStringUtils.string2Bytes(key),
+ blockData.getProtoBufMessage().toByteArray());
+ }
+ }
+
+ rdbParser.setDbPath(dbStore.getDbLocation().getAbsolutePath());
+ dbScanner.setParent(rdbParser);
+ dbScanner.setTableName("block_data");
+ DBScanner.setDnDBSchemaVersion("V3");
+ DBScanner.setWithKey(true);
+
+ // Scan all containers
+ try (GenericTestUtils.SystemOutCapturer capture =
+ new GenericTestUtils.SystemOutCapturer()) {
+ dbScanner.call();
+ // Assert that output has info for container 2 block 4
+ Assert.assertTrue(capture.getOutput().contains("2: 4"));
+ // Assert that output has info for container 1 block 1
+ Assert.assertTrue(capture.getOutput().contains("1: 1"));
+ }
+
+ // Scan container 1
+ DBScanner.setContainerId(1);
+ try (GenericTestUtils.SystemOutCapturer capture =
+ new GenericTestUtils.SystemOutCapturer()) {
+ dbScanner.call();
+ // Assert that output doesn't have info for container 2 block 4
+ Assert.assertFalse(capture.getOutput().contains("2: 4"));
+ // Assert that output has info for container 1 block 1
+ Assert.assertTrue(capture.getOutput().contains("1: 1"));
+ }
+
+ // Scan container 2
+ DBScanner.setContainerId(2);
+ try (GenericTestUtils.SystemOutCapturer capture =
+ new GenericTestUtils.SystemOutCapturer()) {
+ dbScanner.call();
+ // Assert that output has info for container 2 block 4
+ Assert.assertTrue(capture.getOutput().contains("2: 4"));
+ // Assert that output doesn't have info for container 1 block 1
+ Assert.assertFalse(capture.getOutput().contains("1: 1"));
+ }
+ }
+
+ @Test
+ public void testDNDBSchemaV2() throws Exception {
+ File newFolder = folder.newFolder();
+ if (!newFolder.exists()) {
+ Assert.assertTrue(newFolder.mkdirs());
+ }
+
+ conf.setBoolean(DatanodeConfiguration.CONTAINER_SCHEMA_V3_ENABLED, false);
+ dbStore = BlockUtils.getUncachedDatanodeStore(newFolder.getAbsolutePath() +
+ "/" + OzoneConsts.CONTAINER_DB_NAME, OzoneConsts.SCHEMA_V2, conf,
+ false).getStore();
+
+ // insert 1 container with 2 blocks
+ final long cid = 1;
+ final int blockCount = 2;
+ int blockId = 1;
+ Table<byte[], byte[]> blockTable = dbStore.getTable("block_data");
+ for (int j = 1; j <= blockCount; j++, blockId++) {
+ String key = String.valueOf(blockId);
+ BlockData blockData = new BlockData(new BlockID(cid, blockId));
+ blockTable.put(StringUtils.string2Bytes(key),
+ blockData.getProtoBufMessage().toByteArray());
+ }
+
+ rdbParser.setDbPath(dbStore.getDbLocation().getAbsolutePath());
+ dbScanner.setParent(rdbParser);
+ dbScanner.setTableName("block_data");
+ DBScanner.setDnDBSchemaVersion("V2");
+ DBScanner.setWithKey(true);
+
+ // Scan all containers
+ try (GenericTestUtils.SystemOutCapturer capture =
+ new GenericTestUtils.SystemOutCapturer()) {
+ dbScanner.call();
+ // Assert that output has info for block 2
+ Assert.assertTrue(capture.getOutput().contains("2"));
+ }
+ }
}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBDefinitionFactory.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBDefinitionFactory.java
index 369da54c73..e2836bbd1a 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBDefinitionFactory.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBDefinitionFactory.java
@@ -25,6 +25,7 @@ import java.util.HashMap;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition;
import org.apache.hadoop.hdds.utils.db.DBDefinition;
+import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.container.metadata.DatanodeSchemaOneDBDefinition;
import org.apache.hadoop.ozone.container.metadata.DatanodeSchemaThreeDBDefinition;
import org.apache.hadoop.ozone.container.metadata.DatanodeSchemaTwoDBDefinition;
@@ -75,7 +76,7 @@ public final class DBDefinitionFactory {
"Path is required to identify the used db scheme");
}
String dbName = fileName.toString();
- if (dbName.endsWith("container.db")) {
+ if (dbName.endsWith(OzoneConsts.CONTAINER_DB_SUFFIX)) {
switch (dnDBSchemaVersion) {
case "V1":
return new DatanodeSchemaOneDBDefinition(
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java
index 1e4f7ea108..84ffcc13d8 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java
@@ -18,15 +18,20 @@
package org.apache.hadoop.ozone.debug;
+import com.google.common.primitives.Longs;
import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import org.apache.hadoop.hdds.cli.SubcommandWithParent;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition;
import org.apache.hadoop.hdds.utils.db.DBDefinition;
+import org.apache.hadoop.hdds.utils.db.FixedLengthStringUtils;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedReadOptions;
import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB;
import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksIterator;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedSlice;
import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.container.metadata.DatanodeSchemaThreeDBDefinition;
import org.kohsuke.MetaInfServices;
import org.rocksdb.ColumnFamilyDescriptor;
import org.rocksdb.ColumnFamilyHandle;
@@ -46,7 +51,7 @@ import java.util.List;
import java.util.concurrent.Callable;
/**
- * Parser for scm.db file.
+ * Parser for scm.db, om.db, or container db files.
*/
@CommandLine.Command(
name = "scan",
@@ -76,10 +81,15 @@ public class DBScanner implements Callable<Void>, SubcommandWithParent {
private static String fileName;
@CommandLine.Option(names = {"--dnSchema", "-d"},
- description = "Datanode DB Schema Version : V1/V2",
+ description = "Datanode DB Schema Version : V1/V2/V3",
defaultValue = "V2")
private static String dnDBSchemaVersion;
+ @CommandLine.Option(names = {"--container-id", "-cid"},
+ description = "Container ID when datanode DB Schema is V3",
+ defaultValue = "-1")
+ private static long containerId;
+
@CommandLine.ParentCommand
private RDBParser parent;
@@ -90,7 +100,6 @@ public class DBScanner implements Callable<Void>, SubcommandWithParent {
private static List<Object> displayTable(ManagedRocksIterator iterator,
DBColumnFamilyDefinition dbColumnFamilyDefinition) throws IOException {
List<Object> outputs = new ArrayList<>();
- iterator.get().seekToFirst();
Writer fileWriter = null;
PrintWriter printWriter = null;
@@ -100,13 +109,25 @@ public class DBScanner implements Callable<Void>, SubcommandWithParent {
new FileOutputStream(fileName), StandardCharsets.UTF_8);
printWriter = new PrintWriter(fileWriter);
}
+
+ boolean schemaV3 = dnDBSchemaVersion != null &&
+ dnDBSchemaVersion.equals("V3");
while (iterator.get().isValid()) {
StringBuilder result = new StringBuilder();
if (withKey) {
Object key = dbColumnFamilyDefinition.getKeyCodec()
.fromPersistedFormat(iterator.get().key());
Gson gson = new GsonBuilder().setPrettyPrinting().create();
- result.append(gson.toJson(key));
+ if (schemaV3) {
+ int index =
+ DatanodeSchemaThreeDBDefinition.getContainerKeyPrefixLength();
+ String cid = key.toString().substring(0, index);
+ String blockId = key.toString().substring(index);
+ result.append(gson.toJson(Longs.fromByteArray(
+ FixedLengthStringUtils.string2Bytes(cid)) + ": " + blockId));
+ } else {
+ result.append(gson.toJson(key));
+ }
result.append(" -> ");
}
Object o = dbColumnFamilyDefinition.getValueCodec()
@@ -160,6 +181,18 @@ public class DBScanner implements Callable<Void>, SubcommandWithParent {
DBScanner.fileName = name;
}
+ public static void setContainerId(long id) {
+ DBScanner.containerId = id;
+ }
+
+ public static void setDnDBSchemaVersion(String version) {
+ DBScanner.dnDBSchemaVersion = version;
+ }
+
+ public static void setWithKey(boolean withKey) {
+ DBScanner.withKey = withKey;
+ }
+
private static ColumnFamilyHandle getColumnFamilyHandle(
byte[] name, List<ColumnFamilyHandle> columnFamilyHandles) {
return columnFamilyHandles
@@ -230,8 +263,24 @@ public class DBScanner implements Callable<Void>, SubcommandWithParent {
if (columnFamilyHandle == null) {
throw new IllegalArgumentException("columnFamilyHandle is null");
}
- ManagedRocksIterator iterator = new ManagedRocksIterator(
- rocksDB.get().newIterator(columnFamilyHandle));
+ ManagedRocksIterator iterator;
+ if (containerId > 0 && dnDBSchemaVersion != null &&
+ dnDBSchemaVersion.equals("V3")) {
+ ManagedReadOptions readOptions = new ManagedReadOptions();
+ readOptions.setIterateUpperBound(new ManagedSlice(
+ FixedLengthStringUtils.string2Bytes(
+ DatanodeSchemaThreeDBDefinition.getContainerKeyPrefix(
+ containerId + 1))));
+ iterator = new ManagedRocksIterator(
+ rocksDB.get().newIterator(columnFamilyHandle, readOptions));
+ iterator.get().seek(FixedLengthStringUtils.string2Bytes(
+ DatanodeSchemaThreeDBDefinition.getContainerKeyPrefix(
+ containerId)));
+ } else {
+ iterator = new ManagedRocksIterator(
+ rocksDB.get().newIterator(columnFamilyHandle));
+ iterator.get().seekToFirst();
+ }
scannedObjects = displayTable(iterator, columnFamilyDefinition);
}
} else {
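Taken together, a container-scoped scan would be invoked roughly like this (a
sketch: --dnSchema and --container-id come from this patch, while the
ldb/scan command shape and the --db and --column_family option names are
assumptions based on the existing debug tooling):

    ozone debug ldb --db=/path/to/container.db \
        scan --column_family=block_data --dnSchema=V3 --container-id=2

When keys are printed, each schema V3 key is rendered as "containerId:
localId", and --container-id=2 restricts the iterator to container 2's key
range.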
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]