This is an automated email from the ASF dual-hosted git repository.
sammichen pushed a commit to branch HDDS-3630
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/HDDS-3630 by this push:
new 49be0c06f3 HDDS-6540. Add a Cache for per-disk RocksDB Instance.
(#3283)
49be0c06f3 is described below
commit 49be0c06f3814babc65b70eaf58596afc168bc1b
Author: Gui Hecheng <[email protected]>
AuthorDate: Mon Apr 11 11:49:20 2022 +0800
HDDS-6540. Add a Cache for per-disk RocksDB Instance. (#3283)
---
.../container/common/utils/DatanodeStoreCache.java | 78 ++++++++++++++++++++++
.../ozone/container/keyvalue/KeyValueHandler.java | 2 +
.../container/keyvalue/helpers/BlockUtils.java | 63 ++++++++++++-----
.../keyvalue/helpers/KeyValueContainerUtil.java | 2 +-
.../container/keyvalue/impl/BlockManagerImpl.java | 3 +-
.../container/common/TestDatanodeStoreCache.java | 78 ++++++++++++++++++++++
6 files changed, 207 insertions(+), 19 deletions(-)
diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/DatanodeStoreCache.java
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/DatanodeStoreCache.java
new file mode 100644
index 0000000000..646fc2a2f3
--- /dev/null
+++
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/DatanodeStoreCache.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.container.common.utils;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+/**
+ * Cache for all per-disk DB handles under schema v3.
+ */
+public final class DatanodeStoreCache {
+
+ private static final Logger LOG =
+ LoggerFactory.getLogger(DatanodeStoreCache.class);
+
+ /**
+ * Use container db absolute path as key.
+ */
+ private final Map<String, RawDB> datanodeStoreMap;
+
+ private static DatanodeStoreCache cache;
+
+ private DatanodeStoreCache() {
+ datanodeStoreMap = new ConcurrentHashMap<>();
+ }
+
+ public static synchronized DatanodeStoreCache getInstance() {
+ if (cache == null) {
+ cache = new DatanodeStoreCache();
+ }
+ return cache;
+ }
+
+ public void addDB(String containerDBPath, RawDB db) {
+ datanodeStoreMap.putIfAbsent(containerDBPath, db);
+ }
+
+ public RawDB getDB(String containerDBPath) {
+ return datanodeStoreMap.get(containerDBPath);
+ }
+
+ public void removeDB(String containerDBPath) {
+ datanodeStoreMap.remove(containerDBPath);
+ }
+
+ public void shutdownCache() {
+ for (Map.Entry<String, RawDB> entry : datanodeStoreMap.entrySet()) {
+ try {
+ entry.getValue().getStore().stop();
+ } catch (Exception e) {
+ LOG.warn("Stop DatanodeStore: {} failed", entry.getKey(), e);
+ }
+ }
+ datanodeStoreMap.clear();
+ }
+
+ public int size() {
+ return datanodeStoreMap.size();
+ }
+}
diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index 73df5e4d95..f7e41c4f97 100644
---
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -177,6 +177,8 @@ public class KeyValueHandler extends Handler {
@Override
public void stop() {
+ chunkManager.shutdown();
+ blockManager.shutdown();
}
@Override
diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java
index 4dccba6e12..6959773d6c 100644
---
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java
+++
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java
@@ -28,6 +28,8 @@ import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.container.common.helpers.BlockData;
import org.apache.hadoop.ozone.container.common.interfaces.DBHandle;
import org.apache.hadoop.ozone.container.common.utils.ContainerCache;
+import org.apache.hadoop.ozone.container.common.utils.DatanodeStoreCache;
+import org.apache.hadoop.ozone.container.common.utils.RawDB;
import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
@@ -35,6 +37,7 @@ import
org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
import com.google.common.base.Preconditions;
import org.apache.hadoop.ozone.container.metadata.DatanodeStore;
import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaOneImpl;
+import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaThreeImpl;
import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaTwoImpl;
import static
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.NO_SUCH_BLOCK;
@@ -73,6 +76,9 @@ public final class BlockUtils {
store = new DatanodeStoreSchemaOneImpl(conf, containerDBPath, readOnly);
} else if (schemaVersion.equals(OzoneConsts.SCHEMA_V2)) {
store = new DatanodeStoreSchemaTwoImpl(conf, containerDBPath, readOnly);
+ } else if (schemaVersion.equals(OzoneConsts.SCHEMA_V3)) {
+ store = new DatanodeStoreSchemaThreeImpl(conf, containerDBPath,
+ readOnly);
} else {
throw new IllegalArgumentException(
"Unrecognized database schema version: " + schemaVersion);
@@ -112,13 +118,21 @@ public final class BlockUtils {
public static DBHandle getDB(KeyValueContainerData containerData,
ConfigurationSource conf) throws StorageContainerException {
Preconditions.checkNotNull(containerData);
- ContainerCache cache = ContainerCache.getInstance(conf);
- Preconditions.checkNotNull(cache);
Preconditions.checkNotNull(containerData.getDbFile());
+
+ String containerDBPath = containerData.getDbFile().getAbsolutePath();
try {
- return cache.getDB(containerData.getContainerID(), containerData
- .getContainerDBType(), containerData.getDbFile().getAbsolutePath(),
- containerData.getSchemaVersion(), conf);
+ if (containerData.getSchemaVersion().equals(OzoneConsts.SCHEMA_V3)) {
+ DatanodeStoreCache cache = DatanodeStoreCache.getInstance();
+ Preconditions.checkNotNull(cache);
+ return cache.getDB(containerDBPath);
+ } else {
+ ContainerCache cache = ContainerCache.getInstance(conf);
+ Preconditions.checkNotNull(cache);
+ return cache.getDB(containerData.getContainerID(), containerData
+ .getContainerDBType(), containerDBPath,
+ containerData.getSchemaVersion(), conf);
+ }
} catch (IOException ex) {
onFailure(containerData.getVolume());
String message = String.format("Error opening DB. Container:%s " +
@@ -136,18 +150,28 @@ public final class BlockUtils {
public static void removeDB(KeyValueContainerData container,
ConfigurationSource conf) {
Preconditions.checkNotNull(container);
- ContainerCache cache = ContainerCache.getInstance(conf);
- Preconditions.checkNotNull(cache);
- cache.removeDB(container.getDbFile().getAbsolutePath());
+ Preconditions.checkNotNull(container.getDbFile());
+
+ String containerDBPath = container.getDbFile().getAbsolutePath();
+ if (container.getSchemaVersion().equals(OzoneConsts.SCHEMA_V3)) {
+ DatanodeStoreCache cache = DatanodeStoreCache.getInstance();
+ Preconditions.checkNotNull(cache);
+ cache.removeDB(containerDBPath);
+ } else {
+ ContainerCache cache = ContainerCache.getInstance(conf);
+ Preconditions.checkNotNull(cache);
+ cache.removeDB(containerDBPath);
+ }
}
/**
* Shutdown all DB Handles.
*
- * @param cache - Cache for DB Handles.
+ * @param config configuration used to look up the non-V3 ContainerCache instance.
*/
- public static void shutdownCache(ContainerCache cache) {
- cache.shutdownCache();
+ public static void shutdownCache(ConfigurationSource config) {
+ ContainerCache.getInstance(config).shutdownCache();
+ DatanodeStoreCache.getInstance().shutdownCache();
}
/**
@@ -156,13 +180,20 @@ public final class BlockUtils {
* @param store - low-level DatanodeStore for DB.
* @param containerDBPath - DB path of the container.
* @param conf configuration.
+ * @param schemaVersion schema version of the container layout, used to pick the cache.
*/
public static void addDB(DatanodeStore store, String containerDBPath,
- ConfigurationSource conf) {
- ContainerCache cache = ContainerCache.getInstance(conf);
- Preconditions.checkNotNull(cache);
- cache.addDB(containerDBPath,
- new ReferenceCountedDB(store, containerDBPath));
+ ConfigurationSource conf, String schemaVersion) {
+ if (schemaVersion.equals(OzoneConsts.SCHEMA_V3)) {
+ DatanodeStoreCache cache = DatanodeStoreCache.getInstance();
+ Preconditions.checkNotNull(cache);
+ cache.addDB(containerDBPath, new RawDB(store, containerDBPath));
+ } else {
+ ContainerCache cache = ContainerCache.getInstance(conf);
+ Preconditions.checkNotNull(cache);
+ cache.addDB(containerDBPath,
+ new ReferenceCountedDB(store, containerDBPath));
+ }
}
/**
diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
index a1486c4964..85bacc9a31 100644
---
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
+++
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
@@ -115,7 +115,7 @@ public final class KeyValueContainerUtil {
}
//add db handler into cache
- BlockUtils.addDB(store, dbFile.getAbsolutePath(), conf);
+ BlockUtils.addDB(store, dbFile.getAbsolutePath(), conf, schemaVersion);
}
/**
diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
index 65c764bb9e..35c38c0692 100644
---
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
+++
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
@@ -32,7 +32,6 @@ import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.ozone.container.common.helpers.BlockData;
import org.apache.hadoop.ozone.container.common.interfaces.Container;
import org.apache.hadoop.ozone.container.common.interfaces.DBHandle;
-import org.apache.hadoop.ozone.container.common.utils.ContainerCache;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
@@ -348,7 +347,7 @@ public class BlockManagerImpl implements BlockManager {
*/
@Override
public void shutdown() {
- BlockUtils.shutdownCache(ContainerCache.getInstance(config));
+ BlockUtils.shutdownCache(config);
}
private BlockData getBlockByID(DBHandle db, BlockID blockID,
diff --git
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStoreCache.java
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStoreCache.java
new file mode 100644
index 0000000000..b26ed68009
--- /dev/null
+++
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStoreCache.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.container.common;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.container.common.utils.DatanodeStoreCache;
+import org.apache.hadoop.ozone.container.common.utils.RawDB;
+import org.apache.hadoop.ozone.container.metadata.DatanodeStore;
+import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaThreeImpl;
+import org.junit.Assert;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import java.io.IOException;
+
+/**
+ * Test DatanodeStoreCache.
+ */
+public class TestDatanodeStoreCache {
+ @Rule
+ public TemporaryFolder folder = new TemporaryFolder();
+
+ private OzoneConfiguration conf = new OzoneConfiguration();
+
+ @Test
+ public void testBasicOperations() throws IOException {
+ DatanodeStoreCache cache = DatanodeStoreCache.getInstance();
+ String dbPath1 = folder.newFolder("basic1").getAbsolutePath();
+ String dbPath2 = folder.newFolder("basic2").getAbsolutePath();
+ DatanodeStore store1 = new DatanodeStoreSchemaThreeImpl(conf, dbPath1,
+ false);
+ DatanodeStore store2 = new DatanodeStoreSchemaThreeImpl(conf, dbPath2,
+ false);
+
+ // test normal add
+ cache.addDB(dbPath1, new RawDB(store1, dbPath1));
+ cache.addDB(dbPath2, new RawDB(store2, dbPath2));
+ Assert.assertEquals(2, cache.size());
+
+ // test duplicate add
+ cache.addDB(dbPath1, new RawDB(store1, dbPath1));
+ Assert.assertEquals(2, cache.size());
+
+ // test get, test reference the same object using ==
+ Assert.assertTrue(store1 == cache.getDB(dbPath1).getStore());
+
+ // test remove
+ cache.removeDB(dbPath1);
+ Assert.assertEquals(1, cache.size());
+
+ // test remove non-exist
+ try {
+ cache.removeDB(dbPath1);
+ } catch (Exception e) {
+ Assert.fail("Should not throw " + e);
+ }
+
+ // test shutdown
+ cache.shutdownCache();
+ Assert.assertEquals(0, cache.size());
+ }
+}
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]