This is an automated email from the ASF dual-hosted git repository.
msingh pushed a commit to branch HDDS-3630
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/HDDS-3630 by this push:
new 3c38c3c HDDS-5921. [Merge rocksdb in datanode] Decouple DatanodeStore
impl classes from container. (#2791)
3c38c3c is described below
commit 3c38c3c4ebe0a580fe7fd1e439702bc42810df01
Author: Gui Hecheng <[email protected]>
AuthorDate: Mon Feb 28 20:08:10 2022 +0800
HDDS-5921. [Merge rocksdb in datanode] Decouple DatanodeStore impl classes
from container. (#2791)
---
.../ozone/container/common/utils/ContainerCache.java | 2 +-
.../ozone/container/keyvalue/KeyValueContainer.java | 2 +-
.../ozone/container/keyvalue/KeyValueContainerCheck.java | 3 ++-
.../keyvalue/KeyValueContainerMetadataInspector.java | 9 +++++----
.../ozone/container/keyvalue/helpers/BlockUtils.java | 16 +++++++---------
.../keyvalue/helpers/KeyValueContainerUtil.java | 14 +++++++-------
.../ozone/container/metadata/AbstractDatanodeStore.java | 9 ++++-----
.../hadoop/ozone/container/metadata/DatanodeStore.java | 8 ++++----
.../container/metadata/DatanodeStoreSchemaOneImpl.java | 7 +++----
.../container/metadata/DatanodeStoreSchemaTwoImpl.java | 7 +++----
.../ozone/container/common/TestContainerCache.java | 2 +-
.../common/TestSchemaOneBackwardsCompatibility.java | 5 +++--
.../container/keyvalue/TestKeyValueBlockIterator.java | 12 ++++++------
.../container/keyvalue/TestKeyValueContainerCheck.java | 2 +-
.../ozone/client/rpc/TestOzoneRpcClientAbstract.java | 4 ++--
15 files changed, 50 insertions(+), 52 deletions(-)
diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java
index af0958a..1551836 100644
---
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java
+++
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java
@@ -156,7 +156,7 @@ public final class ContainerCache extends LRUMap {
try {
long start = Time.monotonicNow();
- DatanodeStore store = BlockUtils.getUncachedDatanodeStore(containerID,
+ DatanodeStore store = BlockUtils.getUncachedDatanodeStore(
containerDBPath, schemaVersion, conf, false);
db = new ReferenceCountedDB(store, containerDBPath);
metrics.incDbOpenLatency(Time.monotonicNow() - start);
diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
index ba131ff..fe087b0 100644
---
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
+++
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
@@ -157,7 +157,7 @@ public class KeyValueContainer implements
Container<KeyValueContainerData> {
containerData.setSchemaVersion(
VersionedDatanodeFeatures.SchemaV2.chooseSchemaVersion());
- KeyValueContainerUtil.createContainerMetaData(containerID,
+ KeyValueContainerUtil.createContainerMetaData(
containerMetaDataPath, chunksPath, dbFile,
containerData.getSchemaVersion(), config);
diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java
index 40d527d..b5c68d3 100644
---
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java
+++
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java
@@ -232,7 +232,8 @@ public class KeyValueContainerCheck {
try (ReferenceCountedDB db =
BlockUtils.getDB(onDiskContainerData, checkConfig);
- BlockIterator<BlockData> kvIter = db.getStore().getBlockIterator()) {
+ BlockIterator<BlockData> kvIter = db.getStore().getBlockIterator(
+ onDiskContainerData.getContainerID())) {
while (kvIter.hasNext()) {
BlockData block = kvIter.nextBlock();
diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerMetadataInspector.java
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerMetadataInspector.java
index 614b63d..bb7fdc6 100644
---
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerMetadataInspector.java
+++
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerMetadataInspector.java
@@ -208,7 +208,8 @@ public class KeyValueContainerMetadataInspector implements
ContainerInspector {
containerJson.add("dBMetadata", dBMetadata);
// Build aggregate values.
- JsonObject aggregates = getAggregateValues(store, schemaVersion);
+ JsonObject aggregates = getAggregateValues(store,
+ containerData.getContainerID(), schemaVersion);
containerJson.add("aggregates", aggregates);
// Build info about chunks directory.
@@ -242,7 +243,7 @@ public class KeyValueContainerMetadataInspector implements
ContainerInspector {
}
private JsonObject getAggregateValues(DatanodeStore store,
- String schemaVersion) throws IOException {
+ long containerID, String schemaVersion) throws IOException {
JsonObject aggregates = new JsonObject();
long usedBytesTotal = 0;
@@ -250,7 +251,7 @@ public class KeyValueContainerMetadataInspector implements
ContainerInspector {
long pendingDeleteBlockCountTotal = 0;
// Count normal blocks.
try (BlockIterator<BlockData> blockIter =
- store.getBlockIterator(
+ store.getBlockIterator(containerID,
MetadataKeyFilters.getUnprefixedKeyFilter())) {
while (blockIter.hasNext()) {
@@ -262,7 +263,7 @@ public class KeyValueContainerMetadataInspector implements
ContainerInspector {
// Count pending delete blocks.
if (schemaVersion.equals(OzoneConsts.SCHEMA_V1)) {
try (BlockIterator<BlockData> blockIter =
- store.getBlockIterator(
+ store.getBlockIterator(containerID,
MetadataKeyFilters.getDeletingKeyFilter())) {
while (blockIter.hasNext()) {
diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java
index 6339234..b6a5b6a 100644
---
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java
+++
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java
@@ -52,28 +52,26 @@ public final class BlockUtils {
}
/**
- * Obtain a DB handler for a given container. This handler is not cached and
- * the caller must close it after using it.
+ * Obtain a DB handler for a given container or the underlying volume.
+ * This handler is not cached and the caller must close it after using it.
* If another thread attempts to open the same container when it is already
* opened by this thread, the other thread will get a RocksDB exception.
- * @param containerID The containerID
* @param containerDBPath The absolute path to the container database folder
* @param schemaVersion The Container Schema version
* @param conf Configuration
+ * @param readOnly open DB in read-only mode or not
* @return Handler to the given container.
* @throws IOException
*/
- public static DatanodeStore getUncachedDatanodeStore(long containerID,
+ public static DatanodeStore getUncachedDatanodeStore(
String containerDBPath, String schemaVersion,
ConfigurationSource conf, boolean readOnly) throws IOException {
DatanodeStore store;
if (schemaVersion.equals(OzoneConsts.SCHEMA_V1)) {
- store = new DatanodeStoreSchemaOneImpl(conf,
- containerID, containerDBPath, readOnly);
+ store = new DatanodeStoreSchemaOneImpl(conf, containerDBPath, readOnly);
} else if (schemaVersion.equals(OzoneConsts.SCHEMA_V2)) {
- store = new DatanodeStoreSchemaTwoImpl(conf,
- containerID, containerDBPath, readOnly);
+ store = new DatanodeStoreSchemaTwoImpl(conf, containerDBPath, readOnly);
} else {
throw new IllegalArgumentException(
"Unrecognized database schema version: " + schemaVersion);
@@ -94,7 +92,7 @@ public final class BlockUtils {
public static DatanodeStore getUncachedDatanodeStore(
KeyValueContainerData containerData, ConfigurationSource conf,
boolean readOnly) throws IOException {
- return getUncachedDatanodeStore(containerData.getContainerID(),
+ return getUncachedDatanodeStore(
containerData.getDbFile().getAbsolutePath(),
containerData.getSchemaVersion(), conf, readOnly);
}
diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
index 476eeef..0a30c36 100644
---
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
+++
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
@@ -79,7 +79,7 @@ public final class KeyValueContainerUtil {
* @param conf The configuration to use for this container.
* @throws IOException
*/
- public static void createContainerMetaData(long containerID,
+ public static void createContainerMetaData(
File containerMetaDataPath, File chunksPath, File dbFile,
String schemaVersion, ConfigurationSource conf) throws IOException {
Preconditions.checkNotNull(containerMetaDataPath);
@@ -104,11 +104,11 @@ public final class KeyValueContainerUtil {
DatanodeStore store;
if (schemaVersion.equals(OzoneConsts.SCHEMA_V1)) {
- store = new DatanodeStoreSchemaOneImpl(conf,
- containerID, dbFile.getAbsolutePath(), false);
+ store = new DatanodeStoreSchemaOneImpl(conf, dbFile.getAbsolutePath(),
+ false);
} else if (schemaVersion.equals(OzoneConsts.SCHEMA_V2)) {
- store = new DatanodeStoreSchemaTwoImpl(conf,
- containerID, dbFile.getAbsolutePath(), false);
+ store = new DatanodeStoreSchemaTwoImpl(conf, dbFile.getAbsolutePath(),
+ false);
} else {
throw new IllegalArgumentException(
"Unrecognized schema version for container: " + schemaVersion);
@@ -302,7 +302,7 @@ public final class KeyValueContainerUtil {
long usedBytes = 0;
try (BlockIterator<BlockData> blockIter =
- store.getBlockIterator(
+ store.getBlockIterator(kvData.getContainerID(),
MetadataKeyFilters.getUnprefixedKeyFilter())) {
while (blockIter.hasNext()) {
@@ -317,7 +317,7 @@ public final class KeyValueContainerUtil {
// Count all deleting blocks.
try (BlockIterator<BlockData> blockIter =
- store.getBlockIterator(
+ store.getBlockIterator(kvData.getContainerID(),
MetadataKeyFilters.getDeletingKeyFilter())) {
while (blockIter.hasNext()) {
diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
index f9f794d..9adf673 100644
---
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
+++
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
@@ -67,7 +67,6 @@ public abstract class AbstractDatanodeStore implements
DatanodeStore {
LoggerFactory.getLogger(AbstractDatanodeStore.class);
private DBStore store;
private final AbstractDatanodeDBDefinition dbDef;
- private final long containerID;
private final ColumnFamilyOptions cfOptions;
private static DatanodeDBProfile dbProfile;
@@ -79,7 +78,7 @@ public abstract class AbstractDatanodeStore implements
DatanodeStore {
* @param config - Ozone Configuration.
* @throws IOException - on Failure.
*/
- protected AbstractDatanodeStore(ConfigurationSource config, long containerID,
+ protected AbstractDatanodeStore(ConfigurationSource config,
AbstractDatanodeDBDefinition dbDef, boolean openReadOnly)
throws IOException {
@@ -92,7 +91,6 @@ public abstract class AbstractDatanodeStore implements
DatanodeStore {
cfOptions = dbProfile.getColumnFamilyOptions(config);
this.dbDef = dbDef;
- this.containerID = containerID;
this.openReadOnly = openReadOnly;
start(config);
}
@@ -178,13 +176,14 @@ public abstract class AbstractDatanodeStore implements
DatanodeStore {
}
@Override
- public BlockIterator<BlockData> getBlockIterator() {
+ public BlockIterator<BlockData> getBlockIterator(long containerID) {
return new KeyValueBlockIterator(containerID,
blockDataTableWithIterator.iterator());
}
@Override
- public BlockIterator<BlockData> getBlockIterator(KeyPrefixFilter filter) {
+ public BlockIterator<BlockData> getBlockIterator(long containerID,
+ KeyPrefixFilter filter) {
return new KeyValueBlockIterator(containerID,
blockDataTableWithIterator.iterator(), filter);
}
diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStore.java
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStore.java
index 5a0ce7a..cc6ecaa 100644
---
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStore.java
+++
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStore.java
@@ -19,7 +19,7 @@ package org.apache.hadoop.ozone.container.metadata;
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
-import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
+import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter;
import org.apache.hadoop.hdds.utils.db.BatchOperationHandler;
import org.apache.hadoop.hdds.utils.db.DBStore;
import org.apache.hadoop.hdds.utils.db.Table;
@@ -87,8 +87,8 @@ public interface DatanodeStore {
void compactDB() throws IOException;
- BlockIterator<BlockData> getBlockIterator();
+ BlockIterator<BlockData> getBlockIterator(long containerID);
- BlockIterator<BlockData>
- getBlockIterator(MetadataKeyFilters.KeyPrefixFilter filter);
+ BlockIterator<BlockData> getBlockIterator(long containerID,
+ KeyPrefixFilter filter);
}
diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaOneImpl.java
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaOneImpl.java
index b72f19e..463ec87 100644
---
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaOneImpl.java
+++
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaOneImpl.java
@@ -34,10 +34,9 @@ public class DatanodeStoreSchemaOneImpl extends
AbstractDatanodeStore {
* @param config - Ozone Configuration.
* @throws IOException - on Failure.
*/
- public DatanodeStoreSchemaOneImpl(ConfigurationSource config,
- long containerID, String dbPath, boolean openReadOnly)
- throws IOException {
- super(config, containerID, new DatanodeSchemaOneDBDefinition(dbPath),
+ public DatanodeStoreSchemaOneImpl(ConfigurationSource config, String dbPath,
+ boolean openReadOnly) throws IOException {
+ super(config, new DatanodeSchemaOneDBDefinition(dbPath),
openReadOnly);
}
diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaTwoImpl.java
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaTwoImpl.java
index db8fe6b..9669c8d 100644
---
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaTwoImpl.java
+++
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaTwoImpl.java
@@ -42,10 +42,9 @@ public class DatanodeStoreSchemaTwoImpl extends
AbstractDatanodeStore {
* @param config - Ozone Configuration.
* @throws IOException - on Failure.
*/
- public DatanodeStoreSchemaTwoImpl(ConfigurationSource config,
- long containerID, String dbPath, boolean openReadOnly)
- throws IOException {
- super(config, containerID, new DatanodeSchemaTwoDBDefinition(dbPath),
+ public DatanodeStoreSchemaTwoImpl(ConfigurationSource config, String dbPath,
+ boolean openReadOnly) throws IOException {
+ super(config, new DatanodeSchemaTwoDBDefinition(dbPath),
openReadOnly);
this.deleteTransactionTable = new DatanodeSchemaTwoDBDefinition(dbPath)
.getDeleteTransactionsColumnFamily().getTable(getStore());
diff --git
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java
index e55d68c..a6ef461 100644
---
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java
+++
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java
@@ -54,7 +54,7 @@ public class TestContainerCache {
private void createContainerDB(OzoneConfiguration conf, File dbFile)
throws Exception {
DatanodeStore store = new DatanodeStoreSchemaTwoImpl(
- conf, 1, dbFile.getAbsolutePath(), false);
+ conf, dbFile.getAbsolutePath(), false);
// we close since the SCM pre-creates containers.
// we will open and put Db handle into a cache when keys are being created
diff --git
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java
index a80adca..cea7fa9 100644
---
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java
+++
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java
@@ -401,7 +401,7 @@ public class TestSchemaOneBackwardsCompatibility {
// Test reading blocks with block iterator.
try (BlockIterator<BlockData> iter =
- refCountedDB.getStore().getBlockIterator()) {
+ refCountedDB.getStore().getBlockIterator(TestDB.CONTAINER_ID)) {
List<String> iteratorBlockIDs = new ArrayList<>();
@@ -453,7 +453,8 @@ public class TestSchemaOneBackwardsCompatibility {
MetadataKeyFilters.getDeletingKeyFilter();
try (BlockIterator<BlockData> iter =
- refCountedDB.getStore().getBlockIterator(filter)) {
+ refCountedDB.getStore().getBlockIterator(TestDB.CONTAINER_ID,
+ filter)) {
List<String> iteratorBlockIDs = new ArrayList<>();
diff --git
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
index 7f14ccf..1e13446 100644
---
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
+++
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
@@ -129,7 +129,7 @@ public class TestKeyValueBlockIterator {
// Default filter used is all unprefixed blocks.
List<Long> unprefixedBlockIDs = blockIDs.get("");
try (BlockIterator<BlockData> keyValueBlockIterator =
- db.getStore().getBlockIterator()) {
+ db.getStore().getBlockIterator(CONTAINER_ID)) {
Iterator<Long> blockIDIter = unprefixedBlockIDs.iterator();
while (keyValueBlockIterator.hasNext()) {
@@ -161,7 +161,7 @@ public class TestKeyValueBlockIterator {
public void testKeyValueBlockIteratorWithNextBlock() throws Exception {
List<Long> blockIDs = createContainerWithBlocks(CONTAINER_ID, 2);
try (BlockIterator<BlockData> keyValueBlockIterator =
- db.getStore().getBlockIterator()) {
+ db.getStore().getBlockIterator(CONTAINER_ID)) {
assertEquals((long)blockIDs.get(0),
keyValueBlockIterator.nextBlock().getLocalID());
assertEquals((long)blockIDs.get(1),
@@ -180,7 +180,7 @@ public class TestKeyValueBlockIterator {
public void testKeyValueBlockIteratorWithHasNext() throws Exception {
List<Long> blockIDs = createContainerWithBlocks(CONTAINER_ID, 2);
try (BlockIterator<BlockData> blockIter =
- db.getStore().getBlockIterator()) {
+ db.getStore().getBlockIterator(CONTAINER_ID)) {
// Even calling multiple times hasNext() should not move entry forward.
assertTrue(blockIter.hasNext());
@@ -218,7 +218,7 @@ public class TestKeyValueBlockIterator {
Map<String, List<Long>> blockIDs = createContainerWithBlocks(CONTAINER_ID,
normalBlocks, deletingBlocks);
try (BlockIterator<BlockData> keyValueBlockIterator =
- db.getStore().getBlockIterator(
+ db.getStore().getBlockIterator(CONTAINER_ID,
MetadataKeyFilters.getDeletingKeyFilter())) {
List<Long> deletingBlockIDs =
blockIDs.get(OzoneConsts.DELETING_KEY_PREFIX);
@@ -239,7 +239,7 @@ public class TestKeyValueBlockIterator {
Exception {
createContainerWithBlocks(CONTAINER_ID, 0, 5);
try (BlockIterator<BlockData> keyValueBlockIterator =
- db.getStore().getBlockIterator()) {
+ db.getStore().getBlockIterator(CONTAINER_ID)) {
//As all blocks are deleted blocks, blocks does not match with normal key
// filter.
assertFalse(keyValueBlockIterator.hasNext());
@@ -297,7 +297,7 @@ public class TestKeyValueBlockIterator {
private void testWithFilter(MetadataKeyFilters.KeyPrefixFilter filter,
List<Long> expectedIDs) throws Exception {
try (BlockIterator<BlockData> iterator =
- db.getStore().getBlockIterator(filter)) {
+ db.getStore().getBlockIterator(CONTAINER_ID, filter)) {
// Test seek.
iterator.seekToFirst();
long firstID = iterator.nextBlock().getLocalID();
diff --git
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java
index 99812f3..2fb9c8b 100644
---
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java
+++
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java
@@ -115,7 +115,7 @@ public class TestKeyValueContainerCheck
try (ReferenceCountedDB ignored =
BlockUtils.getDB(containerData, conf);
BlockIterator<BlockData> kvIter =
- ignored.getStore().getBlockIterator()) {
+ ignored.getStore().getBlockIterator(containerID)) {
BlockData block = kvIter.nextBlock();
assertFalse(block.getChunks().isEmpty());
ContainerProtos.ChunkInfo c = block.getChunks().get(0);
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
index 128c407..e638c13 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
@@ -1481,7 +1481,7 @@ public abstract class TestOzoneRpcClientAbstract {
try (ReferenceCountedDB db = BlockUtils.getDB(containerData,
cluster.getConf());
BlockIterator<BlockData> keyValueBlockIterator =
- db.getStore().getBlockIterator()) {
+ db.getStore().getBlockIterator(containerID)) {
while (keyValueBlockIterator.hasNext()) {
BlockData blockData = keyValueBlockIterator.nextBlock();
if (blockData.getBlockID().getLocalID() == localID) {
@@ -1741,7 +1741,7 @@ public abstract class TestOzoneRpcClientAbstract {
try (ReferenceCountedDB db = BlockUtils.getDB(containerData,
cluster.getConf());
BlockIterator<BlockData> keyValueBlockIterator =
- db.getStore().getBlockIterator()) {
+ db.getStore().getBlockIterator(containerID)) {
// Find the block corresponding to the key we put. We use the localID of
// the BlockData to identify out key.
BlockData blockData = null;
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]