This is an automated email from the ASF dual-hosted git repository.
bharat pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git
The following commit(s) were added to refs/heads/master by this push:
new a2ab8d6  HDDS-3217. Datanode startup is slow due to iterating container DB 2-3 times. (#742)
a2ab8d6 is described below
commit a2ab8d6e35f60af9762a191265942071755329be
Author: Bharat Viswanadham <[email protected]>
AuthorDate: Thu May 7 16:36:59 2020 -0700
HDDS-3217. Datanode startup is slow due to iterating container DB 2-3 times. (#742)
---
.../java/org/apache/hadoop/ozone/OzoneConsts.java | 20 ++
.../ozone/container/common/impl/ContainerData.java | 10 +
.../commandhandler/DeleteBlocksCommandHandler.java | 29 ++-
.../container/keyvalue/KeyValueContainer.java | 1 -
.../container/keyvalue/KeyValueContainerData.java | 29 +++
.../keyvalue/helpers/KeyValueContainerUtil.java | 136 ++++++++++---
.../container/keyvalue/impl/BlockManagerImpl.java | 45 ++++-
.../background/BlockDeletingService.java | 12 +-
.../ozone/container/ozoneimpl/ContainerReader.java | 70 +------
.../container/common/TestBlockDeletingService.java | 37 +++-
.../container/keyvalue/TestKeyValueContainer.java | 5 +
.../container/ozoneimpl/TestContainerReader.java | 222 +++++++++++++++++++++
.../container/ozoneimpl/TestOzoneContainer.java | 7 +
.../hadoop/hdds/utils/MetadataKeyFilters.java | 5 +-
14 files changed, 514 insertions(+), 114 deletions(-)
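
For orientation before the per-file diffs: the patch persists the per-container counters (block count, bytes used, pending delete block count) under reserved '#'-prefixed keys in the container DB, alongside the existing #delTX# and #BCSID entries, so datanode startup can read back a handful of keys instead of iterating every block. Values are written via Guava's Longs.toByteArray, i.e. 8-byte big-endian longs (with one int/long wrinkle noted further down). A minimal standalone sketch of that encoding, JDK only:

  import java.nio.ByteBuffer;
  import java.nio.charset.StandardCharsets;

  final class CounterKeySketch {
    // Reserved metadata key, as added to OzoneConsts in this patch.
    static final byte[] BLOCK_COUNT_KEY =
        "#BLOCKCOUNT".getBytes(StandardCharsets.UTF_8);

    // Equivalent of Longs.toByteArray: big-endian, 8 bytes.
    static byte[] encode(long value) {
      return ByteBuffer.allocate(Long.BYTES).putLong(value).array();
    }

    // Equivalent of Longs.fromByteArray for 8-byte values.
    static long decode(byte[] raw) {
      return ByteBuffer.wrap(raw).getLong();
    }

    public static void main(String[] args) {
      System.out.println(decode(encode(12345L))); // prints 12345
    }
  }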
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index 9181d34..e29c72a 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -19,6 +19,7 @@
package org.apache.hadoop.ozone;
import org.apache.hadoop.hdds.annotation.InterfaceAudience;
+import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.ratis.thirdparty.io.grpc.Context;
import org.apache.ratis.thirdparty.io.grpc.Metadata;
@@ -139,6 +140,25 @@ public final class OzoneConsts {
public static final String DELETE_TRANSACTION_KEY_PREFIX = "#delTX#";
public static final String BLOCK_COMMIT_SEQUENCE_ID_PREFIX = "#BCSID";
+ public static final String BLOCK_COUNT = "#BLOCKCOUNT";
+ public static final String CONTAINER_BYTES_USED = "#BYTESUSED";
+ public static final String PENDING_DELETE_BLOCK_COUNT =
+ "#PENDINGDELETEBLOCKCOUNT";
+
+
+ public static final byte[] DB_BLOCK_COUNT_KEY =
+ DFSUtil.string2Bytes(OzoneConsts.BLOCK_COUNT);
+ public static final byte[] DB_CONTAINER_BYTES_USED_KEY =
+ DFSUtil.string2Bytes(OzoneConsts.CONTAINER_BYTES_USED);
+ public static final byte[] DB_PENDING_DELETE_BLOCK_COUNT_KEY =
+ DFSUtil.string2Bytes(PENDING_DELETE_BLOCK_COUNT);
+ public static final byte[] DB_CONTAINER_DELETE_TRANSACTION_KEY =
+ DFSUtil.string2Bytes(DELETE_TRANSACTION_KEY_PREFIX);
+ public static final byte[] DB_BLOCK_COMMIT_SEQUENCE_ID_KEY =
+ DFSUtil.string2Bytes(BLOCK_COMMIT_SEQUENCE_ID_PREFIX);
+
+
+
/**
* OM LevelDB prefixes.
*
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
index 00627ff..ba34a29 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
@@ -24,6 +24,7 @@ import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.time.Instant;
import java.util.List;
+
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.
ContainerType;
@@ -504,6 +505,15 @@ public abstract class ContainerData {
}
/**
+ * Decrease the count of keys in the container.
+ *
+ * @param deletedKeyCount number of keys removed from the container
+ */
+ public void decrKeyCount(long deletedKeyCount) {
+ this.keyCount.addAndGet(-1 * deletedKeyCount);
+ }
+
+ /**
* Returns number of keys in the container.
* @return key count
*/
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
index 2098257..4324875 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
@@ -58,6 +58,8 @@ import java.util.function.Consumer;
import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.Result.CONTAINER_NOT_FOUND;
+import static org.apache.hadoop.ozone.OzoneConsts.DB_CONTAINER_DELETE_TRANSACTION_KEY;
+import static org.apache.hadoop.ozone.OzoneConsts.DB_PENDING_DELETE_BLOCK_COUNT_KEY;
/**
* Handle block deletion commands.
@@ -251,12 +253,27 @@ public class DeleteBlocksCommandHandler implements CommandHandler {
}
}
- containerDB.getStore()
- .put(DFSUtil.string2Bytes(OzoneConsts.DELETE_TRANSACTION_KEY_PREFIX),
- Longs.toByteArray(delTX.getTxID()));
- containerData
- .updateDeleteTransactionId(delTX.getTxID());
- // update pending deletion blocks count in in-memory container status
+ // Batch the DB counter updates so they commit atomically.
+ BatchOperation batchOperation = new BatchOperation();
+
+ // Update the delete transaction ID in the DB only when the incoming
+ // transaction ID is greater than the stored one.
+ if (delTX.getTxID() > containerData.getDeleteTransactionId()) {
+ // Persist the new delete transaction ID.
+ batchOperation.put(DB_CONTAINER_DELETE_TRANSACTION_KEY,
+ Longs.toByteArray(delTX.getTxID()));
+ }
+
+ batchOperation.put(DB_PENDING_DELETE_BLOCK_COUNT_KEY, Longs.toByteArray(
+ containerData.getNumPendingDeletionBlocks() + newDeletionBlocks));
+
+ containerDB.getStore().writeBatch(batchOperation);
+
+
+ // update pending deletion blocks count and delete transaction ID in
+ // in-memory container status
+ containerData.updateDeleteTransactionId(delTX.getTxID());
+
containerData.incrPendingDeletionBlocks(newDeletionBlocks);
}
}
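
The transaction-ID guard above keeps replayed or out-of-order delete commands from moving the persisted transaction ID backwards; only the pending-delete counter is written unconditionally. A compressed sketch of that monotonic-update invariant (hypothetical class, not the real handler):

  // Hypothetical sketch: the stored delete transaction ID only moves
  // forward, mirroring the guard in the handler above.
  final class TxnIdGuardSketch {
    private long storedTxnId;

    // Returns true when the incoming ID should be persisted.
    boolean advance(long incomingTxnId) {
      if (incomingTxnId > storedTxnId) {
        storedTxnId = incomingTxnId; // in Ozone: put into the same batch
        return true;
      }
      return false; // stale or duplicate command: keep the stored ID
    }
  }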
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
index 200bfe4..9892cb5 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
@@ -492,7 +492,6 @@ public class KeyValueContainer implements Container<KeyValueContainerData> {
containerData.setState(originalContainerData.getState());
containerData
.setContainerDBType(originalContainerData.getContainerDBType());
- containerData.setBytesUsed(originalContainerData.getBytesUsed());
//rewriting the yaml file with new checksum calculation.
update(originalContainerData.getMetadata(), true);
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
index caae215..373b322 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
@@ -20,13 +20,18 @@ package org.apache.hadoop.ozone.container.keyvalue;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
+
+import java.io.IOException;
import java.util.Collections;
+import com.google.common.primitives.Longs;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.ContainerDataProto;
+import org.apache.hadoop.hdds.utils.BatchOperation;
import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion;
import org.apache.hadoop.ozone.container.common.impl.ContainerData;
+import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
import org.yaml.snakeyaml.nodes.Tag;
@@ -36,9 +41,12 @@ import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
import static java.lang.Math.max;
+import static org.apache.hadoop.ozone.OzoneConsts.DB_BLOCK_COUNT_KEY;
import static org.apache.hadoop.ozone.OzoneConsts.CHUNKS_PATH;
+import static org.apache.hadoop.ozone.OzoneConsts.DB_CONTAINER_BYTES_USED_KEY;
import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_TYPE;
import static org.apache.hadoop.ozone.OzoneConsts.METADATA_PATH;
+import static org.apache.hadoop.ozone.OzoneConsts.DB_PENDING_DELETE_BLOCK_COUNT_KEY;
/**
* This class represents the KeyValueContainer metadata, which is the
@@ -248,4 +256,25 @@ public class KeyValueContainerData extends ContainerData {
return Collections.unmodifiableList(KV_YAML_FIELDS);
}
+ /**
+ * Update DB counters related to block metadata.
+ * @param db - Reference to container DB.
+ * @param batchOperation - Batch Operation to batch DB operations.
+ * @param deletedBlockCount - Number of blocks deleted.
+ * @throws IOException
+ */
+ public void updateAndCommitDBCounters(
+ ReferenceCountedDB db, BatchOperation batchOperation,
+ int deletedBlockCount) throws IOException {
+ // Set the bytes used, block count and pending delete block count keys.
+ batchOperation.put(DB_CONTAINER_BYTES_USED_KEY,
+ Longs.toByteArray(getBytesUsed()));
+ batchOperation.put(DB_BLOCK_COUNT_KEY, Longs.toByteArray(
+ getKeyCount() - deletedBlockCount));
+ batchOperation.put(DB_PENDING_DELETE_BLOCK_COUNT_KEY, Longs.toByteArray(
+ getNumPendingDeletionBlocks() - deletedBlockCount));
+ db.getStore().writeBatch(batchOperation);
+ }
+
+
}
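
The new helper gathers the three counter writes and the final writeBatch in one place. Callers hand it a batch that already holds their block deletions, so counters and deletions commit atomically. A rough caller-side sketch under that assumption (the loop body is illustrative; the real call site is in the BlockDeletingService hunk below):

  import java.io.IOException;
  import java.util.List;
  import org.apache.hadoop.hdds.utils.BatchOperation;
  import org.apache.hadoop.hdfs.DFSUtil;
  import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
  import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;

  final class CounterCommitSketch {
    // Queue the block tombstone deletes, then let the helper append the
    // counter updates so everything lands in a single atomic writeBatch.
    static void commitDeletes(KeyValueContainerData containerData,
        ReferenceCountedDB meta, List<String> deletedKeys)
        throws IOException {
      BatchOperation batch = new BatchOperation();
      for (String key : deletedKeys) {
        batch.delete(DFSUtil.string2Bytes(key));
      }
      containerData.updateAndCommitDBCounters(meta, batch,
          deletedKeys.size());
    }
  }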
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
index 9fda44b..2141bed 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
@@ -22,16 +22,18 @@ import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.List;
-import java.util.Map;
+import com.google.common.primitives.Ints;
import com.google.common.primitives.Longs;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
-import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
import org.apache.hadoop.ozone.container.common.helpers.BlockData;
+import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
+import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueBlockIterator;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
import org.apache.hadoop.hdds.utils.MetadataStore;
import org.apache.hadoop.hdds.utils.MetadataStoreBuilder;
@@ -41,6 +43,12 @@ import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import static org.apache.hadoop.ozone.OzoneConsts.DB_BLOCK_COMMIT_SEQUENCE_ID_KEY;
+import static org.apache.hadoop.ozone.OzoneConsts.DB_BLOCK_COUNT_KEY;
+import static org.apache.hadoop.ozone.OzoneConsts.DB_CONTAINER_BYTES_USED_KEY;
+import static org.apache.hadoop.ozone.OzoneConsts.DB_CONTAINER_DELETE_TRANSACTION_KEY;
+import static org.apache.hadoop.ozone.OzoneConsts.DB_PENDING_DELETE_BLOCK_COUNT_KEY;
+
/**
* Class which defines utility methods for KeyValueContainer.
*/
@@ -126,7 +134,9 @@ public final class KeyValueContainerUtil {
}
/**
- * Parse KeyValueContainerData and verify checksum.
+ * Parse KeyValueContainerData and verify checksum. Also set the
+ * block-related metadata: block commit sequence ID, block count, bytes
+ * used, pending delete block count and delete transaction ID.
* @param kvContainerData
* @param config
* @throws IOException
@@ -150,29 +160,109 @@ public final class KeyValueContainerUtil {
}
kvContainerData.setDbFile(dbFile);
- try(ReferenceCountedDB metadata =
- BlockUtils.getDB(kvContainerData, config)) {
- long bytesUsed = 0;
- List<Map.Entry<byte[], byte[]>> liveKeys = metadata.getStore()
- .getRangeKVs(null, Integer.MAX_VALUE,
- MetadataKeyFilters.getNormalKeyFilter());
- bytesUsed = liveKeys.parallelStream().mapToLong(e-> {
- BlockData blockData;
+ boolean isBlockMetadataSet = false;
+
+ try(ReferenceCountedDB containerDB = BlockUtils.getDB(kvContainerData,
+ config)) {
+
+ // Set pending delete block count from the persisted counter, if any.
+ byte[] pendingDeleteBlockCount =
+ containerDB.getStore().get(DB_PENDING_DELETE_BLOCK_COUNT_KEY);
+ if (pendingDeleteBlockCount != null) {
+ kvContainerData.incrPendingDeletionBlocks(
+ Ints.fromByteArray(pendingDeleteBlockCount));
+ } else {
+ // Fall back to counting the blocks marked for deletion.
+ MetadataKeyFilters.KeyPrefixFilter filter =
+ new MetadataKeyFilters.KeyPrefixFilter()
+ .addFilter(OzoneConsts.DELETING_KEY_PREFIX);
+ int numPendingDeletionBlocks =
+ containerDB.getStore().getSequentialRangeKVs(null,
+ Integer.MAX_VALUE, filter)
+ .size();
+ kvContainerData.incrPendingDeletionBlocks(numPendingDeletionBlocks);
+ }
+
+ // Set delete transaction id.
+ byte[] delTxnId =
+ containerDB.getStore().get(DB_CONTAINER_DELETE_TRANSACTION_KEY);
+ if (delTxnId != null) {
+ kvContainerData
+ .updateDeleteTransactionId(Longs.fromByteArray(delTxnId));
+ }
+
+ // Set BlockCommitSequenceId.
+ byte[] bcsId = containerDB.getStore().get(
+ DB_BLOCK_COMMIT_SEQUENCE_ID_KEY);
+ if (bcsId != null) {
+ kvContainerData
+ .updateBlockCommitSequenceId(Longs.fromByteArray(bcsId));
+ }
+
+ // Set bytes used.
+ // commitSpace for Open Containers relies on usedBytes
+ byte[] bytesUsed =
+ containerDB.getStore().get(DB_CONTAINER_BYTES_USED_KEY);
+ if (bytesUsed != null) {
+ isBlockMetadataSet = true;
+ kvContainerData.setBytesUsed(Longs.fromByteArray(bytesUsed));
+ }
+
+ // Set block count.
+ byte[] blockCount = containerDB.getStore().get(DB_BLOCK_COUNT_KEY);
+ if (blockCount != null) {
+ isBlockMetadataSet = true;
+ kvContainerData.setKeyCount(Longs.fromByteArray(blockCount));
+ }
+ }
+
+ if (!isBlockMetadataSet) {
+ initializeUsedBytesAndBlockCount(kvContainerData);
+ }
+ }
+
+
+ /**
+ * Initialize bytes used and block count.
+ * @param kvContainerData
+ * @throws IOException
+ */
+ private static void initializeUsedBytesAndBlockCount(
+ KeyValueContainerData kvContainerData) throws IOException {
+
+ long blockCount = 0;
+ try (KeyValueBlockIterator blockIter = new KeyValueBlockIterator(
+ kvContainerData.getContainerID(),
+ new File(kvContainerData.getContainerPath()))) {
+ long usedBytes = 0;
+
+
+ boolean success = true;
+ while (success) {
try {
- blockData = BlockUtils.getBlockData(e.getValue());
- return blockData.getSize();
+ if (blockIter.hasNext()) {
+ BlockData block = blockIter.nextBlock();
+ long blockLen = 0;
+
+ List<ContainerProtos.ChunkInfo> chunkInfoList = block.getChunks();
+ for (ContainerProtos.ChunkInfo chunk : chunkInfoList) {
+ ChunkInfo info = ChunkInfo.getFromProtoBuf(chunk);
+ blockLen += info.getLen();
+ }
+
+ usedBytes += blockLen;
+ blockCount++;
+ } else {
+ success = false;
+ }
} catch (IOException ex) {
- return 0L;
+ LOG.error("Failed to parse block data for Container {}",
+ kvContainerData.getContainerID());
}
- }).sum();
- kvContainerData.setBytesUsed(bytesUsed);
- kvContainerData.setKeyCount(liveKeys.size());
- byte[] bcsId = metadata.getStore().get(DFSUtil.string2Bytes(
- OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX));
- if (bcsId != null) {
- kvContainerData.updateBlockCommitSequenceId(Longs.fromByteArray(bcsId));
}
+ kvContainerData.setBytesUsed(usedBytes);
+ kvContainerData.setKeyCount(blockCount);
}
}
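
One encoding detail worth flagging in the hunk above: the pending delete block count is read here with Ints.fromByteArray, while DeleteBlocksCommandHandler writes the same key with Longs.toByteArray. Guava's Ints.fromByteArray decodes only the first four bytes of its argument, so an 8-byte big-endian encoding of a small count decodes as 0. A standalone demo of the Guava behavior (the mismatch itself is an observation about this patch, worth verifying against follow-up changes):

  import com.google.common.primitives.Ints;
  import com.google.common.primitives.Longs;

  public final class WidthMismatchDemo {
    public static void main(String[] args) {
      byte[] eightBytes = Longs.toByteArray(42L);
      // Ints.fromByteArray reads the first 4 bytes only; for a small
      // long those are all zero, so the count silently reads back as 0.
      System.out.println(Ints.fromByteArray(eightBytes));  // prints 0
      System.out.println(Longs.fromByteArray(eightBytes)); // prints 42
    }
  }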
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
index ee663d8..940383e 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
@@ -25,8 +25,6 @@ import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import
org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.container.common.helpers.BlockData;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
@@ -47,6 +45,10 @@ import java.util.Map;
import static
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.NO_SUCH_BLOCK;
import static
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.UNKNOWN_BCSID;
import static
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.BCSID_MISMATCH;
+import static org.apache.hadoop.ozone.OzoneConsts.DB_BLOCK_COMMIT_SEQUENCE_ID_KEY;
+import static org.apache.hadoop.ozone.OzoneConsts.DB_BLOCK_COUNT_KEY;
+import static org.apache.hadoop.ozone.OzoneConsts.DB_CONTAINER_BYTES_USED_KEY;
+
/**
* This class is for performing block related operations on the KeyValue
* Container.
@@ -54,8 +56,7 @@ import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Res
public class BlockManagerImpl implements BlockManager {
static final Logger LOG = LoggerFactory.getLogger(BlockManagerImpl.class);
- private static byte[] blockCommitSequenceIdKey =
- DFSUtil.string2Bytes(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX);
+
private ConfigurationSource config;
@@ -116,11 +117,25 @@ public class BlockManagerImpl implements BlockManager {
BatchOperation batch = new BatchOperation();
batch.put(Longs.toByteArray(data.getLocalID()),
data.getProtoBufMessage().toByteArray());
- batch.put(blockCommitSequenceIdKey,
- Longs.toByteArray(bcsId));
+ batch.put(DB_BLOCK_COMMIT_SEQUENCE_ID_KEY, Longs.toByteArray(bcsId));
+
+ // Set bytes used. The in-memory value is updated on every write but
+ // only committed to the DB on every put block. That way, disk-space
+ // accounting per container uses only committed block lengths, both
+ // while the datanode is up and after a restart, preserving the current
+ // behavior and avoiding a DB write on every write chunk operation.
+ batch.put(DB_CONTAINER_BYTES_USED_KEY,
+ Longs.toByteArray(container.getContainerData().getBytesUsed()));
+
+ // Set Block Count for a container.
+ batch.put(DB_BLOCK_COUNT_KEY,
+ Longs.toByteArray(container.getContainerData().getKeyCount() + 1));
+
db.getStore().writeBatch(batch);
+
container.updateBlockCommitSequenceId(bcsId);
- // Increment keycount here
+ // Finally, increment the in-memory block count.
container.getContainerData().incrKeyCount();
if (LOG.isDebugEnabled()) {
LOG.debug(
@@ -224,11 +239,21 @@ public class BlockManagerImpl implements BlockManager {
// are not atomic. Leaving it here since the impact is refusing
// to delete a Block which might have just gotten inserted after
// the get check.
- byte[] kKey = Longs.toByteArray(blockID.getLocalID());
+ byte[] blockKey = Longs.toByteArray(blockID.getLocalID());
getBlockByID(db, blockID);
- db.getStore().delete(kKey);
- // Decrement blockcount here
+
+ // Update the DB: delete the block and decrement the block count.
+ BatchOperation batch = new BatchOperation();
+ batch.delete(blockKey);
+ // No need to update bytes used here, as that is taken care of during
+ // delete chunk.
+ batch.put(DB_BLOCK_COUNT_KEY,
+ Longs.toByteArray(container.getContainerData().getKeyCount() - 1));
+ db.getStore().writeBatch(batch);
+
+ // Decrement block count here
container.getContainerData().decrKeyCount();
}
}
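
After this change a putBlock commits four entries in one batch: the block data itself, the BCSID, the container's committed bytes used, and the incremented block count. A hedged sketch of that batch shape (illustrative helper only; the real method also resolves the DB handle and bumps the in-memory counters afterwards):

  import com.google.common.primitives.Longs;
  import org.apache.hadoop.hdds.utils.BatchOperation;
  import org.apache.hadoop.ozone.OzoneConsts;

  final class PutBlockBatchSketch {
    // Build the batch that putBlock now writes, per the hunk above.
    static BatchOperation build(long localId, byte[] blockProto,
        long bcsId, long bytesUsed, long newBlockCount) {
      BatchOperation batch = new BatchOperation();
      batch.put(Longs.toByteArray(localId), blockProto);   // block data
      batch.put(OzoneConsts.DB_BLOCK_COMMIT_SEQUENCE_ID_KEY,
          Longs.toByteArray(bcsId));                       // BCSID
      batch.put(OzoneConsts.DB_CONTAINER_BYTES_USED_KEY,
          Longs.toByteArray(bytesUsed));                   // committed bytes
      batch.put(OzoneConsts.DB_BLOCK_COUNT_KEY,
          Longs.toByteArray(newBlockCount));               // block count
      return batch;
    }
  }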
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
index efd16c6..375263c 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
@@ -303,9 +303,15 @@ public class BlockDeletingService extends BackgroundService {
DFSUtil.string2Bytes(blockId));
batch.delete(DFSUtil.string2Bytes(entry));
});
- meta.getStore().writeBatch(batch);
- // update count of pending deletion blocks in in-memory container status
- containerData.decrPendingDeletionBlocks(succeedBlocks.size());
+
+
+ int deleteBlockCount = succeedBlocks.size();
+ containerData.updateAndCommitDBCounters(meta, batch, deleteBlockCount);
+
+ // update count of pending deletion blocks and block count in in-memory
+ // container status.
+ containerData.decrPendingDeletionBlocks(deleteBlockCount);
+ containerData.decrKeyCount(deleteBlockCount);
if (!succeedBlocks.isEmpty()) {
LOG.info("Container: {}, deleted blocks: {}, task elapsed time:
{}ms",
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java
index e923d33..1b9b3d6 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java
@@ -21,32 +21,22 @@ package org.apache.hadoop.ozone.container.ozoneimpl;
import java.io.File;
import java.io.FileFilter;
import java.io.IOException;
-import java.util.List;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import
org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
-import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.ozone.OzoneConsts;
+import com.google.common.base.Preconditions;
import org.apache.hadoop.ozone.common.Storage;
-import org.apache.hadoop.ozone.container.common.helpers.BlockData;
-import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
import org.apache.hadoop.ozone.container.common.impl.ContainerData;
import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
-import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueBlockIterator;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil;
-import com.google.common.base.Preconditions;
-import com.google.common.primitives.Longs;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -191,36 +181,8 @@ public class ContainerReader implements Runnable {
KeyValueContainerUtil.parseKVContainerData(kvContainerData, config);
KeyValueContainer kvContainer = new KeyValueContainer(
kvContainerData, config);
- try(ReferenceCountedDB containerDB = BlockUtils.getDB(kvContainerData,
- config)) {
- MetadataKeyFilters.KeyPrefixFilter filter =
- new MetadataKeyFilters.KeyPrefixFilter()
- .addFilter(OzoneConsts.DELETING_KEY_PREFIX);
- int numPendingDeletionBlocks =
- containerDB.getStore().getSequentialRangeKVs(null,
- Integer.MAX_VALUE, filter)
- .size();
- kvContainerData.incrPendingDeletionBlocks(numPendingDeletionBlocks);
- byte[] delTxnId = containerDB.getStore().get(
- DFSUtil.string2Bytes(OzoneConsts.DELETE_TRANSACTION_KEY_PREFIX));
- if (delTxnId != null) {
- kvContainerData
- .updateDeleteTransactionId(Longs.fromByteArray(delTxnId));
- }
- // sets the BlockCommitSequenceId.
- byte[] bcsId = containerDB.getStore().get(DFSUtil.string2Bytes(
- OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX));
- if (bcsId != null) {
- kvContainerData
- .updateBlockCommitSequenceId(Longs.fromByteArray(bcsId));
- }
- if (kvContainer.getContainerState()
- == ContainerProtos.ContainerDataProto.State.OPEN) {
- // commitSpace for Open Containers relies on usedBytes
- initializeUsedBytes(kvContainer);
- }
- containerSet.addContainer(kvContainer);
- }
+
+ containerSet.addContainer(kvContainer);
} else {
throw new StorageContainerException("Container File is corrupted. " +
"ContainerType is KeyValueContainer but cast to " +
@@ -234,28 +196,4 @@ public class ContainerReader implements Runnable {
ContainerProtos.Result.UNKNOWN_CONTAINER_TYPE);
}
}
-
- private void initializeUsedBytes(KeyValueContainer container)
- throws IOException {
- try (KeyValueBlockIterator blockIter = new KeyValueBlockIterator(
- container.getContainerData().getContainerID(),
- new File(container.getContainerData().getContainerPath()))) {
- long usedBytes = 0;
-
- while (blockIter.hasNext()) {
- BlockData block = blockIter.nextBlock();
- long blockLen = 0;
-
- List<ContainerProtos.ChunkInfo> chunkInfoList = block.getChunks();
- for (ContainerProtos.ChunkInfo chunk : chunkInfoList) {
- ChunkInfo info = ChunkInfo.getFromProtoBuf(chunk);
- blockLen += info.getLen();
- }
-
- usedBytes += blockLen;
- }
-
- container.getContainerData().setBytesUsed(usedBytes);
- }
- }
}
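
With the per-container DB session and the usedBytes scan removed from ContainerReader, the load path reduces to: read the persisted counters, and only if they are absent (a container created before this patch) iterate the blocks once. The control flow, boiled down to plain Java with a hypothetical KVStore standing in for the container DB:

  import java.nio.ByteBuffer;
  import java.util.Iterator;

  interface KVStore {            // hypothetical stand-in for the container DB
    byte[] get(byte[] key);      // cheap point lookup
    Iterator<Long> blockSizes(); // full scan: the old, slow path
  }

  final class StartupPathSketch {
    static final byte[] BLOCK_COUNT_KEY = "#BLOCKCOUNT".getBytes();

    static long blockCount(KVStore db) {
      byte[] persisted = db.get(BLOCK_COUNT_KEY); // fast path
      if (persisted != null) {
        return ByteBuffer.wrap(persisted).getLong();
      }
      long count = 0; // slow path, taken once for pre-upgrade containers
      for (Iterator<Long> it = db.blockSizes(); it.hasNext(); it.next()) {
        count++;
      }
      return count;
    }
  }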
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
index a19ad64..b4c1ae5 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
@@ -17,6 +17,7 @@
package org.apache.hadoop.ozone.container.common;
+
import java.io.File;
import java.io.IOException;
import java.util.List;
@@ -25,6 +26,10 @@ import java.util.UUID;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
+import com.google.common.collect.Lists;
+import com.google.common.primitives.Ints;
+import com.google.common.primitives.Longs;
+import org.apache.commons.io.FileUtils;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -58,8 +63,7 @@ import org.apache.hadoop.ozone.container.testutils.BlockDeletingServiceTestImpl;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
-import com.google.common.collect.Lists;
-import org.apache.commons.io.FileUtils;
+
import static
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL;
import static
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER;
import static
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
@@ -69,6 +73,10 @@ import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
+
+
+import static org.apache.hadoop.ozone.OzoneConsts.DB_BLOCK_COUNT_KEY;
+import static org.apache.hadoop.ozone.OzoneConsts.DB_PENDING_DELETE_BLOCK_COUNT_KEY;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
@@ -136,6 +144,8 @@ public class TestBlockDeletingService {
containerSet.addContainer(container);
data = (KeyValueContainerData) containerSet.getContainer(
containerID).getContainerData();
+
+ long blockLength = 100;
try(ReferenceCountedDB metadata = BlockUtils.getDB(data, conf)) {
for (int j = 0; j < numOfBlocksPerContainer; j++) {
BlockID blockID =
@@ -148,7 +158,7 @@ public class TestBlockDeletingService {
ContainerProtos.ChunkInfo info =
ContainerProtos.ChunkInfo.newBuilder()
.setChunkName(blockID.getLocalID() + "_chunk_" + k)
- .setLen(0)
+ .setLen(blockLength)
.setOffset(0)
.setChecksumData(Checksum.getNoChecksumDataProto())
.build();
@@ -159,6 +169,17 @@ public class TestBlockDeletingService {
kd.getProtoBufMessage().toByteArray());
container.getContainerData().incrPendingDeletionBlocks(1);
}
+
+ container.getContainerData().setKeyCount(numOfBlocksPerContainer);
+ container.getContainerData().setBytesUsed(
+ blockLength * numOfBlocksPerContainer);
+ // Set block count, bytes used and pending delete block count.
+ metadata.getStore().put(DB_BLOCK_COUNT_KEY,
+ Longs.toByteArray(numOfBlocksPerContainer));
+ metadata.getStore().put(OzoneConsts.DB_CONTAINER_BYTES_USED_KEY,
+ Longs.toByteArray(blockLength * numOfBlocksPerContainer));
+ metadata.getStore().put(DB_PENDING_DELETE_BLOCK_COUNT_KEY,
+ Ints.toByteArray(numOfBlocksPerContainer));
}
}
}
@@ -182,7 +203,7 @@ public class TestBlockDeletingService {
List<Map.Entry<byte[], byte[]>> underDeletionBlocks =
meta.getStore().getRangeKVs(null, 100,
new MetadataKeyFilters.KeyPrefixFilter()
- .addFilter(OzoneConsts.DELETING_KEY_PREFIX));
+ .addFilter(OzoneConsts.DELETING_KEY_PREFIX));
return underDeletionBlocks.size();
}
@@ -243,6 +264,14 @@ public class TestBlockDeletingService {
deleteAndWait(svc, 3);
Assert.assertEquals(0, getUnderDeletionBlocksCount(meta));
Assert.assertEquals(3, getDeletedBlocksCount(meta));
+
+
+ // Finally, check the DB counters.
+ // Bytes used is not checked, as the handler is mocked in this test.
+ Assert.assertEquals(0, Ints.fromByteArray(
+ meta.getStore().get(DB_PENDING_DELETE_BLOCK_COUNT_KEY)));
+ Assert.assertEquals(0, Longs.fromByteArray(
+ meta.getStore().get(DB_BLOCK_COUNT_KEY)));
}
svc.shutdown();
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
index affa83a..8c6ff61 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
@@ -215,6 +215,11 @@ public class TestKeyValueContainer {
metadataStore.getStore().put(("test" + i).getBytes(UTF_8),
"test".getBytes(UTF_8));
}
+
+ // putBlock now increments the block count and persists it in the DB.
+ // This test inserts blocks manually, so set the block count here.
+ metadataStore.getStore().put(OzoneConsts.DB_BLOCK_COUNT_KEY,
+ Longs.toByteArray(numberOfKeysToWrite));
}
BlockUtils.removeDB(keyValueContainerData, conf);
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java
new file mode 100644
index 0000000..6929864
--- /dev/null
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java
@@ -0,0 +1,222 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.ozoneimpl;
+
+import com.google.common.primitives.Longs;
+import org.apache.hadoop.conf.StorageUnit;
+import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
+import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
+import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion;
+import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
+import org.apache.hadoop.ozone.container.common.interfaces.Container;
+import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
+import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
+import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
+import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+import org.mockito.Mockito;
+
+import java.io.File;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.UUID;
+
+import static org.apache.hadoop.ozone.OzoneConsts.DB_BLOCK_COUNT_KEY;
+import static org.apache.hadoop.ozone.OzoneConsts.DB_CONTAINER_BYTES_USED_KEY;
+import static org.apache.hadoop.ozone.OzoneConsts.DB_PENDING_DELETE_BLOCK_COUNT_KEY;
+import static org.mockito.ArgumentMatchers.anyList;
+import static org.mockito.ArgumentMatchers.anyLong;
+import static org.mockito.Mockito.mock;
+
+/**
+ * Test ContainerReader class which loads containers from disks.
+ */
+public class TestContainerReader {
+
+ @Rule
+ public final TemporaryFolder tempDir = new TemporaryFolder();
+
+ private MutableVolumeSet volumeSet;
+ private HddsVolume hddsVolume;
+ private ContainerSet containerSet;
+ private ConfigurationSource conf;
+
+
+ private RoundRobinVolumeChoosingPolicy volumeChoosingPolicy;
+ private UUID datanodeId;
+ private String scmId = UUID.randomUUID().toString();
+ private int blockCount = 10;
+ private long blockLen = 1024;
+
+ @Before
+ public void setup() throws Exception {
+
+ File volumeDir = tempDir.newFolder();
+ volumeSet = Mockito.mock(MutableVolumeSet.class);
+ containerSet = new ContainerSet();
+ conf = new OzoneConfiguration();
+
+ datanodeId = UUID.randomUUID();
+ hddsVolume = new HddsVolume.Builder(volumeDir
+ .getAbsolutePath()).conf(conf).datanodeUuid(datanodeId
+ .toString()).build();
+
+ volumeSet = mock(MutableVolumeSet.class);
+ volumeChoosingPolicy = mock(RoundRobinVolumeChoosingPolicy.class);
+ Mockito.when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong()))
+ .thenReturn(hddsVolume);
+
+ for (int i=0; i<2; i++) {
+ KeyValueContainerData keyValueContainerData = new KeyValueContainerData(i,
+ ChunkLayOutVersion.FILE_PER_BLOCK,
+ (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(),
+ datanodeId.toString());
+
+ KeyValueContainer keyValueContainer =
+ new KeyValueContainer(keyValueContainerData,
+ conf);
+ keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
+
+
+ List<Long> blkNames;
+ if (i % 2 == 0) {
+ blkNames = addBlocks(keyValueContainer, true);
+ markBlocksForDelete(keyValueContainer, true, blkNames, i);
+ } else {
+ blkNames = addBlocks(keyValueContainer, false);
+ markBlocksForDelete(keyValueContainer, false, blkNames, i);
+ }
+
+ }
+ }
+
+
+ private void markBlocksForDelete(KeyValueContainer keyValueContainer,
+ boolean setMetaData, List<Long> blockNames, int count) throws Exception {
+ try(ReferenceCountedDB metadataStore = BlockUtils.getDB(keyValueContainer
+ .getContainerData(), conf)) {
+
+ for (int i = 0; i < count; i++) {
+ byte[] blkBytes = Longs.toByteArray(blockNames.get(i));
+ byte[] blkInfo = metadataStore.getStore().get(blkBytes);
+
+ byte[] deletingKeyBytes =
+ DFSUtil.string2Bytes(OzoneConsts.DELETING_KEY_PREFIX +
+ blockNames.get(i));
+
+ metadataStore.getStore().delete(blkBytes);
+ metadataStore.getStore().put(deletingKeyBytes, blkInfo);
+ }
+
+ if (setMetaData) {
+ metadataStore.getStore().put(DB_PENDING_DELETE_BLOCK_COUNT_KEY,
+ Longs.toByteArray(count));
+ long blkCount = Longs.fromByteArray(
+ metadataStore.getStore().get(DB_BLOCK_COUNT_KEY));
+ metadataStore.getStore().put(DB_BLOCK_COUNT_KEY,
+ Longs.toByteArray(blkCount - count));
+ long bytesUsed = Longs.fromByteArray(
+ metadataStore.getStore().get(DB_CONTAINER_BYTES_USED_KEY));
+ metadataStore.getStore().put(DB_CONTAINER_BYTES_USED_KEY,
+ Longs.toByteArray(bytesUsed - (count * blockLen)));
+
+ }
+ }
+
+ }
+
+ private List<Long> addBlocks(KeyValueContainer keyValueContainer,
+ boolean setMetaData) throws Exception {
+ long containerId = keyValueContainer.getContainerData().getContainerID();
+
+ List<Long> blkNames = new ArrayList<>();
+ try(ReferenceCountedDB metadataStore = BlockUtils.getDB(keyValueContainer
+ .getContainerData(), conf)) {
+
+ for (int i = 0; i < blockCount; i++) {
+ // Creating BlockData
+ BlockID blockID = new BlockID(containerId, i);
+ BlockData blockData = new BlockData(blockID);
+ blockData.addMetadata(OzoneConsts.VOLUME, OzoneConsts.OZONE);
+ blockData.addMetadata(OzoneConsts.OWNER,
+ OzoneConsts.OZONE_SIMPLE_HDFS_USER);
+ List<ContainerProtos.ChunkInfo> chunkList = new ArrayList<>();
+ ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", blockID
+ .getLocalID(), 0), 0, blockLen);
+ chunkList.add(info.getProtoBufMessage());
+ blockData.setChunks(chunkList);
+ blkNames.add(blockID.getLocalID());
+ metadataStore.getStore().put(Longs.toByteArray(blockID.getLocalID()),
+ blockData
+ .getProtoBufMessage().toByteArray());
+ }
+
+ if (setMetaData) {
+ metadataStore.getStore().put(DB_BLOCK_COUNT_KEY,
+ Longs.toByteArray(blockCount));
+ metadataStore.getStore().put(OzoneConsts.DB_CONTAINER_BYTES_USED_KEY,
+ Longs.toByteArray(blockCount * blockLen));
+ }
+ }
+
+ return blkNames;
+ }
+
+ @Test
+ public void testContainerReader() throws Exception {
+ ContainerReader containerReader = new ContainerReader(volumeSet,
+ hddsVolume, containerSet, conf);
+
+ Thread thread = new Thread(containerReader);
+ thread.start();
+ thread.join();
+
+ Assert.assertEquals(2, containerSet.containerCount());
+
+ for (int i=0; i < 2; i++) {
+ Container keyValueContainer = containerSet.getContainer(i);
+
+ KeyValueContainerData keyValueContainerData = (KeyValueContainerData)
+ keyValueContainer.getContainerData();
+
+ // Verify block related metadata.
+ Assert.assertEquals(blockCount - i,
+ keyValueContainerData.getKeyCount());
+
+ Assert.assertEquals((blockCount - i) * blockLen,
+ keyValueContainerData.getBytesUsed());
+
+ Assert.assertEquals(i,
+ keyValueContainerData.getNumPendingDeletionBlocks());
+ }
+ }
+}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
index efcbb10..77dcb26 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import
org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
+import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.container.common.helpers.BlockData;
import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion;
@@ -239,6 +240,12 @@ public class TestOzoneContainer {
blockData.getProtoBufMessage().toByteArray());
}
+ // Set Block count and used bytes.
+ db.getStore().put(OzoneConsts.DB_BLOCK_COUNT_KEY,
+ Longs.toByteArray(blocks));
+ db.getStore().put(OzoneConsts.DB_CONTAINER_BYTES_USED_KEY,
+ Longs.toByteArray(usedBytes));
+
// remaining available capacity of the container
return (freeBytes - usedBytes);
}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/MetadataKeyFilters.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/MetadataKeyFilters.java
index 9fcc270..371cef6 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/MetadataKeyFilters.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/MetadataKeyFilters.java
@@ -44,7 +44,10 @@ public final class MetadataKeyFilters {
.addFilter(OzoneConsts.DELETING_KEY_PREFIX, true)
.addFilter(OzoneConsts.DELETED_KEY_PREFIX, true)
.addFilter(OzoneConsts.DELETE_TRANSACTION_KEY_PREFIX, true)
- .addFilter(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX, true);
+ .addFilter(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX, true)
+ .addFilter(OzoneConsts.BLOCK_COUNT, true)
+ .addFilter(OzoneConsts.CONTAINER_BYTES_USED, true)
+ .addFilter(OzoneConsts.PENDING_DELETE_BLOCK_COUNT, true);
private MetadataKeyFilters() {
}
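
Since the counters live in the same key space as the blocks, every non-block key must carry a recognizable prefix and be excluded from normal range scans, which is what the three filters added above do. A minimal standalone sketch of the negative prefix-filter idea (only the prefixes visible in this patch are listed):

  import java.util.Arrays;
  import java.util.List;

  final class PrefixFilterSketch {
    // Negative filters: keys with these prefixes are container metadata,
    // not blocks, and must not show up in block listings.
    private static final List<String> EXCLUDED = Arrays.asList(
        "#delTX#", "#BCSID", "#BLOCKCOUNT", "#BYTESUSED",
        "#PENDINGDELETEBLOCKCOUNT");

    static boolean isBlockKey(String key) {
      return EXCLUDED.stream().noneMatch(key::startsWith);
    }

    public static void main(String[] args) {
      System.out.println(isBlockKey("1001"));        // true: a block key
      System.out.println(isBlockKey("#BLOCKCOUNT")); // false: a counter
    }
  }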
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]