This is an automated email from the ASF dual-hosted git repository.
sammichen pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new aefe07a4d6c HDDS-13467. Introduce pending deletion block bytes of
container in DN. (#9325)
aefe07a4d6c is described below
commit aefe07a4d6c3e3ffb6f9e8efae4ad8f3fb3cc86b
Author: Priyesh Karatha <[email protected]>
AuthorDate: Thu Nov 20 08:04:37 2025 +0530
HDDS-13467. Introduce pending deletion block bytes of container in DN.
(#9325)
---
.../java/org/apache/hadoop/ozone/OzoneConsts.java | 4 +-
.../helpers/BlockDeletingServiceMetrics.java | 12 +++
.../container/common/helpers/ContainerUtils.java | 14 +++
.../common/impl/BlockDeletingService.java | 3 +
.../ozone/container/common/impl/ContainerData.java | 34 +++++--
.../commandhandler/DeleteBlocksCommandHandler.java | 7 +-
.../container/keyvalue/KeyValueContainerData.java | 19 +++-
.../KeyValueContainerMetadataInspector.java | 78 +++++++++-------
.../ozone/container/keyvalue/PendingDelete.java | 49 ++++++++++
.../keyvalue/helpers/KeyValueContainerUtil.java | 34 ++++---
.../statemachine/background/BlockDeletingTask.java | 21 ++++-
.../container/common/TestBlockDeletingService.java | 102 +++++++++++++++++++--
.../common/TestKeyValueContainerData.java | 2 +-
.../TestSchemaTwoBackwardsCompatibility.java | 2 +-
.../impl/TestContainerDeletionChoosingPolicy.java | 6 +-
.../TestDeleteBlocksCommandHandler.java | 72 ++++++++++++++-
.../container/keyvalue/TestKeyValueContainer.java | 4 +
.../container/ozoneimpl/TestContainerReader.java | 81 ++++++++++++----
18 files changed, 454 insertions(+), 90 deletions(-)
diff --git
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index aecbdfae615..9f7a1421537 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -142,8 +142,8 @@ public final class OzoneConsts {
public static final String BLOCK_COMMIT_SEQUENCE_ID = "#BCSID";
public static final String BLOCK_COUNT = "#BLOCKCOUNT";
public static final String CONTAINER_BYTES_USED = "#BYTESUSED";
- public static final String PENDING_DELETE_BLOCK_COUNT =
- "#PENDINGDELETEBLOCKCOUNT";
+ public static final String PENDING_DELETE_BLOCK_COUNT =
"#PENDINGDELETEBLOCKCOUNT";
+ public static final String PENDING_DELETE_BLOCK_BYTES =
"#PENDINGDELETEBLOCKBYTES";
public static final String CONTAINER_DATA_CHECKSUM = "#DATACHECKSUM";
/**
diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockDeletingServiceMetrics.java
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockDeletingServiceMetrics.java
index 6e4d638e6ac..91bb8fbc59a 100644
---
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockDeletingServiceMetrics.java
+++
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockDeletingServiceMetrics.java
@@ -51,6 +51,9 @@ public final class BlockDeletingServiceMetrics {
@Metric(about = "The total number of blocks pending for processing.")
private MutableGaugeLong totalPendingBlockCount;
+ @Metric(about = "The total bytes used by blocks pending for deletion.")
+ private MutableGaugeLong totalPendingBlockBytes;
+
@Metric(about = "The total number of DeleteBlockTransaction received")
private MutableCounterLong receivedTransactionCount;
@@ -155,6 +158,10 @@ public void setTotalPendingBlockCount(long count) {
this.totalPendingBlockCount.set(count);
}
+ public void setTotalPendingBlockBytes(long bytes) {
+ this.totalPendingBlockBytes.set(bytes);
+ }
+
public void incrTotalLockTimeoutTransactionCount() {
totalLockTimeoutTransactionCount.incr();
}
@@ -183,6 +190,10 @@ public long getTotalPendingBlockCount() {
return totalPendingBlockCount.value();
}
+ public long getTotalPendingBlockBytes() {
+ return totalPendingBlockBytes.value();
+ }
+
public long getTotalBlockChosenCount() {
return totalBlockChosenCount.value();
}
@@ -212,6 +223,7 @@ public String toString() {
.append("outOfOrderDeleteBlockTransactionCount = ")
.append(outOfOrderDeleteBlockTransactionCount.value()).append('\t')
.append("totalPendingBlockCount =
").append(totalPendingBlockCount.value()).append('\t')
+ .append("totalPendingBlockBytes =
").append(totalPendingBlockBytes.value()).append('\t')
.append("totalBlockChosenCount =
").append(totalBlockChosenCount.value()).append('\t')
.append("totalContainerChosenCount =
").append(totalContainerChosenCount.value()).append('\t')
.append("receivedTransactionCount =
").append(receivedTransactionCount.value()).append('\t')
diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
index 07f4e09aa9e..e38a7666199 100644
---
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
+++
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
@@ -329,4 +329,18 @@ public static void assertSpaceAvailability(long
containerId, HddsVolume volume,
+ currentUsage + ", minimum free space spared=" + spared,
DISK_OUT_OF_SPACE);
}
}
+
+ public static long getPendingDeletionBytes(ContainerData containerData) {
+ if (containerData.getContainerType()
+ .equals(ContainerProtos.ContainerType.KeyValueContainer)) {
+ return ((KeyValueContainerData) containerData)
+ .getBlockPendingDeletionBytes();
+ } else {
+ // If another ContainerType is available later, implement it
+ throw new IllegalArgumentException(
+ "getPendingDeletionBytes for ContainerType: " +
+ containerData.getContainerType() +
+ " not supported.");
+ }
+ }
}
diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/BlockDeletingService.java
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/BlockDeletingService.java
index 6b4146ec079..27b3ec41864 100644
---
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/BlockDeletingService.java
+++
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/BlockDeletingService.java
@@ -210,6 +210,7 @@ public List<ContainerBlockInfo>
chooseContainerForBlockDeletion(
throws StorageContainerException {
AtomicLong totalPendingBlockCount = new AtomicLong(0L);
+ AtomicLong totalPendingBlockBytes = new AtomicLong(0L);
Map<Long, ContainerData> containerDataMap =
ozoneContainer.getContainerSet().getContainerMap().entrySet().stream()
.filter(e -> (checkPendingDeletionBlocks(
@@ -222,10 +223,12 @@ public List<ContainerBlockInfo>
chooseContainerForBlockDeletion(
totalPendingBlockCount
.addAndGet(
ContainerUtils.getPendingDeletionBlocks(containerData));
+
totalPendingBlockBytes.addAndGet(ContainerUtils.getPendingDeletionBytes(containerData));
return containerData;
}));
metrics.setTotalPendingBlockCount(totalPendingBlockCount.get());
+ metrics.setTotalPendingBlockBytes(totalPendingBlockBytes.get());
return deletionPolicy
.chooseContainerForBlockDeletion(blockLimit, containerDataMap);
}
diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
index f79c7e3f1df..c334a2d842e 100644
---
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
+++
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
@@ -591,11 +591,13 @@ public static class BlockByteAndCounts {
private final long bytes;
private final long count;
private final long pendingDeletion;
+ private final long pendingDeletionBytes;
- public BlockByteAndCounts(long bytes, long count, long pendingDeletion) {
+ public BlockByteAndCounts(long bytes, long count, long pendingDeletion,
long pendingDeletionBytes) {
this.bytes = bytes;
this.count = count;
this.pendingDeletion = pendingDeletion;
+ this.pendingDeletionBytes = pendingDeletionBytes;
}
public long getBytes() {
@@ -609,6 +611,10 @@ public long getCount() {
public long getPendingDeletion() {
return pendingDeletion;
}
+
+ public long getPendingDeletionBytes() {
+ return pendingDeletionBytes;
+ }
}
/**
@@ -625,6 +631,7 @@ public static class Statistics {
private long blockBytes;
private long blockCount;
private long blockPendingDeletion;
+ private long blockPendingDeletionBytes;
public synchronized long getWriteBytes() {
return writeBytes;
@@ -635,13 +642,17 @@ public synchronized long getBlockBytes() {
}
public synchronized BlockByteAndCounts getBlockByteAndCounts() {
- return new BlockByteAndCounts(blockBytes, blockCount,
blockPendingDeletion);
+ return new BlockByteAndCounts(blockBytes, blockCount,
blockPendingDeletion, blockPendingDeletionBytes);
}
public synchronized long getBlockPendingDeletion() {
return blockPendingDeletion;
}
+ public synchronized long getBlockPendingDeletionBytes() {
+ return blockPendingDeletionBytes;
+ }
+
public synchronized void incrementBlockCount() {
blockCount++;
}
@@ -661,16 +672,17 @@ public synchronized void updateWrite(long length, boolean
overwrite) {
writeBytes += length;
}
- public synchronized void updateDeletion(long deletedBytes, long
deletedBlockCount, long processedBlockCount) {
+ public synchronized void decDeletion(long deletedBytes, long
processedBytes, long deletedBlockCount,
+ long processedBlockCount) {
blockBytes -= deletedBytes;
blockCount -= deletedBlockCount;
blockPendingDeletion -= processedBlockCount;
+ blockPendingDeletionBytes -= processedBytes;
}
- public synchronized void updateBlocks(long bytes, long count, long
pendingDeletionIncrement) {
+ public synchronized void updateBlocks(long bytes, long count) {
blockBytes = bytes;
blockCount = count;
- blockPendingDeletion += pendingDeletionIncrement;
}
public synchronized ContainerDataProto.Builder
setContainerDataProto(ContainerDataProto.Builder b) {
@@ -689,12 +701,19 @@ public synchronized ContainerReplicaProto.Builder
setContainerReplicaProto(Conta
.setKeyCount(blockCount);
}
- public synchronized void addBlockPendingDeletion(long count) {
+ public synchronized void setBlockPendingDeletion(long count, long bytes) {
+ blockPendingDeletion = count;
+ blockPendingDeletionBytes = bytes;
+ }
+
+ public synchronized void addBlockPendingDeletion(long count, long bytes) {
blockPendingDeletion += count;
+ blockPendingDeletionBytes += bytes;
}
public synchronized void resetBlockPendingDeletion() {
blockPendingDeletion = 0;
+ blockPendingDeletionBytes = 0;
}
public synchronized void assertRead(long expectedBytes, long
expectedCount) {
@@ -726,7 +745,8 @@ public synchronized String toString() {
return "Statistics{read(" + readBytes + " bytes, #" + readCount + ")"
+ ", write(" + writeBytes + " bytes, #" + writeCount + ")"
+ ", block(" + blockBytes + " bytes, #" + blockCount
- + ", pendingDelete=" + blockPendingDeletion + ")}";
+ + ", pendingDelete=" + blockPendingDeletion
+ + ", pendingDeleteBytes=" + blockPendingDeletionBytes + ")}";
}
}
}
diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
index c4887bf917e..6d59e812f93 100644
---
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
+++
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
@@ -645,8 +645,13 @@ private void updateMetaData(KeyValueContainerData
containerData,
// update pending deletion blocks count and delete transaction ID in
// in-memory container status
+ long pendingBytes = containerData.getBlockPendingDeletionBytes() +
delTX.getTotalBlockSize();
+ metadataTable
+ .putWithBatch(batchOperation,
+ containerData.getPendingDeleteBlockBytesKey(),
+ pendingBytes);
+ containerData.incrPendingDeletionBlocks(newDeletionBlocks,
delTX.getTotalBlockSize());
containerData.updateDeleteTransactionId(delTX.getTxID());
- containerData.incrPendingDeletionBlocks(newDeletionBlocks);
}
}
diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
index 0c398d24498..800424076f9 100644
---
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
+++
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
@@ -29,6 +29,7 @@
import static org.apache.hadoop.ozone.OzoneConsts.DELETE_TRANSACTION_KEY;
import static org.apache.hadoop.ozone.OzoneConsts.DELETING_KEY_PREFIX;
import static org.apache.hadoop.ozone.OzoneConsts.METADATA_PATH;
+import static org.apache.hadoop.ozone.OzoneConsts.PENDING_DELETE_BLOCK_BYTES;
import static org.apache.hadoop.ozone.OzoneConsts.PENDING_DELETE_BLOCK_COUNT;
import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V1;
import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V2;
@@ -236,8 +237,8 @@ public void setContainerDBType(String containerDBType) {
*
* @param numBlocks increment number
*/
- public void incrPendingDeletionBlocks(long numBlocks) {
- getStatistics().addBlockPendingDeletion(numBlocks);
+ public void incrPendingDeletionBlocks(long numBlocks, long bytes) {
+ getStatistics().addBlockPendingDeletion(numBlocks, bytes);
}
/**
@@ -247,6 +248,13 @@ public long getNumPendingDeletionBlocks() {
return getStatistics().getBlockPendingDeletion();
}
+ /**
+ * Get the total bytes used by pending deletion blocks.
+ */
+ public long getBlockPendingDeletionBytes() {
+ return getStatistics().getBlockPendingDeletionBytes();
+ }
+
/**
* Sets deleteTransactionId to latest delete transactionId for the container.
*
@@ -377,6 +385,8 @@ public void updateAndCommitDBCounters(DBHandle db,
metadataTable.putWithBatch(batchOperation, getBlockCountKey(),
b.getCount() - deletedBlockCount);
metadataTable.putWithBatch(batchOperation, getPendingDeleteBlockCountKey(),
b.getPendingDeletion() - deletedBlockCount);
+ metadataTable.putWithBatch(batchOperation, getPendingDeleteBlockBytesKey(),
+ b.getPendingDeletionBytes() - releasedBytes);
db.getStore().getBatchHandler().commitBatchOperation(batchOperation);
}
@@ -387,6 +397,7 @@ public void resetPendingDeleteBlockCount(DBHandle db)
throws IOException {
// Reset the metadata on disk.
Table<String, Long> metadataTable = db.getStore().getMetadataTable();
metadataTable.put(getPendingDeleteBlockCountKey(), 0L);
+ metadataTable.put(getPendingDeleteBlockBytesKey(), 0L);
}
// NOTE: Below are some helper functions to format keys according
@@ -428,6 +439,10 @@ public String getPendingDeleteBlockCountKey() {
public String getContainerDataChecksumKey() {
return formatKey(CONTAINER_DATA_CHECKSUM);
}
+
+ public String getPendingDeleteBlockBytesKey() {
+ return formatKey(PENDING_DELETE_BLOCK_BYTES);
+ }
public String getDeletingBlockKeyPrefix() {
return formatKey(DELETING_KEY_PREFIX);
diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerMetadataInspector.java
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerMetadataInspector.java
index 36f41ca982f..bf3c3909f1c 100644
---
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerMetadataInspector.java
+++
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerMetadataInspector.java
@@ -239,6 +239,8 @@ static ObjectNode getDBMetadataJson(Table<String, Long>
metadataTable,
metadataTable.get(containerData.getBytesUsedKey()));
dBMetadata.put(OzoneConsts.PENDING_DELETE_BLOCK_COUNT,
metadataTable.get(containerData.getPendingDeleteBlockCountKey()));
+ dBMetadata.put(OzoneConsts.PENDING_DELETE_BLOCK_BYTES,
+ metadataTable.get(containerData.getPendingDeleteBlockBytesKey()));
dBMetadata.put(OzoneConsts.DELETE_TRANSACTION_KEY,
metadataTable.get(containerData.getLatestDeleteTxnKey()));
dBMetadata.put(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID,
@@ -247,7 +249,7 @@ static ObjectNode getDBMetadataJson(Table<String, Long>
metadataTable,
return dBMetadata;
}
- static ObjectNode getAggregateValues(DatanodeStore store,
+ private static ObjectNode getAggregateValues(DatanodeStore store,
KeyValueContainerData containerData, String schemaVersion)
throws IOException {
@@ -267,6 +269,23 @@ static ObjectNode getAggregateValues(DatanodeStore store,
}
// Count pending delete blocks.
+ final PendingDelete pendingDelete = getAggregatePendingDelete(store,
containerData, schemaVersion);
+
+ if (isSameSchemaVersion(schemaVersion, OzoneConsts.SCHEMA_V1)) {
+ blockCountTotal += pendingDelete.getCount();
+ usedBytesTotal += pendingDelete.getBytes();
+ }
+
+ aggregates.put("blockCount", blockCountTotal);
+ aggregates.put("usedBytes", usedBytesTotal);
+ pendingDelete.addToJson(aggregates);
+
+ return aggregates;
+ }
+
+ public static PendingDelete getAggregatePendingDelete(DatanodeStore store,
+ KeyValueContainerData containerData, String schemaVersion)
+ throws IOException {
final PendingDelete pendingDelete;
if (isSameSchemaVersion(schemaVersion, OzoneConsts.SCHEMA_V1)) {
long pendingDeleteBlockCountTotal = 0;
@@ -276,10 +295,8 @@ static ObjectNode getAggregateValues(DatanodeStore store,
containerData.getDeletingBlockKeyFilter())) {
while (blockIter.hasNext()) {
- blockCountTotal++;
pendingDeleteBlockCountTotal++;
final long bytes = getBlockLength(blockIter.nextBlock());
- usedBytesTotal += bytes;
pendingDeleteBytes += bytes;
}
}
@@ -297,14 +314,9 @@ static ObjectNode getAggregateValues(DatanodeStore store,
countPendingDeletesSchemaV3(schemaThreeStore, containerData);
} else {
throw new IOException("Failed to process deleted blocks for unknown " +
- "container schema " + schemaVersion);
+ "container schema " + schemaVersion);
}
-
- aggregates.put("blockCount", blockCountTotal);
- aggregates.put("usedBytes", usedBytesTotal);
- pendingDelete.addToJson(aggregates);
-
- return aggregates;
+ return pendingDelete;
}
static ObjectNode getChunksDirectoryJson(File chunksDir) throws IOException {
@@ -329,9 +341,7 @@ private boolean checkAndRepair(ObjectNode parent,
KeyValueContainerData containerData, DatanodeStore store) {
ArrayNode errors = JsonUtils.createArrayNode();
boolean passed = true;
-
Table<String, Long> metadataTable = store.getMetadataTable();
-
ObjectNode dBMetadata = (ObjectNode) parent.get("dBMetadata");
ObjectNode aggregates = (ObjectNode) parent.get("aggregates");
@@ -342,7 +352,6 @@ private boolean checkAndRepair(ObjectNode parent,
// If block count is absent from the DB, it is only an error if there are
// a non-zero amount of block keys in the DB.
long blockCountDBLong = blockCountDB.isNull() ? 0 : blockCountDB.asLong();
-
if (blockCountDBLong != blockCountAggregate.asLong()) {
passed = false;
@@ -425,6 +434,30 @@ private boolean checkAndRepair(ObjectNode parent,
errors.add(deleteCountError);
}
+ // check and repair if db delete bytes mismatches delete transaction
+ JsonNode pendingDeletionBlockSize = dBMetadata.path(
+ OzoneConsts.PENDING_DELETE_BLOCK_BYTES);
+ final long dbDeleteBytes = jsonToLong(pendingDeletionBlockSize);
+ final JsonNode pendingDeleteBytesAggregate =
aggregates.path(PendingDelete.BYTES);
+ final long deleteTransactionBytes =
jsonToLong(pendingDeleteBytesAggregate);
+ if (dbDeleteBytes != deleteTransactionBytes) {
+ passed = false;
+ final BooleanSupplier deleteBytesRepairAction = () -> {
+ final String key = containerData.getPendingDeleteBlockBytesKey();
+ try {
+ metadataTable.put(key, deleteTransactionBytes);
+ } catch (IOException ex) {
+ LOG.error("Failed to reset {} for container {}.",
+ key, containerData.getContainerID(), ex);
+ }
+ return false;
+ };
+ final ObjectNode deleteBytesError = buildErrorAndRepair(
+ "dBMetadata." + OzoneConsts.PENDING_DELETE_BLOCK_BYTES,
+ pendingDeleteBytesAggregate, pendingDeletionBlockSize,
deleteBytesRepairAction);
+ errors.add(deleteBytesError);
+ }
+
// check and repair chunks dir.
JsonNode chunksDirPresent = parent.path("chunksDirectory").path("present");
if (!chunksDirPresent.asBoolean()) {
@@ -447,7 +480,6 @@ private boolean checkAndRepair(ObjectNode parent,
JsonNodeFactory.instance.booleanNode(true), chunksDirPresent,
dirRepairAction);
errors.add(chunksDirError);
}
-
parent.put("correct", passed);
parent.set("errors", errors);
return passed;
@@ -473,24 +505,6 @@ private ObjectNode buildErrorAndRepair(String property,
JsonNode expected,
return error;
}
- static class PendingDelete {
- static final String COUNT = "pendingDeleteBlocks";
- static final String BYTES = "pendingDeleteBytes";
-
- private final long count;
- private final long bytes;
-
- PendingDelete(long count, long bytes) {
- this.count = count;
- this.bytes = bytes;
- }
-
- void addToJson(ObjectNode json) {
- json.put(COUNT, count);
- json.put(BYTES, bytes);
- }
- }
-
static PendingDelete countPendingDeletesSchemaV2(
DatanodeStoreSchemaTwoImpl schemaTwoStore,
KeyValueContainerData containerData) throws IOException {
diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/PendingDelete.java
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/PendingDelete.java
new file mode 100644
index 00000000000..0f72d3f37c8
--- /dev/null
+++
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/PendingDelete.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.keyvalue;
+
+import com.fasterxml.jackson.databind.node.ObjectNode;
+
+/**
+ * Class used to hold pending deletion info such as block count and total size
in bytes.
+ */
+public class PendingDelete {
+ static final String COUNT = "pendingDeleteBlocks";
+ static final String BYTES = "pendingDeleteBytes";
+
+ private final long count;
+ private final long bytes;
+
+ PendingDelete(long count, long bytes) {
+ this.count = count;
+ this.bytes = bytes;
+ }
+
+ void addToJson(ObjectNode json) {
+ json.put(COUNT, count);
+ json.put(BYTES, bytes);
+ }
+
+ public long getCount() {
+ return count;
+ }
+
+ public long getBytes() {
+ return bytes;
+ }
+}
diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
index 420da876a91..13a01acd491 100644
---
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
+++
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.ozone.container.keyvalue.helpers;
import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V1;
+import static
org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerMetadataInspector.getAggregatePendingDelete;
import com.google.common.base.Preconditions;
import java.io.File;
@@ -30,7 +31,6 @@
import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerChecksumInfo;
-import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.container.checksum.ContainerChecksumTreeManager;
@@ -43,6 +43,7 @@
import org.apache.hadoop.ozone.container.common.utils.ContainerInspectorUtil;
import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
+import org.apache.hadoop.ozone.container.keyvalue.PendingDelete;
import org.apache.hadoop.ozone.container.metadata.DatanodeStore;
import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaOneImpl;
import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaTwoImpl;
@@ -319,20 +320,30 @@ private static void populateContainerMetadata(
// Set pending deleted block count.
final long blockPendingDeletion;
+ long blockPendingDeletionBytes = 0L;
+ Long pendingDeletionBlockBytes = metadataTable.get(kvContainerData
+ .getPendingDeleteBlockBytesKey());
Long pendingDeleteBlockCount =
metadataTable.get(kvContainerData
.getPendingDeleteBlockCountKey());
if (pendingDeleteBlockCount != null) {
blockPendingDeletion = pendingDeleteBlockCount;
+ if (pendingDeletionBlockBytes != null) {
+ blockPendingDeletionBytes = pendingDeletionBlockBytes;
+ } else {
+ LOG.warn("Missing pendingDeleteBlockSize from {}: recalculating it
from delete txn tables",
+ metadataTable.getName());
+ PendingDelete pendingDeletions = getAggregatePendingDelete(
+ store, kvContainerData, kvContainerData.getSchemaVersion());
+ blockPendingDeletionBytes = pendingDeletions.getBytes();
+ }
} else {
- // Set pending deleted block count.
- LOG.warn("Missing pendingDeleteBlockCount from {}: recalculate them from
block table", metadataTable.getName());
- MetadataKeyFilters.KeyPrefixFilter filter =
- kvContainerData.getDeletingBlockKeyFilter();
- blockPendingDeletion = store.getBlockDataTable().getRangeKVs(
- kvContainerData.startKeyEmpty(), Integer.MAX_VALUE,
kvContainerData.containerPrefix(), filter, true)
- // TODO: add a count() method to avoid creating a list
- .size();
+ LOG.warn("Missing pendingDeleteBlockCount/size from {}: recalculate them
from delete txn tables",
+ metadataTable.getName());
+ PendingDelete pendingDeletions = getAggregatePendingDelete(
+ store, kvContainerData, kvContainerData.getSchemaVersion());
+ blockPendingDeletion = pendingDeletions.getCount();
+ blockPendingDeletionBytes = pendingDeletions.getBytes();
}
// Set delete transaction id.
Long delTxnId =
@@ -368,7 +379,8 @@ private static void populateContainerMetadata(
blockCount = b.getCount();
}
- kvContainerData.getStatistics().updateBlocks(blockBytes, blockCount,
blockPendingDeletion);
+ kvContainerData.getStatistics().updateBlocks(blockBytes, blockCount);
+
kvContainerData.getStatistics().setBlockPendingDeletion(blockPendingDeletion,
blockPendingDeletionBytes);
// If the container is missing a chunks directory, possibly due to the
// bug fixed by HDDS-6235, create it here.
@@ -437,7 +449,7 @@ private static ContainerData.BlockByteAndCounts
getUsedBytesAndBlockCount(Datano
usedBytes += getBlockLengthTryCatch(blockIter.nextBlock());
}
}
- return new ContainerData.BlockByteAndCounts(usedBytes, blockCount, 0);
+ return new ContainerData.BlockByteAndCounts(usedBytes, blockCount, 0, 0);
}
public static long getBlockLengthTryCatch(BlockData block) {
diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingTask.java
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingTask.java
index 1ff13a140c1..1f66cad476f 100644
---
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingTask.java
+++
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingTask.java
@@ -252,7 +252,8 @@ public ContainerBackgroundTaskResult deleteViaSchema1(
// update count of pending deletion blocks, block count and used
// bytes in in-memory container status.
- containerData.getStatistics().updateDeletion(releasedBytes,
deletedBlocksCount, deletedBlocksCount);
+ containerData.getStatistics().decDeletion(releasedBytes, releasedBytes,
+ deletedBlocksCount, deletedBlocksCount);
containerData.getVolume().decrementUsedSpace(releasedBytes);
metrics.incrSuccessCount(deletedBlocksCount);
metrics.incrSuccessBytes(releasedBytes);
@@ -357,6 +358,7 @@ private ContainerBackgroundTaskResult
deleteViaTransactionStore(
int deletedBlocksProcessed = deleteBlocksResult.getBlocksProcessed();
int deletedBlocksCount = deleteBlocksResult.getBlocksDeleted();
long releasedBytes = deleteBlocksResult.getBytesReleased();
+ long processedBytes = deleteBlocksResult.getBytesProcessed();
List<DeletedBlocksTransaction> deletedBlocksTxs =
deleteBlocksResult.deletedBlocksTxs();
deleteBlocksResult.deletedBlocksTxs().forEach(
@@ -393,7 +395,8 @@ private ContainerBackgroundTaskResult
deleteViaTransactionStore(
// update count of pending deletion blocks, block count and used
// bytes in in-memory container status and used space in volume.
- containerData.getStatistics().updateDeletion(releasedBytes,
deletedBlocksCount, deletedBlocksProcessed);
+ containerData.getStatistics().decDeletion(releasedBytes,
processedBytes,
+ deletedBlocksCount, deletedBlocksProcessed);
containerData.getVolume().decrementUsedSpace(releasedBytes);
metrics.incrSuccessCount(deletedBlocksCount);
metrics.incrSuccessBytes(releasedBytes);
@@ -424,6 +427,7 @@ private DeleteTransactionStats deleteTransactions(
int blocksProcessed = 0;
int blocksDeleted = 0;
long bytesReleased = 0;
+ long bytesProcessed = 0;
List<DeletedBlocksTransaction> deletedBlocksTxs = new ArrayList<>();
Instant startTime = Instant.now();
@@ -475,6 +479,7 @@ private DeleteTransactionStats deleteTransactions(
// TODO: handle the bytesReleased correctly for the unexpected
exception.
}
}
+ bytesProcessed += entry.getTotalBlockSize();
deletedBlocksTxs.add(entry);
Duration execTime = Duration.between(startTime, Instant.now());
if (deletedBlocksTxs.size() < delBlocks.size() &&
@@ -491,7 +496,7 @@ private DeleteTransactionStats deleteTransactions(
}
checksumTreeManager.addDeletedBlocks(containerData,
deletedBlocks.values());
return new DeleteTransactionStats(blocksProcessed,
- blocksDeleted, bytesReleased, deletedBlocksTxs);
+ blocksDeleted, bytesReleased, bytesProcessed, deletedBlocksTxs);
}
@Override
@@ -516,13 +521,15 @@ private static class DeleteTransactionStats {
private final int blocksProcessed;
private final int blocksDeleted;
private final long bytesReleased;
+ private final long bytesProcessed;
private final List<DeletedBlocksTransaction> delBlockTxs;
- DeleteTransactionStats(int proceeded, int deleted, long released,
+ DeleteTransactionStats(int proceeded, int deleted, long releasedBytes,
long processedBytes,
List<DeletedBlocksTransaction> delBlocks) {
blocksProcessed = proceeded;
blocksDeleted = deleted;
- bytesReleased = released;
+ bytesReleased = releasedBytes;
+ bytesProcessed = processedBytes;
delBlockTxs = delBlocks;
}
@@ -538,6 +545,10 @@ public long getBytesReleased() {
return bytesReleased;
}
+ public long getBytesProcessed() {
+ return bytesProcessed;
+ }
+
public List<DeletedBlocksTransaction> deletedBlocksTxs() {
return delBlockTxs;
}
diff --git
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
index 34a6936da9e..4f885961b27 100644
---
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
+++
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
@@ -134,6 +134,7 @@ public class TestBlockDeletingService {
private String schemaVersion;
private int blockLimitPerInterval;
private MutableVolumeSet volumeSet;
+ private static final int BLOCK_CHUNK_SIZE = 100;
@BeforeEach
public void init() throws IOException {
@@ -226,7 +227,7 @@ private void createPendingDeleteBlocksSchema1(int
numOfBlocksPerContainer,
container, blockID);
kd.setChunks(chunks);
metadata.getStore().getBlockDataTable().put(deleteStateName, kd);
- container.getContainerData().incrPendingDeletionBlocks(1);
+ container.getContainerData().incrPendingDeletionBlocks(1,
BLOCK_CHUNK_SIZE);
}
updateMetaData(data, container, numOfBlocksPerContainer,
numOfChunksPerBlock);
@@ -258,7 +259,7 @@ private void createPendingDeleteBlocksViaTxn(int
numOfBlocksPerContainer,
LOG.warn("Failed to put block: " + blockID.getLocalID()
+ " in BlockDataTable.");
}
- container.getContainerData().incrPendingDeletionBlocks(1);
+ container.getContainerData().incrPendingDeletionBlocks(1,
BLOCK_CHUNK_SIZE);
// Below we are creating one transaction per block just for
// testing purpose
@@ -277,7 +278,9 @@ private void createTxn(KeyValueContainerData data,
List<Long> containerBlocks,
StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction dtx =
StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction
.newBuilder().setTxID(txnID).setContainerID(containerID)
- .addAllLocalID(containerBlocks).setCount(0).build();
+ .addAllLocalID(containerBlocks)
+ .setTotalBlockSize(containerBlocks.size() * BLOCK_CHUNK_SIZE)
+ .setCount(0).build();
try (BatchOperation batch = metadata.getStore().getBatchHandler()
.initBatchOperation()) {
DatanodeStore ds = metadata.getStore();
@@ -304,7 +307,7 @@ private void createTxn(KeyValueContainerData data,
List<Long> containerBlocks,
private void putChunksInBlock(int numOfChunksPerBlock, int i,
List<ContainerProtos.ChunkInfo> chunks, ChunkBuffer buffer,
ChunkManager chunkManager, KeyValueContainer container, BlockID blockID)
{
- long chunkLength = 100;
+ long chunkLength = BLOCK_CHUNK_SIZE;
try {
for (int k = 0; k < numOfChunksPerBlock; k++) {
// This real chunkName should be localID_chunk_chunkIndex, here is for
@@ -334,7 +337,7 @@ private void putChunksInBlock(int numOfChunksPerBlock, int
i,
private void updateMetaData(KeyValueContainerData data,
KeyValueContainer container, int numOfBlocksPerContainer,
int numOfChunksPerBlock) {
- long chunkLength = 100;
+ long chunkLength = BLOCK_CHUNK_SIZE;
try (DBHandle metadata = BlockUtils.getDB(data, conf)) {
container.getContainerData().getStatistics().setBlockCountForTesting(numOfBlocksPerContainer);
// Set block count, bytes used and pending delete block count.
@@ -346,6 +349,9 @@ private void updateMetaData(KeyValueContainerData data,
metadata.getStore().getMetadataTable()
.put(data.getPendingDeleteBlockCountKey(),
(long) numOfBlocksPerContainer);
+ metadata.getStore().getMetadataTable()
+ .put(data.getPendingDeleteBlockBytesKey(),
+ (long) numOfBlocksPerContainer * BLOCK_CHUNK_SIZE);
} catch (IOException exception) {
LOG.warn("Meta Data update was not successful for container: "
+ container);
@@ -443,11 +449,15 @@ public void
testPendingDeleteBlockReset(ContainerTestVersionInfo versionInfo)
incorrectData));
assertEquals(0, db.getStore().getMetadataTable()
.get(incorrectData.getPendingDeleteBlockCountKey()).longValue());
+ assertEquals(0, db.getStore().getMetadataTable()
+ .get(incorrectData.getPendingDeleteBlockBytesKey()).longValue());
assertEquals(0,
incorrectData.getNumPendingDeletionBlocks());
+ assertEquals(0,
+ incorrectData.getBlockPendingDeletionBytes());
// Alter the pending delete value in memory and the DB.
- incorrectData.incrPendingDeletionBlocks(blockDeleteLimit);
+ incorrectData.incrPendingDeletionBlocks(blockDeleteLimit, 512);
db.getStore().getMetadataTable().put(
incorrectData.getPendingDeleteBlockCountKey(),
(long)blockDeleteLimit);
@@ -460,14 +470,20 @@ public void
testPendingDeleteBlockReset(ContainerTestVersionInfo versionInfo)
// Check its metadata was set up correctly.
assertEquals(correctNumBlocksToDelete,
correctData.getNumPendingDeletionBlocks());
+ assertEquals(correctNumBlocksToDelete * BLOCK_CHUNK_SIZE,
+ correctData.getBlockPendingDeletionBytes());
try (DBHandle db = BlockUtils.getDB(correctData, conf)) {
assertEquals(correctNumBlocksToDelete,
getUnderDeletionBlocksCount(db, correctData));
assertEquals(correctNumBlocksToDelete,
db.getStore().getMetadataTable()
.get(correctData.getPendingDeleteBlockCountKey()).longValue());
+ assertEquals(correctNumBlocksToDelete * BLOCK_CHUNK_SIZE,
+ db.getStore().getMetadataTable()
+ .get(correctData.getPendingDeleteBlockBytesKey()).longValue());
}
+
// Create the deleting service instance with very large interval between
// runs so we can trigger it manually.
ContainerMetrics metrics = ContainerMetrics.create(conf);
@@ -486,6 +502,7 @@ public void
testPendingDeleteBlockReset(ContainerTestVersionInfo versionInfo)
// Pending delete block count in the incorrect container should be fixed
// and reset to 0.
assertEquals(0, incorrectData.getNumPendingDeletionBlocks());
+ assertEquals(0, incorrectData.getBlockPendingDeletionBytes());
try (DBHandle db = BlockUtils.getDB(incorrectData, conf)) {
assertEquals(0, getUnderDeletionBlocksCount(db,
incorrectData));
@@ -495,12 +512,17 @@ public void
testPendingDeleteBlockReset(ContainerTestVersionInfo versionInfo)
// Correct container should not have been processed.
assertEquals(correctNumBlocksToDelete,
correctData.getNumPendingDeletionBlocks());
+ assertEquals(correctNumBlocksToDelete * BLOCK_CHUNK_SIZE,
+ correctData.getBlockPendingDeletionBytes());
try (DBHandle db = BlockUtils.getDB(correctData, conf)) {
assertEquals(correctNumBlocksToDelete,
getUnderDeletionBlocksCount(db, correctData));
assertEquals(correctNumBlocksToDelete,
db.getStore().getMetadataTable()
.get(correctData.getPendingDeleteBlockCountKey()).longValue());
+ assertEquals(correctNumBlocksToDelete * BLOCK_CHUNK_SIZE,
+ db.getStore().getMetadataTable()
+ .get(correctData.getPendingDeleteBlockBytesKey()).longValue());
}
// On the second run, the correct container should be picked up, because
@@ -510,20 +532,26 @@ public void
testPendingDeleteBlockReset(ContainerTestVersionInfo versionInfo)
// The incorrect container should remain in the same state after being
// fixed.
assertEquals(0, incorrectData.getNumPendingDeletionBlocks());
+ assertEquals(0, incorrectData.getBlockPendingDeletionBytes());
try (DBHandle db = BlockUtils.getDB(incorrectData, conf)) {
assertEquals(0, getUnderDeletionBlocksCount(db,
incorrectData));
assertEquals(0, db.getStore().getMetadataTable()
.get(incorrectData.getPendingDeleteBlockCountKey()).longValue());
+ assertEquals(0, db.getStore().getMetadataTable()
+ .get(incorrectData.getPendingDeleteBlockBytesKey()).longValue());
}
// The correct container should have been processed this run and had its
// blocks deleted.
assertEquals(0, correctData.getNumPendingDeletionBlocks());
+ assertEquals(0, correctData.getBlockPendingDeletionBytes());
try (DBHandle db = BlockUtils.getDB(correctData, conf)) {
assertEquals(0, getUnderDeletionBlocksCount(db,
correctData));
assertEquals(0, db.getStore().getMetadataTable()
.get(correctData.getPendingDeleteBlockCountKey()).longValue());
+ assertEquals(0, db.getStore().getMetadataTable()
+ .get(correctData.getPendingDeleteBlockBytesKey()).longValue());
}
}
@@ -579,6 +607,8 @@ public void testBlockDeletion(ContainerTestVersionInfo
versionInfo)
assertEquals(3, getUnderDeletionBlocksCount(meta, data));
assertEquals(3, meta.getStore().getMetadataTable()
.get(data.getPendingDeleteBlockCountKey()).longValue());
+ assertEquals(3 * BLOCK_CHUNK_SIZE, meta.getStore().getMetadataTable()
+ .get(data.getPendingDeleteBlockBytesKey()).longValue());
// Container contains 3 blocks. So, space used by the container
// should be greater than zero.
@@ -612,6 +642,9 @@ public void testBlockDeletion(ContainerTestVersionInfo
versionInfo)
assertEquals(3,
deletingServiceMetrics.getTotalPendingBlockCount());
+ assertEquals(3 * BLOCK_CHUNK_SIZE,
+ deletingServiceMetrics.getTotalPendingBlockBytes());
+
deleteAndWait(svc, 2);
containerData.forEach(c -> assertDeletionsInChecksumFile(c, 3));
@@ -645,10 +678,65 @@ public void testBlockDeletion(ContainerTestVersionInfo
versionInfo)
// So the Pending Block count will be 1
assertEquals(1,
deletingServiceMetrics.getTotalPendingBlockCount());
+ assertEquals(BLOCK_CHUNK_SIZE,
+ deletingServiceMetrics.getTotalPendingBlockBytes());
}
svc.shutdown();
}
+ @ContainerTestVersionInfo.ContainerTest
+ public void
testBlockDeletionMetricsUpdatedProperlyAfterEachExecution(ContainerTestVersionInfo
versionInfo)
+ throws Exception {
+ setLayoutAndSchemaForTest(versionInfo);
+ DatanodeConfiguration dnConf = conf.getObject(DatanodeConfiguration.class);
+ dnConf.setBlockDeletionLimit(1);
+ this.blockLimitPerInterval = dnConf.getBlockDeletionLimit();
+ conf.setFromObject(dnConf);
+ ContainerSet containerSet = newContainerSet();
+
+ // Create transactions including duplicates
+ createToDeleteBlocks(containerSet, 1, 3, 1);
+
+ ContainerMetrics metrics = ContainerMetrics.create(conf);
+ BlockDeletingServiceMetrics blockDeletingServiceMetrics =
BlockDeletingServiceMetrics.create();
+ KeyValueHandler keyValueHandler =
+ ContainerTestUtils.getKeyValueHandler(conf, datanodeUuid,
containerSet, volumeSet, metrics);
+ BlockDeletingServiceTestImpl svc =
+ getBlockDeletingService(containerSet, conf, keyValueHandler);
+ svc.start();
+ GenericTestUtils.waitFor(svc::isStarted, 100, 3000);
+
+ // Ensure 1 container was created
+ List<ContainerData> containerData = Lists.newArrayList();
+ containerSet.listContainer(0L, 1, containerData);
+ assertEquals(1, containerData.size());
+ KeyValueContainerData data = (KeyValueContainerData) containerData.get(0);
+
+ try (DBHandle meta = BlockUtils.getDB(data, conf)) {
+ //Execute first delete to update metrics
+ deleteAndWait(svc, 1);
+
+ assertEquals(3, blockDeletingServiceMetrics.getTotalPendingBlockCount());
+ assertEquals(3 * BLOCK_CHUNK_SIZE,
blockDeletingServiceMetrics.getTotalPendingBlockBytes());
+
+ //Execute the second delete to check whether metrics values decreased
+ deleteAndWait(svc, 2);
+
+ assertEquals(2, blockDeletingServiceMetrics.getTotalPendingBlockCount());
+ assertEquals(2 * BLOCK_CHUNK_SIZE,
blockDeletingServiceMetrics.getTotalPendingBlockBytes());
+
+ //Execute the third delete to check whether metrics values decreased
+ deleteAndWait(svc, 3);
+
+ assertEquals(1, blockDeletingServiceMetrics.getTotalPendingBlockCount());
+ assertEquals(1 * BLOCK_CHUNK_SIZE,
blockDeletingServiceMetrics.getTotalPendingBlockBytes());
+
+ } catch (Exception ex) {
+ ex.printStackTrace();
+ fail("Test failed with exception: " + ex.getMessage());
+ }
+ }
+
@ContainerTestVersionInfo.ContainerTest
public void testWithUnrecordedBlocks(ContainerTestVersionInfo versionInfo)
throws Exception {
@@ -723,7 +811,7 @@ public void
testWithUnrecordedBlocks(ContainerTestVersionInfo versionInfo)
createTxn(ctr1, unrecordedBlockIds, 100, ctr1.getContainerID());
ctr1.updateDeleteTransactionId(100);
- ctr1.incrPendingDeletionBlocks(numUnrecordedBlocks);
+ ctr1.incrPendingDeletionBlocks(numUnrecordedBlocks, BLOCK_CHUNK_SIZE);
updateMetaData(ctr1, (KeyValueContainer) containerSet.getContainer(
ctr1.getContainerID()), 3, 1);
// Ensure there are 3 + 4 = 7 blocks under deletion
diff --git
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestKeyValueContainerData.java
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestKeyValueContainerData.java
index 509806f4551..2e54a12ef8f 100644
---
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestKeyValueContainerData.java
+++
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestKeyValueContainerData.java
@@ -91,7 +91,7 @@ public void testKeyValueData(ContainerTestVersionInfo
versionInfo) {
statistics.updateRead(10);
statistics.incrementBlockCount();
kvData.updateWriteStats(10, true);
- kvData.incrPendingDeletionBlocks(1);
+ kvData.incrPendingDeletionBlocks(1, 256);
kvData.setSchemaVersion(
VersionedDatanodeFeatures.SchemaV3.chooseSchemaVersion(conf));
long expectedDataHash = 1234L;
diff --git
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java
index e47f94981c3..1bdac646b73 100644
---
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java
+++
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java
@@ -304,7 +304,7 @@ private KeyValueContainer createTestContainer() throws
IOException {
db.getStore().getBatchHandler().commitBatchOperation(batch);
cData.updateDeleteTransactionId(txn.getTxID());
- cData.incrPendingDeletionBlocks(BLOCKS_PER_TXN);
+ cData.incrPendingDeletionBlocks(BLOCKS_PER_TXN, 256);
}
}
}
diff --git
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java
index 54be2b842fa..4cef3c8c45f 100644
---
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java
+++
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java
@@ -92,7 +92,7 @@ public void testRandomChoosingPolicy(ContainerLayoutVersion
layout)
layout,
ContainerTestHelper.CONTAINER_MAX_SIZE, UUID.randomUUID().toString(),
UUID.randomUUID().toString());
- data.incrPendingDeletionBlocks(20);
+ data.incrPendingDeletionBlocks(20, 256);
data.closeContainer();
KeyValueContainer container = new KeyValueContainer(data, conf);
containerSet.addContainer(container);
@@ -189,7 +189,7 @@ private KeyValueContainerData createContainerWithState(
containerId, layout, ContainerTestHelper.CONTAINER_MAX_SIZE,
UUID.randomUUID().toString(), UUID.randomUUID().toString());
- data.incrPendingDeletionBlocks(5);
+ data.incrPendingDeletionBlocks(5, 5 * 256);
data.setState(state);
containerSet.addContainer(new KeyValueContainer(data, conf));
@@ -227,7 +227,7 @@ public void
testTopNOrderedChoosingPolicy(ContainerLayoutVersion layout)
if (i != numContainers) {
int deletionBlocks = random.nextInt(numContainers) + 1;
numberOfBlocks.add(deletionBlocks);
- data.incrPendingDeletionBlocks(deletionBlocks);
+ data.incrPendingDeletionBlocks(deletionBlocks, 256);
name2Count.put(containerId, deletionBlocks);
}
KeyValueContainer container = new KeyValueContainer(data, conf);
diff --git
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java
index 50b08c7aa2f..2b6b387dbe7 100644
---
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java
+++
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java
@@ -411,6 +411,76 @@ public void testDuplicateDeleteBlocksCommand(
((KeyValueContainerData)
container.getContainerData()).getNumPendingDeletionBlocks());
}
+ @ContainerTestVersionInfo.ContainerTest
+ public void testDuplicateTxFromSCMHandledByDeleteBlocksCommandHandler(
+ ContainerTestVersionInfo versionInfo) throws Exception {
+ prepareTest(versionInfo);
+ assertThat(containerSet.containerCount()).isGreaterThan(0);
+ Container<?> container = containerSet.getContainerIterator(volume1).next();
+ KeyValueContainerData containerData = (KeyValueContainerData)
container.getContainerData();
+
+ // Create a delete transaction with specific block count and size
+ DeletedBlocksTransaction transaction =
DeletedBlocksTransaction.newBuilder()
+ .setContainerID(container.getContainerData().getContainerID())
+ .setCount(0)
+ .addLocalID(1L)
+ .addLocalID(2L)
+ .addLocalID(3L) // 3 blocks
+ .setTxID(100)
+ .setTotalBlockSize(768L) // 3 blocks * 256 bytes each
+ .build();
+
+ // Record initial state
+ long initialPendingBlocks = containerData.getNumPendingDeletionBlocks();
+ long initialPendingBytes = containerData.getBlockPendingDeletionBytes();
+
+ // Execute the first transaction - should succeed
+ List<DeleteBlockTransactionResult> results1 =
+ handler.executeCmdWithRetry(Arrays.asList(transaction));
+
+ // Verify first execution succeeded
+ assertEquals(1, results1.size());
+ assertTrue(results1.get(0).getSuccess());
+
+ // Verify pending block count and size increased
+ long afterFirstPendingBlocks = containerData.getNumPendingDeletionBlocks();
+ long afterFirstPendingBytes = containerData.getBlockPendingDeletionBytes();
+ assertEquals(initialPendingBlocks + 3, afterFirstPendingBlocks);
+ assertEquals(initialPendingBytes + 768L, afterFirstPendingBytes);
+
+ // Execute the same transaction again (duplicate) - should be handled as
duplicate
+ List<DeleteBlockTransactionResult> results2 =
+ handler.executeCmdWithRetry(Arrays.asList(transaction));
+
+ // Verify duplicate execution succeeded but didn't change counters
+ assertEquals(1, results2.size());
+ assertTrue(results2.get(0).getSuccess());
+
+ // Verify pending block count and size remained the same (no double
counting)
+ assertEquals(afterFirstPendingBlocks,
containerData.getNumPendingDeletionBlocks());
+ assertEquals(afterFirstPendingBytes,
containerData.getBlockPendingDeletionBytes());
+
+ long afterSecondPendingBlocks =
containerData.getNumPendingDeletionBlocks();
+ long afterSecondPendingBytes =
containerData.getBlockPendingDeletionBytes();
+ DeletedBlocksTransaction transaction2 =
DeletedBlocksTransaction.newBuilder()
+ .setContainerID(container.getContainerData().getContainerID())
+ .setCount(0)
+ .addLocalID(1L)
+ .addLocalID(2L)
+ .addLocalID(3L) // 3 blocks
+ .setTxID(90)
+ .setTotalBlockSize(768L) // 3 blocks * 256 bytes each
+ .build();
+
+ List<DeleteBlockTransactionResult> results3 =
+ handler.executeCmdWithRetry(Arrays.asList(transaction2));
+ assertEquals(1, results3.size());
+ assertTrue(results3.get(0).getSuccess());
+ // Verify pending block count and size increased since it's processed.
+ assertEquals(afterSecondPendingBlocks + 3,
containerData.getNumPendingDeletionBlocks());
+ assertEquals(afterSecondPendingBytes + 768L,
containerData.getBlockPendingDeletionBytes());
+ }
+
private DeletedBlocksTransaction createDeletedBlocksTransaction(long txID,
long containerID) {
return DeletedBlocksTransaction.newBuilder()
@@ -428,7 +498,7 @@ public void handle(KeyValueContainerData containerData,
if
(DeleteBlocksCommandHandler.isDuplicateTransaction(containerData.getContainerID(),
containerData, tx, null)) {
return;
}
- containerData.incrPendingDeletionBlocks(tx.getLocalIDCount());
+ containerData.incrPendingDeletionBlocks(tx.getLocalIDCount(),
tx.getLocalIDCount() * 256L);
containerData.updateDeleteTransactionId(tx.getTxID());
}
}
diff --git
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
index 28c118b517f..9e66aaeb067 100644
---
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
+++
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
@@ -1123,6 +1123,8 @@ private void testMixedSchemaImport(String dir,
Table<String, Long> metadataTable = meta.getStore().getMetadataTable();
metadataTable.put(data.getPendingDeleteBlockCountKey(),
pendingDeleteBlockCount);
+ metadataTable.put(data.getPendingDeleteBlockBytesKey(),
+ pendingDeleteBlockCount * 256);
}
container.close();
@@ -1165,6 +1167,8 @@ private void testMixedSchemaImport(String dir,
importedContainer.getContainerData().getSchemaVersion());
assertEquals(pendingDeleteBlockCount,
importedContainer.getContainerData().getNumPendingDeletionBlocks());
+ assertEquals(pendingDeleteBlockCount * 256,
+ importedContainer.getContainerData().getBlockPendingDeletionBytes());
}
@ContainerTestVersionInfo.ContainerTest
diff --git
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java
index 0280296a577..5b633c35cfd 100644
---
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java
+++
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java
@@ -48,6 +48,7 @@
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.utils.db.InMemoryTestTable;
@@ -77,7 +78,9 @@
import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler;
import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
import org.apache.hadoop.ozone.container.metadata.ContainerCreateInfo;
+import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaOneImpl;
import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaThreeImpl;
+import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaTwoImpl;
import
org.apache.hadoop.ozone.container.metadata.WitnessedContainerMetadataStore;
import org.apache.hadoop.util.Time;
import org.apache.ozone.test.GenericTestUtils.LogCapturer;
@@ -167,28 +170,72 @@ private void markBlocksForDelete(KeyValueContainer
keyValueContainer,
KeyValueContainerData cData = keyValueContainer.getContainerData();
try (DBHandle metadataStore = BlockUtils.getDB(cData, conf)) {
- for (int i = 0; i < count; i++) {
- Table<String, BlockData> blockDataTable =
- metadataStore.getStore().getBlockDataTable();
-
- Long localID = blockNames.get(i);
- String blk = cData.getBlockKey(localID);
- BlockData blkInfo = blockDataTable.get(blk);
-
- blockDataTable.delete(blk);
- blockDataTable.put(cData.getDeletingBlockKey(localID), blkInfo);
+ if (metadataStore.getStore() instanceof DatanodeStoreSchemaThreeImpl) {
+ DatanodeStoreSchemaThreeImpl schemaThree =
(DatanodeStoreSchemaThreeImpl) metadataStore.getStore();
+ Table<String,
StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction> delTxTable =
+ schemaThree.getDeleteTransactionTable();
+
+ // Fix: Use the correct container prefix format for the delete
transaction key
+ String containerPrefix = cData.containerPrefix();
+ long txId = System.currentTimeMillis();
+ String txKey = containerPrefix + txId; // This ensures the key matches
the container prefix
+
+
StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction.Builder
deleteTxBuilder =
+
StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction.newBuilder()
+ .setTxID(txId)
+ .setContainerID(cData.getContainerID())
+ .setCount(count);
+
+ for (int i = 0; i < count; i++) {
+ Long localID = blockNames.get(i);
+ deleteTxBuilder.addLocalID(localID);
+ }
+
+ StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction
deleteTx = deleteTxBuilder.build();
+ delTxTable.put(txKey, deleteTx); // Use the properly formatted key
+
+ } else if (metadataStore.getStore() instanceof
DatanodeStoreSchemaTwoImpl) {
+ DatanodeStoreSchemaTwoImpl schemaTwoStore =
(DatanodeStoreSchemaTwoImpl) metadataStore.getStore();
+ Table<Long,
StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction> delTxTable =
+ schemaTwoStore.getDeleteTransactionTable();
+
+ long txId = System.currentTimeMillis();
+
StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction.Builder
deleteTxBuilder =
+
StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction.newBuilder()
+ .setTxID(txId)
+ .setContainerID(cData.getContainerID())
+ .setCount(count);
+
+ for (int i = 0; i < count; i++) {
+ Long localID = blockNames.get(i);
+ deleteTxBuilder.addLocalID(localID);
+ }
+
+ StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction
deleteTx = deleteTxBuilder.build();
+ delTxTable.put(txId, deleteTx);
+
+ } else if (metadataStore.getStore() instanceof
DatanodeStoreSchemaOneImpl) {
+ // Schema 1: Move blocks to deleting prefix (this part looks correct)
+ Table<String, BlockData> blockDataTable =
metadataStore.getStore().getBlockDataTable();
+ for (int i = 0; i < count; i++) {
+ Long localID = blockNames.get(i);
+ String blk = cData.getBlockKey(localID);
+ BlockData blkInfo = blockDataTable.get(blk);
+ blockDataTable.delete(blk);
+ blockDataTable.put(cData.getDeletingBlockKey(localID), blkInfo);
+ }
}
if (setMetaData) {
- // Pending delete blocks are still counted towards the block count
- // and bytes used metadata values, so those do not change.
- Table<String, Long> metadataTable =
- metadataStore.getStore().getMetadataTable();
- metadataTable.put(cData.getPendingDeleteBlockCountKey(),
- (long)count);
+ Table<String, Long> metadataTable =
metadataStore.getStore().getMetadataTable();
+ metadataTable.put(cData.getPendingDeleteBlockCountKey(), (long)count);
+ // Also set the pending deletion size
+ long deletionSize = count * blockLen;
+ metadataTable.put(cData.getPendingDeleteBlockBytesKey(), deletionSize);
}
- }
+ metadataStore.getStore().flushDB();
+ }
}
private List<Long> addBlocks(KeyValueContainer keyValueContainer,
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]