This is an automated email from the ASF dual-hosted git repository.

szetszwo pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new 6ff3ad6624 HDDS-12873. Improve ContainerData statistics synchronization. (#8305)
6ff3ad6624 is described below

commit 6ff3ad66247e1954f696452b0ac373f5f36a0271
Author: Tsz-Wo Nicholas Sze <[email protected]>
AuthorDate: Thu Jun 12 11:13:25 2025 -0700

    HDDS-12873. Improve ContainerData statistics synchronization. (#8305)
---
 .../ozone/container/common/impl/ContainerData.java | 310 +++++++++++----------
 .../ContainerDeletionChoosingPolicyTemplate.java   |   6 +-
 .../transport/server/ratis/XceiverServerRatis.java |   2 +-
 .../container/keyvalue/KeyValueContainer.java      |  53 +---
 .../container/keyvalue/KeyValueContainerData.java  |  76 ++---
 .../ozone/container/keyvalue/KeyValueHandler.java  |   6 +-
 .../keyvalue/helpers/KeyValueContainerUtil.java    |  78 +++---
 .../container/keyvalue/impl/BlockManagerImpl.java  |  12 +-
 .../keyvalue/impl/ChunkManagerDispatcher.java      |   2 +-
 .../statemachine/background/BlockDeletingTask.java |  28 +-
 .../container/common/TestBlockDeletingService.java |   2 +-
 .../common/TestKeyValueContainerData.java          |  32 +--
 .../common/impl/TestContainerPersistence.java      |  20 +-
 .../container/common/impl/TestHddsDispatcher.java  |   2 +-
 .../TestKeyValueContainerIntegrityChecks.java      |   4 +-
 .../container/keyvalue/TestTarContainerPacker.java |   2 +-
 .../replication/TestReplicationSupervisor.java     |   2 +-
 .../commandhandler/TestDeleteContainerHandler.java |   5 +-
 18 files changed, 295 insertions(+), 347 deletions(-)
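
For context, the heart of this change is in ContainerData.java below: six
independent AtomicLong counters are replaced by a single synchronized
Statistics object, so related counters (block bytes, block count, pending
deletions) are updated and read together under one lock. A minimal sketch of
the difference, with hypothetical class and field names rather than the
actual patch:

    import java.util.concurrent.atomic.AtomicLong;

    // Before: each counter is thread-safe on its own, but a reader can
    // observe blockBytes from before a deletion and blockCount from after
    // it -- a torn snapshot.
    class AtomicStats {
      final AtomicLong blockBytes = new AtomicLong();
      final AtomicLong blockCount = new AtomicLong();

      void onDelete(long bytes, long count) {
        blockBytes.addAndGet(-bytes);
        blockCount.addAndGet(-count); // a reader may interleave here
      }
    }

    // After: one monitor guards all fields, so multi-field updates and
    // snapshots stay mutually consistent.
    class SyncStats {
      private long blockBytes;
      private long blockCount;

      synchronized void onDelete(long bytes, long count) {
        blockBytes -= bytes;
        blockCount -= count;
      }

      synchronized long[] snapshot() {
        return new long[] {blockBytes, blockCount};
      }
    }
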

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
index 7bb59247ca..1ef5ac23d8 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
@@ -29,7 +29,7 @@
 import static org.apache.hadoop.ozone.OzoneConsts.STATE;
 
 import com.fasterxml.jackson.annotation.JsonIgnore;
-import com.google.common.base.Preconditions;
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.Lists;
 import jakarta.annotation.Nullable;
 import java.io.IOException;
@@ -39,14 +39,16 @@
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 import java.util.Optional;
 import java.util.TreeMap;
-import java.util.concurrent.atomic.AtomicLong;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerType;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
+import org.apache.ratis.util.Preconditions;
 import org.yaml.snakeyaml.Yaml;
 
 /**
@@ -84,13 +86,8 @@ public abstract class ContainerData {
   //ID of the datanode where this container is created
   private final String originNodeId;
 
-  /** parameters for read/write statistics on the container. **/
-  private final AtomicLong readBytes;
-  private final AtomicLong writeBytes;
-  private final AtomicLong readCount;
-  private final AtomicLong writeCount;
-  private final AtomicLong bytesUsed;
-  private final AtomicLong blockCount;
+  /** Read/write/block statistics of this container. **/
+  private final Statistics statistics = new Statistics();
 
   private HddsVolume volume;
 
@@ -137,20 +134,14 @@ protected ContainerData(ContainerType type, long containerId,
                           ContainerLayoutVersion layoutVersion, long size,
                           String originPipelineId,
                           String originNodeId) {
-    Preconditions.checkNotNull(type);
-
-    this.containerType = type;
+    this.containerType = Objects.requireNonNull(type, "type == null");
     this.containerID = containerId;
     this.layOutVersion = layoutVersion.getVersion();
     this.metadata = new TreeMap<>();
     this.state = ContainerDataProto.State.OPEN;
-    this.readCount = new AtomicLong(0L);
-    this.readBytes =  new AtomicLong(0L);
-    this.writeCount =  new AtomicLong(0L);
-    this.writeBytes =  new AtomicLong(0L);
-    this.bytesUsed = new AtomicLong(0L);
-    this.blockCount = new AtomicLong(0L);
     this.maxSize = size;
+    Preconditions.assertTrue(maxSize > 0, () -> "maxSize = " + maxSize + " <= 0");
+
     this.originPipelineId = originPipelineId;
     this.originNodeId = originNodeId;
     this.isEmpty = false;
@@ -368,7 +359,7 @@ public void commitSpace() {
     HddsVolume cVol;
 
     //we don't expect duplicate calls
-    Preconditions.checkState(!committedSpace);
+    Preconditions.assertTrue(!committedSpace);
 
     // Only Open Containers have Committed Space
     if (myState != ContainerDataProto.State.OPEN) {
@@ -383,43 +374,8 @@ public void commitSpace() {
     }
   }
 
-  /**
-   * Get the number of bytes read from the container.
-   * @return the number of bytes read from the container.
-   */
-  public long getReadBytes() {
-    return readBytes.get();
-  }
-
-  /**
-   * Increase the number of bytes read from the container.
-   * @param bytes number of bytes read.
-   */
-  public void incrReadBytes(long bytes) {
-    this.readBytes.addAndGet(bytes);
-  }
-
-  /**
-   * Get the number of times the container is read.
-   * @return the number of times the container is read.
-   */
-  public long getReadCount() {
-    return readCount.get();
-  }
-
-  /**
-   * Increase the number of container read count by 1.
-   */
-  public void incrReadCount() {
-    this.readCount.incrementAndGet();
-  }
-
-  /**
-   * Get the number of bytes write into the container.
-   * @return the number of bytes write into the container.
-   */
-  public long getWriteBytes() {
-    return writeBytes.get();
+  public Statistics getStatistics() {
+    return statistics;
   }
 
   /**
@@ -427,8 +383,7 @@ public long getWriteBytes() {
    * Also decrement committed bytes against the bytes written.
    * @param bytes the number of bytes write into the container.
    */
-  public void incrWriteBytes(long bytes) {
-    this.writeBytes.addAndGet(bytes);
+  private void incrWriteBytes(long bytes) {
     /*
        Increase the cached Used Space in VolumeInfo as it
        maybe not updated, DU or DedicatedDiskSpaceUsage runs
@@ -448,53 +403,12 @@ public void incrWriteBytes(long bytes) {
     }
   }
 
-  /**
-   * Get the number of writes into the container.
-   * @return the number of writes into the container.
-   */
-  public long getWriteCount() {
-    return writeCount.get();
-  }
-
-  /**
-   * Increase the number of writes into the container by 1.
-   */
-  public void incrWriteCount() {
-    this.writeCount.incrementAndGet();
-  }
-
-  /**
-   * Sets the number of bytes used by the container.
-   * @param used
-   */
-  public void setBytesUsed(long used) {
-    this.bytesUsed.set(used);
-  }
-
   /**
    * Get the number of bytes used by the container.
    * @return the number of bytes used by the container.
    */
   public long getBytesUsed() {
-    return bytesUsed.get();
-  }
-
-  /**
-   * Increase the number of bytes used by the container.
-   * @param used number of bytes used by the container.
-   * @return the current number of bytes used by the container afert increase.
-   */
-  public long incrBytesUsed(long used) {
-    return this.bytesUsed.addAndGet(used);
-  }
-
-  /**
-   * Decrease the number of bytes used by the container.
-   * @param reclaimed the number of bytes reclaimed from the container.
-   * @return the current number of bytes used by the container after decrease.
-   */
-  public long decrBytesUsed(long reclaimed) {
-    return this.bytesUsed.addAndGet(-1L * reclaimed);
+    return getStatistics().getBlockBytes();
   }
 
   /**
@@ -515,35 +429,10 @@ public HddsVolume getVolume() {
     return volume;
   }
 
-  /**
-   * Increments the number of blocks in the container.
-   */
-  public void incrBlockCount() {
-    this.blockCount.incrementAndGet();
-  }
-
-  /**
-   * Decrements number of blocks in the container.
-   */
-  public void decrBlockCount() {
-    this.blockCount.decrementAndGet();
-  }
-
-  /**
-   * Decrease the count of blocks (blocks) in the container.
-   *
-   * @param deletedBlockCount
-   */
-  public void decrBlockCount(long deletedBlockCount) {
-    this.blockCount.addAndGet(-1 * deletedBlockCount);
-  }
-
-  /**
-   * Returns number of blocks in the container.
-   * @return block count
-   */
+  /** For testing only. */
+  @VisibleForTesting
   public long getBlockCount() {
-    return this.blockCount.get();
+    return getStatistics().getBlockByteAndCounts().getCount();
   }
 
   public boolean isEmpty() {
@@ -558,14 +447,6 @@ public void markAsEmpty() {
     this.isEmpty = true;
   }
 
-  /**
-   * Set's number of blocks in the container.
-   * @param count
-   */
-  public void setBlockCount(long count) {
-    this.blockCount.set(count);
-  }
-
   public void setChecksumTo0ByteArray() {
     this.checksum = DUMMY_CHECKSUM;
   }
@@ -654,16 +535,8 @@ public void computeAndSetChecksum(Yaml yaml) throws IOException {
    */
   public abstract long getBlockCommitSequenceId();
 
-  public void updateReadStats(long length) {
-    incrReadCount();
-    incrReadBytes(length);
-  }
-
   public void updateWriteStats(long bytesWritten, boolean overwrite) {
-    if (!overwrite) {
-      incrBytesUsed(bytesWritten);
-    }
-    incrWriteCount();
+    getStatistics().updateWrite(bytesWritten, overwrite);
     incrWriteBytes(bytesWritten);
   }
 
@@ -675,4 +548,151 @@ public String toString() {
         + ", ri=" + replicaIndex
        + ", origin=[dn_" + originNodeId + ", pipeline_" + originPipelineId + "])";
   }
+
+  /**
+   * Block byte used, block count and pending deletion count.
+   * This class is immutable.
+   */
+  public static class BlockByteAndCounts {
+    private final long bytes;
+    private final long count;
+    private final long pendingDeletion;
+
+    public BlockByteAndCounts(long bytes, long count, long pendingDeletion) {
+      this.bytes = bytes;
+      this.count = count;
+      this.pendingDeletion = pendingDeletion;
+    }
+
+    public long getBytes() {
+      return bytes;
+    }
+
+    public long getCount() {
+      return count;
+    }
+
+    public long getPendingDeletion() {
+      return pendingDeletion;
+    }
+  }
+
+  /**
+   * Read/write/block statistics of a container.
+   * This class is thread-safe -- all methods are synchronized.
+   */
+  public static class Statistics {
+    private long readBytes;
+    private long readCount;
+
+    private long writeBytes;
+    private long writeCount;
+
+    private long blockBytes;
+    private long blockCount;
+    private long blockPendingDeletion;
+
+    public synchronized long getWriteBytes() {
+      return writeBytes;
+    }
+
+    public synchronized long getBlockBytes() {
+      return blockBytes;
+    }
+
+    public synchronized BlockByteAndCounts getBlockByteAndCounts() {
+      return new BlockByteAndCounts(blockBytes, blockCount, blockPendingDeletion);
+    }
+
+    public synchronized long getBlockPendingDeletion() {
+      return blockPendingDeletion;
+    }
+
+    public synchronized void incrementBlockCount() {
+      blockCount++;
+    }
+
+    /** Update for reading a block with the given length. */
+    public synchronized void updateRead(long length) {
+      readCount++;
+      readBytes += length;
+    }
+
+    /** Update for writing a block with the given length. */
+    public synchronized void updateWrite(long length, boolean overwrite) {
+      if (!overwrite) {
+        blockBytes += length;
+      }
+      writeCount++;
+      writeBytes += length;
+    }
+
+    public synchronized void updateDeletion(long deletedBytes, long deletedBlockCount, long processedBlockCount) {
+      blockBytes -= deletedBytes;
+      blockCount -= deletedBlockCount;
+      blockPendingDeletion -= processedBlockCount;
+    }
+
+    public synchronized void updateBlocks(long bytes, long count, long pendingDeletionIncrement) {
+      blockBytes = bytes;
+      blockCount = count;
+      blockPendingDeletion += pendingDeletionIncrement;
+    }
+
+    public synchronized ContainerDataProto.Builder setContainerDataProto(ContainerDataProto.Builder b) {
+      if (blockBytes > 0) {
+        b.setBytesUsed(blockBytes);
+      }
+      return b.setBlockCount(blockCount);
+    }
+
+    public synchronized ContainerReplicaProto.Builder setContainerReplicaProto(ContainerReplicaProto.Builder b) {
+      return b.setReadBytes(readBytes)
+          .setReadCount(readCount)
+          .setWriteBytes(writeBytes)
+          .setWriteCount(writeCount)
+          .setUsed(blockBytes)
+          .setKeyCount(blockCount);
+    }
+
+    public synchronized void addBlockPendingDeletion(long count) {
+      blockPendingDeletion += count;
+    }
+
+    public synchronized void resetBlockPendingDeletion() {
+      blockPendingDeletion = 0;
+    }
+
+    public synchronized void assertRead(long expectedBytes, long expectedCount) {
+      Preconditions.assertSame(expectedBytes, readBytes, "readBytes");
+      Preconditions.assertSame(expectedCount, readCount, "readCount");
+    }
+
+    public synchronized void assertWrite(long expectedBytes, long expectedCount) {
+      Preconditions.assertSame(expectedBytes, writeBytes, "writeBytes");
+      Preconditions.assertSame(expectedCount, writeCount, "writeCount");
+    }
+
+    public synchronized void assertBlock(long expectedBytes, long expectedCount, long expectedPendingDeletion) {
+      Preconditions.assertSame(expectedBytes, blockBytes, "blockBytes");
+      Preconditions.assertSame(expectedCount, blockCount, "blockCount");
+      Preconditions.assertSame(expectedPendingDeletion, blockPendingDeletion, "blockPendingDeletion");
+    }
+
+    public synchronized void setBlockCountForTesting(long count) {
+      blockCount = count;
+    }
+
+    public synchronized void setBlockBytesForTesting(long bytes) {
+      blockBytes = bytes;
+    }
+
+    @Override
+    public synchronized String toString() {
+      return "Statistics{read(" + readBytes + " bytes, #" + readCount + ")"
+          + ", write(" + writeBytes + " bytes, #" + writeCount + ")"
+          + ", block(" + blockBytes + " bytes, #" + blockCount
+          + ", pendingDelete=" + blockPendingDeletion + ")}";
+    }
+  }
 }
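
Callers now reach the counters through getStatistics(), and multi-field reads
go through the immutable BlockByteAndCounts snapshot instead of reading
counters one at a time. A short usage sketch against the API added above (the
wrapper class and helper methods are illustrative, not part of the patch):

    import org.apache.hadoop.ozone.container.common.impl.ContainerData;

    final class StatisticsUsage {
      // Record a block read of 'length' bytes.
      static void recordRead(ContainerData data, long length) {
        data.getStatistics().updateRead(length);
      }

      // Immutable, internally consistent view of bytes/count/pendingDeletion;
      // safer than three separate getters that writers could interleave with.
      static ContainerData.BlockByteAndCounts snapshot(ContainerData data) {
        return data.getStatistics().getBlockByteAndCounts();
      }
    }
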
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicyTemplate.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicyTemplate.java
index cabe91e6c8..775303a5aa 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicyTemplate.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicyTemplate.java
@@ -80,8 +80,10 @@ public final List<ContainerBlockInfo> chooseContainerForBlockDeletion(
         }
       }
     }
-    LOG.info("Chosen {}/{} blocks from {} candidate containers.",
-        (originalBlockCount - blockCount), blockCount, orderedList.size());
+    if (!orderedList.isEmpty()) {
+      LOG.info("Chosen {}/{} blocks from {} candidate containers.",
+          (originalBlockCount - blockCount), blockCount, orderedList.size());
+    }
     return result;
   }
 
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
index b2024c3925..62a24401aa 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
@@ -781,7 +781,7 @@ private long calculatePipelineBytesWritten(HddsProtos.PipelineID pipelineID) {
       ContainerData containerData = container.getContainerData();
       if (containerData.getOriginPipelineId()
           .compareTo(pipelineID.getId()) == 0) {
-        bytesWritten += containerData.getWriteBytes();
+        bytesWritten += containerData.getStatistics().getWriteBytes();
       }
     }
     return bytesWritten;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
index 030392045d..ce3d709e24 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
@@ -849,57 +849,8 @@ private void clearPendingPutBlockCache() {
    * Returns KeyValueContainerReport for the KeyValueContainer.
    */
   @Override
-  public ContainerReplicaProto getContainerReport()
-      throws StorageContainerException {
-    ContainerReplicaProto.Builder ciBuilder =
-        ContainerReplicaProto.newBuilder();
-    ciBuilder.setContainerID(containerData.getContainerID())
-        .setReadCount(containerData.getReadCount())
-        .setWriteCount(containerData.getWriteCount())
-        .setReadBytes(containerData.getReadBytes())
-        .setWriteBytes(containerData.getWriteBytes())
-        .setKeyCount(containerData.getBlockCount())
-        .setUsed(containerData.getBytesUsed())
-        .setState(getHddsState())
-        .setReplicaIndex(containerData.getReplicaIndex())
-        .setDeleteTransactionId(containerData.getDeleteTransactionId())
-        .setBlockCommitSequenceId(containerData.getBlockCommitSequenceId())
-        .setOriginNodeId(containerData.getOriginNodeId())
-        .setIsEmpty(containerData.isEmpty());
-    return ciBuilder.build();
-  }
-
-  /**
-   * Returns LifeCycle State of the container.
-   * @return LifeCycle State of the container in HddsProtos format
-   * @throws StorageContainerException
-   */
-  private ContainerReplicaProto.State getHddsState()
-      throws StorageContainerException {
-    ContainerReplicaProto.State state;
-    switch (containerData.getState()) {
-    case OPEN:
-      state = ContainerReplicaProto.State.OPEN;
-      break;
-    case CLOSING:
-      state = ContainerReplicaProto.State.CLOSING;
-      break;
-    case QUASI_CLOSED:
-      state = ContainerReplicaProto.State.QUASI_CLOSED;
-      break;
-    case CLOSED:
-      state = ContainerReplicaProto.State.CLOSED;
-      break;
-    case UNHEALTHY:
-      state = ContainerReplicaProto.State.UNHEALTHY;
-      break;
-    case DELETED:
-      state = ContainerReplicaProto.State.DELETED;
-      break;
-    default:
-      throw new StorageContainerException("Invalid Container state: " + containerData, INVALID_CONTAINER_STATE);
-    }
-    return state;
+  public ContainerReplicaProto getContainerReport() throws StorageContainerException {
+    return containerData.buildContainerReplicaProto();
   }
 
   /**
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
index f0e350c2fb..8a7758cd84 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.ozone.container.keyvalue;
 
 import static java.lang.Math.max;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.INVALID_CONTAINER_STATE;
 import static org.apache.hadoop.ozone.OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID;
 import static org.apache.hadoop.ozone.OzoneConsts.BLOCK_COUNT;
 import static org.apache.hadoop.ozone.OzoneConsts.CHUNKS_PATH;
@@ -44,9 +45,10 @@
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.atomic.AtomicLong;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
+import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
 import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter;
 import org.apache.hadoop.hdds.utils.db.BatchOperation;
 import org.apache.hadoop.hdds.utils.db.Table;
@@ -79,11 +81,6 @@ public class KeyValueContainerData extends ContainerData {
 
   private String schemaVersion;
 
-  /**
-   * Number of pending deletion blocks in KeyValueContainer.
-   */
-  private final AtomicLong numPendingDeletionBlocks;
-
   private long deleteTransactionId;
 
   private long blockCommitSequenceId;
@@ -110,7 +107,6 @@ public KeyValueContainerData(long id, ContainerLayoutVersion layoutVersion,
       long size, String originPipelineId, String originNodeId) {
     super(ContainerProtos.ContainerType.KeyValueContainer, id, layoutVersion,
         size, originPipelineId, originNodeId);
-    this.numPendingDeletionBlocks = new AtomicLong(0);
     this.deleteTransactionId = 0;
     finalizedBlockSet =  ConcurrentHashMap.newKeySet();
   }
@@ -119,7 +115,6 @@ public KeyValueContainerData(KeyValueContainerData source) {
     super(source);
     Preconditions.checkArgument(source.getContainerType()
         == ContainerProtos.ContainerType.KeyValueContainer);
-    this.numPendingDeletionBlocks = new AtomicLong(0);
     this.deleteTransactionId = 0;
     this.schemaVersion = source.getSchemaVersion();
     finalizedBlockSet = ConcurrentHashMap.newKeySet();
@@ -240,23 +235,14 @@ public void setContainerDBType(String containerDBType) {
    * @param numBlocks increment number
    */
   public void incrPendingDeletionBlocks(long numBlocks) {
-    this.numPendingDeletionBlocks.addAndGet(numBlocks);
-  }
-
-  /**
-   * Decrease the count of pending deletion blocks.
-   *
-   * @param numBlocks decrement number
-   */
-  public void decrPendingDeletionBlocks(long numBlocks) {
-    this.numPendingDeletionBlocks.addAndGet(-1 * numBlocks);
+    getStatistics().addBlockPendingDeletion(numBlocks);
   }
 
   /**
    * Get the number of pending deletion blocks.
    */
   public long getNumPendingDeletionBlocks() {
-    return this.numPendingDeletionBlocks.get();
+    return getStatistics().getBlockPendingDeletion();
   }
 
   /**
@@ -275,6 +261,39 @@ public long getDeleteTransactionId() {
     return deleteTransactionId;
   }
 
+  ContainerReplicaProto buildContainerReplicaProto() throws StorageContainerException {
+    return getStatistics().setContainerReplicaProto(ContainerReplicaProto.newBuilder())
+        .setContainerID(getContainerID())
+        .setState(getContainerReplicaProtoState(getState()))
+        .setIsEmpty(isEmpty())
+        .setOriginNodeId(getOriginNodeId())
+        .setReplicaIndex(getReplicaIndex())
+        .setBlockCommitSequenceId(getBlockCommitSequenceId())
+        .setDeleteTransactionId(getDeleteTransactionId())
+        .build();
+  }
+
+  // TODO remove one of the State from proto
+  static ContainerReplicaProto.State getContainerReplicaProtoState(ContainerDataProto.State state)
+      throws StorageContainerException {
+    switch (state) {
+    case OPEN:
+      return ContainerReplicaProto.State.OPEN;
+    case CLOSING:
+      return ContainerReplicaProto.State.CLOSING;
+    case QUASI_CLOSED:
+      return ContainerReplicaProto.State.QUASI_CLOSED;
+    case CLOSED:
+      return ContainerReplicaProto.State.CLOSED;
+    case UNHEALTHY:
+      return ContainerReplicaProto.State.UNHEALTHY;
+    case DELETED:
+      return ContainerReplicaProto.State.DELETED;
+    default:
+      throw new StorageContainerException("Invalid container state: " + state, INVALID_CONTAINER_STATE);
+    }
+  }
+
   /**
    * Add the given localID of a block to the finalizedBlockSet.
    */
@@ -315,7 +334,6 @@ public ContainerDataProto getProtoBufMessage() {
     builder.setContainerID(this.getContainerID());
     builder.setContainerPath(this.getContainerPath());
     builder.setState(this.getState());
-    builder.setBlockCount(this.getBlockCount());
 
     for (Map.Entry<String, String> entry : getMetadata().entrySet()) {
       ContainerProtos.KeyValue.Builder keyValBuilder =
@@ -324,9 +342,7 @@ public ContainerDataProto getProtoBufMessage() {
           .setValue(entry.getValue()).build());
     }
 
-    if (this.getBytesUsed() >= 0) {
-      builder.setBytesUsed(this.getBytesUsed());
-    }
+    getStatistics().setContainerDataProto(builder);
 
     if (this.getContainerType() != null) {
       builder.setContainerType(ContainerProtos.ContainerType.KeyValueContainer);
@@ -353,20 +369,18 @@ public void updateAndCommitDBCounters(DBHandle db,
     Table<String, Long> metadataTable = db.getStore().getMetadataTable();
 
     // Set Bytes used and block count key.
-    metadataTable.putWithBatch(batchOperation, getBytesUsedKey(),
-            getBytesUsed() - releasedBytes);
-    metadataTable.putWithBatch(batchOperation, getBlockCountKey(),
-            getBlockCount() - deletedBlockCount);
-    metadataTable.putWithBatch(batchOperation,
-        getPendingDeleteBlockCountKey(),
-        getNumPendingDeletionBlocks() - deletedBlockCount);
+    final BlockByteAndCounts b = getStatistics().getBlockByteAndCounts();
+    metadataTable.putWithBatch(batchOperation, getBytesUsedKey(), b.getBytes() - releasedBytes);
+    metadataTable.putWithBatch(batchOperation, getBlockCountKey(), b.getCount() - deletedBlockCount);
+    metadataTable.putWithBatch(batchOperation, getPendingDeleteBlockCountKey(),
+        b.getPendingDeletion() - deletedBlockCount);
 
     db.getStore().getBatchHandler().commitBatchOperation(batchOperation);
   }
 
   public void resetPendingDeleteBlockCount(DBHandle db) throws IOException {
     // Reset the in memory metadata.
-    numPendingDeletionBlocks.set(0);
+    getStatistics().resetBlockPendingDeletion();
     // Reset the metadata on disk.
     Table<String, Long> metadataTable = db.getStore().getMetadataTable();
     metadataTable.put(getPendingDeleteBlockCountKey(), 0L);
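
Note how updateAndCommitDBCounters above takes a single BlockByteAndCounts
snapshot and derives all three persisted values from it, so the batch never
mixes counters sampled at different times. The pattern in isolation (a sketch
with an illustrative class and method name, not the actual code):

    import org.apache.hadoop.ozone.container.common.impl.ContainerData;

    final class CounterPersistSketch {
      static void persistCounters(ContainerData data, long releasedBytes, long deletedBlockCount) {
        // One synchronized snapshot...
        final ContainerData.BlockByteAndCounts b = data.getStatistics().getBlockByteAndCounts();
        // ...then every persisted value is computed from the same point in time.
        final long newBytesUsed = b.getBytes() - releasedBytes;
        final long newBlockCount = b.getCount() - deletedBlockCount;
        final long newPendingDeletion = b.getPendingDeletion() - deletedBlockCount;
        // ...write newBytesUsed/newBlockCount/newPendingDeletion in one DB batch and commit...
      }
    }
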
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index 7f192afc29..bd0fd4970a 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -1450,6 +1450,7 @@ private void deleteInternal(Container container, boolean force)
     long startTime = clock.millis();
     container.writeLock();
     try {
+      final ContainerData data = container.getContainerData();
       if (container.getContainerData().getVolume().isFailed()) {
         // if the  volume in which the container resides fails
         // don't attempt to delete/move it. When a volume fails,
@@ -1474,10 +1475,7 @@ private void deleteInternal(Container container, boolean force)
         // container is unhealthy or over-replicated).
         if (container.hasBlocks()) {
           metrics.incContainerDeleteFailedNonEmpty();
-          LOG.error("Received container deletion command for container {} but" +
-                  " the container is not empty with blockCount {}",
-              container.getContainerData().getContainerID(),
-              container.getContainerData().getBlockCount());
+          LOG.error("Received container deletion command for non-empty {}: {}", data, data.getStatistics());
           // blocks table for future debugging.
           // List blocks
           logBlocksIfNonZero(container);
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
index 4b05db71c1..080aa11823 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
@@ -34,6 +34,7 @@
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
+import org.apache.hadoop.ozone.container.common.impl.ContainerData;
 import org.apache.hadoop.ozone.container.common.interfaces.BlockIterator;
 import org.apache.hadoop.ozone.container.common.interfaces.DBHandle;
 import 
org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration;
@@ -279,27 +280,25 @@ private static void populateContainerMetadata(
       KeyValueContainerData kvContainerData, DatanodeStore store,
       boolean bCheckChunksFilePath)
       throws IOException {
-    boolean isBlockMetadataSet = false;
     Table<String, Long> metadataTable = store.getMetadataTable();
 
     // Set pending deleted block count.
+    final long blockPendingDeletion;
     Long pendingDeleteBlockCount =
         metadataTable.get(kvContainerData
             .getPendingDeleteBlockCountKey());
     if (pendingDeleteBlockCount != null) {
-      kvContainerData.incrPendingDeletionBlocks(
-          pendingDeleteBlockCount);
+      blockPendingDeletion = pendingDeleteBlockCount;
     } else {
       // Set pending deleted block count.
+      LOG.warn("Missing pendingDeleteBlockCount from {}: recalculate them from block table", metadataTable.getName());
       MetadataKeyFilters.KeyPrefixFilter filter =
           kvContainerData.getDeletingBlockKeyFilter();
-      int numPendingDeletionBlocks = store.getBlockDataTable()
+      blockPendingDeletion = store.getBlockDataTable()
               .getSequentialRangeKVs(kvContainerData.startKeyEmpty(),
                   Integer.MAX_VALUE, kvContainerData.containerPrefix(),
                   filter).size();
-      kvContainerData.incrPendingDeletionBlocks(numPendingDeletionBlocks);
     }
-
     // Set delete transaction id.
     Long delTxnId =
         metadataTable.get(kvContainerData.getLatestDeleteTxnKey());
@@ -318,24 +317,24 @@ private static void populateContainerMetadata(
 
     // Set bytes used.
     // commitSpace for Open Containers relies on usedBytes
-    Long bytesUsed =
-        metadataTable.get(kvContainerData.getBytesUsedKey());
-    if (bytesUsed != null) {
-      isBlockMetadataSet = true;
-      kvContainerData.setBytesUsed(bytesUsed);
-    }
-
+    final long blockBytes;
+    final long blockCount;
+    final Long metadataTableBytesUsed = metadataTable.get(kvContainerData.getBytesUsedKey());
     // Set block count.
-    Long blockCount = metadataTable.get(
-        kvContainerData.getBlockCountKey());
-    if (blockCount != null) {
-      isBlockMetadataSet = true;
-      kvContainerData.setBlockCount(blockCount);
-    }
-    if (!isBlockMetadataSet) {
-      initializeUsedBytesAndBlockCount(store, kvContainerData);
+    final Long metadataTableBlockCount = metadataTable.get(kvContainerData.getBlockCountKey());
+    if (metadataTableBytesUsed != null && metadataTableBlockCount != null) {
+      blockBytes = metadataTableBytesUsed;
+      blockCount = metadataTableBlockCount;
+    } else {
+      LOG.warn("Missing bytesUsed={} or blockCount={} from {}: recalculate them from block table",
+          metadataTableBytesUsed, metadataTableBlockCount, metadataTable.getName());
+      final ContainerData.BlockByteAndCounts b = getUsedBytesAndBlockCount(store, kvContainerData);
+      blockBytes = b.getBytes();
+      blockCount = b.getCount();
     }
 
+    kvContainerData.getStatistics().updateBlocks(blockBytes, blockCount, blockPendingDeletion);
+
     // If the container is missing a chunks directory, possibly due to the
     // bug fixed by HDDS-6235, create it here.
     File chunksDir = new File(kvContainerData.getChunksPath());
@@ -376,15 +375,8 @@ private static void populateContainerFinalizeBlock(
     }
   }
 
-  /**
-   * Initialize bytes used and block count.
-   * @param kvData
-   * @throws IOException
-   */
-  private static void initializeUsedBytesAndBlockCount(DatanodeStore store,
+  private static ContainerData.BlockByteAndCounts getUsedBytesAndBlockCount(DatanodeStore store,
       KeyValueContainerData kvData) throws IOException {
-    final String errorMessage = "Failed to parse block data for" +
-        " Container " + kvData.getContainerID();
     long blockCount = 0;
     long usedBytes = 0;
 
@@ -394,11 +386,7 @@ private static void initializeUsedBytesAndBlockCount(DatanodeStore store,
 
       while (blockIter.hasNext()) {
         blockCount++;
-        try {
-          usedBytes += getBlockLength(blockIter.nextBlock());
-        } catch (Exception ex) {
-          LOG.error(errorMessage, ex);
-        }
+        usedBytes += getBlockLengthTryCatch(blockIter.nextBlock());
       }
     }
 
@@ -409,18 +397,24 @@ private static void initializeUsedBytesAndBlockCount(DatanodeStore store,
 
       while (blockIter.hasNext()) {
         blockCount++;
-        try {
-          usedBytes += getBlockLength(blockIter.nextBlock());
-        } catch (IOException ex) {
-          LOG.error(errorMessage);
-        }
+        usedBytes += getBlockLengthTryCatch(blockIter.nextBlock());
       }
     }
-    kvData.setBytesUsed(usedBytes);
-    kvData.setBlockCount(blockCount);
+    return new ContainerData.BlockByteAndCounts(usedBytes, blockCount, 0);
+  }
+
+  public static long getBlockLengthTryCatch(BlockData block) {
+    try {
+      return block.getChunks().stream()
+          .mapToLong(ContainerProtos.ChunkInfo::getLen)
+          .sum();
+    } catch (Exception e) {
+      LOG.error("Failed to getBlockLength for block {}", block.getBlockID(), e);
+      return 0;
+    }
   }
 
-  public static long getBlockLength(BlockData block) throws IOException {
+  public static long getBlockLength(BlockData block) {
     return block.getChunks().stream()
         .mapToLong(ContainerProtos.ChunkInfo::getLen)
         .sum();
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
index 38386bdcde..722a76391f 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
@@ -33,6 +33,7 @@
 import org.apache.hadoop.hdds.utils.db.BatchOperation;
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
+import org.apache.hadoop.ozone.container.common.impl.ContainerData;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
 import org.apache.hadoop.ozone.container.common.interfaces.DBHandle;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
@@ -176,15 +177,12 @@ public long persistPutBlock(KeyValueContainer container,
         // block length is used, And also on restart the blocks committed to DB
         // is only used to compute the bytes used. This is done to keep the
         // current behavior and avoid DB write during write chunk operation.
-        db.getStore().getMetadataTable().putWithBatch(
-            batch, containerData.getBytesUsedKey(),
-            containerData.getBytesUsed());
+        final ContainerData.BlockByteAndCounts b = containerData.getStatistics().getBlockByteAndCounts();
+        db.getStore().getMetadataTable().putWithBatch(batch, containerData.getBytesUsedKey(), b.getBytes());
 
         // Set Block Count for a container.
         if (incrBlockCount) {
-          db.getStore().getMetadataTable().putWithBatch(
-              batch, containerData.getBlockCountKey(),
-              containerData.getBlockCount() + 1);
+          db.getStore().getMetadataTable().putWithBatch(batch, containerData.getBlockCountKey(), b.getCount() + 1);
         }
 
         db.getStore().getBatchHandler().commitBatchOperation(batch);
@@ -197,7 +195,7 @@ public long persistPutBlock(KeyValueContainer container,
       // Increment block count and add block to pendingPutBlockCache
       // in-memory after the DB update.
       if (incrBlockCount) {
-        containerData.incrBlockCount();
+        containerData.getStatistics().incrementBlockCount();
       }
 
      // If the Block is not in PendingPutBlockCache (and it is not endOfBlock),
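
persistPutBlock above keeps the database ahead of memory: the incremented
block count is written through the batch first, and the in-memory counter is
bumped only after the batch commits, so a failure between the two cannot
leave memory counting a block the DB never recorded. The ordering, reduced to
its steps (a sketch using the names from the hunks above):

    // 1. Snapshot the statistics once.
    final ContainerData.BlockByteAndCounts b = containerData.getStatistics().getBlockByteAndCounts();
    // 2. putWithBatch(bytesUsedKey, b.getBytes()) and, for a new block,
    //    putWithBatch(blockCountKey, b.getCount() + 1).
    // 3. commitBatchOperation(batch).
    // 4. Only after the commit succeeds, update memory:
    containerData.getStatistics().incrementBlockCount();
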
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDispatcher.java
index e668c42760..5f37d186b1 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDispatcher.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDispatcher.java
@@ -108,7 +108,7 @@ public ChunkBufferToByteString readChunk(Container container, BlockID blockID,
         .readChunk(container, blockID, info, dispatcherContext);
 
     Preconditions.checkState(data != null);
-    container.getContainerData().updateReadStats(info.getLen());
+    container.getContainerData().getStatistics().updateRead(info.getLen());
 
     return data;
   }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingTask.java
index 156657e462..c23e3b4ebc 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingTask.java
@@ -193,8 +193,7 @@ public ContainerBackgroundTaskResult deleteViaSchema1(
       }
 
       List<String> succeedBlocks = new LinkedList<>();
-      LOG.debug("Container : {}, To-Delete blocks : {}",
-          containerData.getContainerID(), toDeleteBlocks.size());
+      LOG.debug("{}, toDeleteBlocks: {}", containerData, toDeleteBlocks.size());
 
       Handler handler = Objects.requireNonNull(ozoneContainer.getDispatcher()
           .getHandler(container.getContainerType()));
@@ -246,9 +245,7 @@ public ContainerBackgroundTaskResult deleteViaSchema1(
 
         // update count of pending deletion blocks, block count and used
         // bytes in in-memory container status.
-        containerData.decrPendingDeletionBlocks(deletedBlocksCount);
-        containerData.decrBlockCount(deletedBlocksCount);
-        containerData.decrBytesUsed(releasedBytes);
+        containerData.getStatistics().updateDeletion(releasedBytes, deletedBlocksCount, deletedBlocksCount);
         containerData.getVolume().decrementUsedSpace(releasedBytes);
         metrics.incrSuccessCount(deletedBlocksCount);
         metrics.incrSuccessBytes(releasedBytes);
@@ -336,10 +333,7 @@ private ContainerBackgroundTaskResult deleteViaTransactionStore(
         delBlocks.add(delTx);
       }
       if (delBlocks.isEmpty()) {
-        LOG.info("No transaction found in container {} with pending delete " +
-                "block count {}",
-            containerData.getContainerID(),
-            containerData.getNumPendingDeletionBlocks());
+        LOG.info("Pending block deletion not found in {}: {}", containerData, containerData.getStatistics());
         // If the container was queued for delete, it had a positive
         // pending delete block count. After checking the DB there were
         // actually no delete transactions for the container, so reset the
@@ -348,8 +342,7 @@ private ContainerBackgroundTaskResult deleteViaTransactionStore(
         return crr;
       }
 
-      LOG.debug("Container : {}, To-Delete blocks : {}",
-          containerData.getContainerID(), delBlocks.size());
+      LOG.debug("{}, delBlocks: {}", containerData, delBlocks.size());
 
       Handler handler = Objects.requireNonNull(ozoneContainer.getDispatcher()
           .getHandler(container.getContainerType()));
@@ -397,9 +390,7 @@ private ContainerBackgroundTaskResult deleteViaTransactionStore(
 
         // update count of pending deletion blocks, block count and used
         // bytes in in-memory container status and used space in volume.
-        containerData.decrPendingDeletionBlocks(deletedBlocksProcessed);
-        containerData.decrBlockCount(deletedBlocksCount);
-        containerData.decrBytesUsed(releasedBytes);
+        containerData.getStatistics().updateDeletion(releasedBytes, deletedBlocksCount, deletedBlocksProcessed);
         containerData.getVolume().decrementUsedSpace(releasedBytes);
         metrics.incrSuccessCount(deletedBlocksCount);
         metrics.incrSuccessBytes(releasedBytes);
@@ -477,13 +468,8 @@ private DeleteTransactionStats deleteTransactions(
         }
 
         if (deleted) {
-          try {
-            bytesReleased += KeyValueContainerUtil.getBlockLength(blkInfo);
-          } catch (IOException e) {
-            // TODO: handle the bytesReleased correctly for the unexpected
-            //  exception.
-            LOG.error("Failed to get block length for block {}", blkLong, e);
-          }
+          bytesReleased += KeyValueContainerUtil.getBlockLengthTryCatch(blkInfo);
+          // TODO: handle the bytesReleased correctly for the unexpected exception.
         }
       }
       deletedBlocksTxs.add(entry);
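
Both deletion paths now fold what used to be three separate decrements into
one synchronized updateDeletion(deletedBytes, deletedBlockCount,
processedBlockCount) call; the schema-1 path passes deletedBlocksCount for
both counts, while the transaction-store path passes deletedBlocksProcessed
for the pending-deletion counter. A small self-contained sketch exercising
the new methods (assumes the Statistics default constructor stays
accessible):

    import org.apache.hadoop.ozone.container.common.impl.ContainerData;

    public class DeletionUpdateDemo {
      public static void main(String[] args) {
        ContainerData.Statistics stats = new ContainerData.Statistics();
        stats.updateBlocks(1024, 4, 3);  // blockBytes=1024, blockCount=4, pending+=3
        stats.updateDeletion(512, 2, 2); // release 512 bytes, 2 blocks, 2 pending
        stats.assertBlock(512, 2, 1);    // passes: 1024-512, 4-2, 3-2
      }
    }
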
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
index ad92510428..dc942892bf 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
@@ -329,7 +329,7 @@ private void updateMetaData(KeyValueContainerData data,
       int numOfChunksPerBlock) {
     long chunkLength = 100;
     try (DBHandle metadata = BlockUtils.getDB(data, conf)) {
-      container.getContainerData().setBlockCount(numOfBlocksPerContainer);
+      container.getContainerData().getStatistics().setBlockCountForTesting(numOfBlocksPerContainer);
       // Set block count, bytes used and pending delete block count.
       metadata.getStore().getMetadataTable()
           .put(data.getBlockCountKey(), (long) numOfBlocksPerContainer);
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestKeyValueContainerData.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestKeyValueContainerData.java
index 9294209509..97798f2bb4 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestKeyValueContainerData.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestKeyValueContainerData.java
@@ -21,10 +21,10 @@
 import static org.mockito.Mockito.mock;
 
 import java.util.UUID;
-import java.util.concurrent.atomic.AtomicLong;
 import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.ozone.container.common.impl.ContainerData;
 import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
 import org.apache.hadoop.ozone.container.keyvalue.ContainerTestVersionInfo;
@@ -59,12 +59,11 @@ public void testKeyValueData(ContainerTestVersionInfo versionInfo) {
     String containerDBType = "RocksDB";
     ContainerProtos.ContainerDataProto.State state =
         ContainerProtos.ContainerDataProto.State.CLOSED;
-    AtomicLong val = new AtomicLong(0);
     UUID pipelineId = UUID.randomUUID();
     UUID datanodeId = UUID.randomUUID();
     HddsVolume vol = mock(HddsVolume.class);
 
-    KeyValueContainerData kvData = new KeyValueContainerData(containerId,
+    final KeyValueContainerData kvData = new KeyValueContainerData(containerId,
         layout,
         MAXSIZE, pipelineId.toString(), datanodeId.toString());
     kvData.setVolume(vol);
@@ -75,12 +74,10 @@ public void testKeyValueData(ContainerTestVersionInfo versionInfo) {
         .getState());
     assertEquals(0, kvData.getMetadata().size());
     assertEquals(0, kvData.getNumPendingDeletionBlocks());
-    assertEquals(val.get(), kvData.getReadBytes());
-    assertEquals(val.get(), kvData.getWriteBytes());
-    assertEquals(val.get(), kvData.getReadCount());
-    assertEquals(val.get(), kvData.getWriteCount());
-    assertEquals(val.get(), kvData.getBlockCount());
-    assertEquals(val.get(), kvData.getNumPendingDeletionBlocks());
+    final ContainerData.Statistics statistics = kvData.getStatistics();
+    statistics.assertRead(0, 0);
+    statistics.assertWrite(0, 0);
+    statistics.assertBlock(0, 0, 0);
     assertEquals(MAXSIZE, kvData.getMaxSize());
 
     kvData.setState(state);
@@ -88,11 +85,9 @@ public void testKeyValueData(ContainerTestVersionInfo versionInfo) {
     kvData.setChunksPath(path);
     kvData.setMetadataPath(path);
     kvData.setReplicaIndex(4);
-    kvData.incrReadBytes(10);
-    kvData.incrWriteBytes(10);
-    kvData.incrReadCount();
-    kvData.incrWriteCount();
-    kvData.incrBlockCount();
+    statistics.updateRead(10);
+    statistics.incrementBlockCount();
+    kvData.updateWriteStats(10, true);
     kvData.incrPendingDeletionBlocks(1);
     kvData.setSchemaVersion(
         VersionedDatanodeFeatures.SchemaV3.chooseSchemaVersion(conf));
@@ -102,12 +97,9 @@ public void testKeyValueData(ContainerTestVersionInfo versionInfo) {
     assertEquals(path, kvData.getChunksPath());
     assertEquals(path, kvData.getMetadataPath());
 
-    assertEquals(10, kvData.getReadBytes());
-    assertEquals(10, kvData.getWriteBytes());
-    assertEquals(1, kvData.getReadCount());
-    assertEquals(1, kvData.getWriteCount());
-    assertEquals(1, kvData.getBlockCount());
-    assertEquals(1, kvData.getNumPendingDeletionBlocks());
+    statistics.assertRead(10, 1);
+    statistics.assertWrite(10, 1);
+    statistics.assertBlock(0, 1, 1);
     assertEquals(pipelineId.toString(), kvData.getOriginPipelineId());
     assertEquals(datanodeId.toString(), kvData.getOriginNodeId());
     assertEquals(VersionedDatanodeFeatures.SchemaV3.chooseSchemaVersion(conf),
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
index 02f999013e..55e49af289 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
@@ -850,11 +850,9 @@ public void testOverWrite(ContainerTestVersionInfo versionInfo)
     info.addMetadata(OzoneConsts.CHUNK_OVERWRITE, "true");
     chunkManager.writeChunk(container, blockID, info, data,
         DispatcherContext.getHandleWriteChunk());
-    long bytesUsed = container.getContainerData().getBytesUsed();
-    assertEquals(datalen, bytesUsed);
-
-    long bytesWrite = container.getContainerData().getWriteBytes();
-    assertEquals(datalen * 3, bytesWrite);
+    final ContainerData.Statistics statistics = container.getContainerData().getStatistics();
+    statistics.assertWrite(datalen * 3, 3);
+    statistics.assertBlock(datalen, 0, 0);
   }
 
   /**
@@ -991,14 +989,10 @@ public void testPutBlockWithLotsOfChunks(ContainerTestVersionInfo versionInfo)
       chunkList.add(info);
     }
 
-    long bytesUsed = container.getContainerData().getBytesUsed();
-    assertEquals(totalSize, bytesUsed);
-    long writeBytes = container.getContainerData().getWriteBytes();
-    assertEquals(chunkCount * datalen, writeBytes);
-    long readCount = container.getContainerData().getReadCount();
-    assertEquals(0, readCount);
-    long writeCount = container.getContainerData().getWriteCount();
-    assertEquals(chunkCount, writeCount);
+    final ContainerData.Statistics statistics = container.getContainerData().getStatistics();
+    statistics.assertRead(0, 0);
+    statistics.assertWrite(chunkCount * datalen, chunkCount);
+    statistics.assertBlock(totalSize, 0, 0);
 
     BlockData blockData = new BlockData(blockID);
     List<ContainerProtos.ChunkInfo> chunkProtoList = new LinkedList<>();
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
index 6afcadb809..d209adbb43 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
@@ -168,7 +168,7 @@ public void testContainerCloseActionWhenFull(
           responseOne.getResult());
       verify(context, times(0))
           .addContainerActionIfAbsent(any(ContainerAction.class));
-      containerData.setBytesUsed(Double.valueOf(
+      containerData.getStatistics().setBlockBytesForTesting(Double.valueOf(
           StorageUnit.MB.toBytes(950)).longValue());
       ContainerCommandResponseProto responseTwo = hddsDispatcher
           .dispatch(getWriteChunkRequest(dd.getUuidString(), 1L, 2L), null);
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerIntegrityChecks.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerIntegrityChecks.java
index 08c44372b9..adc4b35ba1 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerIntegrityChecks.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerIntegrityChecks.java
@@ -126,9 +126,9 @@ protected KeyValueContainer createContainerWithBlocks(long containerId,
    byte[] chunkData = RandomStringUtils.secure().nextAscii(CHUNK_LEN).getBytes(UTF_8);
     ChecksumData checksumData = checksum.computeChecksum(chunkData);
 
+    final long size = totalBlocks > 0 ? CHUNKS_PER_BLOCK * CHUNK_LEN * totalBlocks : 1;
    KeyValueContainerData containerData = new KeyValueContainerData(containerId,
-        containerLayoutTestInfo.getLayout(),
-        (long) CHUNKS_PER_BLOCK * CHUNK_LEN * totalBlocks,
+        containerLayoutTestInfo.getLayout(), size,
         UUID.randomUUID().toString(), UUID.randomUUID().toString());
     KeyValueContainer container = new KeyValueContainer(containerData, conf);
     container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(),
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java
index 8228c5182d..564a24b299 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java
@@ -167,7 +167,7 @@ private KeyValueContainerData createContainer(Path dir, boolean createDir)
 
     KeyValueContainerData containerData = new KeyValueContainerData(
         id, layout,
-        -1, UUID.randomUUID().toString(), UUID.randomUUID().toString());
+        1, UUID.randomUUID().toString(), UUID.randomUUID().toString());
     containerData.setSchemaVersion(schemaVersion);
     containerData.setChunksPath(dataDir.toString());
     containerData.setMetadataPath(metaDir.toString());
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java
index 08e18cab3b..157e78ae3c 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java
@@ -379,7 +379,7 @@ public void testReplicationImportReserveSpace(ContainerLayoutVersion layout)
         ContainerLayoutVersion.FILE_PER_BLOCK, containerMaxSize, "test", "test");
     HddsVolume vol1 = (HddsVolume) volumeSet.getVolumesList().get(0);
     containerData.setVolume(vol1);
-    containerData.incrBytesUsed(containerUsedSize);
+    containerData.updateWriteStats(100, false);
     KeyValueContainer container = new KeyValueContainer(containerData, conf);
     ContainerController controllerMock = mock(ContainerController.class);
     Semaphore semaphore = new Semaphore(1);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java
index 5caab9b34d..d299503c13 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java
@@ -220,7 +220,7 @@ public void testDeleteNonEmptyContainerOnDirEmptyCheckTrue()
 
     // Set container blockCount to 0 to mock that it is empty as per RocksDB
     getContainerfromDN(hddsDatanodeService, containerId.getId())
-        .getContainerData().setBlockCount(0);
+        .getContainerData().getStatistics().setBlockCountForTesting(0);
 
     // send delete container to the datanode
     SCMCommand<?> command = new DeleteContainerCommand(containerId.getId(),
@@ -430,8 +430,7 @@ public void testDeleteNonEmptyContainerBlockTable()
     // Check the log for the error message when deleting non-empty containers
     LogCapturer logCapturer = LogCapturer.captureLogs(KeyValueHandler.class);
     GenericTestUtils.waitFor(() ->
-            logCapturer.getOutput().
-                contains("the container is not empty with blockCount"),
+            logCapturer.getOutput().contains("Received container deletion command for non-empty"),
         500,
         5 * 2000);
 

