This is an automated email from the ASF dual-hosted git repository.

siyao pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new a3b89e7318 HDDS-9159. [Snapshot] Implement snapshot disk usage: 
Referenced size (#5175)
a3b89e7318 is described below

commit a3b89e73185894f905b4ad96cd6d78429497d0c9
Author: Siyao Meng <[email protected]>
AuthorDate: Sat Sep 2 08:16:12 2023 -0700

    HDDS-9159. [Snapshot] Implement snapshot disk usage: Referenced size (#5175)
---
 .../apache/hadoop/ozone/client/OzoneSnapshot.java  |  69 ++++++++--
 .../apache/hadoop/ozone/om/helpers/QuotaUtil.java  |  39 +++++-
 .../hadoop/ozone/om/helpers/SnapshotInfo.java      | 145 ++++++++++++++++++---
 .../ozone/om/helpers/TestOmSnapshotInfo.java       |  37 ++++++
 .../hadoop/fs/ozone/TestOzoneFileSystem.java       |  12 +-
 .../hadoop/fs/ozone/TestOzoneFsSnapshot.java       |  14 +-
 .../src/main/proto/OmClientProtocol.proto          |  11 +-
 .../hadoop/ozone/om/OmMetadataManagerImpl.java     |   6 +-
 .../request/snapshot/OMSnapshotCreateRequest.java  |  59 +++++++++
 .../ozone/om/request/OMRequestTestUtils.java       |  19 +++
 .../TestOMDirectoriesPurgeRequestAndResponse.java  |   9 +-
 .../snapshot/TestOMSnapshotCreateRequest.java      |  35 +++--
 .../snapshot/TestOMSnapshotCreateResponse.java     |  11 +-
 .../ozone/om/service/TestQuotaRepairTask.java      |  18 +++
 .../ozone/BasicRootedOzoneClientAdapterImpl.java   |   5 +-
 15 files changed, 438 insertions(+), 51 deletions(-)

diff --git 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneSnapshot.java
 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneSnapshot.java
index 91cf655090..8e041d95ec 100644
--- 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneSnapshot.java
+++ 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneSnapshot.java
@@ -34,17 +34,26 @@ public class OzoneSnapshot {
   private final UUID snapshotId;  // UUID
   private final String snapshotPath; // snapshot mask
   private final String checkpointDir;
+  private final long referencedSize;
+  private final long referencedReplicatedSize;
+  private final long exclusiveSize;
+  private final long exclusiveReplicatedSize;
 
   /**
    * Constructs OzoneSnapshot from SnapshotInfo.
-   * @param volumeName Name of the Volume the snapshot belongs to.
-   * @param bucketName Name of the Bucket the snapshot belongs to.
-   * @param name Name of the snapshot.
-   * @param creationTime Creation time of the snapshot.
+   *
+   * @param volumeName     Name of the Volume the snapshot belongs to.
+   * @param bucketName     Name of the Bucket the snapshot belongs to.
+   * @param name           Name of the snapshot.
+   * @param creationTime   Creation time of the snapshot.
    * @param snapshotStatus Status of the snapshot.
-   * @param snapshotId ID of the snapshot.
-   * @param snapshotPath Path of the snapshot.
-   * @param checkpointDir Snapshot checkpoint directory.
+   * @param snapshotId     ID of the snapshot.
+   * @param snapshotPath   Path of the snapshot.
+   * @param checkpointDir  Snapshot checkpoint directory.
+   * @param referencedSize Snapshot referenced size.
+   * @param referencedReplicatedSize Snapshot referenced size after 
replication.
+   * @param exclusiveSize  Snapshot exclusive size.
+   * @param exclusiveReplicatedSize  Snapshot exclusive size after replication.
    */
   @SuppressWarnings("parameternumber")
   public OzoneSnapshot(String volumeName,
@@ -54,7 +63,11 @@ public class OzoneSnapshot {
                        SnapshotStatus snapshotStatus,
                        UUID snapshotId,
                        String snapshotPath,
-                       String checkpointDir) {
+                       String checkpointDir,
+                       long referencedSize,
+                       long referencedReplicatedSize,
+                       long exclusiveSize,
+                       long exclusiveReplicatedSize) {
     this.volumeName = volumeName;
     this.bucketName = bucketName;
     this.name = name;
@@ -63,6 +76,10 @@ public class OzoneSnapshot {
     this.snapshotId = snapshotId;
     this.snapshotPath = snapshotPath;
     this.checkpointDir = checkpointDir;
+    this.referencedSize = referencedSize;
+    this.referencedReplicatedSize = referencedReplicatedSize;
+    this.exclusiveSize = exclusiveSize;
+    this.exclusiveReplicatedSize = exclusiveReplicatedSize;
   }
 
   /**
@@ -136,6 +153,35 @@ public class OzoneSnapshot {
   public String getCheckpointDir() {
     return checkpointDir;
   }
+
+  /**
+   * @return Referenced size of the snapshot.
+   */
+  public long getReferencedSize() {
+    return referencedSize;
+  }
+
+  /**
+   * @return Reference size after replication/EC of the snapshot
+   */
+  public long getReferencedReplicatedSize() {
+    return referencedReplicatedSize;
+  }
+
+  /**
+   * @return Exclusive size of the snapshot.
+   */
+  public long getExclusiveSize() {
+    return exclusiveSize;
+  }
+
+  /**
+   * @return Exclusive size after replication/EC of the snapshot.
+   */
+  public long getExclusiveReplicatedSize() {
+    return exclusiveReplicatedSize;
+  }
+
   public static OzoneSnapshot fromSnapshotInfo(SnapshotInfo snapshotInfo) {
     return new OzoneSnapshot(
         snapshotInfo.getVolumeName(),
@@ -145,6 +191,11 @@ public class OzoneSnapshot {
         snapshotInfo.getSnapshotStatus(),
         snapshotInfo.getSnapshotId(),
         snapshotInfo.getSnapshotPath(),
-        snapshotInfo.getCheckpointDir());
+        snapshotInfo.getCheckpointDir(),
+        snapshotInfo.getReferencedSize(),
+        snapshotInfo.getReferencedReplicatedSize(),
+        snapshotInfo.getExclusiveSize(),
+        snapshotInfo.getExclusiveReplicatedSize()
+    );
   }
 }
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/QuotaUtil.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/QuotaUtil.java
index 09e0440988..6642b6aa50 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/QuotaUtil.java
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/QuotaUtil.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.ozone.om.helpers;
 import org.apache.hadoop.hdds.client.ECReplicationConfig;
 import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.EC;
 import static 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.RATIS;
@@ -29,8 +31,11 @@ import static 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.R
  */
 public final class QuotaUtil {
 
+  private static final Logger LOG =
+      LoggerFactory.getLogger(QuotaUtil.class);
+
   private QuotaUtil() {
-  };
+  }
 
   /**
    * From the used space and replicationConfig, calculate the expected
@@ -42,10 +47,10 @@ public final class QuotaUtil {
   public static long getReplicatedSize(
       long dataSize, ReplicationConfig repConfig) {
     if (repConfig.getReplicationType() == RATIS) {
-      return dataSize * ((RatisReplicationConfig)repConfig)
+      return dataSize * ((RatisReplicationConfig) repConfig)
           .getReplicationFactor().getNumber();
     } else if (repConfig.getReplicationType() == EC) {
-      ECReplicationConfig rc = (ECReplicationConfig)repConfig;
+      ECReplicationConfig rc = (ECReplicationConfig) repConfig;
       int dataStripeSize = rc.getData() * rc.getEcChunkSize();
       long fullStripes = dataSize / dataStripeSize;
       long partialFirstChunk =
@@ -55,8 +60,36 @@ public final class QuotaUtil {
               + partialFirstChunk * rc.getParity();
       return dataSize + replicationOverhead;
     } else {
+      LOG.warn("Unknown replication type '{}'. Returning original data size.",
+          repConfig.getReplicationType());
       return dataSize;
     }
   }
 
+  /**
+   * Get an estimated data size (before replication) from the replicated size.
+   * An (inaccurate) reverse of getReplicatedSize().
+   * @param replicatedSize size after replication.
+   * @param repConfig The replicationConfig used to store the data.
+   * @return Data size before replication.
+   */
+  public static long getDataSize(long replicatedSize,
+                                 ReplicationConfig repConfig) {
+    if (repConfig.getReplicationType() == RATIS) {
+      final int ratisReplicationFactor = ((RatisReplicationConfig) repConfig)
+          .getReplicationFactor().getNumber();
+      // May not be divisible. But it's fine to ignore remainder in our use 
case
+      return replicatedSize / ratisReplicationFactor;
+    } else if (repConfig.getReplicationType() == EC) {
+      ECReplicationConfig rc = (ECReplicationConfig) repConfig;
+      // In the case of EC, we won't know if keys have partial chunks or not,
+      // so we assume no partial chunks as an estimate.
+      return replicatedSize * rc.getData() / rc.getRequiredNodes();
+    } else {
+      LOG.warn("Unknown replication type '{}'. Returning replicatedSize.",
+          repConfig.getReplicationType());
+      return replicatedSize;
+    }
+  }
+
 }
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java
index bdb642d8fb..ec429bc6a6 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java
@@ -123,8 +123,11 @@ public final class SnapshotInfo implements Auditable, 
CopyObject<SnapshotInfo> {
    */
   private long dbTxSequenceNumber;
   private boolean deepClean;
-
   private boolean sstFiltered;
+  private long referencedSize;
+  private long referencedReplicatedSize;
+  private long exclusiveSize;
+  private long exclusiveReplicatedSize;
 
   /**
    * Private constructor, constructed via builder.
@@ -142,6 +145,10 @@ public final class SnapshotInfo implements Auditable, 
CopyObject<SnapshotInfo> {
    * @param checkpointDir - Snapshot checkpoint directory.
    * @param dbTxSequenceNumber - RDB latest transaction sequence number.
    * @param deepCleaned - To be deep cleaned status for snapshot.
+   * @param referencedSize - Snapshot referenced size.
+   * @param referencedReplicatedSize - Snapshot referenced size w/ replication.
+   * @param exclusiveSize - Snapshot exclusive size.
+   * @param exclusiveReplicatedSize - Snapshot exclusive size w/ replication.
    */
   @SuppressWarnings("checkstyle:ParameterNumber")
   private SnapshotInfo(UUID snapshotId,
@@ -157,7 +164,11 @@ public final class SnapshotInfo implements Auditable, 
CopyObject<SnapshotInfo> {
                        String checkpointDir,
                        long dbTxSequenceNumber,
                        boolean deepCleaned,
-                       boolean sstFiltered) {
+                       boolean sstFiltered,
+                       long referencedSize,
+                       long referencedReplicatedSize,
+                       long exclusiveSize,
+                       long exclusiveReplicatedSize) {
     this.snapshotId = snapshotId;
     this.name = name;
     this.volumeName = volumeName;
@@ -172,6 +183,10 @@ public final class SnapshotInfo implements Auditable, 
CopyObject<SnapshotInfo> {
     this.dbTxSequenceNumber = dbTxSequenceNumber;
     this.deepClean = deepCleaned;
     this.sstFiltered = sstFiltered;
+    this.referencedSize = referencedSize;
+    this.referencedReplicatedSize = referencedReplicatedSize;
+    this.exclusiveSize = exclusiveSize;
+    this.exclusiveReplicatedSize = exclusiveReplicatedSize;
   }
 
   public void setName(String name) {
@@ -289,7 +304,11 @@ public final class SnapshotInfo implements Auditable, 
CopyObject<SnapshotInfo> {
         .setSnapshotPath(snapshotPath)
         .setCheckpointDir(checkpointDir)
         .setDeepClean(deepClean)
-        .setSstFiltered(sstFiltered);
+        .setSstFiltered(sstFiltered)
+        .setReferencedSize(referencedSize)
+        .setReferencedReplicatedSize(referencedReplicatedSize)
+        .setExclusiveSize(exclusiveSize)
+        .setExclusiveReplicatedSize(exclusiveReplicatedSize);
   }
 
   /**
@@ -309,8 +328,11 @@ public final class SnapshotInfo implements Auditable, 
CopyObject<SnapshotInfo> {
     private String checkpointDir;
     private long dbTxSequenceNumber;
     private boolean deepClean;
-
     private boolean sstFiltered;
+    private long referencedSize;
+    private long referencedReplicatedSize;
+    private long exclusiveSize;
+    private long exclusiveReplicatedSize;
 
     public Builder() {
       // default values
@@ -387,6 +409,26 @@ public final class SnapshotInfo implements Auditable, 
CopyObject<SnapshotInfo> {
       return this;
     }
 
+    public Builder setReferencedSize(long referencedSize) {
+      this.referencedSize = referencedSize;
+      return this;
+    }
+
+    public Builder setReferencedReplicatedSize(long referencedReplicatedSize) {
+      this.referencedReplicatedSize = referencedReplicatedSize;
+      return this;
+    }
+
+    public Builder setExclusiveSize(long exclusiveSize) {
+      this.exclusiveSize = exclusiveSize;
+      return this;
+    }
+
+    public Builder setExclusiveReplicatedSize(long exclusiveReplicatedSize) {
+      this.exclusiveReplicatedSize = exclusiveReplicatedSize;
+      return this;
+    }
+
     public SnapshotInfo build() {
       Preconditions.checkNotNull(name);
       return new SnapshotInfo(
@@ -403,7 +445,11 @@ public final class SnapshotInfo implements Auditable, 
CopyObject<SnapshotInfo> {
           checkpointDir,
           dbTxSequenceNumber,
           deepClean,
-          sstFiltered
+          sstFiltered,
+          referencedSize,
+          referencedReplicatedSize,
+          exclusiveSize,
+          exclusiveReplicatedSize
       );
     }
   }
@@ -414,14 +460,18 @@ public final class SnapshotInfo implements Auditable, 
CopyObject<SnapshotInfo> {
   public OzoneManagerProtocolProtos.SnapshotInfo getProtobuf() {
     OzoneManagerProtocolProtos.SnapshotInfo.Builder sib =
         OzoneManagerProtocolProtos.SnapshotInfo.newBuilder()
-        .setSnapshotID(toProtobuf(snapshotId))
-        .setName(name)
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setSnapshotStatus(snapshotStatus.toProto())
-        .setCreationTime(creationTime)
-        .setDeletionTime(deletionTime)
-        .setSstFiltered(sstFiltered);
+            .setSnapshotID(toProtobuf(snapshotId))
+            .setName(name)
+            .setVolumeName(volumeName)
+            .setBucketName(bucketName)
+            .setSnapshotStatus(snapshotStatus.toProto())
+            .setCreationTime(creationTime)
+            .setDeletionTime(deletionTime)
+            .setSstFiltered(sstFiltered)
+            .setReferencedSize(referencedSize)
+            .setReferencedReplicatedSize(referencedReplicatedSize)
+            .setExclusiveSize(exclusiveSize)
+            .setExclusiveReplicatedSize(exclusiveReplicatedSize);
 
     if (pathPreviousSnapshotId != null) {
       sib.setPathPreviousSnapshotID(toProtobuf(pathPreviousSnapshotId));
@@ -474,6 +524,26 @@ public final class SnapshotInfo implements Auditable, 
CopyObject<SnapshotInfo> {
       osib.setSstFiltered(snapshotInfoProto.getSstFiltered());
     }
 
+    if (snapshotInfoProto.hasReferencedSize()) {
+      osib.setReferencedSize(
+          snapshotInfoProto.getReferencedSize());
+    }
+
+    if (snapshotInfoProto.hasReferencedReplicatedSize()) {
+      osib.setReferencedReplicatedSize(
+          snapshotInfoProto.getReferencedReplicatedSize());
+    }
+
+    if (snapshotInfoProto.hasExclusiveSize()) {
+      osib.setExclusiveSize(
+          snapshotInfoProto.getExclusiveSize());
+    }
+
+    if (snapshotInfoProto.hasExclusiveReplicatedSize()) {
+      osib.setExclusiveReplicatedSize(
+          snapshotInfoProto.getExclusiveReplicatedSize());
+    }
+
     osib.setSnapshotPath(snapshotInfoProto.getSnapshotPath())
         .setCheckpointDir(snapshotInfoProto.getCheckpointDir())
         .setDbTxSequenceNumber(snapshotInfoProto.getDbTxSequenceNumber());
@@ -526,6 +596,38 @@ public final class SnapshotInfo implements Auditable, 
CopyObject<SnapshotInfo> {
         + OM_KEY_PREFIX + snapshotName;
   }
 
+  public void setReferencedSize(long referencedSize) {
+    this.referencedSize = referencedSize;
+  }
+
+  public long getReferencedSize() {
+    return referencedSize;
+  }
+
+  public void setReferencedReplicatedSize(long referencedReplicatedSize) {
+    this.referencedReplicatedSize = referencedReplicatedSize;
+  }
+
+  public long getReferencedReplicatedSize() {
+    return referencedReplicatedSize;
+  }
+
+  public void setExclusiveSize(long exclusiveSize) {
+    this.exclusiveSize = exclusiveSize;
+  }
+
+  public long getExclusiveSize() {
+    return exclusiveSize;
+  }
+
+  public void setExclusiveReplicatedSize(long exclusiveReplicatedSize) {
+    this.exclusiveReplicatedSize = exclusiveReplicatedSize;
+  }
+
+  public long getExclusiveReplicatedSize() {
+    return exclusiveReplicatedSize;
+  }
+
   /**
    * Generate default name of snapshot, (used if user doesn't provide one).
    */
@@ -586,7 +688,13 @@ public final class SnapshotInfo implements Auditable, 
CopyObject<SnapshotInfo> {
         Objects.equals(
             globalPreviousSnapshotId, that.globalPreviousSnapshotId) &&
         snapshotPath.equals(that.snapshotPath) &&
-        checkpointDir.equals(that.checkpointDir);
+        checkpointDir.equals(that.checkpointDir) &&
+        deepClean == that.deepClean &&
+        sstFiltered == that.sstFiltered &&
+        referencedSize == that.referencedSize &&
+        referencedReplicatedSize == that.referencedReplicatedSize &&
+        exclusiveSize == that.exclusiveSize &&
+        exclusiveReplicatedSize == that.exclusiveReplicatedSize;
   }
 
   @Override
@@ -594,7 +702,10 @@ public final class SnapshotInfo implements Auditable, 
CopyObject<SnapshotInfo> {
     return Objects.hash(snapshotId, name, volumeName, bucketName,
         snapshotStatus,
         creationTime, deletionTime, pathPreviousSnapshotId,
-        globalPreviousSnapshotId, snapshotPath, checkpointDir);
+        globalPreviousSnapshotId, snapshotPath, checkpointDir,
+        deepClean, sstFiltered,
+        referencedSize, referencedReplicatedSize,
+        exclusiveSize, exclusiveReplicatedSize);
   }
 
   /**
@@ -617,6 +728,10 @@ public final class SnapshotInfo implements Auditable, 
CopyObject<SnapshotInfo> {
         .setDbTxSequenceNumber(dbTxSequenceNumber)
         .setDeepClean(deepClean)
         .setSstFiltered(sstFiltered)
+        .setReferencedSize(referencedSize)
+        .setReferencedReplicatedSize(referencedReplicatedSize)
+        .setExclusiveSize(exclusiveSize)
+        .setExclusiveReplicatedSize(exclusiveReplicatedSize)
         .build();
   }
 }
diff --git 
a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmSnapshotInfo.java
 
b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmSnapshotInfo.java
index c2224b3f08..bbc416150c 100644
--- 
a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmSnapshotInfo.java
+++ 
b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmSnapshotInfo.java
@@ -67,6 +67,11 @@ public class TestOmSnapshotInfo {
         .setCheckpointDir(CHECKPOINT_DIR)
         .setDbTxSequenceNumber(DB_TX_SEQUENCE_NUMBER)
         .setDeepClean(true)
+        .setSstFiltered(false)
+        .setReferencedSize(2000L)
+        .setReferencedReplicatedSize(6000L)
+        .setExclusiveSize(1000L)
+        .setExclusiveReplicatedSize(3000L)
         .build();
   }
 
@@ -86,6 +91,10 @@ public class TestOmSnapshotInfo {
         .setDbTxSequenceNumber(DB_TX_SEQUENCE_NUMBER)
         .setDeepClean(true)
         .setSstFiltered(false)
+        .setReferencedSize(2000L)
+        .setReferencedReplicatedSize(6000L)
+        .setExclusiveSize(1000L)
+        .setExclusiveReplicatedSize(3000L)
         .build();
   }
 
@@ -117,6 +126,19 @@ public class TestOmSnapshotInfo {
         snapshotInfoEntryActual.getSnapshotStatus());
     Assert.assertEquals(snapshotInfoEntryExpected.getDbTxSequenceNumber(),
         snapshotInfoEntryActual.getDbTxSequenceNumber());
+    Assert.assertEquals(snapshotInfoEntryExpected.getDeepClean(),
+        snapshotInfoEntryActual.getDeepClean());
+    Assert.assertEquals(snapshotInfoEntryExpected.getSstFiltered(),
+        snapshotInfoEntryActual.getSstFiltered());
+    Assert.assertEquals(snapshotInfoEntryExpected.getReferencedSize(),
+        snapshotInfoEntryActual.getReferencedSize());
+    
Assert.assertEquals(snapshotInfoEntryExpected.getReferencedReplicatedSize(),
+        snapshotInfoEntryActual.getReferencedReplicatedSize());
+    Assert.assertEquals(snapshotInfoEntryExpected.getExclusiveSize(),
+        snapshotInfoEntryActual.getExclusiveSize());
+    Assert.assertEquals(snapshotInfoEntryExpected.getExclusiveReplicatedSize(),
+        snapshotInfoEntryActual.getExclusiveReplicatedSize());
+
     Assert.assertEquals(snapshotInfoEntryExpected, snapshotInfoEntryActual);
   }
 
@@ -138,6 +160,21 @@ public class TestOmSnapshotInfo {
         snapshotInfoActual.getBucketName());
     Assert.assertEquals(snapshotInfoExpected.getSnapshotStatus(),
         snapshotInfoActual.getSnapshotStatus());
+    Assert.assertEquals(snapshotInfoExpected.getDbTxSequenceNumber(),
+        snapshotInfoActual.getDbTxSequenceNumber());
+    Assert.assertEquals(snapshotInfoExpected.getDeepClean(),
+        snapshotInfoActual.getDeepClean());
+    Assert.assertEquals(snapshotInfoExpected.isSstFiltered(),
+        snapshotInfoActual.isSstFiltered());
+    Assert.assertEquals(snapshotInfoExpected.getReferencedSize(),
+        snapshotInfoActual.getReferencedSize());
+    Assert.assertEquals(snapshotInfoExpected.getReferencedReplicatedSize(),
+        snapshotInfoActual.getReferencedReplicatedSize());
+    Assert.assertEquals(snapshotInfoExpected.getExclusiveSize(),
+        snapshotInfoActual.getExclusiveSize());
+    Assert.assertEquals(snapshotInfoExpected.getExclusiveReplicatedSize(),
+        snapshotInfoActual.getExclusiveReplicatedSize());
+
     Assert.assertEquals(snapshotInfoExpected, snapshotInfoActual);
   }
 
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
index 17100cdd38..4525595f87 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
@@ -155,7 +155,7 @@ public class TestOzoneFileSystem {
    * Set a timeout for each test.
    */
   @Rule
-  public Timeout timeout = Timeout.seconds(300);
+  public Timeout timeout = Timeout.seconds(600);
 
   private static final Logger LOG =
       LoggerFactory.getLogger(TestOzoneFileSystem.class);
@@ -802,8 +802,18 @@ public class TestOzoneFileSystem {
       return;
     }
     deleteRootRecursively(fileStatuses);
+
+    // Waiting for double buffer flush before calling listStatus() again
+    // seem to have mitigated the flakiness in cleanup(), but at the cost of
+    // almost doubling the test run time. M1 154s->283s (all 4 sets of params)
+    cluster.getOzoneManager().awaitDoubleBufferFlush();
+    // TODO: Investigate whether listStatus() is correctly iterating cache.
+
     fileStatuses = fs.listStatus(ROOT);
     if (fileStatuses != null) {
+      for (FileStatus fileStatus : fileStatuses) {
+        LOG.error("Unexpected file, should have been deleted: {}", fileStatus);
+      }
       Assert.assertEquals(
           "Delete root failed!", 0, fileStatuses.length);
     }
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java
index 62ed9a800b..8b1b2adfdf 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java
@@ -21,12 +21,15 @@ import java.io.File;
 import java.io.IOException;
 import java.io.PrintStream;
 import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.Path;
 import java.nio.file.Paths;
 import java.util.UUID;
 import java.util.stream.Stream;
 
 import com.google.common.base.Strings;
 
+import org.apache.commons.io.FileUtils;
 import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
@@ -45,6 +48,7 @@ import org.junit.jupiter.params.provider.Arguments;
 import org.junit.jupiter.params.provider.MethodSource;
 import org.junit.jupiter.params.provider.ValueSource;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
 import static org.apache.hadoop.fs.FileSystem.FS_DEFAULT_NAME_KEY;
 import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
 import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME;
@@ -258,10 +262,12 @@ public class TestOzoneFsSnapshot {
     String newKey = "key-" + RandomStringUtils.randomNumeric(5);
     String newKeyPath = BUCKET_PATH + OM_KEY_PREFIX + newKey;
 
-    // Create new key, while the old one
-    // might be deleted from a previous test.
+    // Write a non-zero byte key.
+    Path tempFile = Files.createTempFile("testFsLsSnapshot-", "any-suffix");
+    FileUtils.write(tempFile.toFile(), "random data", UTF_8);
     execShellCommandAndGetOutput(0,
-        new String[]{"-touch", newKeyPath});
+        new String[]{"-put", tempFile.toString(), newKeyPath});
+    Files.deleteIfExists(tempFile);
 
     // Create snapshot
     String snapshotName = createSnapshot();
@@ -346,7 +352,7 @@ public class TestOzoneFsSnapshot {
                     "testsnap",
                     "Snapshot does not exist",
                     1),
-            Arguments.of("2nd case: invalid bucekt path",
+            Arguments.of("2nd case: invalid bucket path",
                     invalidBucketPath,
                     "testsnap",
                     "No such file or directory",
diff --git 
a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto 
b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
index 2d7adffaa3..ace7028128 100644
--- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
+++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
@@ -820,7 +820,16 @@ message SnapshotInfo {
   optional int64 dbTxSequenceNumber = 12;
   optional bool deepClean = 13;
   optional bool sstFiltered = 14;
- }
+  // snapshot reference size before any key replication or EC
+  optional uint64 referencedSize = 15;
+  // snapshot reference size after replication
+  optional uint64 referencedReplicatedSize = 16;
+  // snapshot exclusive size before any key replication or EC
+  optional uint64 exclusiveSize = 17;
+  // snapshot exclusive size after replication
+  optional uint64 exclusiveReplicatedSize = 18;
+  // note: shared sizes can be calculated from: referenced - exclusive
+}
 
 message SnapshotDiffJobProto {
   optional uint64 creationTime = 1;
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
index 748c9094d7..1bbe8bbb5f 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
@@ -189,7 +189,7 @@ public class OmMetadataManagerImpl implements 
OMMetadataManager,
    * 
|-------------------------------------------------------------------------|
    * |  Column Family        |        VALUE                                    
|
    * 
|-------------------------------------------------------------------------|
-   * |  snapshotInfoTable    | /volume/bucket/snapshotName -> SnapshotInfo     
|
+   * | snapshotInfoTable     | /volume/bucket/snapshotName -> SnapshotInfo     
|
    * 
|-------------------------------------------------------------------------|
    * | snapshotRenamedTable  | /volumeName/bucketName/objectID -> One of:      
|
    * |                       |  1. /volumeId/bucketId/parentId/dirName         
|
@@ -728,6 +728,8 @@ public class OmMetadataManagerImpl implements 
OMMetadataManager,
         String.class, OmDBTenantState.class);
     checkTableStatus(tenantStateTable, TENANT_STATE_TABLE, addCacheMetrics);
 
+    // TODO: [SNAPSHOT] Consider FULL_CACHE for snapshotInfoTable since
+    //  exclusiveSize in SnapshotInfo can be frequently updated.
     // path -> snapshotInfo (snapshot info for snapshot)
     snapshotInfoTable = this.store.getTable(SNAPSHOT_INFO_TABLE,
         String.class, SnapshotInfo.class);
@@ -1355,7 +1357,7 @@ public class OmMetadataManagerImpl implements 
OMMetadataManager,
       throws IOException {
     try (TableIterator<String, ? extends KeyValue<String, SnapshotInfo>>
              snapshotIter = snapshotInfoTable.iterator()) {
-      KeyValue< String, SnapshotInfo> snapshotinfo;
+      KeyValue<String, SnapshotInfo> snapshotinfo;
       snapshotIter.seek(previous);
       while (snapshotIter.hasNext() && count < maxListResult) {
         snapshotinfo = snapshotIter.next();
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotCreateRequest.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotCreateRequest.java
index d5d261c7f0..9900ca9340 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotCreateRequest.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotCreateRequest.java
@@ -18,17 +18,23 @@
 
 package org.apache.hadoop.ozone.om.request.snapshot;
 
+import org.apache.hadoop.hdds.client.DefaultReplicationConfig;
+import org.apache.hadoop.hdds.client.ReplicationConfig;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.utils.db.RDBStore;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.audit.AuditLogger;
 import org.apache.hadoop.ozone.audit.OMAction;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.OMMetrics;
 import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.SnapshotChainManager;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.QuotaUtil;
 import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
 import org.apache.hadoop.ozone.om.request.OMClientRequest;
@@ -160,6 +166,22 @@ public class OMSnapshotCreateRequest extends 
OMClientRequest {
               .getLatestSequenceNumber();
       snapshotInfo.setDbTxSequenceNumber(dbLatestSequenceNumber);
 
+      // Snapshot referenced size should be bucket's used bytes
+      OmBucketInfo omBucketInfo =
+          getBucketInfo(omMetadataManager, volumeName, bucketName);
+      snapshotInfo.setReferencedReplicatedSize(omBucketInfo.getUsedBytes());
+
+      // Snapshot referenced size in this case is an *estimate* inferred from
+      // the bucket default replication policy right now.
+      // This may well not be the actual sum of all key data sizes in this
+      // bucket because each key can have its own replication policy,
+      // depending on the choice of the client at the time of writing that key.
+      // And we will NOT do an O(n) walk over the keyTable (fileTable) here
+      // because it is a design goal of CreateSnapshot to be an O(1) operation.
+      // TODO: [SNAPSHOT] Assign actual data size once we have the
+      //  pre-replicated key size counter in OmBucketInfo.
+      snapshotInfo.setReferencedSize(estimateBucketDataSize(omBucketInfo));
+
       addSnapshotInfoToSnapshotChainAndCache(omMetadataManager,
           transactionLogIndex);
 
@@ -281,4 +303,41 @@ public class OMSnapshotCreateRequest extends 
OMClientRequest {
           info, exception);
     }
   }
+
+  /**
+   * Same as OMKeyRequest#getBucketInfo.
+   */
+  protected OmBucketInfo getBucketInfo(OMMetadataManager omMetadataManager,
+                                       String volume, String bucket) {
+    String bucketKey = omMetadataManager.getBucketKey(volume, bucket);
+
+    CacheValue<OmBucketInfo> value = omMetadataManager.getBucketTable()
+        .getCacheValue(new CacheKey<>(bucketKey));
+
+    return value != null ? value.getCacheValue() : null;
+  }
+
+  /**
+   * Estimate the sum of data sizes of all keys in the bucket by dividing
+   * bucket used size (w/ replication) by the replication factor of the bucket.
+   * @param bucketInfo OmBucketInfo
+   */
+  private long estimateBucketDataSize(OmBucketInfo bucketInfo) {
+    DefaultReplicationConfig defRC = bucketInfo.getDefaultReplicationConfig();
+    final ReplicationConfig rc;
+    if (defRC == null) {
+      // Note: A lot of tests are not setting bucket DefaultReplicationConfig,
+      //  sometimes intentionally.
+      //  Fall back to config default and print warning level log.
+      rc = ReplicationConfig.getDefault(new OzoneConfiguration());
+      LOG.warn("DefaultReplicationConfig is not correctly set in " +
+          "OmBucketInfo for volume '{}' bucket '{}'. " +
+          "Falling back to config default '{}'",
+          bucketInfo.getVolumeName(), bucketInfo.getBucketName(), rc);
+    } else {
+      rc = defRC.getReplicationConfig();
+    }
+    return QuotaUtil.getDataSize(bucketInfo.getUsedBytes(), rc);
+  }
+
 }
diff --git 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java
 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java
index c830daffb2..e75125d691 100644
--- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java
+++ 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java
@@ -325,13 +325,32 @@ public final class OMRequestTestUtils {
     } else {
       String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
           keyName);
+
+      // Simulate bucket quota (usage) update done in OMKeyCommitRequest
+      String bucketKey = omMetadataManager.getBucketKey(volumeName, 
bucketName);
+      OmBucketInfo omBucketInfo = omMetadataManager.getBucketTable().get(
+          bucketKey);
+      // omBucketInfo can be null because some mocked tests don't use the table
+      if (omBucketInfo != null) {
+        omBucketInfo.incrUsedBytes(omKeyInfo.getReplicatedSize());
+      }
+
       if (addToCache) {
         omMetadataManager.getKeyTable(getDefaultBucketLayout())
             .addCacheEntry(new CacheKey<>(ozoneKey),
                 CacheValue.get(trxnLogIndex, omKeyInfo));
+        if (omBucketInfo != null) {
+          omMetadataManager.getBucketTable()
+              .addCacheEntry(new CacheKey<>(bucketKey),
+                  CacheValue.get(trxnLogIndex + 1, omBucketInfo));
+        }
       }
       omMetadataManager.getKeyTable(getDefaultBucketLayout())
           .put(ozoneKey, omKeyInfo);
+
+      if (omBucketInfo != null) {
+        omMetadataManager.getBucketTable().put(bucketKey, omBucketInfo);
+      }
     }
   }
 
diff --git 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java
 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java
index 7e89ea943e..415b74b618 100644
--- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java
+++ 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java
@@ -233,7 +233,9 @@ public class TestOMDirectoriesPurgeRequestAndResponse 
extends TestOMKeyRequest {
         omMetadataManager);
     omBucketInfo = omMetadataManager.getBucketTable().get(
         bucketKey);
-    omBucketInfo.incrUsedBytes(1000);
+    final long bucketInitialUsedBytes = omBucketInfo.getUsedBytes();
+
+    omBucketInfo.incrUsedBytes(1000L);
     omBucketInfo.incrUsedNamespace(100L);
     omMetadataManager.getBucketTable().addCacheEntry(new CacheKey<>(bucketKey),
         CacheValue.get(1L, omBucketInfo));
@@ -241,7 +243,8 @@ public class TestOMDirectoriesPurgeRequestAndResponse 
extends TestOMKeyRequest {
 
     // prevalidate bucket
     omBucketInfo = omMetadataManager.getBucketTable().get(bucketKey);
-    Assert.assertEquals(1000L, omBucketInfo.getUsedBytes());
+    final long bucketExpectedUsedBytes = bucketInitialUsedBytes + 1000L;
+    Assert.assertEquals(bucketExpectedUsedBytes, omBucketInfo.getUsedBytes());
     
     // perform delete
     OMDirectoriesPurgeResponseWithFSO omClientResponse
@@ -252,7 +255,7 @@ public class TestOMDirectoriesPurgeRequestAndResponse 
extends TestOMKeyRequest {
     // validate bucket info, no change expected
     omBucketInfo = omMetadataManager.getBucketTable().get(
         bucketKey);
-    Assert.assertEquals(1000L, omBucketInfo.getUsedBytes());
+    Assert.assertEquals(bucketExpectedUsedBytes, omBucketInfo.getUsedBytes());
 
     performBatchOperationCommit(omClientResponse);
 
diff --git 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotCreateRequest.java
 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotCreateRequest.java
index b5b2c362a9..c4ec690c61 100644
--- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotCreateRequest.java
+++ 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotCreateRequest.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
@@ -187,6 +188,17 @@ public class TestOMSnapshotCreateRequest {
         bucketName, snapshotName1);
     OMSnapshotCreateRequest omSnapshotCreateRequest = doPreExecute(omRequest);
     String key = getTableKey(volumeName, bucketName, snapshotName1);
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+
+    // Add a 1000-byte key to the bucket
+    OmKeyInfo key1 = addKey("key-testValidateAndUpdateCache", 12345L);
+    addKeyToTable(key1);
+
+    OmBucketInfo omBucketInfo = omMetadataManager.getBucketTable().get(
+        bucketKey);
+    long bucketDataSize = key1.getDataSize();
+    long bucketUsedBytes = omBucketInfo.getUsedBytes();
+    assertEquals(key1.getReplicatedSize(), bucketUsedBytes);
 
     // Value in cache should be null as of now.
     assertNull(omMetadataManager.getSnapshotInfoTable().get(key));
@@ -204,17 +216,24 @@ public class TestOMSnapshotCreateRequest {
     assertEquals(OK, omResponse.getStatus());
 
     // verify table data with response data.
-    SnapshotInfo snapshotInfoFromProto = getFromProtobuf(omClientResponse
-        .getOMResponse()
-        .getCreateSnapshotResponse()
-        .getSnapshotInfo()
-    );
+    OzoneManagerProtocolProtos.SnapshotInfo snapshotInfoProto =
+        omClientResponse
+            .getOMResponse()
+            .getCreateSnapshotResponse()
+            .getSnapshotInfo();
+
+    assertEquals(bucketDataSize, snapshotInfoProto.getReferencedSize());
+    assertEquals(bucketUsedBytes,
+        snapshotInfoProto.getReferencedReplicatedSize());
 
-    // Get value form cache
+    SnapshotInfo snapshotInfoFromProto = getFromProtobuf(snapshotInfoProto);
+
+    // Get value from cache
     SnapshotInfo snapshotInfoInCache =
         omMetadataManager.getSnapshotInfoTable().get(key);
     assertNotNull(snapshotInfoInCache);
     assertEquals(snapshotInfoFromProto, snapshotInfoInCache);
+
     assertEquals(0, omMetrics.getNumSnapshotCreateFails());
     assertEquals(1, omMetrics.getNumSnapshotActive());
     assertEquals(1, omMetrics.getNumSnapshotCreates());
@@ -370,12 +389,12 @@ public class TestOMSnapshotCreateRequest {
 
   private OmKeyInfo addKey(String keyName, long objectId) {
     return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
-        HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE,
+        HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE,
         objectId);
   }
 
   protected String addKeyToTable(OmKeyInfo keyInfo) throws Exception {
-    OMRequestTestUtils.addKeyToTable(false, false, keyInfo, 0, 0L,
+    OMRequestTestUtils.addKeyToTable(false, true, keyInfo, 0, 0L,
         omMetadataManager);
     return omMetadataManager.getOzoneKey(keyInfo.getVolumeName(),
         keyInfo.getBucketName(), keyInfo.getKeyName());
diff --git 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java
 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java
index b8b05708c0..bd551ab5c2 100644
--- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java
+++ 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java
@@ -128,10 +128,13 @@ public class TestOMSnapshotCreateResponse {
         .countRowsInTable(omMetadataManager.getSnapshotInfoTable()));
 
     // Check contents of entry
-    Table.KeyValue<String, SnapshotInfo> keyValue =
-        omMetadataManager.getSnapshotInfoTable().iterator().next();
-    SnapshotInfo storedInfo = keyValue.getValue();
-    Assert.assertEquals(snapshotInfo.getTableKey(), keyValue.getKey());
+    SnapshotInfo storedInfo;
+    try (TableIterator<String, ? extends Table.KeyValue<String, SnapshotInfo>>
+             it = omMetadataManager.getSnapshotInfoTable().iterator()) {
+      Table.KeyValue<String, SnapshotInfo> keyValue = it.next();
+      storedInfo = keyValue.getValue();
+      Assert.assertEquals(snapshotInfo.getTableKey(), keyValue.getKey());
+    }
     Assert.assertEquals(snapshotInfo, storedInfo);
 
     // Check deletedTable and deletedDirectoryTable clean up work as expected
diff --git 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestQuotaRepairTask.java
 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestQuotaRepairTask.java
index 7dbbcfdb1f..abd1f9b115 100644
--- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestQuotaRepairTask.java
+++ 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestQuotaRepairTask.java
@@ -32,6 +32,8 @@ import org.apache.hadoop.util.Time;
 import org.junit.Assert;
 import org.junit.Test;
 
+import java.io.IOException;
+
 /**
  * Test class for quota repair.
  */
@@ -68,6 +70,10 @@ public class TestQuotaRepairTask extends TestOMKeyRequest {
           fileName, omKeyInfo, -1, 50 + i, omMetadataManager);
     }
 
+    // Intentionally zero out buckets' used bytes first
+    zeroOutBucketUsedBytes(volumeName, bucketName, 1L);
+    zeroOutBucketUsedBytes(volumeName, fsoBucketName, 2L);
+
     // all count is 0 as above is adding directly to key / file table
     // and directory table
     OmBucketInfo obsBucketInfo = omMetadataManager.getBucketTable().get(
@@ -133,4 +139,16 @@ public class TestQuotaRepairTask extends TestOMKeyRequest {
     Assert.assertTrue(volArgsVerify.getQuotaInBytes() == -1);
     Assert.assertTrue(volArgsVerify.getQuotaInNamespace() == -1);
   }
+
+  private void zeroOutBucketUsedBytes(String volumeName, String bucketName,
+                                      long trxnLogIndex)
+      throws IOException {
+    String dbKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo bucketInfo = omMetadataManager.getBucketTable().get(dbKey);
+    bucketInfo.incrUsedBytes(-bucketInfo.getUsedBytes());
+    omMetadataManager.getBucketTable()
+        .addCacheEntry(new CacheKey<>(dbKey),
+            CacheValue.get(trxnLogIndex, bucketInfo));
+    omMetadataManager.getBucketTable().put(dbKey, bucketInfo);
+  }
 }
diff --git 
a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
 
b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
index 1e364b3f77..30ef71865b 100644
--- 
a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
+++ 
b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
@@ -1209,7 +1209,10 @@ public class BasicRootedOzoneClientAdapterImpl
           ozoneSnapshot.getName(), pathStr);
     }
     Path path = new Path(pathStr);
-    return new FileStatusAdapter(0L, 0L, path, true, (short)0, 0L,
+    return new FileStatusAdapter(
+        ozoneSnapshot.getReferencedSize(),
+        ozoneSnapshot.getReferencedReplicatedSize(),
+        path, true, (short) 0, 0L,
         ozoneSnapshot.getCreationTime(), 0L,
         FsPermission.getDirDefault().toShort(),
         owner, group, null, new BlockLocation[0],


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]


Reply via email to