This is an automated email from the ASF dual-hosted git repository.

sodonnell pushed a commit to branch HDDS-3816-ec
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/HDDS-3816-ec by this push:
     new 9e3cf98  HDDS-6279. EC: Calculate EC replication correctly when 
updating bucket usage (#3136)
9e3cf98 is described below

commit 9e3cf98a30dbf16508b97aba2d8c0324c378ff87
Author: Stephen O'Donnell <[email protected]>
AuthorDate: Fri Feb 25 20:32:07 2022 +0000

    HDDS-6279. EC: Calculate EC replication correctly when updating bucket 
usage (#3136)
---
 .../apache/hadoop/ozone/om/helpers/OmKeyInfo.java  |  4 +
 .../apache/hadoop/ozone/om/helpers/QuotaUtil.java  | 62 ++++++++++++++
 .../hadoop/ozone/om/helpers/TestQuotaUtil.java     | 98 ++++++++++++++++++++++
 .../ozone/om/request/key/OMKeyCommitRequest.java   |  5 +-
 .../om/request/key/OMKeyCommitRequestWithFSO.java  |  5 +-
 .../hadoop/ozone/om/request/key/OMKeyRequest.java  |  6 +-
 .../multipart/S3MultipartUploadAbortRequest.java   |  7 +-
 .../S3MultipartUploadCommitPartRequest.java        |  2 +-
 8 files changed, 176 insertions(+), 13 deletions(-)

diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
index 7f081aa..7ae3da8 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
@@ -133,6 +133,10 @@ public final class OmKeyInfo extends WithParentObjectId {
     return dataSize;
   }
 
+  public long getReplicatedSize() {
+    return QuotaUtil.getReplicatedSize(getDataSize(), replicationConfig);
+  }
+
   public void setDataSize(long size) {
     this.dataSize = size;
   }
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/QuotaUtil.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/QuotaUtil.java
new file mode 100644
index 0000000..09e0440
--- /dev/null
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/QuotaUtil.java
@@ -0,0 +1,62 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.helpers;
+
+import org.apache.hadoop.hdds.client.ECReplicationConfig;
+import org.apache.hadoop.hdds.client.RatisReplicationConfig;
+import org.apache.hadoop.hdds.client.ReplicationConfig;
+
+import static 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.EC;
+import static 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.RATIS;
+
+/**
+ * Helper class to calculate quota related usage.
+ */
+public final class QuotaUtil {
+
+  private QuotaUtil() {
+  };
+
+  /**
+   * From the used space and replicationConfig, calculate the expected
+   * replicated size of the data.
+   * @param dataSize The number of bytes of data stored
+   * @param repConfig The replicationConfig used to store the data
+   * @return Number of bytes required to store the dataSize with replication
+   */
+  public static long getReplicatedSize(
+      long dataSize, ReplicationConfig repConfig) {
+    if (repConfig.getReplicationType() == RATIS) {
+      return dataSize * ((RatisReplicationConfig)repConfig)
+          .getReplicationFactor().getNumber();
+    } else if (repConfig.getReplicationType() == EC) {
+      ECReplicationConfig rc = (ECReplicationConfig)repConfig;
+      int dataStripeSize = rc.getData() * rc.getEcChunkSize();
+      long fullStripes = dataSize / dataStripeSize;
+      long partialFirstChunk =
+          Math.min(rc.getEcChunkSize(), dataSize % dataStripeSize);
+      long replicationOverhead =
+          fullStripes * rc.getParity() * rc.getEcChunkSize()
+              + partialFirstChunk * rc.getParity();
+      return dataSize + replicationOverhead;
+    } else {
+      return dataSize;
+    }
+  }
+
+}
diff --git 
a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestQuotaUtil.java
 
b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestQuotaUtil.java
new file mode 100644
index 0000000..d06932a
--- /dev/null
+++ 
b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestQuotaUtil.java
@@ -0,0 +1,98 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.helpers;
+
+import org.apache.hadoop.hdds.client.ECReplicationConfig;
+import org.apache.hadoop.hdds.client.RatisReplicationConfig;
+import org.junit.Assert;
+import org.junit.Test;
+
+import static org.apache.hadoop.hdds.client.ECReplicationConfig.EcCodec.RS;
+import static 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
+import static 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE;
+
+/**
+ * Tests for the QuotaUtil class.
+ */
+public class TestQuotaUtil {
+
+  private static final int ONE_MB = 1024 * 1024;
+
+  @Test
+  public void testRatisThreeReplication() {
+    RatisReplicationConfig repConfig = new RatisReplicationConfig(THREE);
+    long replicatedSize =
+        QuotaUtil.getReplicatedSize(123 * ONE_MB, repConfig);
+    Assert.assertEquals(123 * ONE_MB * 3, replicatedSize);
+  }
+
+  @Test
+  public void testRatisOneReplication() {
+    RatisReplicationConfig repConfig = new RatisReplicationConfig(ONE);
+    long replicatedSize =
+        QuotaUtil.getReplicatedSize(123 * ONE_MB, repConfig);
+    Assert.assertEquals(123 * ONE_MB, replicatedSize);
+  }
+
+  @Test
+  public void testECFullStripeReplication() {
+    ECReplicationConfig repConfig = new ECReplicationConfig(3, 2, RS, ONE_MB);
+    long dataSize = ONE_MB * 3 * 123; // 123 full stripe
+    long replicatedSize = QuotaUtil.getReplicatedSize(dataSize, repConfig);
+    Assert.assertEquals(dataSize + 123 * ONE_MB * 2, replicatedSize);
+  }
+
+  @Test
+  public void testECPartialStripeIntoFirstChunk() {
+    ECReplicationConfig repConfig = new ECReplicationConfig(3, 2, RS, ONE_MB);
+    long dataSize = ONE_MB * 3 * 123 + 10; // 123 full stripes, plus 10 bytes
+    long replicatedSize = QuotaUtil.getReplicatedSize(dataSize, repConfig);
+    // Expected is 123 parity stripes, plus another 10 bytes in each parity
+    Assert.assertEquals(dataSize + 123 * ONE_MB * 2 + 10 * 2, replicatedSize);
+  }
+
+  @Test
+  public void testECPartialStripeBeyondFirstChunk() {
+    ECReplicationConfig repConfig = new ECReplicationConfig(3, 2, RS, ONE_MB);
+    // 123 full stripes, plus 1MB+10 bytes
+    long dataSize = ONE_MB * 3 * 123 + ONE_MB + 10;
+    long replicatedSize = QuotaUtil.getReplicatedSize(dataSize, repConfig);
+    // Expected is 123 parity stripes, plus another 1MB in each parity
+    Assert.assertEquals(
+        dataSize + 123 * ONE_MB * 2 + ONE_MB * 2, replicatedSize);
+  }
+
+  @Test
+  public void testECPartialSingleStripeFirstChunk() {
+    ECReplicationConfig repConfig = new ECReplicationConfig(3, 2, RS, ONE_MB);
+    long dataSize = 10;
+    long replicatedSize = QuotaUtil.getReplicatedSize(dataSize, repConfig);
+    // No full stripes; expected is the 10 partial bytes in each of the 2 parity replicas
+    Assert.assertEquals(dataSize + 10 * 2, replicatedSize);
+  }
+
+  @Test
+  public void testECPartialSingleBeyondFirstChunk() {
+    ECReplicationConfig repConfig = new ECReplicationConfig(3, 2, RS, ONE_MB);
+    long dataSize = 2 * ONE_MB + 10;
+    long replicatedSize = QuotaUtil.getReplicatedSize(dataSize, repConfig);
+    // No full stripes; partial stripe spills past the first chunk, so expect a full 1MB chunk in each of the 2 parity replicas
+    Assert.assertEquals(dataSize + ONE_MB * 2, replicatedSize);
+  }
+
+}
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
index 9bdb51f..6d5992c 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
@@ -241,12 +241,11 @@ public class OMKeyCommitRequest extends OMKeyRequest {
       // AllocatedBlock. The space occupied by the Key shall be based on
       // the actual Key size, and the total Block size applied before should
       // be subtracted.
-      long correctedSpace = omKeyInfo.getDataSize() * factor -
+      long correctedSpace = omKeyInfo.getReplicatedSize() -
           allocatedLocationInfoList.size() * scmBlockSize * factor;
       // Subtract the size of blocks to be overwritten.
       if (keyToDelete != null) {
-        correctedSpace -= keyToDelete.getDataSize() *
-            keyToDelete.getReplicationConfig().getRequiredNodes();
+        correctedSpace -= keyToDelete.getReplicatedSize();
       }
 
       omBucketInfo.incrUsedBytes(correctedSpace);
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java
index f662390..54781c2 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java
@@ -175,12 +175,11 @@ public class OMKeyCommitRequestWithFSO extends 
OMKeyCommitRequest {
       // AllocatedBlock. The space occupied by the Key shall be based on
       // the actual Key size, and the total Block size applied before should
       // be subtracted.
-      long correctedSpace = omKeyInfo.getDataSize() * factor -
+      long correctedSpace = omKeyInfo.getReplicatedSize() -
               locationInfoList.size() * scmBlockSize * factor;
       // Subtract the size of blocks to be overwritten.
       if (keyToDelete != null) {
-        correctedSpace -= keyToDelete.getDataSize() *
-            keyToDelete.getReplicationConfig().getRequiredNodes();
+        correctedSpace -= keyToDelete.getReplicatedSize();
       }
       omBucketInfo.incrUsedBytes(correctedSpace);
 
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
index 0b3bedd..629e501 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
+import org.apache.hadoop.ozone.om.helpers.QuotaUtil;
 import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
 import org.apache.hadoop.ozone.protocolPB.OMPBHelper;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
@@ -564,11 +565,10 @@ public abstract class OMKeyRequest extends 
OMClientRequest {
    */
   protected static long sumBlockLengths(OmKeyInfo omKeyInfo) {
     long bytesUsed = 0;
-    int keyFactor = omKeyInfo.getReplicationConfig().getRequiredNodes();
-
     for (OmKeyLocationInfoGroup group: omKeyInfo.getKeyLocationVersions()) {
       for (OmKeyLocationInfo locationInfo : group.getLocationList()) {
-        bytesUsed += locationInfo.getLength() * keyFactor;
+        bytesUsed += QuotaUtil.getReplicatedSize(
+            locationInfo.getLength(), omKeyInfo.getReplicationConfig());
       }
     }
 
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java
index b7cf656..7d6f820 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java
@@ -26,6 +26,7 @@ import java.util.Map;
 import com.google.common.base.Optional;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.QuotaUtil;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
 import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
@@ -165,14 +166,14 @@ public class S3MultipartUploadAbortRequest extends 
OMKeyRequest {
       // When abort uploaded key, we need to subtract the PartKey length from
       // the volume usedBytes.
       long quotaReleased = 0;
-      int keyFactor = omKeyInfo.getReplicationConfig().getRequiredNodes();
       Iterator iter =
           multipartKeyInfo.getPartKeyInfoMap().entrySet().iterator();
       while (iter.hasNext()) {
         Map.Entry entry = (Map.Entry)iter.next();
         PartKeyInfo iterPartKeyInfo = (PartKeyInfo)entry.getValue();
-        quotaReleased +=
-            iterPartKeyInfo.getPartKeyInfo().getDataSize() * keyFactor;
+        quotaReleased += QuotaUtil.getReplicatedSize(
+            iterPartKeyInfo.getPartKeyInfo().getDataSize(),
+            omKeyInfo.getReplicationConfig());
       }
       omBucketInfo.incrUsedBytes(-quotaReleased);
 
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
index 717be05..5d52bab 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
@@ -229,7 +229,7 @@ public class S3MultipartUploadCommitPartRequest extends 
OMKeyRequest {
       // AllocatedBlock. The space occupied by the Key shall be based on
       // the actual Key size, and the total Block size applied before should
       // be subtracted.
-      long correctedSpace = omKeyInfo.getDataSize() * factor -
+      long correctedSpace = omKeyInfo.getReplicatedSize() -
           keyArgs.getKeyLocationsList().size() * scmBlockSize * factor;
       omBucketInfo.incrUsedBytes(correctedSpace);
 

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to