This is an automated email from the ASF dual-hosted git repository.

myskov pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new 264cbc6192 HDDS-10476. Add metrics about bucket utilization. (#6344)
264cbc6192 is described below

commit 264cbc61925761be9af96c9e22edefc1840af5ce
Author: Ivan Zlenko <[email protected]>
AuthorDate: Tue Mar 19 15:19:52 2024 +0500

    HDDS-10476. Add metrics about bucket utilization. (#6344)
---
 .../container/common/volume/VolumeInfoMetrics.java |   6 +-
 .../hdds/scm/server/SCMContainerMetrics.java       |   1 -
 .../hadoop/ozone/om/BucketUtilizationMetrics.java  | 118 +++++++++++++++++++
 .../hadoop/ozone/om/OmMetadataManagerImpl.java     |  34 ------
 .../org/apache/hadoop/ozone/om/OzoneManager.java   |  11 +-
 .../ozone/om/TestBucketUtilizationMetrics.java     | 126 +++++++++++++++++++++
 6 files changed, 255 insertions(+), 41 deletions(-)

diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java
index 18e7354ec1..e59cab0d53 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java
@@ -32,15 +32,13 @@ import org.apache.hadoop.ozone.OzoneConsts;
 public class VolumeInfoMetrics {
 
   private String metricsSourceName = VolumeInfoMetrics.class.getSimpleName();
-  private String volumeRootStr;
-  private HddsVolume volume;
+  private final HddsVolume volume;
 
   /**
-   * @param identifier Typically, path to volume root. e.g. /data/hdds
+   * @param identifier Typically, path to volume root. E.g. /data/hdds
    */
   public VolumeInfoMetrics(String identifier, HddsVolume ref) {
     this.metricsSourceName += '-' + identifier;
-    this.volumeRootStr = identifier;
     this.volume = ref;
     init();
   }
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMContainerMetrics.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMContainerMetrics.java
index d7d47a78b7..dab66cc515 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMContainerMetrics.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMContainerMetrics.java
@@ -63,7 +63,6 @@ public class SCMContainerMetrics implements MetricsSource {
   }
 
   @Override
-  @SuppressWarnings("SuspiciousMethodCalls")
   public void getMetrics(MetricsCollector collector, boolean all) {
     Map<String, Integer> stateCount = scmmxBean.getContainerStateCount();
 
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketUtilizationMetrics.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketUtilizationMetrics.java
new file mode 100644
index 0000000000..d5916c6adc
--- /dev/null
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketUtilizationMetrics.java
@@ -0,0 +1,118 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.om;
+
+import org.apache.hadoop.hdds.annotation.InterfaceAudience;
+import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
+import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
+import org.apache.hadoop.metrics2.MetricsCollector;
+import org.apache.hadoop.metrics2.MetricsInfo;
+import org.apache.hadoop.metrics2.MetricsSource;
+import org.apache.hadoop.metrics2.MetricsSystem;
+import org.apache.hadoop.metrics2.annotation.Metrics;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+
+import java.util.Iterator;
+import java.util.Map.Entry;
+
+/**
+ * A class for collecting and reporting bucket utilization metrics.
+ * <p>
+ * Available metrics:
+ * <ul>
+ *   <li>Bytes used in bucket.
+ *   <li>Bucket quota in bytes.
+ *   <li>Bucket quota in namespace.
+ *   <li>Bucket available space. Calculated from the difference between used bytes 
in the bucket and the bucket quota.
+ *   If the bucket quota is not set then this metric shows -1 as its value.
+ * </ul>
+ */
[email protected]
+@Metrics(about = "Ozone Bucket Utilization Metrics", context = 
OzoneConsts.OZONE)
+public class BucketUtilizationMetrics implements MetricsSource {
+
+  private static final String SOURCE = 
BucketUtilizationMetrics.class.getSimpleName();
+
+  private final OMMetadataManager metadataManager;
+
+  public BucketUtilizationMetrics(OMMetadataManager metadataManager) {
+    this.metadataManager = metadataManager;
+  }
+
+  public static BucketUtilizationMetrics create(OMMetadataManager 
metadataManager) {
+    MetricsSystem ms = DefaultMetricsSystem.instance();
+    return ms.register(SOURCE, "Bucket Utilization Metrics", new 
BucketUtilizationMetrics(metadataManager));
+  }
+
+  @Override
+  public void getMetrics(MetricsCollector collector, boolean all) {
+    Iterator<Entry<CacheKey<String>, CacheValue<OmBucketInfo>>> bucketIterator 
= metadataManager.getBucketIterator();
+
+    while (bucketIterator.hasNext()) {
+      Entry<CacheKey<String>, CacheValue<OmBucketInfo>> entry = 
bucketIterator.next();
+      OmBucketInfo bucketInfo = entry.getValue().getCacheValue();
+      if (bucketInfo == null) {
+        continue;
+      }
+
+      long availableSpace;
+      long quotaInBytes = bucketInfo.getQuotaInBytes();
+      if (quotaInBytes == -1) {
+        availableSpace = quotaInBytes;
+      } else {
+        availableSpace = Math.max(bucketInfo.getQuotaInBytes() - 
bucketInfo.getUsedBytes(), 0);
+      }
+
+      collector.addRecord(SOURCE)
+          .setContext("Bucket metrics")
+          .tag(BucketMetricsInfo.VolumeName, bucketInfo.getVolumeName())
+          .tag(BucketMetricsInfo.BucketName, bucketInfo.getBucketName())
+          .addGauge(BucketMetricsInfo.BucketUsedBytes, 
bucketInfo.getUsedBytes())
+          .addGauge(BucketMetricsInfo.BucketQuotaBytes, 
bucketInfo.getQuotaInBytes())
+          .addGauge(BucketMetricsInfo.BucketQuotaNamespace, 
bucketInfo.getQuotaInNamespace())
+          .addGauge(BucketMetricsInfo.BucketAvailableBytes, availableSpace);
+    }
+  }
+
+  public void unRegister() {
+    MetricsSystem ms = DefaultMetricsSystem.instance();
+    ms.unregisterSource(SOURCE);
+  }
+
+  enum BucketMetricsInfo implements MetricsInfo {
+    VolumeName("Volume Metrics."),
+    BucketName("Bucket Metrics."),
+    BucketUsedBytes("Bytes used by bucket."),
+    BucketQuotaBytes("Bucket quote in bytes."),
+    BucketQuotaNamespace("Bucket quota in namespace."),
+    BucketAvailableBytes("Bucket available space.");
+
+    private final String desc;
+
+    BucketMetricsInfo(String desc) {
+      this.desc = desc;
+    }
+
+    @Override
+    public String description() {
+      return desc;
+    }
+  }
+}
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
index 982e04df04..6aae468d9f 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
@@ -892,40 +892,6 @@ public class OmMetadataManagerImpl implements 
OMMetadataManager,
     return omEpoch;
   }
 
-  /**
-   * Returns true if the firstArray startsWith the bytes of secondArray.
-   *
-   * @param firstArray - Byte array
-   * @param secondArray - Byte array
-   * @return true if the first array bytes match the bytes in the second array.
-   */
-  private boolean startsWith(byte[] firstArray, byte[] secondArray) {
-
-    if (firstArray == null) {
-      // if both are null, then the arrays match, else if first is null and
-      // second is not, then this function returns false.
-      return secondArray == null;
-    }
-
-
-    if (secondArray != null) {
-      // If the second array is longer then first array cannot be starting with
-      // the bytes of second array.
-      if (secondArray.length > firstArray.length) {
-        return false;
-      }
-
-      for (int ndx = 0; ndx < secondArray.length; ndx++) {
-        if (firstArray[ndx] != secondArray[ndx]) {
-          return false;
-        }
-      }
-      return true; //match, return true.
-    }
-    return false; // if first is not null and second is null, we define that
-    // array does not start with same chars.
-  }
-
   /**
    * Given a volume, check if it is empty, i.e there are no buckets inside it.
    * We iterate in the bucket table and see if there is any key that starts 
with
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index 13e61ce61c..9d18107a5d 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -457,6 +457,7 @@ public final class OzoneManager extends 
ServiceRuntimeInfoImpl
 
   private final OzoneLockProvider ozoneLockProvider;
   private final OMPerformanceMetrics perfMetrics;
+  private final BucketUtilizationMetrics bucketUtilizationMetrics;
 
   private boolean fsSnapshotEnabled;
 
@@ -720,6 +721,8 @@ public final class OzoneManager extends 
ServiceRuntimeInfoImpl
     } else {
       omState = State.INITIALIZED;
     }
+
+    bucketUtilizationMetrics = 
BucketUtilizationMetrics.create(metadataManager);
   }
 
   public boolean isStopped() {
@@ -2297,6 +2300,10 @@ public final class OzoneManager extends 
ServiceRuntimeInfoImpl
         OMHAMetrics.unRegister();
       }
       omRatisServer = null;
+
+      if (bucketUtilizationMetrics != null) {
+        bucketUtilizationMetrics.unRegister();
+      }
       return true;
     } catch (Exception e) {
       LOG.error("OzoneManager stop failed.", e);
@@ -4031,7 +4038,7 @@ public final class OzoneManager extends 
ServiceRuntimeInfoImpl
     startSecretManagerIfNecessary();
     startTrashEmptier(configuration);
 
-    // Set metrics and start metrics back ground thread
+    // Set metrics and start metrics background thread
     metrics.setNumVolumes(metadataManager.countRowsInTable(metadataManager
         .getVolumeTable()));
     metrics.setNumBuckets(metadataManager.countRowsInTable(metadataManager
@@ -4045,7 +4052,7 @@ public final class OzoneManager extends 
ServiceRuntimeInfoImpl
     metrics.setNumFiles(metadataManager
         .countEstimatedRowsInTable(metadataManager.getFileTable()));
 
-    // Delete the omMetrics file if it exists and save the a new metrics file
+    // Delete the omMetrics file if it exists and save a new metrics file
     // with new data
     Files.deleteIfExists(getMetricsStorageFile().toPath());
     saveOmMetrics();
diff --git 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketUtilizationMetrics.java
 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketUtilizationMetrics.java
new file mode 100644
index 0000000000..1be85d2049
--- /dev/null
+++ 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketUtilizationMetrics.java
@@ -0,0 +1,126 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.om;
+
+import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
+import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
+import org.apache.hadoop.metrics2.MetricsCollector;
+import org.apache.hadoop.metrics2.MetricsInfo;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.ozone.om.BucketUtilizationMetrics.BucketMetricsInfo;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.junit.jupiter.api.Test;
+
+import java.util.Iterator;
+import java.util.Map;
+
+import static org.apache.hadoop.ozone.OzoneConsts.QUOTA_RESET;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyInt;
+import static org.mockito.ArgumentMatchers.anyLong;
+import static org.mockito.ArgumentMatchers.anyString;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+/**
+ * Test class for BucketUtilizationMetrics.
+ */
+public class TestBucketUtilizationMetrics {
+
+  private static final String VOLUME_NAME_1 = "volume1";
+  private static final String VOLUME_NAME_2 = "volume2";
+  private static final String BUCKET_NAME_1 = "bucket1";
+  private static final String BUCKET_NAME_2 = "bucket2";
+  private static final long USED_BYTES_1 = 100;
+  private static final long USED_BYTES_2 = 200;
+  private static final long QUOTA_IN_BYTES_1 = 200;
+  private static final long QUOTA_IN_BYTES_2 = QUOTA_RESET;
+  private static final long QUOTA_IN_NAMESPACE_1 = 1;
+  private static final long QUOTA_IN_NAMESPACE_2 = 2;
+
+  @Test
+  void testBucketUtilizationMetrics() {
+    OMMetadataManager omMetadataManager = mock(OMMetadataManager.class);
+
+    Map.Entry<CacheKey<String>, CacheValue<OmBucketInfo>> entry1 = 
createMockEntry(VOLUME_NAME_1, BUCKET_NAME_1,
+        USED_BYTES_1, QUOTA_IN_BYTES_1, QUOTA_IN_NAMESPACE_1);
+    Map.Entry<CacheKey<String>, CacheValue<OmBucketInfo>> entry2 = 
createMockEntry(VOLUME_NAME_2, BUCKET_NAME_2,
+        USED_BYTES_2, QUOTA_IN_BYTES_2, QUOTA_IN_NAMESPACE_2);
+
+    Iterator<Map.Entry<CacheKey<String>, CacheValue<OmBucketInfo>>> 
bucketIterator = mock(Iterator.class);
+    when(bucketIterator.hasNext())
+        .thenReturn(true)
+        .thenReturn(true)
+        .thenReturn(false);
+
+    when(bucketIterator.next())
+        .thenReturn(entry1)
+        .thenReturn(entry2);
+
+    when(omMetadataManager.getBucketIterator()).thenReturn(bucketIterator);
+
+    MetricsRecordBuilder mb = mock(MetricsRecordBuilder.class);
+    when(mb.setContext(anyString())).thenReturn(mb);
+    when(mb.tag(any(MetricsInfo.class), anyString())).thenReturn(mb);
+    when(mb.addGauge(any(MetricsInfo.class), anyInt())).thenReturn(mb);
+    when(mb.addGauge(any(MetricsInfo.class), anyLong())).thenReturn(mb);
+
+    MetricsCollector metricsCollector = mock(MetricsCollector.class);
+    when(metricsCollector.addRecord(anyString())).thenReturn(mb);
+
+    BucketUtilizationMetrics containerMetrics = new 
BucketUtilizationMetrics(omMetadataManager);
+
+    containerMetrics.getMetrics(metricsCollector, true);
+
+    verify(mb, times(1)).tag(BucketMetricsInfo.VolumeName, VOLUME_NAME_1);
+    verify(mb, times(1)).tag(BucketMetricsInfo.BucketName, BUCKET_NAME_1);
+    verify(mb, times(1)).addGauge(BucketMetricsInfo.BucketUsedBytes, 
USED_BYTES_1);
+    verify(mb, times(1)).addGauge(BucketMetricsInfo.BucketQuotaBytes, 
QUOTA_IN_BYTES_1);
+    verify(mb, times(1)).addGauge(BucketMetricsInfo.BucketQuotaNamespace, 
QUOTA_IN_NAMESPACE_1);
+    verify(mb, times(1)).addGauge(BucketMetricsInfo.BucketAvailableBytes,
+        QUOTA_IN_BYTES_1 - USED_BYTES_1);
+
+    verify(mb, times(1)).tag(BucketMetricsInfo.VolumeName, VOLUME_NAME_2);
+    verify(mb, times(1)).tag(BucketMetricsInfo.BucketName, BUCKET_NAME_2);
+    verify(mb, times(1)).addGauge(BucketMetricsInfo.BucketUsedBytes, 
USED_BYTES_2);
+    verify(mb, times(1)).addGauge(BucketMetricsInfo.BucketQuotaBytes, 
QUOTA_IN_BYTES_2);
+    verify(mb, times(1)).addGauge(BucketMetricsInfo.BucketQuotaNamespace, 
QUOTA_IN_NAMESPACE_2);
+    verify(mb, times(1)).addGauge(BucketMetricsInfo.BucketAvailableBytes, 
QUOTA_RESET);
+  }
+
+  private static Map.Entry<CacheKey<String>, CacheValue<OmBucketInfo>> 
createMockEntry(String volumeName,
+      String bucketName, long usedBytes, long quotaInBytes, long 
quotaInNamespace) {
+    Map.Entry<CacheKey<String>, CacheValue<OmBucketInfo>> entry = 
mock(Map.Entry.class);
+    CacheValue<OmBucketInfo> cacheValue = mock(CacheValue.class);
+    OmBucketInfo bucketInfo = mock(OmBucketInfo.class);
+
+    when(bucketInfo.getVolumeName()).thenReturn(volumeName);
+    when(bucketInfo.getBucketName()).thenReturn(bucketName);
+    when(bucketInfo.getUsedBytes()).thenReturn(usedBytes);
+    when(bucketInfo.getQuotaInBytes()).thenReturn(quotaInBytes);
+    when(bucketInfo.getQuotaInNamespace()).thenReturn(quotaInNamespace);
+
+    when(cacheValue.getCacheValue()).thenReturn(bucketInfo);
+
+    when(entry.getValue()).thenReturn(cacheValue);
+
+    return entry;
+  }
+}


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to