This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch HDDS-7593
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/HDDS-7593 by this push:
     new eff21da97e HDDS-9987. [hsync] Client side metrics. (#6468)
eff21da97e is described below

commit eff21da97e0055f1502ae89090e4a8542b6c8270
Author: Wei-Chiu Chuang <[email protected]>
AuthorDate: Wed Jun 12 16:52:13 2024 -0700

    HDDS-9987. [hsync] Client side metrics. (#6468)
---
 .../hadoop/hdds/scm/ContainerClientMetrics.java    | 42 ++++++++++++++++++++++
 .../java/org/apache/hadoop/util/MetricUtil.java    | 10 ++++++
 .../ozone/client/io/BlockOutputStreamEntry.java    |  3 +-
 .../client/io/BlockOutputStreamEntryPool.java      |  7 ++--
 .../hadoop/ozone/client/io/KeyOutputStream.java    |  9 ++++-
 5 files changed, 67 insertions(+), 4 deletions(-)

diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/ContainerClientMetrics.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/ContainerClientMetrics.java
index 422943fff0..19a5a9cad5 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/ContainerClientMetrics.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/ContainerClientMetrics.java
@@ -57,6 +57,9 @@ public final class ContainerClientMetrics {
   private MutableQuantiles[] getCommittedBlockLengthLatency;
   private MutableQuantiles[] readChunkLatency;
   private MutableQuantiles[] getSmallFileLatency;
+  private MutableQuantiles[] hsyncLatencyNs;
+  private MutableQuantiles[] omHsyncLatencyNs;
+  private MutableQuantiles[] datanodeHsyncLatencyNs;
   private final Map<PipelineID, MutableCounterLong> writeChunkCallsByPipeline;
   private final Map<PipelineID, MutableCounterLong> writeChunkBytesByPipeline;
   private final Map<UUID, MutableCounterLong> writeChunksCallsByLeaders;
@@ -96,6 +99,9 @@ public final class ContainerClientMetrics {
     getCommittedBlockLengthLatency = new MutableQuantiles[3];
     readChunkLatency = new MutableQuantiles[3];
     getSmallFileLatency = new MutableQuantiles[3];
+    hsyncLatencyNs = new MutableQuantiles[3];
+    omHsyncLatencyNs = new MutableQuantiles[3];
+    datanodeHsyncLatencyNs = new MutableQuantiles[3];
     int[] intervals = {60, 300, 900};
     for (int i = 0; i < intervals.length; i++) {
       int interval = intervals[i];
@@ -119,6 +125,18 @@ public final class ContainerClientMetrics {
           .newQuantiles("getSmallFileLatency" + interval
                   + "s", "GetSmallFile latency in microseconds", "ops",
               "latency", interval);
+      hsyncLatencyNs[i] = registry
+          .newQuantiles("hsyncLatency" + interval
+                  + "s", "client hsync latency in nanoseconds", "ops",
+              "latency", interval);
+      omHsyncLatencyNs[i] = registry
+          .newQuantiles("omHsyncLatency" + interval
+                  + "s", "client hsync latency to OM in nanoseconds", "ops",
+              "latency", interval);
+      datanodeHsyncLatencyNs[i] = registry
+          .newQuantiles("dnHsyncLatency" + interval
+                  + "s", "client hsync latency to DN in nanoseconds", "ops",
+              "latency", interval);
     }
   }
 
@@ -155,6 +173,14 @@ public final class ContainerClientMetrics {
     }
   }
 
+  public void addHsyncLatency(long hsyncLatencyTime) {
+    for (MutableQuantiles q : hsyncLatencyNs) {
+      if (q != null) {
+        q.add(hsyncLatencyTime);
+      }
+    }
+  }
+
   public void addGetBlockLatency(long latency) {
     for (MutableQuantiles q : getBlockLatency) {
       if (q != null) {
@@ -163,6 +189,14 @@ public final class ContainerClientMetrics {
     }
   }
 
+  public void addOMHsyncLatency(long hsyncLatencyTime) {
+    for (MutableQuantiles q : omHsyncLatencyNs) {
+      if (q != null) {
+        q.add(hsyncLatencyTime);
+      }
+    }
+  }
+
   public void addGetCommittedBlockLengthLatency(long latency) {
     for (MutableQuantiles q : getCommittedBlockLengthLatency) {
       if (q != null) {
@@ -187,6 +221,14 @@ public final class ContainerClientMetrics {
     }
   }
 
+  public void addDataNodeHsyncLatency(long hsyncLatencyTime) {
+    for (MutableQuantiles q : datanodeHsyncLatencyNs) {
+      if (q != null) {
+        q.add(hsyncLatencyTime);
+      }
+    }
+  }
+
   @VisibleForTesting
   public MutableCounterLong getTotalWriteChunkBytes() {
     return totalWriteChunkBytes;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/util/MetricUtil.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/util/MetricUtil.java
index 381314e69c..b1f1fe6420 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/util/MetricUtil.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/util/MetricUtil.java
@@ -70,6 +70,16 @@ public final class MetricUtil {
     }
   }
 
+  public static <E extends IOException> void captureLatencyNs(
+      Consumer<Long> latencySetter, CheckedRunnable<E> block) throws E {
+    long start = Time.monotonicNowNanos();
+    try {
+      block.run();
+    } finally {
+      latencySetter.accept(Time.monotonicNowNanos() - start);
+    }
+  }
+
   /**
    * Creates MutableQuantiles metrics with one or multiple intervals.
    *
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntry.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntry.java
index ba3850ff39..9516b74881 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntry.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntry.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hdds.scm.storage.BufferPool;
 import org.apache.hadoop.hdds.scm.storage.RatisBlockOutputStream;
 import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.util.MetricUtil;
 
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.ratis.util.JavaUtils;
@@ -160,7 +161,7 @@ public class BlockOutputStreamEntry extends OutputStream {
             out.getClass() + " is not " + Syncable.class.getSimpleName());
       }
 
-      ((Syncable)out).hsync();
+      MetricUtil.captureLatencyNs(clientMetrics::addDataNodeHsyncLatency, () -> ((Syncable)out).hsync());
     }
   }
 
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java
index 1b7918a45a..bb4e3e3f08 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java
@@ -46,6 +46,7 @@ import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.util.MetricUtil;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -350,12 +351,14 @@ public class BlockOutputStreamEntryPool implements KeyMetadataAware {
         throw new IOException("Hsync is unsupported for multipart keys.");
       } else {
         if (keyArgs.getLocationInfoList().size() == 0) {
-          omClient.hsyncKey(keyArgs, openID);
+          MetricUtil.captureLatencyNs(clientMetrics::addOMHsyncLatency,
+              () -> omClient.hsyncKey(keyArgs, openID));
         } else {
          ContainerBlockID lastBLockId = keyArgs.getLocationInfoList().get(keyArgs.getLocationInfoList().size() - 1)
               .getBlockID().getContainerBlockID();
           if (!lastUpdatedBlockId.equals(lastBLockId)) {
-            omClient.hsyncKey(keyArgs, openID);
+            MetricUtil.captureLatencyNs(clientMetrics::addOMHsyncLatency,
+                () -> omClient.hsyncKey(keyArgs, openID));
             lastUpdatedBlockId = lastBLockId;
           }
         }
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
index 11c644cd55..e080ea2d34 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
@@ -54,6 +54,7 @@ import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.util.MetricUtil;
 import org.apache.ratis.protocol.exceptions.AlreadyClosedException;
 import org.apache.ratis.protocol.exceptions.RaftRetryFailureException;
 import org.slf4j.Logger;
@@ -105,6 +106,7 @@ public class KeyOutputStream extends OutputStream
    * This is essential for operations like S3 put to ensure atomicity.
    */
   private boolean atomicKeyCreation;
+  private ContainerClientMetrics clientMetrics;
 
  public KeyOutputStream(ReplicationConfig replicationConfig, BlockOutputStreamEntryPool blockOutputStreamEntryPool) {
     this.replication = replicationConfig;
@@ -154,6 +156,7 @@ public class KeyOutputStream extends OutputStream
     this.clientID = b.getOpenHandler().getId();
     this.atomicKeyCreation = b.getAtomicKeyCreation();
     this.streamBufferArgs = b.getStreamBufferArgs();
+    this.clientMetrics = b.getClientMetrics();
   }
 
   /**
@@ -454,10 +457,14 @@ public class KeyOutputStream extends OutputStream
     }
     checkNotClosed();
     final long hsyncPos = writeOffset;
+
     handleFlushOrClose(StreamAction.HSYNC);
+
     Preconditions.checkState(offset >= hsyncPos,
         "offset = %s < hsyncPos = %s", offset, hsyncPos);
-    blockOutputStreamEntryPool.hsyncKey(hsyncPos);
+
+    MetricUtil.captureLatencyNs(clientMetrics::addHsyncLatency,
+        () -> blockOutputStreamEntryPool.hsyncKey(hsyncPos));
   }
 
   /**


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to