[ 
https://issues.apache.org/jira/browse/HADOOP-19729?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=18033628#comment-18033628
 ] 

ASF GitHub Bot commented on HADOOP-19729:
-----------------------------------------

anujmodi2021 commented on code in PR #8043:
URL: https://github.com/apache/hadoop/pull/8043#discussion_r2470505964


##########
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/SlidingWindowHdrHistogram.java:
##########
@@ -0,0 +1,249 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azurebfs.services;
+
+import org.HdrHistogram.Histogram;
+import org.HdrHistogram.Recorder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.hadoop.classification.VisibleForTesting;
+
+public class SlidingWindowHdrHistogram {
+  private static final Logger LOG = LoggerFactory.getLogger(SlidingWindowHdrHistogram.class);
+
+  // Configuration
+  private final long windowSizeMillis;          // Total analysis window
+  private final long timeSegmentDurationMillis;       // Subdivision of analysis window
+  private final int numSegments;
+  private final long highestTrackableValue;
+  private final int significantFigures;
+
+  // Ring buffer of immutable snapshots for completed time segments
+  private final Histogram[] completedSegments;
+  private final AtomicInteger currentIndex = new AtomicInteger(0);
+
+  // Active Time Segment
+  private volatile Recorder activeSegmentRecorder;
+  private Histogram currentSegmentAccumulation;
+  private volatile long currentSegmentStartMillis;
+  private final AtomicLong currentTotalCount = new AtomicLong(0L);
+
+  // Synchronization
+  // Writers never take locks. Readers (queries) and rotation use this lock
+  // to mutate currentSegmentAccumulation and ring-buffer pointers safely.
+  private final ReentrantLock rotateLock = new ReentrantLock();
+
+  // Reusable temp histograms to minimize allocations
+  private Histogram tmpForDelta;
+  private Histogram tmpForMerge;
+
+  private final AbfsRestOperationType operationType;
+
+  private boolean isAnalysisWindowFilled = false;
+  private int minSampleSize;
+  private int tailLatencyPercentile;
+  private int tailLatencyMinDeviation;
+
+  private double p50 = 0.0;
+  private double p90 = 0.0;
+  private double p99 = 0.0;
+  private double tailLatency = 0.0;
+  private int deviation = 0;
+
+  public SlidingWindowHdrHistogram(long windowSizeMillis,
+      int numberOfSegments,
+      int minSampleSize,
+      int tailLatencyPercentile,
+      int tailLatencyMinDeviation,
+      long highestTrackableValue,
+      int significantFigures,
+      final AbfsRestOperationType operationType) {
+    if (windowSizeMillis <= 0) throw new IllegalArgumentException("windowSizeMillis > 0");
+    if (numberOfSegments <= 0) throw new IllegalArgumentException("numberOfSegments > 0");
+    if (highestTrackableValue <= 0) throw new IllegalArgumentException("highestTrackableValue > 0");
+    if (significantFigures < 1 || significantFigures > 5) throw new IllegalArgumentException("significantFigures in [1,5]");
+
+    this.windowSizeMillis = windowSizeMillis;
+    this.numSegments = numberOfSegments;
+    this.timeSegmentDurationMillis = windowSizeMillis/numberOfSegments;
+    this.highestTrackableValue = highestTrackableValue;
+    this.significantFigures = significantFigures;
+    this.operationType = operationType;
+    this.minSampleSize = minSampleSize;
+    this.tailLatencyPercentile = tailLatencyPercentile;
+    this.tailLatencyMinDeviation = tailLatencyMinDeviation; // 5ms
+
+    this.completedSegments = new Histogram[numSegments];
+    long now = System.currentTimeMillis();
+    this.currentSegmentStartMillis = alignToSegmentDuration(now);
+    currentIndex.set(0);
+    this.activeSegmentRecorder = new Recorder(highestTrackableValue, significantFigures);
+    this.currentSegmentAccumulation = new Histogram(highestTrackableValue, significantFigures);
+    this.tmpForDelta = new Histogram(highestTrackableValue, significantFigures);
+    this.tmpForMerge = new Histogram(highestTrackableValue, significantFigures);
+
+    LOG.debug("[{}] Initialized SlidingWindowHdrHistogram with WindowSize {}, TimeSegmentDur: {}, NumOfSegments: {}",
+        operationType, windowSizeMillis, timeSegmentDurationMillis, numSegments);
+  }
+
+  /** Record a single latency value (in your chosen time unit). Thread-safe and lock-free. */
+  public void recordValue(long value) {
+    if (value < 0 || value > highestTrackableValue) {
+      LOG.warn("[{}] Value {} outside of range [0, {}]. Ignoring",
+          operationType, value, highestTrackableValue);
+      return;
+    }
+    activeSegmentRecorder.recordValue(value);
+    currentTotalCount.incrementAndGet();
+    LOG.debug("[{}] Recorded latency value: {}. Current total count: {}",
+        operationType, value, currentTotalCount.get());
+  }
+
+  /** Compute percentiles (p50, p90, p99, tail latency) over the current sliding window. */
+  public void computeLatency() {
+    if (getCurrentTotalCount() < minSampleSize) {
+      LOG.debug("[{}] Not enough data to report percentiles. Current total count: {}",
+          operationType, getCurrentTotalCount());
+    } else {
+      rotateLock.lock();
+      try {
+        tmpForMerge.reset();
+        for (int i = 0; i < numSegments; i++) {
+          Histogram h = completedSegments[i];
+          if (h != null && h.getTotalCount() > 0) {
+            tmpForMerge.add(h);
+          }
+        }
+
+        if (tmpForMerge.getTotalCount() == 0) return;
+
+        tailLatency = tmpForMerge.getValueAtPercentile(tailLatencyPercentile);
+        p50 = tmpForMerge.getValueAtPercentile(50);
+        p90 = tmpForMerge.getValueAtPercentile(90);
+        p99 = tmpForMerge.getValueAtPercentile(99);
+        deviation = (int) ((tailLatency - p50)/p50 * 100);
+      } finally {
+        rotateLock.unlock();
+      }
+    }
+    LOG.debug("[{}] Computed Latencies. p50: {}, p90: {}, p99: {}, tailLatency: {}, deviation with p50: {}. Current total count: {}",
+        operationType, p50, p90, p99, tailLatency, deviation, getCurrentTotalCount());
+  }
+
+  private long alignToSegmentDuration(long timeMs) {
+    return timeMs - (timeMs % timeSegmentDurationMillis);
+  }
+
+  /** Ensure the active segment is aligned to current time; rotate if we've crossed a boundary. */
+  public void rotateIfNeeded() {
+    LOG.debug("[{}] Triggering Histogram Rotation", operationType);
+    long now = System.currentTimeMillis();

Review Comment:
   Taken
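
For readers following the thread, below is a minimal usage sketch of the class under discussion. The constructor parameters and method names (recordValue, rotateIfNeeded, computeLatency) come from the diff above; the numeric values, the example operation type, and the idea of driving rotation from a scheduled task are illustrative assumptions and may not match how the patch wires it.

package org.apache.hadoop.fs.azurebfs.services;

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Hedged sketch of how a caller might exercise SlidingWindowHdrHistogram.
// Numeric values and the scheduling approach are assumptions, not from the patch.
public class SlidingWindowUsageSketch {
  public static void main(String[] args) throws InterruptedException {
    SlidingWindowHdrHistogram histogram = new SlidingWindowHdrHistogram(
        60_000L,  // windowSizeMillis: 60s analysis window (assumed)
        6,        // numberOfSegments: six 10s time segments (assumed)
        100,      // minSampleSize before percentiles are reported (assumed)
        99,       // tailLatencyPercentile: p99, as in the issue description
        5,        // tailLatencyMinDeviation in ms (assumed)
        60_000L,  // highestTrackableValue (assumed)
        3,        // significantFigures
        AbfsRestOperationType.ReadFile);  // example operation type

    // A periodic task rotates segments and refreshes percentiles (assumption).
    ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
    scheduler.scheduleAtFixedRate(() -> {
      histogram.rotateIfNeeded();
      histogram.computeLatency();
    }, 10, 10, TimeUnit.SECONDS);

    // Request threads record observed latencies lock-free.
    histogram.recordValue(42L);

    TimeUnit.SECONDS.sleep(30);  // let a few rotations happen, then stop
    scheduler.shutdownNow();
  }
}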





> ABFS: [Perf] Network Profiling of Tailing Requests and Killing Bad 
> Connections Proactively
> ------------------------------------------------------------------------------------------
>
>                 Key: HADOOP-19729
>                 URL: https://issues.apache.org/jira/browse/HADOOP-19729
>             Project: Hadoop Common
>          Issue Type: Sub-task
>          Components: fs/azure
>    Affects Versions: 3.4.2
>            Reporter: Anuj Modi
>            Assignee: Anuj Modi
>            Priority: Major
>              Labels: pull-request-available
>
> It has been observed that certain requests taking longer than expected to 
> complete can hinder the performance of the whole workload. Such requests are 
> known as tailing requests. They can take longer for a number of reasons, the 
> most prominent being a bad network connection. The ABFS driver caches network 
> connections, and keeping such bad connections in the cache and reusing them 
> hurts performance.
> In this effort we try to identify such connections and close them so that 
> new, good connections can be established and performance improves. There are 
> two parts to this effort:
>  # Identifying Tailing Requests: This involves profiling all network calls 
> and computing percentile values efficiently. By default we treat p99 as the 
> tail latency, and any future request taking longer than the tail latency is 
> considered a tailing request (a rough sketch follows after this description).
>  # Proactively Killing Socket Connections: With the Apache client, we can now 
> kill the socket connection and fail the tailing request. Such failures are 
> not thrown back to the user; the request is retried immediately, without any 
> sleep, on another socket connection.
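
As a rough illustration of part 1 only (not the code from the attached patch, which additionally maintains a sliding window of time segments), recording latencies and deriving a p99-based tail-latency threshold with HdrHistogram might look like the sketch below. The 60_000 ms bound and the isTailing() helper are illustrative assumptions.

package org.apache.hadoop.fs.azurebfs.services;

import org.HdrHistogram.Histogram;

// Hedged sketch: p99-based tail-latency detection with HdrHistogram, as
// described in the issue. Constants and the isTailing() helper are illustrative.
public class TailLatencySketch {
  // Track latencies up to 60s with 3 significant digits (assumed bounds).
  private final Histogram histogram = new Histogram(60_000L, 3);

  public void record(long latencyMillis) {
    histogram.recordValue(latencyMillis);
  }

  // A request is treated as "tailing" if it exceeds the current p99.
  public boolean isTailing(long latencyMillis) {
    if (histogram.getTotalCount() == 0) {
      return false;
    }
    return latencyMillis > histogram.getValueAtPercentile(99.0);
  }
}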



--
This message was sent by Atlassian Jira
(v8.20.10#820010)

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
