This is an automated email from the ASF dual-hosted git repository.

sodonnell pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git
The following commit(s) were added to refs/heads/trunk by this push:
     new 882f08b4bc1d HDFS-17237. Remove IPCLoggerChannelMetrics when the logger is closed (#6217)
882f08b4bc1d is described below

commit 882f08b4bc1d23ac3b0d78b339ddd3a5af53abdd
Author: Stephen O'Donnell <stephen.odonn...@gmail.com>
AuthorDate: Tue Oct 24 21:39:03 2023 +0100

    HDFS-17237. Remove IPCLoggerChannelMetrics when the logger is closed (#6217)
---
 .../hdfs/qjournal/client/IPCLoggerChannel.java     |  1 +
 .../qjournal/client/IPCLoggerChannelMetrics.java   | 39 ++++------------------
 .../hdfs/qjournal/client/TestIPCLoggerChannel.java | 21 ++++++++++--
 3 files changed, 26 insertions(+), 35 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
index 4b7e59c51f13..67fc85810278 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
@@ -206,6 +206,7 @@ public class IPCLoggerChannel implements AsyncLogger {
       // making any more calls after this point (eg clear the queue)
       RPC.stopProxy(proxy);
     }
+    metrics.unregister();
   }
 
   protected QJournalProtocol getProxy() throws IOException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannelMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannelMetrics.java
index 6eef8ffd3862..c1e27e2e98a7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannelMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannelMetrics.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hdfs.qjournal.client;
 
 import java.net.InetSocketAddress;
-import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -29,8 +28,6 @@ import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.lib.MetricsRegistry;
 import org.apache.hadoop.metrics2.lib.MutableQuantiles;
 
-import org.apache.hadoop.thirdparty.com.google.common.collect.Maps;
-
 /**
  * The metrics for a journal from the writer's perspective.
  */
@@ -43,21 +40,6 @@ class IPCLoggerChannelMetrics {
 
   private final MutableQuantiles[] writeEndToEndLatencyQuantiles;
   private final MutableQuantiles[] writeRpcLatencyQuantiles;
-
-  /**
-   * In the case of the NN transitioning between states, edit logs are closed
-   * and reopened. Thus, the IPCLoggerChannel instance that writes to a
-   * given JournalNode may change over the lifetime of the process.
-   * However, metrics2 doesn't have a function to unregister a set of metrics
-   * and fails if a new metrics class is registered with the same name
-   * as the existing one. Hence, we have to maintain our own registry
-   * ("multiton") here, so that we have exactly one metrics instance
-   * per JournalNode, and switch out the pointer to the underlying
-   * IPCLoggerChannel instance.
-   */
-  private static final Map<String, IPCLoggerChannelMetrics> REGISTRY =
-      Maps.newHashMap();
 
   private IPCLoggerChannelMetrics(IPCLoggerChannel ch) {
     this.ch = ch;
@@ -81,25 +63,16 @@ class IPCLoggerChannelMetrics {
       writeRpcLatencyQuantiles = null;
     }
   }
-
-  private void setChannel(IPCLoggerChannel ch) {
-    assert ch.getRemoteAddress().equals(this.ch.getRemoteAddress());
-    this.ch = ch;
+
+  public void unregister() {
+    DefaultMetricsSystem.instance().unregisterSource(getName(ch));
   }
 
   static IPCLoggerChannelMetrics create(IPCLoggerChannel ch) {
     String name = getName(ch);
-    synchronized (REGISTRY) {
-      IPCLoggerChannelMetrics m = REGISTRY.get(name);
-      if (m != null) {
-        m.setChannel(ch);
-      } else {
-        m = new IPCLoggerChannelMetrics(ch);
-        DefaultMetricsSystem.instance().register(name, null, m);
-        REGISTRY.put(name, m);
-      }
-      return m;
-    }
+    IPCLoggerChannelMetrics m = new IPCLoggerChannelMetrics(ch);
+    DefaultMetricsSystem.instance().register(name, null, m);
+    return m;
   }
 
   private static String getName(IPCLoggerChannel ch) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestIPCLoggerChannel.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestIPCLoggerChannel.java
index f2f46424cfd5..06df99de1fe8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestIPCLoggerChannel.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestIPCLoggerChannel.java
@@ -24,12 +24,13 @@ import java.net.InetSocketAddress;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
 
+import org.apache.hadoop.metrics2.MetricsSource;
+import org.apache.hadoop.metrics2.MetricsSystem;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.qjournal.client.IPCLoggerChannel;
-import org.apache.hadoop.hdfs.qjournal.client.LoggerTooFarBehindException;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocol;
 import org.apache.hadoop.hdfs.qjournal.protocol.RequestInfo;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
@@ -178,4 +179,20 @@ public class TestIPCLoggerChannel {
 
     ch.sendEdits(3L, 3L, 1, FAKE_DATA).get();
   }
+
+  @Test
+  public void testMetricsRemovedOnClose() {
+    MetricsSystem metricsSystem = DefaultMetricsSystem.instance();
+    String sourceName = "IPCLoggerChannel-"
+        + FAKE_ADDR.getAddress().getHostAddress()
+        + "-" + FAKE_ADDR.getPort();
+    // Ensure the metrics exist
+    MetricsSource source = metricsSystem.getSource(sourceName);
+    assertNotNull(source);
+
+    ch.close();
+    // ensure the metrics are removed.
+    source = metricsSystem.getSource(sourceName);
+    assertNull(source);
+  }
 }

---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
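
Background note (not part of the commit): a minimal, self-contained sketch of the metrics2 register/unregister lifecycle the change above relies on. The ExampleSource class, the "ExampleApp" prefix, and the literal source name are illustrative assumptions; only register(name, null, source), unregisterSource(name), and the "IPCLoggerChannel-<ip>-<port>" naming come from the patch itself.

// Sketch only: shows why unregisterSource() matters. metrics2 refuses to register a
// second source under a name that is still registered, so a closed IPCLoggerChannel
// must unregister its metrics before a new channel can reuse the same name.
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;

public class MetricsLifecycleSketch {

  // Hypothetical annotated metrics source, standing in for IPCLoggerChannelMetrics.
  @Metrics(about = "Example metrics source", context = "dfs")
  static class ExampleSource {
    final MetricsRegistry registry = new MetricsRegistry("ExampleSource");
    @Metric("Example operation count") MutableCounterLong exampleOps;
  }

  public static void main(String[] args) {
    DefaultMetricsSystem.initialize("ExampleApp");

    // Hypothetical source name, following the "IPCLoggerChannel-<ip>-<port>" pattern
    // that the new testMetricsRemovedOnClose() checks.
    String name = "IPCLoggerChannel-127.0.0.1-8485";

    // First registration succeeds (mirrors IPCLoggerChannelMetrics.create()).
    DefaultMetricsSystem.instance().register(name, null, new ExampleSource());

    // Registering the same name again while it is still registered would fail,
    // which is what previously forced the static REGISTRY workaround. Unregistering
    // first (what IPCLoggerChannel.close() now triggers via metrics.unregister())
    // frees the name for the next channel writing to the same JournalNode.
    DefaultMetricsSystem.instance().unregisterSource(name);
    DefaultMetricsSystem.instance().register(name, null, new ExampleSource());

    DefaultMetricsSystem.shutdown();
  }
}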