This is an automated email from the ASF dual-hosted git repository.

sammichen pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new 91c409d501 HDDS-9334. Improve thread names in common/framework (#5396)
91c409d501 is described below

commit 91c409d501c842d29d5355287af16d32db23d310
Author: Doroszlai, Attila <[email protected]>
AuthorDate: Mon Oct 16 09:37:29 2023 +0200

    HDDS-9334. Improve thread names in common/framework (#5396)
---
 .../java/org/apache/hadoop/hdds/HddsUtils.java     |  7 +++++
 .../java/org/apache/hadoop/hdds/NodeDetails.java   |  6 ++++-
 .../hadoop/hdds/protocol/DatanodeDetails.java      |  9 +++++++
 .../hadoop/hdds/utils/BackgroundService.java       |  8 +++++-
 .../apache/hadoop/ozone/lease/LeaseManager.java    |  8 +++---
 .../apache/hadoop/ozone/HddsDatanodeService.java   |  4 +--
 .../common/impl/BlockDeletingService.java          | 31 +++++++++++-----------
 .../ozone/container/ozoneimpl/OzoneContainer.java  |  5 ++--
 .../container/common/TestBlockDeletingService.java |  3 ++-
 .../scm/update/client/CRLClientUpdateHandler.java  |  2 +-
 .../security/symmetric/DefaultSecretKeyClient.java |  4 +--
 .../symmetric/DefaultSecretKeySignerClient.java    | 11 ++++----
 .../client/CommonCertificateClient.java            |  4 ++-
 .../certificate/client/DNCertificateClient.java    |  2 +-
 .../client/DefaultCertificateClient.java           | 16 ++++++++---
 .../certificate/client/RootCaRotationPoller.java   |  7 ++---
 .../certificate/client/SCMCertificateClient.java   | 26 +++++++++++++-----
 .../hadoop/hdds/server/events/EventQueue.java      | 13 ++++++++-
 .../FixedThreadPoolWithAffinityExecutor.java       |  8 +++++-
 .../hdds/server/events/SingleThreadExecutor.java   |  5 ++--
 .../hadoop/hdds/utils/db/DBStoreBuilder.java       |  8 +++++-
 .../org/apache/hadoop/hdds/utils/db/RDBStore.java  |  6 +++--
 .../apache/hadoop/hdds/utils/db/TypedTable.java    |  9 ++++---
 .../hadoop/hdds/utils/db/cache/FullTableCache.java | 10 ++++---
 .../hdds/utils/db/cache/PartialTableCache.java     | 10 ++++---
 .../client/TestDefaultCertificateClient.java       |  6 +++--
 .../client/TestDnCertificateClientInit.java        |  5 +++-
 .../utils/TestRootCaRotationPoller.java            |  6 ++---
 .../apache/hadoop/hdds/utils/db/TestRDBStore.java  |  2 +-
 .../hadoop/hdds/utils/db/cache/TestTableCache.java |  4 +--
 .../hdds/scm/block/SCMBlockDeletingService.java    |  2 +-
 .../org/apache/hadoop/hdds/scm/ha/SCMContext.java  | 16 +++++++++--
 .../hdds/scm/server/StorageContainerManager.java   | 17 +++++++++---
 .../container/server/TestContainerServer.java      |  4 ++-
 .../org/apache/hadoop/ozone/om/OzoneManager.java   |  2 +-
 .../om/service/AbstractKeyDeletingService.java     |  3 ++-
 .../hadoop/ozone/security/OMCertificateClient.java |  4 ++-
 .../recon/security/ReconCertificateClient.java     |  2 +-
 .../ozone/recon/scm/TestReconPipelineManager.java  |  6 ++++-
 39 files changed, 211 insertions(+), 90 deletions(-)

diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
index ec1805d942..985b1b80ee 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
@@ -834,6 +834,13 @@ public final class HddsUtils {
     return sortedOzoneProps;
   }
 
+  @Nonnull
+  public static String threadNamePrefix(@Nullable String id) {
+    return id != null && !"".equals(id)
+        ? id + "-"
+        : "";
+  }
+
   /**
    * Execute some code and ensure thread name is not changed
    * (workaround for HADOOP-18433).
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/NodeDetails.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/NodeDetails.java
index 5c86b2e933..8349b12e6b 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/NodeDetails.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/NodeDetails.java
@@ -27,7 +27,7 @@ import java.net.InetSocketAddress;
  */
 public abstract class NodeDetails {
   private String serviceId;
-  private String nodeId;
+  private final String nodeId;
   private String hostAddress;
   private int rpcPort;
   private int ratisPort;
@@ -76,6 +76,10 @@ public abstract class NodeDetails {
     return nodeId;
   }
 
+  public String threadNamePrefix() {
+    return HddsUtils.threadNamePrefix(nodeId);
+  }
+
   public InetSocketAddress getRpcAddress() {
     if (rpcAddress == null) {
       rpcAddress = NetUtils.createSocketAddr(hostAddress, rpcPort);
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
index becb3f3311..37446241e4 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
@@ -28,6 +28,7 @@ import java.util.UUID;
 import com.fasterxml.jackson.annotation.JsonIgnore;
 import com.google.common.collect.ImmutableSet;
 import org.apache.hadoop.hdds.DatanodeVersion;
+import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.annotation.InterfaceAudience;
 import org.apache.hadoop.hdds.annotation.InterfaceStability;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails.Port.Name;
@@ -81,6 +82,7 @@ public class DatanodeDetails extends NodeImpl implements
    */
   private final UUID uuid;
   private final String uuidString;
+  private final String threadNamePrefix;
 
   private String ipAddress;
   private String hostName;
@@ -122,6 +124,7 @@ public class DatanodeDetails extends NodeImpl implements
     super(hostName, networkLocation, NetConstants.NODE_COST_DEFAULT);
     this.uuid = uuid;
     this.uuidString = uuid.toString();
+    threadNamePrefix = HddsUtils.threadNamePrefix(uuidString);
     this.ipAddress = ipAddress;
     this.hostName = hostName;
     this.ports = ports;
@@ -141,6 +144,7 @@ public class DatanodeDetails extends NodeImpl implements
         datanodeDetails.getCost());
     this.uuid = datanodeDetails.uuid;
     this.uuidString = uuid.toString();
+    threadNamePrefix = HddsUtils.threadNamePrefix(uuidString);
     this.ipAddress = datanodeDetails.ipAddress;
     this.hostName = datanodeDetails.hostName;
     this.ports = datanodeDetails.ports;
@@ -574,6 +578,11 @@ public class DatanodeDetails extends NodeImpl implements
     return new Builder();
   }
 
+  @JsonIgnore
+  public String threadNamePrefix() {
+    return threadNamePrefix;
+  }
+
   /**
    * Builder class for building DatanodeDetails.
    */
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundService.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundService.java
index 07be341cbb..c7edaaeeaa 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundService.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundService.java
@@ -53,6 +53,12 @@ public abstract class BackgroundService {
 
   public BackgroundService(String serviceName, long interval,
       TimeUnit unit, int threadPoolSize, long serviceTimeout) {
+    this(serviceName, interval, unit, threadPoolSize, serviceTimeout, "");
+  }
+
+  public BackgroundService(String serviceName, long interval,
+      TimeUnit unit, int threadPoolSize, long serviceTimeout,
+      String threadNamePrefix) {
     this.interval = interval;
     this.unit = unit;
     this.serviceName = serviceName;
@@ -62,7 +68,7 @@ public abstract class BackgroundService {
     ThreadFactory threadFactory = new ThreadFactoryBuilder()
         .setThreadFactory(r -> new Thread(threadGroup, r))
         .setDaemon(true)
-        .setNameFormat(serviceName + "#%d")
+        .setNameFormat(threadNamePrefix + serviceName + "#%d")
         .build();
     exec = (ScheduledThreadPoolExecutor) Executors.newScheduledThreadPool(
         threadPoolSize, threadFactory);
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManager.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManager.java
index ba86eefa79..f2bad543a9 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManager.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManager.java
@@ -63,7 +63,7 @@ public class LeaseManager<T> {
    *        Default timeout in milliseconds to be used for lease creation.
    */
   public LeaseManager(String name, long defaultTimeout) {
-    this.name = name;
+    this.name = name + "LeaseManager";
     this.defaultTimeout = defaultTimeout;
   }
 
@@ -71,11 +71,11 @@ public class LeaseManager<T> {
    * Starts the lease manager service.
    */
   public void start() {
-    LOG.debug("Starting {} LeaseManager service", name);
+    LOG.debug("Starting {} service", name);
     activeLeases = new ConcurrentHashMap<>();
     leaseMonitor = new LeaseMonitor();
     leaseMonitorThread = new Thread(leaseMonitor);
-    leaseMonitorThread.setName(name + "-LeaseManager#LeaseMonitor");
+    leaseMonitorThread.setName(name + "#LeaseMonitor");
     leaseMonitorThread.setDaemon(true);
     leaseMonitorThread.setUncaughtExceptionHandler((thread, throwable) -> {
       // Let us just restart this thread after logging an error.
@@ -84,7 +84,7 @@ public class LeaseManager<T> {
           thread.toString(), throwable);
       leaseMonitorThread.start();
     });
-    LOG.debug("Starting {}-LeaseManager#LeaseMonitor Thread", name);
+    LOG.debug("Starting {} Thread", leaseMonitorThread.getName());
     leaseMonitorThread.start();
     isRunning = true;
   }
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
index fca42820f0..4b79907620 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
@@ -281,8 +281,8 @@ public class HddsDatanodeService extends GenericCli 
implements ServicePlugin {
         if (secConf.isTokenEnabled()) {
           SecretKeyProtocol secretKeyProtocol =
               HddsServerUtil.getSecretKeyClientForDatanode(conf);
-          secretKeyClient = DefaultSecretKeyClient.create(conf,
-              secretKeyProtocol);
+          secretKeyClient = DefaultSecretKeyClient.create(
+              conf, secretKeyProtocol, "");
           secretKeyClient.start(conf);
         }
       }
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/BlockDeletingService.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/BlockDeletingService.java
index d67840cd5a..8c090713de 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/BlockDeletingService.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/BlockDeletingService.java
@@ -18,7 +18,6 @@ package org.apache.hadoop.ozone.container.common.impl;
 
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.conf.ReconfigurationHandler;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
@@ -67,22 +66,22 @@ public class BlockDeletingService extends BackgroundService 
{
   private final Duration blockDeletingMaxLockHoldingTime;
 
   @VisibleForTesting
-  public BlockDeletingService(OzoneContainer ozoneContainer,
-                              long serviceInterval, long serviceTimeout,
-                              TimeUnit timeUnit, int workerSize,
-                              ConfigurationSource conf) {
-    this(ozoneContainer, serviceInterval, serviceTimeout, timeUnit,
-        workerSize, conf, new ReconfigurationHandler(
-            "DN", (OzoneConfiguration) conf, op -> { }));
+  public BlockDeletingService(
+      OzoneContainer ozoneContainer, long serviceInterval, long serviceTimeout,
+      TimeUnit timeUnit, int workerSize, ConfigurationSource conf
+  ) {
+    this(ozoneContainer, serviceInterval, serviceTimeout, timeUnit, workerSize,
+        conf, "", null);
   }
 
-  public BlockDeletingService(OzoneContainer ozoneContainer,
-                              long serviceInterval, long serviceTimeout,
-                              TimeUnit timeUnit, int workerSize,
-                              ConfigurationSource conf,
-                              ReconfigurationHandler reconfigurationHandler) {
+  @SuppressWarnings("checkstyle:parameternumber")
+  public BlockDeletingService(
+      OzoneContainer ozoneContainer, long serviceInterval, long serviceTimeout,
+      TimeUnit timeUnit, int workerSize, ConfigurationSource conf,
+      String threadNamePrefix, ReconfigurationHandler reconfigurationHandler
+  ) {
     super("BlockDeletingService", serviceInterval, timeUnit,
-        workerSize, serviceTimeout);
+        workerSize, serviceTimeout, threadNamePrefix);
     this.ozoneContainer = ozoneContainer;
     try {
       containerDeletionPolicy = conf.getClass(
@@ -94,7 +93,9 @@ public class BlockDeletingService extends BackgroundService {
     }
     this.conf = conf;
     dnConf = conf.getObject(DatanodeConfiguration.class);
-    reconfigurationHandler.register(dnConf);
+    if (reconfigurationHandler != null) {
+      reconfigurationHandler.register(dnConf);
+    }
     this.blockDeletingMaxLockHoldingTime =
         dnConf.getBlockDeletingMaxLockHoldingTime();
     metrics = BlockDeletingServiceMetrics.create();
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
index a1559d6885..404297270a 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
@@ -229,8 +229,9 @@ public class OzoneContainer {
     blockDeletingService =
         new BlockDeletingService(this, blockDeletingSvcInterval.toMillis(),
             blockDeletingServiceTimeout, TimeUnit.MILLISECONDS,
-            blockDeletingServiceWorkerSize, config, context.getParent()
-            .getReconfigurationHandler());
+            blockDeletingServiceWorkerSize, config,
+            datanodeDetails.threadNamePrefix(),
+            context.getParent().getReconfigurationHandler());
 
     Duration recoveringContainerScrubbingSvcInterval = conf.getObject(
         DatanodeConfiguration.class).getRecoveringContainerScrubInterval();
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
index 6efc8913ad..694e6637df 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.ReconfigurationHandler;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
@@ -863,7 +864,7 @@ public class TestBlockDeletingService {
     timeout  = 0;
     svc = new BlockDeletingService(ozoneContainer,
         TimeUnit.MILLISECONDS.toNanos(1000), timeout, TimeUnit.MILLISECONDS,
-        10, conf);
+        10, conf, "", mock(ReconfigurationHandler.class));
     svc.start();
 
     // get container meta data
diff --git 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/CRLClientUpdateHandler.java
 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/CRLClientUpdateHandler.java
index 72da5194c3..6723b2fd2b 100644
--- 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/CRLClientUpdateHandler.java
+++ 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/CRLClientUpdateHandler.java
@@ -71,7 +71,7 @@ public class CRLClientUpdateHandler implements 
ClientUpdateHandler {
     LOG.info("Pending CRL check interval : {}s", crlCheckInterval / 1000);
     this.executorService = Executors.newSingleThreadScheduledExecutor(
         new ThreadFactoryBuilder().setDaemon(true)
-            .setNameFormat("CRLUpdateHandler Thread - %d").build());
+            .setNameFormat(clientId + "-CRLUpdateHandler-%d").build());
   }
 
   public static Logger getLog() {
diff --git 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/DefaultSecretKeyClient.java
 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/DefaultSecretKeyClient.java
index 030b0c7b68..722dbb2daf 100644
--- 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/DefaultSecretKeyClient.java
+++ 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/DefaultSecretKeyClient.java
@@ -61,10 +61,10 @@ public class DefaultSecretKeyClient implements 
SecretKeyClient {
   }
 
   public static SecretKeyClient create(ConfigurationSource conf,
-      SecretKeyProtocol secretKeyProtocol)
+      SecretKeyProtocol secretKeyProtocol, String threadNamePrefix)
       throws IOException {
     SecretKeySignerClient singerClient =
-        new DefaultSecretKeySignerClient(secretKeyProtocol);
+        new DefaultSecretKeySignerClient(secretKeyProtocol, threadNamePrefix);
     SecretKeyVerifierClient verifierClient =
         new DefaultSecretKeyVerifierClient(secretKeyProtocol, conf);
     return new DefaultSecretKeyClient(singerClient, verifierClient);
diff --git 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/DefaultSecretKeySignerClient.java
 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/DefaultSecretKeySignerClient.java
index f9358a1422..2bbff9480a 100644
--- 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/DefaultSecretKeySignerClient.java
+++ 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/DefaultSecretKeySignerClient.java
@@ -53,11 +53,16 @@ public class DefaultSecretKeySignerClient implements 
SecretKeySignerClient {
   private final SecretKeyProtocol secretKeyProtocol;
   private final AtomicReference<ManagedSecretKey> cache =
       new AtomicReference<>();
+  private final ThreadFactory threadFactory;
   private ScheduledExecutorService executorService;
 
   public DefaultSecretKeySignerClient(
-      SecretKeyProtocol secretKeyProtocol) {
+      SecretKeyProtocol secretKeyProtocol, String threadNamePrefix) {
     this.secretKeyProtocol = secretKeyProtocol;
+    threadFactory = new ThreadFactoryBuilder()
+        .setNameFormat(threadNamePrefix + "SecretKeyPoller")
+        .setDaemon(true)
+        .build();
   }
 
   @Override
@@ -138,10 +143,6 @@ public class DefaultSecretKeySignerClient implements 
SecretKeySignerClient {
                                        Instant initialCreation) {
     Duration rotateDuration = SecretKeyConfig.parseRotateDuration(conf);
     Instant nextRotate = initialCreation.plus(rotateDuration);
-    ThreadFactory threadFactory = new ThreadFactoryBuilder()
-        .setNameFormat("SecretKeyPoller")
-        .setDaemon(true)
-        .build();
     executorService = Executors.newScheduledThreadPool(1, threadFactory);
     Duration interval = SecretKeyConfig.parseRotateCheckDuration(conf);
     Duration initialDelay = Duration.between(Instant.now(), nextRotate);
diff --git 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/CommonCertificateClient.java
 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/CommonCertificateClient.java
index a09eb05dc6..5882bdb611 100644
--- 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/CommonCertificateClient.java
+++ 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/CommonCertificateClient.java
@@ -37,17 +37,19 @@ public abstract class CommonCertificateClient extends 
DefaultCertificateClient {
 
   private final Logger log;
 
+  @SuppressWarnings("checkstyle:ParameterNumber")
   public CommonCertificateClient(
       SecurityConfig securityConfig,
       SCMSecurityProtocolClientSideTranslatorPB scmSecurityClient,
       Logger log,
       String certSerialId,
       String component,
+      String threadNamePrefix,
       Consumer<String> saveCertIdCallback,
       Runnable shutdownCallback
   ) {
     super(securityConfig, scmSecurityClient, log, certSerialId, component,
-        saveCertIdCallback, shutdownCallback);
+        threadNamePrefix, saveCertIdCallback, shutdownCallback);
     this.log = log;
   }
 
diff --git 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DNCertificateClient.java
 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DNCertificateClient.java
index 27b2da5758..ca56325e69 100644
--- 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DNCertificateClient.java
+++ 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DNCertificateClient.java
@@ -58,7 +58,7 @@ public class DNCertificateClient extends 
DefaultCertificateClient {
       Runnable shutdown
   ) {
     super(securityConfig, scmSecurityClient, LOG, certSerialId, COMPONENT_NAME,
-        saveCertId, shutdown);
+        datanodeDetails.threadNamePrefix(), saveCertId, shutdown);
     this.dn = datanodeDetails;
   }
 
diff --git 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java
 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java
index 698b44ac9a..ef538d3f0a 100644
--- 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java
+++ 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java
@@ -121,6 +121,7 @@ public abstract class DefaultCertificateClient implements 
CertificateClient {
   private String caCertId;
   private String rootCaCertId;
   private String component;
+  private final String threadNamePrefix;
   private List<String> pemEncodedCACerts = null;
   private Lock pemEncodedCACertsLock = new ReentrantLock();
   private KeyStoresFactory serverKeyStoresFactory;
@@ -133,12 +134,14 @@ public abstract class DefaultCertificateClient implements 
CertificateClient {
   private final Set<CertificateNotification> notificationReceivers;
   private RootCaRotationPoller rootCaRotationPoller;
 
+  @SuppressWarnings("checkstyle:ParameterNumber")
   protected DefaultCertificateClient(
       SecurityConfig securityConfig,
       SCMSecurityProtocolClientSideTranslatorPB scmSecurityClient,
       Logger log,
       String certSerialId,
       String component,
+      String threadNamePrefix,
       Consumer<String> saveCertId,
       Runnable shutdown) {
     Objects.requireNonNull(securityConfig);
@@ -148,6 +151,7 @@ public abstract class DefaultCertificateClient implements 
CertificateClient {
     this.logger = log;
     this.certificateMap = new ConcurrentHashMap<>();
     this.component = component;
+    this.threadNamePrefix = threadNamePrefix;
     this.certIdSaveCallback = saveCertId;
     this.shutdownCallback = shutdown;
     this.notificationReceivers = new HashSet<>();
@@ -193,10 +197,15 @@ public abstract class DefaultCertificateClient implements 
CertificateClient {
     }
   }
 
+  protected String threadNamePrefix() {
+    return threadNamePrefix;
+  }
+
   private void startRootCaRotationPoller() {
     if (rootCaRotationPoller == null) {
       rootCaRotationPoller = new RootCaRotationPoller(securityConfig,
-          new HashSet<>(rootCaCertificates), scmSecurityClient);
+          new HashSet<>(rootCaCertificates), scmSecurityClient,
+          threadNamePrefix);
       rootCaRotationPoller.addRootCARotationProcessor(
           this::getRootCaRotationListener);
       rootCaRotationPoller.run();
@@ -1336,8 +1345,9 @@ public abstract class DefaultCertificateClient implements 
CertificateClient {
 
     if (executorService == null) {
       executorService = Executors.newScheduledThreadPool(1,
-          new ThreadFactoryBuilder().setNameFormat(
-                  getComponentName() + "-CertificateRenewerService")
+          new ThreadFactoryBuilder()
+              .setNameFormat(threadNamePrefix + getComponentName()
+                  + "-CertificateRenewerService")
               .setDaemon(true).build());
     }
     this.executorService.scheduleAtFixedRate(
diff --git 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/RootCaRotationPoller.java
 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/RootCaRotationPoller.java
index 2656eef92d..24f47ed499 100644
--- 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/RootCaRotationPoller.java
+++ 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/RootCaRotationPoller.java
@@ -60,12 +60,13 @@ public class RootCaRotationPoller implements Runnable, 
Closeable {
 
   public RootCaRotationPoller(SecurityConfig securityConfig,
       Set<X509Certificate> initiallyKnownRootCaCerts,
-      SCMSecurityProtocolClientSideTranslatorPB scmSecureClient) {
+      SCMSecurityProtocolClientSideTranslatorPB scmSecureClient,
+      String threadNamePrefix) {
     this.scmSecureClient = scmSecureClient;
     this.knownRootCerts = initiallyKnownRootCaCerts;
     poller = Executors.newScheduledThreadPool(1,
-        new ThreadFactoryBuilder().setNameFormat(
-                this.getClass().getSimpleName())
+        new ThreadFactoryBuilder()
+            .setNameFormat(threadNamePrefix + getClass().getSimpleName())
             .setDaemon(true).build());
     pollingInterval = securityConfig.getRootCaCertificatePollingInterval();
     rootCARotationProcessors = new ArrayList<>();
diff --git 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/SCMCertificateClient.java
 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/SCMCertificateClient.java
index 357001b44b..63fe9784cd 100644
--- 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/SCMCertificateClient.java
+++ 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/SCMCertificateClient.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hdds.security.x509.certificate.client;
 
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.apache.hadoop.hdds.HddsUtils;
 import 
org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertResponseProto;
 import 
org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdds.security.SecurityConfig;
@@ -71,8 +72,16 @@ public class SCMCertificateClient extends 
DefaultCertificateClient {
   public SCMCertificateClient(SecurityConfig securityConfig,
       SCMSecurityProtocolClientSideTranslatorPB scmClient,
       String scmId, String clusterId, String scmCertId, String hostname) {
-    super(securityConfig, scmClient, LOG, scmCertId,
-        COMPONENT_NAME, null, null);
+    this(securityConfig, scmClient, scmId, clusterId, scmCertId, hostname,
+        COMPONENT_NAME);
+  }
+
+  private SCMCertificateClient(SecurityConfig securityConfig,
+      SCMSecurityProtocolClientSideTranslatorPB scmClient,
+      String scmId, String clusterId, String scmCertId, String hostname,
+      String component) {
+    super(securityConfig, scmClient, LOG, scmCertId, component,
+        HddsUtils.threadNamePrefix(scmId), null, null);
     this.scmId = scmId;
     this.cId = clusterId;
     this.scmHostname = hostname;
@@ -82,16 +91,18 @@ public class SCMCertificateClient extends 
DefaultCertificateClient {
       SecurityConfig securityConfig,
       SCMSecurityProtocolClientSideTranslatorPB scmClient,
       String certSerialId) {
-    super(securityConfig, scmClient, LOG, certSerialId,
-        COMPONENT_NAME, null, null);
+    this(securityConfig, scmClient, null, null, certSerialId, null,
+        COMPONENT_NAME);
   }
 
   public SCMCertificateClient(
       SecurityConfig securityConfig,
       SCMSecurityProtocolClientSideTranslatorPB scmClient,
       String certSerialId,
+      String scmId,
       String component) {
-    super(securityConfig, scmClient, LOG, certSerialId, component, null, null);
+    this(securityConfig, scmClient, scmId, null, certSerialId, null,
+        component);
   }
 
   @Override
@@ -239,8 +250,9 @@ public class SCMCertificateClient extends 
DefaultCertificateClient {
   public void refreshCACertificates() throws IOException {
     if (executorService == null) {
       executorService = Executors.newSingleThreadExecutor(
-          new ThreadFactoryBuilder().setNameFormat(
-                  getComponentName() + "-refreshCACertificates")
+          new ThreadFactoryBuilder()
+              .setNameFormat(threadNamePrefix() + getComponentName()
+                  + "-refreshCACertificates")
               .setDaemon(true).build());
     }
     executorService.execute(new RefreshCACertificates(getScmSecureClient()));
diff --git 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
index a0b8ac9553..af9d348615 100644
--- 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
+++ 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
@@ -66,6 +66,15 @@ public class EventQueue implements EventPublisher, 
AutoCloseable {
           .create();
 
   private boolean isSilent = false;
+  private final String threadNamePrefix;
+
+  public EventQueue() {
+    threadNamePrefix = "";
+  }
+
+  public EventQueue(String threadNamePrefix) {
+    this.threadNamePrefix = threadNamePrefix;
+  }
 
   // The field parent in DatanodeDetails class has the circular reference
   // which will result in Gson infinite recursive parsing. We need to exclude
@@ -101,7 +110,9 @@ public class EventQueue implements EventPublisher, AutoCloseable {
     Preconditions.checkNotNull(handler, "Handler should not be null.");
     validateEvent(event);
     String executorName = getExecutorName(event, handler);
-    this.addHandler(event, new SingleThreadExecutor<>(executorName), handler);
+    SingleThreadExecutor<PAYLOAD> executor =
+        new SingleThreadExecutor<>(executorName, threadNamePrefix);
+    this.addHandler(event, executor, handler);
   }
 
   /**
diff --git 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/FixedThreadPoolWithAffinityExecutor.java
 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/FixedThreadPoolWithAffinityExecutor.java
index f53bce533d..4669b0520f 100644
--- 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/FixedThreadPoolWithAffinityExecutor.java
+++ 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/FixedThreadPoolWithAffinityExecutor.java
@@ -141,12 +141,18 @@ public class FixedThreadPoolWithAffinityExecutor<P, Q>
 
   public static <Q> List<ThreadPoolExecutor> initializeExecutorPool(
       List<BlockingQueue<Q>> workQueues) {
+    return initializeExecutorPool("", workQueues);
+  }
+
+  public static <Q> List<ThreadPoolExecutor> initializeExecutorPool(
+      String threadNamePrefix, List<BlockingQueue<Q>> workQueues) {
     List<ThreadPoolExecutor> executors = new ArrayList<>();
     for (int i = 0; i < workQueues.size(); ++i) {
       LinkedBlockingQueue<Runnable> poolQueue = new LinkedBlockingQueue<>(1);
       ThreadFactory threadFactory = new ThreadFactoryBuilder()
           .setDaemon(true)
-          .setNameFormat("FixedThreadPoolWithAffinityExecutor-" + i + "-%d")
+          .setNameFormat(threadNamePrefix
+              + "FixedThreadPoolWithAffinityExecutor-" + i + "-%d")
           .build();
       executors.add(new
           ThreadPoolExecutor(
diff --git 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/SingleThreadExecutor.java
 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/SingleThreadExecutor.java
index bc8f7425b6..029387f9ef 100644
--- 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/SingleThreadExecutor.java
+++ 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/SingleThreadExecutor.java
@@ -60,9 +60,10 @@ public class SingleThreadExecutor<P> implements EventExecutor<P> {
   /**
    * Create SingleThreadExecutor.
    *
+   * @param threadNamePrefix prefix prepended to thread names
    * @param name Unique name used in monitoring and metrics.
    */
-  public SingleThreadExecutor(String name) {
+  public SingleThreadExecutor(String name, String threadNamePrefix) {
     this.name = name;
     MetricsUtil.registerDynamic(this, EVENT_QUEUE + name,
         "Event Executor metrics ", "EventQueue");
@@ -70,7 +71,7 @@ public class SingleThreadExecutor<P> implements EventExecutor<P> {
     executor = Executors.newSingleThreadExecutor(
         runnable -> {
           Thread thread = new Thread(runnable);
-          thread.setName(EVENT_QUEUE + "-" + name);
+          thread.setName(threadNamePrefix + EVENT_QUEUE + "-" + name);
           return thread;
         });
   }
diff --git 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
index 7e7bdb0ecc..1a2bc94a47 100644
--- 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
+++ 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
@@ -106,6 +106,7 @@ public final class DBStoreBuilder {
   // number in request to avoid increase in heap memory.
   private long maxDbUpdatesSizeThreshold;
   private Integer maxNumberOfOpenFiles = null;
+  private String threadNamePrefix = "";
 
   /**
    * Create DBStoreBuilder from a generic DBDefinition.
@@ -219,7 +220,7 @@ public final class DBStoreBuilder {
       return new RDBStore(dbFile, rocksDBOption, writeOptions, tableConfigs,
           registry.build(), openReadOnly, maxFSSnapshots, dbJmxBeanNameName,
           enableCompactionDag, maxDbUpdatesSizeThreshold, createCheckpointDirs,
-          configuration);
+          configuration, threadNamePrefix);
     } finally {
       tableConfigs.forEach(TableConfig::close);
     }
@@ -304,6 +305,11 @@ public final class DBStoreBuilder {
     return this;
   }
 
+  public DBStoreBuilder setThreadNamePrefix(String prefix) {
+    this.threadNamePrefix = prefix;
+    return this;
+  }
+
   /**
    * Converts column families and their corresponding options that have been
    * registered with the builder to a set of {@link TableConfig} objects.
diff --git 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
index 771b18760a..a091c475bf 100644
--- 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
+++ 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
@@ -78,6 +78,7 @@ public class RDBStore implements DBStore {
   // number in request to avoid increase in heap memory.
   private final long maxDbUpdatesSizeThreshold;
   private final ManagedDBOptions dbOptions;
+  private final String threadNamePrefix;
 
   @SuppressWarnings("parameternumber")
   public RDBStore(File dbFile, ManagedDBOptions dbOptions,
@@ -86,9 +87,10 @@ public class RDBStore implements DBStore {
                   String dbJmxBeanName, boolean enableCompactionDag,
                   long maxDbUpdatesSizeThreshold,
                   boolean createCheckpointDirs,
-                  ConfigurationSource configuration)
+                  ConfigurationSource configuration, String threadNamePrefix)
 
       throws IOException {
+    this.threadNamePrefix = threadNamePrefix;
     Preconditions.checkNotNull(dbFile, "DB file location cannot be null");
     Preconditions.checkNotNull(families);
     Preconditions.checkArgument(!families.isEmpty());
@@ -298,7 +300,7 @@ public class RDBStore implements DBStore {
       Class<K> keyType, Class<V> valueType,
       TableCache.CacheType cacheType) throws IOException {
     return new TypedTable<>(getTable(name), codecRegistry, keyType,
-        valueType, cacheType);
+        valueType, cacheType, threadNamePrefix);
   }
 
   @Override
diff --git 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java
 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java
index e55d841538..e668413a83 100644
--- 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java
+++ 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java
@@ -76,7 +76,7 @@ public class TypedTable<KEY, VALUE> implements Table<KEY, VALUE> {
       CodecRegistry codecRegistry, Class<KEY> keyType,
       Class<VALUE> valueType) throws IOException {
     this(rawTable, codecRegistry, keyType, valueType,
-        CacheType.PARTIAL_CACHE);
+        CacheType.PARTIAL_CACHE, "");
   }
 
   /**
@@ -87,12 +87,13 @@ public class TypedTable<KEY, VALUE> implements Table<KEY, VALUE> {
    * @param keyType The key type.
    * @param valueType The value type.
    * @param cacheType How to cache the entries?
+   * @param threadNamePrefix
    * @throws IOException if failed to iterate the raw table.
    */
   public TypedTable(RDBTable rawTable,
       CodecRegistry codecRegistry, Class<KEY> keyType,
       Class<VALUE> valueType,
-      CacheType cacheType) throws IOException {
+      CacheType cacheType, String threadNamePrefix) throws IOException {
     this.rawTable = Objects.requireNonNull(rawTable, "rawTable==null");
     Objects.requireNonNull(codecRegistry, "codecRegistry == null");
 
@@ -108,7 +109,7 @@ public class TypedTable<KEY, VALUE> implements Table<KEY, VALUE> {
         && valueCodec.supportCodecBuffer();
 
     if (cacheType == CacheType.FULL_CACHE) {
-      cache = new FullTableCache<>();
+      cache = new FullTableCache<>(threadNamePrefix);
       //fill cache
       try (TableIterator<KEY, ? extends KeyValue<KEY, VALUE>> tableIterator =
               iterator()) {
@@ -124,7 +125,7 @@ public class TypedTable<KEY, VALUE> implements Table<KEY, VALUE> {
         }
       }
     } else {
-      cache = new PartialTableCache<>();
+      cache = new PartialTableCache<>(threadNamePrefix);
     }
   }
 
diff --git 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/cache/FullTableCache.java
 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/cache/FullTableCache.java
index f948fde207..e99fcb4165 100644
--- 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/cache/FullTableCache.java
+++ 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/cache/FullTableCache.java
@@ -61,7 +61,7 @@ public class FullTableCache<KEY, VALUE> implements TableCache<KEY, VALUE> {
   private final CacheStatsRecorder statsRecorder;
 
 
-  public FullTableCache() {
+  public FullTableCache(String threadNamePrefix) {
     // As for full table cache only we need elements to be inserted in sorted
     // manner, so that list will be easy. But look ups have log(N) time
     // complexity.
@@ -78,9 +78,11 @@ public class FullTableCache<KEY, VALUE> implements TableCache<KEY, VALUE> {
 
     // Created a singleThreadExecutor, so one cleanup will be running at a
     // time.
-    ThreadFactory build = new ThreadFactoryBuilder().setDaemon(true)
-        .setNameFormat("FullTableCache Cleanup Thread - %d").build();
-    executorService = Executors.newSingleThreadExecutor(build);
+    ThreadFactory threadFactory = new ThreadFactoryBuilder()
+        .setDaemon(true)
+        .setNameFormat(threadNamePrefix + "FullTableCache-Cleanup-%d")
+        .build();
+    executorService = Executors.newSingleThreadExecutor(threadFactory);
 
     statsRecorder = new CacheStatsRecorder();
   }
diff --git 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/cache/PartialTableCache.java
 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/cache/PartialTableCache.java
index 4b297aba18..c40c4804b5 100644
--- 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/cache/PartialTableCache.java
+++ 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/cache/PartialTableCache.java
@@ -58,7 +58,7 @@ public class PartialTableCache<KEY, VALUE> implements TableCache<KEY, VALUE> {
   private final CacheStatsRecorder statsRecorder;
 
 
-  public PartialTableCache() {
+  public PartialTableCache(String threadNamePrefix) {
     // We use concurrent Hash map for O(1) lookup for get API.
     // During list operation for partial cache we anyway merge between DB and
     // cache state. So entries in cache does not need to be in sorted order.
@@ -77,9 +77,11 @@ public class PartialTableCache<KEY, VALUE> implements TableCache<KEY, VALUE> {
     epochEntries = new ConcurrentSkipListMap<>();
     // Created a singleThreadExecutor, so one cleanup will be running at a
     // time.
-    ThreadFactory build = new ThreadFactoryBuilder().setDaemon(true)
-        .setNameFormat("PartialTableCache Cleanup Thread - %d").build();
-    executorService = Executors.newSingleThreadExecutor(build);
+    ThreadFactory threadFactory = new ThreadFactoryBuilder()
+        .setDaemon(true)
+        .setNameFormat(threadNamePrefix + "PartialTableCache-Cleanup-%d")
+        .build();
+    executorService = Executors.newSingleThreadExecutor(threadFactory);
 
     statsRecorder = new CacheStatsRecorder();
   }
diff --git 
a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDefaultCertificateClient.java
 
b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDefaultCertificateClient.java
index f010b7039c..b22dea8e9c 100644
--- 
a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDefaultCertificateClient.java
+++ 
b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDefaultCertificateClient.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hdds.security.x509.certificate.client;
 
 import org.apache.hadoop.hdds.HddsConfigKeys;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertResponseProto;
 import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolClientSideTranslatorPB;
@@ -321,7 +322,8 @@ public class TestDefaultCertificateClient {
     if (dnCertClient != null) {
       dnCertClient.close();
     }
-    dnCertClient = new DNCertificateClient(dnSecurityConfig, null, null,
+    DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails();
+    dnCertClient = new DNCertificateClient(dnSecurityConfig, null, dn,
         certSerialId, null, null);
 
     assertNotNull(dnCertClient.getCertificate(cert1.getSerialNumber()
@@ -563,7 +565,7 @@ public class TestDefaultCertificateClient {
     Logger logger = mock(Logger.class);
     String certId = cert.getSerialNumber().toString();
     DefaultCertificateClient client = new DefaultCertificateClient(
-        conf, null, logger, certId, compName, null, null
+        conf, null, logger, certId, compName, "", null, null
     ) {
 
       @Override
diff --git 
a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDnCertificateClientInit.java
 
b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDnCertificateClientInit.java
index ad8a9578a4..18cf33131c 100644
--- 
a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDnCertificateClientInit.java
+++ 
b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDnCertificateClientInit.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.hdds.security.x509.certificate.client;
 
 import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
 import org.apache.hadoop.hdds.security.SecurityConfig;
 import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec;
 import org.apache.hadoop.hdds.security.x509.keys.HDDSKeyGenerator;
@@ -92,9 +94,10 @@ public class TestDnCertificateClientInit {
     keyPair = keyGenerator.generateKey();
     x509Certificate = getX509Certificate();
     certSerialId = x509Certificate.getSerialNumber().toString();
+    DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails();
     dnCertificateClient =
         new DNCertificateClient(
-            securityConfig, null, null, certSerialId, null, null);
+            securityConfig, null, dn, certSerialId, null, null);
     dnKeyCodec = new KeyCodec(securityConfig, DN_COMPONENT);
 
     Files.createDirectories(securityConfig.getKeyLocation(DN_COMPONENT));
diff --git 
a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/TestRootCaRotationPoller.java
 
b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/TestRootCaRotationPoller.java
index 90ac9daac5..f0b81c7d6e 100644
--- 
a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/TestRootCaRotationPoller.java
+++ 
b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/TestRootCaRotationPoller.java
@@ -76,7 +76,7 @@ public class TestRootCaRotationPoller {
     List<String> certsFromScm = new ArrayList<>();
     certsFromScm.add(CertificateCodec.getPEMEncodedString(knownCert));
     RootCaRotationPoller poller = new RootCaRotationPoller(secConf,
-        knownCerts, scmSecurityClient);
+        knownCerts, scmSecurityClient, "");
 
     Mockito.when(scmSecurityClient.getAllRootCaCertificates())
         .thenReturn(certsFromScm);
@@ -105,7 +105,7 @@ public class TestRootCaRotationPoller {
     certsFromScm.add(CertificateCodec.getPEMEncodedString(knownCert));
     certsFromScm.add(CertificateCodec.getPEMEncodedString(newRootCa));
     RootCaRotationPoller poller = new RootCaRotationPoller(secConf,
-        knownCerts, scmSecurityClient);
+        knownCerts, scmSecurityClient, "");
     poller.run();
     Mockito.when(scmSecurityClient.getAllRootCaCertificates())
         .thenReturn(certsFromScm);
@@ -132,7 +132,7 @@ public class TestRootCaRotationPoller {
     certsFromScm.add(CertificateCodec.getPEMEncodedString(knownCert));
     certsFromScm.add(CertificateCodec.getPEMEncodedString(newRootCa));
     RootCaRotationPoller poller = new RootCaRotationPoller(secConf,
-        knownCerts, scmSecurityClient);
+        knownCerts, scmSecurityClient, "");
     poller.run();
     Mockito.when(scmSecurityClient.getAllRootCaCertificates())
         .thenReturn(certsFromScm);
diff --git 
a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java
 
b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java
index b0a71cea91..4c9b29a995 100644
--- 
a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java
+++ 
b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java
@@ -60,7 +60,7 @@ public class TestRDBStore {
       throws IOException {
     return new RDBStore(dbFile, options, new ManagedWriteOptions(), families,
         CodecRegistry.newBuilder().build(), false, 1000, null, false,
-        maxDbUpdatesSizeThreshold, true, null);
+        maxDbUpdatesSizeThreshold, true, null, "");
   }
 
   public static final int MAX_DB_UPDATES_SIZE_THRESHOLD = 80;
diff --git 
a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/cache/TestTableCache.java
 
b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/cache/TestTableCache.java
index 5651ba8163..f22413add8 100644
--- 
a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/cache/TestTableCache.java
+++ 
b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/cache/TestTableCache.java
@@ -45,9 +45,9 @@ public class TestTableCache {
 
   private void createTableCache(TableCache.CacheType cacheType) {
     if (cacheType == TableCache.CacheType.FULL_CACHE) {
-      tableCache = new FullTableCache<>();
+      tableCache = new FullTableCache<>("");
     } else {
-      tableCache = new PartialTableCache<>();
+      tableCache = new PartialTableCache<>("");
     }
   }
 
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java
index 8480c63966..16bfed2bba 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java
@@ -107,7 +107,7 @@ public class SCMBlockDeletingService extends BackgroundService
         TimeUnit.MILLISECONDS, BLOCK_DELETING_SERVICE_CORE_POOL_SIZE,
         conf.getTimeDuration(OZONE_BLOCK_DELETING_SERVICE_TIMEOUT,
             OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT,
-            TimeUnit.MILLISECONDS));
+            TimeUnit.MILLISECONDS), scmContext.threadNamePrefix());
 
     this.safemodeExitRunDelayMillis = conf.getTimeDuration(
         HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT,
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMContext.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMContext.java
index de15514839..08ee20f5af 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMContext.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMContext.java
@@ -49,6 +49,7 @@ public final class SCMContext {
    * term equals INVALID_TERM indicates current SCM is running without Ratis.
    */
   public static final long INVALID_TERM = -1;
+  private final String threadNamePrefix;
 
   /**
    * Used by non-HA mode SCM, Recon and Unit Tests.
@@ -80,13 +81,14 @@ public final class SCMContext {
   private SCMContext(boolean isLeader, long term,
       final SafeModeStatus safeModeStatus,
       final FinalizationCheckpoint finalizationCheckpoint,
-      final OzoneStorageContainerManager scm) {
+      final OzoneStorageContainerManager scm, String threadNamePrefix) {
     this.isLeader = isLeader;
     this.term = term;
     this.safeModeStatus = safeModeStatus;
     this.finalizationCheckpoint = finalizationCheckpoint;
     this.scm = scm;
     this.isLeaderReady = false;
+    this.threadNamePrefix = threadNamePrefix;
   }
 
   /**
@@ -266,6 +268,10 @@ public final class SCMContext {
     return scm;
   }
 
+  public String threadNamePrefix() {
+    return threadNamePrefix;
+  }
+
   /**
    * Builder for SCMContext.
    */
@@ -280,6 +286,7 @@ public final class SCMContext {
     private boolean isPreCheckComplete = true;
     private OzoneStorageContainerManager scm = null;
     private FinalizationCheckpoint finalizationCheckpoint;
+    private String threadNamePrefix = "";
 
     public Builder setLeader(boolean leader) {
       this.isLeader = leader;
@@ -313,6 +320,11 @@ public final class SCMContext {
       return this;
     }
 
+    public SCMContext.Builder setThreadNamePrefix(String prefix) {
+      this.threadNamePrefix = prefix;
+      return this;
+    }
+
     public SCMContext build() {
       Preconditions.checkNotNull(scm, "scm == null");
       return buildMaybeInvalid();
@@ -329,7 +341,7 @@ public final class SCMContext {
           new SafeModeStatus(isInSafeMode, isPreCheckComplete),
           Optional.ofNullable(finalizationCheckpoint).orElse(
               FinalizationCheckpoint.FINALIZATION_COMPLETE),
-          scm);
+          scm, threadNamePrefix);
     }
   }
 }
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index 6e038cd55a..8ecd5ed480 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -306,6 +306,7 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
   private SecurityConfig securityConfig;
 
   private final SCMHANodeDetails scmHANodeDetails;
+  private final String threadNamePrefix;
 
   private final ContainerBalancer containerBalancer;
   // MoveManager is used by ContainerBalancer to schedule container moves
@@ -371,6 +372,7 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
           "failure.", ResultCodes.SCM_NOT_INITIALIZED);
     }
 
+    threadNamePrefix = getScmNodeDetails().threadNamePrefix();
     primaryScmNodeId = scmStorageConfig.getPrimaryScmNodeId();
 
     jvmPauseMonitor = !ratisEnabled ? newJvmPauseMonitor(getScmId()) : null;
@@ -390,7 +392,7 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
     // A valid pointer to the store is required by all the other services below.
     initalizeMetadataStore(conf, configurator);
 
-    eventQueue = new EventQueue();
+    eventQueue = new EventQueue(threadNamePrefix);
     serviceManager = new SCMServiceManager();
     reconfigurationHandler =
         new ReconfigurationHandler("SCM", conf, this::checkAdminAccess)
@@ -514,7 +516,8 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
     List<BlockingQueue<ContainerReport>> queues
         = ScmUtils.initContainerReportQueue(configuration);
     List<ThreadPoolExecutor> executors
-        = FixedThreadPoolWithAffinityExecutor.initializeExecutorPool(queues);
+        = FixedThreadPoolWithAffinityExecutor.initializeExecutorPool(
+            threadNamePrefix, queues);
     Map<String, FixedThreadPoolWithAffinityExecutor> reportExecutorMap
         = new ConcurrentHashMap<>();
     FixedThreadPoolWithAffinityExecutor<ContainerReportFromDatanode,
@@ -654,7 +657,7 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
           OzoneConfigKeys.OZONE_SCM_CLOSE_CONTAINER_WAIT_DURATION,
           OzoneConfigKeys.OZONE_SCM_CLOSE_CONTAINER_WAIT_DURATION_DEFAULT
               .getDuration(), TimeUnit.MILLISECONDS);
-      leaseManager = new LeaseManager<>("Lease Manager", timeDuration);
+      leaseManager = new LeaseManager<>(threadNamePrefix, timeDuration);
     }
 
     scmLayoutVersionManager = new HDDSLayoutVersionManager(
@@ -695,6 +698,7 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
           .setIsInSafeMode(true)
           .setIsPreCheckComplete(false)
           .setSCM(this)
+          .setThreadNamePrefix(threadNamePrefix)
           .setFinalizationCheckpoint(finalizationManager.getCheckpoint())
           .build();
     }
@@ -987,7 +991,8 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
       SCMSecurityProtocolClientSideTranslatorPB scmSecurityClient =
           getScmSecurityClientWithMaxRetry(configuration, getCurrentUser());
       scmCertificateClient = new SCMCertificateClient(securityConfig,
-          scmSecurityClient, certSerialNumber, SCM_ROOT_CA_COMPONENT_NAME);
+          scmSecurityClient, certSerialNumber, getScmId(),
+          SCM_ROOT_CA_COMPONENT_NAME);
     }
     return new ContainerTokenSecretManager(expiryTime,
         secretKeyManagerService.getSecretKeyManager());
@@ -1462,6 +1467,10 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
     return scmHANodeDetails;
   }
 
+  public String threadNamePrefix() {
+    return threadNamePrefix;
+  }
+
   @Override
   public String getDatanodeRpcPort() {
     InetSocketAddress addr = getDatanodeRpcAddress();
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
index e56ac4d02e..7896011790 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
@@ -28,6 +28,7 @@ import java.util.UUID;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto;
@@ -89,8 +90,9 @@ public class TestContainerServer {
   public static void setup() {
     DefaultMetricsSystem.setMiniClusterMode(true);
     CONF.set(HddsConfigKeys.HDDS_METADATA_DIR_NAME, TEST_DIR);
+    DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails();
     caClient = new DNCertificateClient(new SecurityConfig(CONF), null,
-        null, null, null, null);
+        dn, null, null, null);
   }
 
   @AfterAll
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index ceadb2d673..3f2543ade1 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -650,7 +650,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
 
       SecretKeyProtocol secretKeyProtocol =
           HddsServerUtil.getSecretKeyClientForOm(conf);
-      secretKeyClient = new DefaultSecretKeySignerClient(secretKeyProtocol);
+      secretKeyClient = new DefaultSecretKeySignerClient(secretKeyProtocol, "");
     }
     serviceInfo = new ServiceInfoProvider(secConfig, this, certClient,
         testSecureOmFlag);
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java
index 76579668d5..d48f3adb13 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java
@@ -86,7 +86,8 @@ public abstract class AbstractKeyDeletingService extends BackgroundService
   public AbstractKeyDeletingService(String serviceName, long interval,
       TimeUnit unit, int threadPoolSize, long serviceTimeout,
       OzoneManager ozoneManager, ScmBlockLocationProtocol scmClient) {
-    super(serviceName, interval, unit, threadPoolSize, serviceTimeout);
+    super(serviceName, interval, unit, threadPoolSize, serviceTimeout,
+        ozoneManager.getNodeDetails().threadNamePrefix());
     this.ozoneManager = ozoneManager;
     this.scmClient = scmClient;
     this.deletedDirsCount = new AtomicLong(0);
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/OMCertificateClient.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/OMCertificateClient.java
index 4b3bbb5545..02f1f78495 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/OMCertificateClient.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/OMCertificateClient.java
@@ -20,6 +20,7 @@
 package org.apache.hadoop.ozone.security;
 
 import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertResponseProto;
 import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolClientSideTranslatorPB;
@@ -65,7 +66,8 @@ public class OMCertificateClient extends CommonCertificateClient {
       Runnable shutdownCallback
   ) {
     super(secConfig, scmSecurityClient, LOG, omStorage.getOmCertSerialId(),
-        COMPONENT_NAME, saveCertIdCallback, shutdownCallback);
+        COMPONENT_NAME, HddsUtils.threadNamePrefix(omStorage.getOmNodeId()),
+        saveCertIdCallback, shutdownCallback);
     this.serviceId = serviceId;
     this.scmID = scmID;
     this.clusterID = omStorage.getClusterID();
diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/security/ReconCertificateClient.java
 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/security/ReconCertificateClient.java
index 4cffb84e80..cc1d64c91d 100644
--- 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/security/ReconCertificateClient.java
+++ 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/security/ReconCertificateClient.java
@@ -56,7 +56,7 @@ public class ReconCertificateClient  extends CommonCertificateClient {
       Consumer<String> saveCertIdCallback,
       Runnable shutdownCallback) {
     super(config, scmSecurityClient, LOG, storage.getReconCertSerialId(),
-        COMPONENT_NAME, saveCertIdCallback, shutdownCallback);
+        COMPONENT_NAME, "", saveCertIdCallback, shutdownCallback);
     this.clusterID = storage.getClusterID();
     this.reconID = storage.getReconId();
   }
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineManager.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineManager.java
index 648e31c4fa..8e6b1db4cf 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineManager.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineManager.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hdds.scm.ha.SCMHADBTransactionBufferStub;
 import org.apache.hadoop.hdds.scm.ha.SCMHAManagerStub;
 import org.apache.hadoop.hdds.scm.ha.SCMContext;
 import org.apache.hadoop.hdds.scm.ha.SCMHAManager;
+import org.apache.hadoop.hdds.scm.ha.SCMNodeDetails;
 import org.apache.hadoop.hdds.scm.net.NetworkTopology;
 import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
@@ -145,9 +146,12 @@ public class TestReconPipelineManager {
                  eventQueue,
                  scmhaManager,
                  scmContext)) {
+      StorageContainerManager mock = mock(StorageContainerManager.class);
+      Mockito.when(mock.getScmNodeDetails())
+          .thenReturn(mock(SCMNodeDetails.class));
       scmContext = new SCMContext.Builder().setIsInSafeMode(true)
               .setLeader(true).setIsPreCheckComplete(true)
-              .setSCM(mock(StorageContainerManager.class)).build();
+              .setSCM(mock).build();
       reconPipelineManager.setScmContext(scmContext);
       reconPipelineManager.addPipeline(validPipeline);
       reconPipelineManager.addPipeline(invalidPipeline);


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to