This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new 49cfaaa7004 HDDS-14787. Intermittent BindException in datanode upgrade 
tests (#9889)
49cfaaa7004 is described below

commit 49cfaaa70046f1abb7f15c706e023db6ee810ad3
Author: Arun Sarin <[email protected]>
AuthorDate: Tue Mar 10 00:17:47 2026 +0530

    HDDS-14787. Intermittent BindException in datanode upgrade tests (#9889)
---
 .../ozone/container/common/SCMTestUtils.java       | 34 ++++++++++++++
 .../container/common/TestDatanodeStateMachine.java | 11 +----
 .../TestDatanodeUpgradeToContainerIdsTable.java    | 12 ++---
 .../upgrade/TestDatanodeUpgradeToHBaseSupport.java | 14 ++----
 .../upgrade/TestDatanodeUpgradeToSchemaV3.java     | 53 ++++++++++------------
 .../ozone/container/common/TestEndPoint.java       |  5 +-
 6 files changed, 69 insertions(+), 60 deletions(-)

diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java
index e29f46cd74e..226404e74bb 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java
@@ -49,6 +49,10 @@
  * Test Endpoint class.
  */
 public final class SCMTestUtils {
+
+  /** Cluster ID shared by all tests that need a fixed, predictable value. */
+  public static final String CLUSTER_ID = "clusterID";
+
   /**
    * Never constructed.
    */
@@ -112,6 +116,36 @@ public static RPC.Server 
startScmRpcServer(ConfigurationSource configuration,
     return scmServer;
   }
 
+  /**
+   * Starts a mock SCM RPC server bound to an OS-assigned port (port 0),
+   * updates {@code OZONE_SCM_NAMES} in {@code conf} with the actual address,
+   * and returns the running server. Eliminates the TOCTOU race of 
pre-reserving
+   * a port with {@link #getReuseableAddress()} and binding later.
+   *
+   * @param conf configuration to update with the actual bound address
+   * @param server protocol implementation to serve
+   * @return started RPC server; caller can retrieve the address via
+   *         {@code server.getListenerAddress()}
+   */
+  public static RPC.Server startScmRpcServer(OzoneConfiguration conf,
+      StorageContainerDatanodeProtocol server) throws IOException {
+    RPC.Server rpcServer = startScmRpcServer(conf, server,
+        new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 10);
+    conf.setSocketAddr(ScmConfigKeys.OZONE_SCM_NAMES,
+        rpcServer.getListenerAddress());
+    return rpcServer;
+  }
+
+  /**
+   * Convenience overload of {@link #startScmRpcServer(OzoneConfiguration,
+   * StorageContainerDatanodeProtocol)} using a {@link ScmTestMock} initialised
+   * with {@link #CLUSTER_ID}.
+   */
+  public static RPC.Server startScmRpcServer(OzoneConfiguration conf)
+      throws IOException {
+    return startScmRpcServer(conf, new ScmTestMock(CLUSTER_ID));
+  }
+
   public static InetSocketAddress getReuseableAddress() throws IOException {
     try (ServerSocket socket = new ServerSocket(0)) {
       socket.setReuseAddress(true);
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
index d870879cba8..721f50bec13 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
@@ -27,7 +27,6 @@
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import java.io.File;
 import java.io.IOException;
-import java.net.InetSocketAddress;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
@@ -88,22 +87,14 @@ void setUp() throws Exception {
         true);
     conf.setBoolean(
         OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true);
-    List<String> serverAddresses = new ArrayList<>();
     scmServers = new ArrayList<>();
     mockServers = new ArrayList<>();
     for (int x = 0; x < SCM_SERVER_COUNT; x++) {
-      int port = SCMTestUtils.getReuseableAddress().getPort();
-      String address = "127.0.0.1";
-      serverAddresses.add(address + ":" + port);
       ScmTestMock mock = new ScmTestMock();
-      scmServers.add(SCMTestUtils.startScmRpcServer(conf, mock,
-          new InetSocketAddress(address, port), 10));
+      scmServers.add(SCMTestUtils.startScmRpcServer(conf, mock));
       mockServers.add(mock);
     }
 
-    conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES,
-        serverAddresses.toArray(new String[0]));
-
     executorService = HadoopExecutors.newCachedThreadPool(
         new ThreadFactoryBuilder().setDaemon(true)
             .setNameFormat("TestDataNodeStateMachineThread-%d").build());
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToContainerIdsTable.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToContainerIdsTable.java
index 7af7d4dd94a..5fb5ba1edd6 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToContainerIdsTable.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToContainerIdsTable.java
@@ -26,7 +26,6 @@
 import java.util.Collections;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.pipeline.MockPipeline;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
@@ -35,7 +34,6 @@
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ozone.container.common.SCMTestUtils;
-import org.apache.hadoop.ozone.container.common.ScmTestMock;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
 import 
org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
@@ -55,10 +53,8 @@ public class TestDatanodeUpgradeToContainerIdsTable {
 
   private DatanodeStateMachine dsm;
   private OzoneConfiguration conf;
-  private static final String CLUSTER_ID = "clusterID";
 
   private RPC.Server scmRpcServer;
-  private InetSocketAddress address;
 
   private void initTests() throws Exception {
     conf = new OzoneConfiguration();
@@ -66,8 +62,6 @@ private void initTests() throws Exception {
   }
 
   private void setup() throws Exception {
-    address = SCMTestUtils.getReuseableAddress();
-    conf.setSocketAddr(ScmConfigKeys.OZONE_SCM_NAMES, address);
     conf.set(HddsConfigKeys.OZONE_METADATA_DIRS,
         tempFolder.toString());
   }
@@ -87,7 +81,8 @@ public void teardown() throws Exception {
   public void testContainerTableAccessBeforeAndAfterUpgrade() throws Exception 
{
     initTests();
     // start DN and SCM
-    scmRpcServer = SCMTestUtils.startScmRpcServer(conf, new 
ScmTestMock(CLUSTER_ID), address, 10);
+    scmRpcServer = SCMTestUtils.startScmRpcServer(conf);
+    InetSocketAddress address = scmRpcServer.getListenerAddress();
     UpgradeTestHelper.addHddsVolume(conf, tempFolder);
     dsm = UpgradeTestHelper.startPreFinalizedDatanode(conf, tempFolder, dsm, 
address,
         HDDSLayoutFeature.HBASE_SUPPORT.layoutVersion());
@@ -123,7 +118,8 @@ public void testContainerTableAccessBeforeAndAfterUpgrade() 
throws Exception {
   public void testContainerTableFinalizeRetry() throws Exception {
     initTests();
     // start DN and SCM
-    scmRpcServer = SCMTestUtils.startScmRpcServer(conf, new 
ScmTestMock(CLUSTER_ID), address, 10);
+    scmRpcServer = SCMTestUtils.startScmRpcServer(conf);
+    InetSocketAddress address = scmRpcServer.getListenerAddress();
     UpgradeTestHelper.addHddsVolume(conf, tempFolder);
     dsm = UpgradeTestHelper.startPreFinalizedDatanode(conf, tempFolder, dsm, 
address,
         HDDSLayoutFeature.HBASE_SUPPORT.layoutVersion());
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToHBaseSupport.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToHBaseSupport.java
index 84d48609610..214ea1d167d 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToHBaseSupport.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToHBaseSupport.java
@@ -27,13 +27,11 @@
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.pipeline.MockPipeline;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ozone.container.common.SCMTestUtils;
-import org.apache.hadoop.ozone.container.common.ScmTestMock;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
 import 
org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
@@ -50,10 +48,8 @@ public class TestDatanodeUpgradeToHBaseSupport {
 
   private DatanodeStateMachine dsm;
   private OzoneConfiguration conf;
-  private static final String CLUSTER_ID = "clusterID";
 
   private RPC.Server scmRpcServer;
-  private InetSocketAddress address;
 
   private void initTests() throws Exception {
     conf = new OzoneConfiguration();
@@ -61,8 +57,6 @@ private void initTests() throws Exception {
   }
 
   private void setup() throws Exception {
-    address = SCMTestUtils.getReuseableAddress();
-    conf.setSocketAddr(ScmConfigKeys.OZONE_SCM_NAMES, address);
     conf.set(HddsConfigKeys.OZONE_METADATA_DIRS,
         tempFolder.toString());
   }
@@ -85,8 +79,8 @@ public void teardown() throws Exception {
   public void testIncrementalChunkListBeforeAndAfterUpgrade() throws Exception 
{
     initTests();
     // start DN and SCM
-    scmRpcServer = SCMTestUtils.startScmRpcServer(conf,
-        new ScmTestMock(CLUSTER_ID), address, 10);
+    scmRpcServer = SCMTestUtils.startScmRpcServer(conf);
+    InetSocketAddress address = scmRpcServer.getListenerAddress();
     UpgradeTestHelper.addHddsVolume(conf, tempFolder);
     dsm = UpgradeTestHelper.startPreFinalizedDatanode(conf, tempFolder, dsm, 
address,
         HDDSLayoutFeature.HADOOP_PRC_PORTS_IN_DATANODEDETAILS.layoutVersion());
@@ -120,8 +114,8 @@ public void testIncrementalChunkListBeforeAndAfterUpgrade() 
throws Exception {
   public void testBlockFinalizationBeforeAndAfterUpgrade() throws Exception {
     initTests();
     // start DN and SCM
-    scmRpcServer = SCMTestUtils.startScmRpcServer(conf,
-        new ScmTestMock(CLUSTER_ID), address, 10);
+    scmRpcServer = SCMTestUtils.startScmRpcServer(conf);
+    InetSocketAddress address = scmRpcServer.getListenerAddress();
     UpgradeTestHelper.addHddsVolume(conf, tempFolder);
     dsm = UpgradeTestHelper.startPreFinalizedDatanode(conf, tempFolder, dsm, 
address,
         HDDSLayoutFeature.HADOOP_PRC_PORTS_IN_DATANODEDETAILS.layoutVersion());
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java
index 56b9945706f..1b8470d8a2c 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java
@@ -52,7 +52,6 @@
 import org.apache.hadoop.ozone.container.common.ContainerTestUtils;
 import org.apache.hadoop.ozone.container.common.DatanodeLayoutStorage;
 import org.apache.hadoop.ozone.container.common.SCMTestUtils;
-import org.apache.hadoop.ozone.container.common.ScmTestMock;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
 import 
org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration;
 import 
org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
@@ -74,10 +73,8 @@ public class TestDatanodeUpgradeToSchemaV3 {
 
   private DatanodeStateMachine dsm;
   private OzoneConfiguration conf;
-  private static final String CLUSTER_ID = "clusterID";
 
   private RPC.Server scmRpcServer;
-  private InetSocketAddress address;
 
   private void initTests(Boolean enable) throws Exception {
     boolean schemaV3Enabled = enable;
@@ -92,8 +89,6 @@ private void initTests(Boolean enable) throws Exception {
   }
 
   private void setup() throws Exception {
-    address = SCMTestUtils.getReuseableAddress();
-    conf.setSocketAddr(ScmConfigKeys.OZONE_SCM_NAMES, address);
     conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY,
         tempFolder.resolve("data").toString());
     conf.set(HddsConfigKeys.OZONE_METADATA_DIRS,
@@ -121,8 +116,8 @@ public void teardown() throws Exception {
   public void testDBOnHddsVolume(boolean schemaV3Enabled) throws Exception {
     initTests(schemaV3Enabled);
     // start DN and SCM
-    scmRpcServer = SCMTestUtils.startScmRpcServer(conf,
-        new ScmTestMock(CLUSTER_ID), address, 10);
+    scmRpcServer = SCMTestUtils.startScmRpcServer(conf);
+    InetSocketAddress address = scmRpcServer.getListenerAddress();
     UpgradeTestHelper.addHddsVolume(conf, tempFolder);
 
     dsm = UpgradeTestHelper.startPreFinalizedDatanode(conf, tempFolder, dsm, 
address,
@@ -158,8 +153,8 @@ public void testDBOnHddsVolume(boolean schemaV3Enabled) 
throws Exception {
   public void testDBOnDbVolume(boolean schemaV3Enabled) throws Exception {
     initTests(schemaV3Enabled);
     // start DN and SCM
-    scmRpcServer = SCMTestUtils.startScmRpcServer(conf,
-        new ScmTestMock(CLUSTER_ID), address, 10);
+    scmRpcServer = SCMTestUtils.startScmRpcServer(conf);
+    InetSocketAddress address = scmRpcServer.getListenerAddress();
     UpgradeTestHelper.addHddsVolume(conf, tempFolder);
     UpgradeTestHelper.addDbVolume(conf, tempFolder);
 
@@ -199,8 +194,8 @@ public void testDBCreatedInFinalize(boolean schemaV3Enabled)
       throws Exception {
     initTests(schemaV3Enabled);
     // start DN and SCM
-    scmRpcServer = SCMTestUtils.startScmRpcServer(conf,
-        new ScmTestMock(CLUSTER_ID), address, 10);
+    scmRpcServer = SCMTestUtils.startScmRpcServer(conf);
+    InetSocketAddress address = scmRpcServer.getListenerAddress();
     // add one HddsVolume
     UpgradeTestHelper.addHddsVolume(conf, tempFolder);
 
@@ -214,8 +209,8 @@ public void testDBCreatedInFinalize(boolean schemaV3Enabled)
     HddsVolume dataVolume = (
         HddsVolume) dsm.getContainer().getVolumeSet().getVolumesList().get(0);
     // Format HddsVolume to mimic the real cluster upgrade situation
-    dataVolume.format(CLUSTER_ID);
-    File idDir = new File(dataVolume.getStorageDir(), CLUSTER_ID);
+    dataVolume.format(SCMTestUtils.CLUSTER_ID);
+    File idDir = new File(dataVolume.getStorageDir(), SCMTestUtils.CLUSTER_ID);
     if (!idDir.mkdir()) {
       fail("Failed to create id directory");
     }
@@ -246,8 +241,8 @@ public void testDBCreatedInFinalize(boolean schemaV3Enabled)
   public void testFinalizeTwice(boolean schemaV3Enabled) throws Exception {
     initTests(schemaV3Enabled);
     // start DN and SCM
-    scmRpcServer = SCMTestUtils.startScmRpcServer(conf,
-        new ScmTestMock(CLUSTER_ID), address, 10);
+    scmRpcServer = SCMTestUtils.startScmRpcServer(conf);
+    InetSocketAddress address = scmRpcServer.getListenerAddress();
     // add one HddsVolume and two DbVolume
     UpgradeTestHelper.addHddsVolume(conf, tempFolder);
     UpgradeTestHelper.addDbVolume(conf, tempFolder);
@@ -276,8 +271,8 @@ public void testAddHddsVolumeAfterFinalize(boolean 
schemaV3Enabled)
       throws Exception {
     initTests(schemaV3Enabled);
     // start DN and SCM
-    scmRpcServer = SCMTestUtils.startScmRpcServer(conf,
-        new ScmTestMock(CLUSTER_ID), address, 10);
+    scmRpcServer = SCMTestUtils.startScmRpcServer(conf);
+    InetSocketAddress address = scmRpcServer.getListenerAddress();
     UpgradeTestHelper.addHddsVolume(conf, tempFolder);
 
     dsm = UpgradeTestHelper.startPreFinalizedDatanode(conf, tempFolder, dsm, 
address,
@@ -310,8 +305,8 @@ public void testAddHddsVolumeAfterFinalize(boolean 
schemaV3Enabled)
   public void testAddDbVolumeAfterFinalize(boolean schemaV3Enabled)
       throws Exception {
     initTests(schemaV3Enabled);
-    scmRpcServer = SCMTestUtils.startScmRpcServer(conf,
-        new ScmTestMock(CLUSTER_ID), address, 10);
+    scmRpcServer = SCMTestUtils.startScmRpcServer(conf);
+    InetSocketAddress address = scmRpcServer.getListenerAddress();
     UpgradeTestHelper.addHddsVolume(conf, tempFolder);
 
     dsm = UpgradeTestHelper.startPreFinalizedDatanode(conf, tempFolder, dsm, 
address,
@@ -353,8 +348,8 @@ public void testAddDbAndHddsVolumeAfterFinalize(boolean 
schemaV3Enabled)
       throws Exception {
     initTests(schemaV3Enabled);
     // start DN and SCM
-    scmRpcServer = SCMTestUtils.startScmRpcServer(conf,
-        new ScmTestMock(CLUSTER_ID), address, 10);
+    scmRpcServer = SCMTestUtils.startScmRpcServer(conf);
+    InetSocketAddress address = scmRpcServer.getListenerAddress();
     UpgradeTestHelper.addHddsVolume(conf, tempFolder);
 
     dsm = UpgradeTestHelper.startPreFinalizedDatanode(conf, tempFolder, dsm, 
address,
@@ -421,8 +416,8 @@ public void testWriteWithV3Disabled(boolean schemaV3Enabled)
   public void testWrite(boolean enable, String expectedVersion)
       throws Exception {
     // start DN and SCM
-    scmRpcServer = SCMTestUtils.startScmRpcServer(conf,
-        new ScmTestMock(CLUSTER_ID), address, 10);
+    scmRpcServer = SCMTestUtils.startScmRpcServer(conf);
+    InetSocketAddress address = scmRpcServer.getListenerAddress();
     UpgradeTestHelper.addHddsVolume(conf, tempFolder);
     // Disable Schema V3
     conf.setBoolean(DatanodeConfiguration.CONTAINER_SCHEMA_V3_ENABLED, false);
@@ -472,8 +467,8 @@ public void testReadsDuringFinalize(boolean schemaV3Enabled)
       throws Exception {
     initTests(schemaV3Enabled);
     // start DN and SCM
-    scmRpcServer = SCMTestUtils.startScmRpcServer(conf,
-        new ScmTestMock(CLUSTER_ID), address, 10);
+    scmRpcServer = SCMTestUtils.startScmRpcServer(conf);
+    InetSocketAddress address = scmRpcServer.getListenerAddress();
     UpgradeTestHelper.addHddsVolume(conf, tempFolder);
     dsm = UpgradeTestHelper.startPreFinalizedDatanode(conf, tempFolder, dsm, 
address,
         HDDSLayoutFeature.ERASURE_CODED_STORAGE_SUPPORT.layoutVersion());
@@ -514,8 +509,8 @@ public void testReadsDuringFinalize(boolean schemaV3Enabled)
   public void testFinalizeFailure(boolean schemaV3Enabled) throws Exception {
     initTests(schemaV3Enabled);
     // start DN and SCM
-    scmRpcServer = SCMTestUtils.startScmRpcServer(conf,
-        new ScmTestMock(CLUSTER_ID), address, 10);
+    scmRpcServer = SCMTestUtils.startScmRpcServer(conf);
+    InetSocketAddress address = scmRpcServer.getListenerAddress();
     UpgradeTestHelper.addHddsVolume(conf, tempFolder);
     // Let HddsVolume be formatted to mimic the real cluster upgrade
     // Set layout version.
@@ -528,8 +523,8 @@ public void testFinalizeFailure(boolean schemaV3Enabled) 
throws Exception {
     HddsVolume dataVolume = (
         HddsVolume) dsm.getContainer().getVolumeSet().getVolumesList().get(0);
     // Format HddsVolume to mimic the real cluster upgrade situation
-    dataVolume.format(CLUSTER_ID);
-    File idDir = new File(dataVolume.getStorageDir(), CLUSTER_ID);
+    dataVolume.format(SCMTestUtils.CLUSTER_ID);
+    File idDir = new File(dataVolume.getStorageDir(), SCMTestUtils.CLUSTER_ID);
     if (!idDir.mkdir()) {
       fail("Failed to create id directory");
     }
diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
index 6069a120476..229d12f5be0 100644
--- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
+++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
@@ -115,7 +115,6 @@ public static void tearDown() throws Exception {
 
   @BeforeAll
   static void setUp() throws Exception {
-    serverAddress = SCMTestUtils.getReuseableAddress();
     ozoneConf = SCMTestUtils.getConf(testDir);
     scmServerImpl = new ScmTestMock();
     dnDetails = randomDatanodeDetails();
@@ -123,8 +122,8 @@ static void setUp() throws Exception {
         UUID.randomUUID().toString(),
         HDDSLayoutFeature.DATANODE_SCHEMA_V3.layoutVersion());
     layoutStorage.initialize();
-    scmServer = SCMTestUtils.startScmRpcServer(ozoneConf,
-        scmServerImpl, serverAddress, 10);
+    scmServer = SCMTestUtils.startScmRpcServer(ozoneConf, scmServerImpl);
+    serverAddress = scmServer.getListenerAddress();
     volumeChoosingPolicy = VolumeChoosingPolicyFactory.getPolicy(ozoneConf);
   }
 


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to