This is an automated email from the ASF dual-hosted git repository.
adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new 7d2864cfb8 HDDS-10151. Replace single-use Random objects with RandomUtils in test classes (#6041)
7d2864cfb8 is described below
commit 7d2864cfb8fdd4cfa26dcf52d48791dfb722376b
Author: Will Xiao <[email protected]>
AuthorDate: Mon Jan 22 00:07:49 2024 +0800
HDDS-10151. Replace single-use Random objects with RandomUtils in test classes (#6041)
---
.../hadoop/hdds/scm/storage/TestBlockInputStream.java | 5 ++---
.../io/TestECBlockReconstructedStripeInputStream.java | 4 ++--
.../apache/hadoop/ozone/common/TestChecksumByteBuffer.java | 8 +++-----
.../hadoop/hdds/server/http/TestHttpServer2Metrics.java | 12 +++++-------
.../apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java | 13 +++++--------
.../algorithms/TestSCMContainerPlacementRackAware.java | 8 ++++----
.../checksum/TestReplicatedBlockChecksumComputer.java | 11 ++++-------
.../apache/hadoop/fs/contract/AbstractContractSeekTest.java | 7 +++----
.../hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java | 5 ++---
.../hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java | 5 ++---
.../org/apache/hadoop/ozone/TestMultipartObjectGet.java | 5 ++---
.../apache/hadoop/ozone/client/rpc/TestWatchForCommit.java | 6 +++---
.../hadoop/ozone/shell/TestDeletedBlocksTxnShell.java | 7 +++----
.../java/org/apache/hadoop/ozone/recon/TestReconUtils.java | 5 ++---
14 files changed, 42 insertions(+), 59 deletions(-)
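The change applies one pattern throughout: a java.util.Random constructed for a single use is replaced by the static helpers in org.apache.commons.lang3.RandomUtils. A minimal sketch of the before/after shapes, assuming commons-lang3 on the classpath (class and variable names here are illustrative, not taken from the patch):

    import java.util.Random;

    import org.apache.commons.lang3.RandomUtils;

    public class RandomUsageSketch {
      public static void main(String[] args) {
        final int bound = 100;

        // Before: a throwaway Random instance for a single value in [0, bound).
        int before = new Random().nextInt(bound);

        // After: RandomUtils.nextInt(startInclusive, endExclusive) also
        // returns a value in [0, bound), so the ranges are unchanged.
        int after = RandomUtils.nextInt(0, bound);

        // RandomUtils.nextBytes(count) similarly replaces the
        // new byte[count] + Random#nextBytes(buf) idiom used in several tests.
        byte[] buf = RandomUtils.nextBytes(16);

        System.out.println(before + " " + after + " " + buf.length);
      }
    }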
diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java
index 3dc5a82b33..ae8c114d81 100644
--- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java
+++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java
@@ -19,6 +19,7 @@
package org.apache.hadoop.hdds.scm.storage;
import com.google.common.primitives.Bytes;
+import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.client.ContainerBlockID;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
@@ -50,7 +51,6 @@ import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
-import java.util.Random;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicBoolean;
@@ -186,9 +186,8 @@ public class TestBlockInputStream {
assertThrows(EOFException.class, () -> seekAndVerify(finalPos));
// Seek to random positions between 0 and the block size.
- Random random = new Random();
for (int i = 0; i < 10; i++) {
- pos = random.nextInt(blockSize);
+ pos = RandomUtils.nextInt(0, blockSize);
seekAndVerify(pos);
}
}
diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedStripeInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedStripeInputStream.java
index c708fc28dd..c32cea0951 100644
--- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedStripeInputStream.java
+++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedStripeInputStream.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.ozone.client.io;
import com.google.common.collect.ImmutableSet;
+import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.hdds.client.ECReplicationConfig;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
@@ -41,7 +42,6 @@ import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
-import java.util.Random;
import java.util.Set;
import java.util.SplittableRandom;
import java.util.concurrent.ExecutorService;
@@ -645,7 +645,7 @@ public class TestECBlockReconstructedStripeInputStream {
}
private Integer getRandomStreamIndex(Set<Integer> set) {
- return set.stream().skip(new Random().nextInt(set.size()))
+ return set.stream().skip(RandomUtils.nextInt(0, set.size()))
.findFirst().orElse(null);
}
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumByteBuffer.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumByteBuffer.java
index 5b88f5cb30..9567fa2c28 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumByteBuffer.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumByteBuffer.java
@@ -22,7 +22,7 @@ import org.apache.hadoop.util.PureJavaCrc32C;
import org.junit.jupiter.api.Test;
import java.nio.charset.StandardCharsets;
-import java.util.Random;
+import org.apache.commons.lang3.RandomUtils;
import java.util.zip.Checksum;
import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -59,11 +59,9 @@ public class TestChecksumByteBuffer {
checkBytes("hello world!".getBytes(StandardCharsets.UTF_8));
- final Random random = new Random();
- final byte[] bytes = new byte[1 << 10];
+ final int len = 1 << 10;
for (int i = 0; i < 1000; i++) {
- random.nextBytes(bytes);
- checkBytes(bytes, random.nextInt(bytes.length));
+ checkBytes(RandomUtils.nextBytes(len), RandomUtils.nextInt(0, len));
}
}
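One nuance in this hunk: the old loop refilled a single shared 1 KiB buffer on each of the 1000 iterations, while RandomUtils.nextBytes(len) allocates a fresh array per call. The extra garbage is negligible in a unit test, and it rules out accidental buffer reuse between iterations. A sketch of the new idiom (illustrative class name):

    import org.apache.commons.lang3.RandomUtils;

    public class FreshBufferSketch {
      public static void main(String[] args) {
        final int len = 1 << 10;
        for (int i = 0; i < 3; i++) {
          // Each call returns a newly allocated array of len random bytes.
          byte[] fresh = RandomUtils.nextBytes(len);
          System.out.println(fresh.length);
        }
      }
    }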
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/TestHttpServer2Metrics.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/TestHttpServer2Metrics.java
index 257c543d22..3f00bc53d2 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/TestHttpServer2Metrics.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/TestHttpServer2Metrics.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hdds.server.http;
+import org.apache.commons.lang3.RandomUtils;
import static org.apache.hadoop.hdds.server.http.HttpServer2Metrics.HttpServer2MetricsInfo.HttpServerIdleThreadCount;
import static org.apache.hadoop.hdds.server.http.HttpServer2Metrics.HttpServer2MetricsInfo.HttpServerMaxThreadCount;
import static org.apache.hadoop.hdds.server.http.HttpServer2Metrics.HttpServer2MetricsInfo.HttpServerThreadCount;
@@ -36,8 +37,6 @@ import org.eclipse.jetty.util.thread.QueuedThreadPool;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
-import java.util.Random;
-
/**
* Testing HttpServer2Metrics.
*/
@@ -57,11 +56,10 @@ public class TestHttpServer2Metrics {
@Test
public void testMetrics() {
// crate mock metrics
- Random random = new Random();
- int threadCount = random.nextInt();
- int maxThreadCount = random.nextInt();
- int idleThreadCount = random.nextInt();
- int threadQueueWaitingTaskCount = random.nextInt();
+ int threadCount = RandomUtils.nextInt();
+ int maxThreadCount = RandomUtils.nextInt();
+ int idleThreadCount = RandomUtils.nextInt();
+ int threadQueueWaitingTaskCount = RandomUtils.nextInt();
String name = "s3g";
when(threadPool.getThreads()).thenReturn(threadCount);
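A behavioral detail in this hunk: Random#nextInt() with no bound spans the full int range, negatives included, whereas the no-argument RandomUtils.nextInt() draws from [0, Integer.MAX_VALUE). The mocked thread counts therefore can no longer be negative, which is arguably more realistic for thread-pool metrics. A sketch (illustrative names):

    import java.util.Random;

    import org.apache.commons.lang3.RandomUtils;

    public class NextIntRangeSketch {
      public static void main(String[] args) {
        int signed = new Random().nextInt();   // any int, may be negative
        int unsigned = RandomUtils.nextInt();  // in [0, Integer.MAX_VALUE)
        System.out.println(signed + " " + unsigned);
      }
    }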
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
index be57aa8ea6..9292ffa865 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hdds.scm.block;
+import org.apache.commons.lang3.RandomUtils;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.client.RatisReplicationConfig;
@@ -70,7 +71,6 @@ import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Optional;
-import java.util.Random;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.TimeoutException;
@@ -218,9 +218,8 @@ public class TestDeletedBlockLog {
private Map<Long, List<Long>> generateData(int dataSize,
HddsProtos.LifeCycleState state) throws IOException {
Map<Long, List<Long>> blockMap = new HashMap<>();
- Random random = new Random(1);
- int continerIDBase = random.nextInt(100);
- int localIDBase = random.nextInt(1000);
+ int continerIDBase = RandomUtils.nextInt(0, 100);
+ int localIDBase = RandomUtils.nextInt(0, 1000);
for (int i = 0; i < dataSize; i++) {
long containerID = continerIDBase + i;
updateContainerMetadata(containerID, state);
@@ -692,13 +691,12 @@ public class TestDeletedBlockLog {
@Test
public void testRandomOperateTransactions() throws Exception {
mockContainerHealthResult(true);
- Random random = new Random();
int added = 0, committed = 0;
List<DeletedBlocksTransaction> blocks = new ArrayList<>();
List<Long> txIDs;
// Randomly add/get/commit/increase transactions.
for (int i = 0; i < 100; i++) {
- int state = random.nextInt(4);
+ int state = RandomUtils.nextInt(0, 4);
if (state == 0) {
addTransactions(generateData(10), true);
added += 10;
@@ -803,8 +801,7 @@ public class TestDeletedBlockLog {
// add two transactions for same container
containerID = blocks.get(0).getContainerID();
Map<Long, List<Long>> deletedBlocksMap = new HashMap<>();
- Random random = new Random();
- long localId = random.nextLong();
+ long localId = RandomUtils.nextLong();
deletedBlocksMap.put(containerID, new LinkedList<>(
Collections.singletonList(localId)));
addTransactions(deletedBlocksMap, true);
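Worth noting: the instance replaced in generateData above was a seeded new Random(1), as are new Random(1) in TestDeletedBlocksTxnShell and new Random(0) in TestLeaderChoosePolicy further down, so those call sites previously drew the same values on every run and are now non-deterministic. The tests only use the values as arbitrary ID bases and sizes, so determinism is not required, but the difference is easy to see in a sketch (illustrative names):

    import java.util.Random;

    import org.apache.commons.lang3.RandomUtils;

    public class SeededVsSharedSketch {
      public static void main(String[] args) {
        // Before: fixed seed, identical value on every run.
        int seededBase = new Random(1).nextInt(100);

        // After: drawn from RandomUtils' shared source, varies per run.
        int randomBase = RandomUtils.nextInt(0, 100);

        System.out.println(seededBase + " " + randomBase);
      }
    }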
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java
index 39e19135ef..3ed6ac89d6 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java
@@ -19,9 +19,11 @@ package org.apache.hadoop.hdds.scm.container.placement.algorithms;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
-import java.util.Random;
import java.util.stream.IntStream;
+import org.apache.commons.lang3.RandomUtils;
+import org.apache.commons.lang3.StringUtils;
+
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
@@ -48,8 +50,6 @@ import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.MethodSource;
import org.junit.jupiter.params.provider.ValueSource;
-import org.apache.commons.lang3.StringUtils;
-
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.DECOMMISSIONED;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.IN_SERVICE;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY;
@@ -625,7 +625,7 @@ public class TestSCMContainerPlacementRackAware {
for (int i = 0; i < 10; i++) {
// Set a random DN to in_service and ensure it is always picked
- int index = new Random().nextInt(dnInfos.size());
+ int index = RandomUtils.nextInt(0, dnInfos.size());
dnInfos.get(index).setNodeStatus(NodeStatus.inServiceHealthy());
try {
List<DatanodeDetails> datanodeDetails =
diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedBlockChecksumComputer.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedBlockChecksumComputer.java
index 5cf4401bae..6162f1ae5a 100644
--- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedBlockChecksumComputer.java
+++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedBlockChecksumComputer.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.ozone.client.checksum;
+import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.io.MD5Hash;
import org.apache.hadoop.util.DataChecksum;
@@ -27,7 +28,6 @@ import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.List;
-import java.util.Random;
import static org.apache.hadoop.hdds.scm.OzoneClientConfig.ChecksumCombineMode.COMPOSITE_CRC;
import static org.apache.hadoop.hdds.scm.OzoneClientConfig.ChecksumCombineMode.MD5MD5CRC;
@@ -40,9 +40,8 @@ public class TestReplicatedBlockChecksumComputer {
@Test
public void testComputeMd5Crc() throws IOException {
final int lenOfBytes = 32;
- byte[] randomChunkChecksum = new byte[lenOfBytes];
- Random r = new Random();
- r.nextBytes(randomChunkChecksum);
+ byte[] randomChunkChecksum = RandomUtils.nextBytes(lenOfBytes);
+
MD5Hash emptyBlockMD5 = MD5Hash.digest(randomChunkChecksum);
byte[] emptyBlockMD5Hash = emptyBlockMD5.getDigest();
AbstractBlockChecksumComputer computer =
@@ -56,9 +55,7 @@ public class TestReplicatedBlockChecksumComputer {
@Test
public void testComputeCompositeCrc() throws IOException {
final int lenOfBytes = 32;
- byte[] randomChunkChecksum = new byte[lenOfBytes];
- Random r = new Random();
- r.nextBytes(randomChunkChecksum);
+ byte[] randomChunkChecksum = RandomUtils.nextBytes(lenOfBytes);
CrcComposer crcComposer =
CrcComposer.newCrcComposer(DataChecksum.Type.CRC32C, 4);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java
index 49c693268e..618025dc06 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.fs.contract;
+import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FSDataInputStream;
@@ -31,7 +32,6 @@ import org.slf4j.LoggerFactory;
import java.io.EOFException;
import java.io.IOException;
-import java.util.Random;
import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile;
import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
@@ -341,15 +341,14 @@ public abstract class AbstractContractSeekTest extends AbstractFSContractTestBas
byte[] buf = dataset(filesize, 0, 255);
Path randomSeekFile = path("testrandomseeks.bin");
createFile(getFileSystem(), randomSeekFile, true, buf);
- Random r = new Random();
// Record the sequence of seeks and reads which trigger a failure.
int[] seeks = new int[10];
int[] reads = new int[10];
try (FSDataInputStream stm = getFileSystem().open(randomSeekFile)) {
for (int i = 0; i < limit; i++) {
- int seekOff = r.nextInt(buf.length);
- int toRead = r.nextInt(Math.min(buf.length - seekOff, 32000));
+ int seekOff = RandomUtils.nextInt(0, buf.length);
+ int toRead = RandomUtils.nextInt(0, Math.min(buf.length - seekOff, 32000));
seeks[i % seeks.length] = seekOff;
reads[i % reads.length] = toRead;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java
index 61b0281c65..1675807d23 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java
@@ -96,7 +96,6 @@ import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Optional;
-import java.util.Random;
import java.util.Set;
import java.util.TreeSet;
import java.util.UUID;
@@ -2000,7 +1999,7 @@ abstract class AbstractRootedOzoneFileSystemTest {
@Test
void testRenameFile() throws Exception {
- final String dir = "/dir" + new Random().nextInt(1000);
+ final String dir = "/dir" + RandomUtils.nextInt(0, 1000);
Path dirPath = new Path(getBucketPath() + dir);
Path file1Source = new Path(getBucketPath() + dir
+ "/file1_Copy");
@@ -2026,7 +2025,7 @@ abstract class AbstractRootedOzoneFileSystemTest {
*/
@Test
void testRenameFileToDir() throws Exception {
- final String dir = "/dir" + new Random().nextInt(1000);
+ final String dir = "/dir" + RandomUtils.nextInt(0, 1000);
Path dirPath = new Path(getBucketPath() + dir);
getFs().mkdirs(dirPath);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java
index 725b17ee9d..439b563d63 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hdds.scm.pipeline;
+import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.hdds.client.RatisReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
@@ -34,7 +35,6 @@ import java.util.concurrent.TimeoutException;
import java.util.concurrent.TimeUnit;
import java.util.HashMap;
import java.util.Map;
-import java.util.Random;
import java.util.UUID;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_AUTO_CREATE_FACTOR_ONE;
@@ -172,7 +172,6 @@ public class TestLeaderChoosePolicy {
// each datanode has leaderNumOfEachDn leaders after balance
checkLeaderBalance(dnNum, leaderNumOfEachDn);
- Random r = new Random(0);
for (int i = 0; i < 10; i++) {
// destroy some pipelines, wait new pipelines created,
// then check leader balance
@@ -181,7 +180,7 @@ public class TestLeaderChoosePolicy {
.getPipelines(RatisReplicationConfig.getInstance(
ReplicationFactor.THREE), Pipeline.PipelineState.OPEN);
- int destroyNum = r.nextInt(pipelines.size());
+ int destroyNum = RandomUtils.nextInt(0, pipelines.size());
for (int k = 0; k <= destroyNum; k++) {
pipelineManager.closePipeline(pipelines.get(k), false);
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java
index 0dae8a8b0d..c2e671b896 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.ozone;
+import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.hdds.conf.DefaultConfigManager;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.utils.IOUtils;
@@ -41,7 +42,6 @@ import javax.ws.rs.core.Response;
import javax.ws.rs.core.UriInfo;
import java.io.IOException;
import java.io.ByteArrayInputStream;
-import java.security.SecureRandom;
import java.util.ArrayList;
import java.util.UUID;
import java.util.List;
@@ -217,8 +217,7 @@ public class TestMultipartObjectGet {
private static String generateRandomContent(int sizeInMB) {
int bytesToGenerate = sizeInMB * 1024 * 1024;
- byte[] randomBytes = new byte[bytesToGenerate];
- new SecureRandom().nextBytes(randomBytes);
+ byte[] randomBytes = RandomUtils.nextBytes(bytesToGenerate);
return Base64.getEncoder().encodeToString(randomBytes);
}
}
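Here the replaced source was SecureRandom rather than Random: cryptographically strong bytes buy nothing for throwaway multipart payloads and are slower to produce, so falling back to RandomUtils is a reasonable simplification. A sketch of the two idioms (illustrative names):

    import java.security.SecureRandom;

    import org.apache.commons.lang3.RandomUtils;

    public class PayloadBytesSketch {
      public static void main(String[] args) {
        final int size = 1024;

        // Before: CSPRNG output, overkill for test payload data.
        byte[] secure = new byte[size];
        new SecureRandom().nextBytes(secure);

        // After: plain pseudo-random bytes, cheaper and sufficient here.
        byte[] plain = RandomUtils.nextBytes(size);

        System.out.println(secure.length + " " + plain.length);
      }
    }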
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java
index d03c57bf4e..b053a4394b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java
@@ -22,7 +22,7 @@ import java.io.OutputStream;
import java.time.Duration;
import java.util.ArrayList;
import java.util.List;
-import java.util.Random;
+import org.apache.commons.lang3.RandomUtils;
import java.util.UUID;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
@@ -280,7 +280,7 @@ public class TestWatchForCommit {
// as well as there is no logIndex generate in Ratis.
// The basic idea here is just to test if its throws an exception.
xceiverClient
- .watchForCommit(index + new Random().nextInt(100) + 10);
+ .watchForCommit(index + RandomUtils.nextInt(0, 100) + 10);
fail("expected exception not thrown");
} catch (Exception e) {
assertInstanceOf(ExecutionException.class, e);
@@ -374,7 +374,7 @@ public class TestWatchForCommit {
// The basic idea here is just to test if its throws an exception.
xceiverClient
.watchForCommit(reply.getLogIndex() +
- new Random().nextInt(100) + 10);
+ RandomUtils.nextInt(0, 100) + 10);
fail("Expected exception not thrown");
} catch (Exception e) {
assertInstanceOf(GroupMismatchException.class,
HddsClientUtils.checkForException(e));
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java
index e74041ceaf..36b970f4ee 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java
@@ -16,6 +16,7 @@
*/
package org.apache.hadoop.ozone.shell;
+import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.hdds.client.RatisReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -52,7 +53,6 @@ import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
-import java.util.Random;
import java.util.Set;
import java.util.UUID;
import java.util.regex.Matcher;
@@ -129,9 +129,8 @@ public class TestDeletedBlocksTxnShell {
//<containerID, List<blockID>>
private Map<Long, List<Long>> generateData(int dataSize) throws Exception {
Map<Long, List<Long>> blockMap = new HashMap<>();
- Random random = new Random(1);
- int continerIDBase = random.nextInt(100);
- int localIDBase = random.nextInt(1000);
+ int continerIDBase = RandomUtils.nextInt(0, 100);
+ int localIDBase = RandomUtils.nextInt(0, 1000);
for (int i = 0; i < dataSize; i++) {
long containerID = continerIDBase + i;
updateContainerMetadata(containerID);
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java
index b34c8d31c6..07196e29ea 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java
@@ -41,8 +41,8 @@ import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.net.URL;
-import java.util.Random;
+import org.apache.commons.lang3.RandomUtils;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -206,9 +206,8 @@ public class TestReconUtils {
assertNextClosestPowerIndexOfTwo(n - 1);
}
- final Random random = new Random();
for (int i = 0; i < 10; i++) {
- assertNextClosestPowerIndexOfTwo(random.nextLong());
+ assertNextClosestPowerIndexOfTwo(RandomUtils.nextLong());
}
}
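As in the TestHttpServer2Metrics change above, this swap narrows the input range: Random#nextLong() covers the full long range, while RandomUtils.nextLong() returns values in [0, Long.MAX_VALUE), so assertNextClosestPowerIndexOfTwo is no longer exercised with negative arguments from this loop. A sketch (illustrative names):

    import java.util.Random;

    import org.apache.commons.lang3.RandomUtils;

    public class NextLongRangeSketch {
      public static void main(String[] args) {
        long signed = new Random().nextLong();   // any long, may be negative
        long unsigned = RandomUtils.nextLong();  // in [0, Long.MAX_VALUE)
        System.out.println(signed + " " + unsigned);
      }
    }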