This is an automated email from the ASF dual-hosted git repository.
pifta pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new d3b1a06 HDDS-6219. Switch to RATIS ReplicationType from STAND_ALONE
in our tests. (#3014)
d3b1a06 is described below
commit d3b1a0691437137d5928e5f9fd999470feffdcb3
Author: Istvan Fajth <[email protected]>
AuthorDate: Thu Jan 27 18:22:08 2022 +0100
HDDS-6219. Switch to RATIS ReplicationType from STAND_ALONE in our tests.
(#3014)
---
.../java/org/apache/hadoop/ozone/TestDataUtil.java | 2 +-
.../hadoop/ozone/TestStorageContainerManager.java | 2 +
.../client/rpc/TestOzoneAtRestEncryption.java | 12 +--
.../rpc/TestOzoneClientMultipartUploadWithFSO.java | 42 +++++-----
.../client/rpc/TestOzoneRpcClientAbstract.java | 93 +++++++++++-----------
.../ozone/client/rpc/TestSecureOzoneRpcClient.java | 6 +-
.../apache/hadoop/ozone/container/TestHelper.java | 7 +-
.../commandhandler/TestCloseContainerHandler.java | 4 +-
.../commandhandler/TestDeleteContainerHandler.java | 3 +-
.../hadoop/ozone/dn/scrubber/TestDataScrubber.java | 7 +-
.../TestDatanodeHddsVolumeFailureDetection.java | 7 +-
.../ozone/om/TestContainerReportWithKeys.java | 2 +-
.../org/apache/hadoop/ozone/om/TestKeyPurging.java | 2 +-
.../hadoop/ozone/om/TestObjectStoreWithFSO.java | 4 +-
.../apache/hadoop/ozone/om/TestOzoneManagerHA.java | 4 +-
.../hadoop/ozone/om/TestOzoneManagerPrepare.java | 2 +-
.../ozone/recon/TestReconWithOzoneManagerFSO.java | 6 +-
.../ozone/scm/TestStorageContainerManagerHA.java | 4 +-
.../hadoop/ozone/client/OzoneBucketStub.java | 2 +-
.../hadoop/ozone/s3/endpoint/TestObjectHead.java | 2 +-
20 files changed, 112 insertions(+), 101 deletions(-)
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java
index 027afa3..2983270 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java
@@ -83,7 +83,7 @@ public final class TestDataUtil {
public static void createKey(OzoneBucket bucket, String keyName,
String content) throws IOException {
createKey(bucket, keyName, ReplicationFactor.ONE,
- ReplicationType.STAND_ALONE, content);
+ ReplicationType.RATIS, content);
}
public static void createKey(OzoneBucket bucket, String keyName,
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
index 6116212..add89d5 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
@@ -369,6 +369,7 @@ public class TestStorageContainerManager {
.setNumDatanodes(1)
.build();
cluster.waitForClusterToBeReady();
+ cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.ONE, 30000);
try {
DeletedBlockLog delLog = cluster.getStorageContainerManager()
@@ -746,6 +747,7 @@ public class TestStorageContainerManager {
.setNumDatanodes(1)
.build();
cluster.waitForClusterToBeReady();
+ cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.ONE, 30000);
try {
TestStorageContainerManagerHelper helper =
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java
index d5e60d2..9a4d691 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java
@@ -74,7 +74,7 @@ import org.apache.ozone.test.GenericTestUtils;
import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS;
import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE;
-import static org.apache.hadoop.hdds.client.ReplicationType.STAND_ALONE;
+import static org.apache.hadoop.hdds.client.ReplicationType.RATIS;
import org.junit.AfterClass;
import org.junit.Assert;
@@ -235,7 +235,7 @@ public class TestOzoneAtRestEncryption {
String value = "sample value";
try (OzoneOutputStream out = bucket.createKey(keyName,
value.getBytes(StandardCharsets.UTF_8).length,
- ReplicationType.STAND_ALONE,
+ ReplicationType.RATIS,
ReplicationFactor.ONE, new HashMap<>())) {
out.write(value.getBytes(StandardCharsets.UTF_8));
}
@@ -259,7 +259,7 @@ public class TestOzoneAtRestEncryption {
Assert.assertEquals(len, value.length());
Assert.assertTrue(verifyRatisReplication(bucket.getVolumeName(),
- bucket.getName(), keyName, ReplicationType.STAND_ALONE,
+ bucket.getName(), keyName, ReplicationType.RATIS,
ReplicationFactor.ONE));
Assert.assertEquals(value, new String(fileContent,
StandardCharsets.UTF_8));
Assert.assertFalse(key.getCreationTime().isBefore(testStartTime));
@@ -323,7 +323,7 @@ public class TestOzoneAtRestEncryption {
keyMetadata.put(OzoneConsts.GDPR_FLAG, "true");
try (OzoneOutputStream out = bucket.createKey(keyName,
value.getBytes(StandardCharsets.UTF_8).length,
- ReplicationType.STAND_ALONE,
+ ReplicationType.RATIS,
ReplicationFactor.ONE, keyMetadata)) {
out.write(value.getBytes(StandardCharsets.UTF_8));
}
@@ -340,7 +340,7 @@ public class TestOzoneAtRestEncryption {
Assert.assertEquals(len, value.length());
Assert.assertTrue(verifyRatisReplication(volumeName, bucketName,
- keyName, ReplicationType.STAND_ALONE,
+ keyName, ReplicationType.RATIS,
ReplicationFactor.ONE));
Assert.assertEquals(value, new String(fileContent,
StandardCharsets.UTF_8));
Assert.assertFalse(key.getCreationTime().isBefore(testStartTime));
@@ -463,7 +463,7 @@ public class TestOzoneAtRestEncryption {
String keyName = "mpu_test_key_" + numParts;
// Initiate multipart upload
- String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+ String uploadID = initiateMultipartUpload(bucket, keyName, RATIS,
ONE);
// Upload Parts
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java
index d772a3f..e44cf2d 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java
@@ -74,7 +74,7 @@ import java.util.TreeMap;
import java.util.UUID;
import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE;
-import static org.apache.hadoop.hdds.client.ReplicationType.STAND_ALONE;
+import static org.apache.hadoop.hdds.client.ReplicationType.RATIS;
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
@@ -161,7 +161,7 @@ public class TestOzoneClientMultipartUploadWithFSO {
volume.createBucket(bucketName);
OzoneBucket bucket = volume.getBucket(bucketName);
OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName,
- STAND_ALONE, ONE);
+ RATIS, ONE);
Assert.assertNotNull(multipartInfo);
String uploadID = multipartInfo.getUploadID();
@@ -173,7 +173,7 @@ public class TestOzoneClientMultipartUploadWithFSO {
// Call initiate multipart upload for the same key again, this should
// generate a new uploadID.
multipartInfo = bucket.initiateMultipartUpload(keyName,
- STAND_ALONE, ONE);
+ RATIS, ONE);
Assert.assertNotNull(multipartInfo);
Assert.assertEquals(volumeName, multipartInfo.getVolumeName());
@@ -227,7 +227,7 @@ public class TestOzoneClientMultipartUploadWithFSO {
volume.createBucket(bucketName);
OzoneBucket bucket = volume.getBucket(bucketName);
OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName,
- STAND_ALONE, ONE);
+ RATIS, ONE);
Assert.assertNotNull(multipartInfo);
String uploadID = multipartInfo.getUploadID();
@@ -321,7 +321,7 @@ public class TestOzoneClientMultipartUploadWithFSO {
OzoneBucket bucket = volume.getBucket(bucketName);
// Initiate multipart upload
- String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+ String uploadID = initiateMultipartUpload(bucket, keyName, RATIS,
ONE);
// Upload Parts
@@ -352,7 +352,7 @@ public class TestOzoneClientMultipartUploadWithFSO {
volume.createBucket(bucketName);
OzoneBucket bucket = volume.getBucket(bucketName);
- String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+ String uploadID = initiateMultipartUpload(bucket, keyName, RATIS,
ONE);
// We have not uploaded any parts, but passing some list it should throw
@@ -376,7 +376,7 @@ public class TestOzoneClientMultipartUploadWithFSO {
volume.createBucket(bucketName);
OzoneBucket bucket = volume.getBucket(bucketName);
- String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+ String uploadID = initiateMultipartUpload(bucket, keyName, RATIS,
ONE);
uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8));
@@ -400,7 +400,7 @@ public class TestOzoneClientMultipartUploadWithFSO {
volume.createBucket(bucketName);
OzoneBucket bucket = volume.getBucket(bucketName);
- String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+ String uploadID = initiateMultipartUpload(bucket, keyName, RATIS,
ONE);
uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8));
@@ -426,7 +426,7 @@ public class TestOzoneClientMultipartUploadWithFSO {
OzoneBucket bucket = volume.getBucket(bucketName);
OmMultipartInfo omMultipartInfo = bucket.initiateMultipartUpload(keyName,
- STAND_ALONE, ONE);
+ RATIS, ONE);
Assert.assertNotNull(omMultipartInfo.getUploadID());
@@ -505,7 +505,7 @@ public class TestOzoneClientMultipartUploadWithFSO {
OzoneBucket bucket = volume.getBucket(bucketName);
OmMultipartInfo omMultipartInfo = bucket.initiateMultipartUpload(keyName,
- STAND_ALONE, ONE);
+ RATIS, ONE);
Assert.assertNotNull(omMultipartInfo.getUploadID());
@@ -540,7 +540,7 @@ public class TestOzoneClientMultipartUploadWithFSO {
volume.createBucket(bucketName);
OzoneBucket bucket = volume.getBucket(bucketName);
- String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+ String uploadID = initiateMultipartUpload(bucket, keyName, RATIS,
ONE);
bucket.abortMultipartUpload(keyName, uploadID);
}
@@ -563,7 +563,7 @@ public class TestOzoneClientMultipartUploadWithFSO {
ozoneManager.getMetadataManager().getBucketTable().get(buckKey);
BucketLayout bucketLayout = buckInfo.getBucketLayout();
- String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+ String uploadID = initiateMultipartUpload(bucket, keyName, RATIS,
ONE);
String partName = uploadPart(bucket, keyName, uploadID, 1,
"data".getBytes(UTF_8));
@@ -603,7 +603,7 @@ public class TestOzoneClientMultipartUploadWithFSO {
OzoneBucket bucket = volume.getBucket(bucketName);
Map<Integer, String> partsMap = new TreeMap<>();
- String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+ String uploadID = initiateMultipartUpload(bucket, keyName, RATIS,
ONE);
String partName1 = uploadPart(bucket, keyName, uploadID, 1,
generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
@@ -620,7 +620,7 @@ public class TestOzoneClientMultipartUploadWithFSO {
OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts =
bucket.listParts(keyName, uploadID, 0, 3);
- Assert.assertEquals(STAND_ALONE,
+ Assert.assertEquals(RATIS,
ozoneMultipartUploadPartListParts.getReplicationType());
Assert.assertEquals(3,
ozoneMultipartUploadPartListParts.getPartInfoList().size());
@@ -705,7 +705,7 @@ public class TestOzoneClientMultipartUploadWithFSO {
OzoneBucket bucket = volume.getBucket(bucketName);
Map<Integer, String> partsMap = new TreeMap<>();
- String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+ String uploadID = initiateMultipartUpload(bucket, keyName, RATIS,
ONE);
String partName1 = uploadPart(bucket, keyName, uploadID, 1,
generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
@@ -722,7 +722,7 @@ public class TestOzoneClientMultipartUploadWithFSO {
OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts =
bucket.listParts(keyName, uploadID, 0, 2);
- Assert.assertEquals(STAND_ALONE,
+ Assert.assertEquals(RATIS,
ozoneMultipartUploadPartListParts.getReplicationType());
Assert.assertEquals(2,
@@ -808,7 +808,7 @@ public class TestOzoneClientMultipartUploadWithFSO {
OzoneBucket bucket = volume.getBucket(bucketName);
- String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+ String uploadID = initiateMultipartUpload(bucket, keyName, RATIS,
ONE);
uploadPart(bucket, keyName, uploadID, 1,
generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
@@ -821,7 +821,7 @@ public class TestOzoneClientMultipartUploadWithFSO {
Assert.assertEquals(0,
ozoneMultipartUploadPartListParts.getPartInfoList().size());
- Assert.assertEquals(STAND_ALONE,
+ Assert.assertEquals(RATIS,
ozoneMultipartUploadPartListParts.getReplicationType());
// As we don't have any parts with greater than partNumberMarker and list
@@ -866,11 +866,11 @@ public class TestOzoneClientMultipartUploadWithFSO {
OzoneBucket bucket = volume.getBucket(bucketName);
// Initiate multipart upload
- String uploadID1 = initiateMultipartUpload(bucket, key1, STAND_ALONE,
+ String uploadID1 = initiateMultipartUpload(bucket, key1, RATIS,
ONE);
- String uploadID2 = initiateMultipartUpload(bucket, key2, STAND_ALONE,
+ String uploadID2 = initiateMultipartUpload(bucket, key2, RATIS,
ONE);
- String uploadID3 = initiateMultipartUpload(bucket, key3, STAND_ALONE,
+ String uploadID3 = initiateMultipartUpload(bucket, key3, RATIS,
ONE);
// Upload Parts
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
index 897ad22..e70087a 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
@@ -116,8 +116,7 @@ import org.apache.commons.lang3.StringUtils;
import static org.apache.hadoop.hdds.StringUtils.string2Bytes;
import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE;
import static org.apache.hadoop.hdds.client.ReplicationFactor.THREE;
-import static org.apache.hadoop.hdds.client.ReplicationType.STAND_ALONE;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.RATIS;
+import static org.apache.hadoop.hdds.client.ReplicationType.RATIS;
import static org.apache.hadoop.ozone.OmUtils.MAX_TRXN_ID;
import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS;
import static org.apache.hadoop.ozone.OzoneAcl.AclScope.DEFAULT;
@@ -860,7 +859,7 @@ public abstract class TestOzoneRpcClientAbstract {
String keyName = UUID.randomUUID().toString();
OzoneOutputStream out = bucket.createKey(keyName,
- value.getBytes(UTF_8).length, STAND_ALONE,
+ value.getBytes(UTF_8).length, RATIS,
ONE, new HashMap<>());
out.write(value.getBytes(UTF_8));
out.close();
@@ -870,7 +869,7 @@ public abstract class TestOzoneRpcClientAbstract {
byte[] fileContent = new byte[value.getBytes(UTF_8).length];
is.read(fileContent);
Assert.assertTrue(verifyRatisReplication(volumeName, bucketName,
- keyName, STAND_ALONE,
+ keyName, RATIS,
ONE));
Assert.assertEquals(value, new String(fileContent, UTF_8));
Assert.assertFalse(key.getCreationTime().isBefore(testStartTime));
@@ -934,7 +933,7 @@ public abstract class TestOzoneRpcClientAbstract {
try {
OzoneOutputStream out = bucket.createKey(UUID.randomUUID().toString(),
- valueLength, STAND_ALONE, ONE, new HashMap<>());
+ valueLength, RATIS, ONE, new HashMap<>());
for (int i = 0; i <= (4 * blockSize) / value.length(); i++) {
out.write(value.getBytes(UTF_8));
}
@@ -959,7 +958,7 @@ public abstract class TestOzoneRpcClientAbstract {
bucket.setQuota(OzoneQuota.parseQuota(
5 * blockSize + " B", "100"));
OzoneOutputStream out = bucket.createKey(UUID.randomUUID().toString(),
- valueLength, STAND_ALONE, ONE, new HashMap<>());
+ valueLength, RATIS, ONE, new HashMap<>());
out.close();
Assert.assertEquals(4 * blockSize,
store.getVolume(volumeName).getBucket(bucketName).getUsedBytes());
@@ -1093,7 +1092,7 @@ public abstract class TestOzoneRpcClientAbstract {
private void writeKey(OzoneBucket bucket, String keyName,
ReplicationFactor replication, String value, int valueLength)
throws IOException{
- OzoneOutputStream out = bucket.createKey(keyName, valueLength, STAND_ALONE,
+ OzoneOutputStream out = bucket.createKey(keyName, valueLength, RATIS,
replication, new HashMap<>());
out.write(value.getBytes(UTF_8));
out.close();
@@ -1102,7 +1101,7 @@ public abstract class TestOzoneRpcClientAbstract {
private void writeFile(OzoneBucket bucket, String keyName,
ReplicationFactor replication, String value, int valueLength)
throws IOException{
- OzoneOutputStream out = bucket.createFile(keyName, valueLength, STAND_ALONE,
+ OzoneOutputStream out = bucket.createFile(keyName, valueLength, RATIS,
replication, true, true);
out.write(value.getBytes(UTF_8));
out.close();
@@ -1124,7 +1123,7 @@ public abstract class TestOzoneRpcClientAbstract {
volume.createBucket(bucketName);
OzoneBucket bucket = volume.getBucket(bucketName);
OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName,
- STAND_ALONE, ONE);
+ RATIS, ONE);
assertNotNull(multipartInfo);
String uploadID = multipartInfo.getUploadID();
@@ -1162,7 +1161,7 @@ public abstract class TestOzoneRpcClientAbstract {
// create the initial key with size 0, write will allocate the first block.
OzoneOutputStream out = bucket.createKey(keyName, 0,
- STAND_ALONE, ONE, new HashMap<>());
+ RATIS, ONE, new HashMap<>());
out.write(value.getBytes(UTF_8));
out.close();
OmKeyArgs.Builder builder = new OmKeyArgs.Builder();
@@ -1428,7 +1427,7 @@ public abstract class TestOzoneRpcClientAbstract {
//String keyValue = "this is a test value.glx";
// create the initial key with size 0, write will allocate the first block.
OzoneOutputStream out = bucket.createKey(keyName,
- keyValue.getBytes(UTF_8).length, STAND_ALONE,
+ keyValue.getBytes(UTF_8).length, RATIS,
ONE, new HashMap<>());
out.write(keyValue.getBytes(UTF_8));
out.close();
@@ -1780,7 +1779,7 @@ public abstract class TestOzoneRpcClientAbstract {
volume.createBucket(bucketName);
OzoneBucket bucket = volume.getBucket(bucketName);
OzoneOutputStream out = bucket.createKey(keyName,
- value.getBytes(UTF_8).length, STAND_ALONE,
+ value.getBytes(UTF_8).length, RATIS,
ONE, new HashMap<>());
out.write(value.getBytes(UTF_8));
out.close();
@@ -2057,25 +2056,25 @@ public abstract class TestOzoneRpcClientAbstract {
byte[] value = RandomStringUtils.randomAscii(10240).getBytes(UTF_8);
OzoneOutputStream one = volAbucketA.createKey(
keyBaseA + i + "-" + RandomStringUtils.randomNumeric(5),
- value.length, STAND_ALONE, ONE,
+ value.length, RATIS, ONE,
new HashMap<>());
one.write(value);
one.close();
OzoneOutputStream two = volAbucketB.createKey(
keyBaseA + i + "-" + RandomStringUtils.randomNumeric(5),
- value.length, STAND_ALONE, ONE,
+ value.length, RATIS, ONE,
new HashMap<>());
two.write(value);
two.close();
OzoneOutputStream three = volBbucketA.createKey(
keyBaseA + i + "-" + RandomStringUtils.randomNumeric(5),
- value.length, STAND_ALONE, ONE,
+ value.length, RATIS, ONE,
new HashMap<>());
three.write(value);
three.close();
OzoneOutputStream four = volBbucketB.createKey(
keyBaseA + i + "-" + RandomStringUtils.randomNumeric(5),
- value.length, STAND_ALONE, ONE,
+ value.length, RATIS, ONE,
new HashMap<>());
four.write(value);
four.close();
@@ -2090,25 +2089,25 @@ public abstract class TestOzoneRpcClientAbstract {
byte[] value = RandomStringUtils.randomAscii(10240).getBytes(UTF_8);
OzoneOutputStream one = volAbucketA.createKey(
keyBaseB + i + "-" + RandomStringUtils.randomNumeric(5),
- value.length, STAND_ALONE, ONE,
+ value.length, RATIS, ONE,
new HashMap<>());
one.write(value);
one.close();
OzoneOutputStream two = volAbucketB.createKey(
keyBaseB + i + "-" + RandomStringUtils.randomNumeric(5),
- value.length, STAND_ALONE, ONE,
+ value.length, RATIS, ONE,
new HashMap<>());
two.write(value);
two.close();
OzoneOutputStream three = volBbucketA.createKey(
keyBaseB + i + "-" + RandomStringUtils.randomNumeric(5),
- value.length, STAND_ALONE, ONE,
+ value.length, RATIS, ONE,
new HashMap<>());
three.write(value);
three.close();
OzoneOutputStream four = volBbucketB.createKey(
keyBaseB + i + "-" + RandomStringUtils.randomNumeric(5),
- value.length, STAND_ALONE, ONE,
+ value.length, RATIS, ONE,
new HashMap<>());
four.write(value);
four.close();
@@ -2189,7 +2188,7 @@ public abstract class TestOzoneRpcClientAbstract {
volume.createBucket(bucketName);
OzoneBucket bucket = volume.getBucket(bucketName);
OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName,
- STAND_ALONE, ONE);
+ RATIS, ONE);
assertNotNull(multipartInfo);
String uploadID = multipartInfo.getUploadID();
@@ -2201,7 +2200,7 @@ public abstract class TestOzoneRpcClientAbstract {
// Call initiate multipart upload for the same key again, this should
// generate a new uploadID.
multipartInfo = bucket.initiateMultipartUpload(keyName,
- STAND_ALONE, ONE);
+ RATIS, ONE);
assertNotNull(multipartInfo);
Assert.assertEquals(volumeName, multipartInfo.getVolumeName());
@@ -2256,7 +2255,7 @@ public abstract class TestOzoneRpcClientAbstract {
volume.createBucket(bucketName);
OzoneBucket bucket = volume.getBucket(bucketName);
OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName,
- STAND_ALONE, ONE);
+ RATIS, ONE);
assertNotNull(multipartInfo);
String uploadID = multipartInfo.getUploadID();
@@ -2293,7 +2292,7 @@ public abstract class TestOzoneRpcClientAbstract {
volume.createBucket(bucketName);
OzoneBucket bucket = volume.getBucket(bucketName);
OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName,
- STAND_ALONE, ONE);
+ RATIS, ONE);
assertNotNull(multipartInfo);
String uploadID = multipartInfo.getUploadID();
@@ -2562,7 +2561,7 @@ public abstract class TestOzoneRpcClientAbstract {
OzoneBucket bucket = volume.getBucket(bucketName);
// Initiate multipart upload
- String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+ String uploadID = initiateMultipartUpload(bucket, keyName, RATIS,
ONE);
// Upload Parts
@@ -2595,7 +2594,7 @@ public abstract class TestOzoneRpcClientAbstract {
volume.createBucket(bucketName);
OzoneBucket bucket = volume.getBucket(bucketName);
- String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+ String uploadID = initiateMultipartUpload(bucket, keyName, RATIS,
ONE);
// We have not uploaded any parts, but passing some list it should throw
@@ -2620,7 +2619,7 @@ public abstract class TestOzoneRpcClientAbstract {
volume.createBucket(bucketName);
OzoneBucket bucket = volume.getBucket(bucketName);
- String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+ String uploadID = initiateMultipartUpload(bucket, keyName, RATIS,
ONE);
uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8));
@@ -2645,7 +2644,7 @@ public abstract class TestOzoneRpcClientAbstract {
volume.createBucket(bucketName);
OzoneBucket bucket = volume.getBucket(bucketName);
- String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+ String uploadID = initiateMultipartUpload(bucket, keyName, RATIS,
ONE);
uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8));
@@ -2685,7 +2684,7 @@ public abstract class TestOzoneRpcClientAbstract {
OzoneBucket bucket = volume.getBucket(bucketName);
OmMultipartInfo omMultipartInfo = bucket.initiateMultipartUpload(keyName,
- STAND_ALONE, ONE);
+ RATIS, ONE);
Assert.assertNotNull(omMultipartInfo.getUploadID());
@@ -2720,7 +2719,7 @@ public abstract class TestOzoneRpcClientAbstract {
OzoneBucket bucket = volume.getBucket(bucketName);
OmMultipartInfo omMultipartInfo = bucket.initiateMultipartUpload(keyName,
- STAND_ALONE, ONE);
+ RATIS, ONE);
Assert.assertNotNull(omMultipartInfo.getUploadID());
@@ -2782,7 +2781,7 @@ public abstract class TestOzoneRpcClientAbstract {
volume.createBucket(bucketName);
OzoneBucket bucket = volume.getBucket(bucketName);
- String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+ String uploadID = initiateMultipartUpload(bucket, keyName, RATIS,
ONE);
bucket.abortMultipartUpload(keyName, uploadID);
}
@@ -2798,7 +2797,7 @@ public abstract class TestOzoneRpcClientAbstract {
volume.createBucket(bucketName);
OzoneBucket bucket = volume.getBucket(bucketName);
- String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+ String uploadID = initiateMultipartUpload(bucket, keyName, RATIS,
ONE);
uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8));
bucket.abortMultipartUpload(keyName, uploadID);
@@ -2816,7 +2815,7 @@ public abstract class TestOzoneRpcClientAbstract {
OzoneBucket bucket = volume.getBucket(bucketName);
Map<Integer, String> partsMap = new TreeMap<>();
- String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+ String uploadID = initiateMultipartUpload(bucket, keyName, RATIS,
ONE);
String partName1 = uploadPart(bucket, keyName, uploadID, 1,
generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
@@ -2833,7 +2832,7 @@ public abstract class TestOzoneRpcClientAbstract {
OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts =
bucket.listParts(keyName, uploadID, 0, 3);
- Assert.assertEquals(STAND_ALONE,
+ Assert.assertEquals(RATIS,
ozoneMultipartUploadPartListParts.getReplicationType());
Assert.assertEquals(3,
ozoneMultipartUploadPartListParts.getPartInfoList().size());
@@ -2867,7 +2866,7 @@ public abstract class TestOzoneRpcClientAbstract {
OzoneBucket bucket = volume.getBucket(bucketName);
Map<Integer, String> partsMap = new TreeMap<>();
- String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+ String uploadID = initiateMultipartUpload(bucket, keyName, RATIS,
ONE);
String partName1 = uploadPart(bucket, keyName, uploadID, 1,
generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
@@ -2884,7 +2883,7 @@ public abstract class TestOzoneRpcClientAbstract {
OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts =
bucket.listParts(keyName, uploadID, 0, 2);
- Assert.assertEquals(STAND_ALONE,
+ Assert.assertEquals(RATIS,
ozoneMultipartUploadPartListParts.getReplicationType());
Assert.assertEquals(2,
@@ -2972,7 +2971,7 @@ public abstract class TestOzoneRpcClientAbstract {
OzoneBucket bucket = volume.getBucket(bucketName);
- String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+ String uploadID = initiateMultipartUpload(bucket, keyName, RATIS,
ONE);
uploadPart(bucket, keyName, uploadID, 1,
generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
@@ -2985,7 +2984,7 @@ public abstract class TestOzoneRpcClientAbstract {
Assert.assertEquals(0,
ozoneMultipartUploadPartListParts.getPartInfoList().size());
- Assert.assertEquals(STAND_ALONE,
+ Assert.assertEquals(RATIS,
ozoneMultipartUploadPartListParts.getReplicationType());
// As we don't have any parts with greater than partNumberMarker and list
@@ -3331,7 +3330,7 @@ public abstract class TestOzoneRpcClientAbstract {
}
private void writeKey(String key1, OzoneBucket bucket) throws IOException {
- OzoneOutputStream out = bucket.createKey(key1, 1024, STAND_ALONE,
+ OzoneOutputStream out = bucket.createKey(key1, 1024, RATIS,
ONE, new HashMap<>());
out.write(RandomStringUtils.random(1024).getBytes(UTF_8));
out.close();
@@ -3453,7 +3452,7 @@ public abstract class TestOzoneRpcClientAbstract {
private void createTestKey(OzoneBucket bucket, String keyName,
String keyValue) throws IOException {
OzoneOutputStream out = bucket.createKey(keyName,
- keyValue.getBytes(UTF_8).length, STAND_ALONE,
+ keyValue.getBytes(UTF_8).length, RATIS,
ONE, new HashMap<>());
out.write(keyValue.getBytes(UTF_8));
out.close();
@@ -3512,7 +3511,7 @@ public abstract class TestOzoneRpcClientAbstract {
Map<String, String> keyMetadata = new HashMap<>();
keyMetadata.put(OzoneConsts.GDPR_FLAG, "true");
OzoneOutputStream out = bucket.createKey(keyName,
- text.getBytes(UTF_8).length, STAND_ALONE, ONE, keyMetadata);
+ text.getBytes(UTF_8).length, RATIS, ONE, keyMetadata);
out.write(text.getBytes(UTF_8));
out.close();
Assert.assertNull(keyMetadata.get(OzoneConsts.GDPR_SECRET));
@@ -3530,7 +3529,7 @@ public abstract class TestOzoneRpcClientAbstract {
byte[] fileContent = new byte[text.getBytes(UTF_8).length];
is.read(fileContent);
Assert.assertTrue(verifyRatisReplication(volumeName, bucketName,
- keyName, STAND_ALONE,
+ keyName, RATIS,
ONE));
Assert.assertEquals(text, new String(fileContent, UTF_8));
@@ -3593,7 +3592,7 @@ public abstract class TestOzoneRpcClientAbstract {
Map<String, String> keyMetadata = new HashMap<>();
keyMetadata.put(OzoneConsts.GDPR_FLAG, "true");
OzoneOutputStream out = bucket.createKey(keyName,
- text.getBytes(UTF_8).length, STAND_ALONE, ONE, keyMetadata);
+ text.getBytes(UTF_8).length, RATIS, ONE, keyMetadata);
out.write(text.getBytes(UTF_8));
out.close();
@@ -3610,7 +3609,7 @@ public abstract class TestOzoneRpcClientAbstract {
byte[] fileContent = new byte[text.getBytes(UTF_8).length];
is.read(fileContent);
Assert.assertTrue(verifyRatisReplication(volumeName, bucketName,
- keyName, STAND_ALONE,
+ keyName, RATIS,
ONE));
Assert.assertEquals(text, new String(fileContent, UTF_8));
@@ -3658,7 +3657,8 @@ public abstract class TestOzoneRpcClientAbstract {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
ReplicationConfig replicationConfig = ReplicationConfig
- .fromProtoTypeAndFactor(RATIS, HddsProtos.ReplicationFactor.THREE);
+ .fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS,
+ HddsProtos.ReplicationFactor.THREE);
String value = "sample value";
store.createVolume(volumeName);
@@ -3700,7 +3700,8 @@ public abstract class TestOzoneRpcClientAbstract {
String bucketName, String keyName, boolean versioning) throws Exception {
ReplicationConfig replicationConfig = ReplicationConfig
- .fromProtoTypeAndFactor(RATIS, HddsProtos.ReplicationFactor.THREE);
+ .fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS,
+ HddsProtos.ReplicationFactor.THREE);
String value = "sample value";
store.createVolume(volumeName);
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java
index b44afe3..a525499 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java
@@ -160,7 +160,7 @@ public class TestSecureOzoneRpcClient extends
TestOzoneRpcClient {
String keyName = UUID.randomUUID().toString();
try (OzoneOutputStream out = bucket.createKey(keyName,
- value.getBytes(UTF_8).length, ReplicationType.STAND_ALONE,
+ value.getBytes(UTF_8).length, ReplicationType.RATIS,
ReplicationFactor.ONE, new HashMap<>())) {
out.write(value.getBytes(UTF_8));
}
@@ -174,7 +174,7 @@ public class TestSecureOzoneRpcClient extends
TestOzoneRpcClient {
}
Assert.assertTrue(verifyRatisReplication(volumeName, bucketName,
- keyName, ReplicationType.STAND_ALONE,
+ keyName, ReplicationType.RATIS,
ReplicationFactor.ONE));
Assert.assertEquals(value, new String(fileContent, UTF_8));
Assert.assertFalse(key.getCreationTime().isBefore(testStartTime));
@@ -203,7 +203,7 @@ public class TestSecureOzoneRpcClient extends
TestOzoneRpcClient {
String keyName = UUID.randomUUID().toString();
try (OzoneOutputStream out = bucket.createKey(keyName,
- value.getBytes(UTF_8).length, ReplicationType.STAND_ALONE,
+ value.getBytes(UTF_8).length, ReplicationType.RATIS,
ReplicationFactor.ONE, new HashMap<>())) {
LambdaTestUtils.intercept(IOException.class, "UNAUTHENTICATED: Fail " +
"to find any token ",
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java
index 85d46ca..cf81bd9 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java
@@ -119,9 +119,12 @@ public final class TestHelper {
public static OzoneOutputStream createKey(String keyName,
ReplicationType type, long size, ObjectStore objectStore,
String volumeName, String bucketName) throws Exception {
+ if (type == ReplicationType.STAND_ALONE) {
+ throw new IllegalArgumentException(ReplicationType.STAND_ALONE +
+ " replication type should not be used in tests to write keys anymore."
+ );
+ }
org.apache.hadoop.hdds.client.ReplicationFactor factor =
- type == ReplicationType.STAND_ALONE ?
- org.apache.hadoop.hdds.client.ReplicationFactor.ONE :
org.apache.hadoop.hdds.client.ReplicationFactor.THREE;
return objectStore.getVolume(volumeName).getBucket(bucketName)
.createKey(keyName, size, type, factor, new HashMap<>());
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
index cb85161..9b0ccd5 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
@@ -77,6 +77,8 @@ public class TestCloseContainerHandler {
conf.setBoolean(HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_CREATION, false);
cluster = MiniOzoneCluster.newBuilder(conf)
.setNumDatanodes(1).build();
+ cluster.waitForClusterToBeReady();
+ cluster.waitForPipelineTobeReady(ONE, 30000);
}
@After
@@ -96,7 +98,7 @@ public class TestCloseContainerHandler {
objectStore.createVolume("test");
objectStore.getVolume("test").createBucket("test");
OzoneOutputStream key = objectStore.getVolume("test").getBucket("test")
- .createKey("test", 1024, ReplicationType.STAND_ALONE,
+ .createKey("test", 1024, ReplicationType.RATIS,
ReplicationFactor.ONE, new HashMap<>());
key.write("test".getBytes(UTF_8));
key.close();
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java
index e3eccb5..0a0e0fa 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java
@@ -87,6 +87,7 @@ public class TestDeleteContainerHandler {
cluster = MiniOzoneCluster.newBuilder(conf)
.setNumDatanodes(1).build();
cluster.waitForClusterToBeReady();
+ cluster.waitForPipelineTobeReady(ONE, 30000);
OzoneClient client = OzoneClientFactory.getRpcClient(conf);
objectStore = client.getObjectStore();
@@ -243,7 +244,7 @@ public class TestDeleteContainerHandler {
private void createKey(String keyName) throws IOException {
OzoneOutputStream key = objectStore.getVolume(volumeName)
.getBucket(bucketName)
- .createKey(keyName, 1024, ReplicationType.STAND_ALONE,
+ .createKey(keyName, 1024, ReplicationType.RATIS,
ReplicationFactor.ONE, new HashMap<>());
key.write("test".getBytes(UTF_8));
key.close();
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scrubber/TestDataScrubber.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scrubber/TestDataScrubber.java
index 658746a..898119f 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scrubber/TestDataScrubber.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scrubber/TestDataScrubber.java
@@ -70,7 +70,7 @@ import org.junit.rules.Timeout;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE;
-import static org.apache.hadoop.hdds.client.ReplicationType.STAND_ALONE;
+import static org.apache.hadoop.hdds.client.ReplicationType.RATIS;
/**
* This class tests the data scrubber functionality.
@@ -101,6 +101,7 @@ public class TestDataScrubber {
cluster = MiniOzoneCluster.newBuilder(ozoneConfig).setNumDatanodes(1)
.build();
cluster.waitForClusterToBeReady();
+ cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.ONE, 30000);
ozClient = OzoneClientFactory.getRpcClient(ozoneConfig);
store = ozClient.getObjectStore();
ozoneManager = cluster.getOzoneManager();
@@ -137,7 +138,7 @@ public class TestDataScrubber {
String keyName = UUID.randomUUID().toString();
OzoneOutputStream out = bucket.createKey(keyName,
- value.getBytes(UTF_8).length, STAND_ALONE,
+ value.getBytes(UTF_8).length, RATIS,
ONE, new HashMap<>());
out.write(value.getBytes(UTF_8));
out.close();
@@ -147,7 +148,7 @@ public class TestDataScrubber {
byte[] fileContent = new byte[value.getBytes(UTF_8).length];
is.read(fileContent);
Assert.assertTrue(verifyRatisReplication(volumeName, bucketName,
- keyName, STAND_ALONE,
+ keyName, RATIS,
ONE));
Assert.assertEquals(value, new String(fileContent, UTF_8));
Assert.assertFalse(key.getCreationTime().isBefore(testStartTime));
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureDetection.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureDetection.java
index 2a7fbe8..af39055 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureDetection.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureDetection.java
@@ -62,7 +62,7 @@ import org.junit.rules.Timeout;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE;
-import static org.apache.hadoop.hdds.client.ReplicationType.STAND_ALONE;
+import static org.apache.hadoop.hdds.client.ReplicationType.RATIS;
import static
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN;
import static
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE;
import static
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CONTAINER_CACHE_SIZE;
@@ -109,6 +109,7 @@ public class TestDatanodeHddsVolumeFailureDetection {
.setNumDataVolumes(1)
.build();
cluster.waitForClusterToBeReady();
+ cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.ONE, 30000);
ozClient = OzoneClientFactory.getRpcClient(ozoneConfig);
store = ozClient.getObjectStore();
@@ -141,7 +142,7 @@ public class TestDatanodeHddsVolumeFailureDetection {
String keyName = UUID.randomUUID().toString();
String value = "sample value";
OzoneOutputStream out = bucket.createKey(keyName,
- value.getBytes(UTF_8).length, STAND_ALONE,
+ value.getBytes(UTF_8).length, RATIS,
ONE, new HashMap<>());
out.write(value.getBytes(UTF_8));
out.close();
@@ -243,7 +244,7 @@ public class TestDatanodeHddsVolumeFailureDetection {
String keyName = UUID.randomUUID().toString();
String value = "sample value";
OzoneOutputStream out = bucket.createKey(keyName,
- value.getBytes(UTF_8).length, STAND_ALONE,
+ value.getBytes(UTF_8).length, RATIS,
ONE, new HashMap<>());
out.write(value.getBytes(UTF_8));
out.close();
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java
index 4678176..9104d98 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java
@@ -107,7 +107,7 @@ public class TestContainerReportWithKeys {
objectStore.getVolume(volumeName).createBucket(bucketName);
OzoneOutputStream key =
objectStore.getVolume(volumeName).getBucket(bucketName)
- .createKey(keyName, keySize, ReplicationType.STAND_ALONE,
+ .createKey(keyName, keySize, ReplicationType.RATIS,
ReplicationFactor.ONE, new HashMap<>());
String dataString = RandomStringUtils.randomAlphabetic(keySize);
key.write(dataString.getBytes(UTF_8));
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java
index 232dfab..88f0dca 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java
@@ -112,7 +112,7 @@ public class TestKeyPurging {
String keyName = keyBase + "-" + i;
keys.add(keyName);
OzoneOutputStream keyStream = TestHelper.createKey(
- keyName, ReplicationType.STAND_ALONE, ReplicationFactor.ONE,
+ keyName, ReplicationType.RATIS, ReplicationFactor.ONE,
KEY_SIZE, store, volumeName, bucketName);
keyStream.write(data);
keyStream.close();
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithFSO.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithFSO.java
index b8f37ce..54b0b25 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithFSO.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithFSO.java
@@ -68,7 +68,7 @@ import java.util.UUID;
import java.util.concurrent.TimeoutException;
import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE;
-import static org.apache.hadoop.hdds.client.ReplicationType.STAND_ALONE;
+import static org.apache.hadoop.hdds.client.ReplicationType.RATIS;
import static
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_SCHEME;
@@ -693,7 +693,7 @@ public class TestObjectStoreWithFSO {
private void createTestKey(OzoneBucket bucket, String keyName,
String keyValue) throws IOException {
OzoneOutputStream out = bucket.createKey(keyName,
- keyValue.getBytes(StandardCharsets.UTF_8).length, STAND_ALONE,
+ keyValue.getBytes(StandardCharsets.UTF_8).length, RATIS,
ONE, new HashMap<>());
out.write(keyValue.getBytes(StandardCharsets.UTF_8));
out.close();
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java
index b3af2f9..2652317 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java
@@ -217,7 +217,7 @@ public abstract class TestOzoneManagerHA {
String keyName = "key" + RandomStringUtils.randomNumeric(5);
String data = "data" + RandomStringUtils.randomNumeric(5);
OzoneOutputStream ozoneOutputStream = ozoneBucket.createKey(keyName,
- data.length(), ReplicationType.STAND_ALONE,
+ data.length(), ReplicationType.RATIS,
ReplicationFactor.ONE, new HashMap<>());
ozoneOutputStream.write(data.getBytes(UTF_8), 0, data.length());
ozoneOutputStream.close();
@@ -380,7 +380,7 @@ public abstract class TestOzoneManagerHA {
String value = "random data";
OzoneOutputStream ozoneOutputStream = ozoneBucket.createKey(keyName,
- value.length(), ReplicationType.STAND_ALONE,
+ value.length(), ReplicationType.RATIS,
ReplicationFactor.ONE, new HashMap<>());
ozoneOutputStream.write(value.getBytes(UTF_8), 0, value.length());
ozoneOutputStream.close();
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerPrepare.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerPrepare.java
index fe4429e..003f970 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerPrepare.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerPrepare.java
@@ -373,7 +373,7 @@ public class TestOzoneManagerPrepare extends
TestOzoneManagerHA {
byte[] data = ContainerTestHelper.getFixedLengthString(
keyString, 100).getBytes(UTF_8);
OzoneOutputStream keyStream = TestHelper.createKey(
- keyName, ReplicationType.STAND_ALONE, ReplicationFactor.ONE,
+ keyName, ReplicationType.RATIS, ReplicationFactor.ONE,
100, store, volumeName, bucketName);
keyStream.write(data);
keyStream.close();
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerFSO.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerFSO.java
index f3dc4b6..c16583c 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerFSO.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerFSO.java
@@ -23,6 +23,7 @@ import java.util.UUID;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.client.ObjectStore;
@@ -73,8 +74,7 @@ public class TestReconWithOzoneManagerFSO {
.includeRecon(true)
.build();
cluster.waitForClusterToBeReady();
-
- cluster.getStorageContainerManager().exitSafeMode();
+ cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.ONE, 30000);
store = cluster.getClient().getObjectStore();
}
@@ -94,7 +94,7 @@ public class TestReconWithOzoneManagerFSO {
byte[] data = ContainerTestHelper.getFixedLengthString(
keyString, 100).getBytes(UTF_8);
OzoneOutputStream keyStream = TestHelper.createKey(
- keyName, ReplicationType.STAND_ALONE, ReplicationFactor.ONE,
+ keyName, ReplicationType.RATIS, ReplicationFactor.ONE,
100, store, volumeName, bucketName);
keyStream.write(data);
keyStream.close();
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestStorageContainerManagerHA.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestStorageContainerManagerHA.java
index a5d5505..f09a150 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestStorageContainerManagerHA.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestStorageContainerManagerHA.java
@@ -58,7 +58,7 @@ import java.io.IOException;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE;
-import static org.apache.hadoop.hdds.client.ReplicationType.STAND_ALONE;
+import static org.apache.hadoop.hdds.client.ReplicationType.RATIS;
/**
* Base class for Ozone Manager HA tests.
@@ -155,7 +155,7 @@ public class TestStorageContainerManagerHA {
String keyName = UUID.randomUUID().toString();
OzoneOutputStream out = bucket
- .createKey(keyName, value.getBytes(UTF_8).length, STAND_ALONE, ONE,
+ .createKey(keyName, value.getBytes(UTF_8).length, RATIS, ONE,
new HashMap<>());
out.write(value.getBytes(UTF_8));
out.close();
diff --git
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java
index cf0d4c3..6a0d428 100644
---
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java
+++
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java
@@ -87,7 +87,7 @@ public class OzoneBucketStub extends OzoneBucket {
@Override
public OzoneOutputStream createKey(String key, long size) throws IOException
{
- return createKey(key, size, ReplicationType.STAND_ALONE,
+ return createKey(key, size, ReplicationType.RATIS,
ReplicationFactor.ONE, new HashMap<>());
}
diff --git
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectHead.java
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectHead.java
index 66c7456..3c9a17c 100644
---
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectHead.java
+++
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectHead.java
@@ -69,7 +69,7 @@ public class TestObjectHead {
//GIVEN
String value = RandomStringUtils.randomAlphanumeric(32);
OzoneOutputStream out = bucket.createKey("key1",
- value.getBytes(UTF_8).length, ReplicationType.STAND_ALONE,
+ value.getBytes(UTF_8).length, ReplicationType.RATIS,
ReplicationFactor.ONE, new HashMap<>());
out.write(value.getBytes(UTF_8));
out.close();
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]