This is an automated email from the ASF dual-hosted git repository.
adoroszlai pushed a commit to branch HDDS-3816-ec
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/HDDS-3816-ec by this push:
new 143315c89b HDDS-6409. EC: OzoneMultipartUpload needs to handle
ECReplicationConfig (#3279)
143315c89b is described below
commit 143315c89b2f44b55e7b57459fe19f66fe826f41
Author: Doroszlai, Attila <[email protected]>
AuthorDate: Wed Apr 6 20:26:12 2022 +0200
HDDS-6409. EC: OzoneMultipartUpload needs to handle ECReplicationConfig
(#3279)
---
.../hadoop/ozone/client/io/ECKeyOutputStream.java | 109 ++------
.../hadoop/ozone/client/io/KeyOutputStream.java | 43 ++++
.../apache/hadoop/ozone/client/rpc/RpcClient.java | 66 +++--
...OzoneManagerProtocolClientSideTranslatorPB.java | 9 +-
hadoop-ozone/integration-test/pom.xml | 5 +
.../rpc/TestOzoneClientMultipartUploadWithFSO.java | 18 +-
.../client/rpc/TestOzoneRpcClientAbstract.java | 280 +++++++++------------
.../src/main/proto/OmClientProtocol.proto | 2 +-
.../org/apache/hadoop/ozone/om/KeyManagerImpl.java | 7 +-
.../S3MultipartUploadCompleteRequest.java | 12 +-
.../hadoop/ozone/s3/endpoint/BucketEndpoint.java | 3 +-
.../hadoop/ozone/s3/endpoint/ObjectEndpoint.java | 5 +-
.../apache/hadoop/ozone/s3/util/S3StorageType.java | 13 +-
.../hadoop/ozone/client/OzoneBucketStub.java | 13 +-
pom.xml | 6 +
15 files changed, 270 insertions(+), 321 deletions(-)
diff --git
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECKeyOutputStream.java
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECKeyOutputStream.java
index 234c1d3133..2c519626e3 100644
---
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECKeyOutputStream.java
+++
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECKeyOutputStream.java
@@ -35,8 +35,6 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo;
-import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
-import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
@@ -49,7 +47,7 @@ import org.slf4j.LoggerFactory;
* ECKeyOutputStream handles the EC writes by writing the data into underlying
* block output streams chunk by chunk.
*/
-public class ECKeyOutputStream extends KeyOutputStream {
+public final class ECKeyOutputStream extends KeyOutputStream {
private OzoneClientConfig config;
private ECChunkBuffers ecChunkBufferCache;
private int ecChunkSize;
@@ -88,31 +86,31 @@ public class ECKeyOutputStream extends KeyOutputStream {
return blockOutputStreamEntryPool.getLocationInfoList();
}
- @SuppressWarnings({"parameternumber", "squid:S00107"})
- public ECKeyOutputStream(OzoneClientConfig config, OpenKeySession handler,
- XceiverClientFactory xceiverClientManager, OzoneManagerProtocol omClient,
- int chunkSize, String requestId, ECReplicationConfig replicationConfig,
- String uploadID, int partNumber, boolean isMultipart,
- boolean unsafeByteBufferConversion, ByteBufferPool byteBufferPool) {
- this.config = config;
- this.bufferPool = byteBufferPool;
+ private ECKeyOutputStream(Builder builder) {
+ this.config = builder.getClientConfig();
+ this.bufferPool = builder.getByteBufferPool();
// For EC, cell/chunk size and buffer size can be same for now.
- ecChunkSize = replicationConfig.getEcChunkSize();
+ ecChunkSize = builder.getReplicationConfig().getEcChunkSize();
this.config.setStreamBufferMaxSize(ecChunkSize);
this.config.setStreamBufferFlushSize(ecChunkSize);
this.config.setStreamBufferSize(ecChunkSize);
- this.numDataBlks = replicationConfig.getData();
- this.numParityBlks = replicationConfig.getParity();
+ this.numDataBlks = builder.getReplicationConfig().getData();
+ this.numParityBlks = builder.getReplicationConfig().getParity();
ecChunkBufferCache = new ECChunkBuffers(
ecChunkSize, numDataBlks, numParityBlks, bufferPool);
- OmKeyInfo info = handler.getKeyInfo();
+ OmKeyInfo info = builder.getOpenHandler().getKeyInfo();
blockOutputStreamEntryPool =
- new ECBlockOutputStreamEntryPool(config, omClient, requestId,
- replicationConfig, uploadID, partNumber, isMultipart, info,
- unsafeByteBufferConversion, xceiverClientManager, handler.getId());
+ new ECBlockOutputStreamEntryPool(config,
+ builder.getOmClient(), builder.getRequestID(),
+ builder.getReplicationConfig(),
+ builder.getMultipartUploadID(), builder.getMultipartNumber(),
+ builder.isMultipartKey(),
+ info, builder.isUnsafeByteBufferConversionEnabled(),
+ builder.getXceiverManager(), builder.getOpenHandler().getId());
this.writeOffset = 0;
- this.encoder = CodecUtil.createRawEncoderWithFallback(replicationConfig);
+ this.encoder = CodecUtil.createRawEncoderWithFallback(
+ builder.getReplicationConfig());
}
/**
@@ -593,68 +591,13 @@ public class ECKeyOutputStream extends KeyOutputStream {
/**
* Builder class of ECKeyOutputStream.
*/
- public static class Builder {
- private OpenKeySession openHandler;
- private XceiverClientFactory xceiverManager;
- private OzoneManagerProtocol omClient;
- private int chunkSize;
- private String requestID;
- private String multipartUploadID;
- private int multipartNumber;
- private boolean isMultipartKey;
- private boolean unsafeByteBufferConversion;
- private OzoneClientConfig clientConfig;
+ public static class Builder extends KeyOutputStream.Builder {
private ECReplicationConfig replicationConfig;
private ByteBufferPool byteBufferPool;
- public Builder setMultipartUploadID(String uploadID) {
- this.multipartUploadID = uploadID;
- return this;
- }
-
- public Builder setMultipartNumber(int partNumber) {
- this.multipartNumber = partNumber;
- return this;
- }
-
- public Builder setHandler(OpenKeySession handler) {
- this.openHandler = handler;
- return this;
- }
-
- public Builder setXceiverClientManager(XceiverClientFactory manager) {
- this.xceiverManager = manager;
- return this;
- }
-
- public Builder setOmClient(OzoneManagerProtocol client) {
- this.omClient = client;
- return this;
- }
-
- public Builder setChunkSize(int size) {
- this.chunkSize = size;
- return this;
- }
-
- public Builder setRequestID(String id) {
- this.requestID = id;
- return this;
- }
-
- public Builder setIsMultipartKey(boolean isMultipart) {
- this.isMultipartKey = isMultipart;
- return this;
- }
-
- public Builder setConfig(OzoneClientConfig config) {
- this.clientConfig = config;
- return this;
- }
-
- public Builder enableUnsafeByteBufferConversion(boolean enabled) {
- this.unsafeByteBufferConversion = enabled;
- return this;
+ @Override
+ public ECReplicationConfig getReplicationConfig() {
+ return replicationConfig;
}
public ECKeyOutputStream.Builder setReplicationConfig(
@@ -663,17 +606,19 @@ public class ECKeyOutputStream extends KeyOutputStream {
return this;
}
+ public ByteBufferPool getByteBufferPool() {
+ return byteBufferPool;
+ }
+
public ECKeyOutputStream.Builder setByteBufferPool(
ByteBufferPool bufferPool) {
this.byteBufferPool = bufferPool;
return this;
}
+ @Override
public ECKeyOutputStream build() {
- return new ECKeyOutputStream(clientConfig, openHandler, xceiverManager,
- omClient, chunkSize, requestID, replicationConfig, multipartUploadID,
- multipartNumber, isMultipartKey, unsafeByteBufferConversion,
- byteBufferPool);
+ return new ECKeyOutputStream(this);
}
}
diff --git
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
index c752e7a5b7..ee69ca11d9 100644
---
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
+++
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
@@ -553,56 +553,99 @@ public class KeyOutputStream extends OutputStream {
private OzoneClientConfig clientConfig;
private ReplicationConfig replicationConfig;
+ public String getMultipartUploadID() {
+ return multipartUploadID;
+ }
+
public Builder setMultipartUploadID(String uploadID) {
this.multipartUploadID = uploadID;
return this;
}
+ public int getMultipartNumber() {
+ return multipartNumber;
+ }
+
public Builder setMultipartNumber(int partNumber) {
this.multipartNumber = partNumber;
return this;
}
+ public OpenKeySession getOpenHandler() {
+ return openHandler;
+ }
+
public Builder setHandler(OpenKeySession handler) {
this.openHandler = handler;
return this;
}
+ public XceiverClientFactory getXceiverManager() {
+ return xceiverManager;
+ }
+
public Builder setXceiverClientManager(XceiverClientFactory manager) {
this.xceiverManager = manager;
return this;
}
+ public OzoneManagerProtocol getOmClient() {
+ return omClient;
+ }
+
public Builder setOmClient(OzoneManagerProtocol client) {
this.omClient = client;
return this;
}
+ public int getChunkSize() {
+ return chunkSize;
+ }
+
public Builder setChunkSize(int size) {
this.chunkSize = size;
return this;
}
+ public String getRequestID() {
+ return requestID;
+ }
+
public Builder setRequestID(String id) {
this.requestID = id;
return this;
}
+ public boolean isMultipartKey() {
+ return isMultipartKey;
+ }
+
public Builder setIsMultipartKey(boolean isMultipart) {
this.isMultipartKey = isMultipart;
return this;
}
+ public OzoneClientConfig getClientConfig() {
+ return clientConfig;
+ }
+
public Builder setConfig(OzoneClientConfig config) {
this.clientConfig = config;
return this;
}
+ public boolean isUnsafeByteBufferConversionEnabled() {
+ return unsafeByteBufferConversion;
+ }
+
public Builder enableUnsafeByteBufferConversion(boolean enabled) {
this.unsafeByteBufferConversion = enabled;
return this;
}
+ public ReplicationConfig getReplicationConfig() {
+ return replicationConfig;
+ }
public Builder setReplicationConfig(ReplicationConfig replConfig) {
this.replicationConfig = replConfig;
diff --git
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index 6da0267877..bf06efa38c 100644
---
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -1237,19 +1237,11 @@ public class RpcClient implements ClientProtocol {
.build();
OpenKeySession openKey = ozoneManagerClient.openKey(keyArgs);
- KeyOutputStream keyOutputStream =
- new KeyOutputStream.Builder()
- .setHandler(openKey)
- .setXceiverClientManager(xceiverClientManager)
- .setOmClient(ozoneManagerClient)
- .setRequestID(requestId)
- .setReplicationConfig(openKey.getKeyInfo().getReplicationConfig())
- .setMultipartNumber(partNumber)
- .setMultipartUploadID(uploadID)
- .setIsMultipartKey(true)
- .enableUnsafeByteBufferConversion(unsafeByteBufferConversion)
- .setConfig(clientConfig)
- .build();
+ KeyOutputStream keyOutputStream = createKeyOutputStream(openKey, requestId)
+ .setMultipartNumber(partNumber)
+ .setMultipartUploadID(uploadID)
+ .setIsMultipartKey(true)
+ .build();
keyOutputStream.addPreallocateBlocks(
openKey.getKeyInfo().getLatestVersionLocations(),
openKey.getOpenVersion());
@@ -1583,27 +1575,9 @@ public class RpcClient implements ClientProtocol {
private OzoneOutputStream createOutputStream(OpenKeySession openKey,
String requestId) throws IOException {
- KeyOutputStream keyOutputStream = null;
-
- if (openKey.getKeyInfo().getReplicationConfig()
- .getReplicationType() == HddsProtos.ReplicationType.EC) {
- keyOutputStream = new ECKeyOutputStream.Builder().setHandler(openKey)
- .setXceiverClientManager(xceiverClientManager)
- .setOmClient(ozoneManagerClient).setRequestID(requestId)
- .setReplicationConfig(
- (ECReplicationConfig)openKey.getKeyInfo().getReplicationConfig())
- .enableUnsafeByteBufferConversion(unsafeByteBufferConversion)
- .setConfig(clientConfig)
- .setByteBufferPool(byteBufferPool)
- .build();
- } else {
- keyOutputStream = new KeyOutputStream.Builder().setHandler(openKey)
- .setXceiverClientManager(xceiverClientManager)
- .setOmClient(ozoneManagerClient).setRequestID(requestId)
- .setReplicationConfig(openKey.getKeyInfo().getReplicationConfig())
- .enableUnsafeByteBufferConversion(unsafeByteBufferConversion)
- .setConfig(clientConfig).build();
- }
+
+ KeyOutputStream keyOutputStream = createKeyOutputStream(openKey, requestId)
+ .build();
keyOutputStream
.addPreallocateBlocks(openKey.getKeyInfo().getLatestVersionLocations(),
@@ -1639,6 +1613,30 @@ public class RpcClient implements ClientProtocol {
}
}
+ private KeyOutputStream.Builder createKeyOutputStream(OpenKeySession openKey,
+ String requestId) {
+ KeyOutputStream.Builder builder;
+
+ ReplicationConfig replicationConfig =
+ openKey.getKeyInfo().getReplicationConfig();
+ if (replicationConfig.getReplicationType() ==
+ HddsProtos.ReplicationType.EC) {
+ builder = new ECKeyOutputStream.Builder()
+ .setReplicationConfig((ECReplicationConfig) replicationConfig)
+ .setByteBufferPool(byteBufferPool);
+ } else {
+ builder = new KeyOutputStream.Builder()
+ .setReplicationConfig(replicationConfig);
+ }
+
+ return builder.setHandler(openKey)
+ .setXceiverClientManager(xceiverClientManager)
+ .setOmClient(ozoneManagerClient)
+ .setRequestID(requestId)
+ .enableUnsafeByteBufferConversion(unsafeByteBufferConversion)
+ .setConfig(clientConfig);
+ }
+
@Override
public KeyProvider getKeyProvider() throws IOException {
URI kmsUri = getKeyProviderUri();
diff --git
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
index b1ef8fecd2..e8acfed16d 100644
---
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
+++
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
@@ -1104,8 +1104,9 @@ public final class
OzoneManagerProtocolClientSideTranslatorPB
OmMultipartUploadListParts omMultipartUploadListParts =
new OmMultipartUploadListParts(
- ReplicationConfig.fromProtoTypeAndFactor(response.getType(),
- response.getFactor()),
+ ReplicationConfig.fromProto(
+ response.getType(), response.getFactor(),
+ response.getEcReplicationConfig()),
response.getNextPartNumberMarker(), response.getIsTruncated());
omMultipartUploadListParts.addProtoPartList(response.getPartsListList());
@@ -1140,8 +1141,8 @@ public final class
OzoneManagerProtocolClientSideTranslatorPB
proto.getKeyName(),
proto.getUploadId(),
Instant.ofEpochMilli(proto.getCreationTime()),
- ReplicationConfig.fromProtoTypeAndFactor(proto.getType(),
- proto.getFactor())
+ ReplicationConfig.fromProto(proto.getType(), proto.getFactor(),
+ proto.getEcReplicationConfig())
))
.collect(Collectors.toList());
diff --git a/hadoop-ozone/integration-test/pom.xml
b/hadoop-ozone/integration-test/pom.xml
index c3bce81d31..cea5163fc1 100644
--- a/hadoop-ozone/integration-test/pom.xml
+++ b/hadoop-ozone/integration-test/pom.xml
@@ -108,6 +108,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
<artifactId>junit-jupiter-api</artifactId>
<scope>test</scope>
</dependency>
+ <dependency>
+ <groupId>org.junit.jupiter</groupId>
+ <artifactId>junit-jupiter-params</artifactId>
+ <scope>test</scope>
+ </dependency>
<dependency>
<groupId>org.junit.jupiter</groupId>
<artifactId>junit-jupiter-migrationsupport</artifactId>
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java
index 9ea04d453f..293725f508 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java
@@ -18,9 +18,11 @@
package org.apache.hadoop.ozone.client.rpc;
import org.apache.commons.lang3.RandomUtils;
+import org.apache.hadoop.hdds.client.RatisReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.OzoneTestUtils;
@@ -620,8 +622,10 @@ public class TestOzoneClientMultipartUploadWithFSO {
OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts =
bucket.listParts(keyName, uploadID, 0, 3);
- Assert.assertEquals(RATIS,
- ozoneMultipartUploadPartListParts.getReplicationType());
+ Assert.assertEquals(
+ RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE),
+ ozoneMultipartUploadPartListParts.getReplicationConfig());
+
Assert.assertEquals(3,
ozoneMultipartUploadPartListParts.getPartInfoList().size());
@@ -722,8 +726,9 @@ public class TestOzoneClientMultipartUploadWithFSO {
OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts =
bucket.listParts(keyName, uploadID, 0, 2);
- Assert.assertEquals(RATIS,
- ozoneMultipartUploadPartListParts.getReplicationType());
+ Assert.assertEquals(
+ RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE),
+ ozoneMultipartUploadPartListParts.getReplicationConfig());
Assert.assertEquals(2,
ozoneMultipartUploadPartListParts.getPartInfoList().size());
@@ -821,8 +826,9 @@ public class TestOzoneClientMultipartUploadWithFSO {
Assert.assertEquals(0,
ozoneMultipartUploadPartListParts.getPartInfoList().size());
- Assert.assertEquals(RATIS,
- ozoneMultipartUploadPartListParts.getReplicationType());
+ Assert.assertEquals(
+ RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE),
+ ozoneMultipartUploadPartListParts.getReplicationConfig());
// As we don't have any parts with greater than partNumberMarker and list
// is not truncated, so it should return false here.
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
index 37dec7c6bf..fb50b81f18 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
@@ -35,6 +35,7 @@ import java.util.UUID;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
+import java.util.stream.Stream;
import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.client.DefaultReplicationConfig;
@@ -151,6 +152,8 @@ import static org.slf4j.event.Level.DEBUG;
import org.junit.jupiter.api.MethodOrderer;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestMethodOrder;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.MethodSource;
/**
* This is an abstract class to test all the public facing APIs of Ozone
@@ -841,8 +844,8 @@ public abstract class TestOzoneRpcClientAbstract {
);
}
- private boolean verifyRatisReplication(String volumeName, String bucketName,
- String keyName, ReplicationType type, ReplicationFactor factor)
+ private void verifyReplication(String volumeName, String bucketName,
+ String keyName, ReplicationConfig replication)
throws IOException {
OmKeyArgs keyArgs = new OmKeyArgs.Builder()
.setVolumeName(volumeName)
@@ -850,22 +853,13 @@ public abstract class TestOzoneRpcClientAbstract {
.setKeyName(keyName)
.setRefreshPipeline(true)
.build();
- HddsProtos.ReplicationType replicationType =
- HddsProtos.ReplicationType.valueOf(type.toString());
- HddsProtos.ReplicationFactor replicationFactor =
- HddsProtos.ReplicationFactor.valueOf(factor.getValue());
OmKeyInfo keyInfo = ozoneManager.lookupKey(keyArgs);
for (OmKeyLocationInfo info:
keyInfo.getLatestVersionLocations().getLocationList()) {
ContainerInfo container =
storageContainerLocationClient.getContainer(info.getContainerID());
- if (!ReplicationConfig.getLegacyFactor(container.getReplicationConfig())
- .equals(replicationFactor) || (
- container.getReplicationType() != replicationType)) {
- return false;
- }
+ Assert.assertEquals(replication, container.getReplicationConfig());
}
- return true;
}
@Test
@@ -893,9 +887,9 @@ public abstract class TestOzoneRpcClientAbstract {
OzoneInputStream is = bucket.readKey(keyName);
byte[] fileContent = new byte[value.getBytes(UTF_8).length];
is.read(fileContent);
- Assert.assertTrue(verifyRatisReplication(volumeName, bucketName,
- keyName, RATIS,
- ONE));
+ verifyReplication(volumeName, bucketName, keyName,
+ RatisReplicationConfig.getInstance(
+ HddsProtos.ReplicationFactor.ONE));
Assert.assertEquals(value, new String(fileContent, UTF_8));
Assert.assertFalse(key.getCreationTime().isBefore(testStartTime));
Assert.assertFalse(key.getModificationTime().isBefore(testStartTime));
@@ -1037,22 +1031,15 @@ public abstract class TestOzoneRpcClientAbstract {
// TODO: testBucketQuota overlaps with testBucketUsedBytes,
// do cleanup when EC branch gets merged into master.
- @Test
- public void testBucketQuota() throws IOException {
+ @ParameterizedTest
+ @MethodSource("replicationConfigs")
+ void testBucketQuota(ReplicationConfig repConfig) throws IOException {
int blockSize = (int) ozoneManager.getConfiguration().getStorageSize(
OZONE_SCM_BLOCK_SIZE, OZONE_SCM_BLOCK_SIZE_DEFAULT, StorageUnit.BYTES);
- ReplicationConfig[] repConfigs = new ReplicationConfig[]{
- ReplicationConfig.fromTypeAndFactor(RATIS, ONE),
- ReplicationConfig.fromTypeAndFactor(RATIS, THREE),
- new ECReplicationConfig("rs-3-2-1024k"),
- };
-
- for (ReplicationConfig repConfig : repConfigs) {
- for (int i = 0; i <= repConfig.getRequiredNodes(); i++) {
- bucketQuotaTestHelper(i * blockSize, repConfig);
- bucketQuotaTestHelper(i * blockSize + 1, repConfig);
- }
+ for (int i = 0; i <= repConfig.getRequiredNodes(); i++) {
+ bucketQuotaTestHelper(i * blockSize, repConfig);
+ bucketQuotaTestHelper(i * blockSize + 1, repConfig);
}
}
@@ -1232,8 +1219,10 @@ public abstract class TestOzoneRpcClientAbstract {
OzoneVolume volume = store.getVolume(volumeName);
volume.createBucket(bucketName);
OzoneBucket bucket = volume.getBucket(bucketName);
+ ReplicationConfig replication = RatisReplicationConfig.getInstance(
+ HddsProtos.ReplicationFactor.ONE);
OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName,
- RATIS, ONE);
+ replication);
assertNotNull(multipartInfo);
String uploadID = multipartInfo.getUploadID();
@@ -1316,8 +1305,9 @@ public abstract class TestOzoneRpcClientAbstract {
byte[] fileContent = new byte[value.getBytes(UTF_8).length];
is.read(fileContent);
is.close();
- Assert.assertTrue(verifyRatisReplication(volumeName, bucketName,
- keyName, ReplicationType.RATIS, ONE));
+ verifyReplication(volumeName, bucketName, keyName,
+ RatisReplicationConfig.getInstance(
+ HddsProtos.ReplicationFactor.ONE));
Assert.assertEquals(value, new String(fileContent, UTF_8));
Assert.assertFalse(key.getCreationTime().isBefore(testStartTime));
Assert.assertFalse(key.getModificationTime().isBefore(testStartTime));
@@ -1350,9 +1340,9 @@ public abstract class TestOzoneRpcClientAbstract {
byte[] fileContent = new byte[value.getBytes(UTF_8).length];
is.read(fileContent);
is.close();
- Assert.assertTrue(verifyRatisReplication(volumeName, bucketName,
- keyName, ReplicationType.RATIS,
- THREE));
+ verifyReplication(volumeName, bucketName, keyName,
+ RatisReplicationConfig.getInstance(
+ HddsProtos.ReplicationFactor.THREE));
Assert.assertEquals(value, new String(fileContent, UTF_8));
Assert.assertFalse(key.getCreationTime().isBefore(testStartTime));
Assert.assertFalse(key.getModificationTime().isBefore(testStartTime));
@@ -1391,9 +1381,9 @@ public abstract class TestOzoneRpcClientAbstract {
byte[] fileContent = new byte[data.getBytes(UTF_8).length];
is.read(fileContent);
is.close();
- Assert.assertTrue(verifyRatisReplication(volumeName, bucketName,
- keyName, ReplicationType.RATIS,
- THREE));
+ verifyReplication(volumeName, bucketName, keyName,
+ RatisReplicationConfig.getInstance(
+ HddsProtos.ReplicationFactor.THREE));
Assert.assertEquals(data, new String(fileContent, UTF_8));
Assert.assertFalse(key.getCreationTime().isBefore(testStartTime));
Assert.assertFalse(key.getModificationTime().isBefore(testStartTime));
@@ -2323,9 +2313,18 @@ public abstract class TestOzoneRpcClientAbstract {
}
}
- @Test
- public void testInitiateMultipartUploadWithReplicationInformationSet() throws
- IOException {
+ static Stream<ReplicationConfig> replicationConfigs() {
+ return Stream.of(
+ RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE),
+ RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE),
+ new ECReplicationConfig(3, 2)
+ );
+ }
+
+ @ParameterizedTest
+ @MethodSource("replicationConfigs")
+ void testInitiateMultipartUpload(ReplicationConfig replicationConfig)
+ throws IOException {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
String keyName = UUID.randomUUID().toString();
@@ -2335,7 +2334,7 @@ public abstract class TestOzoneRpcClientAbstract {
volume.createBucket(bucketName);
OzoneBucket bucket = volume.getBucket(bucketName);
OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName,
- RATIS, ONE);
+ replicationConfig);
assertNotNull(multipartInfo);
String uploadID = multipartInfo.getUploadID();
@@ -2347,7 +2346,7 @@ public abstract class TestOzoneRpcClientAbstract {
// Call initiate multipart upload for the same key again, this should
// generate a new uploadID.
multipartInfo = bucket.initiateMultipartUpload(keyName,
- RATIS, ONE);
+ replicationConfig);
assertNotNull(multipartInfo);
Assert.assertEquals(volumeName, multipartInfo.getVolumeName());
@@ -2390,8 +2389,10 @@ public abstract class TestOzoneRpcClientAbstract {
assertNotNull(multipartInfo.getUploadID());
}
- @Test
- public void testUploadPartWithNoOverride() throws IOException {
+ @ParameterizedTest
+ @MethodSource("replicationConfigs")
+ void testUploadPartWithNoOverride(ReplicationConfig replication)
+ throws IOException {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
String keyName = UUID.randomUUID().toString();
@@ -2402,7 +2403,7 @@ public abstract class TestOzoneRpcClientAbstract {
volume.createBucket(bucketName);
OzoneBucket bucket = volume.getBucket(bucketName);
OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName,
- RATIS, ONE);
+ replication);
assertNotNull(multipartInfo);
String uploadID = multipartInfo.getUploadID();
@@ -2420,13 +2421,13 @@ public abstract class TestOzoneRpcClientAbstract {
.getCommitUploadPartInfo();
assertNotNull(commitUploadPartInfo);
- String partName = commitUploadPartInfo.getPartName();
assertNotNull(commitUploadPartInfo.getPartName());
-
}
- @Test
- public void testUploadPartOverrideWithStandAlone() throws IOException {
+ @ParameterizedTest
+ @MethodSource("replicationConfigs")
+ void testUploadPartOverride(ReplicationConfig replication)
+ throws IOException {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
@@ -2439,7 +2440,7 @@ public abstract class TestOzoneRpcClientAbstract {
volume.createBucket(bucketName);
OzoneBucket bucket = volume.getBucket(bucketName);
OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName,
- RATIS, ONE);
+ replication);
assertNotNull(multipartInfo);
String uploadID = multipartInfo.getUploadID();
@@ -2485,66 +2486,6 @@ public abstract class TestOzoneRpcClientAbstract {
commitUploadPartInfo.getPartName());
}
- @Test
- public void testUploadPartOverrideWithRatis() throws IOException {
-
- String volumeName = UUID.randomUUID().toString();
- String bucketName = UUID.randomUUID().toString();
- String keyName = UUID.randomUUID().toString();
- String sampleData = "sample Value";
-
- store.createVolume(volumeName);
- OzoneVolume volume = store.getVolume(volumeName);
- volume.createBucket(bucketName);
- OzoneBucket bucket = volume.getBucket(bucketName);
- OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName,
- ReplicationType.RATIS, THREE);
-
- assertNotNull(multipartInfo);
- String uploadID = multipartInfo.getUploadID();
- Assert.assertEquals(volumeName, multipartInfo.getVolumeName());
- Assert.assertEquals(bucketName, multipartInfo.getBucketName());
- Assert.assertEquals(keyName, multipartInfo.getKeyName());
- assertNotNull(multipartInfo.getUploadID());
-
- int partNumber = 1;
-
- OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName,
- sampleData.length(), partNumber, uploadID);
- ozoneOutputStream.write(string2Bytes(sampleData), 0, sampleData.length());
- ozoneOutputStream.close();
-
- OmMultipartCommitUploadPartInfo commitUploadPartInfo = ozoneOutputStream
- .getCommitUploadPartInfo();
-
- assertNotNull(commitUploadPartInfo);
- String partName = commitUploadPartInfo.getPartName();
- assertNotNull(commitUploadPartInfo.getPartName());
-
- // Overwrite the part by creating part key with same part number
- // and same content.
- ozoneOutputStream = bucket.createMultipartKey(keyName,
- sampleData.length(), partNumber, uploadID);
- ozoneOutputStream.write(string2Bytes(sampleData), 0, "name".length());
- ozoneOutputStream.close();
-
- commitUploadPartInfo = ozoneOutputStream
- .getCommitUploadPartInfo();
-
- assertNotNull(commitUploadPartInfo);
- assertNotNull(commitUploadPartInfo.getPartName());
-
- // AWS S3 for same content generates same partName during upload part.
- // In AWS S3 ETag is generated from md5sum. In Ozone right now we
- // don't do this. For now to make things work for large file upload
- // through aws s3 cp, the partName are generated in a predictable fashion.
- // So, when a part is override partNames will still be same irrespective
- // of content in ozone s3. This will make S3 Mpu completeMPU pass when
- // comparing part names and large file uploads work using aws cp.
- assertEquals("Part names should be same", partName,
- commitUploadPartInfo.getPartName());
- }
-
@Test
public void testNoSuchUploadError() throws Exception {
String volumeName = UUID.randomUUID().toString();
@@ -2585,7 +2526,9 @@ public abstract class TestOzoneRpcClientAbstract {
bucket.addAcl(acl3);
bucket.addAcl(acl4);
- doMultipartUpload(bucket, keyName, (byte)98);
+ ReplicationConfig replication = RatisReplicationConfig.getInstance(
+ HddsProtos.ReplicationFactor.ONE);
+ doMultipartUpload(bucket, keyName, (byte)98, replication);
OzoneObj keyObj = OzoneObjInfo.Builder.newBuilder()
.setBucketName(bucketName)
.setVolumeName(volumeName).setKeyName(keyName)
@@ -2631,7 +2574,7 @@ public abstract class TestOzoneRpcClientAbstract {
OzoneBucket bucket2 = client.getObjectStore().getVolume(volumeName)
.getBucket(bucketName);
try {
- initiateMultipartUpload(bucket2, keyName2, ReplicationType.RATIS, THREE);
+ initiateMultipartUpload(bucket2, keyName2, anyReplication());
fail("User without permission should fail");
} catch (Exception e) {
assertTrue(e instanceof OMException);
@@ -2654,7 +2597,7 @@ public abstract class TestOzoneRpcClientAbstract {
store.addAcl(bucketObj, acl9);
store.addAcl(bucketObj, acl10);
String uploadId = initiateMultipartUpload(bucket2, keyName2,
- ReplicationType.RATIS, THREE);
+ anyReplication());
// Upload part
byte[] data = generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)1);
@@ -2676,8 +2619,10 @@ public abstract class TestOzoneRpcClientAbstract {
}
}
- @Test
- public void testMultipartUploadOverride() throws Exception {
+ @ParameterizedTest
+ @MethodSource("replicationConfigs")
+ void testMultipartUploadOverride(ReplicationConfig replication)
+ throws Exception {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
String keyName = UUID.randomUUID().toString();
@@ -2687,11 +2632,11 @@ public abstract class TestOzoneRpcClientAbstract {
volume.createBucket(bucketName);
OzoneBucket bucket = volume.getBucket(bucketName);
- doMultipartUpload(bucket, keyName, (byte)96);
+ doMultipartUpload(bucket, keyName, (byte)96, replication);
// Initiate Multipart upload again, now we should read latest version, as
// read always reads latest blocks.
- doMultipartUpload(bucket, keyName, (byte)97);
+ doMultipartUpload(bucket, keyName, (byte)97, replication);
}
@@ -2708,8 +2653,8 @@ public abstract class TestOzoneRpcClientAbstract {
OzoneBucket bucket = volume.getBucket(bucketName);
// Initiate multipart upload
- String uploadID = initiateMultipartUpload(bucket, keyName, RATIS,
- ONE);
+ String uploadID = initiateMultipartUpload(bucket, keyName,
+ anyReplication());
// Upload Parts
Map<Integer, String> partsMap = new TreeMap<>();
@@ -2741,8 +2686,8 @@ public abstract class TestOzoneRpcClientAbstract {
volume.createBucket(bucketName);
OzoneBucket bucket = volume.getBucket(bucketName);
- String uploadID = initiateMultipartUpload(bucket, keyName, RATIS,
- ONE);
+ String uploadID = initiateMultipartUpload(bucket, keyName,
+ anyReplication());
// We have not uploaded any parts, but passing some list it should throw
// error.
@@ -2766,8 +2711,9 @@ public abstract class TestOzoneRpcClientAbstract {
volume.createBucket(bucketName);
OzoneBucket bucket = volume.getBucket(bucketName);
- String uploadID = initiateMultipartUpload(bucket, keyName, RATIS,
- ONE);
+ ReplicationConfig replication = RatisReplicationConfig.getInstance(
+ HddsProtos.ReplicationFactor.ONE);
+ String uploadID = initiateMultipartUpload(bucket, keyName, replication);
uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8));
// We have not uploaded any parts, but passing some list it should throw
@@ -2791,8 +2737,9 @@ public abstract class TestOzoneRpcClientAbstract {
volume.createBucket(bucketName);
OzoneBucket bucket = volume.getBucket(bucketName);
- String uploadID = initiateMultipartUpload(bucket, keyName, RATIS,
- ONE);
+ ReplicationConfig replication = RatisReplicationConfig.getInstance(
+ HddsProtos.ReplicationFactor.ONE);
+ String uploadID = initiateMultipartUpload(bucket, keyName, replication);
uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8));
// We have not uploaded any parts, but passing some list it should throw
@@ -2820,7 +2767,7 @@ public abstract class TestOzoneRpcClientAbstract {
}
@Test
- public void testAbortUploadFailWithInProgressPartUpload() throws Exception {
+ void testAbortUploadFailWithInProgressPartUpload() throws Exception {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
String keyName = UUID.randomUUID().toString();
@@ -2831,7 +2778,7 @@ public abstract class TestOzoneRpcClientAbstract {
OzoneBucket bucket = volume.getBucket(bucketName);
OmMultipartInfo omMultipartInfo = bucket.initiateMultipartUpload(keyName,
- RATIS, ONE);
+ anyReplication());
Assert.assertNotNull(omMultipartInfo.getUploadID());
@@ -2855,7 +2802,7 @@ public abstract class TestOzoneRpcClientAbstract {
}
@Test
- public void testCommitPartAfterCompleteUpload() throws Exception {
+ void testCommitPartAfterCompleteUpload() throws Exception {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
String keyName = UUID.randomUUID().toString();
@@ -2866,7 +2813,7 @@ public abstract class TestOzoneRpcClientAbstract {
OzoneBucket bucket = volume.getBucket(bucketName);
OmMultipartInfo omMultipartInfo = bucket.initiateMultipartUpload(keyName,
- RATIS, ONE);
+ anyReplication());
Assert.assertNotNull(omMultipartInfo.getUploadID());
@@ -2928,8 +2875,8 @@ public abstract class TestOzoneRpcClientAbstract {
volume.createBucket(bucketName);
OzoneBucket bucket = volume.getBucket(bucketName);
- String uploadID = initiateMultipartUpload(bucket, keyName, RATIS,
- ONE);
+ String uploadID = initiateMultipartUpload(bucket, keyName,
+ anyReplication());
bucket.abortMultipartUpload(keyName, uploadID);
}
@@ -2944,14 +2891,16 @@ public abstract class TestOzoneRpcClientAbstract {
volume.createBucket(bucketName);
OzoneBucket bucket = volume.getBucket(bucketName);
- String uploadID = initiateMultipartUpload(bucket, keyName, RATIS,
- ONE);
+ String uploadID = initiateMultipartUpload(bucket, keyName,
+ anyReplication());
uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8));
bucket.abortMultipartUpload(keyName, uploadID);
}
- @Test
- public void testListMultipartUploadParts() throws Exception {
+ @ParameterizedTest
+ @MethodSource("replicationConfigs")
+ void testListMultipartUploadParts(ReplicationConfig replication)
+ throws Exception {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
String keyName = UUID.randomUUID().toString();
@@ -2962,8 +2911,7 @@ public abstract class TestOzoneRpcClientAbstract {
OzoneBucket bucket = volume.getBucket(bucketName);
Map<Integer, String> partsMap = new TreeMap<>();
- String uploadID = initiateMultipartUpload(bucket, keyName, RATIS,
- ONE);
+ String uploadID = initiateMultipartUpload(bucket, keyName, replication);
String partName1 = uploadPart(bucket, keyName, uploadID, 1,
generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
partsMap.put(1, partName1);
@@ -2979,10 +2927,9 @@ public abstract class TestOzoneRpcClientAbstract {
OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts =
bucket.listParts(keyName, uploadID, 0, 3);
- Assert.assertEquals(RATIS,
- ozoneMultipartUploadPartListParts.getReplicationType());
- Assert.assertEquals(3,
- ozoneMultipartUploadPartListParts.getPartInfoList().size());
+ Assert.assertEquals(
+ replication,
+ ozoneMultipartUploadPartListParts.getReplicationConfig());
Assert.assertEquals(partsMap.get(ozoneMultipartUploadPartListParts
.getPartInfoList().get(0).getPartNumber()),
@@ -3000,9 +2947,11 @@ public abstract class TestOzoneRpcClientAbstract {
Assert.assertFalse(ozoneMultipartUploadPartListParts.isTruncated());
}
- @Test
- public void testListMultipartUploadPartsWithContinuation()
- throws Exception {
+ @ParameterizedTest
+ @MethodSource("replicationConfigs")
+ void testListMultipartUploadPartsWithContinuation(
+ ReplicationConfig replication) throws Exception {
+
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
String keyName = UUID.randomUUID().toString();
@@ -3013,8 +2962,7 @@ public abstract class TestOzoneRpcClientAbstract {
OzoneBucket bucket = volume.getBucket(bucketName);
Map<Integer, String> partsMap = new TreeMap<>();
- String uploadID = initiateMultipartUpload(bucket, keyName, RATIS,
- ONE);
+ String uploadID = initiateMultipartUpload(bucket, keyName, replication);
String partName1 = uploadPart(bucket, keyName, uploadID, 1,
generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
partsMap.put(1, partName1);
@@ -3030,8 +2978,8 @@ public abstract class TestOzoneRpcClientAbstract {
OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts =
bucket.listParts(keyName, uploadID, 0, 2);
- Assert.assertEquals(RATIS,
- ozoneMultipartUploadPartListParts.getReplicationType());
+ Assert.assertEquals(replication,
+ ozoneMultipartUploadPartListParts.getReplicationConfig());
Assert.assertEquals(2,
ozoneMultipartUploadPartListParts.getPartInfoList().size());
@@ -3118,8 +3066,8 @@ public abstract class TestOzoneRpcClientAbstract {
OzoneBucket bucket = volume.getBucket(bucketName);
- String uploadID = initiateMultipartUpload(bucket, keyName, RATIS,
- ONE);
+ String uploadID = initiateMultipartUpload(bucket, keyName,
+ anyReplication());
uploadPart(bucket, keyName, uploadID, 1,
generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
@@ -3131,8 +3079,6 @@ public abstract class TestOzoneRpcClientAbstract {
Assert.assertEquals(0,
ozoneMultipartUploadPartListParts.getPartInfoList().size());
- Assert.assertEquals(RATIS,
- ozoneMultipartUploadPartListParts.getReplicationType());
// As we don't have any parts with greater than partNumberMarker and list
// is not truncated, so it should return false here.
@@ -3489,11 +3435,11 @@ public abstract class TestOzoneRpcClientAbstract {
return chars;
}
- private void doMultipartUpload(OzoneBucket bucket, String keyName, byte val)
+ private void doMultipartUpload(OzoneBucket bucket, String keyName, byte val,
+ ReplicationConfig replication)
throws Exception {
// Initiate Multipart upload request
- String uploadID = initiateMultipartUpload(bucket, keyName, ReplicationType
- .RATIS, THREE);
+ String uploadID = initiateMultipartUpload(bucket, keyName, replication);
// Upload parts
Map<Integer, String> partsMap = new TreeMap<>();
@@ -3526,9 +3472,8 @@ public abstract class TestOzoneRpcClientAbstract {
OzoneInputStream inputStream = bucket.readKey(keyName);
inputStream.read(fileContent);
- Assert.assertTrue(verifyRatisReplication(bucket.getVolumeName(),
- bucket.getName(), keyName, ReplicationType.RATIS,
- THREE));
+ verifyReplication(bucket.getVolumeName(), bucket.getName(), keyName,
+ replication);
StringBuilder sb = new StringBuilder(length);
@@ -3555,10 +3500,9 @@ public abstract class TestOzoneRpcClientAbstract {
}
private String initiateMultipartUpload(OzoneBucket bucket, String keyName,
- ReplicationType replicationType, ReplicationFactor replicationFactor)
- throws Exception {
+ ReplicationConfig replicationConfig) throws Exception {
OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName,
- replicationType, replicationFactor);
+ replicationConfig);
String uploadID = multipartInfo.getUploadID();
Assert.assertNotNull(uploadID);
@@ -3675,9 +3619,8 @@ public abstract class TestOzoneRpcClientAbstract {
OzoneInputStream is = bucket.readKey(keyName);
byte[] fileContent = new byte[text.getBytes(UTF_8).length];
is.read(fileContent);
- Assert.assertTrue(verifyRatisReplication(volumeName, bucketName,
- keyName, RATIS,
- ONE));
+ verifyReplication(volumeName, bucketName, keyName,
+ RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE));
Assert.assertEquals(text, new String(fileContent, UTF_8));
//Step 4
@@ -3755,9 +3698,8 @@ public abstract class TestOzoneRpcClientAbstract {
OzoneInputStream is = bucket.readKey(keyName);
byte[] fileContent = new byte[text.getBytes(UTF_8).length];
is.read(fileContent);
- Assert.assertTrue(verifyRatisReplication(volumeName, bucketName,
- keyName, RATIS,
- ONE));
+ verifyReplication(volumeName, bucketName, keyName,
+ RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE));
Assert.assertEquals(text, new String(fileContent, UTF_8));
//Step 4
@@ -3967,4 +3909,8 @@ public abstract class TestOzoneRpcClientAbstract {
volume.createBucket(bucketName, builder.build());
return volume.getBucket(bucketName);
}
+
+ private static ReplicationConfig anyReplication() {
+    return
+        RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE);
+ }
}
diff --git
a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
index 06c8987449..407954b7ba 100644
--- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
+++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
@@ -1299,7 +1299,7 @@ message MultipartUploadInfo {
required string uploadId = 4;
required uint64 creationTime = 5;
required hadoop.hdds.ReplicationType type = 6;
- required hadoop.hdds.ReplicationFactor factor = 7;
+ optional hadoop.hdds.ReplicationFactor factor = 7;
optional hadoop.hdds.ECReplicationConfig ecReplicationConfig = 8;
}
diff --git
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index 03e9361ac5..03d0d68312 100644
---
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -708,9 +708,10 @@ public class KeyManagerImpl implements KeyManager {
omPartInfoList.add(omPartInfo);
//if there are parts, use replication type from one of the parts
- replicationConfig = ReplicationConfig.fromProtoTypeAndFactor(
- partKeyInfo.getPartKeyInfo().getType(),
- partKeyInfo.getPartKeyInfo().getFactor());
+ replicationConfig = ReplicationConfig.fromProto(
+ partKeyInfo.getPartKeyInfo().getType(),
+ partKeyInfo.getPartKeyInfo().getFactor(),
+ partKeyInfo.getPartKeyInfo().getEcReplicationConfig());
count++;
}
}
diff --git
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
index d727f5727c..f2e0c54746 100644
---
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
+++
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
@@ -31,7 +31,6 @@ import java.util.Map;
import java.util.TreeMap;
import org.apache.hadoop.hdds.client.ReplicationConfig;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
import org.apache.hadoop.ozone.OzoneConsts;
@@ -354,10 +353,8 @@ public class S3MultipartUploadCompleteRequest extends
OMKeyRequest {
String ozoneKey, TreeMap<Integer, PartKeyInfo> partKeyInfoMap,
List<OmKeyLocationInfo> partLocationInfos, long dataSize)
throws IOException {
- HddsProtos.ReplicationType type = partKeyInfoMap.lastEntry().getValue()
- .getPartKeyInfo().getType();
- HddsProtos.ReplicationFactor factor =
- partKeyInfoMap.lastEntry().getValue().getPartKeyInfo().getFactor();
+ OzoneManagerProtocolProtos.KeyInfo partKeyInfo =
+ partKeyInfoMap.lastEntry().getValue().getPartKeyInfo();
OmKeyInfo omKeyInfo = getOmKeyInfoFromKeyTable(ozoneKey, keyName,
omMetadataManager);
@@ -374,8 +371,9 @@ public class S3MultipartUploadCompleteRequest extends
OMKeyRequest {
OmKeyInfo.Builder builder =
new OmKeyInfo.Builder().setVolumeName(volumeName)
.setBucketName(bucketName).setKeyName(dbOpenKeyInfo.getKeyName())
- .setReplicationConfig(
- ReplicationConfig.fromProtoTypeAndFactor(type, factor))
+ .setReplicationConfig(ReplicationConfig.fromProto(
+ partKeyInfo.getType(), partKeyInfo.getFactor(),
+ partKeyInfo.getEcReplicationConfig()))
.setCreationTime(keyArgs.getModificationTime())
.setModificationTime(keyArgs.getModificationTime())
.setDataSize(dataSize)
diff --git
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
index 2f3cb5c23b..933dcbc33e 100644
---
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
+++
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
@@ -282,8 +282,7 @@ public class BucketEndpoint extends EndpointBase {
upload.getKeyName(),
upload.getUploadId(),
upload.getCreationTime(),
- S3StorageType.fromReplicationType(upload.getReplicationType(),
- upload.getReplicationFactor())
+ S3StorageType.fromReplicationConfig(upload.getReplicationConfig())
)));
getMetrics().incListMultipartUploadsSuccess();
return Response.ok(result).build();
diff --git
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
index d048550778..5b37d1f8b3 100644
---
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
+++
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
@@ -712,9 +712,8 @@ public class ObjectEndpoint extends EndpointBase {
listPartsResponse.setPartNumberMarker(partNumberMarker);
listPartsResponse.setTruncated(false);
- listPartsResponse.setStorageClass(S3StorageType.fromReplicationType(
- ozoneMultipartUploadPartListParts.getReplicationType(),
-        ozoneMultipartUploadPartListParts.getReplicationFactor()).toString());
+    listPartsResponse.setStorageClass(S3StorageType.fromReplicationConfig(
+        ozoneMultipartUploadPartListParts.getReplicationConfig()).toString());
if (ozoneMultipartUploadPartListParts.isTruncated()) {
listPartsResponse.setTruncated(
diff --git
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3StorageType.java
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3StorageType.java
index 21b3b8d0df..ae42e812fb 100644
---
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3StorageType.java
+++
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3StorageType.java
@@ -18,9 +18,12 @@
package org.apache.hadoop.ozone.s3.util;
+import org.apache.hadoop.hdds.client.ECReplicationConfig;
+import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.ozone.OzoneConfigKeys;
/**
@@ -70,10 +73,12 @@ public enum S3StorageType {
? REDUCED_REDUNDANCY : STANDARD;
}
- public static S3StorageType fromReplicationType(
- ReplicationType replicationType, ReplicationFactor factor) {
- if ((replicationType == ReplicationType.STAND_ALONE) ||
- (factor == ReplicationFactor.ONE)) {
+ public static S3StorageType fromReplicationConfig(ReplicationConfig config) {
+ if (config instanceof ECReplicationConfig) {
+ return S3StorageType.STANDARD;
+ }
+    if (config.getReplicationType() == HddsProtos.ReplicationType.STAND_ALONE ||
+        config.getRequiredNodes() == 1) {
return S3StorageType.REDUCED_REDUNDANCY;
} else {
return S3StorageType.STANDARD;
diff --git
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java
index f4b5846f00..20e506d54f 100644
---
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java
+++
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java
@@ -175,10 +175,7 @@ public class OzoneBucketStub extends OzoneBucket {
ozoneKeyDetails.getDataSize(),
ozoneKeyDetails.getCreationTime().toEpochMilli(),
ozoneKeyDetails.getModificationTime().toEpochMilli(),
- ReplicationConfig.fromTypeAndFactor(
- ozoneKeyDetails.getReplicationType(),
- ReplicationFactor.valueOf(ozoneKeyDetails.getReplicationFactor())
- ));
+ ozoneKeyDetails.getReplicationConfig());
} else {
throw new OMException(ResultCodes.KEY_NOT_FOUND);
}
@@ -318,8 +315,9 @@ public class OzoneBucketStub extends OzoneBucket {
List<PartInfo> partInfoList = new ArrayList<>();
if (partList.get(key) == null) {
- return new OzoneMultipartUploadPartListParts(ReplicationType.RATIS,
- ReplicationFactor.ONE, 0, false);
+ return new OzoneMultipartUploadPartListParts(
+ RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE),
+ 0, false);
} else {
Map<Integer, Part> partMap = partList.get(key);
Iterator<Map.Entry<Integer, Part>> partIterator =
@@ -348,8 +346,7 @@ public class OzoneBucketStub extends OzoneBucket {
}
OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts =
- new OzoneMultipartUploadPartListParts(ReplicationType.RATIS,
- ReplicationFactor.ONE,
+ new OzoneMultipartUploadPartListParts(replicationConfig,
nextPartNumberMarker, truncated);
ozoneMultipartUploadPartListParts.addAllParts(partInfoList);
diff --git a/pom.xml b/pom.xml
index a745233db4..ac4bbcd800 100644
--- a/pom.xml
+++ b/pom.xml
@@ -1173,6 +1173,12 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
http://maven.apache.org/xs
<version>${junit.jupiter.version}</version>
<scope>test</scope>
</dependency>
+ <dependency>
+ <groupId>org.junit.jupiter</groupId>
+ <artifactId>junit-jupiter-params</artifactId>
+ <version>${junit.jupiter.version}</version>
+ <scope>test</scope>
+ </dependency>
<dependency>
<groupId>org.junit.vintage</groupId>
<artifactId>junit-vintage-engine</artifactId>
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]