This is an automated email from the ASF dual-hosted git repository.
weichiu pushed a commit to branch HDDS-7593
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/HDDS-7593 by this push:
new 463a09b7c6 HDDS-9884. Pass DatanodeVersion to the client (#6155)
463a09b7c6 is described below
commit 463a09b7c6fb974baf3be6f626764b073c00ff5e
Author: Siyao Meng <[email protected]>
AuthorDate: Thu Feb 15 09:13:12 2024 -0800
HDDS-9884. Pass DatanodeVersion to the client (#6155)
---
.../hadoop/hdds/protocol/DatanodeDetails.java | 6 +
.../apache/hadoop/ozone/HddsDatanodeService.java | 31 ++++-
.../container/common/helpers/DatanodeIdYaml.java | 2 +-
.../interface-client/src/main/proto/hdds.proto | 1 +
hadoop-ozone/fault-injection-test/pom.xml | 8 ++
.../org/apache/hadoop/ozone/MiniOzoneCluster.java | 34 ++++-
.../apache/hadoop/ozone/MiniOzoneClusterImpl.java | 21 ++-
.../client/rpc/TestBlockDataStreamOutput.java | 27 ++++
.../ozone/client/rpc/TestDatanodeVersion.java | 143 +++++++++++++++++++++
9 files changed, 262 insertions(+), 11 deletions(-)
diff --git
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
index 5b6fb6fe9b..885710646d 100644
---
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
+++
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
@@ -371,6 +371,9 @@ public class DatanodeDetails extends NodeImpl implements
builder.setPersistedOpStateExpiry(
datanodeDetailsProto.getPersistedOpStateExpiry());
}
+ if (datanodeDetailsProto.hasCurrentVersion()) {
+ builder.setCurrentVersion(datanodeDetailsProto.getCurrentVersion());
+ }
return builder;
}
@@ -475,6 +478,8 @@ public class DatanodeDetails extends NodeImpl implements
}
}
+ builder.setCurrentVersion(currentVersion);
+
return builder;
}
@@ -505,6 +510,7 @@ public class DatanodeDetails extends NodeImpl implements
}
/**
+ * Note: the datanode's initial version is not passed to the client because
+ * there is currently no use case for it. See HDDS-9884.
* @return the version this datanode was initially created with
*/
public int getInitialVersion() {
diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
index f59622cb0f..bbaf58d36b 100644
---
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
+++
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
@@ -233,7 +233,6 @@ public class HddsDatanodeService extends GenericCli
implements ServicePlugin {
datanodeDetails.setRevision(
HddsVersionInfo.HDDS_VERSION_INFO.getRevision());
datanodeDetails.setBuildDate(HddsVersionInfo.HDDS_VERSION_INFO.getDate());
- datanodeDetails.setCurrentVersion(DatanodeVersion.CURRENT_VERSION);
TracingUtil.initTracing(
"HddsDatanodeService." + datanodeDetails.getUuidString()
.substring(0, 8), conf);
@@ -424,17 +423,19 @@ public class HddsDatanodeService extends GenericCli
implements ServicePlugin {
String idFilePath = HddsServerUtil.getDatanodeIdFilePath(conf);
Preconditions.checkNotNull(idFilePath);
File idFile = new File(idFilePath);
+ DatanodeDetails details;
if (idFile.exists()) {
- return ContainerUtils.readDatanodeDetailsFrom(idFile);
+ details = ContainerUtils.readDatanodeDetailsFrom(idFile);
+ // Current version is always overridden to the latest
+ details.setCurrentVersion(getDefaultCurrentVersion());
} else {
// There is no datanode.id file, this might be the first time datanode
// is started.
- DatanodeDetails details = DatanodeDetails.newBuilder()
- .setUuid(UUID.randomUUID()).build();
- details.setInitialVersion(DatanodeVersion.CURRENT_VERSION);
- details.setCurrentVersion(DatanodeVersion.CURRENT_VERSION);
- return details;
+ details =
DatanodeDetails.newBuilder().setUuid(UUID.randomUUID()).build();
+ details.setInitialVersion(getDefaultInitialVersion());
+ details.setCurrentVersion(getDefaultCurrentVersion());
}
+ return details;
}
/**
@@ -678,4 +679,20 @@ public class HddsDatanodeService extends GenericCli
implements ServicePlugin {
.setPoolSize(Integer.parseInt(value));
return value;
}
+
+ /**
+ * Returns the initial version of the datanode.
+ */
+ @VisibleForTesting
+ public static int getDefaultInitialVersion() {
+ return DatanodeVersion.CURRENT_VERSION;
+ }
+
+ /**
+ * Returns the current version of the datanode.
+ */
+ @VisibleForTesting
+ public static int getDefaultCurrentVersion() {
+ return DatanodeVersion.CURRENT_VERSION;
+ }
}
diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DatanodeIdYaml.java
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DatanodeIdYaml.java
index f8acbc7e2d..58b1c1d1d5 100644
---
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DatanodeIdYaml.java
+++
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DatanodeIdYaml.java
@@ -56,7 +56,7 @@ public final class DatanodeIdYaml {
}
/**
- * Creates a yaml file using DatnodeDetails. This method expects the path
+ * Creates a yaml file using DatanodeDetails. This method expects the path
* validation to be performed by the caller.
*
* @param datanodeDetails {@link DatanodeDetails}
diff --git a/hadoop-hdds/interface-client/src/main/proto/hdds.proto
b/hadoop-hdds/interface-client/src/main/proto/hdds.proto
index 3f346300b3..987bf25ad8 100644
--- a/hadoop-hdds/interface-client/src/main/proto/hdds.proto
+++ b/hadoop-hdds/interface-client/src/main/proto/hdds.proto
@@ -45,6 +45,7 @@ message DatanodeDetailsProto {
optional string networkLocation = 7; // Network topology location
optional NodeOperationalState persistedOpState = 8; // The Operational
state persisted in the datanode.id file
optional int64 persistedOpStateExpiry = 9; // The seconds after the epoch
when the OpState should expire
+ optional int32 currentVersion = 10; // Current datanode wire version
// TODO(runzhiwang): when uuid is gone, specify 1 as the index of uuid128
and mark as required
optional UUID uuid128 = 100; // UUID with 128 bits assigned to the
Datanode.
}
diff --git a/hadoop-ozone/fault-injection-test/pom.xml
b/hadoop-ozone/fault-injection-test/pom.xml
index 7de9bcc297..432faab487 100644
--- a/hadoop-ozone/fault-injection-test/pom.xml
+++ b/hadoop-ozone/fault-injection-test/pom.xml
@@ -33,4 +33,12 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
<module>mini-chaos-tests</module>
</modules>
+ <dependencies>
+ <dependency>
+ <groupId>org.mockito</groupId>
+ <artifactId>mockito-inline</artifactId>
+ <scope>test</scope>
+ </dependency>
+ </dependencies>
+
</project>
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
index e864cae00b..176dab7335 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
@@ -23,6 +23,7 @@ import java.util.Optional;
import java.util.UUID;
import java.util.concurrent.TimeoutException;
+import org.apache.hadoop.hdds.DatanodeVersion;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
@@ -321,15 +322,20 @@ public interface MiniOzoneCluster extends AutoCloseable {
protected Optional<Integer> hbProcessorInterval = Optional.empty();
protected String scmId = UUID.randomUUID().toString();
protected String omId = UUID.randomUUID().toString();
-
+
protected Optional<String> datanodeReservedSpace = Optional.empty();
protected boolean includeRecon = false;
-
protected Optional<Integer> omLayoutVersion = Optional.empty();
protected Optional<Integer> scmLayoutVersion = Optional.empty();
protected Optional<Integer> dnLayoutVersion = Optional.empty();
+ protected int dnInitialVersion =
DatanodeVersion.FUTURE_VERSION.toProtoValue();
+ protected int dnCurrentVersion =
DatanodeVersion.FUTURE_VERSION.toProtoValue();
+
+ // Use a relatively small number of handlers for testing
+ protected int numOfOmHandlers = 20;
+ protected int numOfScmHandlers = 20;
protected int numOfDatanodes = 3;
protected int numDataVolumes = 1;
protected boolean startDataNodes = true;
@@ -412,6 +418,30 @@ public interface MiniOzoneCluster extends AutoCloseable {
return this;
}
+ /**
+ * Set the initialVersion for all datanodes.
+ *
+ * @param val initialVersion value to be set for all datanodes.
+ *
+ * @return MiniOzoneCluster.Builder
+ */
+ public Builder setDatanodeInitialVersion(int val) {
+ dnInitialVersion = val;
+ return this;
+ }
+
+ /**
+ * Set the currentVersion for all datanodes.
+ *
+ * @param val currentVersion value to be set for all datanodes.
+ *
+ * @return MiniOzoneCluster.Builder
+ */
+ public Builder setDatanodeCurrentVersion(int val) {
+ dnCurrentVersion = val;
+ return this;
+ }
+
/**
* Sets the number of data volumes per datanode.
*
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
index 400ae3ee2c..820562bae2 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
@@ -35,6 +35,7 @@ import java.util.concurrent.TimeoutException;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hdds.DFSConfigKeysLegacy;
+import org.apache.hadoop.hdds.DatanodeVersion;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.annotation.InterfaceAudience;
import org.apache.hadoop.hdds.client.RatisReplicationConfig;
@@ -102,6 +103,8 @@ import static
org.apache.ozone.test.GenericTestUtils.PortAllocator.getFreePort;
import static
org.apache.ozone.test.GenericTestUtils.PortAllocator.localhostWithFreePort;
import org.hadoop.ozone.recon.codegen.ReconSqlDbConfig;
+import org.mockito.MockedStatic;
+import org.mockito.Mockito;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -132,12 +135,14 @@ public class MiniOzoneClusterImpl implements
MiniOzoneCluster {
private CertificateClient caClient;
private final Set<AutoCloseable> clients = ConcurrentHashMap.newKeySet();
private SecretKeyClient secretKeyClient;
+ private static MockedStatic mockDNStatic =
Mockito.mockStatic(HddsDatanodeService.class);
/**
* Creates a new MiniOzoneCluster with Recon.
*
* @throws IOException if there is an I/O error
*/
+ @SuppressWarnings("checkstyle:ParameterNumber")
MiniOzoneClusterImpl(OzoneConfiguration conf,
SCMConfigurator scmConfigurator,
OzoneManager ozoneManager,
@@ -396,6 +401,16 @@ public class MiniOzoneClusterImpl implements
MiniOzoneCluster {
}, 1000, waitForClusterToBeReadyTimeout);
}
+ private static void overrideDatanodeVersions(int dnInitialVersion, int
dnCurrentVersion) {
+ // FUTURE_VERSION (-1) is not a valid datanode version, so it serves as
a marker indicating that the version has not been overridden
+ if (dnInitialVersion != DatanodeVersion.FUTURE_VERSION.toProtoValue()) {
+
mockDNStatic.when(HddsDatanodeService::getDefaultInitialVersion).thenReturn(dnInitialVersion);
+ }
+ if (dnCurrentVersion != DatanodeVersion.FUTURE_VERSION.toProtoValue()) {
+
mockDNStatic.when(HddsDatanodeService::getDefaultCurrentVersion).thenReturn(dnCurrentVersion);
+ }
+ }
+
@Override
public void restartHddsDatanode(int i, boolean waitForDatanode)
throws InterruptedException, TimeoutException {
@@ -782,10 +797,14 @@ public class MiniOzoneClusterImpl implements
MiniOzoneCluster {
String[] args = new String[] {};
conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES, scmAddress);
List<HddsDatanodeService> hddsDatanodes = new ArrayList<>();
+
+ // Override default datanode initial and current version if necessary
+ overrideDatanodeVersions(dnInitialVersion, dnCurrentVersion);
+
for (int i = 0; i < numOfDatanodes; i++) {
OzoneConfiguration dnConf = new OzoneConfiguration(conf);
configureDatanodePorts(dnConf);
- String datanodeBaseDir = path + "/datanode-" + Integer.toString(i);
+ String datanodeBaseDir = path + "/datanode-" + i;
Path metaDir = Paths.get(datanodeBaseDir, "meta");
List<String> dataDirs = new ArrayList<>();
List<String> reservedSpaceList = new ArrayList<>();
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
index 8bb791bb10..2559be4ea4 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
@@ -17,9 +17,11 @@
package org.apache.hadoop.ozone.client.rpc;
+import org.apache.hadoop.hdds.DatanodeVersion;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.conf.StorageUnit;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.scm.OzoneClientConfig;
import org.apache.hadoop.hdds.scm.XceiverClientManager;
@@ -28,6 +30,7 @@ import
org.apache.hadoop.hdds.scm.storage.BlockDataStreamOutput;
import org.apache.hadoop.hdds.scm.storage.ByteBufferStreamOutput;
import org.apache.hadoop.hdds.utils.IOUtils;
import org.apache.hadoop.ozone.ClientConfigForTesting;
+import org.apache.hadoop.ozone.HddsDatanodeService;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.client.ObjectStore;
@@ -45,6 +48,7 @@ import org.junit.jupiter.api.Timeout;
import java.io.IOException;
import java.nio.ByteBuffer;
+import java.util.List;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
@@ -70,6 +74,7 @@ public class TestBlockDataStreamOutput {
private static String volumeName;
private static String bucketName;
private static String keyString;
+ private static final int DN_OLD_VERSION =
DatanodeVersion.SEPARATE_RATIS_PORTS_AVAILABLE.toProtoValue();
/**
* Create a MiniDFSCluster for testing.
@@ -105,6 +110,7 @@ public class TestBlockDataStreamOutput {
cluster = MiniOzoneCluster.newBuilder(conf)
.setNumDatanodes(5)
+ .setDatanodeCurrentVersion(DN_OLD_VERSION)
.setTotalPipelineNumLimit(3)
.build();
cluster.waitForClusterToBeReady();
@@ -270,4 +276,25 @@ public class TestBlockDataStreamOutput {
assertEquals(dataLength, stream.getTotalAckDataLength());
}
+ @Test
+ public void testDatanodeVersion() throws Exception {
+ // Verify all DNs internally have versions set correctly
+ List<HddsDatanodeService> dns = cluster.getHddsDatanodes();
+ for (HddsDatanodeService dn : dns) {
+ DatanodeDetails details = dn.getDatanodeDetails();
+ assertEquals(DN_OLD_VERSION, details.getCurrentVersion());
+ }
+
+ String keyName = getKeyName();
+ OzoneDataStreamOutput key = createKey(keyName, ReplicationType.RATIS, 0);
+ KeyDataStreamOutput keyDataStreamOutput = (KeyDataStreamOutput)
key.getByteBufStreamOutput();
+ BlockDataStreamOutputEntry stream =
keyDataStreamOutput.getStreamEntries().get(0);
+
+ // Now check that the 3 DNs in a random pipeline return the correct DN versions
+ List<DatanodeDetails> streamDnDetails = stream.getPipeline().getNodes();
+ for (DatanodeDetails details : streamDnDetails) {
+ assertEquals(DN_OLD_VERSION, details.getCurrentVersion());
+ }
+ }
+
}
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDatanodeVersion.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDatanodeVersion.java
new file mode 100644
index 0000000000..5e7d8a4b05
--- /dev/null
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDatanodeVersion.java
@@ -0,0 +1,143 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.client.rpc;
+
+import org.apache.hadoop.hdds.DatanodeVersion;
+import org.apache.hadoop.hdds.client.ReplicationType;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageUnit;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.scm.OzoneClientConfig;
+import org.apache.hadoop.hdds.utils.IOUtils;
+import org.apache.hadoop.ozone.ClientConfigForTesting;
+import org.apache.hadoop.ozone.HddsDatanodeService;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.client.ObjectStore;
+import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneClientFactory;
+import org.apache.hadoop.ozone.client.io.BlockDataStreamOutputEntry;
+import org.apache.hadoop.ozone.client.io.KeyDataStreamOutput;
+import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput;
+import org.apache.hadoop.ozone.container.TestHelper;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
+
+import java.util.List;
+import java.util.UUID;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+/**
+ * Tests DatanodeVersion in client stream.
+ */
+@Timeout(120)
+public class TestDatanodeVersion {
+ private static MiniOzoneCluster cluster;
+ private static OzoneConfiguration conf = new OzoneConfiguration();
+ private static OzoneClient client;
+ private static ObjectStore objectStore;
+ private static int chunkSize;
+ private static int flushSize;
+ private static int maxFlushSize;
+ private static int blockSize;
+ private static String volumeName;
+ private static String bucketName;
+ private static final int DN_OLD_VERSION =
DatanodeVersion.SEPARATE_RATIS_PORTS_AVAILABLE.toProtoValue();
+
+ /**
+ * Create a MiniDFSCluster for testing.
+ * <p>
+ * Ozone is made active by setting OZONE_ENABLED = true
+ */
+ @BeforeAll
+ public static void init() throws Exception {
+ chunkSize = 100;
+ flushSize = 2 * chunkSize;
+ maxFlushSize = 2 * flushSize;
+ blockSize = 2 * maxFlushSize;
+
+ OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class);
+ conf.setFromObject(clientConfig);
+
+ conf.setQuietMode(false);
+ conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 4,
StorageUnit.MB);
+
+ ClientConfigForTesting.newBuilder(StorageUnit.BYTES)
+ .setBlockSize(blockSize)
+ .setChunkSize(chunkSize)
+ .setStreamBufferFlushSize(flushSize)
+ .setStreamBufferMaxSize(maxFlushSize)
+ .setDataStreamBufferFlushSize(maxFlushSize)
+ .setDataStreamMinPacketSize(chunkSize)
+ .setDataStreamWindowSize(5 * chunkSize)
+ .applyTo(conf);
+
+ cluster = MiniOzoneCluster.newBuilder(conf)
+ .setNumDatanodes(3)
+ .setDatanodeCurrentVersion(DN_OLD_VERSION)
+ .build();
+ cluster.waitForClusterToBeReady();
+ // The easiest way to create an open container is to create a key
+ client = OzoneClientFactory.getRpcClient(conf);
+ objectStore = client.getObjectStore();
+ volumeName = "testblockoutputstream";
+ bucketName = volumeName;
+ objectStore.createVolume(volumeName);
+ objectStore.getVolume(volumeName).createBucket(bucketName);
+ }
+
+ /**
+ * Shutdown MiniDFSCluster.
+ */
+ @AfterAll
+ public static void shutdown() {
+ IOUtils.closeQuietly(client);
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+
+ static OzoneDataStreamOutput createKey(String keyName, ReplicationType type,
long size) throws Exception {
+ return TestHelper.createStreamKey(keyName, type, size, objectStore,
volumeName, bucketName);
+ }
+
+ @Test
+ public void testStreamDatanodeVersion() throws Exception {
+ // Verify all DNs internally have versions set correctly
+ List<HddsDatanodeService> dns = cluster.getHddsDatanodes();
+ for (HddsDatanodeService dn : dns) {
+ DatanodeDetails details = dn.getDatanodeDetails();
+ assertEquals(DN_OLD_VERSION, details.getCurrentVersion());
+ }
+
+ String keyName = UUID.randomUUID().toString();
+ OzoneDataStreamOutput key = createKey(keyName, ReplicationType.RATIS, 0);
+ KeyDataStreamOutput keyDataStreamOutput = (KeyDataStreamOutput)
key.getByteBufStreamOutput();
+ BlockDataStreamOutputEntry stream =
keyDataStreamOutput.getStreamEntries().get(0);
+
+ // Now check that the 3 DNs in a random pipeline return the correct DN versions
+ List<DatanodeDetails> streamDnDetails = stream.getPipeline().getNodes();
+ for (DatanodeDetails details : streamDnDetails) {
+ assertEquals(DN_OLD_VERSION, details.getCurrentVersion());
+ }
+ }
+
+}
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]