This is an automated email from the ASF dual-hosted git repository.
agupta pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new a8c377f1fb HDDS-11069. Block location is missing in output of Ozone
debug chunkinfo command for EC. (#6909)
a8c377f1fb is described below
commit a8c377f1fba68c024ff82dbe08c800be6392ec4f
Author: Sadanand Shenoy <[email protected]>
AuthorDate: Fri Jul 12 13:41:03 2024 +0530
HDDS-11069. Block location is missing in output of Ozone debug chunkinfo
command for EC. (#6909)
---
.../apache/hadoop/hdds/scm/XceiverClientGrpc.java | 24 ++++++++++++++++
.../hadoop/ozone/shell/TestOzoneDebugShell.java | 32 +++++++++++++++-------
.../apache/hadoop/ozone/debug/ChunkKeyHandler.java | 2 +-
3 files changed, 47 insertions(+), 11 deletions(-)
diff --git
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
index 02747f53ca..dac582a141 100644
---
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
+++
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
@@ -285,6 +285,7 @@ public class XceiverClientGrpc extends XceiverClientSpi {
}
for (DatanodeDetails dn : datanodeList) {
try {
+ request = reconstructRequestIfNeeded(request, dn);
futureHashMap.put(dn, sendCommandAsync(request, dn).getResponse());
} catch (InterruptedException e) {
LOG.error("Command execution was interrupted.");
@@ -316,6 +317,29 @@ public class XceiverClientGrpc extends XceiverClientSpi {
return responseProtoHashMap;
}
+ /**
+ * In case of getBlock for EC keys, it is required to set the replicaIndex
+ * on every request, using the replicaIndex of the datanode the request is
+ * sent to. This method unpacks the proto and reconstructs the request after
+ * setting the replicaIndex field.
+ * @param request the original container command request
+ * @param dn the datanode this request will be sent to
+ * @return new updated Request
+ */
+ private ContainerCommandRequestProto reconstructRequestIfNeeded(
+ ContainerCommandRequestProto request, DatanodeDetails dn) {
+ boolean isEcRequest = pipeline.getReplicationConfig()
+ .getReplicationType() == HddsProtos.ReplicationType.EC;
+ if (request.hasGetBlock() && isEcRequest) {
+ ContainerProtos.GetBlockRequestProto gbr = request.getGetBlock();
+ request = request.toBuilder().setGetBlock(gbr.toBuilder().setBlockID(
+ gbr.getBlockID().toBuilder().setReplicaIndex(
+ pipeline.getReplicaIndex(dn)).build()).build()).build();
+ }
+ return request;
+ }
+
@Override
public ContainerCommandResponseProto sendCommand(
ContainerCommandRequestProto request, List<Validator> validators)
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java
index 15d9746fcb..9b1747b4c2 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java
@@ -21,6 +21,8 @@ package org.apache.hadoop.ozone.shell;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdds.client.ECReplicationConfig;
+import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -46,6 +48,8 @@ import org.apache.ozone.test.GenericTestUtils;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.ValueSource;
import picocli.CommandLine;
import java.io.File;
@@ -86,7 +90,7 @@ public class TestOzoneDebugShell {
protected static void startCluster() throws Exception {
// Init HA cluster
omServiceId = "om-service-test1";
- final int numDNs = 3;
+ final int numDNs = 5;
cluster = MiniOzoneCluster.newBuilder(conf)
.setNumDatanodes(numDNs)
.build();
@@ -112,13 +116,14 @@ public class TestOzoneDebugShell {
startCluster();
}
- @Test
- public void testChunkInfoCmdBeforeAfterCloseContainer() throws Exception {
+ @ParameterizedTest
+ @ValueSource(booleans = {true, false})
+ public void testChunkInfoCmdBeforeAfterCloseContainer(boolean isEcKey)
throws Exception {
final String volumeName = UUID.randomUUID().toString();
final String bucketName = UUID.randomUUID().toString();
final String keyName = UUID.randomUUID().toString();
- writeKey(volumeName, bucketName, keyName);
+ writeKey(volumeName, bucketName, keyName, isEcKey);
int exitCode = runChunkInfoCommand(volumeName, bucketName, keyName);
assertEquals(0, exitCode);
@@ -134,7 +139,7 @@ public class TestOzoneDebugShell {
final String volumeName = UUID.randomUUID().toString();
final String bucketName = UUID.randomUUID().toString();
final String keyName = UUID.randomUUID().toString();
- writeKey(volumeName, bucketName, keyName);
+ writeKey(volumeName, bucketName, keyName, false);
int exitCode = runChunkInfoAndVerifyPaths(volumeName, bucketName, keyName);
assertEquals(0, exitCode);
}
@@ -150,7 +155,7 @@ public class TestOzoneDebugShell {
final String bucketName = UUID.randomUUID().toString();
final String keyName = UUID.randomUUID().toString();
- writeKey(volumeName, bucketName, keyName);
+ writeKey(volumeName, bucketName, keyName, false);
String snapshotName =
client.getObjectStore().createSnapshot(volumeName, bucketName,
"snap1");
@@ -176,15 +181,22 @@ public class TestOzoneDebugShell {
OM_DB_NAME + checkPointDir;
}
-
private static void writeKey(String volumeName, String bucketName,
- String keyName) throws IOException {
+ String keyName, boolean isEcKey) throws IOException {
+ ReplicationConfig repConfig;
+ if (isEcKey) {
+ repConfig = new ECReplicationConfig(3, 2);
+ } else {
+ repConfig = ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS,
+ ReplicationFactor.THREE);
+ }
try (OzoneClient client = OzoneClientFactory.getRpcClient(conf)) {
// see HDDS-10091 for making this work with FILE_SYSTEM_OPTIMIZED layout
- TestDataUtil.createVolumeAndBucket(client, volumeName, bucketName,
BucketLayout.LEGACY);
+ TestDataUtil.createVolumeAndBucket(client, volumeName, bucketName,
+ BucketLayout.LEGACY);
TestDataUtil.createKey(
client.getObjectStore().getVolume(volumeName).getBucket(bucketName),
- keyName, ReplicationFactor.THREE, ReplicationType.RATIS, "test");
+ keyName, repConfig, "test");
}
}
diff --git
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java
index 5c311d49c9..b5b2364007 100644
---
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java
+++
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java
@@ -111,7 +111,7 @@ public class ChunkKeyHandler extends KeyHandler implements
keyPipeline.getReplicationConfig().getReplicationType() ==
HddsProtos.ReplicationType.EC;
Pipeline pipeline;
- if (keyPipeline.getType() != HddsProtos.ReplicationType.STAND_ALONE) {
+ if (!isECKey && keyPipeline.getType() !=
HddsProtos.ReplicationType.STAND_ALONE) {
pipeline = Pipeline.newBuilder(keyPipeline)
.setReplicationConfig(StandaloneReplicationConfig
.getInstance(ONE)).build();
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]