This is an automated email from the ASF dual-hosted git repository.
sshenoy pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new 1864a7922b HDDS-9356. Ozone Debug Chunkinfo shows incorrect block path for some nodes. (#5402)
1864a7922b is described below
commit 1864a7922b522f1e5df3254e3a3f95dcce55acd7
Author: Aryan Gupta <[email protected]>
AuthorDate: Tue Oct 10 12:29:03 2023 +0530
HDDS-9356. Ozone Debug Chunkinfo shows incorrect block path for some nodes. (#5402)
---
.../hdds/scm/storage/ContainerProtocolCalls.java | 26 ++++++++++++
.../hdds/scm/cli/ContainerOperationClient.java | 20 ++++++++++
.../hadoop/ozone/shell/TestOzoneDebugShell.java | 46 +++++++++++++++++++++-
.../apache/hadoop/ozone/debug/ChunkKeyHandler.java | 12 ++++--
4 files changed, 99 insertions(+), 5 deletions(-)
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
index a3ac8de027..638282b30c 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
@@ -724,4 +724,30 @@ public final class ContainerProtocolCalls {
}
return datanodeToResponseMap;
}
+
+ public static HashMap<DatanodeDetails, ReadContainerResponseProto>
+ readContainerFromAllNodes(XceiverClientSpi client, long containerID,
+ String encodedToken) throws IOException, InterruptedException {
+ String id = client.getPipeline().getFirstNode().getUuidString();
+ HashMap<DatanodeDetails, ReadContainerResponseProto> datanodeToResponseMap
+ = new HashMap<>();
+ ContainerCommandRequestProto.Builder request =
+ ContainerCommandRequestProto.newBuilder();
+ request.setCmdType(Type.ReadContainer);
+ request.setContainerID(containerID);
+ request.setReadContainer(ReadContainerRequestProto.getDefaultInstance());
+ request.setDatanodeUuid(id);
+ if (encodedToken != null) {
+ request.setEncodedToken(encodedToken);
+ }
+ Map<DatanodeDetails, ContainerCommandResponseProto> responses =
+ client.sendCommandOnAllNodes(request.build());
+ for (Map.Entry<DatanodeDetails, ContainerCommandResponseProto> entry :
+ responses.entrySet()) {
+ datanodeToResponseMap.put(entry.getKey(),
+ entry.getValue().getReadContainer());
+ }
+ return datanodeToResponseMap;
+ }
+
}
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java
index e10d1d5b83..ee33c6b0e7 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.SecretKeyProtocolScm;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ReadContainerResponseProto;
@@ -363,6 +364,25 @@ public class ContainerOperationClient implements ScmClient {
}
}
+ public Map<DatanodeDetails, ReadContainerResponseProto>
+ readContainerFromAllNodes(long containerID, Pipeline pipeline)
+ throws IOException, InterruptedException {
+ XceiverClientManager clientManager = getXceiverClientManager();
+ String encodedToken = getEncodedContainerToken(containerID);
+ XceiverClientSpi client = null;
+ try {
+ client = clientManager.acquireClientForReadData(pipeline);
+ Map<DatanodeDetails, ReadContainerResponseProto> responses =
+ ContainerProtocolCalls.readContainerFromAllNodes(client, containerID,
+ encodedToken);
+ return responses;
+ } finally {
+ if (client != null) {
+ clientManager.releaseClient(client, false);
+ }
+ }
+ }
+
@Override
public ContainerDataProto readContainer(long containerID) throws IOException
{
ContainerWithPipeline info = getContainerWithPipeline(containerID);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java
index 72b14ddd40..16963fdb35 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java
@@ -18,6 +18,8 @@
package org.apache.hadoop.ozone.shell;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
@@ -52,6 +54,8 @@ import java.io.PrintWriter;
import java.io.StringWriter;
import java.time.Duration;
import java.util.UUID;
+import java.util.Set;
+import java.util.HashSet;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
@@ -130,6 +134,16 @@ public class TestOzoneDebugShell {
Assertions.assertEquals(0, exitCode);
}
+ @Test
+ public void testChunkInfoVerifyPathsAreDifferent() throws Exception {
+ final String volumeName = UUID.randomUUID().toString();
+ final String bucketName = UUID.randomUUID().toString();
+ final String keyName = UUID.randomUUID().toString();
+ writeKey(volumeName, bucketName, keyName);
+ int exitCode = runChunkInfoAndVerifyPaths(volumeName, bucketName, keyName);
+ Assertions.assertEquals(0, exitCode);
+ }
+
@Test
public void testLdbCliForOzoneSnapshot() throws Exception {
StringWriter stdout = new StringWriter();
@@ -182,7 +196,6 @@ public class TestOzoneDebugShell {
String keyName) {
String bucketPath =
Path.SEPARATOR + volumeName + Path.SEPARATOR + bucketName;
-
String[] args = new String[] {
getSetConfStringFromConf(OMConfigKeys.OZONE_OM_ADDRESS_KEY),
"chunkinfo", bucketPath + Path.SEPARATOR + keyName };
@@ -192,6 +205,37 @@ public class TestOzoneDebugShell {
return exitCode;
}
+ private int runChunkInfoAndVerifyPaths(String volumeName, String bucketName,
+ String keyName) throws Exception {
+ String bucketPath =
+ Path.SEPARATOR + volumeName + Path.SEPARATOR + bucketName;
+ String[] args = new String[] {
+ getSetConfStringFromConf(OMConfigKeys.OZONE_OM_ADDRESS_KEY),
+ "chunkinfo", bucketPath + Path.SEPARATOR + keyName };
+ OzoneDebug ozoneDebugShell = new OzoneDebug(conf);
+ int exitCode = 1;
+ try (GenericTestUtils.SystemOutCapturer capture = new GenericTestUtils
+ .SystemOutCapturer()) {
+ exitCode = ozoneDebugShell.execute(args);
+ Set<String> blockFilePaths = new HashSet<>();
+ String output = capture.getOutput();
+ ObjectMapper objectMapper = new ObjectMapper();
+ // Parse the JSON array string into a JsonNode
+ JsonNode jsonNode = objectMapper.readTree(output);
+ JsonNode keyLocations = jsonNode.get("KeyLocations").get(0);
+ for (JsonNode element : keyLocations) {
+ String fileName =
+ element.get("Locations").get("files").get(0).toString();
+ blockFilePaths.add(fileName);
+ }
+ // DN storage directories are set differently for each DN
+ // in MiniOzoneCluster as datanode-0,datanode-1,datanode-2 which is why
+ // we expect 3 paths here in the set.
+ Assertions.assertEquals(3, blockFilePaths.size());
+ }
+ return exitCode;
+ }
+
/**
* Generate string to pass as extra arguments to the
* ozone debug command line, This is necessary for client to
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java
index 21bd114551..2c55b4ea4c 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java
@@ -23,7 +23,6 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
-import java.util.HashMap;
import java.util.HashSet;
import com.google.gson.GsonBuilder;
import com.google.gson.Gson;
@@ -134,11 +133,16 @@ public class ChunkKeyHandler extends KeyHandler implements
ContainerProtos.DatanodeBlockID datanodeBlockID =
keyLocation.getBlockID().getDatanodeBlockIDProtobuf();
// doing a getBlock on all nodes
- HashMap<DatanodeDetails, ContainerProtos.GetBlockResponseProto>
+ Map<DatanodeDetails, ContainerProtos.GetBlockResponseProto>
responses = null;
+ Map<DatanodeDetails, ContainerProtos.ReadContainerResponseProto>
+ readContainerResponses = null;
try {
responses =
ContainerProtocolCalls.getBlockFromAllNodes(xceiverClient,
datanodeBlockID, keyLocation.getToken());
+ readContainerResponses =
+ containerOperationClient.readContainerFromAllNodes(
+ keyLocation.getContainerID(), pipeline);
} catch (InterruptedException e) {
LOG.error("Execution interrupted due to " + e);
Thread.currentThread().interrupt();
@@ -146,6 +150,7 @@ public class ChunkKeyHandler extends KeyHandler implements
JsonArray responseFromAllNodes = new JsonArray();
for (Map.Entry<DatanodeDetails, ContainerProtos.GetBlockResponseProto>
entry : responses.entrySet()) {
+ chunkPaths.clear();
JsonObject jsonObj = new JsonObject();
if (entry.getValue() == null) {
LOG.error("Cant execute getBlock on this node");
@@ -153,8 +158,7 @@ public class ChunkKeyHandler extends KeyHandler implements
}
tempchunks = entry.getValue().getBlockData().getChunksList();
ContainerProtos.ContainerDataProto containerData =
- containerOperationClient.readContainer(keyLocation
- .getContainerID(), pipeline);
+ readContainerResponses.get(entry.getKey()).getContainerData();
for (ContainerProtos.ChunkInfo chunkInfo : tempchunks) {
String fileName = containerLayoutVersion.getChunkFile(new File(
getChunkLocationPath(containerData.getContainerPath())),
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]