This is an automated email from the ASF dual-hosted git repository.
adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new 205cb2cc4d HDDS-12846. Log DatanodeDetails instead of DatanodeDetails.getUuidString (#8295)
205cb2cc4d is described below
commit 205cb2cc4d0e3b445ff56033e763dcebfb9f8019
Author: Chia-Chuan Yu <[email protected]>
AuthorDate: Sat Apr 19 02:29:42 2025 +0800
HDDS-12846. Log DatanodeDetails instead of DatanodeDetails.getUuidString (#8295)
---
.../ClosePipelineCommandHandler.java | 7 ++---
.../balancer/AbstractFindTargetGreedy.java | 12 ++++----
.../container/balancer/ContainerBalancerTask.java | 34 ++++++++++------------
.../scm/container/balancer/FindSourceGreedy.java | 7 ++---
.../hdds/scm/container/balancer/MoveManager.java | 2 +-
.../health/DeletingContainerHandler.java | 2 +-
.../replication/health/EmptyContainerHandler.java | 2 +-
.../hdds/scm/node/HealthyReadOnlyNodeHandler.java | 2 +-
.../hadoop/hdds/scm/node/SCMNodeManager.java | 8 ++---
.../hdds/scm/pipeline/PipelineActionHandler.java | 2 +-
.../hdds/scm/pipeline/PipelineManagerImpl.java | 3 +-
.../hdds/scm/pipeline/RatisPipelineProvider.java | 2 +-
.../hadoop/hdds/scm/container/MockNodeManager.java | 3 +-
.../hadoop/hdds/upgrade/TestHDDSUpgrade.java | 2 +-
.../ozone/recon/scm/ReconNewNodeHandler.java | 3 +-
.../hadoop/ozone/freon/DatanodeSimulator.java | 6 ++--
16 files changed, 41 insertions(+), 56 deletions(-)
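
The change is mechanical throughout: every log statement that previously passed DatanodeDetails.getUuidString() now passes the DatanodeDetails object itself. With SLF4J's parameterized logging this defers the toString() call until the message is actually emitted, and the rendered text carries whatever identifying context DatanodeDetails#toString() provides rather than the bare UUID. A minimal sketch of the pattern, using a hypothetical Node class rather than the real DatanodeDetails API (whose toString() format may differ):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Hypothetical stand-in for DatanodeDetails; the real class lives in
    // org.apache.hadoop.hdds.protocol.
    final class Node {
      private final String uuid;
      private final String hostName;

      Node(String uuid, String hostName) {
        this.uuid = uuid;
        this.hostName = hostName;
      }

      String getUuidString() {
        return uuid;
      }

      @Override
      public String toString() {
        // Richer than the bare UUID: the host name is included as well.
        return uuid + "(" + hostName + ")";
      }
    }

    public final class LogArgDemo {
      private static final Logger LOG =
          LoggerFactory.getLogger(LogArgDemo.class);

      public static void main(String[] args) {
        Node dn = new Node("a1b2c3d4-0000-0000-0000-000000000000",
            "dn1.example.com");
        // Before: only the UUID is rendered, and getUuidString() runs even
        // when the log level suppresses the message.
        LOG.info("Restarting datanode {}", dn.getUuidString());
        // After: toString() is deferred until the message is formatted,
        // and the output identifies the node more fully.
        LOG.info("Restarting datanode {}", dn);
      }
    }
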
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ClosePipelineCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ClosePipelineCommandHandler.java
index 5cbe472689..1dd9ef24ec 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ClosePipelineCommandHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ClosePipelineCommandHandler.java
@@ -139,11 +139,10 @@ public void handle(SCMCommand<?> command, OzoneContainer ozoneContainer,
// Remove the Ratis group from the current datanode pipeline, might throw GroupMismatchException as
// well. It is a no-op for XceiverServerSpi implementations (e.g. XceiverServerGrpc)
server.removeGroup(pipelineIdProto);
- LOG.info("Close Pipeline {} command on datanode {}.", pipelineID,
- dn.getUuidString());
+ LOG.info("Close Pipeline {} command on datanode {}.", pipelineID,
dn);
} else {
LOG.debug("Ignoring close pipeline command for pipeline {} on
datanode {} " +
- "as it does not exist", pipelineID, dn.getUuidString());
+ "as it does not exist", pipelineID, dn);
}
} catch (IOException e) {
Throwable gme = HddsClientUtils.containsException(e,
GroupMismatchException.class);
@@ -151,7 +150,7 @@ public void handle(SCMCommand<?> command, OzoneContainer ozoneContainer,
// ignore silently since this means that the group has been closed by earlier close pipeline
// command in another datanode
LOG.debug("The group for pipeline {} on datanode {} has been removed
by earlier close " +
- "pipeline command handled in another datanode", pipelineID,
dn.getUuidString());
+ "pipeline command handled in another datanode", pipelineID, dn);
} else {
LOG.error("Can't close pipeline {}", pipelineID, e);
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/AbstractFindTargetGreedy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/AbstractFindTargetGreedy.java
index 0bd07b7167..dad5ce8534 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/AbstractFindTargetGreedy.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/AbstractFindTargetGreedy.java
@@ -127,7 +127,7 @@ public ContainerMoveSelection findTargetForContainerMove(
}
}
logger.debug("Container Balancer could not find a target for " +
- "source datanode {}", source.getUuidString());
+ "source datanode {}", source);
return null;
}
@@ -164,8 +164,7 @@ private boolean containerMoveSatisfiesPlacementPolicy(
boolean isPolicySatisfied = placementStatus.isPolicySatisfied();
if (!isPolicySatisfied) {
logger.debug("Moving container {} from source {} to target {} will not "
+
- "satisfy placement policy.", containerID, source.getUuidString(),
- target.getUuidString());
+ "satisfy placement policy.", containerID, source, target);
}
return isPolicySatisfied;
}
@@ -191,7 +190,7 @@ private boolean canSizeEnterTarget(DatanodeDetails target, long size) {
if (sizeEnteringAfterMove > config.getMaxSizeEnteringTarget()) {
logger.debug("{} bytes cannot enter datanode {} because 'size" +
".entering.target.max' limit is {} and {} bytes have already "
+
- "entered.", size, target.getUuidString(),
+ "entered.", size, target,
config.getMaxSizeEnteringTarget(),
sizeEnteringNode.get(target));
return false;
@@ -200,14 +199,13 @@ private boolean canSizeEnterTarget(DatanodeDetails target, long size) {
.calculateUtilization(sizeEnteringAfterMove), upperLimit) > 0) {
logger.debug("{} bytes cannot enter datanode {} because its " +
"utilization will exceed the upper limit of {}.", size,
- target.getUuidString(), upperLimit);
+ target, upperLimit);
return false;
}
return true;
}
- logger.warn("No record of how much size has entered datanode {}",
- target.getUuidString());
+ logger.warn("No record of how much size has entered datanode {}", target);
return false;
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java
index 7ca52abc79..98e64b6c00 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java
@@ -506,7 +506,7 @@ private boolean initializeIteration() {
if (LOG.isDebugEnabled()) {
LOG.debug("Utilization for node {} with capacity {}B, used {}B, and " +
"remaining {}B is {}",
- datanodeUsageInfo.getDatanodeDetails().getUuidString(),
+ datanodeUsageInfo.getDatanodeDetails(),
datanodeUsageInfo.getScmNodeStat().getCapacity().get(),
datanodeUsageInfo.getScmNodeStat().getScmUsed().get(),
datanodeUsageInfo.getScmNodeStat().getRemaining().get(),
@@ -683,8 +683,8 @@ private boolean processMoveSelection(DatanodeDetails source,
"{}B from source datanode {} to target datanode {}",
containerID.toString(),
containerInfo.getUsedBytes(),
- source.getUuidString(),
- moveSelection.getTargetNode().getUuidString());
+ source,
+ moveSelection.getTargetNode());
if (moveContainer(source, moveSelection)) {
// consider move successful for now, and update selection criteria
@@ -792,9 +792,8 @@ private long cancelMovesThatExceedTimeoutDuration() {
if (!entry.getValue().isDone()) {
LOG.warn("Container move timed out for container {} from source {}" +
" to target {}.", entry.getKey().getContainerID(),
- containerToSourceMap.get(entry.getKey().getContainerID())
- .getUuidString(),
- entry.getKey().getTargetNode().getUuidString());
+ containerToSourceMap.get(entry.getKey().getContainerID()),
+ entry.getKey().getTargetNode());
entry.getValue().cancel(true);
numCancelled += 1;
@@ -817,14 +816,14 @@ private ContainerMoveSelection matchSourceWithTarget(DatanodeDetails source) {
if (sourceContainerIDSet.isEmpty()) {
if (LOG.isDebugEnabled()) {
LOG.debug("ContainerBalancer could not find any candidate containers "
+
- "for datanode {}", source.getUuidString());
+ "for datanode {}", source);
}
return null;
}
if (LOG.isDebugEnabled()) {
LOG.debug("ContainerBalancer is finding suitable target for source " +
- "datanode {}", source.getUuidString());
+ "datanode {}", source);
}
ContainerMoveSelection moveSelection = null;
@@ -846,12 +845,11 @@ private ContainerMoveSelection matchSourceWithTarget(DatanodeDetails source) {
if (moveSelection == null) {
LOG.info("ContainerBalancer could not find a suitable target for " +
- "source node {}.", source.getUuidString());
+ "source node {}.", source);
return null;
}
LOG.info("ContainerBalancer matched source datanode {} with target " +
- "datanode {} for container move.", source.getUuidString(),
- moveSelection.getTargetNode().getUuidString());
+ "datanode {} for container move.", source,
moveSelection.getTargetNode());
return moveSelection;
}
@@ -947,24 +945,22 @@ private boolean moveContainer(DatanodeDetails source,
if (ex != null) {
LOG.info("Container move for container {} from source {} to " +
"target {} failed with exceptions.",
- containerID.toString(),
- source.getUuidString(),
- moveSelection.getTargetNode().getUuidString(), ex);
+ containerID, source,
+ moveSelection.getTargetNode(), ex);
metrics.incrementNumContainerMovesFailedInLatestIteration(1);
} else {
if (result == MoveManager.MoveResult.COMPLETED) {
sizeActuallyMovedInLatestIteration +=
containerInfo.getUsedBytes();
LOG.debug("Container move completed for container {} from " +
- "source {} to target {}", containerID,
- source.getUuidString(),
- moveSelection.getTargetNode().getUuidString());
+ "source {} to target {}", containerID, source,
+ moveSelection.getTargetNode());
} else {
LOG.warn(
"Container move for container {} from source {} to target" +
" {} failed: {}",
- moveSelection.getContainerID(), source.getUuidString(),
- moveSelection.getTargetNode().getUuidString(), result);
+ moveSelection.getContainerID(), source,
+ moveSelection.getTargetNode(), result);
}
}
});
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceGreedy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceGreedy.java
index cfcba76959..6388684596 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceGreedy.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceGreedy.java
@@ -165,21 +165,20 @@ public boolean canSizeLeaveSource(DatanodeDetails source, long size) {
//3 after subtracting sizeLeavingAfterMove, the usage is bigger
// than or equal to lowerLimit
if (size <= 0) {
- LOG.debug("{} bytes container cannot leave datanode {}", size,
source.getUuidString());
+ LOG.debug("{} bytes container cannot leave datanode {}", size, source);
return false;
}
if (sizeLeavingAfterMove > config.getMaxSizeLeavingSource()) {
LOG.debug("{} bytes cannot leave datanode {} because 'size.leaving" +
".source.max' limit is {} and {} bytes have already left.",
- size, source.getUuidString(), config.getMaxSizeLeavingSource(),
+ size, source, config.getMaxSizeLeavingSource(),
sizeLeavingNode.get(source));
return false;
}
if (Double.compare(nodeManager.getUsageInfo(source)
.calculateUtilization(-sizeLeavingAfterMove), lowerLimit) < 0) {
LOG.debug("{} bytes cannot leave datanode {} because its utilization "
+
- "will drop below the lower limit of {}.", size,
- source.getUuidString(), lowerLimit);
+ "will drop below the lower limit of {}.", size, source,
lowerLimit);
return false;
}
return true;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/MoveManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/MoveManager.java
index 57a546c547..790461101d 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/MoveManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/MoveManager.java
@@ -300,7 +300,7 @@ CompletableFuture<MoveResult> move(
}
startMove(containerInfo, src, tgt, ret);
LOG.debug("Processed a move request for container {}, from {} to {}",
- cid, src.getUuidString(), tgt.getUuidString());
+ cid, src, tgt);
return ret;
}
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/DeletingContainerHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/DeletingContainerHandler.java
index a3d0efaa8f..3be77b65f6 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/DeletingContainerHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/DeletingContainerHandler.java
@@ -98,7 +98,7 @@ public boolean handle(ContainerCheckRequest request) {
} catch (NotLeaderException e) {
LOG.warn("Failed to delete empty replica with index {} for " +
"container {} on datanode {}", rp.getReplicaIndex(),
- cID, rp.getDatanodeDetails().getUuidString(), e);
+ cID, rp.getDatanodeDetails(), e);
}
});
return true;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/EmptyContainerHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/EmptyContainerHandler.java
index 144af19987..c7390ce0ca 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/EmptyContainerHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/EmptyContainerHandler.java
@@ -134,7 +134,7 @@ private void deleteContainerReplicas(final ContainerInfo containerInfo,
" {} on datanode {}",
rp.getReplicaIndex(),
containerInfo.containerID(),
- rp.getDatanodeDetails().getUuidString(), e);
+ rp.getDatanodeDetails(), e);
}
}
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/HealthyReadOnlyNodeHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/HealthyReadOnlyNodeHandler.java
index 9bbfb519b6..e85b0ec32d 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/HealthyReadOnlyNodeHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/HealthyReadOnlyNodeHandler.java
@@ -88,7 +88,7 @@ public void onMessage(DatanodeDetails datanodeDetails,
"containers.",
pipelineID, pipeline.getPipelineState(),
HddsProtos.NodeState.HEALTHY_READONLY,
- datanodeDetails.getUuidString());
+ datanodeDetails);
pipelineManager.closePipeline(pipelineID);
} catch (IOException ex) {
LOG.error("Failed to close pipeline {} which uses HEALTHY READONLY " +
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
index 30bdfa7219..68c1e0bbb3 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
@@ -439,10 +439,7 @@ public RegisteredCommand register(
final DatanodeInfo oldNode = nodeStateManager.getNode(datanodeDetails);
if (updateDnsToDnIdMap(oldNode.getHostName(), oldNode.getIpAddress(),
hostName, ipAddress, dnId)) {
- LOG.info("Updating datanode {} from {} to {}",
- datanodeDetails.getUuidString(),
- oldNode,
- datanodeDetails);
+ LOG.info("Updating datanode from {} to {}", oldNode,
datanodeDetails);
clusterMap.update(oldNode, datanodeDetails);
nodeStateManager.updateNode(datanodeDetails, layoutInfo);
DatanodeDetails dn = nodeStateManager.getNode(datanodeDetails);
@@ -1069,8 +1066,7 @@ private SCMNodeStat getNodeStatInternal(DatanodeDetails datanodeDetails) {
return new SCMNodeStat(capacity, used, remaining, committed,
freeSpaceToSpare);
} catch (NodeNotFoundException e) {
- LOG.warn("Cannot generate NodeStat, datanode {} not found.",
- datanodeDetails.getUuidString());
+ LOG.warn("Cannot generate NodeStat, datanode {} not found.",
datanodeDetails);
return null;
}
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineActionHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineActionHandler.java
index e191162ab0..94a17d5989 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineActionHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineActionHandler.java
@@ -75,7 +75,7 @@ private void processPipelineAction(final DatanodeDetails datanode,
final PipelineID pid = PipelineID.getFromProtobuf(info.getPipelineID());
final String logMsg = "Received pipeline action " + action + " for " + pid +
- " from datanode " + datanode.getUuidString() + "." +
+ " from datanode " + datanode + "." +
" Reason : " + info.getDetailedReason();
// We can skip processing Pipeline Action if the current SCM is not leader.
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerImpl.java
index 4ef1676618..6bb59a2236 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerImpl.java
@@ -546,8 +546,7 @@ public void closeStalePipelines(DatanodeDetails datanodeDetails) {
List<Pipeline> pipelinesWithStaleIpOrHostname =
getStalePipelines(datanodeDetails);
if (pipelinesWithStaleIpOrHostname.isEmpty()) {
- LOG.debug("No stale pipelines for datanode {}",
- datanodeDetails.getUuidString());
+ LOG.debug("No stale pipelines for datanode {}", datanodeDetails);
return;
}
LOG.info("Found {} stale pipelines",
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java
index c95f7208b8..008683dbc9 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java
@@ -201,7 +201,7 @@ public synchronized Pipeline create(RatisReplicationConfig replicationConfig,
dns.forEach(node -> {
LOG.info("Sending CreatePipelineCommand for pipeline:{} to datanode:{}",
- pipeline.getId(), node.getUuidString());
+ pipeline.getId(), node);
eventPublisher.fireEvent(SCMEvents.DATANODE_COMMAND,
new CommandForDatanode<>(node.getUuid(), createCommand));
});
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
index 4904b85876..7b2aa2981b 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
@@ -188,8 +188,7 @@ public MockNodeManager(
setContainers(usageInfo.getDatanodeDetails(), entry.getValue());
} catch (NodeNotFoundException e) {
LOG.warn("Could not find Datanode {} for adding containers to it. " +
- "Skipping this node.", usageInfo
- .getDatanodeDetails().getUuidString());
+ "Skipping this node.", usageInfo.getDatanodeDetails());
continue;
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java
index a2537e6a80..0704932744 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java
@@ -409,7 +409,7 @@ private Boolean injectDataNodeFailureDuringSCMUpgrade() {
new ArrayList<>(cluster.getHddsDatanodes());
for (HddsDatanodeService ds: currentDataNodes) {
DatanodeDetails dn = ds.getDatanodeDetails();
- LOG.info("Restarting datanode {}", dn.getUuidString());
+ LOG.info("Restarting datanode {}", dn);
cluster.restartHddsDatanode(dn, false);
}
cluster.waitForClusterToBeReady();
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNewNodeHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNewNodeHandler.java
index 844aacb065..ce9bce3a98 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNewNodeHandler.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNewNodeHandler.java
@@ -44,8 +44,7 @@ public void onMessage(DatanodeDetails datanodeDetails,
try {
nodeManager.addNodeToDB(datanodeDetails);
} catch (IOException e) {
- LOG.error("Unable to add new node {} to Node DB.",
- datanodeDetails.getUuidString());
+ LOG.error("Unable to add new node {} to Node DB.", datanodeDetails);
}
}
}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeSimulator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeSimulator.java
index d1247c8b73..d1749f9a15 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeSimulator.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeSimulator.java
@@ -337,7 +337,7 @@ private boolean startDatanode(DatanodeSimulationState dn)
throws IOException {
if (!registerDataNode(dn)) {
LOGGER.info("Failed to register datanode to SCM: {}",
- dn.getDatanodeDetails().getUuidString());
+ dn.getDatanodeDetails());
return false;
}
@@ -359,7 +359,7 @@ private boolean startDatanode(DatanodeSimulationState dn)
reconHeartbeatInterval, TimeUnit.MILLISECONDS);
LOGGER.info("Successfully registered datanode to SCM: {}",
- dn.getDatanodeDetails().getUuidString());
+ dn.getDatanodeDetails());
return true;
}
@@ -418,7 +418,7 @@ private void heartbeat(InetSocketAddress endpoint,
}
} catch (Exception e) {
LOGGER.info("Error sending heartbeat for {}: {}",
- dn.getDatanodeDetails().getUuidString(), e.getMessage(), e);
+ dn.getDatanodeDetails(), e.getMessage(), e);
}
}
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]