szetszwo commented on code in PR #6500:
URL: https://github.com/apache/ozone/pull/6500#discussion_r1590175491
##########
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerMetadataInspector.java:
##########
@@ -181,55 +180,53 @@ public String process(ContainerData containerData, DatanodeStore store,
return null;
}
- JsonObject containerJson = inspectContainer(kvData, store);
+ ObjectNode containerJson = inspectContainer(kvData, store);
boolean correct = checkAndRepair(containerJson, kvData, store);
- Gson gson = new GsonBuilder()
- .setPrettyPrinting()
- .serializeNulls()
- .create();
- String jsonReport = gson.toJson(containerJson);
+ String jsonReport = null;
+ jsonReport = JsonUtils.toJsonStringWIthIndent(containerJson);
if (log != null) {
if (correct) {
log.trace(jsonReport);
} else {
log.error(jsonReport);
}
}
+
return jsonReport;
}
- static JsonObject inspectContainer(KeyValueContainerData containerData,
+ static ObjectNode inspectContainer(KeyValueContainerData containerData,
DatanodeStore store) {
- JsonObject containerJson = new JsonObject();
+ ObjectNode containerJson = JsonUtils.createObjectNode(null);
try {
// Build top level container properties.
- containerJson.addProperty("containerID", containerData.getContainerID());
+ containerJson.put("containerID", containerData.getContainerID());
String schemaVersion = containerData.getSchemaVersion();
- containerJson.addProperty("schemaVersion", schemaVersion);
- containerJson.addProperty("containerState",
- containerData.getState().toString());
- containerJson.addProperty("currentDatanodeID",
+ containerJson.put("schemaVersion", schemaVersion);
+ containerJson.put("containerState", containerData.getState().toString());
+ containerJson.put("currentDatanodeID",
containerData.getVolume().getDatanodeUuid());
- containerJson.addProperty("originDatanodeID",
- containerData.getOriginNodeId());
+ containerJson.put("originDatanodeID", containerData.getOriginNodeId());
// Build DB metadata values.
- Table<String, Long> metadataTable = store.getMetadataTable();
- JsonObject dBMetadata = getDBMetadataJson(metadataTable, containerData);
- containerJson.add("dBMetadata", dBMetadata);
+ // Assuming getDBMetadataJson and getAggregateValues methods return ObjectNode and are refactored to use Jackson
+ ObjectNode dBMetadata =
+ getDBMetadataJson(store.getMetadataTable(), containerData);
Review Comment:
Use single line.
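For illustration, the single-line form presumably being requested (assuming it fits the line-length limit) would be:
```java
ObjectNode dBMetadata = getDBMetadataJson(store.getMetadataTable(), containerData);
```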
##########
hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java:
##########
@@ -28,13 +28,12 @@
import java.util.Map;
import java.util.Properties;
-import com.google.common.base.Strings;
Review Comment:
It is good to remove the common Strings import. 👍
##########
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerMetadataInspector.java:
##########
@@ -373,33 +365,32 @@ private boolean checkAndRepair(JsonObject parent,
return repaired;
};
- JsonObject blockCountError = buildErrorAndRepair("dBMetadata." +
+ ObjectNode blockCountError = buildErrorAndRepair("dBMetadata." +
OzoneConsts.BLOCK_COUNT, blockCountAggregate, blockCountDB,
keyRepairAction);
errors.add(blockCountError);
}
// Check and repair used bytes.
- JsonElement usedBytesDB = parent.getAsJsonObject("dBMetadata")
- .get(OzoneConsts.CONTAINER_BYTES_USED);
- JsonElement usedBytesAggregate = parent.getAsJsonObject("aggregates")
- .get("usedBytes");
+ JsonNode usedBytesDB = parent.path("dBMetadata")
+ .path(OzoneConsts.CONTAINER_BYTES_USED);
Review Comment:
Use single line.
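For illustration, the chained calls collapsed onto one line would presumably read:
```java
JsonNode usedBytesDB = parent.path("dBMetadata").path(OzoneConsts.CONTAINER_BYTES_USED);
```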
##########
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerMetadataInspector.java:
##########
@@ -181,55 +180,53 @@ public String process(ContainerData containerData, DatanodeStore store,
return null;
}
- JsonObject containerJson = inspectContainer(kvData, store);
+ ObjectNode containerJson = inspectContainer(kvData, store);
boolean correct = checkAndRepair(containerJson, kvData, store);
- Gson gson = new GsonBuilder()
- .setPrettyPrinting()
- .serializeNulls()
- .create();
- String jsonReport = gson.toJson(containerJson);
+ String jsonReport = null;
+ jsonReport = JsonUtils.toJsonStringWIthIndent(containerJson);
Review Comment:
Use single line.
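For illustration, the two statements would presumably collapse into one, dropping the redundant null initialization:
```java
String jsonReport = JsonUtils.toJsonStringWIthIndent(containerJson);
```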
##########
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerMetadataInspector.java:
##########
@@ -463,32 +453,34 @@ private boolean checkAndRepair(JsonObject parent,
return repaired;
};
- JsonObject chunksDirError = buildErrorAndRepair("chunksDirectory.present",
- new JsonPrimitive(true), chunksDirPresent, dirRepairAction);
+ ObjectNode chunksDirError =
+ buildErrorAndRepair("chunksDirectory.present",
+ JsonNodeFactory.instance.booleanNode(true), chunksDirPresent,
+ dirRepairAction);
Review Comment:
Reformat this as
```java
ObjectNode chunksDirError = buildErrorAndRepair("chunksDirectory.present",
JsonNodeFactory.instance.booleanNode(true), chunksDirPresent,
dirRepairAction);
```
##########
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerMetadataInspector.java:
##########
@@ -181,55 +180,53 @@ public String process(ContainerData containerData, DatanodeStore store,
return null;
}
- JsonObject containerJson = inspectContainer(kvData, store);
+ ObjectNode containerJson = inspectContainer(kvData, store);
boolean correct = checkAndRepair(containerJson, kvData, store);
- Gson gson = new GsonBuilder()
- .setPrettyPrinting()
- .serializeNulls()
- .create();
- String jsonReport = gson.toJson(containerJson);
+ String jsonReport = null;
+ jsonReport = JsonUtils.toJsonStringWIthIndent(containerJson);
if (log != null) {
if (correct) {
log.trace(jsonReport);
} else {
log.error(jsonReport);
}
}
+
return jsonReport;
}
- static JsonObject inspectContainer(KeyValueContainerData containerData,
+ static ObjectNode inspectContainer(KeyValueContainerData containerData,
DatanodeStore store) {
- JsonObject containerJson = new JsonObject();
+ ObjectNode containerJson = JsonUtils.createObjectNode(null);
try {
// Build top level container properties.
- containerJson.addProperty("containerID", containerData.getContainerID());
+ containerJson.put("containerID", containerData.getContainerID());
String schemaVersion = containerData.getSchemaVersion();
- containerJson.addProperty("schemaVersion", schemaVersion);
- containerJson.addProperty("containerState",
- containerData.getState().toString());
- containerJson.addProperty("currentDatanodeID",
+ containerJson.put("schemaVersion", schemaVersion);
+ containerJson.put("containerState", containerData.getState().toString());
+ containerJson.put("currentDatanodeID",
containerData.getVolume().getDatanodeUuid());
- containerJson.addProperty("originDatanodeID",
- containerData.getOriginNodeId());
+ containerJson.put("originDatanodeID", containerData.getOriginNodeId());
// Build DB metadata values.
- Table<String, Long> metadataTable = store.getMetadataTable();
- JsonObject dBMetadata = getDBMetadataJson(metadataTable, containerData);
- containerJson.add("dBMetadata", dBMetadata);
+ // Assuming getDBMetadataJson and getAggregateValues methods return ObjectNode and are refactored to use Jackson
+ ObjectNode dBMetadata =
+ getDBMetadataJson(store.getMetadataTable(), containerData);
+ containerJson.set("dBMetadata", dBMetadata);
// Build aggregate values.
- JsonObject aggregates = getAggregateValues(store,
- containerData, schemaVersion);
- containerJson.add("aggregates", aggregates);
+ ObjectNode aggregates =
+ getAggregateValues(store, containerData, schemaVersion);
Review Comment:
Use single line.
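For illustration, presumably:
```java
ObjectNode aggregates = getAggregateValues(store, containerData, schemaVersion);
```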
##########
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerMetadataInspector.java:
##########
@@ -373,33 +375,32 @@ private boolean checkAndRepair(JsonObject parent,
return repaired;
};
- JsonObject blockCountError = buildErrorAndRepair("dBMetadata." +
+ ObjectNode blockCountError = buildErrorAndRepair("dBMetadata." +
OzoneConsts.BLOCK_COUNT, blockCountAggregate, blockCountDB,
keyRepairAction);
errors.add(blockCountError);
}
// Check and repair used bytes.
- JsonElement usedBytesDB = parent.getAsJsonObject("dBMetadata")
- .get(OzoneConsts.CONTAINER_BYTES_USED);
- JsonElement usedBytesAggregate = parent.getAsJsonObject("aggregates")
- .get("usedBytes");
+ JsonNode usedBytesDB = parent.path("dBMetadata")
+ .path(OzoneConsts.CONTAINER_BYTES_USED);
+ JsonNode usedBytesAggregate = parent.path("aggregates").path("usedBytes");
// If used bytes is absent from the DB, it is only an error if there is
// a non-zero aggregate of used bytes among the block keys.
long usedBytesDBLong = 0;
- if (!usedBytesDB.isJsonNull()) {
- usedBytesDBLong = usedBytesDB.getAsLong();
+ if (!usedBytesDB.isMissingNode()) {
Review Comment:
> ... If it's not missing, then the value is extracted using asLong().
Is it possible that it is not missing but equal to null?
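For illustration, Jackson distinguishes a missing field from a field whose value is JSON null: `path()` returns a `MissingNode` only when the field is absent, while a null value yields a `NullNode` for which `isMissingNode()` is false and `asLong()` returns 0. A minimal standalone sketch (not the PR's code):
```java
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ObjectNode;

public class NullVsMissingDemo {
  public static void main(String[] args) {
    ObjectNode node = new ObjectMapper().createObjectNode();
    node.putNull("usedBytes");                    // field present, value is JSON null

    JsonNode present = node.path("usedBytes");
    System.out.println(present.isMissingNode());  // false -> the isMissingNode() guard passes
    System.out.println(present.isNull());         // true
    System.out.println(present.asLong());         // 0 (default for a null node)

    JsonNode absent = node.path("noSuchField");
    System.out.println(absent.isMissingNode());   // true
  }
}
```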
##########
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerMetadataInspector.java:
##########
@@ -408,18 +399,18 @@ private boolean checkAndRepair(JsonObject parent,
return repaired;
};
- JsonObject usedBytesError = buildErrorAndRepair("dBMetadata." +
+ ObjectNode usedBytesError = buildErrorAndRepair("dBMetadata." +
OzoneConsts.CONTAINER_BYTES_USED, usedBytesAggregate,
usedBytesDB,
keyRepairAction);
errors.add(usedBytesError);
}
// check and repair if db delete count mismatches delete transaction count.
- final JsonElement pendingDeleteCountDB = dBMetadata.get(
+ JsonNode pendingDeleteCountDB = dBMetadata.path(
OzoneConsts.PENDING_DELETE_BLOCK_COUNT);
final long dbDeleteCount = jsonToLong(pendingDeleteCountDB);
- final JsonElement pendingDeleteCountAggregate
- = aggregates.get(PendingDelete.COUNT);
+ final JsonNode pendingDeleteCountAggregate
+ = aggregates.path(PendingDelete.COUNT);
Review Comment:
Use single line.
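Presumably the single line would be:
```java
final JsonNode pendingDeleteCountAggregate = aggregates.path(PendingDelete.COUNT);
```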
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]