adoroszlai commented on code in PR #6500:
URL: https://github.com/apache/ozone/pull/6500#discussion_r1556930658


##########
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerMetadataInspector.java:
##########
@@ -181,55 +182,63 @@ public String process(ContainerData containerData, 
DatanodeStore store,
       return null;
     }
 
-    JsonObject containerJson = inspectContainer(kvData, store);
-    boolean correct = checkAndRepair(containerJson, kvData, store);
-
-    Gson gson = new GsonBuilder()
-        .setPrettyPrinting()
-        .serializeNulls()
-        .create();
-    String jsonReport = gson.toJson(containerJson);
-    if (log != null) {
-      if (correct) {
-        log.trace(jsonReport);
-      } else {
-        log.error(jsonReport);
+    // Assuming inspectContainer method returns an ObjectNode instead of 
JsonObject
+    // You will need to refactor inspectContainer to construct an ObjectNode 
using Jackson

Review Comment:
   I guess these comments (and similar ones below) related to refactoring are 
temporary, and can be removed.



##########
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerMetadataInspector.java:
##########
@@ -408,18 +414,19 @@ private boolean checkAndRepair(JsonObject parent,
         return repaired;
       };
 
-      JsonObject usedBytesError = buildErrorAndRepair("dBMetadata." +
-              OzoneConsts.CONTAINER_BYTES_USED, usedBytesAggregate, 
usedBytesDB,
-          keyRepairAction);
-      errors.add(usedBytesError);
+      ObjectNode usedBytesError =
+          buildErrorAndRepair("dBMetadata." + OzoneConsts.CONTAINER_BYTES_USED,
+              usedBytesAggregate,
+          usedBytesDB, keyRepairAction);
+      ((ArrayNode) errors).add(usedBytesError);

Review Comment:
   Cast seems to be unnecessary for `ArrayNode errors`.
   
   ```suggestion
         errors.add(usedBytesError);
   ```



##########
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerMetadataInspector.java:
##########
@@ -437,17 +444,16 @@ private boolean checkAndRepair(JsonObject parent,
         return false;
       };
 
-      final JsonObject deleteCountError = buildErrorAndRepair(
-          "dBMetadata." + OzoneConsts.PENDING_DELETE_BLOCK_COUNT,
-          pendingDeleteCountAggregate, pendingDeleteCountDB,
+      final ObjectNode deleteCountError = buildErrorAndRepair("dBMetadata." + 
OzoneConsts.PENDING_DELETE_BLOCK_COUNT,

Review Comment:
   nit: let's not change this
   
   ```suggestion
         final ObjectNode deleteCountError = buildErrorAndRepair(
             "dBMetadata." + OzoneConsts.PENDING_DELETE_BLOCK_COUNT,
   ```



##########
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerMetadataInspector.java:
##########
@@ -437,17 +444,16 @@ private boolean checkAndRepair(JsonObject parent,
         return false;
       };
 
-      final JsonObject deleteCountError = buildErrorAndRepair(
-          "dBMetadata." + OzoneConsts.PENDING_DELETE_BLOCK_COUNT,
-          pendingDeleteCountAggregate, pendingDeleteCountDB,
+      final ObjectNode deleteCountError = buildErrorAndRepair("dBMetadata." + 
OzoneConsts.PENDING_DELETE_BLOCK_COUNT,
+          pendingDeleteCountAggregate,
+          pendingDeleteCountDB,
           deleteCountRepairAction);
-      errors.add(deleteCountError);
+      ((ArrayNode) errors).add(deleteCountError);

Review Comment:
   ```suggestion
         errors.add(deleteCountError);
   ```



##########
hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/heatmap/TestHeatMapInfo.java:
##########
@@ -745,35 +743,43 @@ public void setUp() throws Exception {
   public void testHeatMapGeneratedInfo() throws IOException {
     // Setup
     // Run the test
-    JsonElement jsonElement = JsonParser.parseString(auditRespStr);
-    JsonObject jsonObject = jsonElement.getAsJsonObject();
-    JsonElement facets = jsonObject.get("facets");
-    JsonObject facetsBucketsObject =
-        facets.getAsJsonObject().get("resources")
-            .getAsJsonObject();
-    ObjectMapper objectMapper = new ObjectMapper();
+    // Parse the JSON string to JsonNode
+    JsonNode rootNode = JsonUtils.readTree(auditRespStr);
 
-    HeatMapProviderDataResource auditLogFacetsResources =
-        objectMapper.readValue(
-            facetsBucketsObject.toString(), HeatMapProviderDataResource.class);
-    EntityMetaData[] entities = auditLogFacetsResources.getMetaDataList();
-    List<EntityMetaData> entityMetaDataList =
-        Arrays.stream(entities).collect(Collectors.toList());
-    EntityReadAccessHeatMapResponse entityReadAccessHeatMapResponse =
-        heatMapUtil.generateHeatMap(entityMetaDataList);
-    
assertThat(entityReadAccessHeatMapResponse.getChildren().size()).isGreaterThan(0);
-    assertEquals(12, entityReadAccessHeatMapResponse.getChildren().size());
-    assertEquals(25600, entityReadAccessHeatMapResponse.getSize());
-    assertEquals(2924, entityReadAccessHeatMapResponse.getMinAccessCount());
-    assertEquals(155074, entityReadAccessHeatMapResponse.getMaxAccessCount());
-    assertEquals("root", entityReadAccessHeatMapResponse.getLabel());
-    assertEquals(0.0, 
entityReadAccessHeatMapResponse.getChildren().get(0).getColor());
-    assertEquals(0.442,
-        entityReadAccessHeatMapResponse.getChildren().get(0).getChildren()
-            .get(0).getChildren().get(1).getColor());
-    assertEquals(0.058,
-        entityReadAccessHeatMapResponse.getChildren().get(0).getChildren()
-            .get(1).getChildren().get(3).getColor());
+    JsonNode facetsNode = rootNode.path("facets");
+    JsonNode resourcesNode = facetsNode.path("resources");
+
+    // Deserialize the resources node directly if it's not missing
+    HeatMapProviderDataResource auditLogFacetsResources = null;
+
+    if (!resourcesNode.isMissingNode()) {
+      auditLogFacetsResources = JsonUtils.treeToValue(resourcesNode,
+          HeatMapProviderDataResource.class);
+    }
+
+    if (auditLogFacetsResources != null) {

Review Comment:
   Instead of wrapping in `if`, let's add an assumption and keep the original 
structure.  Please do the same in the other test methods.
   
   ```
   import static org.assertj.core.api.Assumptions.assumeThat;
   
   ...
   
       assumeThat(resourcesNode.isMissingNode()).isFalse();
   
       HeatMapProviderDataResource auditLogFacetsResources = 
JsonUtils.treeToValue(resourcesNode,
           HeatMapProviderDataResource.class);
   
      ...
   ```
   
   BTW, when can `resources` be missing?



##########
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerMetadataInspector.java:
##########
@@ -165,7 +166,7 @@ public void process(ContainerData containerData, 
DatanodeStore store) {
   }
 
   public String process(ContainerData containerData, DatanodeStore store,
-      Logger log) {
+                        Logger log) {

Review Comment:
   nit: please avoid changing whitespace like this
   
   (Such indentation depends on the method's visibility, return type and name.  
If any of those change, indentation will be off and need to be changed again.)



##########
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerMetadataInspector.java:
##########
@@ -463,32 +469,37 @@ private boolean checkAndRepair(JsonObject parent,
         return repaired;
       };
 
-      JsonObject chunksDirError = 
buildErrorAndRepair("chunksDirectory.present",
-          new JsonPrimitive(true), chunksDirPresent, dirRepairAction);
-      errors.add(chunksDirError);
+      ObjectNode chunksDirError =
+          buildErrorAndRepair("chunksDirectory.present",
+              JsonNodeFactory.instance.booleanNode(true), chunksDirPresent,
+              dirRepairAction);
+      ((ArrayNode) errors).add(
+          chunksDirError); // Assuming 'errors' is an ArrayNode

Review Comment:
   ```suggestion
         errors.add(chunksDirError);
   ```



##########
hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java:
##########
@@ -79,17 +70,13 @@ public EventQueue(String threadNamePrefix) {
   // The field parent in DatanodeDetails class has the circular reference
   // which will result in Gson infinite recursive parsing. We need to exclude
   // this field when generating json string for DatanodeDetails object
-  static class DatanodeDetailsGsonExclusionStrategy
-          implements ExclusionStrategy {
-    @Override
-    public boolean shouldSkipField(FieldAttributes f) {
-      return f.getDeclaringClass() == NodeImpl.class
-              && f.getName().equals("parent");
-    }
-
-    @Override
-    public boolean shouldSkipClass(Class<?> aClass) {
-      return false;
+  public String serializeObject(Object obj) {
+    try {
+      return JsonUtils.toJsonString(obj);
+    } catch (Exception e) {
+      // Handle the exception, maybe log it
+      LOG.error("Error serializing object: ", e);

Review Comment:
   To test the change in `EventQueue`, please run `ozone insight logs -v 
scm.event-queue` in an insecure cluster (use the `ozone` compose environment).
   
   Errors related to serialization show up in SCM log:
   
   ```
   scm_1       | 2024-04-09 06:08:32,299 [IPC Server handler 2 on default port 
9861] ERROR events.EventQueue: Error serializing object: 
   scm_1       | com.fasterxml.jackson.databind.exc.InvalidDefinitionException: 
No serializer found for class com.google.protobuf.UnknownFieldSet$Parser and no 
properties discovered to create BeanSerializer (to avoid exception, disable 
SerializationFeature.FAIL_ON_EMPTY_BEANS) (through reference chain: 
org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher$NodeReportFromDatanode["report"]->org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos$NodeReportProto["unknownFields"]->com.google.protobuf.UnknownFieldSet["parserForType"])
   scm_1       |        at 
com.fasterxml.jackson.databind.exc.InvalidDefinitionException.from(InvalidDefinitionException.java:77)
   scm_1       |        at 
com.fasterxml.jackson.databind.SerializerProvider.reportBadDefinition(SerializerProvider.java:1330)
   scm_1       |        at 
com.fasterxml.jackson.databind.DatabindContext.reportBadDefinition(DatabindContext.java:414)
   scm_1       |        at 
com.fasterxml.jackson.databind.ser.impl.UnknownSerializer.failForEmpty(UnknownSerializer.java:53)
   scm_1       |        at 
com.fasterxml.jackson.databind.ser.impl.UnknownSerializer.serialize(UnknownSerializer.java:30)
   scm_1       |        at 
com.fasterxml.jackson.databind.ser.BeanPropertyWriter.serializeAsField(BeanPropertyWriter.java:732)
   scm_1       |        at 
com.fasterxml.jackson.databind.ser.std.BeanSerializerBase.serializeFields(BeanSerializerBase.java:770)
   scm_1       |        at 
com.fasterxml.jackson.databind.ser.BeanSerializer.serialize(BeanSerializer.java:183)
   scm_1       |        at 
com.fasterxml.jackson.databind.ser.BeanPropertyWriter.serializeAsField(BeanPropertyWriter.java:732)
   scm_1       |        at 
com.fasterxml.jackson.databind.ser.std.BeanSerializerBase.serializeFields(BeanSerializerBase.java:770)
   scm_1       |        at 
com.fasterxml.jackson.databind.ser.BeanSerializer.serialize(BeanSerializer.java:183)
   scm_1       |        at 
com.fasterxml.jackson.databind.ser.BeanPropertyWriter.serializeAsField(BeanPropertyWriter.java:732)
   scm_1       |        at 
com.fasterxml.jackson.databind.ser.std.BeanSerializerBase.serializeFields(BeanSerializerBase.java:770)
   scm_1       |        at 
com.fasterxml.jackson.databind.ser.BeanSerializer.serialize(BeanSerializer.java:183)
   scm_1       |        at 
com.fasterxml.jackson.databind.ser.DefaultSerializerProvider._serialize(DefaultSerializerProvider.java:502)
   scm_1       |        at 
com.fasterxml.jackson.databind.ser.DefaultSerializerProvider.serializeValue(DefaultSerializerProvider.java:341)
   scm_1       |        at 
com.fasterxml.jackson.databind.ObjectMapper._writeValueAndClose(ObjectMapper.java:4793)
   scm_1       |        at 
com.fasterxml.jackson.databind.ObjectMapper.writeValueAsString(ObjectMapper.java:4038)
   scm_1       |        at 
org.apache.hadoop.hdds.server.JsonUtils.toJsonString(JsonUtils.java:70)
   scm_1       |        at 
org.apache.hadoop.hdds.server.events.EventQueue.serializeObject(EventQueue.java:75)
   scm_1       |        at 
org.apache.hadoop.hdds.server.events.EventQueue.fireEvent(EventQueue.java:195)
   ```



##########
hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java:
##########
@@ -205,11 +192,14 @@ public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void 
fireEvent(
         for (EventHandler handler : executorAndHandlers.getValue()) {
           queuedCount.incrementAndGet();
           if (LOG.isTraceEnabled()) {
-            LOG.trace(
-                "Delivering [event={}] to executor/handler {}: 
<json>{}</json>",
-                event.getName(),
-                executorAndHandlers.getKey().getName(),
-                TRACING_SERIALIZER.toJson(payload).replaceAll("\n", "\\\\n"));
+            String jsonPayload = serializeObject(payload);
+            if (jsonPayload != null) {
+              LOG.trace(
+                  "Delivering [event={}] to executor/handler {}: 
<json>{}</json>",
+                  event.getName(),
+                  executorAndHandlers.getKey().getName(),
+                  jsonPayload.replaceAll("\n", "\\\\n"));
+            }

Review Comment:
   If serialization fails, let's still log it without the payload, at debug 
level (using the `else if` branch).



##########
hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java:
##########
@@ -79,17 +70,13 @@ public EventQueue(String threadNamePrefix) {
   // The field parent in DatanodeDetails class has the circular reference
   // which will result in Gson infinite recursive parsing. We need to exclude
   // this field when generating json string for DatanodeDetails object

Review Comment:
   Comment seems to be outdated.



##########
hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMetadataInspector.java:
##########
@@ -376,48 +367,42 @@ private void 
checkJsonReportForIncorrectContainer(JsonObject inspectJson,
   }
 
   private void checkJsonErrorsReport(
-      JsonObject jsonReport, String propertyValue,
+      JsonNode jsonReport, String propertyValue,
       long correctExpected, long correctActual,
       boolean correctRepair) {
     if (correctExpected == correctActual) {
       return;
     }
-    checkJsonErrorsReport(jsonReport, propertyValue,
-        new JsonPrimitive(correctExpected),
-        new JsonPrimitive(correctActual),
-        correctRepair);
+    JsonNode correctExpectedNode = JsonUtils.valueToJsonNode(correctExpected);
+    JsonNode correctActualNode = JsonUtils.valueToJsonNode(correctActual);
+
+    checkJsonErrorsReport(jsonReport, propertyValue, correctExpectedNode,
+        correctActualNode, correctRepair);
   }
 
   /**
   * Checks the error list in the provided JsonReport for an error matching
    * the template passed in with the parameters.
    */
-  private void checkJsonErrorsReport(JsonObject jsonReport,
-      String propertyValue, JsonPrimitive correctExpected,
-      JsonPrimitive correctActual, boolean correctRepair) {
+  private void checkJsonErrorsReport(JsonNode jsonReport,
+                                     String propertyValue,
+                                     JsonNode correctExpected,
+                                     JsonNode correctActual,
+                                     boolean correctRepair) {

Review Comment:
   Let's keep original formatting.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to