ChenSammi commented on code in PR #4540:
URL: https://github.com/apache/ozone/pull/4540#discussion_r1191131886


##########
hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/solr/SolrUtil.java:
##########
@@ -0,0 +1,497 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hadoop.ozone.recon.solr;
+
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.gson.JsonElement;
+import com.google.gson.JsonObject;
+import com.google.gson.JsonParser;
+import com.google.inject.Inject;
+import org.apache.commons.lang3.ArrayUtils;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.hdds.HddsUtils;
+import org.apache.hadoop.hdds.conf.ConfigurationException;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
+import org.apache.hadoop.ozone.recon.api.handlers.EntityHandler;
+import org.apache.hadoop.ozone.recon.api.types.AuditLogFacetsResources;
+import org.apache.hadoop.ozone.recon.api.types.DUResponse;
+import org.apache.hadoop.ozone.recon.api.types.EntityMetaData;
+import org.apache.hadoop.ozone.recon.api.types.EntityReadAccessHeatMapResponse;
+import org.apache.hadoop.ozone.recon.api.types.LastXUnit;
+import org.apache.hadoop.ozone.recon.api.types.ResponseStatus;
+import org.apache.hadoop.ozone.recon.http.HttpRequestWrapper;
+import org.apache.hadoop.ozone.recon.http.ReconHttpClient;
+import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
+import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.http.NameValuePair;
+import org.apache.http.message.BasicNameValuePair;
+import org.apache.solr.client.solrj.util.ClientUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.security.PrivilegedExceptionAction;
+import java.text.SimpleDateFormat;
+import java.time.Instant;
+import java.time.ZonedDateTime;
+import java.time.temporal.ChronoUnit;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Date;
+import java.util.List;
+import java.util.TimeZone;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.stream.Collectors;
+
+import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_SOLR_TIMEZONE_KEY;
+import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_SOLR_ADDRESS_KEY;
+
+/**
+ * This class is a general utility class for handling
+ * Solr query functions.
+ */
+public class SolrUtil {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(SolrUtil.class);
+  public static final String DEFAULT_TIMEZONE_VALUE = "UTC";
+
+  private OzoneConfiguration ozoneConfiguration;
+  private final ReconNamespaceSummaryManager reconNamespaceSummaryManager;
+  private final ReconOMMetadataManager omMetadataManager;
+  private final OzoneStorageContainerManager reconSCM;
+  private AtomicReference<EntityReadAccessHeatMapResponse>
+      entityReadAccessHeatMapRespRef;
+  private SimpleDateFormat dateFormat = new SimpleDateFormat(
+      "yyyy-MM-dd'T'HH:mm:ss'Z'");
+  private final String timeZone;
+
+  @Inject
+  public SolrUtil(ReconNamespaceSummaryManager
+                      namespaceSummaryManager,
+                  ReconOMMetadataManager omMetadataManager,
+                  OzoneStorageContainerManager reconSCM,
+                  OzoneConfiguration ozoneConfiguration) {
+    this.reconNamespaceSummaryManager = namespaceSummaryManager;
+    this.omMetadataManager = omMetadataManager;
+    this.reconSCM = reconSCM;
+    this.entityReadAccessHeatMapRespRef = new AtomicReference<>(
+        new EntityReadAccessHeatMapResponse());
+    this.ozoneConfiguration = ozoneConfiguration;
+    this.timeZone = this.ozoneConfiguration.get(OZONE_RECON_SOLR_TIMEZONE_KEY,
+        DEFAULT_TIMEZONE_VALUE);
+    if (timeZone != null) {
+      LOG.info("Setting timezone to {}", timeZone);
+      try {
+        dateFormat.setTimeZone(TimeZone.getTimeZone(timeZone));
+      } catch (Throwable t) {
+        LOG.error("Error setting timezone. TimeZone = {}", timeZone, t);
+      }
+    }
+  }
+
+  private void addBucketData(
+      EntityReadAccessHeatMapResponse volumeEntity, String[] split,
+      int readAccessCount, long keySize) {
+    List<EntityReadAccessHeatMapResponse> children =
+        volumeEntity.getChildren();
+    EntityReadAccessHeatMapResponse bucketEntity = null;
+    List<EntityReadAccessHeatMapResponse> bucketList =
+        children.stream().filter(entity -> entity.getLabel().
+            equalsIgnoreCase(split[1])).collect(Collectors.toList());
+    if (bucketList.size() > 0) {
+      bucketEntity = bucketList.get(0);
+    }
+    if (bucketEntity != null) {
+      addPrefixPathInfoToBucket(split, bucketEntity, readAccessCount, keySize);
+    } else {
+      addBucketAndPrefixPath(split, volumeEntity, readAccessCount, keySize);
+    }
+  }
+
+  private void addVolumeData(
+      EntityReadAccessHeatMapResponse rootEntity,
+      String[] split, int readAccessCount, long keySize) {
+    List<EntityReadAccessHeatMapResponse> children =
+        rootEntity.getChildren();
+    EntityReadAccessHeatMapResponse volumeInfo =
+        new EntityReadAccessHeatMapResponse();
+    volumeInfo.setLabel(split[0]);
+    children.add(volumeInfo);
+    addBucketAndPrefixPath(split, volumeInfo, readAccessCount, keySize);
+  }
+
+  private void updateVolumeSize(
+      EntityReadAccessHeatMapResponse volumeInfo) {
+    List<EntityReadAccessHeatMapResponse> children =
+        volumeInfo.getChildren();
+    children.stream().forEach(bucket -> {
+      volumeInfo.setSize(volumeInfo.getSize() + bucket.getSize());
+      updateBucketLevelMinMaxAccessCount(bucket);
+      updateBucketAccessRatio(bucket);
+    });
+  }
+
+  private void updateBucketAccessRatio(
+      EntityReadAccessHeatMapResponse bucket) {
+    long delta = bucket.getMaxAccessCount() - bucket.getMinAccessCount();
+    List<EntityReadAccessHeatMapResponse> children =
+        bucket.getChildren();
+    children.stream().forEach(path -> {
+      path.setColor(1.000);
+      if (delta > 0) {
+        double truncatedValue = truncate(
+            ((double) path.getAccessCount() /
+                (double) bucket.getMaxAccessCount()), 3);
+        path.setColor(truncatedValue);
+      }
+    });
+  }
+
+  private static double truncate(double value, int decimalPlaces) {
+    if (decimalPlaces < 0) {
+      throw new IllegalArgumentException();
+    }
+    value = value * Math.pow(10, decimalPlaces);
+    value = Math.floor(value);
+    value = value / Math.pow(10, decimalPlaces);
+    return value;
+  }
+
+  private void updateRootEntitySize(
+      EntityReadAccessHeatMapResponse rootEntity) {
+    List<EntityReadAccessHeatMapResponse> children =
+        rootEntity.getChildren();
+    children.stream().forEach(volume -> {
+      updateVolumeSize(volume);
+      rootEntity.setSize(rootEntity.getSize() + volume.getSize());
+    });
+  }
+
+  private void addBucketAndPrefixPath(
+      String[] split, EntityReadAccessHeatMapResponse volumeEntity,
+      long readAccessCount, long keySize) {
+    List<EntityReadAccessHeatMapResponse> bucketEntities =
+        volumeEntity.getChildren();
+    EntityReadAccessHeatMapResponse bucket =
+        new EntityReadAccessHeatMapResponse();
+    bucket.setLabel(split[1]);
+    bucketEntities.add(bucket);
+    bucket.setMinAccessCount(readAccessCount);
+    addPrefixPathInfoToBucket(split, bucket, readAccessCount, keySize);
+  }
+
+  private void addPrefixPathInfoToBucket(
+      String[] split, EntityReadAccessHeatMapResponse bucket,
+      long readAccessCount, long keySize) {
+    List<EntityReadAccessHeatMapResponse> prefixes = bucket.getChildren();
+    updateBucketSize(bucket, keySize);
+    String path = Arrays.stream(split)
+        .skip(2).collect(Collectors.joining("/"));
+    EntityReadAccessHeatMapResponse prefixPathInfo =
+        new EntityReadAccessHeatMapResponse();
+    prefixPathInfo.setLabel(path);
+    prefixPathInfo.setAccessCount(readAccessCount);
+    prefixPathInfo.setSize(keySize);
+    prefixes.add(prefixPathInfo);
+    // This is done for a specific ask by the UI treemap: to render
+    // varying color shades based on varying ranges of access count.
+    updateRootLevelMinMaxAccessCount(readAccessCount);
+  }
+
+  private void updateBucketLevelMinMaxAccessCount(
+      EntityReadAccessHeatMapResponse bucket) {
+    List<EntityReadAccessHeatMapResponse> children =
+        bucket.getChildren();
+    if (children.size() > 0) {
+      bucket.setMinAccessCount(Long.MAX_VALUE);
+    }
+    children.stream().forEach(path -> {
+      long readAccessCount = path.getAccessCount();
+      bucket.setMinAccessCount(
+          path.getAccessCount() < bucket.getMinAccessCount() ? readAccessCount :
+              bucket.getMinAccessCount());
+      bucket.setMaxAccessCount(
+          readAccessCount > bucket.getMaxAccessCount() ? readAccessCount :
+              bucket.getMaxAccessCount());
+    });
+  }
+
+  private void updateRootLevelMinMaxAccessCount(long readAccessCount) {
+    EntityReadAccessHeatMapResponse rootEntity =
+        this.entityReadAccessHeatMapRespRef.get();
+    rootEntity.setMinAccessCount(
+        readAccessCount < rootEntity.getMinAccessCount() ? readAccessCount :
+            rootEntity.getMinAccessCount());
+    rootEntity.setMaxAccessCount(
+        readAccessCount > rootEntity.getMaxAccessCount() ? readAccessCount :
+            rootEntity.getMaxAccessCount());
+  }
+
+  private void updateBucketSize(EntityReadAccessHeatMapResponse bucket,
+                                       long keySize) {
+    bucket.setSize(bucket.getSize() + keySize);
+  }
+
+  public void queryLogs(String path, String entityType, String startDate,

Review Comment:
   Wrapping entityReadAccessHeatMapRespRef in an AtomicReference does not make it thread safe. Say,
   
   ```
                t1           t2                 t3           t4                 t5
   thread1      queryLogs    generateHeatMap                                    getEntityReadAccessHeatMapResponse
   thread2                                     queryLogs    generateHeatMap     getEntityReadAccessHeatMapResponse
   ```
   
   Then both thread1 and thread2 will see the result of thread2's query.
   
   So if entityReadAccessHeatMapRespRef has to remain a shared resource, synchronized access should be applied to it. Otherwise, making the heat map the return value of queryLogs solves the problem.
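   
   A minimal sketch of the return-value option (the parameter list is abbreviated and the body is only a placeholder; only EntityReadAccessHeatMapResponse is taken from the patch):
   
   ```
   public EntityReadAccessHeatMapResponse queryLogs(String path,
       String entityType, String startDate) throws IOException {
     // Build the response locally so no state is shared between
     // concurrent callers.
     EntityReadAccessHeatMapResponse response =
         new EntityReadAccessHeatMapResponse();
     // ... run the Solr query and populate 'response' here ...
     return response;
   }
   ```
   Each caller then owns its own response object, so the shared field and getEntityReadAccessHeatMapResponse could go away entirely.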



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

