sumitagrawl commented on code in PR #4540:
URL: https://github.com/apache/ozone/pull/4540#discussion_r1173568340


##########
hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/solr/SolrUtil.java:
##########
@@ -0,0 +1,492 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hadoop.ozone.recon.solr;
+
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.gson.JsonElement;
+import com.google.gson.JsonObject;
+import com.google.gson.JsonParser;
+import com.google.inject.Inject;
+import org.apache.commons.lang3.ArrayUtils;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.hdds.HddsUtils;
+import org.apache.hadoop.hdds.conf.ConfigurationException;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
+import org.apache.hadoop.ozone.recon.api.handlers.EntityHandler;
+import org.apache.hadoop.ozone.recon.api.types.AuditLogFacetsResources;
+import org.apache.hadoop.ozone.recon.api.types.DUResponse;
+import org.apache.hadoop.ozone.recon.api.types.EntityMetaData;
+import org.apache.hadoop.ozone.recon.api.types.EntityReadAccessHeatMapResponse;
+import org.apache.hadoop.ozone.recon.api.types.LastXUnit;
+import org.apache.hadoop.ozone.recon.api.types.ResponseStatus;
+import org.apache.hadoop.ozone.recon.http.HttpRequestWrapper;
+import org.apache.hadoop.ozone.recon.http.ReconHttpClient;
+import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
+import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.http.NameValuePair;
+import org.apache.http.message.BasicNameValuePair;
+import org.apache.solr.client.solrj.util.ClientUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.security.PrivilegedExceptionAction;
+import java.text.SimpleDateFormat;
+import java.time.Instant;
+import java.time.ZonedDateTime;
+import java.time.temporal.ChronoUnit;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Date;
+import java.util.List;
+import java.util.TimeZone;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.stream.Collectors;
+
+import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_SOLR_TIMEZONE_KEY;
+import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_SOLR_ADDRESS_KEY;
+
+/**
+ * This class is a general utility class for handling
+ * Solr query functions.
+ */
+public class SolrUtil {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(SolrUtil.class);
+  public static final String DEFAULT_TIMEZONE_VALUE = "UTC";
+
+  private OzoneConfiguration ozoneConfiguration;
+  private final ReconNamespaceSummaryManager reconNamespaceSummaryManager;
+  private final ReconOMMetadataManager omMetadataManager;
+  private final OzoneStorageContainerManager reconSCM;
+  private AtomicReference<EntityReadAccessHeatMapResponse>
+      entityReadAccessHeatMapRespRef;
+  private SimpleDateFormat dateFormat = new SimpleDateFormat(
+      "yyyy-MM-dd'T'HH:mm:ss'Z'");
+  private final String timeZone;
+
+  @Inject
+  public SolrUtil(ReconNamespaceSummaryManager
+                      namespaceSummaryManager,
+                  ReconOMMetadataManager omMetadataManager,
+                  OzoneStorageContainerManager reconSCM,
+                  OzoneConfiguration ozoneConfiguration) {
+    this.reconNamespaceSummaryManager = namespaceSummaryManager;
+    this.omMetadataManager = omMetadataManager;
+    this.reconSCM = reconSCM;
+    this.entityReadAccessHeatMapRespRef = new AtomicReference<>(
+        new EntityReadAccessHeatMapResponse());
+    this.ozoneConfiguration = ozoneConfiguration;
+    this.timeZone = this.ozoneConfiguration.get(OZONE_RECON_SOLR_TIMEZONE_KEY,
+        DEFAULT_TIMEZONE_VALUE);
+    if (timeZone != null) {
+      LOG.info("Setting timezone to " + timeZone);
+      try {
+        dateFormat.setTimeZone(TimeZone.getTimeZone(timeZone));
+      } catch (Throwable t) {
+        LOG.error("Error setting timezone. TimeZone = " + timeZone);
+      }
+    }
+  }
+
+  private void addBucketData(
+      EntityReadAccessHeatMapResponse volumeEntity, String[] split,
+      int readAccessCount, long keySize) {
+    List<EntityReadAccessHeatMapResponse> children =
+        volumeEntity.getChildren();
+    EntityReadAccessHeatMapResponse bucketEntity = null;
+    List<EntityReadAccessHeatMapResponse> bucketList =
+        children.stream().filter(entity -> entity.getLabel().
+            equalsIgnoreCase(split[1])).collect(Collectors.toList());
+    if (bucketList.size() > 0) {
+      bucketEntity = bucketList.get(0);
+    }
+    if (children.contains(bucketEntity)) {
+      addPrefixPathInfoToBucket(split, bucketEntity, readAccessCount, keySize);
+    } else {
+      addBucketAndPrefixPath(split, volumeEntity, readAccessCount, keySize);
+    }
+  }
+
+  private void addVolumeData(
+      EntityReadAccessHeatMapResponse rootEntity,
+      String[] split, int readAccessCount, long keySize) {
+    List<EntityReadAccessHeatMapResponse> children =
+        rootEntity.getChildren();
+    EntityReadAccessHeatMapResponse volumeInfo =
+        new EntityReadAccessHeatMapResponse();
+    volumeInfo.setLabel(split[0]);
+    children.add(volumeInfo);
+    addBucketAndPrefixPath(split, volumeInfo, readAccessCount, keySize);
+  }
+
+  private void updateVolumeSize(
+      EntityReadAccessHeatMapResponse volumeInfo) {
+    List<EntityReadAccessHeatMapResponse> children =
+        volumeInfo.getChildren();
+    children.stream().forEach(bucket -> {
+      volumeInfo.setSize(volumeInfo.getSize() + bucket.getSize());
+      updateBucketLevelMinMaxAccessCount(bucket);
+      updateBucketColors(bucket);
+    });
+  }
+
+  private void updateBucketColors(EntityReadAccessHeatMapResponse bucket) {

Review Comment:
   Color is mapped by the UI; the value computed here is essentially an access ratio. The method can be renamed accordingly.
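   For example (a sketch against the diff's own fields; `updateBucketAccessRatio` is only a suggested name):

```java
// The value stored via setColor() is really a per-path access ratio
// (accessCount / maxAccessCount, truncated to 3 decimal places); the UI
// maps that ratio to a color shade.
private void updateBucketAccessRatio(EntityReadAccessHeatMapResponse bucket) {
  long delta = bucket.getMaxAccessCount() - bucket.getMinAccessCount();
  for (EntityReadAccessHeatMapResponse path : bucket.getChildren()) {
    path.setColor(1.000);
    if (delta > 0) {
      path.setColor(truncate(
          (double) path.getAccessCount() / (double) bucket.getMaxAccessCount(),
          3));
    }
  }
}
```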



##########
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java:
##########
@@ -350,6 +352,52 @@ public static InetSocketAddress getReconAddresses(
     return NetUtils.createSocketAddr(hostname.get(), port);
   }
 
+  /**
+   * Retrieve the socket address of the Apache Solr server.
+   *
+   * @return Apache Solr server address
+   * @throws IllegalArgumentException If the configuration is invalid
+   */
+  public static InetSocketAddress getSolrAddress(

Review Comment:
   IMO, this method should be moved to recon (instead of common), as part of the recon --> solr module.
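   For illustration, the recon-side equivalent could look like this (a sketch; the class name `ReconSolrUtil` is hypothetical, and it reuses the `OZONE_SOLR_ADDRESS_KEY` key and `NetUtils` helper already referenced in this PR):

```java
// Hypothetical recon-side home for the Solr address lookup.
public final class ReconSolrUtil {
  private ReconSolrUtil() {
  }

  public static InetSocketAddress getSolrAddress(OzoneConfiguration conf) {
    String address = conf.get(OZONE_SOLR_ADDRESS_KEY);
    if (StringUtils.isEmpty(address)) {
      return null; // caller decides whether a missing address is fatal
    }
    return NetUtils.createSocketAddr(address);
  }
}
```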



##########
hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/solr/SolrUtil.java:
##########
@@ -0,0 +1,492 @@
+  private void updateBucketColors(EntityReadAccessHeatMapResponse bucket) {
+    long delta = bucket.getMaxAccessCount() - bucket.getMinAccessCount();
+    List<EntityReadAccessHeatMapResponse> children =
+        bucket.getChildren();
+    children.stream().forEach(path -> {
+      path.setColor(1.000);
+      if (delta > 0) {
+        double truncatedValue = truncate(
+            ((double) path.getAccessCount() /
+                (double) bucket.getMaxAccessCount()), 3);
+        path.setColor(truncatedValue);
+      }
+    });
+  }
+
+  private static double truncate(double value, int decimalPlaces) {
+    if (decimalPlaces < 0) {
+      throw new IllegalArgumentException();
+    }
+    value = value * Math.pow(10, decimalPlaces);
+    value = Math.floor(value);
+    value = value / Math.pow(10, decimalPlaces);
+    return value;
+  }
+
+  private void updateRootEntitySize(
+      EntityReadAccessHeatMapResponse rootEntity) {
+    List<EntityReadAccessHeatMapResponse> children =
+        rootEntity.getChildren();
+    children.stream().forEach(volume -> {
+      updateVolumeSize(volume);
+      rootEntity.setSize(rootEntity.getSize() + volume.getSize());
+    });
+  }
+
+  private void addBucketAndPrefixPath(
+      String[] split, EntityReadAccessHeatMapResponse volumeEntity,
+      long readAccessCount, long keySize) {
+    List<EntityReadAccessHeatMapResponse> bucketEntities =
+        volumeEntity.getChildren();
+    EntityReadAccessHeatMapResponse bucket =
+        new EntityReadAccessHeatMapResponse();
+    bucket.setLabel(split[1]);
+    bucketEntities.add(bucket);
+    bucket.setMinAccessCount(readAccessCount);
+    addPrefixPathInfoToBucket(split, bucket, readAccessCount, keySize);
+  }
+
+  private void addPrefixPathInfoToBucket(
+      String[] split, EntityReadAccessHeatMapResponse bucket,
+      long readAccessCount, long keySize) {
+    List<EntityReadAccessHeatMapResponse> prefixes = bucket.getChildren();
+    updateBucketSize(bucket, keySize);
+    String path = Arrays.stream(split)
+        .skip(2).collect(Collectors.joining("/"));
+    EntityReadAccessHeatMapResponse prefixPathInfo =
+        new EntityReadAccessHeatMapResponse();
+    prefixPathInfo.setLabel(path);
+    prefixPathInfo.setAccessCount(readAccessCount);
+    prefixPathInfo.setSize(keySize);
+    prefixes.add(prefixPathInfo);
+    // This is done for specific ask by UI treemap to render and provide
+    // varying color shades based on varying ranges of access count.
+    updateRootLevelMinMaxAccessCount(readAccessCount);
+  }
+
+  private void updateBucketLevelMinMaxAccessCount(
+      EntityReadAccessHeatMapResponse bucket) {
+    List<EntityReadAccessHeatMapResponse> children =
+        bucket.getChildren();
+    bucket.setMinAccessCount(Long.MAX_VALUE);
+    children.stream().forEach(path -> {
+      long readAccessCount = path.getAccessCount();
+      bucket.setMinAccessCount(
+          path.getAccessCount() < bucket.getMinAccessCount() ? readAccessCount :
+              bucket.getMinAccessCount());
+      bucket.setMaxAccessCount(
+          readAccessCount > bucket.getMaxAccessCount() ? readAccessCount :
+              bucket.getMaxAccessCount());
+    });
+  }
+
+  private void updateRootLevelMinMaxAccessCount(long readAccessCount) {
+    EntityReadAccessHeatMapResponse rootEntity =
+        this.entityReadAccessHeatMapRespRef.get();
+    rootEntity.setMinAccessCount(
+        readAccessCount < rootEntity.getMinAccessCount() ? readAccessCount :
+            rootEntity.getMinAccessCount());
+    rootEntity.setMaxAccessCount(
+        readAccessCount > rootEntity.getMaxAccessCount() ? readAccessCount :
+            rootEntity.getMaxAccessCount());
+  }
+
+  private void updateBucketSize(EntityReadAccessHeatMapResponse bucket,
+                                       long keySize) {
+    bucket.setSize(bucket.getSize() + keySize);
+  }
+
+  public void queryLogs(String path, String entityType, String startDate,
+                        ReconHttpClient reconHttpClient) {
+    try {
+      SecurityUtil.doAsCurrentUser((PrivilegedExceptionAction<Void>) () -> {
+        InetSocketAddress solrAddress =
+            HddsUtils.getSolrAddress(ozoneConfiguration);
+        if (null == solrAddress) {
+          throw new ConfigurationException(String.format("For heatmap " +
+                  "feature Solr host and port configuration must be provided " 
+
+                  "for config key %s. Example format -> <Host>:<Port>",
+              OZONE_SOLR_ADDRESS_KEY));
+        }
+        List<NameValuePair> urlParameters = new ArrayList<>();
+        validateAndAddSolrReqParam(escapeQueryParamVal(path),
+            "resource", urlParameters);
+        validateAndAddSolrReqParam(entityType, "resType", urlParameters);
+        validateStartDate(startDate, urlParameters);
+        final String solrAuditResp =
+            reconHttpClient.sendRequest(
+                prepareHttpRequest(urlParameters, solrAddress,
+                    "/solr/ranger_audits/query"));
+        LOG.info("Solr Response: {}", solrAuditResp);
+        JsonElement jsonElement = JsonParser.parseString(solrAuditResp);
+        JsonObject jsonObject = jsonElement.getAsJsonObject();
+        JsonElement facets = jsonObject.get("facets");
+        JsonElement resources = facets.getAsJsonObject().get("resources");
+        JsonObject facetsBucketsObject = new JsonObject();
+        if (null != resources) {
+          facetsBucketsObject = resources.getAsJsonObject();
+        }
+        ObjectMapper objectMapper = new ObjectMapper();
+
+        AuditLogFacetsResources auditLogFacetsResources =
+            objectMapper.readValue(
+                facetsBucketsObject.toString(), AuditLogFacetsResources.class);
+        EntityMetaData[] entities = auditLogFacetsResources.getBuckets();
+        if (null != entities && !(ArrayUtils.isEmpty(entities))) {
+          generateHeatMap(entities);
+        }
+        return null;
+      });
+    } catch (JsonProcessingException e) {
+      LOG.error("Solr Query Output Processing Error: {} ", e);
+    } catch (IOException e) {
+      LOG.error("Error while generating the access heatmap: {} ", e);
+    }
+  }
+
+  private String escapeQueryParamVal(String path) {
+    StringBuilder sb = new StringBuilder();
+    if (!StringUtils.isEmpty(path)) {
+      sb.append("*");
+      sb.append(ClientUtils.escapeQueryChars(path));
+      sb.append("*");
+    }
+    return sb.toString();
+  }
+
+  private void validateStartDate(String startDate,
+                                 List<NameValuePair> urlParameters) {
+    if (!StringUtils.isEmpty(startDate)) {
+      ZonedDateTime lastXUnitsOfZonedDateTime = null;
+      startDate = validateStartDate(startDate);
+      if (null != LastXUnit.getType(startDate)) {
+        lastXUnitsOfZonedDateTime =
+            lastXUnitsOfTime(LastXUnit.getType(startDate));
+      } else {
+        lastXUnitsOfZonedDateTime =
+            epochMilliSToZDT(startDate);
+      }
+      urlParameters.add(new BasicNameValuePair("fq",
+          setDateRange("evtTime",
+              Date.from(lastXUnitsOfZonedDateTime.toInstant()), null)));
+    }
+  }
+
+  private void validateAndAddSolrReqParam(
+      String paramVal, String paramName,
+      List<NameValuePair> urlParameters) {
+    if (!StringUtils.isEmpty(paramVal)) {
+      StringBuilder sb = new StringBuilder(paramName);
+      sb.append(":");
+      sb.append(paramVal);
+      urlParameters.add(new BasicNameValuePair("fq", sb.toString()));
+    }
+  }
+
+  private HttpRequestWrapper prepareHttpRequest(
+      List<NameValuePair> urlParameters, InetSocketAddress solrAddress,
+      String uri) {
+    // add request parameter, form parameters
+    urlParameters.add(new BasicNameValuePair("q", "*:*"));
+    urlParameters.add(new BasicNameValuePair("wt", "json"));
+    urlParameters.add(new BasicNameValuePair("fl",
+        "access, agent, repo, resource, resType, event_count"));
+    urlParameters.add(new BasicNameValuePair("fq", "access:read"));
+    urlParameters.add(new BasicNameValuePair("fq", "repo:cm_ozone"));
+
+    urlParameters.add(new BasicNameValuePair("sort", "event_count desc"));
+    urlParameters.add(new BasicNameValuePair("start", "0"));
+    urlParameters.add(new BasicNameValuePair("rows", "0"));
+
+    urlParameters.add(new BasicNameValuePair("json.facet", "{\n" +
+        "    resources:{\n" +
+        "      type : terms,\n" +
+        "      field : resource,\n" +
+        "      sort : \"read_access_count desc\",\n" +
+        "      limit : 100,\n" +
+        "      facet:{\n" +
+        "        read_access_count : \"sum(event_count)\"\n" +
+        "      }\n" +
+        "    }\n" +
+        "  }"));
+    HttpRequestWrapper requestWrapper =
+        new HttpRequestWrapper(solrAddress.getHostName(),
+            solrAddress.getPort(), uri,
+            urlParameters, HttpRequestWrapper.HttpReqType.POST);
+    return requestWrapper;
+  }
+
+  private void generateHeatMap(EntityMetaData[] entities) {
+    EntityReadAccessHeatMapResponse rootEntity =
+        entityReadAccessHeatMapRespRef.get();
+    rootEntity.setMinAccessCount(entities[0].getReadAccessCount());
+    rootEntity.setLabel("root");
+    List<EntityReadAccessHeatMapResponse> children =
+        rootEntity.getChildren();
+    Arrays.stream(entities).forEach(entityMetaData -> {
+      String path = entityMetaData.getVal();
+      String[] split = path.split("/");

Review Comment:
   The split size is not validated before adding to the bucket; if the path contains no "/", indexing split[1] can cause an ArrayIndexOutOfBoundsException.
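   One possible guard (a sketch reusing the same locals; an early `return` inside the `forEach` lambda just skips that entity):

```java
String[] split = path.split("/");
if (split.length < 2) {
  // No volume/bucket structure in the audit resource path; skip it
  // rather than risk ArrayIndexOutOfBoundsException on split[1].
  return;
}
```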



##########
hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/solr/SolrAccessAuditsService.java:
##########
@@ -0,0 +1,161 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hadoop.ozone.recon.solr;
+
+import com.google.inject.Inject;
+import org.apache.hadoop.hdds.HddsUtils;
+import org.apache.hadoop.hdds.conf.ConfigurationException;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.recon.ReconConfig;
+import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
+import org.apache.hadoop.ozone.OzoneSecurityUtil;
+import org.apache.hadoop.ozone.recon.api.types.EntityReadAccessHeatMapResponse;
+import org.apache.hadoop.ozone.recon.http.ReconHttpClient;
+import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
+import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+
+import static org.apache.hadoop.hdds.recon.ReconConfig.ConfigStrings.OZONE_RECON_KERBEROS_PRINCIPAL_KEY;
+import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_SOLR_ADDRESS_KEY;
+import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
+
+/**
+ * This class is an implementation of the abstract class for querying an
+ * entity's audit logs through SolrAccessAuditsService.
+ */
+public class SolrAccessAuditsService extends AccessAuditsService {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(SolrAccessAuditsService.class);
+  private UserGroupInformation reconUser;
+  private final OzoneConfiguration ozoneConfiguration;
+  private final ReconNamespaceSummaryManager reconNamespaceSummaryManager;
+  private final ReconOMMetadataManager omMetadataManager;
+  private final OzoneStorageContainerManager reconSCM;
+  private ReconHttpClient reconHttpClient;
+
+  @Inject
+  public SolrAccessAuditsService(OzoneConfiguration ozoneConfiguration,
+                                 ReconNamespaceSummaryManager
+                                     namespaceSummaryManager,
+                                 ReconOMMetadataManager omMetadataManager,
+                                 OzoneStorageContainerManager reconSCM,
+                                 ReconHttpClient reconHttpClient) {
+    this.reconHttpClient = reconHttpClient;
+    this.ozoneConfiguration = ozoneConfiguration;
+    this.reconNamespaceSummaryManager = namespaceSummaryManager;
+    this.omMetadataManager = omMetadataManager;
+    this.reconSCM = reconSCM;
+    this.reconUser = initializeReconUGI(ozoneConfiguration);
+  }
+
+  /**
+   * Initialize recon UGI if security is enabled in the configuration.
+   *
+   * @param conf OzoneConfiguration
+   * @return recon UserGroupInformation object
+   */
+  private static UserGroupInformation initializeReconUGI(
+      OzoneConfiguration conf) {
+    try {
+      if (OzoneSecurityUtil.isSecurityEnabled(conf)) {
+        return getReconUGI(conf);
+      }
+    } catch (Exception ex) {
+      LOG.error("Error initializing recon UGI. ", ex);

Review Comment:
   This should throw an exception if initialization of the security UGI fails.
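   One way to fail fast instead (a sketch; wrapping in IOException is illustrative, and the constructor would then have to propagate it):

```java
private static UserGroupInformation initializeReconUGI(
    OzoneConfiguration conf) throws IOException {
  if (!OzoneSecurityUtil.isSecurityEnabled(conf)) {
    return null; // security disabled, no recon UGI needed
  }
  try {
    return getReconUGI(conf);
  } catch (Exception ex) {
    // Surface the failure instead of continuing with a null recon UGI.
    throw new IOException("Error initializing recon UGI", ex);
  }
}
```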



##########
hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/solr/SolrUtil.java:
##########
@@ -0,0 +1,492 @@
+  private void updateBucketLevelMinMaxAccessCount(
+      EntityReadAccessHeatMapResponse bucket) {
+    List<EntityReadAccessHeatMapResponse> children =
+        bucket.getChildren();
+    bucket.setMinAccessCount(Long.MAX_VALUE);

Review Comment:
   The default for both minAccessCount and maxAccessCount is 0. Considering just this code, if the children count is 0, minAccessCount remains Long.MAX_VALUE.
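   A minimal guard (sketch against the diff's own fields):

```java
List<EntityReadAccessHeatMapResponse> children = bucket.getChildren();
if (children.isEmpty()) {
  // Keep the default 0/0 min/max instead of leaving
  // minAccessCount at Long.MAX_VALUE.
  return;
}
bucket.setMinAccessCount(Long.MAX_VALUE);
```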



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

