[ 
https://issues.apache.org/jira/browse/DRILL-5270?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16464375#comment-16464375
 ] 

ASF GitHub Bot commented on DRILL-5270:
---------------------------------------

kkhatua closed pull request #755: DRILL-5270: Improve loading of profiles 
listing in the WebUI
URL: https://github.com/apache/drill/pull/755
 
 
   

This is a PR merged from a forked repository.
As GitHub hides the original diff on merge, it is displayed below for
the sake of provenance:

As this is a foreign pull request (from a fork), the diff is supplied
below (as it won't show otherwise due to GitHub magic):

diff --git 
a/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java 
b/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
index 54fb46ab68..1dafb51f06 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
@@ -165,6 +165,8 @@ private ExecConstants() {
   public static final String SYS_STORE_PROVIDER_LOCAL_ENABLE_WRITE = 
"drill.exec.sys.store.provider.local.write";
   public static final String PROFILES_STORE_INMEMORY = 
"drill.exec.profiles.store.inmemory";
   public static final String PROFILES_STORE_CAPACITY = 
"drill.exec.profiles.store.capacity";
+  public static final String PROFILES_STORE_ARCHIVE_ENABLED = 
"drill.exec.profiles.store.archive.enabled";
+  public static final String PROFILES_STORE_ARCHIVE_RATE = 
"drill.exec.profiles.store.archive.rate";
   public static final String IMPERSONATION_ENABLED = 
"drill.exec.impersonation.enabled";
   public static final String IMPERSONATION_MAX_CHAINED_USER_HOPS = 
"drill.exec.impersonation.max_chained_user_hops";
   public static final String AUTHENTICATION_MECHANISMS = 
"drill.exec.security.auth.mechanisms";
diff --git 
a/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/profile/ProfileResources.java
 
b/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/profile/ProfileResources.java
index ec06f0ef4b..3569db972c 100644
--- 
a/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/profile/ProfileResources.java
+++ 
b/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/profile/ProfileResources.java
@@ -18,6 +18,7 @@
 package org.apache.drill.exec.server.rest.profile;
 
 import java.text.SimpleDateFormat;
+import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Date;
 import java.util.Iterator;
@@ -58,6 +59,7 @@
 import org.apache.drill.exec.work.foreman.Foreman;
 import org.glassfish.jersey.server.mvc.Viewable;
 
+import com.google.common.base.Stopwatch;
 import com.google.common.collect.Lists;
 
 @Path("/")
@@ -71,6 +73,7 @@
   @Inject SecurityContext sc;
 
   public static class ProfileInfo implements Comparable<ProfileInfo> {
+    private static final String TRAILING_DOTS = " ... ";
     private static final int QUERY_SNIPPET_MAX_CHAR = 150;
     private static final int QUERY_SNIPPET_MAX_LINES = 8;
 
@@ -171,13 +174,13 @@ private String extractQuerySnippet(String queryText) {
       //Trimming down based on line-count
       if (QUERY_SNIPPET_MAX_LINES < queryParts.length) {
         int linesConstructed = 0;
-        StringBuilder lineCappedQuerySnippet = new StringBuilder();
+        StringBuilder lineCappedQuerySnippet = new 
StringBuilder(QUERY_SNIPPET_MAX_CHAR + TRAILING_DOTS.length());
         for (String qPart : queryParts) {
           lineCappedQuerySnippet.append(qPart);
           if (++linesConstructed < QUERY_SNIPPET_MAX_LINES) {
             lineCappedQuerySnippet.append(System.lineSeparator());
           } else {
-            lineCappedQuerySnippet.append(" ... ");
+            lineCappedQuerySnippet.append(TRAILING_DOTS);
             break;
           }
         }
@@ -260,8 +263,6 @@ public QProfiles getProfilesJSON(@Context UriInfo uriInfo) {
 
       Collections.sort(runningQueries, Collections.reverseOrder());
 
-      final List<ProfileInfo> finishedQueries = Lists.newArrayList();
-
       //Defining #Profiles to load
       int maxProfilesToLoad = 
work.getContext().getConfig().getInt(ExecConstants.HTTP_MAX_PROFILES);
       String maxProfilesParams = 
uriInfo.getQueryParameters().getFirst(MAX_QPROFILES_PARAM);
@@ -269,8 +270,9 @@ public QProfiles getProfilesJSON(@Context UriInfo uriInfo) {
         maxProfilesToLoad = Integer.valueOf(maxProfilesParams);
       }
 
-      final Iterator<Map.Entry<String, QueryProfile>> range = 
completed.getRange(0, maxProfilesToLoad);
+      final List<ProfileInfo> finishedQueries = new 
ArrayList<ProfileResources.ProfileInfo>(maxProfilesToLoad);
 
+      final Iterator<Map.Entry<String, QueryProfile>> range = 
completed.getRange(0, maxProfilesToLoad);
       while (range.hasNext()) {
         try {
           final Map.Entry<String, QueryProfile> profileEntry = range.next();
diff --git 
a/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/store/DrillSysFilePathFilter.java
 
b/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/store/DrillSysFilePathFilter.java
new file mode 100644
index 0000000000..dd3f585685
--- /dev/null
+++ 
b/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/store/DrillSysFilePathFilter.java
@@ -0,0 +1,56 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.sys.store;
+
+import static org.apache.drill.exec.ExecConstants.DRILL_SYS_FILE_SUFFIX;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
+
+/**
+ * Filter for Drill System Files
+ */
+public class DrillSysFilePathFilter implements PathFilter {
+
+  //NOTE: The filename is a combination of the query ID (a monotonically
+  //decreasing value derived from the epoch timestamp) and a random value.
+  //This filter trims the listing to Drill system files, optionally at or
+  //below a cutoff file name.
+  String cutoffFileName = null;
+  public DrillSysFilePathFilter() {}
+
+  public DrillSysFilePathFilter(String cutoffSysFileName) {
+    if (cutoffSysFileName != null) {
+      this.cutoffFileName = cutoffSysFileName + DRILL_SYS_FILE_SUFFIX;
+    }
+  }
+
+  /* (non-Javadoc)
+   * @see org.apache.hadoop.fs.PathFilter#accept(org.apache.hadoop.fs.Path)
+   */
+  @Override
+  public boolean accept(Path file){
+    if (file.getName().endsWith(DRILL_SYS_FILE_SUFFIX)) {
+      if (cutoffFileName != null) {
+        return (file.getName().compareTo(cutoffFileName) <= 0);
+      } else {
+        return true;
+      }
+    }
+    return false;
+  }
+}
diff --git 
a/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/store/LocalPersistentStore.java
 
b/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/store/LocalPersistentStore.java
index 313a9be5ea..d15443a4ae 100644
--- 
a/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/store/LocalPersistentStore.java
+++ 
b/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/store/LocalPersistentStore.java
@@ -28,7 +28,10 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
+import java.util.TreeSet;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import javax.annotation.Nullable;
@@ -37,47 +40,120 @@
 import org.apache.drill.common.collections.ImmutableEntry;
 import org.apache.drill.common.concurrent.AutoCloseableLock;
 import org.apache.drill.common.config.DrillConfig;
+import org.apache.drill.exec.ExecConstants;
 import org.apache.drill.exec.exception.VersionMismatchException;
 import org.apache.drill.exec.store.dfs.DrillFileSystem;
-import org.apache.drill.exec.util.DrillFileSystemUtil;
 import org.apache.drill.exec.store.sys.BasePersistentStore;
 import org.apache.drill.exec.store.sys.PersistentStoreConfig;
 import org.apache.drill.exec.store.sys.PersistentStoreMode;
+import org.apache.drill.exec.util.DrillFileSystemUtil;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Function;
 import com.google.common.base.Preconditions;
+import com.google.common.base.Stopwatch;
 import com.google.common.collect.Iterables;
-import com.google.common.collect.Lists;
-import org.apache.hadoop.fs.PathFilter;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 public class LocalPersistentStore<V> extends BasePersistentStore<V> {
+  private static final String ARCHIVE_LOCATION = "archived";
+
   private static final Logger logger = 
LoggerFactory.getLogger(LocalPersistentStore.class);
 
   private final ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
   private final AutoCloseableLock readLock = new 
AutoCloseableLock(readWriteLock.readLock());
   private final AutoCloseableLock writeLock = new 
AutoCloseableLock(readWriteLock.writeLock());
+  private final AutoCloseableLock readCachedProfilesLock = new 
AutoCloseableLock(new ReentrantLock());
 
+  //Provides a threshold above which we report the time to load
+  private static final long RESPONSE_TIME_THRESHOLD_MSEC = 2000L;
+
+  private static final int drillSysFileExtSize = 
DRILL_SYS_FILE_SUFFIX.length();
   private final Path basePath;
   private final PersistentStoreConfig<V> config;
   private final DrillFileSystem fs;
   private int version = -1;
+  private Function<String, Entry<String, V>> transformer;
+
+  private TreeSet<String> profilesSet;
+  private int profilesSetSize;
+  private PathFilter sysFileSuffixFilter;
+//  private String mostRecentProfile;
+  private long basePathLastModified;
+  private long lastKnownFileCount;
+  private int maxSetCapacity;
+  private Stopwatch listAndBuildWatch;
+  private Stopwatch transformWatch;
 
-  public LocalPersistentStore(DrillFileSystem fs, Path base, 
PersistentStoreConfig<V> config) {
+  private boolean enableArchiving;
+  private Path archivePath;
+  private TreeSet<String> pendingArchivalSet;
+  private int pendingArchivalSetSize;
+  private int archivalThreshold;
+  private int archivalRate;
+  private Iterable<Entry<String, V>> iterableProfileSet;
+  private Stopwatch archiveWatch;
+
+  public LocalPersistentStore(DrillFileSystem fs, Path base, 
PersistentStoreConfig<V> config, DrillConfig drillConfig) {
     super();
     this.basePath = new Path(base, config.getName());
     this.config = config;
     this.fs = fs;
+    //MRU Profile Cache
+    this.profilesSet = new TreeSet<String>();
+    this.profilesSetSize= 0;
+    this.basePathLastModified = 0L;
+    this.lastKnownFileCount = 0L;
+    this.sysFileSuffixFilter = new DrillSysFilePathFilter();
+    //this.mostRecentProfile = null;
+
+    //Initializing for archiving
+    if (drillConfig != null ) {
+      this.enableArchiving = 
drillConfig.getBoolean(ExecConstants.PROFILES_STORE_ARCHIVE_ENABLED); 
//(maxStoreCapacity > 0); //Implicitly infer
+      this.archivalThreshold = 
drillConfig.getInt(ExecConstants.PROFILES_STORE_CAPACITY);
+      this.archivalRate = 
drillConfig.getInt(ExecConstants.PROFILES_STORE_ARCHIVE_RATE);
+    } else {
+      this.enableArchiving = false;
+    }
+    this.pendingArchivalSet = new TreeSet<String>();
+    this.pendingArchivalSetSize = 0;
+    this.archivePath = new Path(basePath, ARCHIVE_LOCATION);
 
+    // Timing
+    this.listAndBuildWatch = Stopwatch.createUnstarted();
+    this.transformWatch = Stopwatch.createUnstarted();
+    this.archiveWatch = Stopwatch.createUnstarted();
+
+    // One time transformer function instantiation
+    this.transformer = new Function<String, Entry<String, V>>() {
+      @Nullable
+      @Override
+      public Entry<String, V> apply(String key) {
+        return new ImmutableEntry<>(key, get(key));
+      }
+    };
+
+    //Base Dir
     try {
       if (!fs.mkdirs(basePath)) {
         version++;
       }
+      //Creating Archive if required
+      if (enableArchiving) {
+        try {
+          if (!fs.exists(archivePath)) {
+            fs.mkdirs(archivePath);
+          }
+        } catch (IOException e) {
+          logger.warn("Disabling profile archiving due to failure in creating 
profile archive {} : {}", archivePath, e);
+          this.enableArchiving = false;
+        }
+      }
     } catch (IOException e) {
       throw new RuntimeException("Failure setting pstore configuration path.");
     }
@@ -114,47 +190,178 @@ public static DrillFileSystem getFileSystem(DrillConfig 
config, Path root) throw
 
   @Override
   public Iterator<Map.Entry<String, V>> getRange(int skip, int take) {
-    try (AutoCloseableLock lock = readLock.open()) {
+    //Marking currently seen modification time
+    long currBasePathModified = 0L;
+    try {
+      currBasePathModified = fs.getFileStatus(basePath).getModificationTime();
+    } catch (IOException e) {
+      logger.error("Failed to get FileStatus for {}", basePath, e);
+      throw new RuntimeException(e);
+    }
+
+    logger.info("Requesting thread: {}-{}" , Thread.currentThread().getName(), 
Thread.currentThread().getId());
+    //Acquiring lock to avoid reloading for request coming in before 
completion of profile read
+    try (AutoCloseableLock lock = readCachedProfilesLock.open()) {
       try {
-        // list only files with sys file suffix
-        PathFilter sysFileSuffixFilter = new PathFilter() {
-          @Override
-          public boolean accept(Path path) {
-            return path.getName().endsWith(DRILL_SYS_FILE_SUFFIX);
+        long expectedFileCount = fs.getFileStatus(basePath).getLen();
+        logger.debug("Current ModTime: {} (Last known ModTime: {})", 
currBasePathModified, basePathLastModified);
+        logger.debug("Expected {} files (Last known {} files)", 
expectedFileCount, lastKnownFileCount);
+
+        //Force-read list of profiles based on change of any of the 3 states
+        if (this.basePathLastModified < currBasePathModified  //Has 
ModificationTime changed?
+            || this.lastKnownFileCount != expectedFileCount   //Has Profile 
Count changed?
+            || (skip + take) > maxSetCapacity ) {             //Does 
requestSize exceed current cached size
+
+          if (maxSetCapacity < (skip + take)) {
+            logger.debug("Updating last Max Capacity from {} to {}", 
maxSetCapacity , (skip + take) );
+            maxSetCapacity = skip + take;
           }
-        };
+          //Mark Start Time
+          listAndBuildWatch.reset().start();
 
-        List<FileStatus> fileStatuses = DrillFileSystemUtil.listFiles(fs, 
basePath, false, sysFileSuffixFilter);
-        if (fileStatuses.isEmpty()) {
-          return Collections.emptyIterator();
-        }
+          //Listing ALL DrillSysFiles
+          //Can apply MostRecentProfile name as filter. Unfortunately, Hadoop 
(2.7.1) currently doesn't leverage this to speed up
+          List<FileStatus> fileStatuses = DrillFileSystemUtil.listFiles(fs, 
basePath, false,
+              sysFileSuffixFilter
+              );
+          //Checking if empty
+          if (fileStatuses.isEmpty()) {
+            //WithoutFilter::
+            return Collections.emptyIterator();
+          }
+          //Force a reload of the profile.
+          //Note: We shouldn't need to do this if the load is incremental 
(i.e. using mostRecentProfile)
+          profilesSet.clear();
+          profilesSetSize = 0;
+          int profilesInStoreCount = 0;
 
-        List<String> files = Lists.newArrayList();
-        for (FileStatus stat : fileStatuses) {
-          String s = stat.getPath().getName();
-          files.add(s.substring(0, s.length() - 
DRILL_SYS_FILE_SUFFIX.length()));
-        }
+          if (enableArchiving) {
+            pendingArchivalSet.clear();
+            pendingArchivalSetSize = 0;
+          }
+
+          //Constructing TreeMap from List
+          for (FileStatus stat : fileStatuses) {
+            String profileName = stat.getPath().getName();
+            profilesSetSize = addToProfileSet(profileName, profilesSetSize, 
maxSetCapacity);
+            profilesInStoreCount++;
+          }
+
+          //Archive older profiles
+          if (enableArchiving) {
+            archiveProfiles(fs, profilesInStoreCount);
+          }
 
-        Collections.sort(files);
+          //Report Lag
+          if (listAndBuildWatch.stop().elapsed(TimeUnit.MILLISECONDS) >= 
RESPONSE_TIME_THRESHOLD_MSEC) {
+            logger.warn("Took {} ms to list&map from {} profiles (out of {} 
profiles in store)", listAndBuildWatch.elapsed(TimeUnit.MILLISECONDS)
+                , profilesSetSize, profilesInStoreCount);
+          }
+          //Recording last checked modified time and the most recent profile
+          basePathLastModified = currBasePathModified;
+          /*TODO: mostRecentProfile = profilesSet.first();*/
+          lastKnownFileCount = expectedFileCount;
 
-        return Iterables.transform(Iterables.limit(Iterables.skip(files, 
skip), take), new Function<String, Entry<String, V>>() {
-          @Nullable
-          @Override
-          public Entry<String, V> apply(String key) {
-            return new ImmutableEntry<>(key, get(key));
+          //Transform profileSet for consumption
+          transformWatch.start();
+          iterableProfileSet = Iterables.transform(profilesSet, transformer);
+          if (transformWatch.stop().elapsed(TimeUnit.MILLISECONDS) >= 
RESPONSE_TIME_THRESHOLD_MSEC) {
+            logger.warn("Took {} ms to transform {} profiles", 
transformWatch.elapsed(TimeUnit.MILLISECONDS), profilesSetSize);
           }
-        }).iterator();
+        }
+        return iterableProfileSet.iterator();
       } catch (IOException e) {
         throw new RuntimeException(e);
       }
     }
   }
 
+  private void archiveProfiles(DrillFileSystem fs, int profilesInStoreCount) {
+    if (profilesInStoreCount > archivalThreshold) {
+      int pendingArchivalCount = profilesInStoreCount - archivalThreshold;
+      logger.info("Found {} excess profiles. For now, will attempt archiving 
{} profiles to {}", pendingArchivalCount
+          , Math.min(pendingArchivalCount, archivalRate), archivePath);
+      try {
+        if (fs.isDirectory(archivePath)) {
+          int archivedCount = 0;
+          archiveWatch.reset().start(); //Clocking
+          while (archivedCount < archivalRate) {
+            String toArchive = pendingArchivalSet.pollLast() + 
DRILL_SYS_FILE_SUFFIX;
+            boolean renameStatus = DrillFileSystemUtil.rename(fs, new 
Path(basePath, toArchive), new Path(archivePath, toArchive));
+            if (!renameStatus) {
+              //Stop attempting any more archiving since other StoreProviders 
might be archiving
+              logger.error("Move failed for {} from {} to {}", toArchive, 
basePath.toString(), archivePath.toString());
+              logger.warn("Skip archiving under the assumption that another 
Drillbit is archiving");
+              break;
+            }
+            archivedCount++;
+          }
+          logger.info("Archived {} profiles to {} in {} ms", archivedCount, 
archivePath, archiveWatch.stop().elapsed(TimeUnit.MILLISECONDS));
+        } else {
+          logger.error("Unable to archive {} profiles to {}", 
pendingArchivalSetSize, archivePath.toString());
+        }
+      } catch (IOException e) {
+        e.printStackTrace();
+      }
+    }
+    //Clean up
+    pendingArchivalSet.clear();
+  }
+
+  /**
+   * Add profile name to a TreeSet
+   * @param profileName Name of the profile to add
+   * @param currentSize Provided so as to avoid the need to recount
+   * @param maximumCapacity Maximum number of profiles to maintain
+   * @return Current size of tree after addition
+   */
+  private int addToProfileSet(String profileName, int currentSize, int 
maximumCapacity) {
+    //Add if not reached max capacity
+    if (currentSize < maximumCapacity) {
+      profilesSet.add(profileName.substring(0, profileName.length() - 
drillSysFileExtSize));
+      currentSize++;
+    } else {
+      boolean addedProfile = profilesSet.add(profileName.substring(0, 
profileName.length() - drillSysFileExtSize));
+      //Remove existing 'oldest' file
+      if (addedProfile) {
+        String oldestProfile = profilesSet.pollLast();
+        if (enableArchiving) {
+          addToArchiveSet(oldestProfile, archivalRate);
+        }
+      }
+    }
+    return currentSize;
+  }
+
+  /**
+   * Add profile name to the pending-archival TreeSet
+   * @param profileName Name of the profile to add
+   * @param maximumCapacity Maximum number of profiles to retain for archiving
+   * @return Size of the archival set after the attempted addition
+   */
+  private int addToArchiveSet(String profileName, int maximumCapacity) {
+    //TODO make this global
+    int currentSize = pendingArchivalSet.size();
+    //Add if not reached max capacity
+    if (currentSize < maximumCapacity) {
+      pendingArchivalSet.add(profileName);
+      currentSize++;
+    } else {
+      boolean addedProfile = pendingArchivalSet.add(profileName);
+      //Remove existing 'youngest' file
+      if (addedProfile) {
+        pendingArchivalSet.pollFirst();
+      }
+    }
+    return currentSize;
+  }
+
   private Path makePath(String name) {
     Preconditions.checkArgument(
         !name.contains("/") &&
-            !name.contains(":") &&
-            !name.contains(".."));
+        !name.contains(":") &&
+        !name.contains(".."));
     return new Path(basePath, name + DRILL_SYS_FILE_SUFFIX);
   }
 
@@ -263,5 +470,4 @@ public void delete(String key) {
   @Override
   public void close() {
   }
-
 }
diff --git 
a/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/store/provider/LocalPersistentStoreProvider.java
 
b/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/store/provider/LocalPersistentStoreProvider.java
index 0b4a20128f..e283f33338 100644
--- 
a/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/store/provider/LocalPersistentStoreProvider.java
+++ 
b/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/store/provider/LocalPersistentStoreProvider.java
@@ -41,6 +41,8 @@
   // This flag is used in testing. Ideally, tests should use a specific 
PersistentStoreProvider that knows
   // how to handle this flag.
   private final boolean enableWrite;
+  //Config reference for archiving
+  private final DrillConfig drillConfig;
 
   public LocalPersistentStoreProvider(final PersistentStoreRegistry<?> 
registry) throws StoreException {
     this(registry.getConfig());
@@ -48,6 +50,7 @@ public LocalPersistentStoreProvider(final 
PersistentStoreRegistry<?> registry) t
 
   public LocalPersistentStoreProvider(final DrillConfig config) throws 
StoreException {
     this.path = new 
Path(config.getString(ExecConstants.SYS_STORE_PROVIDER_LOCAL_PATH));
+    this.drillConfig = config;
     this.enableWrite = 
config.getBoolean(ExecConstants.SYS_STORE_PROVIDER_LOCAL_ENABLE_WRITE);
     try {
       this.fs = LocalPersistentStore.getFileSystem(config, path);
@@ -62,7 +65,7 @@ public LocalPersistentStoreProvider(final DrillConfig config) 
throws StoreExcept
     case BLOB_PERSISTENT:
     case PERSISTENT:
       if (enableWrite) {
-        return new LocalPersistentStore<>(fs, path, storeConfig);
+        return new LocalPersistentStore<>(fs, path, storeConfig, drillConfig);
       }
       return new NoWriteLocalStore<>();
     default:
diff --git 
a/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/store/provider/ZookeeperPersistentStoreProvider.java
 
b/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/store/provider/ZookeeperPersistentStoreProvider.java
index a5502cbd1f..bee532ba58 100644
--- 
a/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/store/provider/ZookeeperPersistentStoreProvider.java
+++ 
b/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/store/provider/ZookeeperPersistentStoreProvider.java
@@ -40,6 +40,8 @@
   private final CuratorFramework curator;
   private final DrillFileSystem fs;
   private final Path blobRoot;
+  //Reference for Archiving
+  private final DrillConfig drillConfig;
 
   public ZookeeperPersistentStoreProvider(final 
PersistentStoreRegistry<ZKClusterCoordinator> registry) throws StoreException {
     this(registry.getConfig(), registry.getCoordinator().getCurator());
@@ -48,13 +50,12 @@ public ZookeeperPersistentStoreProvider(final 
PersistentStoreRegistry<ZKClusterC
   @VisibleForTesting
   public ZookeeperPersistentStoreProvider(final DrillConfig config, final 
CuratorFramework curator) throws StoreException {
     this.curator = curator;
-
+    this.drillConfig = config;
     if (config.hasPath(DRILL_EXEC_SYS_STORE_PROVIDER_ZK_BLOBROOT)) {
       blobRoot = new 
Path(config.getString(DRILL_EXEC_SYS_STORE_PROVIDER_ZK_BLOBROOT));
     }else{
       blobRoot = LocalPersistentStore.getLogDir();
     }
-
     try {
       this.fs = LocalPersistentStore.getFileSystem(config, blobRoot);
     } catch (IOException ex) {
@@ -66,7 +67,7 @@ public ZookeeperPersistentStoreProvider(final DrillConfig 
config, final CuratorF
   public <V> PersistentStore<V> getOrCreateStore(final 
PersistentStoreConfig<V> config) throws StoreException {
     switch(config.getMode()){
     case BLOB_PERSISTENT:
-      return new LocalPersistentStore<>(fs, blobRoot, config);
+      return new LocalPersistentStore<>(fs, blobRoot, config, drillConfig);
     case PERSISTENT:
       final ZookeeperPersistentStore<V> store = new 
ZookeeperPersistentStore<>(curator, config);
       try {
diff --git 
a/exec/java-exec/src/main/java/org/apache/drill/exec/util/DrillFileSystemUtil.java
 
b/exec/java-exec/src/main/java/org/apache/drill/exec/util/DrillFileSystemUtil.java
index 56d93851de..3aaf779c57 100644
--- 
a/exec/java-exec/src/main/java/org/apache/drill/exec/util/DrillFileSystemUtil.java
+++ 
b/exec/java-exec/src/main/java/org/apache/drill/exec/util/DrillFileSystemUtil.java
@@ -88,4 +88,16 @@ public boolean accept(Path path) {
     return FileSystemUtil.listAll(fs, path, recursive, 
FileSystemUtil.mergeFilters(DRILL_SYSTEM_FILTER, filters));
   }
 
+  /**
+   * Returns the status of a file/directory specified in source path to be 
renamed/moved to a destination path
+   *
+   * @param fs current file system
+   * @param src path to source
+   * @param dst path to destination
+   * @return status of rename/move
+   */
+  public static boolean rename(FileSystem fs, Path src, Path dst) throws 
IOException {
+    return FileSystemUtil.rename(fs, src, dst);
+  }
+
 }
diff --git 
a/exec/java-exec/src/main/java/org/apache/drill/exec/util/FileSystemUtil.java 
b/exec/java-exec/src/main/java/org/apache/drill/exec/util/FileSystemUtil.java
index 84b22b610c..e6003ebca3 100644
--- 
a/exec/java-exec/src/main/java/org/apache/drill/exec/util/FileSystemUtil.java
+++ 
b/exec/java-exec/src/main/java/org/apache/drill/exec/util/FileSystemUtil.java
@@ -135,6 +135,18 @@ public boolean accept(Path path) {
     };
   }
 
+  /**
+   * Helper method that will rename/move file specified in the source path to 
a destination path
+   *
+   * @param fs current file system
+   * @param src path to source
+   * @param dst path to destination
+   * @return status of rename/move
+   */
+  public static boolean rename(FileSystem fs, Path src, Path dst) throws 
IOException {
+    return fs.rename(src, dst);
+  }
+
   /**
    * Helper method that will store in given holder statuses of all directories 
present in given path applying custom filter.
    * If recursive flag is set to true, will call itself recursively to add 
statuses of nested directories.
diff --git a/exec/java-exec/src/main/resources/drill-module.conf 
b/exec/java-exec/src/main/resources/drill-module.conf
index 32be387ba4..f793c9d55e 100644
--- a/exec/java-exec/src/main/resources/drill-module.conf
+++ b/exec/java-exec/src/main/resources/drill-module.conf
@@ -174,7 +174,11 @@ drill.exec: {
   },
   profiles.store: {
     inmemory: false,
-    capacity: 1000
+    capacity: 1000,
+    archive: {
+      enabled: false,
+      rate: 1000
+    }
   },
   impersonation: {
     enabled: false,


 

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
[email protected]


> Improve loading of profiles listing in the WebUI
> ------------------------------------------------
>
>                 Key: DRILL-5270
>                 URL: https://issues.apache.org/jira/browse/DRILL-5270
>             Project: Apache Drill
>          Issue Type: Improvement
>          Components: Web Server
>    Affects Versions: 1.9.0
>            Reporter: Kunal Khatua
>            Assignee: Kunal Khatua
>            Priority: Major
>             Fix For: 1.14.0
>
>
> Currently, as the number of profiles increase, we reload the same list of 
> profiles from the FS.
> An ideal improvement would be to detect if there are any new profiles and 
> only reload from the disk then. Otherwise, a cached list is sufficient.
> For a directory of 280K profiles, the load time is close to 6 seconds on a 32 
> core server. With the caching, we can get it down to as much as a few 
> milliseconds.
> To render the cache as invalid, we inspect the last modified time of the 
> directory to confirm whether a reload is needed. 



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

Reply via email to