This is an automated email from the ASF dual-hosted git repository.

mmiller pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/accumulo.git


The following commit(s) were added to refs/heads/main by this push:
     new bd46219  Create AccumuloDataVersion (#2234)
bd46219 is described below

commit bd46219580375f36861961c092fba59fa96041a3
Author: Mike Miller <mmil...@apache.org>
AuthorDate: Wed Aug 18 09:06:21 2021 -0400

    Create AccumuloDataVersion (#2234)
    
    * Create new class for data version called AccumuloDataVersion
    * Move data version from ServerConstants to AccumuloDataVersion and add
    javadoc description of the different version numbers
    * Create AccumuloDataVersion.get() method for getting version (see the usage sketch after the change summary below)
    * Move other constants from ServerConstants to Constants
    * Rename ServerConstants to ServerDirs
---
 .../java/org/apache/accumulo/core/Constants.java   | 10 ++-
 .../apache/accumulo/core/file/rfile/RFileTest.java |  8 ---
 .../miniclusterImpl/MiniAccumuloClusterImpl.java   |  6 +-
 .../accumulo/server/AccumuloDataVersion.java       | 81 ++++++++++++++++++++++
 .../org/apache/accumulo/server/ServerContext.java  | 20 +++---
 .../{ServerConstants.java => ServerDirs.java}      | 63 ++++-------------
 .../org/apache/accumulo/server/ServerInfo.java     | 14 ++--
 .../apache/accumulo/server/fs/VolumeManager.java   |  6 +-
 .../apache/accumulo/server/gc/GcVolumeUtil.java    |  6 +-
 .../apache/accumulo/server/init/Initialize.java    | 42 +++++------
 .../apache/accumulo/server/util/ChangeSecret.java  | 18 ++---
 ...erverConstantsTest.java => ServerDirsTest.java} | 35 +++++-----
 .../server/security/SystemCredentialsTest.java     | 15 ++--
 .../accumulo/gc/GarbageCollectionAlgorithm.java    |  5 +-
 .../tableOps/tableExport/WriteExportFiles.java     |  4 +-
 .../manager/tableOps/tableImport/ImportTable.java  |  4 +-
 .../manager/upgrade/UpgradeCoordinator.java        | 32 ++++-----
 .../accumulo/manager/upgrade/Upgrader8to9.java     |  4 +-
 .../accumulo/manager/upgrade/AccumuloTest.java     | 27 ++++----
 .../org/apache/accumulo/tserver/TabletServer.java  |  3 +-
 .../org/apache/accumulo/tserver/log/DfsLogger.java |  6 +-
 .../test/MissingWalHeaderCompletesRecoveryIT.java  |  6 +-
 .../java/org/apache/accumulo/test/VolumeIT.java    |  4 +-
 .../UnusedWalDoesntCloseReplicationStatusIT.java   |  4 +-
 24 files changed, 232 insertions(+), 191 deletions(-)
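
    To illustrate the bullets above: a minimal, hypothetical startup-time check
    written against the classes this patch introduces. The class name
    DataVersionCheckSketch and its standalone main() are invented for
    illustration and are not part of the patch; it assumes a site configuration
    reachable via SiteConfiguration.auto() and at least one initialized volume.

    import org.apache.accumulo.core.conf.SiteConfiguration;
    import org.apache.accumulo.server.AccumuloDataVersion;
    import org.apache.accumulo.server.ServerContext;
    import org.apache.accumulo.server.ServerDirs;
    import org.apache.accumulo.server.fs.VolumeManagerImpl;
    import org.apache.hadoop.conf.Configuration;

    public class DataVersionCheckSketch {
      public static void main(String[] args) throws Exception {
        var siteConfig = SiteConfiguration.auto();
        var hadoopConf = new Configuration();
        // ServerDirs (formerly ServerConstants) resolves on-disk locations
        ServerDirs serverDirs = new ServerDirs(siteConfig, hadoopConf);
        try (var fs = VolumeManagerImpl.get(siteConfig, hadoopConf)) {
          // any volume will do; every volume is expected to hold the same version
          int persisted = serverDirs.getAccumuloPersistentVersion(fs.getFirst());
          // throws IllegalStateException unless persisted is in AccumuloDataVersion.CAN_RUN
          ServerContext.ensureDataVersionCompatible(persisted);
          System.out.println("stored data version " + persisted
              + ", current data version " + AccumuloDataVersion.get());
        }
      }
    }

    Putting the number behind AccumuloDataVersion.get() keeps the data version
    separate from the directory handling that stays in the renamed ServerDirs.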

diff --git a/core/src/main/java/org/apache/accumulo/core/Constants.java 
b/core/src/main/java/org/apache/accumulo/core/Constants.java
index 8559656..a46803f 100644
--- a/core/src/main/java/org/apache/accumulo/core/Constants.java
+++ b/core/src/main/java/org/apache/accumulo/core/Constants.java
@@ -21,8 +21,15 @@ package org.apache.accumulo.core;
 import static java.nio.charset.StandardCharsets.UTF_8;
 
 public class Constants {
-
+  // defines Accumulo data version constants
   public static final String VERSION = FilteredConstants.VERSION;
+  public static final String VERSION_DIR = "version";
+
+  // important directories
+  public static final String INSTANCE_ID_DIR = "instance_id";
+  public static final String TABLE_DIR = "tables";
+  public static final String RECOVERY_DIR = "recovery";
+  public static final String WAL_DIR = "wal";
 
   // Zookeeper locations
   public static final String ZROOT = "/accumulo";
@@ -116,5 +123,4 @@ public class Constants {
   public static final String HDFS_TABLES_DIR = "/tables";
 
   public static final int DEFAULT_VISIBILITY_CACHE_SIZE = 1000;
-
 }
diff --git 
a/core/src/test/java/org/apache/accumulo/core/file/rfile/RFileTest.java 
b/core/src/test/java/org/apache/accumulo/core/file/rfile/RFileTest.java
index ed62565..94270ad 100644
--- a/core/src/test/java/org/apache/accumulo/core/file/rfile/RFileTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/file/rfile/RFileTest.java
@@ -2324,14 +2324,6 @@ public class RFileTest {
     // This tests that the normal set of operations used to populate a root 
tablet
     conf = getAccumuloConfig(CryptoTest.CRYPTO_ON_CONF);
 
-    // populate the root tablet with info about the default tablet
-    // the root tablet contains the key extent and locations of all the
-    // metadata tablets
-    // String initRootTabFile = ServerConstants.getMetadataTableDir() + 
"/root_tablet/00000_00000."
-    // + 
FileOperations.getNewFileExtension(AccumuloConfiguration.getDefaultConfiguration());
-    // FileSKVWriter mfw = 
FileOperations.getInstance().openWriter(initRootTabFile, fs, conf,
-    // AccumuloConfiguration.getDefaultConfiguration());
-
     TestRFile testRfile = new TestRFile(conf);
     testRfile.openWriter();
 
diff --git 
a/minicluster/src/main/java/org/apache/accumulo/miniclusterImpl/MiniAccumuloClusterImpl.java
 
b/minicluster/src/main/java/org/apache/accumulo/miniclusterImpl/MiniAccumuloClusterImpl.java
index 679f725..a5a85e7 100644
--- 
a/minicluster/src/main/java/org/apache/accumulo/miniclusterImpl/MiniAccumuloClusterImpl.java
+++ 
b/minicluster/src/main/java/org/apache/accumulo/miniclusterImpl/MiniAccumuloClusterImpl.java
@@ -78,8 +78,8 @@ import org.apache.accumulo.fate.zookeeper.ZooUtil;
 import org.apache.accumulo.manager.state.SetGoalState;
 import org.apache.accumulo.minicluster.MiniAccumuloCluster;
 import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.server.ServerConstants;
 import org.apache.accumulo.server.ServerContext;
+import org.apache.accumulo.server.ServerDirs;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.fs.VolumeManagerImpl;
 import org.apache.accumulo.server.init.Initialize;
@@ -470,12 +470,12 @@ public class MiniAccumuloClusterImpl implements 
AccumuloCluster {
     if (config.useExistingInstance()) {
       AccumuloConfiguration acuConf = config.getAccumuloConfiguration();
       Configuration hadoopConf = config.getHadoopConfiguration();
-      ServerConstants serverConstants = new ServerConstants(acuConf, 
hadoopConf);
+      ServerDirs serverDirs = new ServerDirs(acuConf, hadoopConf);
 
       ConfigurationCopy cc = new ConfigurationCopy(acuConf);
       Path instanceIdPath;
       try (var fs = VolumeManagerImpl.get(cc, hadoopConf)) {
-        instanceIdPath = serverConstants.getInstanceIdLocation(fs.getFirst());
+        instanceIdPath = serverDirs.getInstanceIdLocation(fs.getFirst());
       } catch (IOException e) {
         throw new RuntimeException(e);
       }
diff --git 
a/server/base/src/main/java/org/apache/accumulo/server/AccumuloDataVersion.java 
b/server/base/src/main/java/org/apache/accumulo/server/AccumuloDataVersion.java
new file mode 100644
index 0000000..ad617b8
--- /dev/null
+++ 
b/server/base/src/main/java/org/apache/accumulo/server/AccumuloDataVersion.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.accumulo.server;
+
+import java.util.Set;
+
+/**
+ * Class representing the version of data stored in Accumulo.
+ *
+ * This version is separate but related to the file specific version in
+ * {@link org.apache.accumulo.core.file.rfile.RFile}. A version change to 
RFile will reflect a
+ * version change to the AccumuloDataVersion. But a version change to the 
AccumuloDataVersion may
+ * not affect the version number in RFile. For example, changes made to other 
parts of Accumulo that
+ * affects how data is stored, like the metadata table, would change the 
AccumuloDataVersion number
+ * here but not in RFile.
+ *
+ * This number is stored in HDFS under {@link 
org.apache.accumulo.core.Constants#VERSION_DIR}.
+ *
+ * This class is used for checking the version during server startup and 
upgrades.
+ */
+public class AccumuloDataVersion {
+
+  /**
+   * version (10) reflects changes to how root tablet metadata is serialized 
in zookeeper starting
+   * with 2.1
+   */
+  public static final int ROOT_TABLET_META_CHANGES = 10;
+
+  /**
+   * version (9) reflects changes to crypto that resulted in RFiles and WALs 
being serialized
+   * differently in version 2.0.0. Also RFiles in 2.0.0 may have summary data.
+   */
+  public static final int CRYPTO_CHANGES = 9;
+
+  /**
+   * version (8) reflects changes to RFile index (ACCUMULO-1124) AND the 
change to WAL tracking in
+   * ZK in version 1.8.0
+   */
+  public static final int SHORTEN_RFILE_KEYS = 8;
+
+  /**
+   * Historic data versions
+   *
+   * <ul>
+   * <li>version (7) also reflects the addition of a replication table in 1.7.0
+   * <li>version (6) reflects the addition of a separate root table 
(ACCUMULO-1481) in 1.6.0 -
+   * <li>version (5) moves delete file markers for the metadata table into the 
root tablet
+   * <li>version (4) moves logging to HDFS in 1.5.0
+   * </ul>
+   */
+  private static final int CURRENT_VERSION = ROOT_TABLET_META_CHANGES;
+
+  /**
+   * Get the current Accumulo Data Version. See Javadoc of static final 
integers for a detailed
+   * description of that version.
+   *
+   * @return integer representing the Accumulo Data Version
+   */
+  public static int get() {
+    return CURRENT_VERSION;
+  }
+
+  public static final Set<Integer> CAN_RUN =
+      Set.of(SHORTEN_RFILE_KEYS, CRYPTO_CHANGES, CURRENT_VERSION);
+}
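
    The Javadoc above describes how this number gates startup and upgrades. As a
    rough, hypothetical illustration (not part of the patch; the class name
    VersionDecisionSketch and the argument handling are invented), the decision
    can be expressed with nothing but the constants and get() method defined in
    the new class:

    import org.apache.accumulo.server.AccumuloDataVersion;

    public class VersionDecisionSketch {
      public static void main(String[] args) {
        // pretend this was read from the "version" directory, e.g. 8 for 1.8/1.9 data
        int stored = args.length > 0 ? Integer.parseInt(args[0])
            : AccumuloDataVersion.SHORTEN_RFILE_KEYS;
        if (!AccumuloDataVersion.CAN_RUN.contains(stored)) {
          System.out.println("data version " + stored + " cannot be run by this release");
        } else if (stored == AccumuloDataVersion.get()) {
          System.out.println("data version " + stored + " is current; no upgrade needed");
        } else {
          // UpgradeCoordinator walks one Upgrader per version step, e.g. 8 -> 9 -> 10
          System.out.println("would upgrade from " + stored + " to " + AccumuloDataVersion.get());
        }
      }
    }
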
diff --git 
a/server/base/src/main/java/org/apache/accumulo/server/ServerContext.java 
b/server/base/src/main/java/org/apache/accumulo/server/ServerContext.java
index 6056f47..6f1366d 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/ServerContext.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/ServerContext.java
@@ -80,7 +80,7 @@ public class ServerContext extends ClientContext {
 
   private final ServerInfo info;
   private final ZooReaderWriter zooReaderWriter;
-  private final ServerConstants serverConstants;
+  private final ServerDirs serverDirs;
 
   private TableManager tableManager;
   private UniqueNameAllocator nameAllocator;
@@ -98,7 +98,7 @@ public class ServerContext extends ClientContext {
     super(SingletonReservation.noop(), info, info.getSiteConfiguration());
     this.info = info;
     zooReaderWriter = new ZooReaderWriter(info.getSiteConfiguration());
-    serverConstants = info.getServerConstants();
+    serverDirs = info.getServerDirs();
   }
 
   /**
@@ -171,8 +171,8 @@ public class ServerContext extends ClientContext {
     return defaultConfig;
   }
 
-  public ServerConstants getServerConstants() {
-    return serverConstants;
+  public ServerDirs getServerDirs() {
+    return serverDirs;
   }
 
   /**
@@ -286,26 +286,26 @@ public class ServerContext extends ClientContext {
   }
 
   public Set<String> getBaseUris() {
-    return serverConstants.getBaseUris();
+    return serverDirs.getBaseUris();
   }
 
   public List<Pair<Path,Path>> getVolumeReplacements() {
-    return serverConstants.getVolumeReplacements();
+    return serverDirs.getVolumeReplacements();
   }
 
   public Set<String> getTablesDirs() {
-    return serverConstants.getTablesDirs();
+    return serverDirs.getTablesDirs();
   }
 
   public Set<String> getRecoveryDirs() {
-    return serverConstants.getRecoveryDirs();
+    return serverDirs.getRecoveryDirs();
   }
 
   /**
    * Check to see if this version of Accumulo can run against or upgrade the 
passed in data version.
    */
   public static void ensureDataVersionCompatible(int dataVersion) {
-    if (!(ServerConstants.CAN_RUN.contains(dataVersion))) {
+    if (!(AccumuloDataVersion.CAN_RUN.contains(dataVersion))) {
       throw new IllegalStateException("This version of accumulo (" + 
Constants.VERSION
           + ") is not compatible with files stored using data version " + 
dataVersion);
     }
@@ -370,7 +370,7 @@ public class ServerContext extends ClientContext {
     log.info("{} starting", application);
     log.info("Instance {}", getInstanceID());
     // It doesn't matter which Volume is used as they should all have the data 
version stored
-    int dataVersion = 
serverConstants.getAccumuloPersistentVersion(getVolumeManager().getFirst());
+    int dataVersion = 
serverDirs.getAccumuloPersistentVersion(getVolumeManager().getFirst());
     log.info("Data Version {}", dataVersion);
     waitForZookeeperAndHdfs();
 
diff --git 
a/server/base/src/main/java/org/apache/accumulo/server/ServerConstants.java 
b/server/base/src/main/java/org/apache/accumulo/server/ServerDirs.java
similarity index 81%
rename from 
server/base/src/main/java/org/apache/accumulo/server/ServerConstants.java
rename to server/base/src/main/java/org/apache/accumulo/server/ServerDirs.java
index 48e0172..57a5671 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/ServerConstants.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/ServerDirs.java
@@ -30,6 +30,7 @@ import java.util.Objects;
 import java.util.Set;
 import java.util.stream.Collectors;
 
+import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.util.Pair;
@@ -42,49 +43,11 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 
-public class ServerConstants {
-
-  public static final String VERSION_DIR = "version";
-
-  public static final String INSTANCE_ID_DIR = "instance_id";
-
-  /**
-   * version (10) reflects changes to how root tablet metadata is serialized 
in zookeeper starting
-   * with 2.1
-   */
-  public static final int ROOT_TABLET_META_CHANGES = 10;
-
-  /**
-   * version (9) reflects changes to crypto that resulted in RFiles and WALs 
being serialized
-   * differently in version 2.0.0. Also RFiles in 2.0.0 may have summary data.
-   */
-  public static final int CRYPTO_CHANGES = 9;
-
-  /**
-   * version (8) reflects changes to RFile index (ACCUMULO-1124) AND the 
change to WAL tracking in
-   * ZK in version 1.8.0
-   */
-  public static final int SHORTEN_RFILE_KEYS = 8;
-
-  /**
-   * Historic data versions
-   *
-   * <ul>
-   * <li>version (7) also reflects the addition of a replication table in 1.7.0
-   * <li>version (6) reflects the addition of a separate root table 
(ACCUMULO-1481) in 1.6.0 -
-   * <li>version (5) moves delete file markers for the metadata table into the 
root tablet
-   * <li>version (4) moves logging to HDFS in 1.5.0
-   * </ul>
-   *
-   *
-   */
-  public static final int DATA_VERSION = ROOT_TABLET_META_CHANGES;
-
-  public static final Set<Integer> CAN_RUN =
-      Set.of(SHORTEN_RFILE_KEYS, CRYPTO_CHANGES, DATA_VERSION);
-  public static final String TABLE_DIR = "tables";
-  public static final String RECOVERY_DIR = "recovery";
-  public static final String WAL_DIR = "wal";
+/**
+ * Class that holds important server Directories. These need to be separate 
from {@link ServerInfo}
+ * for bootstrapping during initialization.
+ */
+public class ServerDirs {
 
   private Set<String> baseUris;
   private Set<String> tablesDirs;
@@ -94,7 +57,7 @@ public class ServerConstants {
   private final AccumuloConfiguration conf;
   private final Configuration hadoopConf;
 
-  public ServerConstants(AccumuloConfiguration conf, Configuration hadoopConf) 
{
+  public ServerDirs(AccumuloConfiguration conf, Configuration hadoopConf) {
     this.conf = Objects.requireNonNull(conf, "Configuration cannot be null");
     this.hadoopConf = Objects.requireNonNull(hadoopConf, "Hadoop configuration 
cannot be null");
     this.replacementsList = loadVolumeReplacements();
@@ -119,12 +82,12 @@ public class ServerConstants {
     // user-implemented VolumeChoosers)
     LinkedHashSet<String> baseDirsList = new LinkedHashSet<>();
     for (String baseDir : configuredBaseDirs) {
-      Path path = new Path(baseDir, INSTANCE_ID_DIR);
+      Path path = new Path(baseDir, Constants.INSTANCE_ID_DIR);
       String currentIid;
       int currentVersion;
       try {
         currentIid = VolumeManager.getInstanceIDFromHdfs(path, hadoopConf);
-        Path vpath = new Path(baseDir, VERSION_DIR);
+        Path vpath = new Path(baseDir, Constants.VERSION_DIR);
         currentVersion = 
getAccumuloPersistentVersion(vpath.getFileSystem(hadoopConf), vpath);
       } catch (Exception e) {
         if (ignore) {
@@ -166,14 +129,14 @@ public class ServerConstants {
 
   public Set<String> getTablesDirs() {
     if (tablesDirs == null) {
-      tablesDirs = prefix(getBaseUris(), TABLE_DIR);
+      tablesDirs = prefix(getBaseUris(), Constants.TABLE_DIR);
     }
     return tablesDirs;
   }
 
   public Set<String> getRecoveryDirs() {
     if (recoveryDirs == null) {
-      recoveryDirs = prefix(getBaseUris(), RECOVERY_DIR);
+      recoveryDirs = prefix(getBaseUris(), Constants.RECOVERY_DIR);
     }
     return recoveryDirs;
   }
@@ -251,7 +214,7 @@ public class ServerConstants {
 
   public Path getDataVersionLocation(Volume v) {
     // all base dirs should have the same version, so can choose any one
-    return v.prefixChild(VERSION_DIR);
+    return v.prefixChild(Constants.VERSION_DIR);
   }
 
   public int getAccumuloPersistentVersion(Volume v) {
@@ -276,6 +239,6 @@ public class ServerConstants {
 
   public Path getInstanceIdLocation(Volume v) {
     // all base dirs should have the same instance id, so can choose any one
-    return v.prefixChild(ServerConstants.INSTANCE_ID_DIR);
+    return v.prefixChild(Constants.INSTANCE_ID_DIR);
   }
 }
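
    As a rough sketch of the split described in the class Javadoc above: the
    path logic stays in ServerDirs while the bare directory names now come from
    core's Constants. The class name ServerDirsSketch is invented; the snippet
    assumes an already-initialized instance reachable through
    SiteConfiguration.auto(), since getBaseUris() inspects the configured
    volumes.

    import org.apache.accumulo.core.Constants;
    import org.apache.accumulo.core.conf.SiteConfiguration;
    import org.apache.accumulo.server.ServerDirs;
    import org.apache.hadoop.conf.Configuration;

    public class ServerDirsSketch {
      public static void main(String[] args) {
        ServerDirs serverDirs = new ServerDirs(SiteConfiguration.auto(), new Configuration());
        // e.g. {hdfs://nn/accumulo} -> {hdfs://nn/accumulo/tables}
        System.out.println("base URIs:     " + serverDirs.getBaseUris());
        System.out.println("table dirs:    " + serverDirs.getTablesDirs());
        System.out.println("recovery dirs: " + serverDirs.getRecoveryDirs());
        // the raw directory names are plain core-level constants now
        System.out.println(Constants.TABLE_DIR + " " + Constants.WAL_DIR + " "
            + Constants.VERSION_DIR + " " + Constants.INSTANCE_ID_DIR);
      }
    }
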
diff --git 
a/server/base/src/main/java/org/apache/accumulo/server/ServerInfo.java 
b/server/base/src/main/java/org/apache/accumulo/server/ServerInfo.java
index c6d3dab..c37e65b 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/ServerInfo.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/ServerInfo.java
@@ -53,7 +53,7 @@ public class ServerInfo implements ClientInfo {
   private int zooKeepersSessionTimeOut;
   private VolumeManager volumeManager;
   private ZooCache zooCache;
-  private final ServerConstants serverConstants;
+  private final ServerDirs serverDirs;
 
   ServerInfo(SiteConfiguration siteConfig, String instanceName, String 
zooKeepers,
       int zooKeepersSessionTimeOut) {
@@ -83,7 +83,7 @@ public class ServerInfo implements ClientInfo {
       throw new RuntimeException("Instance id " + instanceID + " pointed to by 
the name "
           + instanceName + " does not exist in zookeeper");
     }
-    serverConstants = new ServerConstants(siteConfig, hadoopConf);
+    serverDirs = new ServerDirs(siteConfig, hadoopConf);
   }
 
   ServerInfo(SiteConfiguration config) {
@@ -95,8 +95,8 @@ public class ServerInfo implements ClientInfo {
     } catch (IOException e) {
       throw new IllegalStateException(e);
     }
-    serverConstants = new ServerConstants(siteConfig, hadoopConf);
-    Path instanceIdPath = 
serverConstants.getInstanceIdLocation(volumeManager.getFirst());
+    serverDirs = new ServerDirs(siteConfig, hadoopConf);
+    Path instanceIdPath = 
serverDirs.getInstanceIdLocation(volumeManager.getFirst());
     instanceID = VolumeManager.getInstanceIDFromHdfs(instanceIdPath, 
hadoopConf);
     zooKeepers = config.get(Property.INSTANCE_ZK_HOST);
     zooKeepersSessionTimeOut = (int) 
config.getTimeInMillis(Property.INSTANCE_ZK_TIMEOUT);
@@ -118,7 +118,7 @@ public class ServerInfo implements ClientInfo {
     zooKeepersSessionTimeOut = (int) 
config.getTimeInMillis(Property.INSTANCE_ZK_TIMEOUT);
     zooCache = new ZooCacheFactory().getZooCache(zooKeepers, 
zooKeepersSessionTimeOut);
     this.instanceName = instanceName;
-    serverConstants = new ServerConstants(siteConfig, hadoopConf);
+    serverDirs = new ServerDirs(siteConfig, hadoopConf);
   }
 
   public SiteConfiguration getSiteConfiguration() {
@@ -184,7 +184,7 @@ public class ServerInfo implements ClientInfo {
     return this.hadoopConf;
   }
 
-  public ServerConstants getServerConstants() {
-    return serverConstants;
+  public ServerDirs getServerDirs() {
+    return serverDirs;
   }
 }
diff --git 
a/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManager.java 
b/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManager.java
index c2ee6ee..66b5d52 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManager.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManager.java
@@ -25,9 +25,9 @@ import java.util.Collection;
 import java.util.Map;
 import java.util.Set;
 
+import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.volume.Volume;
 import org.apache.accumulo.core.volume.VolumeConfiguration;
-import org.apache.accumulo.server.ServerConstants;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -46,9 +46,7 @@ import org.slf4j.LoggerFactory;
 public interface VolumeManager extends AutoCloseable {
 
   enum FileType {
-    TABLE(ServerConstants.TABLE_DIR),
-    WAL(ServerConstants.WAL_DIR),
-    RECOVERY(ServerConstants.RECOVERY_DIR);
+    TABLE(Constants.TABLE_DIR), WAL(Constants.WAL_DIR), 
RECOVERY(Constants.RECOVERY_DIR);
 
     private String dir;
 
diff --git 
a/server/base/src/main/java/org/apache/accumulo/server/gc/GcVolumeUtil.java 
b/server/base/src/main/java/org/apache/accumulo/server/gc/GcVolumeUtil.java
index 65c33b5..aa4ac4a 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/gc/GcVolumeUtil.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/gc/GcVolumeUtil.java
@@ -22,9 +22,9 @@ import java.util.Collection;
 import java.util.Collections;
 import java.util.stream.Collectors;
 
+import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.data.TableId;
 import 
org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ServerColumnFamily;
-import org.apache.accumulo.server.ServerConstants;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.hadoop.fs.Path;
 
@@ -34,8 +34,8 @@ public class GcVolumeUtil {
 
   public static String getDeleteTabletOnAllVolumesUri(TableId tableId, String 
dirName) {
     ServerColumnFamily.validateDirCol(dirName);
-    return ALL_VOLUMES_PREFIX + ServerConstants.TABLE_DIR + Path.SEPARATOR + 
tableId
-        + Path.SEPARATOR + dirName;
+    return ALL_VOLUMES_PREFIX + Constants.TABLE_DIR + Path.SEPARATOR + tableId 
+ Path.SEPARATOR
+        + dirName;
   }
 
   public static Collection<Path> expandAllVolumesUri(VolumeManager fs, Path 
path) {
diff --git 
a/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java 
b/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
index d6e1799..1d53f72 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
@@ -90,8 +90,9 @@ import org.apache.accumulo.core.volume.VolumeConfiguration;
 import org.apache.accumulo.fate.zookeeper.ZooReaderWriter;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeMissingPolicy;
-import org.apache.accumulo.server.ServerConstants;
+import org.apache.accumulo.server.AccumuloDataVersion;
 import org.apache.accumulo.server.ServerContext;
+import org.apache.accumulo.server.ServerDirs;
 import org.apache.accumulo.server.constraints.MetadataConstraints;
 import org.apache.accumulo.server.fs.VolumeChooserEnvironmentImpl;
 import org.apache.accumulo.server.fs.VolumeManager;
@@ -341,8 +342,8 @@ public class Initialize implements KeywordExecutable {
       String rootTabletDirName = RootTable.ROOT_TABLET_DIR_NAME;
       String ext = 
FileOperations.getNewFileExtension(DefaultConfiguration.getInstance());
       String rootTabletFileUri = new Path(fs.choose(chooserEnv, 
configuredVolumes) + Path.SEPARATOR
-          + ServerConstants.TABLE_DIR + Path.SEPARATOR + RootTable.ID + 
Path.SEPARATOR
-          + rootTabletDirName + Path.SEPARATOR + "00000_00000." + 
ext).toString();
+          + Constants.TABLE_DIR + Path.SEPARATOR + RootTable.ID + 
Path.SEPARATOR + rootTabletDirName
+          + Path.SEPARATOR + "00000_00000." + ext).toString();
 
       try {
         initZooKeeper(opts, uuid.toString(), instanceNamePath, 
rootTabletDirName,
@@ -354,9 +355,8 @@ public class Initialize implements KeywordExecutable {
 
       try {
         initFileSystem(siteConfig, hadoopConf, fs, uuid,
-            new Path(fs.choose(chooserEnv, configuredVolumes) + Path.SEPARATOR
-                + ServerConstants.TABLE_DIR + Path.SEPARATOR + RootTable.ID + 
rootTabletDirName)
-                    .toString(),
+            new Path(fs.choose(chooserEnv, configuredVolumes) + Path.SEPARATOR 
+ Constants.TABLE_DIR
+                + Path.SEPARATOR + RootTable.ID + 
rootTabletDirName).toString(),
             rootTabletFileUri, context);
       } catch (Exception e) {
         log.error("FATAL Failed to initialize filesystem", e);
@@ -438,11 +438,11 @@ public class Initialize implements KeywordExecutable {
   private static void initDirs(VolumeManager fs, UUID uuid, Set<String> 
baseDirs, boolean print)
       throws IOException {
     for (String baseDir : baseDirs) {
-      fs.mkdirs(new Path(new Path(baseDir, ServerConstants.VERSION_DIR),
-          "" + ServerConstants.DATA_VERSION), new FsPermission("700"));
+      fs.mkdirs(new Path(new Path(baseDir, Constants.VERSION_DIR), "" + 
AccumuloDataVersion.get()),
+          new FsPermission("700"));
 
       // create an instance id
-      Path iidLocation = new Path(baseDir, ServerConstants.INSTANCE_ID_DIR);
+      Path iidLocation = new Path(baseDir, Constants.INSTANCE_ID_DIR);
       fs.mkdirs(iidLocation);
       fs.createNewFile(new Path(iidLocation, uuid.toString()));
       if (print) {
@@ -844,8 +844,8 @@ public class Initialize implements KeywordExecutable {
   public static boolean isInitialized(VolumeManager fs, SiteConfiguration 
siteConfig)
       throws IOException {
     for (String baseDir : VolumeConfiguration.getVolumeUris(siteConfig)) {
-      if (fs.exists(new Path(baseDir, ServerConstants.INSTANCE_ID_DIR))
-          || fs.exists(new Path(baseDir, ServerConstants.VERSION_DIR))) {
+      if (fs.exists(new Path(baseDir, Constants.INSTANCE_ID_DIR))
+          || fs.exists(new Path(baseDir, Constants.VERSION_DIR))) {
         return true;
       }
     }
@@ -854,22 +854,22 @@ public class Initialize implements KeywordExecutable {
   }
 
   private static void addVolumes(VolumeManager fs, SiteConfiguration 
siteConfig,
-      Configuration hadoopConf, ServerConstants serverConstants) throws 
IOException {
+      Configuration hadoopConf, ServerDirs serverDirs) throws IOException {
 
     Set<String> volumeURIs = VolumeConfiguration.getVolumeUris(siteConfig);
 
-    Set<String> initializedDirs = serverConstants.checkBaseUris(hadoopConf, 
volumeURIs, true);
+    Set<String> initializedDirs = serverDirs.checkBaseUris(hadoopConf, 
volumeURIs, true);
 
     HashSet<String> uinitializedDirs = new HashSet<>();
     uinitializedDirs.addAll(volumeURIs);
     uinitializedDirs.removeAll(initializedDirs);
 
     Path aBasePath = new Path(initializedDirs.iterator().next());
-    Path iidPath = new Path(aBasePath, ServerConstants.INSTANCE_ID_DIR);
-    Path versionPath = new Path(aBasePath, ServerConstants.VERSION_DIR);
+    Path iidPath = new Path(aBasePath, Constants.INSTANCE_ID_DIR);
+    Path versionPath = new Path(aBasePath, Constants.VERSION_DIR);
 
     UUID uuid = UUID.fromString(VolumeManager.getInstanceIDFromHdfs(iidPath, 
hadoopConf));
-    for (Pair<Path,Path> replacementVolume : 
serverConstants.getVolumeReplacements()) {
+    for (Pair<Path,Path> replacementVolume : 
serverDirs.getVolumeReplacements()) {
       if (aBasePath.equals(replacementVolume.getFirst())) {
         log.error(
             "{} is set to be replaced in {} and should not appear in {}."
@@ -879,9 +879,9 @@ public class Initialize implements KeywordExecutable {
       }
     }
 
-    int persistentVersion = serverConstants
-        .getAccumuloPersistentVersion(versionPath.getFileSystem(hadoopConf), 
versionPath);
-    if (persistentVersion != ServerConstants.DATA_VERSION) {
+    int persistentVersion =
+        
serverDirs.getAccumuloPersistentVersion(versionPath.getFileSystem(hadoopConf), 
versionPath);
+    if (persistentVersion != AccumuloDataVersion.get()) {
       throw new IOException(
           "Accumulo " + Constants.VERSION + " cannot initialize data version " 
+ persistentVersion);
     }
@@ -942,7 +942,7 @@ public class Initialize implements KeywordExecutable {
       setZooReaderWriter(new ZooReaderWriter(siteConfig));
       SecurityUtil.serverLogin(siteConfig);
       Configuration hadoopConfig = new Configuration();
-      ServerConstants serverConstants = new ServerConstants(siteConfig, 
hadoopConfig);
+      ServerDirs serverDirs = new ServerDirs(siteConfig, hadoopConfig);
 
       try (var fs = VolumeManagerImpl.get(siteConfig, hadoopConfig)) {
 
@@ -972,7 +972,7 @@ public class Initialize implements KeywordExecutable {
         }
 
         if (opts.addVolumes) {
-          addVolumes(fs, siteConfig, hadoopConfig, serverConstants);
+          addVolumes(fs, siteConfig, hadoopConfig, serverDirs);
         }
 
         if (!opts.resetSecurity && !opts.addVolumes) {
diff --git 
a/server/base/src/main/java/org/apache/accumulo/server/util/ChangeSecret.java 
b/server/base/src/main/java/org/apache/accumulo/server/util/ChangeSecret.java
index 139b482..21495e5 100644
--- 
a/server/base/src/main/java/org/apache/accumulo/server/util/ChangeSecret.java
+++ 
b/server/base/src/main/java/org/apache/accumulo/server/util/ChangeSecret.java
@@ -32,8 +32,8 @@ import org.apache.accumulo.fate.zookeeper.ZooReader;
 import org.apache.accumulo.fate.zookeeper.ZooReaderWriter;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeMissingPolicy;
-import org.apache.accumulo.server.ServerConstants;
 import org.apache.accumulo.server.ServerContext;
+import org.apache.accumulo.server.ServerDirs;
 import org.apache.accumulo.server.cli.ServerUtilOpts;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.fs.VolumeManagerImpl;
@@ -65,8 +65,8 @@ public class ChangeSecret {
     var siteConfig = SiteConfiguration.auto();
     var hadoopConf = new Configuration();
     try (var fs = VolumeManagerImpl.get(siteConfig, hadoopConf)) {
-      ServerConstants serverConstants = new ServerConstants(siteConfig, 
hadoopConf);
-      verifyHdfsWritePermission(serverConstants, fs);
+      ServerDirs serverDirs = new ServerDirs(siteConfig, hadoopConf);
+      verifyHdfsWritePermission(serverDirs, fs);
 
       Opts opts = new Opts();
       List<String> argsList = new ArrayList<>(args.length + 2);
@@ -80,7 +80,7 @@ public class ChangeSecret {
         verifyAccumuloIsDown(context, opts.oldPass);
 
         final String newInstanceId = UUID.randomUUID().toString();
-        updateHdfs(serverConstants, fs, newInstanceId);
+        updateHdfs(serverDirs, fs, newInstanceId);
         rewriteZooKeeperInstance(context, newInstanceId, opts.oldPass, 
opts.newPass);
         if (opts.oldPass != null) {
           deleteInstance(context, opts.oldPass);
@@ -162,11 +162,11 @@ public class ChangeSecret {
     new_.putPersistentData(path, newInstanceId.getBytes(UTF_8), 
NodeExistsPolicy.OVERWRITE);
   }
 
-  private static void updateHdfs(ServerConstants serverConstants, 
VolumeManager fs,
-      String newInstanceId) throws IOException {
+  private static void updateHdfs(ServerDirs serverDirs, VolumeManager fs, 
String newInstanceId)
+      throws IOException {
     // Need to recreate the instanceId on all of them to keep consistency
     for (Volume v : fs.getVolumes()) {
-      final Path instanceId = serverConstants.getInstanceIdLocation(v);
+      final Path instanceId = serverDirs.getInstanceIdLocation(v);
       if (!v.getFileSystem().delete(instanceId, true)) {
         throw new IOException("Could not recursively delete " + instanceId);
       }
@@ -179,10 +179,10 @@ public class ChangeSecret {
     }
   }
 
-  private static void verifyHdfsWritePermission(ServerConstants 
serverConstants, VolumeManager fs)
+  private static void verifyHdfsWritePermission(ServerDirs serverDirs, 
VolumeManager fs)
       throws Exception {
     for (Volume v : fs.getVolumes()) {
-      final Path instanceId = serverConstants.getInstanceIdLocation(v);
+      final Path instanceId = serverDirs.getInstanceIdLocation(v);
       FileStatus fileStatus = v.getFileSystem().getFileStatus(instanceId);
       checkHdfsAccessPermissions(fileStatus, FsAction.WRITE);
     }
diff --git 
a/server/base/src/test/java/org/apache/accumulo/server/ServerConstantsTest.java 
b/server/base/src/test/java/org/apache/accumulo/server/ServerDirsTest.java
similarity index 79%
rename from 
server/base/src/test/java/org/apache/accumulo/server/ServerConstantsTest.java
rename to 
server/base/src/test/java/org/apache/accumulo/server/ServerDirsTest.java
index 1cabad9..b7b19d4 100644
--- 
a/server/base/src/test/java/org/apache/accumulo/server/ServerConstantsTest.java
+++ b/server/base/src/test/java/org/apache/accumulo/server/ServerDirsTest.java
@@ -30,6 +30,7 @@ import java.util.Set;
 import java.util.UUID;
 import java.util.stream.Collectors;
 
+import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.conf.ConfigurationCopy;
 import org.apache.accumulo.core.conf.Property;
@@ -45,23 +46,23 @@ import org.junit.rules.TemporaryFolder;
 import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 
 @SuppressFBWarnings(value = "PATH_TRAVERSAL_IN", justification = "paths not 
set by user input")
-public class ServerConstantsTest {
+public class ServerDirsTest {
 
   AccumuloConfiguration conf;
   Configuration hadoopConf = new Configuration();
-  ServerConstants constants;
+  ServerDirs constants;
 
   @Before
   public void setup() throws IOException {
     String uuid = UUID.randomUUID().toString();
 
     var vols =
-        init(folder.newFolder(), Arrays.asList(uuid), 
Arrays.asList(ServerConstants.DATA_VERSION));
+        init(folder.newFolder(), Arrays.asList(uuid), 
Arrays.asList(AccumuloDataVersion.get()));
 
     ConfigurationCopy copy = new ConfigurationCopy();
     copy.set(Property.INSTANCE_VOLUMES.getKey(), String.join(",", vols));
     conf = copy;
-    constants = new ServerConstants(conf, hadoopConf);
+    constants = new ServerDirs(conf, hadoopConf);
   }
 
   @Rule
@@ -73,24 +74,24 @@ public class ServerConstantsTest {
     String uuid1 = UUID.randomUUID().toString();
     String uuid2 = UUID.randomUUID().toString();
 
-    verifyAllPass(init(folder.newFolder(), Arrays.asList(uuid1),
-        Arrays.asList(ServerConstants.DATA_VERSION)));
+    verifyAllPass(
+        init(folder.newFolder(), Arrays.asList(uuid1), 
Arrays.asList(AccumuloDataVersion.get())));
     verifyAllPass(init(folder.newFolder(), Arrays.asList(uuid1, uuid1),
-        Arrays.asList(ServerConstants.DATA_VERSION, 
ServerConstants.DATA_VERSION)));
+        Arrays.asList(AccumuloDataVersion.get(), AccumuloDataVersion.get())));
 
     verifyError(
         init(folder.newFolder(), Arrays.asList((String) null), 
Arrays.asList((Integer) null)));
     verifyError(init(folder.newFolder(), Arrays.asList(uuid1, uuid2),
-        Arrays.asList(ServerConstants.DATA_VERSION, 
ServerConstants.DATA_VERSION)));
+        Arrays.asList(AccumuloDataVersion.get(), AccumuloDataVersion.get())));
     verifyError(init(folder.newFolder(), Arrays.asList(uuid1, uuid1),
-        Arrays.asList(ServerConstants.DATA_VERSION, 
ServerConstants.DATA_VERSION - 1)));
+        Arrays.asList(AccumuloDataVersion.get(), AccumuloDataVersion.get() - 
1)));
     verifyError(init(folder.newFolder(), Arrays.asList(uuid1, uuid2),
-        Arrays.asList(ServerConstants.DATA_VERSION, 
ServerConstants.DATA_VERSION - 1)));
-    verifyError(init(folder.newFolder(), Arrays.asList(uuid1, uuid2, null), 
Arrays.asList(
-        ServerConstants.DATA_VERSION, ServerConstants.DATA_VERSION, 
ServerConstants.DATA_VERSION)));
+        Arrays.asList(AccumuloDataVersion.get(), AccumuloDataVersion.get() - 
1)));
+    verifyError(init(folder.newFolder(), Arrays.asList(uuid1, uuid2, null), 
Arrays
+        .asList(AccumuloDataVersion.get(), AccumuloDataVersion.get(), 
AccumuloDataVersion.get())));
 
     verifySomePass(init(folder.newFolder(), Arrays.asList(uuid1, uuid1, null),
-        Arrays.asList(ServerConstants.DATA_VERSION, 
ServerConstants.DATA_VERSION, null)));
+        Arrays.asList(AccumuloDataVersion.get(), AccumuloDataVersion.get(), 
null)));
   }
 
   private void verifyAllPass(Set<String> paths) {
@@ -140,15 +141,15 @@ public class ServerConstantsTest {
       accumuloPaths.add(accumuloPath);
 
       if (uuids.get(i) != null) {
-        fs.mkdirs(new Path(accumuloPath + "/" + 
ServerConstants.INSTANCE_ID_DIR));
+        fs.mkdirs(new Path(accumuloPath + "/" + Constants.INSTANCE_ID_DIR));
         fs.createNewFile(
-            new Path(accumuloPath + "/" + ServerConstants.INSTANCE_ID_DIR + 
"/" + uuids.get(i)));
+            new Path(accumuloPath + "/" + Constants.INSTANCE_ID_DIR + "/" + 
uuids.get(i)));
       }
 
       if (dataVersions.get(i) != null) {
-        fs.mkdirs(new Path(accumuloPath + "/" + ServerConstants.VERSION_DIR));
+        fs.mkdirs(new Path(accumuloPath + "/" + Constants.VERSION_DIR));
         fs.createNewFile(
-            new Path(accumuloPath + "/" + ServerConstants.VERSION_DIR + "/" + 
dataVersions.get(i)));
+            new Path(accumuloPath + "/" + Constants.VERSION_DIR + "/" + 
dataVersions.get(i)));
       }
     }
 
diff --git 
a/server/base/src/test/java/org/apache/accumulo/server/security/SystemCredentialsTest.java
 
b/server/base/src/test/java/org/apache/accumulo/server/security/SystemCredentialsTest.java
index fd07f88..ef22a11 100644
--- 
a/server/base/src/test/java/org/apache/accumulo/server/security/SystemCredentialsTest.java
+++ 
b/server/base/src/test/java/org/apache/accumulo/server/security/SystemCredentialsTest.java
@@ -25,9 +25,10 @@ import java.io.File;
 import java.io.IOException;
 import java.util.UUID;
 
+import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.clientImpl.Credentials;
 import org.apache.accumulo.core.conf.SiteConfiguration;
-import org.apache.accumulo.server.ServerConstants;
+import org.apache.accumulo.server.AccumuloDataVersion;
 import org.apache.accumulo.server.security.SystemCredentials.SystemToken;
 import org.apache.commons.codec.digest.Crypt;
 import org.junit.BeforeClass;
@@ -49,18 +50,18 @@ public class SystemCredentialsTest {
   @SuppressFBWarnings(value = "PATH_TRAVERSAL_IN", justification = "input not 
from a user")
   @BeforeClass
   public static void setUp() throws IOException {
-    File testInstanceId = new File(
-        new File(new File(new File("target"), "instanceTest"), 
ServerConstants.INSTANCE_ID_DIR),
-        UUID.fromString("00000000-0000-0000-0000-000000000000").toString());
+    File testInstanceId =
+        new File(new File(new File(new File("target"), "instanceTest"), 
Constants.INSTANCE_ID_DIR),
+            
UUID.fromString("00000000-0000-0000-0000-000000000000").toString());
     if (!testInstanceId.exists()) {
       assertTrue(
           testInstanceId.getParentFile().mkdirs() || 
testInstanceId.getParentFile().isDirectory());
       assertTrue(testInstanceId.createNewFile());
     }
 
-    File testInstanceVersion = new File(
-        new File(new File(new File("target"), "instanceTest"), 
ServerConstants.VERSION_DIR),
-        ServerConstants.DATA_VERSION + "");
+    File testInstanceVersion =
+        new File(new File(new File(new File("target"), "instanceTest"), 
Constants.VERSION_DIR),
+            AccumuloDataVersion.get() + "");
     if (!testInstanceVersion.exists()) {
       assertTrue(testInstanceVersion.getParentFile().mkdirs()
           || testInstanceVersion.getParentFile().isDirectory());
diff --git 
a/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectionAlgorithm.java
 
b/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectionAlgorithm.java
index 12152fc..a0e4c05 100644
--- 
a/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectionAlgorithm.java
+++ 
b/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectionAlgorithm.java
@@ -36,7 +36,6 @@ import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.data.TableId;
 import 
org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ServerColumnFamily;
 import org.apache.accumulo.gc.GarbageCollectionEnvironment.Reference;
-import org.apache.accumulo.server.ServerConstants;
 import org.apache.accumulo.server.replication.StatusUtil;
 import org.apache.accumulo.server.replication.proto.Replication.Status;
 import org.apache.htrace.Trace;
@@ -86,11 +85,11 @@ public class GarbageCollectionAlgorithm {
     }
 
     if (tokens.length > 3 && path.contains(":")) {
-      if (tokens[tokens.length - 4].equals(ServerConstants.TABLE_DIR)
+      if (tokens[tokens.length - 4].equals(Constants.TABLE_DIR)
           && (expectedLen == 0 || expectedLen == 3)) {
         relPath = tokens[tokens.length - 3] + "/" + tokens[tokens.length - 2] 
+ "/"
             + tokens[tokens.length - 1];
-      } else if (tokens[tokens.length - 3].equals(ServerConstants.TABLE_DIR)
+      } else if (tokens[tokens.length - 3].equals(Constants.TABLE_DIR)
           && (expectedLen == 0 || expectedLen == 2)) {
         relPath = tokens[tokens.length - 2] + "/" + tokens[tokens.length - 1];
       } else {
diff --git 
a/server/manager/src/main/java/org/apache/accumulo/manager/tableOps/tableExport/WriteExportFiles.java
 
b/server/manager/src/main/java/org/apache/accumulo/manager/tableOps/tableExport/WriteExportFiles.java
index bb604ae..3a891aa 100644
--- 
a/server/manager/src/main/java/org/apache/accumulo/manager/tableOps/tableExport/WriteExportFiles.java
+++ 
b/server/manager/src/main/java/org/apache/accumulo/manager/tableOps/tableExport/WriteExportFiles.java
@@ -62,7 +62,7 @@ import org.apache.accumulo.fate.Repo;
 import org.apache.accumulo.manager.Manager;
 import org.apache.accumulo.manager.tableOps.ManagerRepo;
 import org.apache.accumulo.manager.tableOps.Utils;
-import org.apache.accumulo.server.ServerConstants;
+import org.apache.accumulo.server.AccumuloDataVersion;
 import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.conf.TableConfiguration;
 import org.apache.accumulo.server.fs.VolumeManager;
@@ -172,7 +172,7 @@ class WriteExportFiles extends ManagerRepo {
       osw.append("srcZookeepers:" + context.getZooKeepers() + "\n");
       osw.append("srcTableName:" + tableName + "\n");
       osw.append("srcTableID:" + tableID.canonical() + "\n");
-      osw.append(ExportTable.DATA_VERSION_PROP + ":" + 
ServerConstants.DATA_VERSION + "\n");
+      osw.append(ExportTable.DATA_VERSION_PROP + ":" + 
AccumuloDataVersion.get() + "\n");
       osw.append("srcCodeVersion:" + Constants.VERSION + "\n");
 
       osw.flush();
diff --git 
a/server/manager/src/main/java/org/apache/accumulo/manager/tableOps/tableImport/ImportTable.java
 
b/server/manager/src/main/java/org/apache/accumulo/manager/tableOps/tableImport/ImportTable.java
index bd5e168..c578959 100644
--- 
a/server/manager/src/main/java/org/apache/accumulo/manager/tableOps/tableImport/ImportTable.java
+++ 
b/server/manager/src/main/java/org/apache/accumulo/manager/tableOps/tableImport/ImportTable.java
@@ -44,7 +44,7 @@ import org.apache.accumulo.manager.Manager;
 import org.apache.accumulo.manager.tableOps.ManagerRepo;
 import org.apache.accumulo.manager.tableOps.Utils;
 import org.apache.accumulo.manager.tableOps.tableExport.ExportTable;
-import org.apache.accumulo.server.ServerConstants;
+import org.apache.accumulo.server.AccumuloDataVersion;
 import org.apache.hadoop.fs.Path;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -142,7 +142,7 @@ public class ImportTable extends ManagerRepo {
           TableOperation.IMPORT, TableOperationExceptionType.OTHER,
           "Incompatible export version " + exportVersion);
 
-    if (dataVersion == null || dataVersion > ServerConstants.DATA_VERSION)
+    if (dataVersion == null || dataVersion > AccumuloDataVersion.get())
       throw new AcceptableThriftTableOperationException(null, 
tableInfo.tableName,
           TableOperation.IMPORT, TableOperationExceptionType.OTHER,
           "Incompatible data version " + dataVersion);
diff --git 
a/server/manager/src/main/java/org/apache/accumulo/manager/upgrade/UpgradeCoordinator.java
 
b/server/manager/src/main/java/org/apache/accumulo/manager/upgrade/UpgradeCoordinator.java
index a5a7879..d464b1e 100644
--- 
a/server/manager/src/main/java/org/apache/accumulo/manager/upgrade/UpgradeCoordinator.java
+++ 
b/server/manager/src/main/java/org/apache/accumulo/manager/upgrade/UpgradeCoordinator.java
@@ -35,8 +35,9 @@ import org.apache.accumulo.fate.ReadOnlyStore;
 import org.apache.accumulo.fate.ReadOnlyTStore;
 import org.apache.accumulo.fate.ZooStore;
 import org.apache.accumulo.manager.EventCoordinator;
-import org.apache.accumulo.server.ServerConstants;
+import org.apache.accumulo.server.AccumuloDataVersion;
 import org.apache.accumulo.server.ServerContext;
+import org.apache.accumulo.server.ServerDirs;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.hadoop.fs.Path;
 import org.slf4j.Logger;
@@ -106,8 +107,8 @@ public class UpgradeCoordinator {
   private static Logger log = 
LoggerFactory.getLogger(UpgradeCoordinator.class);
 
   private int currentVersion;
-  private Map<Integer,Upgrader> upgraders = 
Map.of(ServerConstants.SHORTEN_RFILE_KEYS,
-      new Upgrader8to9(), ServerConstants.CRYPTO_CHANGES, new Upgrader9to10());
+  private Map<Integer,Upgrader> upgraders = 
Map.of(AccumuloDataVersion.SHORTEN_RFILE_KEYS,
+      new Upgrader8to9(), AccumuloDataVersion.CRYPTO_CHANGES, new 
Upgrader9to10());
 
   private volatile UpgradeStatus status;
 
@@ -139,20 +140,20 @@ public class UpgradeCoordinator {
         "Not currently in a suitable state to do zookeeper upgrade %s", 
status);
 
     try {
-      int cv = context.getServerConstants()
+      int cv = context.getServerDirs()
           .getAccumuloPersistentVersion(context.getVolumeManager().getFirst());
       ServerContext.ensureDataVersionCompatible(cv);
       this.currentVersion = cv;
 
-      if (cv == ServerConstants.DATA_VERSION) {
+      if (cv == AccumuloDataVersion.get()) {
         status = UpgradeStatus.COMPLETE;
         return;
       }
 
-      if (currentVersion < ServerConstants.DATA_VERSION) {
+      if (currentVersion < AccumuloDataVersion.get()) {
         abortIfFateTransactions(context);
 
-        for (int v = currentVersion; v < ServerConstants.DATA_VERSION; v++) {
+        for (int v = currentVersion; v < AccumuloDataVersion.get(); v++) {
           log.info("Upgrading Zookeeper from data version {}", v);
           upgraders.get(v).upgradeZookeeper(context);
         }
@@ -173,25 +174,25 @@ public class UpgradeCoordinator {
     Preconditions.checkState(status == UpgradeStatus.UPGRADED_ZOOKEEPER,
         "Not currently in a suitable state to do metadata upgrade %s", status);
 
-    if (currentVersion < ServerConstants.DATA_VERSION) {
+    if (currentVersion < AccumuloDataVersion.get()) {
       return ThreadPools.createThreadPool(0, Integer.MAX_VALUE, 60L, 
TimeUnit.SECONDS,
           "UpgradeMetadataThreads", new SynchronousQueue<Runnable>(), 
OptionalInt.empty(), false)
           .submit(() -> {
             try {
-              for (int v = currentVersion; v < ServerConstants.DATA_VERSION; 
v++) {
+              for (int v = currentVersion; v < AccumuloDataVersion.get(); v++) 
{
                 log.info("Upgrading Root from data version {}", v);
                 upgraders.get(v).upgradeRoot(context);
               }
 
               setStatus(UpgradeStatus.UPGRADED_ROOT, eventCoordinator);
 
-              for (int v = currentVersion; v < ServerConstants.DATA_VERSION; 
v++) {
+              for (int v = currentVersion; v < AccumuloDataVersion.get(); v++) 
{
                 log.info("Upgrading Metadata from data version {}", v);
                 upgraders.get(v).upgradeMetadata(context);
               }
 
               log.info("Updating persistent data version.");
-              updateAccumuloVersion(context.getServerConstants(), 
context.getVolumeManager(),
+              updateAccumuloVersion(context.getServerDirs(), 
context.getVolumeManager(),
                   currentVersion);
               log.info("Upgrade complete");
               setStatus(UpgradeStatus.COMPLETE, eventCoordinator);
@@ -206,14 +207,13 @@ public class UpgradeCoordinator {
   }
 
   // visible for testing
-  synchronized void updateAccumuloVersion(ServerConstants constants, 
VolumeManager fs,
-      int oldVersion) {
+  synchronized void updateAccumuloVersion(ServerDirs serverDirs, VolumeManager 
fs, int oldVersion) {
     for (Volume volume : fs.getVolumes()) {
       try {
-        if (constants.getAccumuloPersistentVersion(volume) == oldVersion) {
+        if (serverDirs.getAccumuloPersistentVersion(volume) == oldVersion) {
           log.debug("Attempting to upgrade {}", volume);
-          Path dataVersionLocation = constants.getDataVersionLocation(volume);
-          fs.create(new Path(dataVersionLocation, 
Integer.toString(ServerConstants.DATA_VERSION)))
+          Path dataVersionLocation = serverDirs.getDataVersionLocation(volume);
+          fs.create(new Path(dataVersionLocation, 
Integer.toString(AccumuloDataVersion.get())))
               .close();
           // TODO document failure mode & recovery if FS permissions cause 
above to work and below
           // to fail ACCUMULO-2596
diff --git 
a/server/manager/src/main/java/org/apache/accumulo/manager/upgrade/Upgrader8to9.java
 
b/server/manager/src/main/java/org/apache/accumulo/manager/upgrade/Upgrader8to9.java
index 81da2d8..953ce44 100644
--- 
a/server/manager/src/main/java/org/apache/accumulo/manager/upgrade/Upgrader8to9.java
+++ 
b/server/manager/src/main/java/org/apache/accumulo/manager/upgrade/Upgrader8to9.java
@@ -18,11 +18,11 @@
  */
 package org.apache.accumulo.manager.upgrade;
 
-import org.apache.accumulo.server.ServerConstants;
+import org.apache.accumulo.server.AccumuloDataVersion;
 import org.apache.accumulo.server.ServerContext;
 
 /**
- * See {@link ServerConstants#CRYPTO_CHANGES}
+ * See {@link AccumuloDataVersion#CRYPTO_CHANGES}
  */
 public class Upgrader8to9 implements Upgrader {
 
diff --git 
a/server/manager/src/test/java/org/apache/accumulo/manager/upgrade/AccumuloTest.java
 
b/server/manager/src/test/java/org/apache/accumulo/manager/upgrade/AccumuloTest.java
index 4db09c0..cd5836e 100644
--- 
a/server/manager/src/test/java/org/apache/accumulo/manager/upgrade/AccumuloTest.java
+++ 
b/server/manager/src/test/java/org/apache/accumulo/manager/upgrade/AccumuloTest.java
@@ -25,9 +25,11 @@ import static org.junit.Assert.assertEquals;
 
 import java.io.FileNotFoundException;
 
+import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.conf.DefaultConfiguration;
 import org.apache.accumulo.core.volume.Volume;
-import org.apache.accumulo.server.ServerConstants;
+import org.apache.accumulo.server.AccumuloDataVersion;
+import org.apache.accumulo.server.ServerDirs;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -42,13 +44,13 @@ import com.google.common.collect.Sets;
 public class AccumuloTest {
   private FileSystem fs;
   private Path path;
-  private ServerConstants serverConstants;
+  private ServerDirs serverDirs;
 
   @Before
   public void setUp() {
     fs = createMock(FileSystem.class);
     path = createMock(Path.class);
-    serverConstants = new ServerConstants(DefaultConfiguration.getInstance(), 
new Configuration());
+    serverDirs = new ServerDirs(DefaultConfiguration.getInstance(), new 
Configuration());
   }
 
   private FileStatus[] mockPersistentVersion(String s) {
@@ -68,7 +70,7 @@ public class AccumuloTest {
     expect(fs.listStatus(path)).andReturn(files);
     replay(fs);
 
-    assertEquals(42, serverConstants.getAccumuloPersistentVersion(fs, path));
+    assertEquals(42, serverDirs.getAccumuloPersistentVersion(fs, path));
   }
 
   @Test
@@ -76,7 +78,7 @@ public class AccumuloTest {
     expect(fs.listStatus(path)).andReturn(null);
     replay(fs);
 
-    assertEquals(-1, serverConstants.getAccumuloPersistentVersion(fs, path));
+    assertEquals(-1, serverDirs.getAccumuloPersistentVersion(fs, path));
   }
 
   @Test
@@ -84,7 +86,7 @@ public class AccumuloTest {
     expect(fs.listStatus(path)).andReturn(new FileStatus[0]);
     replay(fs);
 
-    assertEquals(-1, serverConstants.getAccumuloPersistentVersion(fs, path));
+    assertEquals(-1, serverDirs.getAccumuloPersistentVersion(fs, path));
   }
 
   @Test(expected = RuntimeException.class)
@@ -92,7 +94,7 @@ public class AccumuloTest {
     expect(fs.listStatus(path)).andThrow(new FileNotFoundException());
     replay(fs);
 
-    assertEquals(-1, serverConstants.getAccumuloPersistentVersion(fs, path));
+    assertEquals(-1, serverDirs.getAccumuloPersistentVersion(fs, path));
   }
 
   @Test
@@ -101,7 +103,7 @@ public class AccumuloTest {
     FileSystem fs1 = createMock(FileSystem.class);
     Path baseVersion1 = new Path("hdfs://volume1/accumulo/version");
     Path oldVersion1 = new Path("hdfs://volume1/accumulo/version/7");
-    Path newVersion1 = new Path("hdfs://volume1/accumulo/version/" + 
ServerConstants.DATA_VERSION);
+    Path newVersion1 = new Path("hdfs://volume1/accumulo/version/" + 
AccumuloDataVersion.get());
 
     FileStatus[] files1 = mockPersistentVersion("7");
     expect(fs1.listStatus(baseVersion1)).andReturn(files1);
@@ -109,7 +111,7 @@ public class AccumuloTest {
 
     FSDataOutputStream fsdos1 = createMock(FSDataOutputStream.class);
     expect(v1.getFileSystem()).andReturn(fs1);
-    
expect(v1.prefixChild(ServerConstants.VERSION_DIR)).andReturn(baseVersion1).times(2);
+    
expect(v1.prefixChild(Constants.VERSION_DIR)).andReturn(baseVersion1).times(2);
     replay(v1);
     fsdos1.close();
     replay(fsdos1);
@@ -118,7 +120,7 @@ public class AccumuloTest {
     FileSystem fs2 = createMock(FileSystem.class);
     Path baseVersion2 = new Path("hdfs://volume2/accumulo/version");
     Path oldVersion2 = new Path("hdfs://volume2/accumulo/version/7");
-    Path newVersion2 = new Path("hdfs://volume2/accumulo/version/" + 
ServerConstants.DATA_VERSION);
+    Path newVersion2 = new Path("hdfs://volume2/accumulo/version/" + 
AccumuloDataVersion.get());
 
     FileStatus[] files2 = mockPersistentVersion("7");
     expect(fs2.listStatus(baseVersion2)).andReturn(files2);
@@ -126,7 +128,7 @@ public class AccumuloTest {
 
     FSDataOutputStream fsdos2 = createMock(FSDataOutputStream.class);
     expect(v2.getFileSystem()).andReturn(fs2);
-    
expect(v2.prefixChild(ServerConstants.VERSION_DIR)).andReturn(baseVersion2).times(2);
+    
expect(v2.prefixChild(Constants.VERSION_DIR)).andReturn(baseVersion2).times(2);
     replay(v2);
     fsdos2.close();
     replay(fsdos2);
@@ -140,8 +142,7 @@ public class AccumuloTest {
     replay(vm);
 
     UpgradeCoordinator upgradeCoordinator = new UpgradeCoordinator();
-    ServerConstants constants =
-        new ServerConstants(DefaultConfiguration.getInstance(), new 
Configuration());
+    ServerDirs constants = new ServerDirs(DefaultConfiguration.getInstance(), 
new Configuration());
     upgradeCoordinator.updateAccumuloVersion(constants, vm, 7);
   }
 }
diff --git 
a/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java 
b/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java
index 8003f74..1ccb071 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java
@@ -101,7 +101,6 @@ import org.apache.accumulo.fate.zookeeper.ZooReaderWriter;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 import org.apache.accumulo.server.AbstractServer;
 import org.apache.accumulo.server.GarbageCollectionLogger;
-import org.apache.accumulo.server.ServerConstants;
 import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.ServerOpts;
 import org.apache.accumulo.server.TabletLevel;
@@ -996,7 +995,7 @@ public class TabletServer extends AbstractServer {
 
     boolean warned = false;
     for (String prefix : prefixes) {
-      String logPath = prefix + Path.SEPARATOR + ServerConstants.WAL_DIR;
+      String logPath = prefix + Path.SEPARATOR + Constants.WAL_DIR;
       if (!context.getVolumeManager().canSyncAndFlush(new Path(logPath))) {
         // sleep a few seconds in case this is at cluster start...give monitor
         // time to start so the warning will be more visible
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/log/DfsLogger.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/log/DfsLogger.java
index 3a4ea4e..ac9ee8b 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/log/DfsLogger.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/log/DfsLogger.java
@@ -43,6 +43,7 @@ import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.atomic.AtomicLong;
 
+import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.Durability;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.conf.Property;
@@ -60,7 +61,6 @@ import org.apache.accumulo.core.spi.crypto.FileEncrypter;
 import org.apache.accumulo.core.spi.crypto.NoCryptoService;
 import org.apache.accumulo.core.util.Pair;
 import org.apache.accumulo.core.util.threads.Threads;
-import org.apache.accumulo.server.ServerConstants;
 import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.fs.VolumeChooserEnvironmentImpl;
 import org.apache.accumulo.server.fs.VolumeManager;
@@ -406,8 +406,8 @@ public class DfsLogger implements Comparable<DfsLogger> {
 
     var chooserEnv = new VolumeChooserEnvironmentImpl(
         org.apache.accumulo.core.spi.fs.VolumeChooserEnvironment.Scope.LOGGER, context);
-    logPath = fs.choose(chooserEnv, context.getBaseUris()) + Path.SEPARATOR
-        + ServerConstants.WAL_DIR + Path.SEPARATOR + logger + Path.SEPARATOR + filename;
+    logPath = fs.choose(chooserEnv, context.getBaseUris()) + Path.SEPARATOR + Constants.WAL_DIR
+        + Path.SEPARATOR + logger + Path.SEPARATOR + filename;
 
     metaReference = toString();
     LoggerOperation op = null;
diff --git a/test/src/main/java/org/apache/accumulo/test/MissingWalHeaderCompletesRecoveryIT.java b/test/src/main/java/org/apache/accumulo/test/MissingWalHeaderCompletesRecoveryIT.java
index 2dc63ab..cd74f62 100644
--- a/test/src/main/java/org/apache/accumulo/test/MissingWalHeaderCompletesRecoveryIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/MissingWalHeaderCompletesRecoveryIT.java
@@ -26,6 +26,7 @@ import static org.junit.Assert.assertTrue;
 import java.io.File;
 import java.util.UUID;
 
+import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.Accumulo;
 import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.BatchWriter;
@@ -41,7 +42,6 @@ import org.apache.accumulo.core.security.TablePermission;
 import org.apache.accumulo.core.tabletserver.log.LogEntry;
 import org.apache.accumulo.miniclusterImpl.MiniAccumuloClusterImpl;
 import org.apache.accumulo.miniclusterImpl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.server.ServerConstants;
 import org.apache.accumulo.test.functional.ConfigurableMacBase;
 import org.apache.accumulo.tserver.log.DfsLogger;
 import org.apache.hadoop.conf.Configuration;
@@ -120,7 +120,7 @@ public class MissingWalHeaderCompletesRecoveryIT extends ConfigurableMacBase {
       // Fake out something that looks like host:port, it's irrelevant
       String fakeServer = "127.0.0.1:12345";
 
-      File walogs = new File(cluster.getConfig().getAccumuloDir(), ServerConstants.WAL_DIR);
+      File walogs = new File(cluster.getConfig().getAccumuloDir(), Constants.WAL_DIR);
       File walogServerDir = new File(walogs, fakeServer.replace(':', '+'));
       File emptyWalog = new File(walogServerDir, UUID.randomUUID().toString());
 
@@ -176,7 +176,7 @@ public class MissingWalHeaderCompletesRecoveryIT extends ConfigurableMacBase {
       // Fake out something that looks like host:port, it's irrelevant
       String fakeServer = "127.0.0.1:12345";
 
-      File walogs = new File(cluster.getConfig().getAccumuloDir(), ServerConstants.WAL_DIR);
+      File walogs = new File(cluster.getConfig().getAccumuloDir(), Constants.WAL_DIR);
       File walogServerDir = new File(walogs, fakeServer.replace(':', '+'));
       File partialHeaderWalog = new File(walogServerDir, UUID.randomUUID().toString());
 
diff --git a/test/src/main/java/org/apache/accumulo/test/VolumeIT.java b/test/src/main/java/org/apache/accumulo/test/VolumeIT.java
index 37081b9..17208b2 100644
--- a/test/src/main/java/org/apache/accumulo/test/VolumeIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/VolumeIT.java
@@ -38,6 +38,7 @@ import java.util.SortedSet;
 import java.util.TreeSet;
 import java.util.function.Consumer;
 
+import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.Accumulo;
 import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.AccumuloException;
@@ -65,7 +66,6 @@ import org.apache.accumulo.core.metadata.StoredTabletFile;
 import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.miniclusterImpl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.server.ServerConstants;
 import org.apache.accumulo.server.init.Initialize;
 import org.apache.accumulo.server.log.WalStateManager;
 import org.apache.accumulo.server.log.WalStateManager.WalMarkerException;
@@ -249,7 +249,7 @@ public class VolumeIT extends ConfigurableMacBase {
   private void checkVolumesInitialized(List<Path> volumes, String uuid) throws Exception {
     for (Path volumePath : volumes) {
       FileSystem fs = 
volumePath.getFileSystem(cluster.getServerContext().getHadoopConf());
-      Path vp = new Path(volumePath, ServerConstants.INSTANCE_ID_DIR);
+      Path vp = new Path(volumePath, Constants.INSTANCE_ID_DIR);
       FileStatus[] iids = fs.listStatus(vp);
       assertEquals(1, iids.length);
       assertEquals(uuid, iids[0].getPath().getName());
diff --git a/test/src/main/java/org/apache/accumulo/test/replication/UnusedWalDoesntCloseReplicationStatusIT.java b/test/src/main/java/org/apache/accumulo/test/replication/UnusedWalDoesntCloseReplicationStatusIT.java
index 39c17cb..02d503d 100644
--- a/test/src/main/java/org/apache/accumulo/test/replication/UnusedWalDoesntCloseReplicationStatusIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/replication/UnusedWalDoesntCloseReplicationStatusIT.java
@@ -31,6 +31,7 @@ import java.util.Collections;
 import java.util.Map.Entry;
 import java.util.UUID;
 
+import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.Accumulo;
 import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.BatchWriter;
@@ -49,7 +50,6 @@ import org.apache.accumulo.core.protobuf.ProtobufUtil;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.TablePermission;
 import org.apache.accumulo.miniclusterImpl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.server.ServerConstants;
 import org.apache.accumulo.server.data.ServerMutation;
 import org.apache.accumulo.server.replication.ReplicaSystemFactory;
 import org.apache.accumulo.server.replication.StatusUtil;
@@ -102,7 +102,7 @@ public class UnusedWalDoesntCloseReplicationStatusIT extends ConfigurableMacBase
 
     FileSystem fs = FileSystem.getLocal(new Configuration());
     File tserverWalDir =
-        new File(accumuloDir, ServerConstants.WAL_DIR + Path.SEPARATOR + "faketserver+port");
+        new File(accumuloDir, Constants.WAL_DIR + Path.SEPARATOR + "faketserver+port");
     File tserverWal = new File(tserverWalDir, UUID.randomUUID().toString());
     fs.mkdirs(new Path(tserverWalDir.getAbsolutePath()));
 
