This is an automated email from the ASF dual-hosted git repository.

dlmarion pushed a commit to branch elasticity
in repository https://gitbox.apache.org/repos/asf/accumulo.git


The following commit(s) were added to refs/heads/elasticity by this push:
     new b6769788c0 Removed FileUtil.cleanupIndexOp to resolve TODO, related changes (#4385)
b6769788c0 is described below

commit b6769788c054407bc1ee269758684a86b8ca6e10
Author: Dave Marion <dlmar...@apache.org>
AuthorDate: Fri Apr 5 16:12:51 2024 -0400

    Removed FileUtil.cleanupIndexOp to resolve TODO, related changes (#4385)
    
    The existing TODO in FileUtil was to determine if the split code
    in elasticity was missing something. The cleanupIndexOp method was
    called in earlier versions, but is no longer called in elasticity.
    I determined that the SplitUtils.IndexIterable.close method is a
    likely replacement for cleanupIndexOp, so I removed cleanupIndexOp
    and FileUtilTest, which only tested that method. The one remaining
    method in FileUtil is only called from Splitter, so I moved that
    method, its related code, and its associated test, and fixed up
    references that were broken by the code move.
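
    As an illustration of the new call path, here is a minimal sketch of
    how a caller obtains first/last row information through the moved
    method. The 'context', 'tableConf', and 'file' names are placeholders
    for a ServerContext, TableConfiguration, and TabletFile already in
    scope; the Splitter cache loader in the diff below follows the same
    pattern:

        // Look up first/last rows via the relocated static method.
        Map<TabletFile,Splitter.FileInfo> info =
            Splitter.tryToGetFirstAndLastRows(context, tableConf, Set.of(file));
        // The returned map omits files whose first row could not be read.
        Splitter.FileInfo fileInfo = info.get(file);
        if (fileInfo != null) {
          Text firstRow = fileInfo.getFirstRow();
          Text lastRow = fileInfo.getLastRow();
        }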
---
 .../org/apache/accumulo/server/util/FileUtil.java  | 135 ----------------
 .../apache/accumulo/server/util/FileUtilTest.java  | 176 ---------------------
 .../apache/accumulo/manager/split/Splitter.java    |  81 +++++++++-
 .../manager/tableOps/split/UpdateTablets.java      |   6 +-
 .../manager/upgrade/SplitRecovery12to13.java       |   6 +-
 .../manager/tableOps/split}/FileInfoTest.java      |   4 +-
 .../manager/tableOps/split/UpdateTabletsTest.java  |   5 +-
 7 files changed, 88 insertions(+), 325 deletions(-)

diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/FileUtil.java b/server/base/src/main/java/org/apache/accumulo/server/util/FileUtil.java
deleted file mode 100644
index 78a541ca6e..0000000000
--- a/server/base/src/main/java/org/apache/accumulo/server/util/FileUtil.java
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.accumulo.server.util;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.accumulo.core.file.FileOperations;
-import org.apache.accumulo.core.file.FileSKVIterator;
-import org.apache.accumulo.core.metadata.TabletFile;
-import org.apache.accumulo.server.ServerContext;
-import org.apache.accumulo.server.conf.TableConfiguration;
-import org.apache.accumulo.server.fs.VolumeManager;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.Text;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class FileUtil {
-
-  public static class FileInfo {
-    final Text firstRow;
-    final Text lastRow;
-
-    public FileInfo(Text firstRow, Text lastRow) {
-      this.firstRow = firstRow;
-      this.lastRow = lastRow;
-    }
-
-    public Text getFirstRow() {
-      return firstRow;
-    }
-
-    public Text getLastRow() {
-      return lastRow;
-    }
-  }
-
-  private static final Logger log = LoggerFactory.getLogger(FileUtil.class);
-
-  // ELASTICITY_TODO this is only used by test. Determine what the test are doing and if some
-  // functionality is missing in the new split code.
-  protected static void cleanupIndexOp(Path tmpDir, VolumeManager fs,
-      ArrayList<FileSKVIterator> readers) throws IOException {
-    // close all of the index sequence files
-    for (FileSKVIterator r : readers) {
-      try {
-        if (r != null) {
-          r.close();
-        }
-      } catch (IOException e) {
-        // okay, try to close the rest anyway
-        log.error("{}", e.getMessage(), e);
-      }
-    }
-
-    if (tmpDir != null) {
-      FileSystem actualFs = fs.getFileSystemByPath(tmpDir);
-      if (actualFs.exists(tmpDir)) {
-        fs.deleteRecursively(tmpDir);
-        return;
-      }
-
-      log.error("Did not delete tmp dir because it wasn't a tmp dir {}", tmpDir);
-    }
-  }
-
-  public static <T extends TabletFile> Map<T,FileInfo> tryToGetFirstAndLastRows(
-      ServerContext context, TableConfiguration tableConf, Set<T> dataFiles) {
-
-    HashMap<T,FileInfo> dataFilesInfo = new HashMap<>();
-
-    long t1 = System.currentTimeMillis();
-
-    for (T dataFile : dataFiles) {
-
-      FileSKVIterator reader = null;
-      FileSystem ns = context.getVolumeManager().getFileSystemByPath(dataFile.getPath());
-      try {
-        reader = FileOperations.getInstance().newReaderBuilder()
-            .forFile(dataFile, ns, ns.getConf(), tableConf.getCryptoService())
-            .withTableConfiguration(tableConf).build();
-
-        Text firstRow = reader.getFirstRow();
-        if (firstRow != null) {
-          dataFilesInfo.put(dataFile, new FileInfo(firstRow, reader.getLastRow()));
-        }
-
-      } catch (IOException ioe) {
-        log.warn("Failed to read data file to determine first and last key : " + dataFile, ioe);
-      } finally {
-        if (reader != null) {
-          try {
-            reader.close();
-          } catch (IOException ioe) {
-            log.warn("failed to close " + dataFile, ioe);
-          }
-        }
-      }
-
-    }
-
-    long t2 = System.currentTimeMillis();
-
-    String message = String.format("Found first and last keys for %d data files in %6.2f secs",
-        dataFiles.size(), (t2 - t1) / 1000.0);
-    if (t2 - t1 > 500) {
-      log.debug(message);
-    } else {
-      log.trace(message);
-    }
-
-    return dataFilesInfo;
-  }
-}
diff --git a/server/base/src/test/java/org/apache/accumulo/server/util/FileUtilTest.java b/server/base/src/test/java/org/apache/accumulo/server/util/FileUtilTest.java
deleted file mode 100644
index e72cab901f..0000000000
--- a/server/base/src/test/java/org/apache/accumulo/server/util/FileUtilTest.java
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.accumulo.server.util;
-
-import static org.junit.jupiter.api.Assertions.assertFalse;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.server.WithTestNames;
-import org.apache.accumulo.server.fs.VolumeManagerImpl;
-import org.apache.hadoop.fs.Path;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Test;
-import org.junit.jupiter.api.io.TempDir;
-
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-
-@SuppressFBWarnings(value = "PATH_TRAVERSAL_IN", justification = "paths not set by user input")
-public class FileUtilTest extends WithTestNames {
-
-  @TempDir
-  private static File tempDir;
-  private File accumuloDir;
-
-  @BeforeEach
-  public void createTmpDir() {
-    accumuloDir = new File(tempDir, testName());
-    assertTrue(accumuloDir.isDirectory() || accumuloDir.mkdir());
-  }
-
-  @Test
-  public void testCleanupIndexOpWithDfsDir() throws IOException {
-    // And a "unique" tmp directory for each volume
-    File tmp1 = new File(accumuloDir, "tmp");
-    assertTrue(tmp1.mkdirs() || tmp1.isDirectory());
-    Path tmpPath1 = new Path(tmp1.toURI());
-
-    HashMap<Property,String> testProps = new HashMap<>();
-    testProps.put(Property.INSTANCE_VOLUMES, accumuloDir.getAbsolutePath());
-
-    try (var fs = VolumeManagerImpl.getLocalForTesting(accumuloDir.getAbsolutePath())) {
-      FileUtil.cleanupIndexOp(tmpPath1, fs, new ArrayList<>());
-    }
-
-    assertFalse(tmp1.exists(), "Expected " + tmp1 + " to be cleaned up but it wasn't");
-  }
-
-  @Test
-  public void testCleanupIndexOpWithCommonParentVolume() throws IOException {
-    File volumeDir = new File(accumuloDir, "volumes");
-    assertTrue(volumeDir.mkdirs() || volumeDir.isDirectory());
-
-    // Make some directories to simulate multiple volumes
-    File v1 = new File(volumeDir, "v1"), v2 = new File(volumeDir, "v2");
-    assertTrue(v1.mkdirs() || v1.isDirectory());
-    assertTrue(v2.mkdirs() || v2.isDirectory());
-
-    // And a "unique" tmp directory for each volume
-    File tmp1 = new File(v1, "tmp"), tmp2 = new File(v2, "tmp");
-    assertTrue(tmp1.mkdirs() || tmp1.isDirectory());
-    assertTrue(tmp2.mkdirs() || tmp2.isDirectory());
-    Path tmpPath1 = new Path(tmp1.toURI()), tmpPath2 = new Path(tmp2.toURI());
-
-    HashMap<Property,String> testProps = new HashMap<>();
-    testProps.put(Property.INSTANCE_VOLUMES, v1.toURI() + "," + v2.toURI());
-
-    try (var fs = VolumeManagerImpl.getLocalForTesting(accumuloDir.getAbsolutePath())) {
-      FileUtil.cleanupIndexOp(tmpPath1, fs, new ArrayList<>());
-      assertFalse(tmp1.exists(), "Expected " + tmp1 + " to be cleaned up but it wasn't");
-      FileUtil.cleanupIndexOp(tmpPath2, fs, new ArrayList<>());
-      assertFalse(tmp2.exists(), "Expected " + tmp2 + " to be cleaned up but it wasn't");
-    }
-  }
-
-  @Test
-  public void testCleanupIndexOpWithCommonParentVolumeWithDepth() throws IOException {
-    File volumeDir = new File(accumuloDir, "volumes");
-    assertTrue(volumeDir.mkdirs() || volumeDir.isDirectory());
-
-    // Make some directories to simulate multiple volumes
-    File v1 = new File(volumeDir, "v1"), v2 = new File(volumeDir, "v2");
-    assertTrue(v1.mkdirs() || v1.isDirectory());
-    assertTrue(v2.mkdirs() || v2.isDirectory());
-
-    // And a "unique" tmp directory for each volume
-    // Make sure we can handle nested directories (a single tmpdir with potentially multiple unique
-    // dirs)
-    File tmp1 = new File(new File(v1, "tmp"), "tmp_1"),
-        tmp2 = new File(new File(v2, "tmp"), "tmp_1");
-    assertTrue(tmp1.mkdirs() || tmp1.isDirectory());
-    assertTrue(tmp2.mkdirs() || tmp2.isDirectory());
-    Path tmpPath1 = new Path(tmp1.toURI()), tmpPath2 = new Path(tmp2.toURI());
-
-    HashMap<Property,String> testProps = new HashMap<>();
-    testProps.put(Property.INSTANCE_VOLUMES, v1.toURI() + "," + v2.toURI());
-
-    try (var fs = VolumeManagerImpl.getLocalForTesting(accumuloDir.getAbsolutePath())) {
-      FileUtil.cleanupIndexOp(tmpPath1, fs, new ArrayList<>());
-      assertFalse(tmp1.exists(), "Expected " + tmp1 + " to be cleaned up but it wasn't");
-      FileUtil.cleanupIndexOp(tmpPath2, fs, new ArrayList<>());
-      assertFalse(tmp2.exists(), "Expected " + tmp2 + " to be cleaned up but it wasn't");
-    }
-  }
-
-  @Test
-  public void testCleanupIndexOpWithoutCommonParentVolume() throws IOException {
-    // Make some directories to simulate multiple volumes
-    File v1 = new File(accumuloDir, "v1"), v2 = new File(accumuloDir, "v2");
-    assertTrue(v1.mkdirs() || v1.isDirectory());
-    assertTrue(v2.mkdirs() || v2.isDirectory());
-
-    // And a "unique" tmp directory for each volume
-    File tmp1 = new File(v1, "tmp"), tmp2 = new File(v2, "tmp");
-    assertTrue(tmp1.mkdirs() || tmp1.isDirectory());
-    assertTrue(tmp2.mkdirs() || tmp2.isDirectory());
-    Path tmpPath1 = new Path(tmp1.toURI()), tmpPath2 = new Path(tmp2.toURI());
-
-    HashMap<Property,String> testProps = new HashMap<>();
-    testProps.put(Property.INSTANCE_VOLUMES, v1.toURI() + "," + v2.toURI());
-
-    try (var fs = VolumeManagerImpl.getLocalForTesting(accumuloDir.getAbsolutePath())) {
-      FileUtil.cleanupIndexOp(tmpPath1, fs, new ArrayList<>());
-      assertFalse(tmp1.exists(), "Expected " + tmp1 + " to be cleaned up but it wasn't");
-      FileUtil.cleanupIndexOp(tmpPath2, fs, new ArrayList<>());
-      assertFalse(tmp2.exists(), "Expected " + tmp2 + " to be cleaned up but it wasn't");
-    }
-  }
-
-  @Test
-  public void testCleanupIndexOpWithoutCommonParentVolumeWithDepth() throws IOException {
-    // Make some directories to simulate multiple volumes
-    File v1 = new File(accumuloDir, "v1"), v2 = new File(accumuloDir, "v2");
-    assertTrue(v1.mkdirs() || v1.isDirectory());
-    assertTrue(v2.mkdirs() || v2.isDirectory());
-
-    // And a "unique" tmp directory for each volume
-    // Make sure we can handle nested directories (a single tmpdir with potentially multiple unique
-    // dirs)
-    File tmp1 = new File(new File(v1, "tmp"), "tmp_1"),
-        tmp2 = new File(new File(v2, "tmp"), "tmp_1");
-    assertTrue(tmp1.mkdirs() || tmp1.isDirectory());
-    assertTrue(tmp2.mkdirs() || tmp2.isDirectory());
-    Path tmpPath1 = new Path(tmp1.toURI()), tmpPath2 = new Path(tmp2.toURI());
-
-    HashMap<Property,String> testProps = new HashMap<>();
-    testProps.put(Property.INSTANCE_VOLUMES, v1.toURI() + "," + v2.toURI());
-
-    try (var fs = VolumeManagerImpl.getLocalForTesting(accumuloDir.getAbsolutePath())) {
-      FileUtil.cleanupIndexOp(tmpPath1, fs, new ArrayList<>());
-      assertFalse(tmp1.exists(), "Expected " + tmp1 + " to be cleaned up but it wasn't");
-      FileUtil.cleanupIndexOp(tmpPath2, fs, new ArrayList<>());
-      assertFalse(tmp2.exists(), "Expected " + tmp2 + " to be cleaned up but it wasn't");
-    }
-  }
-}
diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/split/Splitter.java b/server/manager/src/main/java/org/apache/accumulo/manager/split/Splitter.java
index d8e0a87a40..b2ff72b419 100644
--- a/server/manager/src/main/java/org/apache/accumulo/manager/split/Splitter.java
+++ b/server/manager/src/main/java/org/apache/accumulo/manager/split/Splitter.java
@@ -18,6 +18,9 @@
  */
 package org.apache.accumulo.manager.split;
 
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
 import java.util.Objects;
 import java.util.Set;
 import java.util.concurrent.ArrayBlockingQueue;
@@ -27,12 +30,16 @@ import java.util.concurrent.TimeUnit;
 
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.data.TableId;
+import org.apache.accumulo.core.file.FileOperations;
+import org.apache.accumulo.core.file.FileSKVIterator;
 import org.apache.accumulo.core.metadata.TabletFile;
 import org.apache.accumulo.core.util.cache.Caches.CacheName;
 import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.conf.TableConfiguration;
-import org.apache.accumulo.server.util.FileUtil;
-import org.apache.accumulo.server.util.FileUtil.FileInfo;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.io.Text;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.github.benmanes.caffeine.cache.CacheLoader;
 import com.github.benmanes.caffeine.cache.LoadingCache;
@@ -40,8 +47,76 @@ import com.github.benmanes.caffeine.cache.Weigher;
 
 public class Splitter {
 
+  private static final Logger LOG = LoggerFactory.getLogger(Splitter.class);
+
   private final ThreadPoolExecutor splitExecutor;
 
+  public static class FileInfo {
+    final Text firstRow;
+    final Text lastRow;
+
+    public FileInfo(Text firstRow, Text lastRow) {
+      this.firstRow = firstRow;
+      this.lastRow = lastRow;
+    }
+
+    public Text getFirstRow() {
+      return firstRow;
+    }
+
+    public Text getLastRow() {
+      return lastRow;
+    }
+  }
+
+  public static <T extends TabletFile> Map<T,FileInfo> tryToGetFirstAndLastRows(
+      ServerContext context, TableConfiguration tableConf, Set<T> dataFiles) {
+
+    HashMap<T,FileInfo> dataFilesInfo = new HashMap<>();
+
+    long t1 = System.currentTimeMillis();
+
+    for (T dataFile : dataFiles) {
+
+      FileSKVIterator reader = null;
+      FileSystem ns = context.getVolumeManager().getFileSystemByPath(dataFile.getPath());
+      try {
+        reader = FileOperations.getInstance().newReaderBuilder()
+            .forFile(dataFile, ns, ns.getConf(), tableConf.getCryptoService())
+            .withTableConfiguration(tableConf).build();
+
+        Text firstRow = reader.getFirstRow();
+        if (firstRow != null) {
+          dataFilesInfo.put(dataFile, new FileInfo(firstRow, reader.getLastRow()));
+        }
+
+      } catch (IOException ioe) {
+        LOG.warn("Failed to read data file to determine first and last key : " + dataFile, ioe);
+      } finally {
+        if (reader != null) {
+          try {
+            reader.close();
+          } catch (IOException ioe) {
+            LOG.warn("failed to close " + dataFile, ioe);
+          }
+        }
+      }
+
+    }
+
+    long t2 = System.currentTimeMillis();
+
+    String message = String.format("Found first and last keys for %d data files in %6.2f secs",
+        dataFiles.size(), (t2 - t1) / 1000.0);
+    if (t2 - t1 > 500) {
+      LOG.debug(message);
+    } else {
+      LOG.trace(message);
+    }
+
+    return dataFilesInfo;
+  }
+
   private static class CacheKey {
 
     final TableId tableId;
@@ -95,7 +170,7 @@ public class Splitter {
 
     CacheLoader<CacheKey,FileInfo> loader = key -> {
       TableConfiguration tableConf = context.getTableConfiguration(key.tableId);
-      return FileUtil.tryToGetFirstAndLastRows(context, tableConf, Set.of(key.tabletFile))
+      return tryToGetFirstAndLastRows(context, tableConf, Set.of(key.tabletFile))
           .get(key.tabletFile);
     };
 
diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/tableOps/split/UpdateTablets.java b/server/manager/src/main/java/org/apache/accumulo/manager/tableOps/split/UpdateTablets.java
index 98623f75c4..d8be0ab711 100644
--- a/server/manager/src/main/java/org/apache/accumulo/manager/tableOps/split/UpdateTablets.java
+++ b/server/manager/src/main/java/org/apache/accumulo/manager/tableOps/split/UpdateTablets.java
@@ -39,8 +39,8 @@ import org.apache.accumulo.core.metadata.schema.TabletMetadata;
 import org.apache.accumulo.core.metadata.schema.TabletOperationId;
 import org.apache.accumulo.core.metadata.schema.TabletOperationType;
 import org.apache.accumulo.manager.Manager;
+import org.apache.accumulo.manager.split.Splitter;
 import org.apache.accumulo.manager.tableOps.ManagerRepo;
-import org.apache.accumulo.server.util.FileUtil;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -123,7 +123,7 @@ public class UpdateTablets extends ManagerRepo {
    */
   static Map<KeyExtent,Map<StoredTabletFile,DataFileValue>> getNewTabletFiles(
       Set<KeyExtent> newTablets, TabletMetadata tabletMetadata,
-      Function<StoredTabletFile,FileUtil.FileInfo> fileInfoProvider) {
+      Function<StoredTabletFile,Splitter.FileInfo> fileInfoProvider) {
 
     Map<KeyExtent,Map<StoredTabletFile,DataFileValue>> tabletsFiles = new TreeMap<>();
 
@@ -131,7 +131,7 @@ public class UpdateTablets extends ManagerRepo {
 
     // determine which files overlap which tablets and their estimated sizes
     tabletMetadata.getFilesMap().forEach((file, dataFileValue) -> {
-      FileUtil.FileInfo fileInfo = fileInfoProvider.apply(file);
+      Splitter.FileInfo fileInfo = fileInfoProvider.apply(file);
 
       Range fileRange;
       if (fileInfo != null) {
diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/upgrade/SplitRecovery12to13.java b/server/manager/src/main/java/org/apache/accumulo/manager/upgrade/SplitRecovery12to13.java
index b339462503..436f96c5d4 100644
--- a/server/manager/src/main/java/org/apache/accumulo/manager/upgrade/SplitRecovery12to13.java
+++ b/server/manager/src/main/java/org/apache/accumulo/manager/upgrade/SplitRecovery12to13.java
@@ -47,8 +47,8 @@ import org.apache.accumulo.core.metadata.schema.ExternalCompactionId;
 import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
 import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
 import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.manager.split.Splitter;
 import org.apache.accumulo.server.ServerContext;
-import org.apache.accumulo.server.util.FileUtil;
 import org.apache.accumulo.server.util.MetadataTableUtil;
 import org.apache.hadoop.io.Text;
 import org.slf4j.Logger;
@@ -148,7 +148,7 @@ public class SplitRecovery12to13 {
   }
 
   public static void splitDatafiles(Text midRow, double splitRatio,
-      Map<StoredTabletFile,FileUtil.FileInfo> firstAndLastRows,
+      Map<StoredTabletFile,Splitter.FileInfo> firstAndLastRows,
       SortedMap<StoredTabletFile,DataFileValue> datafiles,
       SortedMap<StoredTabletFile,DataFileValue> lowDatafileSizes,
       SortedMap<StoredTabletFile,DataFileValue> highDatafileSizes,
@@ -161,7 +161,7 @@ public class SplitRecovery12to13 {
 
       boolean rowsKnown = false;
 
-      FileUtil.FileInfo mfi = firstAndLastRows.get(entry.getKey());
+      Splitter.FileInfo mfi = firstAndLastRows.get(entry.getKey());
 
       if (mfi != null) {
         firstRow = mfi.getFirstRow();
diff --git a/server/base/src/test/java/org/apache/accumulo/server/util/FileInfoTest.java b/server/manager/src/test/java/org/apache/accumulo/manager/tableOps/split/FileInfoTest.java
similarity index 92%
rename from server/base/src/test/java/org/apache/accumulo/server/util/FileInfoTest.java
rename to server/manager/src/test/java/org/apache/accumulo/manager/tableOps/split/FileInfoTest.java
index a65d776e7b..5cf84ce3d0 100644
--- a/server/base/src/test/java/org/apache/accumulo/server/util/FileInfoTest.java
+++ b/server/manager/src/test/java/org/apache/accumulo/manager/tableOps/split/FileInfoTest.java
@@ -16,11 +16,11 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.accumulo.server.util;
+package org.apache.accumulo.manager.tableOps.split;
 
 import static org.junit.jupiter.api.Assertions.assertEquals;
 
-import org.apache.accumulo.server.util.FileUtil.FileInfo;
+import org.apache.accumulo.manager.split.Splitter.FileInfo;
 import org.apache.hadoop.io.Text;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
diff --git a/server/manager/src/test/java/org/apache/accumulo/manager/tableOps/split/UpdateTabletsTest.java b/server/manager/src/test/java/org/apache/accumulo/manager/tableOps/split/UpdateTabletsTest.java
index 1e9c018785..ffb776f3bf 100644
--- a/server/manager/src/test/java/org/apache/accumulo/manager/tableOps/split/UpdateTabletsTest.java
+++ b/server/manager/src/test/java/org/apache/accumulo/manager/tableOps/split/UpdateTabletsTest.java
@@ -58,7 +58,6 @@ import org.apache.accumulo.manager.Manager;
 import org.apache.accumulo.manager.split.Splitter;
 import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.metadata.ConditionalTabletMutatorImpl;
-import org.apache.accumulo.server.util.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
 import org.easymock.EasyMock;
@@ -72,8 +71,8 @@ public class UpdateTabletsTest {
         .insert();
   }
 
-  FileUtil.FileInfo newFileInfo(String start, String end) {
-    return new FileUtil.FileInfo(new Text(start), new Text(end));
+  Splitter.FileInfo newFileInfo(String start, String end) {
+    return new Splitter.FileInfo(new Text(start), new Text(end));
   }
 
   /**
