This is an automated email from the ASF dual-hosted git repository.

michaelsmith pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git

commit 8c51f72e10388b0130811a9bfb594b51099b6bb6
Author: stiga-huang <[email protected]>
AuthorDate: Sat Feb 22 20:00:56 2025 +0800

    IMPALA-13789: Defer creating Path objects in loading file metadata
    
    When loading file metadata of an HdfsTable, we create a
    org.apache.hadoop.fs.Path object for each partition dir before actually
    loading its file metadata. These Path objects have a large memory
    footprint as the underlying java.net.URI objects have extra fields
    extracted from the location string. E.g. a location string that takes
    160 bytes has a corresponding Path object that takes 704 bytes. See more
    details of this example in the JIRA description.
    
    These Path objects are used as the keys of several Maps for file
    metadata loaders. We create them before actually loading the metadata.
    This patch fixes the issue by using the location strings as the keys
    and only creating the Path objects when we actually start loading the
    file metadata of a partition.
    
    Tests:
     - Ran CORE tests
     - Analyzed the heap dump during file metadata loading, didn't see lots
       of Path objects anymore.
    
    Change-Id: I6ec1fc932eaf7c833ef6ee6cdb08bba235e38271
    Reviewed-on: http://gerrit.cloudera.org:8080/22535
    Reviewed-by: Impala Public Jenkins <[email protected]>
    Tested-by: Impala Public Jenkins <[email protected]>
---
 .../apache/impala/catalog/CatalogHmsAPIHelper.java | 14 ++++-----
 .../apache/impala/catalog/FileMetadataLoader.java  | 34 ++++++++++++----------
 .../impala/catalog/IcebergFileMetadataLoader.java  | 28 ++++++++++--------
 .../impala/catalog/ParallelFileMetadataLoader.java | 14 ++++-----
 .../impala/catalog/local/DirectMetaProvider.java   |  5 +---
 .../org/apache/impala/common/FileSystemUtil.java   | 15 ++++++++++
 .../impala/catalog/FileMetadataLoaderTest.java     | 16 +++++-----
 .../apache/impala/catalog/HdfsPartitionTest.java   |  2 +-
 8 files changed, 71 insertions(+), 57 deletions(-)

diff --git 
a/fe/src/main/java/org/apache/impala/catalog/CatalogHmsAPIHelper.java 
b/fe/src/main/java/org/apache/impala/catalog/CatalogHmsAPIHelper.java
index ec5ebda6a..bb9140854 100644
--- a/fe/src/main/java/org/apache/impala/catalog/CatalogHmsAPIHelper.java
+++ b/fe/src/main/java/org/apache/impala/catalog/CatalogHmsAPIHelper.java
@@ -503,12 +503,11 @@ public class CatalogHmsAPIHelper {
       checkCondition(tbl != null, "Table is null");
       checkCondition(tbl.getSd() != null && tbl.getSd().getLocation() != null,
           "Cannot get the location of table %s.%s", tbl.getDbName(), 
tbl.getTableName());
-      Path tblPath = new Path(tbl.getSd().getLocation());
       // since this table doesn't exist in catalogd we compute the network 
addresses
       // for the files which are being returned.
       ListMap<TNetworkAddress> hostIndex = new ListMap<>();
-      FileMetadataLoader fmLoader = new FileMetadataLoader(tblPath, true,
-          Collections.EMPTY_LIST, hostIndex, validTxnList, writeIdList);
+      FileMetadataLoader fmLoader = new 
FileMetadataLoader(tbl.getSd().getLocation(),
+          true, Collections.EMPTY_LIST, hostIndex, validTxnList, writeIdList);
       boolean success = getFileMetadata(Arrays.asList(fmLoader));
       checkCondition(success,
           "Could not load file-metadata for table %s.%s. See catalogd log for 
details",
@@ -553,10 +552,9 @@ public class CatalogHmsAPIHelper {
         checkCondition(part.getSd() != null && part.getSd().getLocation() != 
null,
             "Could not get the location for partition %s of table %s.%s",
             part.getValues(), part.getDbName(), part.getTableName());
-        Path partPath = new Path(part.getSd().getLocation());
         fileMdLoaders.put(part,
-            new FileMetadataLoader(partPath, true, Collections.EMPTY_LIST, 
hostIndex,
-                txnList, writeIdList));
+            new FileMetadataLoader(part.getSd().getLocation(), true,
+                Collections.EMPTY_LIST, hostIndex, txnList, writeIdList));
       }
       boolean success = getFileMetadata(fileMdLoaders.values());
       checkCondition(success,
@@ -597,7 +595,7 @@ public class CatalogHmsAPIHelper {
    * @return false if there were errors, else returns true.
    */
   private static boolean getFileMetadata(Collection<FileMetadataLoader> 
loaders) {
-    List<Pair<Path, Future<Void>>> futures = new ArrayList<>(loaders.size());
+    List<Pair<String, Future<Void>>> futures = new ArrayList<>(loaders.size());
     for (FileMetadataLoader fmdLoader : loaders) {
       futures.add(new Pair<>(fmdLoader.getPartDir(), 
fallbackFdLoaderPool.submit(() -> {
         fmdLoader.load();
@@ -606,7 +604,7 @@ public class CatalogHmsAPIHelper {
     }
     int numberOfErrorsToLog = 100;
     int errors = 0;
-    for (Pair<Path, Future<Void>> pair : futures) {
+    for (Pair<String, Future<Void>> pair : futures) {
       try {
         pair.second.get();
       } catch (InterruptedException | ExecutionException e) {
diff --git a/fe/src/main/java/org/apache/impala/catalog/FileMetadataLoader.java 
b/fe/src/main/java/org/apache/impala/catalog/FileMetadataLoader.java
index 6298f5bac..214811241 100644
--- a/fe/src/main/java/org/apache/impala/catalog/FileMetadataLoader.java
+++ b/fe/src/main/java/org/apache/impala/catalog/FileMetadataLoader.java
@@ -59,7 +59,7 @@ public class FileMetadataLoader {
   // at the end of load().
   public static final AtomicInteger TOTAL_TASKS = new AtomicInteger();
 
-  protected final Path partDir_;
+  protected final String partDir_;
   protected final boolean recursive_;
   protected final ImmutableMap<String, ? extends FileDescriptor> oldFdsByPath_;
   private final ListMap<TNetworkAddress> hostIndex_;
@@ -96,7 +96,7 @@ public class FileMetadataLoader {
    * @param fileFormat if non-null and equal to HdfsFileFormat.HUDI_PARQUET,
    *   this loader will filter files based on Hudi's HoodieROTablePathFilter 
method
    */
-  public FileMetadataLoader(Path partDir, boolean recursive,
+  public FileMetadataLoader(String partDir, boolean recursive,
       Iterable<? extends FileDescriptor> oldFds, ListMap<TNetworkAddress> 
hostIndex,
       @Nullable ValidTxnList validTxnList, @Nullable ValidWriteIdList writeIds,
       @Nullable HdfsFileFormat fileFormat) {
@@ -117,7 +117,7 @@ public class FileMetadataLoader {
     TOTAL_TASKS.incrementAndGet();
   }
 
-  public FileMetadataLoader(Path partDir, boolean recursive,
+  public FileMetadataLoader(String partDir, boolean recursive,
       Iterable<? extends FileDescriptor> oldFds, ListMap<TNetworkAddress> 
hostIndex,
       @Nullable ValidTxnList validTxnList, @Nullable ValidWriteIdList 
writeIds) {
     this(partDir, recursive, oldFds, hostIndex, validTxnList, writeIds, null);
@@ -158,7 +158,7 @@ public class FileMetadataLoader {
     return loadStats_;
   }
 
-  Path getPartDir() { return partDir_; }
+  String getPartDir() { return partDir_; }
 
   /**
    * Load the file descriptors, which may later be fetched using {@link 
#getLoadedFds()}.
@@ -181,9 +181,10 @@ public class FileMetadataLoader {
 
   private void loadInternal() throws CatalogException, IOException {
     Preconditions.checkState(loadStats_ == null, "already loaded");
+    Path partPath = FileSystemUtil.createFullyQualifiedPath(new 
Path(partDir_));
     loadStats_ = new LoadStats(partDir_);
     fileMetadataStats_ = new FileMetadataStats();
-    FileSystem fs = partDir_.getFileSystem(CONF);
+    FileSystem fs = partPath.getFileSystem(CONF);
 
     // If we don't have any prior FDs from which we could re-use old block 
location info,
     // we'll need to fetch info for every returned file. In this case we can 
inline
@@ -200,7 +201,7 @@ public class FileMetadataLoader {
         listWithLocations ? " with eager location-fetching" : "", partDir_);
     LOG.trace(msg);
     try (ThreadNameAnnotator tna = new ThreadNameAnnotator(msg)) {
-      List<FileStatus> fileStatuses = getFileStatuses(fs, listWithLocations);
+      List<FileStatus> fileStatuses = getFileStatuses(fs, partPath, 
listWithLocations);
 
       loadedFds_ = new ArrayList<>();
       if (fileStatuses == null) return;
@@ -208,7 +209,7 @@ public class FileMetadataLoader {
       Reference<Long> numUnknownDiskIds = new Reference<>(0L);
 
       if (writeIds_ != null) {
-        fileStatuses = AcidUtils.filterFilesForAcidState(fileStatuses, 
partDir_,
+        fileStatuses = AcidUtils.filterFilesForAcidState(fileStatuses, 
partPath,
             validTxnList_, writeIds_, loadStats_);
       }
 
@@ -227,7 +228,7 @@ public class FileMetadataLoader {
         }
 
         FileDescriptor fd = getFileDescriptor(fs, listWithLocations, 
numUnknownDiskIds,
-            fileStatus);
+            fileStatus, partPath);
         loadedFds_.add(Preconditions.checkNotNull(fd));
         fileMetadataStats_.accumulate(fd);
       }
@@ -257,8 +258,9 @@ public class FileMetadataLoader {
    * Return fd created by the given fileStatus or from the 
cache(oldFdsByPath_).
    */
   protected FileDescriptor getFileDescriptor(FileSystem fs, boolean 
listWithLocations,
-      Reference<Long> numUnknownDiskIds, FileStatus fileStatus) throws 
IOException {
-    String relPath = FileSystemUtil.relativizePath(fileStatus.getPath(), 
partDir_);
+      Reference<Long> numUnknownDiskIds, FileStatus fileStatus, Path partPath)
+      throws IOException {
+    String relPath = FileSystemUtil.relativizePath(fileStatus.getPath(), 
partPath);
     FileDescriptor fd = oldFdsByPath_.get(relPath);
     if (listWithLocations || forceRefreshLocations || fd == null ||
         fd.isChanged(fileStatus)) {
@@ -273,15 +275,15 @@ public class FileMetadataLoader {
   /**
    * Return located file status list when listWithLocations is true.
    */
-  protected List<FileStatus> getFileStatuses(FileSystem fs, boolean 
listWithLocations)
-      throws IOException {
+  protected List<FileStatus> getFileStatuses(FileSystem fs, Path partPath,
+      boolean listWithLocations) throws IOException {
     RemoteIterator<? extends FileStatus> fileStatuses;
     if (listWithLocations) {
       fileStatuses = FileSystemUtil
-          .listFiles(fs, partDir_, recursive_, debugAction_);
+          .listFiles(fs, partPath, recursive_, debugAction_);
     } else {
       fileStatuses = FileSystemUtil
-          .listStatus(fs, partDir_, recursive_, debugAction_);
+          .listStatus(fs, partPath, recursive_, debugAction_);
       // TODO(todd): we could look at the result of listing without locations, 
and if
       // we see that a substantial number of the files have changed, it may be 
better
       // to go back and re-list with locations vs doing an RPC per file.
@@ -349,8 +351,8 @@ public class FileMetadataLoader {
 
   // File/Block metadata loading stats for a single HDFS path.
   public static class LoadStats {
-    private final Path partDir_;
-    LoadStats(Path partDir) {
+    private final String partDir_;
+    LoadStats(String partDir) {
       this.partDir_ = Preconditions.checkNotNull(partDir);
     }
     /** Number of files skipped because they pertain to an uncommitted ACID 
transaction */
diff --git 
a/fe/src/main/java/org/apache/impala/catalog/IcebergFileMetadataLoader.java 
b/fe/src/main/java/org/apache/impala/catalog/IcebergFileMetadataLoader.java
index ac56f8d12..1b258467e 100644
--- a/fe/src/main/java/org/apache/impala/catalog/IcebergFileMetadataLoader.java
+++ b/fe/src/main/java/org/apache/impala/catalog/IcebergFileMetadataLoader.java
@@ -91,8 +91,8 @@ public class IcebergFileMetadataLoader extends 
FileMetadataLoader {
       Iterable<IcebergFileDescriptor> oldFds, ListMap<TNetworkAddress> 
hostIndex,
       GroupedContentFiles icebergFiles, boolean 
requiresDataFilesInTableLocation,
       int newFilesThresholdParam) {
-    super(FileSystemUtil.createFullyQualifiedPath(new 
Path(iceTbl.location())), true,
-        oldFds, hostIndex, null, null, HdfsFileFormat.ICEBERG);
+    super(iceTbl.location(), true, oldFds, hostIndex, null, null,
+        HdfsFileFormat.ICEBERG);
     iceTbl_ = iceTbl;
     icebergFiles_ = icebergFiles;
     requiresDataFilesInTableLocation_ = requiresDataFilesInTableLocation;
@@ -125,17 +125,18 @@ public class IcebergFileMetadataLoader extends 
FileMetadataLoader {
   }
 
   private void loadInternal() throws CatalogException, IOException {
+    Path partPath = FileSystemUtil.createFullyQualifiedPath(new 
Path(partDir_));
     loadedFds_ = new ArrayList<>();
     loadStats_ = new LoadStats(partDir_);
     fileMetadataStats_ = new FileMetadataStats();
 
     // Process the existing Fd ContentFile and return the newly added 
ContentFile
-    Iterable<ContentFile<?>> newContentFiles = loadContentFilesWithOldFds();
+    Iterable<ContentFile<?>> newContentFiles = 
loadContentFilesWithOldFds(partPath);
     // Iterate through all the newContentFiles, determine if StorageIds are 
supported,
     // and use different handling methods accordingly.
     // This considers that different ContentFiles are on different FileSystems
     List<Pair<FileSystem, ContentFile<?>>> filesSupportsStorageIds = 
Lists.newArrayList();
-    FileSystem fsForTable = FileSystemUtil.getFileSystemForPath(partDir_);
+    FileSystem fsForTable = FileSystemUtil.getFileSystemForPath(partPath);
     FileSystem defaultFs = FileSystemUtil.getDefaultFileSystem();
     for (ContentFile<?> contentFile : newContentFiles) {
       FileSystem fsForPath = fsForTable;
@@ -151,7 +152,7 @@ public class IcebergFileMetadataLoader extends 
FileMetadataLoader {
       if (FileSystemUtil.supportsStorageIds(fsForPath)) {
         filesSupportsStorageIds.add(Pair.create(fsForPath, contentFile));
       } else {
-        IcebergFileDescriptor fd = createFd(fsForPath, contentFile, null, 
null);
+        IcebergFileDescriptor fd = createFd(fsForPath, contentFile, null, 
partPath, null);
         loadedFds_.add(fd);
         fileMetadataStats_.accumulate(fd);
         ++loadStats_.loadedFiles;
@@ -172,7 +173,7 @@ public class IcebergFileMetadataLoader extends 
FileMetadataLoader {
           new Path(contentFileInfo.getSecond().path().toString()));
       FileStatus stat = nameToFileStatus.get(path);
       IcebergFileDescriptor fd = createFd(contentFileInfo.getFirst(),
-          contentFileInfo.getSecond(), stat, numUnknownDiskIds);
+          contentFileInfo.getSecond(), stat, partPath, numUnknownDiskIds);
       loadedFds_.add(fd);
       fileMetadataStats_.accumulate(fd);
     }
@@ -192,13 +193,14 @@ public class IcebergFileMetadataLoader extends 
FileMetadataLoader {
    *  Iceberg tables are a collection of immutable, uniquely identifiable data 
files,
    *  which means we can safely reuse the old FDs.
    */
-  private Iterable<ContentFile<?>> loadContentFilesWithOldFds() throws 
IOException {
+  private Iterable<ContentFile<?>> loadContentFilesWithOldFds(Path partPath)
+      throws IOException {
     if (forceRefreshLocations || oldFdsByPath_.isEmpty()) {
       return icebergFiles_.getAllContentFiles();
     }
     List<ContentFile<?>> newContentFiles = Lists.newArrayList();
     for (ContentFile<?> contentFile : icebergFiles_.getAllContentFiles()) {
-      FileDescriptor fd = getOldFd(contentFile);
+      FileDescriptor fd = getOldFd(contentFile, partPath);
       if (fd == null) {
         newContentFiles.add(contentFile);
       } else {
@@ -211,7 +213,8 @@ public class IcebergFileMetadataLoader extends 
FileMetadataLoader {
   }
 
   private IcebergFileDescriptor createFd(FileSystem fs, ContentFile<?> 
contentFile,
-      FileStatus stat, Reference<Long> numUnknownDiskIds) throws IOException {
+      FileStatus stat, Path partPath, Reference<Long> numUnknownDiskIds)
+      throws IOException {
     if (stat == null) {
       Path fileLoc = FileSystemUtil.createFullyQualifiedPath(
           new Path(contentFile.path().toString()));
@@ -224,7 +227,7 @@ public class IcebergFileMetadataLoader extends 
FileMetadataLoader {
     }
 
     String absPath = null;
-    String relPath = FileSystemUtil.relativizePathNoThrow(stat.getPath(), 
partDir_);
+    String relPath = FileSystemUtil.relativizePathNoThrow(stat.getPath(), 
partPath);
     if (relPath == null) {
       if (requiresDataFilesInTableLocation_) {
         throw new IOException(String.format("Failed to load Iceberg datafile 
%s, because "
@@ -299,10 +302,11 @@ public class IcebergFileMetadataLoader extends 
FileMetadataLoader {
     return null;
   }
 
-  IcebergFileDescriptor getOldFd(ContentFile<?> contentFile) throws 
IOException {
+  IcebergFileDescriptor getOldFd(ContentFile<?> contentFile, Path partPath)
+      throws IOException {
     Path contentFilePath = FileSystemUtil.createFullyQualifiedPath(
         new Path(contentFile.path().toString()));
-    String lookupPath = FileSystemUtil.relativizePathNoThrow(contentFilePath, 
partDir_);
+    String lookupPath = FileSystemUtil.relativizePathNoThrow(contentFilePath, 
partPath);
     if (lookupPath == null) {
       if (requiresDataFilesInTableLocation_) {
         throw new IOException(String.format("Failed to load Iceberg datafile 
%s, because "
diff --git 
a/fe/src/main/java/org/apache/impala/catalog/ParallelFileMetadataLoader.java 
b/fe/src/main/java/org/apache/impala/catalog/ParallelFileMetadataLoader.java
index 6f35f146b..385f3298a 100644
--- a/fe/src/main/java/org/apache/impala/catalog/ParallelFileMetadataLoader.java
+++ b/fe/src/main/java/org/apache/impala/catalog/ParallelFileMetadataLoader.java
@@ -71,8 +71,8 @@ public class ParallelFileMetadataLoader {
   private static final int MAX_PATH_METADATA_LOADING_ERRORS_TO_LOG = 100;
 
   private final String logPrefix_;
-  private final Map<Path, FileMetadataLoader> loaders_;
-  private final Map<Path, List<HdfsPartition.Builder>> partsByPath_;
+  private final Map<String, FileMetadataLoader> loaders_;
+  private final Map<String, List<HdfsPartition.Builder>> partsByPath_;
   private final FileSystem fs_;
 
   public ParallelFileMetadataLoader(FileSystem fs,
@@ -89,13 +89,12 @@ public class ParallelFileMetadataLoader {
     // path).
     partsByPath_ = Maps.newHashMap();
     for (HdfsPartition.Builder p : partBuilders) {
-      Path partPath = FileSystemUtil.createFullyQualifiedPath(new 
Path(p.getLocation()));
-      partsByPath_.computeIfAbsent(partPath, (path) -> new ArrayList<>())
+      partsByPath_.computeIfAbsent(p.getLocation(), (path) -> new 
ArrayList<>())
           .add(p);
     }
     // Create a FileMetadataLoader for each path.
     loaders_ = Maps.newHashMap();
-    for (Map.Entry<Path, List<HdfsPartition.Builder>> e : 
partsByPath_.entrySet()) {
+    for (Map.Entry<String, List<HdfsPartition.Builder>> e : 
partsByPath_.entrySet()) {
       List<FileDescriptor> oldFds = e.getValue().get(0).getFileDescriptors();
       FileMetadataLoader loader;
       HdfsFileFormat format = e.getValue().get(0).getFileFormat();
@@ -124,9 +123,8 @@ public class ParallelFileMetadataLoader {
     loadInternal();
 
     // Store the loaded FDs into the partitions.
-    for (Map.Entry<Path, List<HdfsPartition.Builder>> e : 
partsByPath_.entrySet()) {
-      Path p = e.getKey();
-      FileMetadataLoader loader = loaders_.get(p);
+    for (Map.Entry<String, List<HdfsPartition.Builder>> e : 
partsByPath_.entrySet()) {
+      FileMetadataLoader loader = loaders_.get(e.getKey());
 
       for (HdfsPartition.Builder partBuilder : e.getValue()) {
         // Checks if we can reuse the old file descriptors. Partition builders 
in the list
diff --git 
a/fe/src/main/java/org/apache/impala/catalog/local/DirectMetaProvider.java 
b/fe/src/main/java/org/apache/impala/catalog/local/DirectMetaProvider.java
index 5cc14a120..e52b98203 100644
--- a/fe/src/main/java/org/apache/impala/catalog/local/DirectMetaProvider.java
+++ b/fe/src/main/java/org/apache/impala/catalog/local/DirectMetaProvider.java
@@ -309,7 +309,6 @@ class DirectMetaProvider implements MetaProvider {
   private Map<String, PartitionMetadata> loadUnpartitionedPartition(
       TableMetaRefImpl table, List<PartitionRef> partitionRefs,
       ListMap<TNetworkAddress> hostIndex) throws CatalogException {
-    //TODO(IMPALA-9042): Remove "throws MetaException"
     Preconditions.checkArgument(partitionRefs.size() == 1,
         "Expected exactly one partition to load for unpartitioned table");
     PartitionRef ref = partitionRefs.get(0);
@@ -381,13 +380,11 @@ class DirectMetaProvider implements MetaProvider {
   private FileMetadataLoader loadFileMetadata(String fullTableName,
       String partName, Partition msPartition, ListMap<TNetworkAddress> 
hostIndex)
         throws CatalogException {
-    //TODO(IMPALA-9042): Remove "throws MetaException"
-    Path partDir = new Path(msPartition.getSd().getLocation());
     // TODO(todd): The table property to disable recursive loading is not 
supported
     // by this code path. However, DirectMetaProvider is not yet a supported 
feature.
     // TODO(todd) this code path would have to change to handle ACID tables -- 
we don't
     // have the write ID list passed down at this point in the code.
-    FileMetadataLoader fml = new FileMetadataLoader(partDir,
+    FileMetadataLoader fml = new 
FileMetadataLoader(msPartition.getSd().getLocation(),
         /* recursive= */BackendConfig.INSTANCE.recursivelyListPartitions(),
         /* oldFds= */Collections.emptyList(),
         hostIndex,
diff --git a/fe/src/main/java/org/apache/impala/common/FileSystemUtil.java 
b/fe/src/main/java/org/apache/impala/common/FileSystemUtil.java
index e17880db1..4ea9f6b78 100644
--- a/fe/src/main/java/org/apache/impala/common/FileSystemUtil.java
+++ b/fe/src/main/java/org/apache/impala/common/FileSystemUtil.java
@@ -736,6 +736,21 @@ public class FileSystemUtil {
     return p.getFileSystem(CONF);
   }
 
+  /**
+   * Returns the FileSystem object for a path string (might not be 
fully-qualified).
+   * I.e. a wrapper for getFileSystemForPath(createFullyQualifiedPath(new 
Path(p))).
+   */
+  public static FileSystem getFileSystemForPath(String p) throws IOException {
+    Path location = new Path(p);
+    URI defaultUri = FileSystem.getDefaultUri(CONF);
+    URI locationUri = location.toUri();
+    if (locationUri.getScheme() == null ||
+        locationUri.getScheme().equalsIgnoreCase(defaultUri.getScheme())) {
+      return getDefaultFileSystem();
+    }
+    return location.getFileSystem(CONF);
+  }
+
   public static DistributedFileSystem getDistributedFileSystem() throws 
IOException {
     FileSystem fs = getDefaultFileSystem();
     Preconditions.checkState(fs instanceof DistributedFileSystem);
diff --git 
a/fe/src/test/java/org/apache/impala/catalog/FileMetadataLoaderTest.java 
b/fe/src/test/java/org/apache/impala/catalog/FileMetadataLoaderTest.java
index a645c1081..bff749c34 100644
--- a/fe/src/test/java/org/apache/impala/catalog/FileMetadataLoaderTest.java
+++ b/fe/src/test/java/org/apache/impala/catalog/FileMetadataLoaderTest.java
@@ -49,7 +49,7 @@ public class FileMetadataLoaderTest {
   public void testRecursiveLoading() throws IOException, CatalogException {
     //TODO(IMPALA-9042): Remove "throws CatalogException"
     ListMap<TNetworkAddress> hostIndex = new ListMap<>();
-    Path tablePath = new 
Path("hdfs://localhost:20500/test-warehouse/alltypes/");
+    String tablePath = "hdfs://localhost:20500/test-warehouse/alltypes/";
     FileMetadataLoader fml = new FileMetadataLoader(tablePath, /* 
recursive=*/true,
         /* oldFds = */Collections.emptyList(), hostIndex, null, null);
     fml.load();
@@ -72,9 +72,9 @@ public class FileMetadataLoaderTest {
     assertEquals(fml.getLoadedFds(), refreshFml.getLoadedFds());
 
     // Touch a file and make sure that we reload locations for that file.
-    FileSystem fs = tablePath.getFileSystem(new Configuration());
     FileDescriptor fd = fml.getLoadedFds().get(0);
     Path filePath = new Path(tablePath, fd.getRelativePath());
+    FileSystem fs = filePath.getFileSystem(new Configuration());
     fs.setTimes(filePath, fd.getModificationTime() + 1, /* atime= */-1);
 
     refreshFml = new FileMetadataLoader(tablePath, /* recursive=*/true,
@@ -87,7 +87,7 @@ public class FileMetadataLoaderTest {
   public void testHudiParquetLoading() throws IOException, CatalogException {
     //TODO(IMPALA-9042): Remove "throws CatalogException"
     ListMap<TNetworkAddress> hostIndex = new ListMap<>();
-    Path tablePath = new 
Path("hdfs://localhost:20500/test-warehouse/hudi_parquet/");
+    String tablePath = "hdfs://localhost:20500/test-warehouse/hudi_parquet/";
     FileMetadataLoader fml = new FileMetadataLoader(tablePath, /* 
recursive=*/true,
         /* oldFds = */ Collections.emptyList(), hostIndex, null, null,
         HdfsFileFormat.HUDI_PARQUET);
@@ -358,8 +358,7 @@ public class FileMetadataLoaderTest {
     ListMap<TNetworkAddress> hostIndex = new ListMap<>();
     ValidWriteIdList writeIds =
         MetastoreShim.getValidWriteIdListFromString(validWriteIdString);
-    Path tablePath = new Path(path);
-    FileMetadataLoader fml = new FileMetadataLoader(tablePath, /* 
recursive=*/true,
+    FileMetadataLoader fml = new FileMetadataLoader(path, /* recursive=*/true,
         /* oldFds = */ Collections.emptyList(), hostIndex, new 
ValidReadTxnList(""),
         writeIds, format);
     fml.load();
@@ -393,7 +392,7 @@ public class FileMetadataLoaderTest {
     //TODO(IMPALA-9042): Remove "throws CatalogException"
     for (boolean recursive : ImmutableList.of(false, true)) {
       ListMap<TNetworkAddress> hostIndex = new ListMap<>();
-      Path tablePath = new 
Path("hdfs://localhost:20500/test-warehouse/does-not-exist/");
+      String tablePath = 
"hdfs://localhost:20500/test-warehouse/does-not-exist/";
       FileMetadataLoader fml = new FileMetadataLoader(tablePath, recursive,
           /* oldFds = */Collections.emptyList(), hostIndex, null, null);
       fml.load();
@@ -405,7 +404,8 @@ public class FileMetadataLoaderTest {
   @Test
   public void testSkipHiddenDirectories() throws IOException, CatalogException 
{
     Path sourcePath = new 
Path("hdfs://localhost:20500/test-warehouse/alltypes/");
-    Path tmpTestPath = new 
Path("hdfs://localhost:20500/tmp/test-filemetadata-loader");
+    String tmpTestPathStr = 
"hdfs://localhost:20500/tmp/test-filemetadata-loader";
+    Path tmpTestPath = new Path(tmpTestPathStr);
     Configuration conf = new Configuration();
     FileSystem dstFs = tmpTestPath.getFileSystem(conf);
     FileSystem srcFs = sourcePath.getFileSystem(conf);
@@ -421,7 +421,7 @@ public class FileMetadataLoaderTest {
     dstFs.createNewFile(new Path(hiveStaging, "tmp-stats"));
     dstFs.createNewFile(new Path(hiveStaging, ".hidden-tmp-stats"));
 
-    FileMetadataLoader fml = new FileMetadataLoader(tmpTestPath, true,
+    FileMetadataLoader fml = new FileMetadataLoader(tmpTestPathStr, true,
         Collections.emptyList(), new ListMap <>(), null, null);
     fml.load();
     assertEquals(24, fml.getStats().loadedFiles);
diff --git a/fe/src/test/java/org/apache/impala/catalog/HdfsPartitionTest.java 
b/fe/src/test/java/org/apache/impala/catalog/HdfsPartitionTest.java
index f4a0fb21d..ef2db6aec 100644
--- a/fe/src/test/java/org/apache/impala/catalog/HdfsPartitionTest.java
+++ b/fe/src/test/java/org/apache/impala/catalog/HdfsPartitionTest.java
@@ -147,7 +147,7 @@ public class HdfsPartitionTest {
   @Test
   public void testCloneWithNewHostIndex() throws Exception {
     // Fetch some metadata from a directory in HDFS.
-    Path p = new Path("hdfs://localhost:20500/test-warehouse/schemas");
+    String p = "hdfs://localhost:20500/test-warehouse/schemas";
     ListMap<TNetworkAddress> origIndex = new ListMap<>();
     FileMetadataLoader fml = new FileMetadataLoader(p, /* recursive= */false,
         Collections.emptyList(), origIndex, /*validTxnList=*/null, 
/*writeIds=*/null);

Reply via email to