This is an automated email from the ASF dual-hosted git repository.

stigahuang pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git

commit 9c5ed44d51a115e7c2fb5b05c3ab33f2a88bd203
Author: Gabor Kaszab <[email protected]>
AuthorDate: Mon Feb 3 15:40:58 2025 +0100

    IMPALA-13739: Part1: Move FileDescriptor, FileBlock and BlockReplica
    
    Move the above classes out of HdfsPartition into their own top-level classes.
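    
    Callers now import these classes directly from org.apache.impala.catalog
    instead of referencing them as nested classes of HdfsPartition. A minimal
    illustrative sketch of the change for calling code (import names taken
    from the import updates in the diff below):
    
      // Before: nested class inside HdfsPartition
      import org.apache.impala.catalog.HdfsPartition.FileDescriptor;
      // After: standalone top-level class in the same package
      import org.apache.impala.catalog.FileDescriptor;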
    
    Change-Id: I2eb713619d4d231cec65a58255c3ced7b12d1880
    Reviewed-on: http://gerrit.cloudera.org:8080/22441
    Reviewed-by: Gabor Kaszab <[email protected]>
    Tested-by: Gabor Kaszab <[email protected]>
---
 .../apache/impala/analysis/ComputeStatsStmt.java   |   2 +-
 .../org/apache/impala/analysis/OptimizeStmt.java   |   9 +-
 .../java/org/apache/impala/analysis/TableRef.java  |   8 +-
 .../org/apache/impala/catalog/BlockReplica.java    |  59 +++
 .../apache/impala/catalog/CatalogHmsAPIHelper.java |   1 -
 .../impala/catalog/CatalogServiceCatalog.java      |   1 -
 .../org/apache/impala/catalog/FeCatalogUtils.java  |   1 -
 .../org/apache/impala/catalog/FeFsPartition.java   |   1 -
 .../java/org/apache/impala/catalog/FeFsTable.java  |   1 -
 .../org/apache/impala/catalog/FeIcebergTable.java  |  10 +-
 .../java/org/apache/impala/catalog/FileBlock.java  | 215 +++++++++
 .../org/apache/impala/catalog/FileDescriptor.java  | 367 ++++++++++++++
 .../apache/impala/catalog/FileMetadataLoader.java  |   1 -
 .../org/apache/impala/catalog/HdfsPartition.java   | 531 +--------------------
 .../java/org/apache/impala/catalog/HdfsTable.java  |   4 +-
 .../impala/catalog/IcebergContentFileStore.java    |   1 -
 .../apache/impala/catalog/IcebergDeleteTable.java  |   1 -
 .../impala/catalog/IcebergEqualityDeleteTable.java |   1 -
 .../impala/catalog/IcebergFileMetadataLoader.java  |   3 -
 .../impala/catalog/IcebergPositionDeleteTable.java |   1 -
 .../impala/catalog/ParallelFileMetadataLoader.java |   1 -
 .../impala/catalog/local/CatalogdMetaProvider.java |   2 +-
 .../impala/catalog/local/DirectMetaProvider.java   |   2 +-
 .../impala/catalog/local/LocalFsPartition.java     |   2 +-
 .../apache/impala/catalog/local/LocalFsTable.java  |   2 +-
 .../apache/impala/catalog/local/MetaProvider.java  |   2 +-
 .../catalog/metastore/CatalogHmsClientUtils.java   |   2 +-
 .../org/apache/impala/planner/HdfsScanNode.java    |   4 +-
 .../org/apache/impala/planner/IcebergScanNode.java |   2 +-
 .../apache/impala/planner/IcebergScanPlanner.java  |   2 +-
 .../java/org/apache/impala/util/AcidUtils.java     |   2 +-
 .../impala/catalog/FileMetadataLoaderTest.java     |   1 -
 .../apache/impala/catalog/HdfsPartitionTest.java   |   1 -
 .../catalog/IcebergContentFileStoreTest.java       |   1 -
 .../catalog/PartialCatalogInfoWriteIdTest.java     |   4 +-
 .../events/MetastoreEventsProcessorTest.java       |   9 +-
 .../catalog/local/CatalogdMetaProviderTest.java    |   3 +-
 .../impala/catalog/local/LocalCatalogTest.java     |   2 +-
 .../metastore/CatalogHmsFileMetadataTest.java      |   4 +-
 .../metastore/EnableCatalogdHmsCacheFlagTest.java  |   2 +-
 .../org/apache/impala/planner/ExplainTest.java     |   5 +-
 .../apache/impala/testutil/BlockIdGenerator.java   |   2 +-
 42 files changed, 690 insertions(+), 585 deletions(-)

diff --git a/fe/src/main/java/org/apache/impala/analysis/ComputeStatsStmt.java b/fe/src/main/java/org/apache/impala/analysis/ComputeStatsStmt.java
index 99bb77c85..f1e1e4116 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ComputeStatsStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ComputeStatsStmt.java
@@ -37,8 +37,8 @@ import org.apache.impala.catalog.FeFsTable;
 import org.apache.impala.catalog.FeHBaseTable;
 import org.apache.impala.catalog.FeIcebergTable;
 import org.apache.impala.catalog.FeTable;
+import org.apache.impala.catalog.FileDescriptor;
 import org.apache.impala.catalog.HdfsFileFormat;
-import org.apache.impala.catalog.HdfsPartition.FileDescriptor;
 import org.apache.impala.catalog.HdfsTable;
 import org.apache.impala.catalog.PartitionStatsUtil;
 import org.apache.impala.catalog.Type;
diff --git a/fe/src/main/java/org/apache/impala/analysis/OptimizeStmt.java b/fe/src/main/java/org/apache/impala/analysis/OptimizeStmt.java
index 6fc16892c..3fecd954b 100644
--- a/fe/src/main/java/org/apache/impala/analysis/OptimizeStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/OptimizeStmt.java
@@ -23,6 +23,7 @@ import org.apache.iceberg.DataFile;
 import org.apache.impala.authorization.Privilege;
 import org.apache.impala.catalog.Column;
 import org.apache.impala.catalog.FeIcebergTable;
+import org.apache.impala.catalog.FileDescriptor;
 import org.apache.impala.catalog.HdfsPartition;
 import org.apache.impala.catalog.IcebergContentFileStore;
 import org.apache.impala.catalog.iceberg.GroupedContentFiles;
@@ -219,7 +220,7 @@ public class OptimizeStmt extends DmlStatementBase {
         mode_ = filterResult.getOptimizationMode();
 
         if (mode_ == TIcebergOptimizationMode.PARTIAL) {
-          List<HdfsPartition.FileDescriptor> selectedDataFilesWithoutDeletes =
+          List<FileDescriptor> selectedDataFilesWithoutDeletes =
               dataFilesWithoutDeletesToFileDescriptors(
                  filterResult.getSelectedFilesWithoutDeletes(), iceTable);
           tableRef_.setSelectedDataFilesForOptimize(selectedDataFilesWithoutDeletes);
@@ -231,7 +232,7 @@ public class OptimizeStmt extends DmlStatementBase {
     }
   }
 
-  private List<HdfsPartition.FileDescriptor> dataFilesWithoutDeletesToFileDescriptors(
+  private List<FileDescriptor> dataFilesWithoutDeletesToFileDescriptors(
       List<DataFile> contentFiles, FeIcebergTable iceTable)
       throws IOException, ImpalaRuntimeException {
     GroupedContentFiles selectedContentFiles = new GroupedContentFiles();
@@ -241,8 +242,8 @@ public class OptimizeStmt extends DmlStatementBase {
     return selectedFiles.getDataFilesWithoutDeletes();
   }
 
-  private void collectAbsolutePaths(List<HdfsPartition.FileDescriptor> selectedFiles) {
-    for (HdfsPartition.FileDescriptor fileDesc : selectedFiles) {
+  private void collectAbsolutePaths(List<FileDescriptor> selectedFiles) {
+    for (FileDescriptor fileDesc : selectedFiles) {
       org.apache.hadoop.fs.Path path = new org.apache.hadoop.fs.Path(
           fileDesc.getAbsolutePath(((FeIcebergTable) table_).getHdfsBaseDir()));
       selectedIcebergFilePaths_.add(path.toUri().toString());
diff --git a/fe/src/main/java/org/apache/impala/analysis/TableRef.java b/fe/src/main/java/org/apache/impala/analysis/TableRef.java
index d46c82cba..7a6160058 100644
--- a/fe/src/main/java/org/apache/impala/analysis/TableRef.java
+++ b/fe/src/main/java/org/apache/impala/analysis/TableRef.java
@@ -33,6 +33,7 @@ import org.apache.impala.catalog.Column;
 import org.apache.impala.catalog.FeFsTable;
 import org.apache.impala.catalog.FeKuduTable;
 import org.apache.impala.catalog.FeTable;
+import org.apache.impala.catalog.FileDescriptor;
 import org.apache.impala.catalog.HdfsPartition;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.planner.JoinNode.DistributionMode;
@@ -165,7 +166,7 @@ public class TableRef extends StmtNode {
 
   // Iceberg data files without deletes selected for OPTIMIZE from this table ref.
   // Used only in PARTIAL optimization mode, otherwise it is null.
-  private List<HdfsPartition.FileDescriptor> selectedDataFilesWithoutDeletesForOptimize_;
+  private List<FileDescriptor> selectedDataFilesWithoutDeletesForOptimize_;
 
   // END: Members that need to be reset()
   /////////////////////////////////////////
@@ -850,12 +851,11 @@ public class TableRef extends StmtNode {
     return res;
   }
 
-  public void setSelectedDataFilesForOptimize(
-      List<HdfsPartition.FileDescriptor> fileDescs) {
+  public void setSelectedDataFilesForOptimize(List<FileDescriptor> fileDescs) {
     selectedDataFilesWithoutDeletesForOptimize_ = fileDescs;
   }
 
-  public List<HdfsPartition.FileDescriptor> getSelectedDataFilesForOptimize() {
+  public List<FileDescriptor> getSelectedDataFilesForOptimize() {
     return selectedDataFilesWithoutDeletesForOptimize_;
   }
 
diff --git a/fe/src/main/java/org/apache/impala/catalog/BlockReplica.java b/fe/src/main/java/org/apache/impala/catalog/BlockReplica.java
new file mode 100644
index 000000000..4538a9d9e
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/catalog/BlockReplica.java
@@ -0,0 +1,59 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.impala.catalog;
+
+import com.google.common.base.Preconditions;
+
+import org.apache.impala.thrift.TNetworkAddress;
+
+/**
+ * Represents the metadata of a single block replica.
+ */
+public class BlockReplica {
+  private final boolean isCached_;
+  private final short hostIdx_;
+
+  /**
+   * Creates a BlockReplica given a host ID/index and a flag specifying whether this
+   * replica is cached. Host IDs are assigned when loading the block metadata in
+   * HdfsTable.
+   */
+  public BlockReplica(short hostIdx, boolean isCached) {
+    hostIdx_ = hostIdx;
+    isCached_ = isCached;
+  }
+
+  /**
+   * Parses the location (an ip address:port string) of the replica and returns a
+   * TNetworkAddress with this information, or null if parsing fails.
+   */
+  public static TNetworkAddress parseLocation(String location) {
+    Preconditions.checkNotNull(location);
+    String[] ip_port = location.split(":");
+    if (ip_port.length != 2) return null;
+    try {
+      return CatalogInterners.internNetworkAddress(
+          new TNetworkAddress(ip_port[0], Integer.parseInt(ip_port[1])));
+    } catch (NumberFormatException e) {
+      return null;
+    }
+  }
+
+  public boolean isCached() { return isCached_; }
+  public short getHostIdx() { return hostIdx_; }
+}
\ No newline at end of file
diff --git a/fe/src/main/java/org/apache/impala/catalog/CatalogHmsAPIHelper.java b/fe/src/main/java/org/apache/impala/catalog/CatalogHmsAPIHelper.java
index 9dd7fdf6d..ec5ebda6a 100644
--- a/fe/src/main/java/org/apache/impala/catalog/CatalogHmsAPIHelper.java
+++ b/fe/src/main/java/org/apache/impala/catalog/CatalogHmsAPIHelper.java
@@ -65,7 +65,6 @@ import org.apache.hadoop.hive.metastore.api.PartitionsByExprResult;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.impala.analysis.TableName;
-import org.apache.impala.catalog.HdfsPartition.FileDescriptor;
 import org.apache.impala.catalog.metastore.CatalogMetastoreServiceHandler;
 import org.apache.impala.catalog.metastore.HmsApiNameEnum;
 import org.apache.impala.common.Pair;
diff --git a/fe/src/main/java/org/apache/impala/catalog/CatalogServiceCatalog.java b/fe/src/main/java/org/apache/impala/catalog/CatalogServiceCatalog.java
index cb1e31eb9..273497062 100644
--- a/fe/src/main/java/org/apache/impala/catalog/CatalogServiceCatalog.java
+++ b/fe/src/main/java/org/apache/impala/catalog/CatalogServiceCatalog.java
@@ -70,7 +70,6 @@ import org.apache.impala.authorization.AuthorizationDelta;
 import org.apache.impala.authorization.AuthorizationManager;
 import org.apache.impala.authorization.AuthorizationPolicy;
 import org.apache.impala.catalog.FeFsTable.Utils;
-import org.apache.impala.catalog.HdfsPartition.FileDescriptor;
 import org.apache.impala.catalog.MetaStoreClientPool.MetaStoreClient;
 import org.apache.impala.catalog.events.ExternalEventsProcessor;
 import org.apache.impala.catalog.events.MetastoreEvents.EventFactoryForSyncToLatestEvent;
diff --git a/fe/src/main/java/org/apache/impala/catalog/FeCatalogUtils.java b/fe/src/main/java/org/apache/impala/catalog/FeCatalogUtils.java
index 833234523..9b9bc20cb 100644
--- a/fe/src/main/java/org/apache/impala/catalog/FeCatalogUtils.java
+++ b/fe/src/main/java/org/apache/impala/catalog/FeCatalogUtils.java
@@ -37,7 +37,6 @@ import org.apache.impala.analysis.NullLiteral;
 import org.apache.impala.analysis.PartitionKeyValue;
 import org.apache.impala.analysis.ToSqlUtils;
 import org.apache.impala.catalog.CatalogObject.ThriftObjectType;
-import org.apache.impala.catalog.HdfsPartition.FileDescriptor;
 import org.apache.impala.catalog.local.CatalogdMetaProvider;
 import org.apache.impala.catalog.local.LocalCatalog;
 import org.apache.impala.catalog.local.LocalFsTable;
diff --git a/fe/src/main/java/org/apache/impala/catalog/FeFsPartition.java b/fe/src/main/java/org/apache/impala/catalog/FeFsPartition.java
index 8ec49bb29..bcec2a634 100644
--- a/fe/src/main/java/org/apache/impala/catalog/FeFsPartition.java
+++ b/fe/src/main/java/org/apache/impala/catalog/FeFsPartition.java
@@ -26,7 +26,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.impala.analysis.LiteralExpr;
-import org.apache.impala.catalog.HdfsPartition.FileDescriptor;
 import org.apache.impala.common.FileSystemUtil;
 import org.apache.impala.thrift.TAccessLevel;
 import org.apache.impala.thrift.THdfsPartitionLocation;
diff --git a/fe/src/main/java/org/apache/impala/catalog/FeFsTable.java b/fe/src/main/java/org/apache/impala/catalog/FeFsTable.java
index e5853251f..8deda66fb 100644
--- a/fe/src/main/java/org/apache/impala/catalog/FeFsTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/FeFsTable.java
@@ -41,7 +41,6 @@ import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
 import org.apache.impala.analysis.Expr;
 import org.apache.impala.analysis.LiteralExpr;
 import org.apache.impala.analysis.PartitionKeyValue;
-import org.apache.impala.catalog.HdfsPartition.FileDescriptor;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.common.FileSystemUtil;
 import org.apache.impala.common.PrintUtils;
diff --git a/fe/src/main/java/org/apache/impala/catalog/FeIcebergTable.java b/fe/src/main/java/org/apache/impala/catalog/FeIcebergTable.java
index bad11d109..8acc4ca41 100644
--- a/fe/src/main/java/org/apache/impala/catalog/FeIcebergTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/FeIcebergTable.java
@@ -59,8 +59,6 @@ import org.apache.impala.analysis.LiteralExpr;
 import org.apache.impala.analysis.TimeTravelSpec;
 import org.apache.impala.analysis.TimeTravelSpec.Kind;
 import org.apache.impala.catalog.CatalogObject.ThriftObjectType;
-import org.apache.impala.catalog.HdfsPartition.FileBlock;
-import org.apache.impala.catalog.HdfsPartition.FileDescriptor;
 import org.apache.impala.catalog.iceberg.GroupedContentFiles;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.common.FileSystemUtil;
@@ -709,7 +707,7 @@ public interface FeIcebergTable extends FeFsTable {
     /**
      * Get FileDescriptor by data file location
      */
-    public static HdfsPartition.FileDescriptor getFileDescriptor(
+    public static FileDescriptor getFileDescriptor(
         ContentFile<?> contentFile, FeIcebergTable table) throws IOException {
       Path fileLoc = FileSystemUtil.createFullyQualifiedPath(
           new Path(contentFile.path().toString()));
@@ -724,7 +722,7 @@ public interface FeIcebergTable extends FeFsTable {
       return getFileDescriptor(fsForPath, fileStatus, table);
     }
 
-    private static HdfsPartition.FileDescriptor getFileDescriptor(FileSystem fs,
+    private static FileDescriptor getFileDescriptor(FileSystem fs,
         FileStatus fileStatus, FeIcebergTable table) throws IOException {
       Reference<Long> numUnknownDiskIds = new Reference<>(0L);
 
@@ -741,7 +739,7 @@ public interface FeIcebergTable extends FeFsTable {
       }
 
       if (!FileSystemUtil.supportsStorageIds(fs)) {
-        return HdfsPartition.FileDescriptor.createWithNoBlocks(
+        return FileDescriptor.createWithNoBlocks(
             fileStatus, relPath, absPath);
       }
 
@@ -752,7 +750,7 @@ public interface FeIcebergTable extends FeFsTable {
         locations = fs.getFileBlockLocations(fileStatus, 0, fileStatus.getLen());
       }
 
-      return HdfsPartition.FileDescriptor.create(fileStatus, relPath, locations,
+      return FileDescriptor.create(fileStatus, relPath, locations,
           table.getHostIndex(), fileStatus.isEncrypted(), fileStatus.isErasureCoded(),
           numUnknownDiskIds, absPath);
     }
diff --git a/fe/src/main/java/org/apache/impala/catalog/FileBlock.java b/fe/src/main/java/org/apache/impala/catalog/FileBlock.java
new file mode 100644
index 000000000..acdb3c6a0
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/catalog/FileBlock.java
@@ -0,0 +1,215 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.impala.catalog;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Set;
+
+import com.google.common.base.Joiner;
+import com.google.common.base.Preconditions;
+import com.google.common.base.Strings;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
+import com.google.flatbuffers.FlatBufferBuilder;
+
+import org.apache.hadoop.fs.BlockLocation;
+import org.apache.impala.common.Reference;
+import org.apache.impala.fb.FbFileBlock;
+import org.apache.impala.thrift.TNetworkAddress;
+import org.apache.impala.util.ListMap;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+// Static utility methods to serialize and access file block metadata from FlatBuffers.
+public class FileBlock {
+  private final static Logger LOG = LoggerFactory.getLogger(FileBlock.class);
+
+  // Bit mask used to extract the replica host id and cache info of a file block.
+  // Use ~REPLICA_HOST_IDX_MASK to extract the cache info (stored in MSB).
+  private final static short REPLICA_HOST_IDX_MASK = (1 << 15) - 1;
+
+  /**
+   * Constructs an FbFileBlock object from the block location metadata
+   * 'loc'. Serializes the file block metadata into a FlatBuffer using 'fbb' and
+   * returns the offset in the underlying buffer where the encoded file block starts.
+   * 'hostIndex' stores the network addresses of the datanodes that store the files of
+   * the parent HdfsTable. Populates 'numUnknownDiskIds' with the number of unknown disk
+   * ids.
+   */
+  public static int createFbFileBlock(
+      FlatBufferBuilder fbb,
+      BlockLocation loc,
+      ListMap<TNetworkAddress> hostIndex,
+      Reference<Long> numUnknownDiskIds) throws IOException {
+    Preconditions.checkNotNull(fbb);
+    Preconditions.checkNotNull(loc);
+    Preconditions.checkNotNull(hostIndex);
+    // replica host ids
+    FbFileBlock.startReplicaHostIdxsVector(fbb, loc.getNames().length);
+    Set<String> cachedHosts = Sets.newHashSet(loc.getCachedHosts());
+    // Enumerate all replicas of the block, adding any unknown hosts
+    // to hostIndex. We pick the network address from getNames() and
+    // map it to the corresponding hostname from getHosts().
+    for (int i = 0; i < loc.getNames().length; ++i) {
+      TNetworkAddress networkAddress = BlockReplica.parseLocation(loc.getNames()[i]);
+      short replicaIdx = (short) hostIndex.getOrAddIndex(networkAddress);
+      boolean isReplicaCached = cachedHosts.contains(loc.getHosts()[i]);
+      replicaIdx = makeReplicaIdx(isReplicaCached, replicaIdx);
+      fbb.addShort(replicaIdx);
+    }
+    int fbReplicaHostIdxOffset = fbb.endVector();
+    short[] diskIds = createDiskIds(loc, numUnknownDiskIds);
+    Preconditions.checkState(diskIds.length == loc.getNames().length,
+        "Mismatch detected between number of diskIDs and block locations for 
block: " +
+            loc.toString());
+    int fbDiskIdsOffset = FbFileBlock.createDiskIdsVector(fbb, diskIds);
+    FbFileBlock.startFbFileBlock(fbb);
+    FbFileBlock.addOffset(fbb, loc.getOffset());
+    FbFileBlock.addLength(fbb, loc.getLength());
+    FbFileBlock.addReplicaHostIdxs(fbb, fbReplicaHostIdxOffset);
+    FbFileBlock.addDiskIds(fbb, fbDiskIdsOffset);
+    return FbFileBlock.endFbFileBlock(fbb);
+  }
+
+  public static short makeReplicaIdx(boolean isReplicaCached, int hostIdx) {
+    Preconditions.checkArgument((hostIdx & REPLICA_HOST_IDX_MASK) == hostIdx,
+        "invalid hostIdx: %s", hostIdx);
+    return isReplicaCached ? (short) (hostIdx | ~REPLICA_HOST_IDX_MASK)
+        : (short) hostIdx;
+  }
+
+  /**
+   * Constructs an FbFileBlock object from the file block metadata that comprises the
+   * block's 'offset', 'length' and replica index 'replicaIdx'. Serializes the file
+   * block metadata into a FlatBuffer using 'fbb' and returns the offset in the
+   * underlying buffer where the encoded file block starts.
+   */
+  public static int createFbFileBlock(
+      FlatBufferBuilder fbb,
+      long offset,
+      long length,
+      short replicaIdx) {
+    Preconditions.checkNotNull(fbb);
+    FbFileBlock.startReplicaHostIdxsVector(fbb, 1);
+    fbb.addShort(replicaIdx);
+    int fbReplicaHostIdxOffset = fbb.endVector();
+    FbFileBlock.startFbFileBlock(fbb);
+    FbFileBlock.addOffset(fbb, offset);
+    FbFileBlock.addLength(fbb, length);
+    FbFileBlock.addReplicaHostIdxs(fbb, fbReplicaHostIdxOffset);
+    return FbFileBlock.endFbFileBlock(fbb);
+  }
+
+  /**
+   * Creates the disk ids of a block from its BlockLocation 'location'. Returns the
+   * disk ids and populates 'numUnknownDiskIds' with the number of unknown disk ids.
+   */
+  private static short[] createDiskIds(
+      BlockLocation location,
+      Reference<Long> numUnknownDiskIds) throws IOException {
+    long unknownDiskIdCount = 0;
+    String[] storageIds = location.getStorageIds();
+    String[] hosts = location.getHosts();
+    if (storageIds.length != hosts.length) {
+      if (LOG.isTraceEnabled()) {
+        LOG.trace(String.format("Number of storage IDs and number of hosts for block " +
+            "%s mismatch (storageIDs:hosts) %d:%d. Skipping disk ID loading for this " +
+            "block.", location.toString(), storageIds.length, hosts.length));
+      }
+      storageIds = new String[hosts.length];
+    }
+    short[] diskIDs = new short[storageIds.length];
+    for (int i = 0; i < storageIds.length; ++i) {
+      if (Strings.isNullOrEmpty(storageIds[i])) {
+        diskIDs[i] = (short) -1;
+        ++unknownDiskIdCount;
+      } else {
+        diskIDs[i] = DiskIdMapper.INSTANCE.getDiskId(hosts[i], storageIds[i]);
+      }
+    }
+    long count = numUnknownDiskIds.getRef() + unknownDiskIdCount;
+    numUnknownDiskIds.setRef(Long.valueOf(count));
+    return diskIDs;
+  }
+
+  public static long getOffset(FbFileBlock fbFileBlock) {
+    return fbFileBlock.offset();
+  }
+
+  public static long getLength(FbFileBlock fbFileBlock) {
+    return fbFileBlock.length();
+  }
+
+  // Returns true if there is at least one cached replica.
+  public static boolean hasCachedReplica(FbFileBlock fbFileBlock) {
+    boolean hasCachedReplica = false;
+    for (int i = 0; i < fbFileBlock.replicaHostIdxsLength(); ++i) {
+      hasCachedReplica |= isReplicaCached(fbFileBlock, i);
+    }
+    return hasCachedReplica;
+  }
+
+  public static int getNumReplicaHosts(FbFileBlock fbFileBlock) {
+    return fbFileBlock.replicaHostIdxsLength();
+  }
+
+  public static int getReplicaHostIdx(FbFileBlock fbFileBlock, int pos) {
+    int idx = fbFileBlock.replicaHostIdxs(pos);
+    return idx & REPLICA_HOST_IDX_MASK;
+  }
+
+  // Returns true if the block replica 'replicaIdx' is cached.
+  public static boolean isReplicaCached(FbFileBlock fbFileBlock, int replicaIdx) {
+    int idx = fbFileBlock.replicaHostIdxs(replicaIdx);
+    return (idx & ~REPLICA_HOST_IDX_MASK) != 0;
+  }
+
+  /**
+   * Return the disk id of the block in BlockLocation.getNames()[hostIndex]; -1 if
+   * disk id is not supported.
+   */
+  public static int getDiskId(FbFileBlock fbFileBlock, int hostIndex) {
+    if (fbFileBlock.diskIdsLength() == 0) return -1;
+    return fbFileBlock.diskIds(hostIndex);
+  }
+
+  /**
+   * Returns a string representation of a FbFileBlock.
+   */
+  public static String debugString(FbFileBlock fbFileBlock) {
+    int numReplicaHosts = getNumReplicaHosts(fbFileBlock);
+    List<Integer> diskIds = Lists.newArrayListWithCapacity(numReplicaHosts);
+    List<Integer> replicaHosts = Lists.newArrayListWithCapacity(numReplicaHosts);
+    List<Boolean> isBlockCached = Lists.newArrayListWithCapacity(numReplicaHosts);
+    for (int i = 0; i < numReplicaHosts; ++i) {
+      diskIds.add(getDiskId(fbFileBlock, i));
+      replicaHosts.add(getReplicaHostIdx(fbFileBlock, i));
+      isBlockCached.add(isReplicaCached(fbFileBlock, i));
+    }
+    StringBuilder builder = new StringBuilder();
+    return builder.append("Offset: " + getOffset(fbFileBlock))
+        .append("Length: " + getLength(fbFileBlock))
+        .append("IsCached: " + hasCachedReplica(fbFileBlock))
+        .append("ReplicaHosts: " + Joiner.on(", ").join(replicaHosts))
+        .append("DiskIds: " + Joiner.on(", ").join(diskIds))
+        .append("Caching: " + Joiner.on(", ").join(isBlockCached))
+        .toString();
+  }
+}
\ No newline at end of file
diff --git a/fe/src/main/java/org/apache/impala/catalog/FileDescriptor.java b/fe/src/main/java/org/apache/impala/catalog/FileDescriptor.java
new file mode 100644
index 000000000..9dbbd16ec
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/catalog/FileDescriptor.java
@@ -0,0 +1,367 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.impala.catalog;
+
+import com.google.common.base.Function;
+import com.google.common.base.Joiner;
+import com.google.common.base.MoreObjects;
+import com.google.common.base.MoreObjects.ToStringHelper;
+import com.google.common.collect.Lists;
+import com.google.flatbuffers.FlatBufferBuilder;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.Collection;
+import java.util.List;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.impala.common.Reference;
+import org.apache.impala.fb.FbCompression;
+import org.apache.impala.fb.FbFileBlock;
+import org.apache.impala.fb.FbFileDesc;
+import org.apache.impala.fb.FbFileMetadata;
+import org.apache.impala.thrift.THdfsFileDesc;
+import org.apache.impala.thrift.TNetworkAddress;
+import org.apache.impala.util.ListMap;
+
+// Metadata for a single file.
+public class FileDescriptor implements Comparable<FileDescriptor> {
+  // An invalid network address, which will always be treated as remote.
+  private final static TNetworkAddress REMOTE_NETWORK_ADDRESS =
+      new TNetworkAddress("remote*addr", 0);
+
+  // Minimum block size in bytes allowed for synthetic file blocks (other than the last
+  // block, which may be shorter).
+  public final static long MIN_SYNTHETIC_BLOCK_SIZE = 1024 * 1024;
+
+  // Internal representation of a file descriptor using a FlatBuffer.
+  private final FbFileDesc fbFileDescriptor_;
+
+  // Internal representation of additional file metadata, e.g. Iceberg metadata.
+  private final FbFileMetadata fbFileMetadata_;
+
+  private FileDescriptor(FbFileDesc fileDescData) {
+    fbFileDescriptor_ = fileDescData;
+    fbFileMetadata_ = null;
+  }
+
+  public FileDescriptor(FbFileDesc fileDescData, FbFileMetadata fileMetadata) {
+    fbFileDescriptor_ = fileDescData;
+    fbFileMetadata_ = fileMetadata;
+  }
+
+  public static FileDescriptor fromThrift(THdfsFileDesc desc) {
+    ByteBuffer bb = ByteBuffer.wrap(desc.getFile_desc_data());
+    if (desc.isSetFile_metadata()) {
+      ByteBuffer bbMd = ByteBuffer.wrap(desc.getFile_metadata());
+      return new FileDescriptor(FbFileDesc.getRootAsFbFileDesc(bb),
+          FbFileMetadata.getRootAsFbFileMetadata(bbMd));
+    }
+    return new FileDescriptor(FbFileDesc.getRootAsFbFileDesc(bb));
+  }
+
+  /**
+   * Clone the descriptor, but change the replica indexes to reference the new host
+   * index 'dstIndex' instead of the original index 'origIndex'.
+   */
+  public FileDescriptor cloneWithNewHostIndex(
+      List<TNetworkAddress> origIndex, ListMap<TNetworkAddress> dstIndex) {
+    // First clone the flatbuffer with no changes.
+    ByteBuffer oldBuf = fbFileDescriptor_.getByteBuffer();
+    ByteBuffer newBuf = ByteBuffer.allocate(oldBuf.remaining());
+    newBuf.put(oldBuf.array(), oldBuf.position(), oldBuf.remaining());
+    newBuf.rewind();
+    FbFileDesc cloned = FbFileDesc.getRootAsFbFileDesc(newBuf);
+
+    // Now iterate over the blocks in the new flatbuffer and mutate the indexes.
+    FbFileBlock it = new FbFileBlock();
+    for (int i = 0; i < cloned.fileBlocksLength(); i++) {
+      it = cloned.fileBlocks(it, i);
+      for (int j = 0; j < it.replicaHostIdxsLength(); j++) {
+        int origHostIdx = FileBlock.getReplicaHostIdx(it, j);
+        boolean isCached = FileBlock.isReplicaCached(it, j);
+        TNetworkAddress origHost = origIndex.get(origHostIdx);
+        int newHostIdx = dstIndex.getOrAddIndex(origHost);
+        it.mutateReplicaHostIdxs(j, FileBlock.makeReplicaIdx(isCached, newHostIdx));
+      }
+    }
+    return new FileDescriptor(cloned, fbFileMetadata_);
+  }
+
+  public FileDescriptor cloneWithFileMetadata(FbFileMetadata fileMetadata) {
+    return new FileDescriptor(fbFileDescriptor_, fileMetadata);
+  }
+
+  /**
+   * Creates the file descriptor of a file represented by 'fileStatus' with blocks
+   * stored in 'blockLocations'. 'fileSystem' is the filesystem where the
+   * file resides and 'hostIndex' stores the network addresses of the hosts that store
+   * blocks of the parent HdfsTable. 'isEc' indicates whether the file is erasure-coded.
+   * Populates 'numUnknownDiskIds' with the number of unknown disk ids.
+   *
+   * @param fileStatus        the status returned from file listing
+   * @param relPath           the path of the file relative to the partition directory
+   * @param blockLocations    the block locations for the file
+   * @param hostIndex         the host index to use for encoding the hosts
+   * @param isEc              true if the file is known to be erasure-coded
+   * @param numUnknownDiskIds reference which will be set to the number of blocks
+   *                          for which no disk ID could be determined
+   */
+  public static FileDescriptor create(
+      FileStatus fileStatus,
+      String relPath,
+      BlockLocation[] blockLocations,
+      ListMap<TNetworkAddress> hostIndex,
+      boolean isEncrypted,
+      boolean isEc,
+      Reference<Long> numUnknownDiskIds,
+      String absPath) throws IOException {
+    FlatBufferBuilder fbb = new FlatBufferBuilder(1);
+    int[] fbFileBlockOffsets = new int[blockLocations.length];
+    int blockIdx = 0;
+    for (BlockLocation loc : blockLocations) {
+      if (isEc) {
+        fbFileBlockOffsets[blockIdx++] = FileBlock.createFbFileBlock(
+            fbb,
+            loc.getOffset(),
+            loc.getLength(),
+            (short) hostIndex.getOrAddIndex(REMOTE_NETWORK_ADDRESS));
+      } else {
+        fbFileBlockOffsets[blockIdx++] =
+            FileBlock.createFbFileBlock(fbb, loc, hostIndex, numUnknownDiskIds);
+      }
+    }
+    return new FileDescriptor(createFbFileDesc(fbb, fileStatus, relPath,
+        fbFileBlockOffsets, isEncrypted, isEc, absPath));
+  }
+
+  /**
+   * Creates the file descriptor of a file represented by 'fileStatus' that
+   * resides in a filesystem that doesn't support the BlockLocation API (e.g. S3).
+   */
+  public static FileDescriptor createWithNoBlocks(
+      FileStatus fileStatus, String relPath, String absPath) {
+    FlatBufferBuilder fbb = new FlatBufferBuilder(1);
+    return new FileDescriptor(
+        createFbFileDesc(fbb, fileStatus, relPath, null, false, false, absPath));
+  }
+
+  /**
+   * Serializes the metadata of a file descriptor represented by 'fileStatus' into a
+   * FlatBuffer using 'fbb' and returns the associated FbFileDesc object.
+   * 'fbFileBlockOffsets' are the offsets of the serialized block metadata of this file
+   * in the underlying buffer. Can be null if there are no blocks.
+   */
+  private static FbFileDesc createFbFileDesc(
+      FlatBufferBuilder fbb,
+      FileStatus fileStatus,
+      String relPath,
+      int[] fbFileBlockOffsets,
+      boolean isEncrypted,
+      boolean isEc,
+      String absPath) {
+    int relPathOffset = fbb.createString(relPath == null ? StringUtils.EMPTY : relPath);
+    // A negative block vector offset is used when no block offsets are specified.
+    int blockVectorOffset = -1;
+    if (fbFileBlockOffsets != null) {
+      blockVectorOffset = FbFileDesc.createFileBlocksVector(fbb, fbFileBlockOffsets);
+    }
+    int absPathOffset = -1;
+    if (StringUtils.isNotEmpty(absPath)) absPathOffset = fbb.createString(absPath);
+    FbFileDesc.startFbFileDesc(fbb);
+    // TODO(todd) rename to RelativePath in the FBS
+    FbFileDesc.addRelativePath(fbb, relPathOffset);
+    FbFileDesc.addLength(fbb, fileStatus.getLen());
+    FbFileDesc.addLastModificationTime(fbb, fileStatus.getModificationTime());
+    FbFileDesc.addIsEncrypted(fbb, isEncrypted);
+    FbFileDesc.addIsEc(fbb, isEc);
+    HdfsCompression comp = HdfsCompression.fromFileName(fileStatus.getPath().getName());
+    FbFileDesc.addCompression(fbb, comp.toFb());
+    if (blockVectorOffset >= 0) FbFileDesc.addFileBlocks(fbb, blockVectorOffset);
+    if (absPathOffset >= 0) FbFileDesc.addAbsolutePath(fbb, absPathOffset);
+    fbb.finish(FbFileDesc.endFbFileDesc(fbb));
+    // To eliminate memory fragmentation, copy the contents of the FlatBuffer to the
+    // smallest possible ByteBuffer.
+    ByteBuffer bb = fbb.dataBuffer().slice();
+    ByteBuffer compressedBb = ByteBuffer.allocate(bb.capacity());
+    compressedBb.put(bb);
+    return FbFileDesc.getRootAsFbFileDesc((ByteBuffer) compressedBb.flip());
+  }
+
+  public String getRelativePath() {
+    return fbFileDescriptor_.relativePath();
+  }
+
+  public String getAbsolutePath() {
+    return StringUtils.isEmpty(fbFileDescriptor_.absolutePath()) ?
+        StringUtils.EMPTY :
+        fbFileDescriptor_.absolutePath();
+  }
+
+  public String getAbsolutePath(String rootPath) {
+    if (StringUtils.isEmpty(fbFileDescriptor_.relativePath())
+        && StringUtils.isNotEmpty(fbFileDescriptor_.absolutePath())) {
+      return fbFileDescriptor_.absolutePath();
+    } else {
+      return rootPath + Path.SEPARATOR + fbFileDescriptor_.relativePath();
+    }
+  }
+
+  public String getPath() {
+    if (StringUtils.isEmpty(fbFileDescriptor_.relativePath())
+        && StringUtils.isNotEmpty(fbFileDescriptor_.absolutePath())) {
+      return fbFileDescriptor_.absolutePath();
+    } else {
+      return fbFileDescriptor_.relativePath();
+    }
+  }
+
+  public long getFileLength() {
+    return fbFileDescriptor_.length();
+  }
+
+  /**
+   * Compute the total length of files in fileDescs
+   */
+  public static long computeTotalFileLength(Collection<FileDescriptor> fileDescs) {
+    long totalLength = 0;
+    for (FileDescriptor fileDesc : fileDescs) {
+      totalLength += fileDesc.getFileLength();
+    }
+    return totalLength;
+  }
+
+  public HdfsCompression getFileCompression() {
+    return HdfsCompression.valueOf(FbCompression.name(fbFileDescriptor_.compression()));
+  }
+
+  public long getModificationTime() {
+    return fbFileDescriptor_.lastModificationTime();
+  }
+
+  public int getNumFileBlocks() {
+    return fbFileDescriptor_.fileBlocksLength();
+  }
+
+  public boolean getIsEncrypted() {
+    return fbFileDescriptor_.isEncrypted();
+  }
+
+  public boolean getIsEc() {
+    return fbFileDescriptor_.isEc();
+  }
+
+  public FbFileBlock getFbFileBlock(int idx) {
+    return fbFileDescriptor_.fileBlocks(idx);
+  }
+
+  public FbFileDesc getFbFileDescriptor() {
+    return fbFileDescriptor_;
+  }
+
+  public FbFileMetadata getFbFileMetadata() {
+    return fbFileMetadata_;
+  }
+
+  public THdfsFileDesc toThrift() {
+    THdfsFileDesc fd = new THdfsFileDesc();
+    ByteBuffer bb = fbFileDescriptor_.getByteBuffer();
+    fd.setFile_desc_data(bb);
+    if (fbFileMetadata_ != null) {
+      fd.setFile_metadata(fbFileMetadata_.getByteBuffer());
+    }
+    return fd;
+  }
+
+  @Override
+  public String toString() {
+    int numFileBlocks = getNumFileBlocks();
+    List<String> blocks = Lists.newArrayListWithCapacity(numFileBlocks);
+    for (int i = 0; i < numFileBlocks; ++i) {
+      blocks.add(FileBlock.debugString(getFbFileBlock(i)));
+    }
+    ToStringHelper stringHelper = MoreObjects.toStringHelper(this)
+        .add("RelativePath", getRelativePath())
+        .add("Length", getFileLength())
+        .add("Compression", getFileCompression())
+        .add("ModificationTime", getModificationTime())
+        .add("Blocks", Joiner.on(", ").join(blocks));
+    if (StringUtils.isNotEmpty(getAbsolutePath())) {
+      stringHelper.add("AbsolutePath", getAbsolutePath());
+    }
+    return stringHelper.toString();
+  }
+
+  @Override
+  public int compareTo(FileDescriptor otherFd) {
+    return getPath().compareTo(otherFd.getPath());
+  }
+
+  /**
+   * Compares the modification time and file size between current FileDescriptor and the
+   * latest FileStatus to determine if the file has changed. Returns true if the file
+   * has changed and false otherwise. Note that block location changes are not
+   * considered as file changes. Table reloading won't recognize block location changes
+   * which require an INVALIDATE METADATA command on the table to clear the stale
+   * locations.
+   */
+  public boolean isChanged(FileStatus latestStatus) {
+    return latestStatus == null || getFileLength() != latestStatus.getLen()
+        || getModificationTime() != latestStatus.getModificationTime();
+  }
+
+  /**
+   * Same as above but compares to a FileDescriptor instance.
+   */
+  public boolean isChanged(FileDescriptor latestFd) {
+    return latestFd == null || getFileLength() != latestFd.getFileLength()
+        || getModificationTime() != latestFd.getModificationTime();
+  }
+
+  /**
+   * Function to convert from a byte[] flatbuffer to the wrapper class. Note that
+   * this returns a shallow copy which continues to reflect any changes to the
+   * passed byte[].
+   */
+  public static final Function<byte[], FileDescriptor> FROM_BYTES =
+      new Function<byte[], FileDescriptor>() {
+        @Override
+        public FileDescriptor apply(byte[] input) {
+          ByteBuffer bb = ByteBuffer.wrap(input);
+          return new FileDescriptor(FbFileDesc.getRootAsFbFileDesc(bb));
+        }
+      };
+
+  /**
+   * Function to convert from the wrapper class to a raw byte[]. Note that
+   * this returns a shallow copy and callers should not modify the returned array.
+   */
+  public static final Function<FileDescriptor, byte[]> TO_BYTES =
+      new Function<FileDescriptor, byte[]>() {
+        @Override
+        public byte[] apply(FileDescriptor fd) {
+          ByteBuffer bb = fd.fbFileDescriptor_.getByteBuffer();
+          byte[] arr = bb.array();
+          assert bb.arrayOffset() == 0 && bb.remaining() == arr.length;
+          return arr;
+        }
+      };
+}
\ No newline at end of file
diff --git a/fe/src/main/java/org/apache/impala/catalog/FileMetadataLoader.java b/fe/src/main/java/org/apache/impala/catalog/FileMetadataLoader.java
index b31053102..f94114d9a 100644
--- a/fe/src/main/java/org/apache/impala/catalog/FileMetadataLoader.java
+++ b/fe/src/main/java/org/apache/impala/catalog/FileMetadataLoader.java
@@ -30,7 +30,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.hive.common.ValidTxnList;
 import org.apache.hadoop.hive.common.ValidWriteIdList;
-import org.apache.impala.catalog.HdfsPartition.FileDescriptor;
 import org.apache.impala.catalog.HdfsTable.FileMetadataStats;
 import org.apache.impala.common.FileSystemUtil;
 import org.apache.impala.common.Reference;
diff --git a/fe/src/main/java/org/apache/impala/catalog/HdfsPartition.java b/fe/src/main/java/org/apache/impala/catalog/HdfsPartition.java
index b712e9a7b..d967d050b 100644
--- a/fe/src/main/java/org/apache/impala/catalog/HdfsPartition.java
+++ b/fe/src/main/java/org/apache/impala/catalog/HdfsPartition.java
@@ -18,24 +18,15 @@
 package org.apache.impala.catalog;
 
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Function;
-import com.google.common.base.Joiner;
 import com.google.common.base.MoreObjects;
-import com.google.common.base.MoreObjects.ToStringHelper;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Predicate;
-import com.google.common.base.Strings;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
-import com.google.common.collect.Sets;
-import com.google.flatbuffers.FlatBufferBuilder;
 
-import java.io.IOException;
-import java.nio.ByteBuffer;
 import java.util.ArrayList;
-import java.util.Collection;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.HashMap;
@@ -47,9 +38,6 @@ import java.util.concurrent.atomic.AtomicLong;
 import javax.annotation.Nonnull;
 import javax.annotation.Nullable;
 
-import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.fs.BlockLocation;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
@@ -64,10 +52,6 @@ import org.apache.impala.common.ImpalaException;
 import org.apache.impala.common.Pair;
 import org.apache.impala.common.Reference;
 import org.apache.impala.compat.MetastoreShim;
-import org.apache.impala.fb.FbCompression;
-import org.apache.impala.fb.FbFileBlock;
-import org.apache.impala.fb.FbFileDesc;
-import org.apache.impala.fb.FbFileMetadata;
 import org.apache.impala.thrift.CatalogObjectsConstants;
 import org.apache.impala.thrift.TAccessLevel;
 import org.apache.impala.thrift.TCatalogObject;
@@ -101,509 +85,6 @@ import org.slf4j.LoggerFactory;
  */
 public class HdfsPartition extends CatalogObjectImpl
     implements FeFsPartition, PrunablePartition {
-  /**
-   * Metadata for a single file in this partition.
-   */
-  static public class FileDescriptor implements Comparable<FileDescriptor> {
-    // An invalid network address, which will always be treated as remote.
-    private final static TNetworkAddress REMOTE_NETWORK_ADDRESS =
-        new TNetworkAddress("remote*addr", 0);
-
-    // Minimum block size in bytes allowed for synthetic file blocks (other than the last
-    // block, which may be shorter).
-    public final static long MIN_SYNTHETIC_BLOCK_SIZE = 1024 * 1024;
-
-    // Internal representation of a file descriptor using a FlatBuffer.
-    private final FbFileDesc fbFileDescriptor_;
-
-    // Internal representation of additional file metadata, e.g. Iceberg metadata.
-    private final FbFileMetadata fbFileMetadata_;
-
-    private FileDescriptor(FbFileDesc fileDescData) {
-      fbFileDescriptor_ = fileDescData;
-      fbFileMetadata_ = null;
-    }
-
-    public FileDescriptor(FbFileDesc fileDescData, FbFileMetadata fileMetadata) {
-      fbFileDescriptor_ = fileDescData;
-      fbFileMetadata_ = fileMetadata;
-    }
-
-    public static FileDescriptor fromThrift(THdfsFileDesc desc) {
-      ByteBuffer bb = ByteBuffer.wrap(desc.getFile_desc_data());
-      if (desc.isSetFile_metadata()) {
-        ByteBuffer bbMd = ByteBuffer.wrap(desc.getFile_metadata());
-        return new FileDescriptor(FbFileDesc.getRootAsFbFileDesc(bb),
-                                  FbFileMetadata.getRootAsFbFileMetadata(bbMd));
-      }
-      return new FileDescriptor(FbFileDesc.getRootAsFbFileDesc(bb));
-    }
-
-    /**
-     * Clone the descriptor, but change the replica indexes to reference the new host
-     * index 'dstIndex' instead of the original index 'origIndex'.
-     */
-    public FileDescriptor cloneWithNewHostIndex(List<TNetworkAddress> origIndex,
-        ListMap<TNetworkAddress> dstIndex) {
-      // First clone the flatbuffer with no changes.
-      ByteBuffer oldBuf = fbFileDescriptor_.getByteBuffer();
-      ByteBuffer newBuf = ByteBuffer.allocate(oldBuf.remaining());
-      newBuf.put(oldBuf.array(), oldBuf.position(), oldBuf.remaining());
-      newBuf.rewind();
-      FbFileDesc cloned = FbFileDesc.getRootAsFbFileDesc(newBuf);
-
-      // Now iterate over the blocks in the new flatbuffer and mutate the indexes.
-      FbFileBlock it = new FbFileBlock();
-      for (int i = 0; i < cloned.fileBlocksLength(); i++) {
-        it = cloned.fileBlocks(it, i);
-        for (int j = 0; j < it.replicaHostIdxsLength(); j++) {
-          int origHostIdx = FileBlock.getReplicaHostIdx(it, j);
-          boolean isCached = FileBlock.isReplicaCached(it, j);
-          TNetworkAddress origHost = origIndex.get(origHostIdx);
-          int newHostIdx = dstIndex.getOrAddIndex(origHost);
-          it.mutateReplicaHostIdxs(j, FileBlock.makeReplicaIdx(isCached, newHostIdx));
-        }
-      }
-      return new FileDescriptor(cloned, fbFileMetadata_);
-    }
-
-    public FileDescriptor cloneWithFileMetadata(FbFileMetadata fileMetadata) {
-      return new FileDescriptor(fbFileDescriptor_, fileMetadata);
-    }
-
-    /**
-     * Creates the file descriptor of a file represented by 'fileStatus' with blocks
-     * stored in 'blockLocations'. 'fileSystem' is the filesystem where the
-     * file resides and 'hostIndex' stores the network addresses of the hosts that store
-     * blocks of the parent HdfsTable. 'isEc' indicates whether the file is erasure-coded.
-     * Populates 'numUnknownDiskIds' with the number of unknown disk ids.
-     *
-     *
-     */
-    /**
-     * Creates a FileDescriptor with block locations.
-     *
-     * @param fileStatus the status returned from file listing
-     * @param relPath the path of the file relative to the partition directory
-     * @param blockLocations the block locations for the file
-     * @param hostIndex the host index to use for encoding the hosts
-     * @param isEc true if the file is known to be erasure-coded
-     * @param numUnknownDiskIds reference which will be set to the number of blocks
-     *                          for which no disk ID could be determined
-     */
-    public static FileDescriptor create(FileStatus fileStatus, String relPath,
-        BlockLocation[] blockLocations, ListMap<TNetworkAddress> hostIndex,
-        boolean isEncrypted, boolean isEc, Reference<Long> numUnknownDiskIds,
-        String absPath) throws IOException {
-      FlatBufferBuilder fbb = new FlatBufferBuilder(1);
-      int[] fbFileBlockOffsets = new int[blockLocations.length];
-      int blockIdx = 0;
-      for (BlockLocation loc: blockLocations) {
-        if (isEc) {
-          fbFileBlockOffsets[blockIdx++] = FileBlock.createFbFileBlock(fbb,
-              loc.getOffset(), loc.getLength(),
-              (short) hostIndex.getOrAddIndex(REMOTE_NETWORK_ADDRESS));
-        } else {
-          fbFileBlockOffsets[blockIdx++] =
-              FileBlock.createFbFileBlock(fbb, loc, hostIndex, numUnknownDiskIds);
-        }
-      }
-      return new FileDescriptor(createFbFileDesc(fbb, fileStatus, relPath,
-          fbFileBlockOffsets, isEncrypted, isEc, absPath));
-    }
-
-    /**
-     * Creates the file descriptor of a file represented by 'fileStatus' that
-     * resides in a filesystem that doesn't support the BlockLocation API (e.g. S3).
-     */
-    public static FileDescriptor createWithNoBlocks(
-        FileStatus fileStatus, String relPath, String absPath) {
-      FlatBufferBuilder fbb = new FlatBufferBuilder(1);
-      return new FileDescriptor(
-          createFbFileDesc(fbb, fileStatus, relPath, null, false, false, absPath));
-    }
-    /**
-     * Serializes the metadata of a file descriptor represented by 'fileStatus' into a
-     * FlatBuffer using 'fbb' and returns the associated FbFileDesc object.
-     * 'fbFileBlockOffsets' are the offsets of the serialized block metadata of this file
-     * in the underlying buffer. Can be null if there are no blocks.
-     */
-    private static FbFileDesc createFbFileDesc(FlatBufferBuilder fbb,
-        FileStatus fileStatus, String relPath, int[] fbFileBlockOffsets,
-        boolean isEncrypted, boolean isEc, String absPath) {
-      int relPathOffset = fbb.createString(relPath == null ? StringUtils.EMPTY : relPath);
-      // A negative block vector offset is used when no block offsets are specified.
-      int blockVectorOffset = -1;
-      if (fbFileBlockOffsets != null) {
-        blockVectorOffset = FbFileDesc.createFileBlocksVector(fbb, fbFileBlockOffsets);
-      }
-      int absPathOffset = -1;
-      if (StringUtils.isNotEmpty(absPath)) absPathOffset = fbb.createString(absPath);
-      FbFileDesc.startFbFileDesc(fbb);
-      // TODO(todd) rename to RelativePath in the FBS
-      FbFileDesc.addRelativePath(fbb, relPathOffset);
-      FbFileDesc.addLength(fbb, fileStatus.getLen());
-      FbFileDesc.addLastModificationTime(fbb, fileStatus.getModificationTime());
-      FbFileDesc.addIsEncrypted(fbb, isEncrypted);
-      FbFileDesc.addIsEc(fbb, isEc);
-      HdfsCompression comp = HdfsCompression.fromFileName(fileStatus.getPath().getName());
-      FbFileDesc.addCompression(fbb, comp.toFb());
-      if (blockVectorOffset >= 0) FbFileDesc.addFileBlocks(fbb, blockVectorOffset);
-      if (absPathOffset >= 0) FbFileDesc.addAbsolutePath(fbb, absPathOffset);
-      fbb.finish(FbFileDesc.endFbFileDesc(fbb));
-      // To eliminate memory fragmentation, copy the contents of the FlatBuffer to the
-      // smallest possible ByteBuffer.
-      ByteBuffer bb = fbb.dataBuffer().slice();
-      ByteBuffer compressedBb = ByteBuffer.allocate(bb.capacity());
-      compressedBb.put(bb);
-      return FbFileDesc.getRootAsFbFileDesc((ByteBuffer) compressedBb.flip());
-    }
-
-    public String getRelativePath() { return fbFileDescriptor_.relativePath(); }
-
-    public String getAbsolutePath() {
-      return StringUtils.isEmpty(fbFileDescriptor_.absolutePath()) ?
-          StringUtils.EMPTY :
-          fbFileDescriptor_.absolutePath();
-    }
-
-    public String getAbsolutePath(String rootPath) {
-      if (StringUtils.isEmpty(fbFileDescriptor_.relativePath())
-          && StringUtils.isNotEmpty(fbFileDescriptor_.absolutePath())) {
-        return fbFileDescriptor_.absolutePath();
-      } else {
-        return rootPath + Path.SEPARATOR + fbFileDescriptor_.relativePath();
-      }
-    }
-
-    public String getPath() {
-      if (StringUtils.isEmpty(fbFileDescriptor_.relativePath())
-          && StringUtils.isNotEmpty(fbFileDescriptor_.absolutePath())) {
-        return fbFileDescriptor_.absolutePath();
-      } else {
-        return fbFileDescriptor_.relativePath();
-      }
-    }
-
-    public long getFileLength() { return fbFileDescriptor_.length(); }
-
-    /** Compute the total length of files in fileDescs */
-    public static long computeTotalFileLength(Collection<FileDescriptor> fileDescs) {
-      long totalLength = 0;
-      for (FileDescriptor fileDesc: fileDescs) {
-        totalLength += fileDesc.getFileLength();
-      }
-      return totalLength;
-    }
-
-    public HdfsCompression getFileCompression() {
-      return HdfsCompression.valueOf(FbCompression.name(fbFileDescriptor_.compression()));
-    }
-
-    public long getModificationTime() { return fbFileDescriptor_.lastModificationTime(); }
-    public int getNumFileBlocks() { return fbFileDescriptor_.fileBlocksLength(); }
-    public boolean getIsEncrypted() {return fbFileDescriptor_.isEncrypted(); }
-    public boolean getIsEc() {return fbFileDescriptor_.isEc(); }
-
-    public FbFileBlock getFbFileBlock(int idx) {
-      return fbFileDescriptor_.fileBlocks(idx);
-    }
-
-    public FbFileDesc getFbFileDescriptor() {
-      return fbFileDescriptor_;
-    }
-
-    public FbFileMetadata getFbFileMetadata() {
-      return fbFileMetadata_;
-    }
-
-    public THdfsFileDesc toThrift() {
-      THdfsFileDesc fd = new THdfsFileDesc();
-      ByteBuffer bb = fbFileDescriptor_.getByteBuffer();
-      fd.setFile_desc_data(bb);
-      if (fbFileMetadata_ != null) {
-        fd.setFile_metadata(fbFileMetadata_.getByteBuffer());
-      }
-      return fd;
-    }
-
-    @Override
-    public String toString() {
-      int numFileBlocks = getNumFileBlocks();
-      List<String> blocks = Lists.newArrayListWithCapacity(numFileBlocks);
-      for (int i = 0; i < numFileBlocks; ++i) {
-        blocks.add(FileBlock.debugString(getFbFileBlock(i)));
-      }
-      ToStringHelper stringHelper = MoreObjects.toStringHelper(this)
-          .add("RelativePath", getRelativePath())
-          .add("Length", getFileLength())
-          .add("Compression", getFileCompression())
-          .add("ModificationTime", getModificationTime())
-          .add("Blocks", Joiner.on(", ").join(blocks));
-      if (StringUtils.isNotEmpty(getAbsolutePath())) {
-        stringHelper.add("AbsolutePath", getAbsolutePath());
-      }
-      return stringHelper.toString();
-    }
-
-    @Override
-    public int compareTo(FileDescriptor otherFd) {
-      return getPath().compareTo(otherFd.getPath());
-    }
-
-    /**
-     * Compares the modification time and file size between current FileDescriptor and the
-     * latest FileStatus to determine if the file has changed. Returns true if the file
-     * has changed and false otherwise. Note that block location changes are not
-     * considered as file changes. Table reloading won't recognize block location changes
-     * which require an INVALIDATE METADATA command on the table to clear the stale
-     * locations.
-     */
-    public boolean isChanged(FileStatus latestStatus) {
-      return latestStatus == null || getFileLength() != latestStatus.getLen()
-          || getModificationTime() != latestStatus.getModificationTime();
-    }
-
-    /**
-     * Same as above but compares to a FileDescriptor instance.
-     */
-    public boolean isChanged(FileDescriptor latestFd) {
-      return latestFd == null || getFileLength() != latestFd.getFileLength()
-          || getModificationTime() != latestFd.getModificationTime();
-    }
-
-    /**
-     * Function to convert from a byte[] flatbuffer to the wrapper class. Note that
-     * this returns a shallow copy which continues to reflect any changes to the
-     * passed byte[].
-     */
-    public static final Function<byte[], FileDescriptor> FROM_BYTES =
-        new Function<byte[], FileDescriptor>() {
-          @Override
-          public FileDescriptor apply(byte[] input) {
-            ByteBuffer bb = ByteBuffer.wrap(input);
-            return new FileDescriptor(FbFileDesc.getRootAsFbFileDesc(bb));
-          }
-        };
-
-    /**
-     * Function to convert from the wrapper class to a raw byte[]. Note that
-     * this returns a shallow copy and callers should not modify the returned array.
-     */
-    public static final Function<FileDescriptor, byte[]> TO_BYTES =
-        new Function<FileDescriptor, byte[]>() {
-          @Override
-          public byte[] apply(FileDescriptor fd) {
-            ByteBuffer bb = fd.fbFileDescriptor_.getByteBuffer();
-            byte[] arr = bb.array();
-            assert bb.arrayOffset() == 0 && bb.remaining() == arr.length;
-            return arr;
-          }
-        };
-  }
-
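The FROM_BYTES/TO_BYTES converters above are shallow because partitions keep
file descriptors as raw FlatBuffer byte[] values and only wrap them on demand.
A minimal sketch of the round-trip, assuming an already-loaded descriptor 'fd';
the variable names are illustrative, not part of this patch:

    // 'fd' could come from partition.getFileDescriptors().get(0).
    byte[] encoded = FileDescriptor.TO_BYTES.apply(fd);       // shares fd's buffer
    FileDescriptor decoded = FileDescriptor.FROM_BYTES.apply(encoded);
    // Both wrappers read the same underlying FlatBuffer, so the fields agree.
    assert decoded.getFileLength() == fd.getFileLength();
    assert decoded.getModificationTime() == fd.getModificationTime();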
-  /**
-   * Represents metadata of a single block replica.
-   */
-  public static class BlockReplica {
-    private final boolean isCached_;
-    private final short hostIdx_;
-
-    /**
-     * Creates a BlockReplica given a host ID/index and a flag specifying whether this
-     * replica is cached. Host IDs are assigned when loading the block metadata in
-     * HdfsTable.
-     */
-    public BlockReplica(short hostIdx, boolean isCached) {
-      hostIdx_ = hostIdx;
-      isCached_ = isCached;
-    }
-
-    /**
-     * Parses the location (an ip address:port string) of the replica and returns a
-     * TNetworkAddress with this information, or null if parsing fails.
-     */
-    public static TNetworkAddress parseLocation(String location) {
-      Preconditions.checkNotNull(location);
-      String[] ip_port = location.split(":");
-      if (ip_port.length != 2) return null;
-      try {
-        return CatalogInterners.internNetworkAddress(
-            new TNetworkAddress(ip_port[0], Integer.parseInt(ip_port[1])));
-      } catch (NumberFormatException e) {
-        return null;
-      }
-    }
-
-    public boolean isCached() { return isCached_; }
-    public short getHostIdx() { return hostIdx_; }
-  }
-
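Note that parseLocation() above expects the address exactly as HDFS reports it
in BlockLocation.getNames(), i.e. an "ip:port" pair, and returns null rather
than throwing on malformed input. A quick illustration, with made-up values:

    // Well-formed location: parsed and interned.
    TNetworkAddress ok = BlockReplica.parseLocation("10.20.30.40:50010");
    // ok.hostname == "10.20.30.40", ok.port == 50010
    // A missing port or a non-numeric port both yield null, not an exception.
    TNetworkAddress noPort = BlockReplica.parseLocation("10.20.30.40");
    TNetworkAddress badPort = BlockReplica.parseLocation("10.20.30.40:dn");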
-  /**
-   * Static utility methods to serialize and access file block metadata from FlatBuffers.
-   */
-  public static class FileBlock {
-    // Bit mask used to extract the replica host id and cache info of a file block.
-    // Use ~REPLICA_HOST_IDX_MASK to extract the cache info (stored in MSB).
-    private static short REPLICA_HOST_IDX_MASK = (1 << 15) - 1;
-
-    /**
-     * Constructs an FbFileBlock object from the block location metadata
-     * 'loc'. Serializes the file block metadata into a FlatBuffer using 'fbb' and
-     * returns the offset in the underlying buffer where the encoded file block starts.
-     * 'hostIndex' stores the network addresses of the datanodes that store the files of
-     * the parent HdfsTable. Populates 'numUnknownDiskIds' with the number of unknown
-     * disk ids.
-     */
-    public static int createFbFileBlock(FlatBufferBuilder fbb, BlockLocation loc,
-        ListMap<TNetworkAddress> hostIndex, Reference<Long> numUnknownDiskIds)
-        throws IOException {
-      Preconditions.checkNotNull(fbb);
-      Preconditions.checkNotNull(loc);
-      Preconditions.checkNotNull(hostIndex);
-      // replica host ids
-      FbFileBlock.startReplicaHostIdxsVector(fbb, loc.getNames().length);
-      Set<String> cachedHosts = Sets.newHashSet(loc.getCachedHosts());
-      // Enumerate all replicas of the block, adding any unknown hosts
-      // to hostIndex. We pick the network address from getNames() and
-      // map it to the corresponding hostname from getHosts().
-      for (int i = 0; i < loc.getNames().length; ++i) {
-        TNetworkAddress networkAddress = BlockReplica.parseLocation(loc.getNames()[i]);
-        short replicaIdx = (short) hostIndex.getOrAddIndex(networkAddress);
-        boolean isReplicaCached = cachedHosts.contains(loc.getHosts()[i]);
-        replicaIdx = makeReplicaIdx(isReplicaCached, replicaIdx);
-        fbb.addShort(replicaIdx);
-      }
-      int fbReplicaHostIdxOffset = fbb.endVector();
-      short[] diskIds = createDiskIds(loc, numUnknownDiskIds);
-      Preconditions.checkState(diskIds.length == loc.getNames().length,
-          "Mismatch detected between number of diskIDs and block locations for 
block: " +
-          loc.toString());
-      int fbDiskIdsOffset = FbFileBlock.createDiskIdsVector(fbb, diskIds);
-      FbFileBlock.startFbFileBlock(fbb);
-      FbFileBlock.addOffset(fbb, loc.getOffset());
-      FbFileBlock.addLength(fbb, loc.getLength());
-      FbFileBlock.addReplicaHostIdxs(fbb, fbReplicaHostIdxOffset);
-      FbFileBlock.addDiskIds(fbb, fbDiskIdsOffset);
-      return FbFileBlock.endFbFileBlock(fbb);
-    }
-
-    private static short makeReplicaIdx(boolean isReplicaCached, int hostIdx) {
-      Preconditions.checkArgument((hostIdx & REPLICA_HOST_IDX_MASK) == hostIdx,
-          "invalid hostIdx: %s", hostIdx);
-      return isReplicaCached ? (short) (hostIdx | ~REPLICA_HOST_IDX_MASK)
-          : (short)hostIdx;
-    }
-
-    /**
-     * Constructs an FbFileBlock object from the file block metadata that comprises the
-     * block's 'offset', 'length' and replica index 'replicaIdx'. Serializes the file
-     * block metadata into a FlatBuffer using 'fbb' and returns the offset in the
-     * underlying buffer where the encoded file block starts.
-     */
-    public static int createFbFileBlock(FlatBufferBuilder fbb, long offset, long length,
-        short replicaIdx) {
-      Preconditions.checkNotNull(fbb);
-      FbFileBlock.startReplicaHostIdxsVector(fbb, 1);
-      fbb.addShort(replicaIdx);
-      int fbReplicaHostIdxOffset = fbb.endVector();
-      FbFileBlock.startFbFileBlock(fbb);
-      FbFileBlock.addOffset(fbb, offset);
-      FbFileBlock.addLength(fbb, length);
-      FbFileBlock.addReplicaHostIdxs(fbb, fbReplicaHostIdxOffset);
-      return FbFileBlock.endFbFileBlock(fbb);
-    }
-
-    /**
-     * Creates the disk ids of a block from its BlockLocation 'location'. Returns the
-     * disk ids and populates 'numUnknownDiskIds' with the number of unknown disk ids.
-     */
-    private static short[] createDiskIds(BlockLocation location,
-        Reference<Long> numUnknownDiskIds) throws IOException {
-      long unknownDiskIdCount = 0;
-      String[] storageIds = location.getStorageIds();
-      String[] hosts = location.getHosts();
-      if (storageIds.length != hosts.length) {
-        if (LOG.isTraceEnabled()) {
-          LOG.trace(String.format("Number of storage IDs and number of hosts for block " +
-              "%s mismatch (storageIDs:hosts) %d:%d. Skipping disk ID loading for this " +
-              "block.", location.toString(), storageIds.length, hosts.length));
-        }
-        storageIds = new String[hosts.length];
-      }
-      short[] diskIDs = new short[storageIds.length];
-      for (int i = 0; i < storageIds.length; ++i) {
-        if (Strings.isNullOrEmpty(storageIds[i])) {
-          diskIDs[i] = (short) -1;
-          ++unknownDiskIdCount;
-        } else {
-          diskIDs[i] = DiskIdMapper.INSTANCE.getDiskId(hosts[i], storageIds[i]);
-        }
-      }
-      long count = numUnknownDiskIds.getRef() + unknownDiskIdCount;
-      numUnknownDiskIds.setRef(Long.valueOf(count));
-      return diskIDs;
-    }
-
-    public static long getOffset(FbFileBlock fbFileBlock) { return fbFileBlock.offset(); }
-    public static long getLength(FbFileBlock fbFileBlock) { return fbFileBlock.length(); }
-    // Returns true if there is at least one cached replica.
-    public static boolean hasCachedReplica(FbFileBlock fbFileBlock) {
-      boolean hasCachedReplica = false;
-      for (int i = 0; i < fbFileBlock.replicaHostIdxsLength(); ++i) {
-        hasCachedReplica |= isReplicaCached(fbFileBlock, i);
-      }
-      return hasCachedReplica;
-    }
-
-    public static int getNumReplicaHosts(FbFileBlock fbFileBlock) {
-      return fbFileBlock.replicaHostIdxsLength();
-    }
-
-    public static int getReplicaHostIdx(FbFileBlock fbFileBlock, int pos) {
-      int idx = fbFileBlock.replicaHostIdxs(pos);
-      return idx & REPLICA_HOST_IDX_MASK;
-    }
-
-    // Returns true if the block replica 'replicaIdx' is cached.
-    public static boolean isReplicaCached(FbFileBlock fbFileBlock, int replicaIdx) {
-      int idx = fbFileBlock.replicaHostIdxs(replicaIdx);
-      return (idx & ~REPLICA_HOST_IDX_MASK) != 0;
-    }
-
-    /**
-     * Return the disk id of the block in BlockLocation.getNames()[hostIndex]; -1 if
-     * disk id is not supported.
-     */
-    public static int getDiskId(FbFileBlock fbFileBlock, int hostIndex) {
-      if (fbFileBlock.diskIdsLength() == 0) return -1;
-      return fbFileBlock.diskIds(hostIndex);
-    }
-
-    /**
-     * Returns a string representation of a FbFileBlock.
-     */
-    public static String debugString(FbFileBlock fbFileBlock) {
-      int numReplicaHosts = getNumReplicaHosts(fbFileBlock);
-      List<Integer> diskIds = Lists.newArrayListWithCapacity(numReplicaHosts);
-      List<Integer> replicaHosts = 
Lists.newArrayListWithCapacity(numReplicaHosts);
-      List<Boolean> isBlockCached = 
Lists.newArrayListWithCapacity(numReplicaHosts);
-      for (int i = 0; i < numReplicaHosts; ++i) {
-        diskIds.add(getDiskId(fbFileBlock, i));
-        replicaHosts.add(getReplicaHostIdx(fbFileBlock, i));
-        isBlockCached.add(isReplicaCached(fbFileBlock, i));
-      }
-      StringBuilder builder = new StringBuilder();
-      return builder.append("Offset: " + getOffset(fbFileBlock))
-          .append("Length: " + getLength(fbFileBlock))
-          .append("IsCached: " + hasCachedReplica(fbFileBlock))
-          .append("ReplicaHosts: " + Joiner.on(", ").join(replicaHosts))
-          .append("DiskIds: " + Joiner.on(", ").join(diskIds))
-          .append("Caching: " + Joiner.on(", ").join(isBlockCached))
-          .toString();
-    }
-  }
-
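FileBlock packs each replica reference into a single short: the low 15 bits
index into the table's host list and the most significant bit marks a cached
replica, which is what REPLICA_HOST_IDX_MASK encodes. A worked example of the
bit layout, with the private mask inlined for illustration:

    short mask = (1 << 15) - 1;                   // 0x7fff, as REPLICA_HOST_IDX_MASK
    int hostIdx = 5;
    // makeReplicaIdx(true, 5): set the MSB to mark the replica as cached.
    short packed = (short) (hostIdx | ~mask);     // 0x8005
    // getReplicaHostIdx() and isReplicaCached() recover both pieces.
    int unpackedIdx = packed & mask;              // 5
    boolean cached = (packed & ~mask) != 0;       // true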
   // Struct-style class for caching all the information we need to reconstruct an
   // HMS-compatible Partition object, for use in RPCs to the metastore. We do this rather
   // than cache the Thrift partition object itself as the latter can be large - thanks
@@ -1066,9 +547,9 @@ public class HdfsPartition extends CatalogObjectImpl
   public LiteralExpr getPartitionValue(int i) { return partitionKeyValues_.get(i); }
 
   @Override // FeFsPartition
-  public List<HdfsPartition.FileDescriptor> getFileDescriptors() {
+  public List<FileDescriptor> getFileDescriptors() {
     // Return a lazily transformed list from our internal bytes storage.
-    List<HdfsPartition.FileDescriptor> ret = new ArrayList<>();
+    List<FileDescriptor> ret = new ArrayList<>();
     ret.addAll(Lists.transform(encodedFileDescriptors_, FileDescriptor.FROM_BYTES));
     ret.addAll(Lists.transform(encodedInsertFileDescriptors_, FileDescriptor.FROM_BYTES));
     ret.addAll(Lists.transform(encodedDeleteFileDescriptors_, FileDescriptor.FROM_BYTES));
@@ -1076,13 +557,13 @@ public class HdfsPartition extends CatalogObjectImpl
   }
 
   @Override // FeFsPartition
-  public List<HdfsPartition.FileDescriptor> getInsertFileDescriptors() {
+  public List<FileDescriptor> getInsertFileDescriptors() {
     // Return a lazily transformed list from our internal bytes storage.
     return Lists.transform(encodedInsertFileDescriptors_, FileDescriptor.FROM_BYTES);
   }
 
   @Override // FeFsPartition
-  public List<HdfsPartition.FileDescriptor> getDeleteFileDescriptors() {
+  public List<FileDescriptor> getDeleteFileDescriptors() {
     // Return a lazily transformed list from our internal bytes storage.
     return Lists.transform(encodedDeleteFileDescriptors_, FileDescriptor.FROM_BYTES);
   }
@@ -1224,7 +705,7 @@ public class HdfsPartition extends CatalogObjectImpl
       return fileMetadataStats_.totalFileBytes;
     }
     long result = 0;
-    for (HdfsPartition.FileDescriptor fileDescriptor: getFileDescriptors()) {
+    for (FileDescriptor fileDescriptor: getFileDescriptors()) {
       result += fileDescriptor.getFileLength();
     }
     return result;
@@ -1759,7 +1240,7 @@ public class HdfsPartition extends CatalogObjectImpl
     private List<FileDescriptor> fdsFromThrift(List<THdfsFileDesc> tFileDescs) 
{
       List<FileDescriptor> ret = new ArrayList<>();
       for (THdfsFileDesc desc : tFileDescs) {
-        ret.add(HdfsPartition.FileDescriptor.fromThrift(desc));
+        ret.add(FileDescriptor.fromThrift(desc));
       }
       return ret;
     }
diff --git a/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java b/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java
index 32c58a590..0f57d4f15 100644
--- a/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java
@@ -58,8 +58,6 @@ import org.apache.impala.analysis.NullLiteral;
 import org.apache.impala.analysis.NumericLiteral;
 import org.apache.impala.analysis.PartitionKeyValue;
 import org.apache.impala.catalog.events.MetastoreEventsProcessor;
-import org.apache.impala.catalog.HdfsPartition.FileBlock;
-import org.apache.impala.catalog.HdfsPartition.FileDescriptor;
 import org.apache.impala.catalog.iceberg.GroupedContentFiles;
 import org.apache.impala.common.FileSystemUtil;
 import org.apache.impala.common.ImpalaException;
@@ -367,7 +365,7 @@ public class HdfsTable extends Table implements FeFsTable {
      * with a List of FileDescriptor's.
      */
     public FileMetadataStats(List<FileDescriptor> fds) {
-      for (HdfsPartition.FileDescriptor fd : fds) {
+      for (FileDescriptor fd : fds) {
         accumulate(fd);
       }
     }
diff --git a/fe/src/main/java/org/apache/impala/catalog/IcebergContentFileStore.java b/fe/src/main/java/org/apache/impala/catalog/IcebergContentFileStore.java
index 57ee5f537..562a85d50 100644
--- a/fe/src/main/java/org/apache/impala/catalog/IcebergContentFileStore.java
+++ b/fe/src/main/java/org/apache/impala/catalog/IcebergContentFileStore.java
@@ -36,7 +36,6 @@ import org.apache.iceberg.ContentFile;
 import org.apache.iceberg.DataFile;
 import org.apache.iceberg.DeleteFile;
 import org.apache.iceberg.FileContent;
-import org.apache.impala.catalog.HdfsPartition.FileDescriptor;
 import org.apache.impala.catalog.iceberg.GroupedContentFiles;
 import org.apache.impala.common.Pair;
 import org.apache.impala.fb.FbFileDesc;
diff --git a/fe/src/main/java/org/apache/impala/catalog/IcebergDeleteTable.java b/fe/src/main/java/org/apache/impala/catalog/IcebergDeleteTable.java
index 26253d0cc..a5ff5c51b 100644
--- a/fe/src/main/java/org/apache/impala/catalog/IcebergDeleteTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/IcebergDeleteTable.java
@@ -25,7 +25,6 @@ import org.apache.commons.lang3.NotImplementedException;
 import org.apache.iceberg.Table;
 import org.apache.impala.analysis.IcebergPartitionSpec;
 import org.apache.impala.catalog.CatalogObject.ThriftObjectType;
-import org.apache.impala.catalog.HdfsPartition.FileDescriptor;
 import org.apache.impala.thrift.TCompressionCodec;
 import org.apache.impala.thrift.THdfsTable;
 import org.apache.impala.thrift.TIcebergCatalog;
diff --git a/fe/src/main/java/org/apache/impala/catalog/IcebergEqualityDeleteTable.java b/fe/src/main/java/org/apache/impala/catalog/IcebergEqualityDeleteTable.java
index a74ca380a..0503bb21f 100644
--- a/fe/src/main/java/org/apache/impala/catalog/IcebergEqualityDeleteTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/IcebergEqualityDeleteTable.java
@@ -21,7 +21,6 @@ import java.util.Arrays;
 import java.util.List;
 import java.util.Set;
 
-import org.apache.impala.catalog.HdfsPartition.FileDescriptor;
 import org.apache.impala.common.ImpalaRuntimeException;
 import org.apache.impala.util.IcebergSchemaConverter;
 
diff --git a/fe/src/main/java/org/apache/impala/catalog/IcebergFileMetadataLoader.java b/fe/src/main/java/org/apache/impala/catalog/IcebergFileMetadataLoader.java
index 69ec01190..d4f6edcf2 100644
--- a/fe/src/main/java/org/apache/impala/catalog/IcebergFileMetadataLoader.java
+++ b/fe/src/main/java/org/apache/impala/catalog/IcebergFileMetadataLoader.java
@@ -22,7 +22,6 @@ import static org.apache.impala.catalog.ParallelFileMetadataLoader.createPool;
 import static org.apache.impala.catalog.ParallelFileMetadataLoader.getPoolSize;
 
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 
@@ -39,7 +38,6 @@ import java.util.concurrent.Future;
 import java.util.stream.Collectors;
 import java.util.stream.StreamSupport;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -48,7 +46,6 @@ import org.apache.hadoop.hive.common.ValidTxnList;
 import org.apache.hadoop.hive.common.ValidWriteIdList;
 import org.apache.iceberg.ContentFile;
 import org.apache.impala.catalog.FeIcebergTable.Utils;
-import org.apache.impala.catalog.HdfsPartition.FileDescriptor;
 import org.apache.impala.catalog.HdfsTable.FileMetadataStats;
 import org.apache.impala.catalog.iceberg.GroupedContentFiles;
 import org.apache.impala.common.FileSystemUtil;
diff --git a/fe/src/main/java/org/apache/impala/catalog/IcebergPositionDeleteTable.java b/fe/src/main/java/org/apache/impala/catalog/IcebergPositionDeleteTable.java
index fb1667638..06ed85aff 100644
--- a/fe/src/main/java/org/apache/impala/catalog/IcebergPositionDeleteTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/IcebergPositionDeleteTable.java
@@ -20,7 +20,6 @@ package org.apache.impala.catalog;
 import java.util.Collections;
 import java.util.Set;
 
-import org.apache.impala.catalog.HdfsPartition.FileDescriptor;
 import org.apache.impala.thrift.TColumnStats;
 
 /**
diff --git a/fe/src/main/java/org/apache/impala/catalog/ParallelFileMetadataLoader.java b/fe/src/main/java/org/apache/impala/catalog/ParallelFileMetadataLoader.java
index 7070884a8..b3362da03 100644
--- a/fe/src/main/java/org/apache/impala/catalog/ParallelFileMetadataLoader.java
+++ b/fe/src/main/java/org/apache/impala/catalog/ParallelFileMetadataLoader.java
@@ -34,7 +34,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.ValidTxnList;
 import org.apache.hadoop.hive.common.ValidWriteIdList;
 import org.apache.impala.catalog.HdfsPartition.Builder;
-import org.apache.impala.catalog.HdfsPartition.FileDescriptor;
 import org.apache.impala.catalog.iceberg.GroupedContentFiles;
 import org.apache.impala.common.FileSystemUtil;
 import org.apache.impala.common.Pair;
diff --git a/fe/src/main/java/org/apache/impala/catalog/local/CatalogdMetaProvider.java b/fe/src/main/java/org/apache/impala/catalog/local/CatalogdMetaProvider.java
index 275ebb4a5..596c8517b 100644
--- a/fe/src/main/java/org/apache/impala/catalog/local/CatalogdMetaProvider.java
+++ b/fe/src/main/java/org/apache/impala/catalog/local/CatalogdMetaProvider.java
@@ -62,9 +62,9 @@ import org.apache.impala.catalog.CatalogDeltaLog;
 import org.apache.impala.catalog.CatalogException;
 import org.apache.impala.catalog.CatalogObjectCache;
 import org.apache.impala.catalog.DataSource;
+import org.apache.impala.catalog.FileDescriptor;
 import org.apache.impala.catalog.Function;
 import org.apache.impala.catalog.HdfsCachePool;
-import org.apache.impala.catalog.HdfsPartition.FileDescriptor;
 import org.apache.impala.catalog.HdfsPartitionLocationCompressor;
 import org.apache.impala.catalog.HdfsStorageDescriptor;
 import org.apache.impala.catalog.HdfsTable;
diff --git a/fe/src/main/java/org/apache/impala/catalog/local/DirectMetaProvider.java b/fe/src/main/java/org/apache/impala/catalog/local/DirectMetaProvider.java
index f860f44c4..5cc14a120 100644
--- a/fe/src/main/java/org/apache/impala/catalog/local/DirectMetaProvider.java
+++ b/fe/src/main/java/org/apache/impala/catalog/local/DirectMetaProvider.java
@@ -38,10 +38,10 @@ import org.apache.hadoop.hive.metastore.api.TableMeta;
 import org.apache.impala.authorization.AuthorizationPolicy;
 import org.apache.impala.catalog.CatalogException;
 import org.apache.impala.catalog.DataSource;
+import org.apache.impala.catalog.FileDescriptor;
 import org.apache.impala.catalog.FileMetadataLoader;
 import org.apache.impala.catalog.Function;
 import org.apache.impala.catalog.HdfsCachePool;
-import org.apache.impala.catalog.HdfsPartition.FileDescriptor;
 import org.apache.impala.catalog.HdfsPartitionLocationCompressor;
 import org.apache.impala.catalog.HdfsStorageDescriptor;
 import org.apache.impala.catalog.MetaStoreClientPool;
diff --git a/fe/src/main/java/org/apache/impala/catalog/local/LocalFsPartition.java b/fe/src/main/java/org/apache/impala/catalog/local/LocalFsPartition.java
index f958c5837..1c9b196ea 100644
--- a/fe/src/main/java/org/apache/impala/catalog/local/LocalFsPartition.java
+++ b/fe/src/main/java/org/apache/impala/catalog/local/LocalFsPartition.java
@@ -28,8 +28,8 @@ import org.apache.impala.analysis.LiteralExpr;
 import org.apache.impala.catalog.FeCatalogUtils;
 import org.apache.impala.catalog.FeFsPartition;
 import org.apache.impala.catalog.FeFsTable;
+import org.apache.impala.catalog.FileDescriptor;
 import org.apache.impala.catalog.HdfsFileFormat;
-import org.apache.impala.catalog.HdfsPartition.FileDescriptor;
 import org.apache.impala.catalog.HdfsPartitionLocationCompressor;
 import org.apache.impala.catalog.HdfsStorageDescriptor;
 import org.apache.impala.catalog.PartitionStatsUtil;
diff --git a/fe/src/main/java/org/apache/impala/catalog/local/LocalFsTable.java b/fe/src/main/java/org/apache/impala/catalog/local/LocalFsTable.java
index c9d10d5c0..4f5521ddf 100644
--- a/fe/src/main/java/org/apache/impala/catalog/local/LocalFsTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/local/LocalFsTable.java
@@ -42,8 +42,8 @@ import org.apache.impala.catalog.ColumnStats;
 import org.apache.impala.catalog.FeCatalogUtils;
 import org.apache.impala.catalog.FeFsPartition;
 import org.apache.impala.catalog.FeFsTable;
+import org.apache.impala.catalog.FileDescriptor;
 import org.apache.impala.catalog.HdfsFileFormat;
-import org.apache.impala.catalog.HdfsPartition.FileDescriptor;
 import org.apache.impala.catalog.HdfsStorageDescriptor;
 import org.apache.impala.catalog.HdfsTable;
 import org.apache.impala.catalog.PrunablePartition;
diff --git a/fe/src/main/java/org/apache/impala/catalog/local/MetaProvider.java b/fe/src/main/java/org/apache/impala/catalog/local/MetaProvider.java
index d08b21b70..15ade59f7 100644
--- a/fe/src/main/java/org/apache/impala/catalog/local/MetaProvider.java
+++ b/fe/src/main/java/org/apache/impala/catalog/local/MetaProvider.java
@@ -29,9 +29,9 @@ import org.apache.hadoop.hive.metastore.api.UnknownDBException;
 import org.apache.impala.authorization.AuthorizationPolicy;
 import org.apache.impala.catalog.CatalogException;
 import org.apache.impala.catalog.DataSource;
+import org.apache.impala.catalog.FileDescriptor;
 import org.apache.impala.catalog.Function;
 import org.apache.impala.catalog.HdfsCachePool;
-import org.apache.impala.catalog.HdfsPartition.FileDescriptor;
 import org.apache.impala.catalog.HdfsPartitionLocationCompressor;
 import org.apache.impala.catalog.HdfsStorageDescriptor;
 import org.apache.impala.catalog.SqlConstraints;
diff --git a/fe/src/main/java/org/apache/impala/catalog/metastore/CatalogHmsClientUtils.java b/fe/src/main/java/org/apache/impala/catalog/metastore/CatalogHmsClientUtils.java
index 6d05747c9..9bbbace14 100644
--- a/fe/src/main/java/org/apache/impala/catalog/metastore/CatalogHmsClientUtils.java
+++ b/fe/src/main/java/org/apache/impala/catalog/metastore/CatalogHmsClientUtils.java
@@ -34,7 +34,7 @@ import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.impala.catalog.CatalogException;
 import org.apache.impala.catalog.CatalogHmsAPIHelper;
-import org.apache.impala.catalog.HdfsPartition.FileDescriptor;
+import org.apache.impala.catalog.FileDescriptor;
 import org.apache.impala.thrift.THdfsFileDesc;
 import org.apache.impala.thrift.TNetworkAddress;
 import org.apache.impala.util.ListMap;
diff --git a/fe/src/main/java/org/apache/impala/planner/HdfsScanNode.java b/fe/src/main/java/org/apache/impala/planner/HdfsScanNode.java
index c7bde15d7..396db73ba 100644
--- a/fe/src/main/java/org/apache/impala/planner/HdfsScanNode.java
+++ b/fe/src/main/java/org/apache/impala/planner/HdfsScanNode.java
@@ -56,10 +56,10 @@ import org.apache.impala.catalog.ColumnStats;
 import org.apache.impala.catalog.FeFsPartition;
 import org.apache.impala.catalog.FeFsTable;
 import org.apache.impala.catalog.FeTable;
+import org.apache.impala.catalog.FileBlock;
+import org.apache.impala.catalog.FileDescriptor;
 import org.apache.impala.catalog.HdfsCompression;
 import org.apache.impala.catalog.HdfsFileFormat;
-import org.apache.impala.catalog.HdfsPartition.FileBlock;
-import org.apache.impala.catalog.HdfsPartition.FileDescriptor;
 import org.apache.impala.catalog.PrimitiveType;
 import org.apache.impala.catalog.ScalarType;
 import org.apache.impala.catalog.Table;
diff --git a/fe/src/main/java/org/apache/impala/planner/IcebergScanNode.java b/fe/src/main/java/org/apache/impala/planner/IcebergScanNode.java
index d859835b3..5cd64aebf 100644
--- a/fe/src/main/java/org/apache/impala/planner/IcebergScanNode.java
+++ b/fe/src/main/java/org/apache/impala/planner/IcebergScanNode.java
@@ -33,8 +33,8 @@ import org.apache.impala.catalog.FeCatalogUtils;
 import org.apache.impala.catalog.FeFsPartition;
 import org.apache.impala.catalog.FeFsTable;
 import org.apache.impala.catalog.FeIcebergTable;
+import org.apache.impala.catalog.FileDescriptor;
 import org.apache.impala.catalog.HdfsFileFormat;
-import org.apache.impala.catalog.HdfsPartition.FileDescriptor;
 import org.apache.impala.catalog.Type;
 import org.apache.impala.common.ImpalaRuntimeException;
 import org.apache.impala.common.ThriftSerializationCtx;
diff --git a/fe/src/main/java/org/apache/impala/planner/IcebergScanPlanner.java b/fe/src/main/java/org/apache/impala/planner/IcebergScanPlanner.java
index b48415996..7e20f0f5a 100644
--- a/fe/src/main/java/org/apache/impala/planner/IcebergScanPlanner.java
+++ b/fe/src/main/java/org/apache/impala/planner/IcebergScanPlanner.java
@@ -60,7 +60,7 @@ import org.apache.impala.analysis.TupleId;
 import org.apache.impala.catalog.Column;
 import org.apache.impala.catalog.ColumnStats;
 import org.apache.impala.catalog.FeIcebergTable;
-import org.apache.impala.catalog.HdfsPartition.FileDescriptor;
+import org.apache.impala.catalog.FileDescriptor;
 import org.apache.impala.catalog.IcebergColumn;
 import org.apache.impala.catalog.IcebergContentFileStore;
 import org.apache.impala.catalog.IcebergEqualityDeleteTable;
diff --git a/fe/src/main/java/org/apache/impala/util/AcidUtils.java b/fe/src/main/java/org/apache/impala/util/AcidUtils.java
index c8efe788b..8b369a1a4 100644
--- a/fe/src/main/java/org/apache/impala/util/AcidUtils.java
+++ b/fe/src/main/java/org/apache/impala/util/AcidUtils.java
@@ -43,9 +43,9 @@ import org.apache.hadoop.hive.common.ValidWriteIdList.RangeResponse;
 import org.apache.impala.catalog.CatalogException;
 import org.apache.impala.catalog.CatalogServiceCatalog;
 import org.apache.impala.catalog.Column;
+import org.apache.impala.catalog.FileDescriptor;
 import org.apache.impala.catalog.FileMetadataLoader.LoadStats;
 import org.apache.impala.catalog.HdfsPartition;
-import org.apache.impala.catalog.HdfsPartition.FileDescriptor;
 import org.apache.impala.catalog.HdfsTable;
 import org.apache.impala.catalog.ScalarType;
 import org.apache.impala.catalog.StructField;
diff --git a/fe/src/test/java/org/apache/impala/catalog/FileMetadataLoaderTest.java b/fe/src/test/java/org/apache/impala/catalog/FileMetadataLoaderTest.java
index af9c6b62e..30dc8c75b 100644
--- a/fe/src/test/java/org/apache/impala/catalog/FileMetadataLoaderTest.java
+++ b/fe/src/test/java/org/apache/impala/catalog/FileMetadataLoaderTest.java
@@ -31,7 +31,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.ValidReadTxnList;
 import org.apache.hadoop.hive.common.ValidWriteIdList;
 import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.impala.catalog.HdfsPartition.FileDescriptor;
 import org.apache.impala.catalog.iceberg.GroupedContentFiles;
 import org.apache.impala.compat.MetastoreShim;
 import org.apache.impala.service.BackendConfig;
diff --git a/fe/src/test/java/org/apache/impala/catalog/HdfsPartitionTest.java b/fe/src/test/java/org/apache/impala/catalog/HdfsPartitionTest.java
index 5f9eb30c9..f4a0fb21d 100644
--- a/fe/src/test/java/org/apache/impala/catalog/HdfsPartitionTest.java
+++ b/fe/src/test/java/org/apache/impala/catalog/HdfsPartitionTest.java
@@ -31,7 +31,6 @@ import org.apache.impala.analysis.LiteralExpr;
 import org.apache.impala.analysis.NullLiteral;
 import org.apache.impala.analysis.NumericLiteral;
 import org.apache.impala.analysis.StringLiteral;
-import org.apache.impala.catalog.HdfsPartition.FileDescriptor;
 import org.apache.impala.service.FeSupport;
 import org.apache.impala.thrift.TNetworkAddress;
 import org.apache.impala.util.ListMap;
diff --git a/fe/src/test/java/org/apache/impala/catalog/IcebergContentFileStoreTest.java b/fe/src/test/java/org/apache/impala/catalog/IcebergContentFileStoreTest.java
index 6d335c6bb..3eed0e7fb 100644
--- a/fe/src/test/java/org/apache/impala/catalog/IcebergContentFileStoreTest.java
+++ b/fe/src/test/java/org/apache/impala/catalog/IcebergContentFileStoreTest.java
@@ -26,7 +26,6 @@ import com.google.flatbuffers.FlatBufferBuilder;
 import java.nio.ByteBuffer;
 import java.util.HashMap;
 import java.util.Map;
-import org.apache.impala.catalog.HdfsPartition.FileDescriptor;
 import org.apache.impala.fb.FbFileDesc;
 import org.apache.impala.testutil.CatalogServiceTestCatalog;
 import org.junit.After;
diff --git a/fe/src/test/java/org/apache/impala/catalog/PartialCatalogInfoWriteIdTest.java b/fe/src/test/java/org/apache/impala/catalog/PartialCatalogInfoWriteIdTest.java
index ee3b1ad93..1b31eadd5 100644
--- a/fe/src/test/java/org/apache/impala/catalog/PartialCatalogInfoWriteIdTest.java
+++ b/fe/src/test/java/org/apache/impala/catalog/PartialCatalogInfoWriteIdTest.java
@@ -629,8 +629,8 @@ public class PartialCatalogInfoWriteIdTest {
 
   private List<String> getPathsFromFileDescriptors(List<THdfsFileDesc> 
fileDescriptors) {
     return fileDescriptors.stream()
-        .map(HdfsPartition.FileDescriptor::fromThrift)
-        .map(HdfsPartition.FileDescriptor::getPath)
+        .map(FileDescriptor::fromThrift)
+        .map(FileDescriptor::getPath)
         .collect(Collectors.toList());
   }
 
diff --git a/fe/src/test/java/org/apache/impala/catalog/events/MetastoreEventsProcessorTest.java b/fe/src/test/java/org/apache/impala/catalog/events/MetastoreEventsProcessorTest.java
index e550bfdb1..b1080d9dd 100644
--- a/fe/src/test/java/org/apache/impala/catalog/events/MetastoreEventsProcessorTest.java
+++ b/fe/src/test/java/org/apache/impala/catalog/events/MetastoreEventsProcessorTest.java
@@ -75,6 +75,7 @@ import org.apache.impala.catalog.DatabaseNotFoundException;
 import org.apache.impala.catalog.Db;
 import org.apache.impala.catalog.FeCatalogUtils;
 import org.apache.impala.catalog.FeFsPartition;
+import org.apache.impala.catalog.FileDescriptor;
 import org.apache.impala.catalog.HdfsPartition;
 import org.apache.impala.catalog.HdfsTable;
 import org.apache.impala.catalog.IncompleteTable;
@@ -3051,7 +3052,7 @@ public class MetastoreEventsProcessorTest {
           .getParameters().get("testAlterPartition"));
       long numLoadFMDBefore =
           tbl.getMetrics().getCounter(HdfsTable.NUM_LOAD_FILEMETADATA_METRIC).getCount();
-      List<HdfsPartition.FileDescriptor> FDbefore = tbl.getPartitionsForNames(
+      List<FileDescriptor> FDbefore = tbl.getPartitionsForNames(
           Collections.singletonList("p1=1")).get(0).getFileDescriptors();
       eventsProcessor_.processEvents();
       // After event processing, parameters should be updated
@@ -3060,14 +3061,14 @@ public class MetastoreEventsProcessorTest {
       // However, file metadata should not be reloaded after an alter partition event
       long numLoadFMDAfter =
           tbl.getMetrics().getCounter(HdfsTable.NUM_LOAD_FILEMETADATA_METRIC).getCount();
-      List<HdfsPartition.FileDescriptor> FDafter = tbl.getPartitionsForNames(
+      List<FileDescriptor> FDafter = tbl.getPartitionsForNames(
           Collections.singletonList("p1=1")).get(0).getFileDescriptors();
       assertEquals("File metadata should not be reloaded",
           numLoadFMDBefore, numLoadFMDAfter);
       // getFileDescriptors() always returns a new instance, so we need to compare the
       // underlying array
-      assertEquals(Lists.transform(FDbefore, HdfsPartition.FileDescriptor.TO_BYTES),
-          Lists.transform(FDafter, HdfsPartition.FileDescriptor.TO_BYTES));
+      assertEquals(Lists.transform(FDbefore, FileDescriptor.TO_BYTES),
+          Lists.transform(FDafter, FileDescriptor.TO_BYTES));
     } finally {
       // Restore original config
       BackendConfig.create(origCfg);
diff --git a/fe/src/test/java/org/apache/impala/catalog/local/CatalogdMetaProviderTest.java b/fe/src/test/java/org/apache/impala/catalog/local/CatalogdMetaProviderTest.java
index 57daa7765..64845affc 100644
--- a/fe/src/test/java/org/apache/impala/catalog/local/CatalogdMetaProviderTest.java
+++ b/fe/src/test/java/org/apache/impala/catalog/local/CatalogdMetaProviderTest.java
@@ -34,6 +34,7 @@ import java.util.stream.Collectors;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.impala.catalog.FileDescriptor;
 import org.apache.impala.catalog.HdfsPartition;
 import org.apache.impala.catalog.local.CatalogdMetaProvider.SizeOfWeigher;
 import org.apache.impala.catalog.local.MetaProvider.PartitionMetadata;
@@ -661,7 +662,7 @@ public class CatalogdMetaProviderTest {
       List<String> paths = partMap.values().stream()
           .map(PartitionMetadata::getFileDescriptors)
           .flatMap(Collection::stream)
-          .map(HdfsPartition.FileDescriptor::getPath)
+          .map(FileDescriptor::getPath)
           .collect(Collectors.toList());
       assertEquals("Actual paths: " + paths, 1, paths.size());
     }
diff --git a/fe/src/test/java/org/apache/impala/catalog/local/LocalCatalogTest.java b/fe/src/test/java/org/apache/impala/catalog/local/LocalCatalogTest.java
index f6baa7e86..248710d1d 100644
--- a/fe/src/test/java/org/apache/impala/catalog/local/LocalCatalogTest.java
+++ b/fe/src/test/java/org/apache/impala/catalog/local/LocalCatalogTest.java
@@ -37,8 +37,8 @@ import org.apache.impala.catalog.FeFsPartition;
 import org.apache.impala.catalog.FeFsTable;
 import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.FeView;
+import org.apache.impala.catalog.FileDescriptor;
 import org.apache.impala.catalog.IcebergContentFileStore;
-import org.apache.impala.catalog.HdfsPartition.FileDescriptor;
 import org.apache.impala.catalog.Type;
 import org.apache.impala.fb.FbFileBlock;
 import org.apache.impala.service.BackendConfig;
diff --git a/fe/src/test/java/org/apache/impala/catalog/metastore/CatalogHmsFileMetadataTest.java b/fe/src/test/java/org/apache/impala/catalog/metastore/CatalogHmsFileMetadataTest.java
index ff67596cb..81f5066ba 100644
--- a/fe/src/test/java/org/apache/impala/catalog/metastore/CatalogHmsFileMetadataTest.java
+++ b/fe/src/test/java/org/apache/impala/catalog/metastore/CatalogHmsFileMetadataTest.java
@@ -36,9 +36,9 @@ import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.impala.catalog.CatalogHmsAPIHelper;
+import org.apache.impala.catalog.FileBlock;
+import org.apache.impala.catalog.FileDescriptor;
 import org.apache.impala.catalog.HdfsPartition;
-import org.apache.impala.catalog.HdfsPartition.FileBlock;
-import org.apache.impala.catalog.HdfsPartition.FileDescriptor;
 import org.apache.impala.catalog.HdfsTable;
 import org.apache.impala.catalog.MetaStoreClientPool.MetaStoreClient;
 import org.apache.impala.fb.FbFileBlock;
diff --git a/fe/src/test/java/org/apache/impala/catalog/metastore/EnableCatalogdHmsCacheFlagTest.java b/fe/src/test/java/org/apache/impala/catalog/metastore/EnableCatalogdHmsCacheFlagTest.java
index a9e9530ac..a9d3e279c 100644
--- a/fe/src/test/java/org/apache/impala/catalog/metastore/EnableCatalogdHmsCacheFlagTest.java
+++ b/fe/src/test/java/org/apache/impala/catalog/metastore/EnableCatalogdHmsCacheFlagTest.java
@@ -30,8 +30,8 @@ import org.apache.hadoop.hive.metastore.api.GetPartitionsByNamesRequest;
 import org.apache.hadoop.hive.metastore.api.GetPartitionsByNamesResult;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+import org.apache.impala.catalog.FileDescriptor;
 import org.apache.impala.catalog.HdfsPartition;
-import org.apache.impala.catalog.HdfsPartition.FileDescriptor;
 import org.apache.impala.catalog.HdfsTable;
 import org.apache.impala.service.BackendConfig;
 import org.junit.Test;
diff --git a/fe/src/test/java/org/apache/impala/planner/ExplainTest.java b/fe/src/test/java/org/apache/impala/planner/ExplainTest.java
index ec84ea947..12d24ce11 100644
--- a/fe/src/test/java/org/apache/impala/planner/ExplainTest.java
+++ b/fe/src/test/java/org/apache/impala/planner/ExplainTest.java
@@ -29,6 +29,7 @@ import org.apache.impala.analysis.TupleDescriptor;
 import org.apache.impala.analysis.TupleId;
 import org.apache.impala.catalog.FeDb;
 import org.apache.impala.catalog.FeFsTable;
+import org.apache.impala.catalog.FileDescriptor;
 import org.apache.impala.catalog.HdfsFileFormat;
 import org.apache.impala.catalog.HdfsPartition;
 import org.apache.impala.common.FileSystemUtil;
@@ -154,8 +155,8 @@ public class ExplainTest extends FrontendTestBase {
       String path, FileSystemUtil.FsType fsType) {
     HdfsPartition mockHdfsPartition = mock(HdfsPartition.class);
 
-    List<HdfsPartition.FileDescriptor> mockFilesDescs = new ArrayList<>();
-    HdfsPartition.FileDescriptor mockFileDesc = mock(HdfsPartition.FileDescriptor.class);
+    List<FileDescriptor> mockFilesDescs = new ArrayList<>();
+    FileDescriptor mockFileDesc = mock(FileDescriptor.class);
     when(mockFileDesc.getFileLength()).thenReturn(1L);
     when(mockFileDesc.getRelativePath()).thenReturn("");
     when(mockFileDesc.getPath()).thenReturn("");
diff --git a/fe/src/test/java/org/apache/impala/testutil/BlockIdGenerator.java b/fe/src/test/java/org/apache/impala/testutil/BlockIdGenerator.java
index 50dfe99e5..f101fd3b6 100644
--- a/fe/src/test/java/org/apache/impala/testutil/BlockIdGenerator.java
+++ b/fe/src/test/java/org/apache/impala/testutil/BlockIdGenerator.java
@@ -33,7 +33,7 @@ import org.apache.impala.catalog.FeCatalogUtils;
 import org.apache.impala.catalog.FeDb;
 import org.apache.impala.catalog.FeFsPartition;
 import org.apache.impala.catalog.FeTable;
-import org.apache.impala.catalog.HdfsPartition.FileDescriptor;
+import org.apache.impala.catalog.FileDescriptor;
 import org.apache.impala.catalog.HdfsTable;
 import org.apache.impala.util.PatternMatcher;
 

