This is an automated email from the ASF dual-hosted git repository.

stigahuang pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git

commit e7aa31296c8cb51572992294ab4fec8eb3d8541c
Author: Noemi Pap-Takacs <[email protected]>
AuthorDate: Thu Apr 3 13:19:08 2025 +0200

    IMPALA-13738 (Part2): Clean up code in Catalog's table and partition 
interfaces
    
    Prepare for FE/Catalog refactor by resolving some TODOs and
    cleaning up unused code in most table and partition interfaces.
     - removed dead code
     - removed unused imports
     - moved static functions from Util classes to interfaces
       as default methods
    
    Testing:
      Run existing tests to validate changes.
    
    Change-Id: I8d9c7ba876e39fa4f4d067761f25dc4ecfd55702
    Reviewed-on: http://gerrit.cloudera.org:8080/22728
    Reviewed-by: Impala Public Jenkins <[email protected]>
    Tested-by: Impala Public Jenkins <[email protected]>
---
 common/thrift/CatalogObjects.thrift                |   6 +-
 .../apache/impala/analysis/ComputeStatsStmt.java   |  20 +--
 .../org/apache/impala/catalog/CtasTargetTable.java |   5 -
 .../org/apache/impala/catalog/FeCatalogUtils.java  |  74 +---------
 .../org/apache/impala/catalog/FeFsPartition.java   |  60 +++++---
 .../java/org/apache/impala/catalog/FeFsTable.java  | 163 ++++++++++++++++++---
 .../org/apache/impala/catalog/FeIcebergTable.java  |   5 -
 .../java/org/apache/impala/catalog/FeTable.java    |  17 ++-
 .../org/apache/impala/catalog/HdfsPartition.java   |  19 +--
 .../java/org/apache/impala/catalog/HdfsTable.java  | 162 ++------------------
 .../apache/impala/catalog/IcebergDeleteTable.java  |   2 +-
 .../impala/catalog/IcebergTimeTravelTable.java     |  13 --
 .../apache/impala/catalog/PrunablePartition.java   |   8 +-
 .../main/java/org/apache/impala/catalog/Table.java |  15 --
 .../org/apache/impala/catalog/VirtualTable.java    |   3 -
 .../impala/catalog/iceberg/IcebergCtasTarget.java  |   3 +-
 .../catalog/iceberg/IcebergMetadataTable.java      |   4 -
 .../apache/impala/catalog/local/LocalCatalog.java  |   3 +-
 .../impala/catalog/local/LocalDataSourceTable.java |   3 +-
 .../impala/catalog/local/LocalFsPartition.java     |  10 --
 .../apache/impala/catalog/local/LocalFsTable.java  |  14 +-
 .../impala/catalog/local/LocalHbaseTable.java      |   3 +-
 .../impala/catalog/local/LocalIcebergTable.java    |   6 +-
 .../impala/catalog/local/LocalKuduTable.java       |   6 +-
 .../impala/catalog/local/LocalPartitionSpec.java   |   2 +-
 .../impala/catalog/local/LocalSystemTable.java     |   3 +-
 .../apache/impala/catalog/local/LocalTable.java    |   6 -
 .../org/apache/impala/planner/IcebergScanNode.java |   4 +-
 .../apache/impala/service/CatalogOpExecutor.java   |  24 +--
 .../catalog/CatalogObjectToFromThriftTest.java     |   8 +-
 .../org/apache/impala/catalog/CatalogTest.java     |   3 +-
 .../events/MetastoreEventsProcessorTest.java       |  20 +--
 .../impala/catalog/local/LocalCatalogTest.java     |   7 +-
 .../CatalogHmsSyncToLatestEventIdTest.java         |  10 +-
 .../apache/impala/testutil/BlockIdGenerator.java   |   4 +-
 35 files changed, 276 insertions(+), 439 deletions(-)

diff --git a/common/thrift/CatalogObjects.thrift 
b/common/thrift/CatalogObjects.thrift
index 0afc4f3a1..57b1ddf18 100644
--- a/common/thrift/CatalogObjects.thrift
+++ b/common/thrift/CatalogObjects.thrift
@@ -472,7 +472,7 @@ struct THdfsTable {
   // String to indicate a NULL column value in text files
   5: required string nullColumnValue
 
-  // Set to the table's Avro schema if this is an Avro table
+  // Set to the table's Avro schema if this table contains Avro files
   6: optional string avroSchema
 
   // Map from partition id to partition metadata.
@@ -721,7 +721,7 @@ struct TTable {
   // Determines the table type - either HDFS, HBASE, or VIEW.
   9: optional TTableType table_type
 
-  // Set iff this is an HDFS table
+  // Set iff this is an HDFS table or Iceberg table
   10: optional THdfsTable hdfs_table
 
   // Set iff this is an Hbase table
@@ -741,7 +741,7 @@ struct TTable {
   // Time used for storage loading in nanoseconds.
   16: optional i64 storage_metadata_load_time_ns
 
-  // Set if this a iceberg table
+  // Set if this is an Iceberg table
   17: optional TIcebergTable iceberg_table
 
   // Comment of the table/view. Set only for FeIncompleteTable where msTable 
doesn't
diff --git a/fe/src/main/java/org/apache/impala/analysis/ComputeStatsStmt.java 
b/fe/src/main/java/org/apache/impala/analysis/ComputeStatsStmt.java
index 762b5607b..bd2dd195a 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ComputeStatsStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ComputeStatsStmt.java
@@ -45,7 +45,6 @@ import org.apache.impala.catalog.Type;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.common.PrintUtils;
 import org.apache.impala.common.RuntimeEnv;
-import org.apache.impala.planner.HdfsScanNode;
 import org.apache.impala.service.BackendConfig;
 import org.apache.impala.service.CatalogOpExecutor;
 import org.apache.impala.service.FrontendProfile;
@@ -464,8 +463,7 @@ public class ComputeStatsStmt extends StatementBase 
implements SingleTableStmt {
       // error if the estimate is greater than --inc_stats_size_limit_bytes in 
bytes
       if (isIncremental_) {
         long numOfAllIncStatsPartitions = 0;
-        Collection<? extends FeFsPartition> allPartitions =
-            FeCatalogUtils.loadAllPartitions(hdfsTable);
+        Collection<? extends FeFsPartition> allPartitions = 
hdfsTable.loadAllPartitions();
 
         if (partitionSet_ == null) {
           numOfAllIncStatsPartitions = allPartitions.size();
@@ -540,8 +538,7 @@ public class ComputeStatsStmt extends StatementBase 
implements SingleTableStmt {
         }
 
         // Get incremental statistics from all relevant partitions.
-        Collection<? extends FeFsPartition> allPartitions =
-            FeCatalogUtils.loadAllPartitions(hdfsTable);
+        Collection<? extends FeFsPartition> allPartitions = 
hdfsTable.loadAllPartitions();
         Map<Long, TPartitionStats> partitionStats =
             getOrFetchPartitionStats(analyzer, hdfsTable, allPartitions,
                 /* excludedPartitions= */ Collections.<Long>emptySet());
@@ -578,8 +575,7 @@ public class ComputeStatsStmt extends StatementBase 
implements SingleTableStmt {
           targetPartitions.add(p.getId());
         }
         // Get incremental statistics for partitions that are not recomputed.
-        Collection<? extends FeFsPartition> allPartitions =
-            FeCatalogUtils.loadAllPartitions(hdfsTable);
+        Collection<? extends FeFsPartition> allPartitions = 
hdfsTable.loadAllPartitions();
         Map<Long, TPartitionStats> partitionStats = getOrFetchPartitionStats(
             analyzer, hdfsTable, allPartitions, targetPartitions);
         validPartStats_.addAll(partitionStats.values());
@@ -832,8 +828,7 @@ public class ComputeStatsStmt extends StatementBase 
implements SingleTableStmt {
     long minSampleBytes = 
analyzer.getQueryOptions().compute_stats_min_sample_size;
     long samplePerc = sampleParams_.getPercentBytes();
     // TODO(todd): can we avoid loading all the partitions for this?
-    Collection<? extends FeFsPartition> partitions =
-        FeCatalogUtils.loadAllPartitions(hdfsTable);
+    Collection<? extends FeFsPartition> partitions = 
hdfsTable.loadAllPartitions();
     Map<Long, List<FileDescriptor>> sample = FeFsTable.Utils.getFilesSample(
         hdfsTable, partitions, samplePerc, minSampleBytes, sampleSeed);
     long sampleFileBytes = 0;
@@ -982,7 +977,7 @@ public class ComputeStatsStmt extends StatementBase 
implements SingleTableStmt {
       affectedPartitions = partitionSet_.getPartitions();
     } else {
       FeFsTable hdfsTable = (FeFsTable)table_;
-      affectedPartitions = FeCatalogUtils.loadAllPartitions(hdfsTable);
+      affectedPartitions = hdfsTable.loadAllPartitions();
     }
     for (FeFsPartition partition: affectedPartitions) {
       if (partition.getFileFormat() != HdfsFileFormat.PARQUET
@@ -1007,14 +1002,13 @@ public class ComputeStatsStmt extends StatementBase 
implements SingleTableStmt {
     Set<Long> partitionIds = hdfsTable.getPartitionIds();
     if (partitionIds.size() > 0) {
       for (Long partitionId : partitionIds) {
-        FeFsPartition partition = FeCatalogUtils.loadPartition(hdfsTable, 
partitionId);
+        FeFsPartition partition = hdfsTable.loadPartition(partitionId);
         if (partition.getFileFormat().isParquetBased()) {
           return true;
         }
       }
     } else {
-      Collection<? extends FeFsPartition> allPartitions =
-          FeCatalogUtils.loadAllPartitions(hdfsTable);
+      Collection<? extends FeFsPartition> allPartitions = 
hdfsTable.loadAllPartitions();
       for (FeFsPartition partition : allPartitions) {
         if (partition.getFileFormat().isParquetBased()) {
           return true;
diff --git a/fe/src/main/java/org/apache/impala/catalog/CtasTargetTable.java 
b/fe/src/main/java/org/apache/impala/catalog/CtasTargetTable.java
index d697e52f9..cd8247e25 100644
--- a/fe/src/main/java/org/apache/impala/catalog/CtasTargetTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/CtasTargetTable.java
@@ -125,11 +125,6 @@ public abstract class CtasTargetTable implements FeTable {
   @Override
   public List<String> getColumnNames() { return 
Column.toColumnNames(colsByPos_); }
 
-  @Override
-  public SqlConstraints getSqlConstraints() {
-    return null;
-  }
-
   @Override
   public int getNumClusteringCols() {
     return numClusteringCols_;
diff --git a/fe/src/main/java/org/apache/impala/catalog/FeCatalogUtils.java 
b/fe/src/main/java/org/apache/impala/catalog/FeCatalogUtils.java
index 6fb4f6399..b8a892ade 100644
--- a/fe/src/main/java/org/apache/impala/catalog/FeCatalogUtils.java
+++ b/fe/src/main/java/org/apache/impala/catalog/FeCatalogUtils.java
@@ -35,7 +35,6 @@ import org.apache.impala.analysis.Expr;
 import org.apache.impala.analysis.LiteralExpr;
 import org.apache.impala.analysis.NullLiteral;
 import org.apache.impala.analysis.PartitionKeyValue;
-import org.apache.impala.analysis.ToSqlUtils;
 import org.apache.impala.catalog.CatalogObject.ThriftObjectType;
 import org.apache.impala.catalog.local.CatalogdMetaProvider;
 import org.apache.impala.catalog.local.LocalCatalog;
@@ -49,7 +48,6 @@ import org.apache.impala.common.ImpalaException;
 import org.apache.impala.common.NotImplementedException;
 import org.apache.impala.service.BackendConfig;
 import org.apache.impala.thrift.TCatalogObject;
-import org.apache.impala.thrift.TColumnDescriptor;
 import org.apache.impala.thrift.TGetCatalogMetricsResult;
 import org.apache.impala.thrift.THdfsPartition;
 import org.apache.impala.thrift.TTable;
@@ -146,15 +144,6 @@ public abstract class FeCatalogUtils {
     }
   }
 
-  // TODO(todd): move to a default method in FeTable in Java8
-  public static List<TColumnDescriptor> getTColumnDescriptors(FeTable table) {
-    List<TColumnDescriptor> colDescs = new ArrayList<>();
-    for (Column col: table.getColumns()) {
-      colDescs.add(col.toDescriptor());
-    }
-    return colDescs;
-  }
-
   /**
    * Given the list of column stats returned from the metastore, inject those
    * stats into matching columns in 'table'.
@@ -212,32 +201,6 @@ public abstract class FeCatalogUtils {
     return -1;
   }
 
-  /**
-   * Convenience method to load exactly one partition from a table.
-   *
-   * TODO(todd): upon moving to Java8 this could be a default method
-   * in FeFsTable.
-   */
-  public static FeFsPartition loadPartition(FeFsTable table,
-      long partitionId) {
-    Collection<? extends FeFsPartition> partCol = table.loadPartitions(
-        Collections.singleton(partitionId));
-    if (partCol.size() != 1) {
-      throw new AssertionError(String.format(
-          "expected exactly one result fetching partition ID %s from table %s 
" +
-          "(got %s)", partitionId, table.getFullName(), partCol.size()));
-    }
-    return Iterables.getOnlyElement(partCol);
-  }
-
-  /**
-   * Load all partitions from the given table.
-   */
-  public static Collection<? extends FeFsPartition> loadAllPartitions(
-      FeFsTable table) {
-    return table.loadPartitions(table.getPartitionIds());
-  }
-
   /**
    * Parse the partition key values out of their stringified format used by 
HMS.
    */
@@ -282,22 +245,7 @@ public abstract class FeCatalogUtils {
    */
   public static String getPartitionName(FeFsPartition partition) {
     return getPartitionName(partition.getTable(),
-        getPartitionValuesAsStrings(partition, true));
-  }
-
-  // TODO: this could be a default method in FeFsPartition in Java 8.
-  public static List<String> getPartitionValuesAsStrings(
-      FeFsPartition partition, boolean mapNullsToHiveKey) {
-    List<String> ret = new ArrayList<>();
-    for (LiteralExpr partValue: partition.getPartitionValues()) {
-      if (mapNullsToHiveKey) {
-        ret.add(PartitionKeyValue.getPartitionKeyValueString(
-                partValue, partition.getTable().getNullPartitionKeyValue()));
-      } else {
-        ret.add(partValue.getStringValue());
-      }
-    }
-    return ret;
+        partition.getPartitionValuesAsStrings(true));
   }
 
   public static String getPartitionName(HdfsPartition.Builder partBuilder) {
@@ -329,26 +277,6 @@ public abstract class FeCatalogUtils {
     return FileUtils.makePartName(partitionKeys, partitionValues);
   }
 
-  // TODO: this could be a default method in FeFsPartition in Java 8.
-  public static String getConjunctSqlForPartition(FeFsPartition part) {
-    List<String> partColSql = new ArrayList<>();
-    for (Column partCol: part.getTable().getClusteringColumns()) {
-      partColSql.add(ToSqlUtils.getIdentSql(partCol.getName()));
-    }
-
-    List<String> conjuncts = new ArrayList<>();
-    for (int i = 0; i < partColSql.size(); ++i) {
-      LiteralExpr partVal = part.getPartitionValues().get(i);
-      String partValSql = partVal.toSql();
-      if (Expr.IS_NULL_LITERAL.apply(partVal) || partValSql.isEmpty()) {
-        conjuncts.add(partColSql.get(i) + " IS NULL");
-      } else {
-        conjuncts.add(partColSql.get(i) + "=" + partValSql);
-      }
-    }
-    return "(" + Joiner.on(" AND " ).join(conjuncts) + ")";
-  }
-
   /**
    * Return the set of all file formats used in the collection of partitions.
    */
diff --git a/fe/src/main/java/org/apache/impala/catalog/FeFsPartition.java 
b/fe/src/main/java/org/apache/impala/catalog/FeFsPartition.java
index 40fba33c1..cd20ef225 100644
--- a/fe/src/main/java/org/apache/impala/catalog/FeFsPartition.java
+++ b/fe/src/main/java/org/apache/impala/catalog/FeFsPartition.java
@@ -23,10 +23,14 @@ import java.util.Map;
 
 import javax.annotation.Nullable;
 
+import com.google.common.base.Joiner;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.impala.analysis.Expr;
 import org.apache.impala.analysis.LiteralExpr;
+import org.apache.impala.analysis.PartitionKeyValue;
+import org.apache.impala.analysis.ToSqlUtils;
 import org.apache.impala.common.FileSystemUtil;
 import org.apache.impala.thrift.TAccessLevel;
 import org.apache.impala.thrift.TGetPartialCatalogObjectRequest;
@@ -39,18 +43,13 @@ import org.apache.impala.util.ListMap;
 /**
  * Frontend interface for interacting with a single filesystem-based partition.
  */
-public interface FeFsPartition {
+public interface FeFsPartition extends PrunablePartition {
   /**
    * @return a partition name formed by concatenating partition keys and their 
values,
    * compatible with the way Hive names partitions
    */
   String getPartitionName();
 
-  /**
-   * @return the ID for this partition which identifies it within its parent 
table.
-   */
-  long getId();
-
   /**
    * @return the table that contains this partition
    */
@@ -157,7 +156,7 @@ public interface FeFsPartition {
    */
   byte[] getPartitionStatsCompressed();
 
- /**
+  /**
    * @return the size (in bytes) of all the files inside this partition
    */
   long getSize();
@@ -167,23 +166,23 @@ public interface FeFsPartition {
    */
   long getNumRows();
 
-  /**
-   * Utility method which returns a string of conjuncts of equality exprs to 
exactly
-   * select this partition (e.g. ((month=2009) AND (year=2012)).
-   */
-  String getConjunctSql();
-
   /**
    * @return a list of partition values as strings. If mapNullsToHiveKey is 
true, any NULL
    * value is returned as the table's default null partition key string value, 
otherwise
    * they are returned as 'NULL'.
    */
-  List<String> getPartitionValuesAsStrings(boolean mapNullsToHiveKey);
-
-  /**
-   * @return an immutable list of partition key expressions
-   */
-  List<LiteralExpr> getPartitionValues();
+  default List<String> getPartitionValuesAsStrings(boolean mapNullsToHiveKey) {
+    List<String> ret = new ArrayList<>();
+    for (LiteralExpr partValue: getPartitionValues()) {
+      if (mapNullsToHiveKey) {
+        ret.add(PartitionKeyValue.getPartitionKeyValueString(
+            partValue, getTable().getNullPartitionKeyValue()));
+      } else {
+        ret.add(partValue.getStringValue());
+      }
+    }
+    return ret;
+  }
 
   /**
    * @return the value of the given column 'pos' for this partition
@@ -251,4 +250,27 @@ public interface FeFsPartition {
 
     return partInfo;
   }
+
+  /**
+   * Utility method which returns a string of conjuncts of equality exprs to 
exactly
+   * select this partition (e.g. ((month=2009) AND (year=2012))).
+   */
+  default String getConjunctSql() {
+    List<String> partColSql = new ArrayList<>();
+    for (Column partCol: getTable().getClusteringColumns()) {
+      partColSql.add(ToSqlUtils.getIdentSql(partCol.getName()));
+    }
+
+    List<String> conjuncts = new ArrayList<>();
+    for (int i = 0; i < partColSql.size(); ++i) {
+      LiteralExpr partVal = getPartitionValues().get(i);
+      String partValSql = partVal.toSql();
+      if (Expr.IS_NULL_LITERAL.apply(partVal) || partValSql.isEmpty()) {
+        conjuncts.add(partColSql.get(i) + " IS NULL");
+      } else {
+        conjuncts.add(partColSql.get(i) + "=" + partValSql);
+      }
+    }
+    return "(" + Joiner.on(" AND " ).join(conjuncts) + ")";
+  }
 }
diff --git a/fe/src/main/java/org/apache/impala/catalog/FeFsTable.java 
b/fe/src/main/java/org/apache/impala/catalog/FeFsTable.java
index 5d66c8e74..a3f3d0a92 100644
--- a/fe/src/main/java/org/apache/impala/catalog/FeFsTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/FeFsTable.java
@@ -18,6 +18,7 @@ package org.apache.impala.catalog;
 
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
+import com.google.common.collect.Iterables;
 import com.google.common.collect.Lists;
 
 import java.io.IOException;
@@ -44,6 +45,7 @@ import org.apache.impala.analysis.PartitionKeyValue;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.common.FileSystemUtil;
 import org.apache.impala.common.PrintUtils;
+import org.apache.impala.fb.FbFileBlock;
 import org.apache.impala.service.BackendConfig;
 import org.apache.impala.thrift.TColumn;
 import org.apache.impala.thrift.TNetworkAddress;
@@ -52,14 +54,12 @@ import org.apache.impala.thrift.TResultSet;
 import org.apache.impala.thrift.TResultSetMetadata;
 import org.apache.impala.thrift.TSortingOrder;
 import org.apache.impala.thrift.TTableStats;
+import org.apache.impala.util.HdfsCachingUtil;
 import org.apache.impala.util.ListMap;
 import org.apache.impala.util.TAccessLevelUtil;
 import org.apache.impala.util.TResultRowBuilder;
 import org.apache.thrift.TException;
 
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 /**
  * Frontend interface for interacting with a filesystem-backed table.
  *
@@ -230,13 +230,6 @@ public interface FeFsTable extends FeTable {
    */
   public String getFirstLocationWithoutWriteAccess();
 
-  /**
-   * @return statistics on this table as a tabular result set. Used for the
-   * SHOW TABLE STATS statement. The schema of the returned TResultSet is set
-   * inside this method.
-   */
-  public TResultSet getTableStats();
-
   /**
    * @return all partitions of this table
    */
@@ -265,6 +258,20 @@ public interface FeFsTable extends FeTable {
    */
   Set<Long> getNullPartitionIds(int colIdx);
 
+  /**
+   * Convenience method to load exactly one partition from a table.
+   */
+  default FeFsPartition loadPartition(long partitionId) {
+    Collection<? extends FeFsPartition> partCol = loadPartitions(
+        Collections.singleton(partitionId));
+    if (partCol.size() != 1) {
+      throw new AssertionError(String.format(
+          "expected exactly one result fetching partition ID %s from table %s 
" +
+              "(got %s)", partitionId, getFullName(), partCol.size()));
+    }
+    return Iterables.getOnlyElement(partCol);
+  }
+
   /**
    * Returns the full partition objects for the given partition IDs, which must
    * have been obtained by prior calls to the above methods.
@@ -273,9 +280,11 @@ public interface FeFsTable extends FeTable {
   List<? extends FeFsPartition> loadPartitions(Collection<Long> ids);
 
   /**
-   * @return: SQL Constraints Information.
+   * Load all partitions from the table.
    */
-  SqlConstraints getSqlConstraints();
+  default Collection<? extends FeFsPartition> loadAllPartitions() {
+    return loadPartitions(getPartitionIds());
+  }
 
   /**
    * @return whether it is a Hive ACID table.
@@ -441,14 +450,135 @@ public interface FeFsTable extends FeTable {
     return sortOrder == TSortingOrder.LEXICAL;
   }
 
+  /**
+   * @return statistics on this table as a tabular result set. Used for the
+   * SHOW TABLE STATS statement. The schema of the returned TResultSet is set
+   * inside this method.
+   */
+  default TResultSet getTableStats() {
+    TResultSet result = new TResultSet();
+    TResultSetMetadata resultSchema = new TResultSetMetadata();
+    result.setSchema(resultSchema);
+
+    for (int i = 0; i < getNumClusteringCols(); ++i) {
+      // Add the partition-key values as strings for simplicity.
+      Column partCol = getColumns().get(i);
+      TColumn colDesc = new TColumn(partCol.getName(), Type.STRING.toThrift());
+      resultSchema.addToColumns(colDesc);
+    }
+
+    boolean statsExtrap = Utils.isStatsExtrapolationEnabled(this);
+
+    resultSchema.addToColumns(new TColumn("#Rows", Type.BIGINT.toThrift()));
+    if (statsExtrap) {
+      resultSchema.addToColumns(new TColumn("Extrap #Rows", 
Type.BIGINT.toThrift()));
+    }
+    resultSchema.addToColumns(new TColumn("#Files", Type.BIGINT.toThrift()));
+    resultSchema.addToColumns(new TColumn("Size", Type.STRING.toThrift()));
+    resultSchema.addToColumns(new TColumn("Bytes Cached", 
Type.STRING.toThrift()));
+    resultSchema.addToColumns(new TColumn("Cache Replication", 
Type.STRING.toThrift()));
+    resultSchema.addToColumns(new TColumn("Format", Type.STRING.toThrift()));
+    resultSchema.addToColumns(new TColumn("Incremental stats", 
Type.STRING.toThrift()));
+    resultSchema.addToColumns(new TColumn("Location", Type.STRING.toThrift()));
+    resultSchema.addToColumns(new TColumn("EC Policy", 
Type.STRING.toThrift()));
+
+    // Pretty print partitions and their stats.
+    List<FeFsPartition> orderedPartitions = new 
ArrayList<>(loadAllPartitions());
+    orderedPartitions.sort(HdfsPartition.KV_COMPARATOR);
+
+    long totalCachedBytes = 0L;
+    long totalBytes = 0L;
+    long totalNumFiles = 0L;
+    for (FeFsPartition p: orderedPartitions) {
+      long numFiles = p.getNumFileDescriptors();
+      long size = p.getSize();
+      totalNumFiles += numFiles;
+      totalBytes += size;
+
+      TResultRowBuilder rowBuilder = new TResultRowBuilder();
+
+      // Add the partition-key values (as strings for simplicity).
+      for (LiteralExpr expr: p.getPartitionValues()) {
+        rowBuilder.add(expr.getStringValue());
+      }
+
+      // Add rows, extrapolated rows, files, bytes, cache stats, and file 
format.
+      rowBuilder.add(p.getNumRows());
+      // Compute and report the extrapolated row count because the set of 
files could
+      // have changed since we last computed stats for this partition. We also 
follow
+      // this policy during scan-cardinality estimation.
+      if (statsExtrap) {
+        rowBuilder.add(Utils.getExtrapolatedNumRows(this, size));
+      }
+
+      rowBuilder.add(numFiles).addBytes(size);
+      if (!p.isMarkedCached()) {
+        // Helps to differentiate partitions that have 0B cached versus 
partitions
+        // that are not marked as cached.
+        rowBuilder.add("NOT CACHED");
+        rowBuilder.add("NOT CACHED");
+      } else {
+        // Calculate the number of bytes that are cached.
+        long cachedBytes = 0L;
+        for (FileDescriptor fd: p.getFileDescriptors()) {
+          int numBlocks = fd.getNumFileBlocks();
+          for (int i = 0; i < numBlocks; ++i) {
+            FbFileBlock block = fd.getFbFileBlock(i);
+            if (FileBlock.hasCachedReplica(block)) {
+              cachedBytes += FileBlock.getLength(block);
+            }
+          }
+        }
+        totalCachedBytes += cachedBytes;
+        rowBuilder.addBytes(cachedBytes);
+
+        // Extract cache replication factor from the parameters of the table
+        // if the table is not partitioned or directly from the partition.
+        Short rep = HdfsCachingUtil.getCachedCacheReplication(
+            getNumClusteringCols() == 0 ?
+                p.getTable().getMetaStoreTable().getParameters() :
+                p.getParameters());
+        rowBuilder.add(rep.toString());
+      }
+      rowBuilder.add(p.getFileFormat().toString());
+      rowBuilder.add(String.valueOf(p.hasIncrementalStats()));
+      rowBuilder.add(p.getLocation());
+      
rowBuilder.add(FileSystemUtil.getErasureCodingPolicy(p.getLocationPath()));
+      result.addToRows(rowBuilder.get());
+    }
+
+    // For partitioned tables add a summary row at the bottom.
+    if (getNumClusteringCols() > 0) {
+      TResultRowBuilder rowBuilder = new TResultRowBuilder();
+      int numEmptyCells = getNumClusteringCols() - 1;
+      rowBuilder.add("Total");
+      for (int i = 0; i < numEmptyCells; ++i) {
+        rowBuilder.add("");
+      }
+
+      // Total rows, extrapolated rows, files, bytes, cache stats.
+      // Leave format empty.
+      rowBuilder.add(getNumRows());
+      // Compute and report the extrapolated row count because the set of 
files could
+      // have changed since we last computed stats for this table. We also 
follow
+      // this policy during scan-cardinality estimation.
+      if (statsExtrap) {
+        rowBuilder.add(Utils.getExtrapolatedNumRows(this, 
getTotalHdfsBytes()));
+      }
+      rowBuilder.add(totalNumFiles)
+          .addBytes(totalBytes)
+          .addBytes(totalCachedBytes).add("").add("").add("").add("").add("");
+      result.addToRows(rowBuilder.get());
+    }
+    return result;
+  }
+
   /**
    * Utility functions for operating on FeFsTable. When we move fully to Java 
8,
    * these can become default methods of the interface.
    */
   abstract class Utils {
 
-    private final static Logger LOG = LoggerFactory.getLogger(Utils.class);
-
     // Table property key for skip.header.line.count
     public static final String TBL_PROP_SKIP_HEADER_LINE_COUNT = 
"skip.header.line.count";
 
@@ -528,7 +658,7 @@ public interface FeFsTable extends FeTable {
 
       List<? extends FeFsPartition> orderedPartitions;
       if (partitionSet == null) {
-        orderedPartitions = 
Lists.newArrayList(FeCatalogUtils.loadAllPartitions(table));
+        orderedPartitions = Lists.newArrayList(table.loadAllPartitions());
       } else {
         // Get a list of HdfsPartition objects for the given partition set.
         orderedPartitions = getPartitionsFromPartitionSet(table, partitionSet);
@@ -738,8 +868,7 @@ public interface FeFsTable extends FeTable {
       }
 
       if (existingTargetPartition != null) {
-        FeFsPartition partition = FeCatalogUtils.loadPartition(table,
-            existingTargetPartition.getId());
+        FeFsPartition partition = 
table.loadPartition(existingTargetPartition.getId());
         String location = partition.getLocation();
         if (!TAccessLevelUtil.impliesWriteAccess(partition.getAccessLevel())) {
           throw new AnalysisException(noWriteAccessErrorMsg + location);
diff --git a/fe/src/main/java/org/apache/impala/catalog/FeIcebergTable.java 
b/fe/src/main/java/org/apache/impala/catalog/FeIcebergTable.java
index 7049e0e1e..4366a2355 100644
--- a/fe/src/main/java/org/apache/impala/catalog/FeIcebergTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/FeIcebergTable.java
@@ -294,11 +294,6 @@ public interface FeIcebergTable extends FeFsTable {
     return getFeFsTable().loadPartitions(ids);
   }
 
-  @Override
-  default SqlConstraints getSqlConstraints() {
-    return getFeFsTable().getSqlConstraints();
-  }
-
   @Override
   default ListMap<TNetworkAddress> getHostIndex() {
     return getFeFsTable().getHostIndex();
diff --git a/fe/src/main/java/org/apache/impala/catalog/FeTable.java 
b/fe/src/main/java/org/apache/impala/catalog/FeTable.java
index 75cb667df..03fd94aa9 100644
--- a/fe/src/main/java/org/apache/impala/catalog/FeTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/FeTable.java
@@ -16,6 +16,7 @@
 // under the License.
 package org.apache.impala.catalog;
 
+import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.List;
@@ -26,6 +27,7 @@ import org.apache.hadoop.hive.common.ValidWriteIdList;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.impala.analysis.TableName;
 import org.apache.impala.thrift.TCatalogObjectType;
+import org.apache.impala.thrift.TColumnDescriptor;
 import org.apache.impala.thrift.TImpalaTableType;
 import org.apache.impala.thrift.TTableDescriptor;
 import org.apache.impala.thrift.TTableStats;
@@ -129,7 +131,9 @@ public interface FeTable {
   /**
    * @return SQL constraints for the table.
    */
-  SqlConstraints getSqlConstraints();
+  default SqlConstraints getSqlConstraints() {
+    return new SqlConstraints(new ArrayList<>(), new ArrayList<>());
+  }
 
   /**
    * @return an unmodifiable list of all partition columns.
@@ -221,4 +225,15 @@ public interface FeTable {
    * @return the timestamp when the table is last loaded or reloaded in 
catalogd.
    */
   long getLastLoadedTimeMs();
+
+  /**
+   * Returns a list of thrift column descriptors ordered by position.
+   */
+  default List<TColumnDescriptor> getTColumnDescriptors() {
+    List<TColumnDescriptor> colDescs = new ArrayList<>();
+    for (Column col: getColumns()) {
+      colDescs.add(col.toDescriptor());
+    }
+    return colDescs;
+  }
 }
diff --git a/fe/src/main/java/org/apache/impala/catalog/HdfsPartition.java 
b/fe/src/main/java/org/apache/impala/catalog/HdfsPartition.java
index d642abb60..6128e27f2 100644
--- a/fe/src/main/java/org/apache/impala/catalog/HdfsPartition.java
+++ b/fe/src/main/java/org/apache/impala/catalog/HdfsPartition.java
@@ -86,8 +86,7 @@ import org.slf4j.LoggerFactory;
  * the partition id to identify a partition instance (snapshot). The catalog 
versions are
  * not used actually.
  */
-public class HdfsPartition extends CatalogObjectImpl
-    implements FeFsPartition, PrunablePartition {
+public class HdfsPartition extends CatalogObjectImpl implements FeFsPartition {
   // Struct-style class for caching all the information we need to reconstruct 
an
   // HMS-compatible Partition object, for use in RPCs to the metastore. We do 
this rather
   // than cache the Thrift partition object itself as the latter can be large 
- thanks
@@ -397,18 +396,6 @@ public class HdfsPartition extends CatalogObjectImpl
     return partName_;
   }
 
-  @Override
-  public List<String> getPartitionValuesAsStrings(boolean mapNullsToHiveKey) {
-    return FeCatalogUtils.getPartitionValuesAsStrings(this, mapNullsToHiveKey);
-  }
-
-  @Override // FeFsPartition
-  public String getConjunctSql() {
-    // TODO: Remove this when the TODO elsewhere in this file to save and 
expose the
-    // list of TPartitionKeyValues has been resolved.
-    return FeCatalogUtils.getConjunctSqlForPartition(this);
-  }
-
   /**
    * Returns a string of the form part_key1=value1/part_key2=value2...
    */
@@ -445,7 +432,7 @@ public class HdfsPartition extends CatalogObjectImpl
     return new Path(getLocation());
   }
 
-  @Override // FeFsPartition
+  @Override // PrunablePartition
   public long getId() { return id_; }
 
   @Override // FeFsPartition
@@ -544,7 +531,7 @@ public class HdfsPartition extends CatalogObjectImpl
         (added ? "Added" : "Could not add"), versionNumber, 
inFlightEvents_.print());
   }
 
-  @Override // FeFsPartition
+  @Override // PrunablePartition
   public List<LiteralExpr> getPartitionValues() { return partitionKeyValues_; }
   @Override // FeFsPartition
   public LiteralExpr getPartitionValue(int i) { return 
partitionKeyValues_.get(i); }
diff --git a/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java 
b/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java
index f1c6852b3..15387f1fd 100644
--- a/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java
@@ -63,7 +63,6 @@ import org.apache.impala.common.ImpalaException;
 import org.apache.impala.common.Pair;
 import org.apache.impala.common.PrintUtils;
 import org.apache.impala.compat.MetastoreShim;
-import org.apache.impala.fb.FbFileBlock;
 import org.apache.impala.hive.common.MutableValidReaderWriteIdList;
 import org.apache.impala.hive.common.MutableValidWriteIdList;
 import org.apache.impala.service.BackendConfig;
@@ -72,7 +71,6 @@ import org.apache.impala.thrift.CatalogObjectsConstants;
 import org.apache.impala.thrift.TAccessLevel;
 import org.apache.impala.thrift.TCatalogObject;
 import org.apache.impala.thrift.TCatalogObjectType;
-import org.apache.impala.thrift.TColumn;
 import org.apache.impala.thrift.TErrorCode;
 import org.apache.impala.thrift.TGetPartialCatalogObjectRequest;
 import org.apache.impala.thrift.TGetPartialCatalogObjectResponse;
@@ -81,8 +79,6 @@ import org.apache.impala.thrift.THdfsTable;
 import org.apache.impala.thrift.TNetworkAddress;
 import org.apache.impala.thrift.TPartialPartitionInfo;
 import org.apache.impala.thrift.TPartitionKeyValue;
-import org.apache.impala.thrift.TResultSet;
-import org.apache.impala.thrift.TResultSetMetadata;
 import org.apache.impala.thrift.TSqlConstraints;
 import org.apache.impala.thrift.TStatus;
 import org.apache.impala.thrift.TTable;
@@ -100,7 +96,6 @@ import org.apache.impala.util.ListMap;
 import org.apache.impala.util.MetaStoreUtil;
 import org.apache.impala.util.NoOpEventSequence;
 import org.apache.impala.util.TAccessLevelUtil;
-import org.apache.impala.util.TResultRowBuilder;
 import org.apache.impala.util.ThreadNameAnnotator;
 import org.apache.thrift.TException;
 import org.slf4j.Logger;
@@ -385,24 +380,29 @@ public class HdfsTable extends Table implements FeFsTable 
{
   public boolean isMarkedCached() { return isMarkedCached_; }
 
   @Override // FeFsTable
-  public Collection<? extends PrunablePartition> getPartitions() {
+  public Collection<? extends FeFsPartition> getPartitions() {
     return partitionMap_.values();
   }
 
   @Override // FeFsTable
-  public Map<Long, ? extends PrunablePartition> getPartitionMap() {
+  public Map<Long, ? extends FeFsPartition> getPartitionMap() {
     return partitionMap_;
   }
 
+  @Override
+  public FeFsPartition loadPartition(long id) {
+    HdfsPartition partition = partitionMap_.get(id);
+    if (partition == null) {
+      throw new IllegalArgumentException("no such partition id " + id);
+    }
+    return partition;
+  }
+
   @Override // FeFsTable
   public List<FeFsPartition> loadPartitions(Collection<Long> ids) {
     List<FeFsPartition> partitions = 
Lists.newArrayListWithCapacity(ids.size());
     for (Long id : ids) {
-      HdfsPartition partition = partitionMap_.get(id);
-      if (partition == null) {
-        throw new IllegalArgumentException("no such partition id " + id);
-      }
-      partitions.add(partition);
+      partitions.add(loadPartition(id));
     }
     return partitions;
   }
@@ -2440,7 +2440,7 @@ public class HdfsTable extends Table implements FeFsTable 
{
   @Override // FeFsTable
   public ListMap<TNetworkAddress> getHostIndex() { return hostIndex_; }
 
-  @Override
+  @Override // FeTable
   public SqlConstraints getSqlConstraints() {
     return sqlConstraints_;
   }
@@ -2635,11 +2635,6 @@ public class HdfsTable extends Table implements 
FeFsTable {
     return new Pair<>(value, expr);
   }
 
-  @Override // FeFsTable
-  public TResultSet getTableStats() {
-    return getTableStats(this);
-  }
-
   @Override
   public FileSystemUtil.FsType getFsType() {
     Preconditions.checkNotNull(getHdfsBaseDirPath().toUri().getScheme(),
@@ -2647,128 +2642,6 @@ public class HdfsTable extends Table implements 
FeFsTable {
     return 
FileSystemUtil.FsType.getFsType(getHdfsBaseDirPath().toUri().getScheme());
   }
 
-  // TODO(todd): move to FeCatalogUtils. Upon moving to Java 8, could be
-  // a default method of FeFsTable.
-  public static TResultSet getTableStats(FeFsTable table) {
-    TResultSet result = new TResultSet();
-    TResultSetMetadata resultSchema = new TResultSetMetadata();
-    result.setSchema(resultSchema);
-
-    for (int i = 0; i < table.getNumClusteringCols(); ++i) {
-      // Add the partition-key values as strings for simplicity.
-      Column partCol = table.getColumns().get(i);
-      TColumn colDesc = new TColumn(partCol.getName(), Type.STRING.toThrift());
-      resultSchema.addToColumns(colDesc);
-    }
-
-    boolean statsExtrap = Utils.isStatsExtrapolationEnabled(table);
-
-    resultSchema.addToColumns(new TColumn("#Rows", Type.BIGINT.toThrift()));
-    if (statsExtrap) {
-      resultSchema.addToColumns(new TColumn("Extrap #Rows", 
Type.BIGINT.toThrift()));
-    }
-    resultSchema.addToColumns(new TColumn("#Files", Type.BIGINT.toThrift()));
-    resultSchema.addToColumns(new TColumn("Size", Type.STRING.toThrift()));
-    resultSchema.addToColumns(new TColumn("Bytes Cached", 
Type.STRING.toThrift()));
-    resultSchema.addToColumns(new TColumn("Cache Replication", 
Type.STRING.toThrift()));
-    resultSchema.addToColumns(new TColumn("Format", Type.STRING.toThrift()));
-    resultSchema.addToColumns(new TColumn("Incremental stats", 
Type.STRING.toThrift()));
-    resultSchema.addToColumns(new TColumn("Location", Type.STRING.toThrift()));
-    resultSchema.addToColumns(new TColumn("EC Policy", 
Type.STRING.toThrift()));
-
-    // Pretty print partitions and their stats.
-    List<FeFsPartition> orderedPartitions = new ArrayList<>(
-        FeCatalogUtils.loadAllPartitions(table));
-    Collections.sort(orderedPartitions, HdfsPartition.KV_COMPARATOR);
-
-    long totalCachedBytes = 0L;
-    long totalBytes = 0L;
-    long totalNumFiles = 0L;
-    for (FeFsPartition p: orderedPartitions) {
-      long numFiles = p.getNumFileDescriptors();
-      long size = p.getSize();
-      totalNumFiles += numFiles;
-      totalBytes += size;
-
-      TResultRowBuilder rowBuilder = new TResultRowBuilder();
-
-      // Add the partition-key values (as strings for simplicity).
-      for (LiteralExpr expr: p.getPartitionValues()) {
-        rowBuilder.add(expr.getStringValue());
-      }
-
-      // Add rows, extrapolated rows, files, bytes, cache stats, and file 
format.
-      rowBuilder.add(p.getNumRows());
-      // Compute and report the extrapolated row count because the set of 
files could
-      // have changed since we last computed stats for this partition. We also 
follow
-      // this policy during scan-cardinality estimation.
-      if (statsExtrap) {
-        rowBuilder.add(Utils.getExtrapolatedNumRows(table, size));
-      }
-
-      rowBuilder.add(numFiles).addBytes(size);
-      if (!p.isMarkedCached()) {
-        // Helps to differentiate partitions that have 0B cached versus 
partitions
-        // that are not marked as cached.
-        rowBuilder.add("NOT CACHED");
-        rowBuilder.add("NOT CACHED");
-      } else {
-        // Calculate the number the number of bytes that are cached.
-        long cachedBytes = 0L;
-        for (FileDescriptor fd: p.getFileDescriptors()) {
-          int numBlocks = fd.getNumFileBlocks();
-          for (int i = 0; i < numBlocks; ++i) {
-            FbFileBlock block = fd.getFbFileBlock(i);
-            if (FileBlock.hasCachedReplica(block)) {
-              cachedBytes += FileBlock.getLength(block);
-            }
-          }
-        }
-        totalCachedBytes += cachedBytes;
-        rowBuilder.addBytes(cachedBytes);
-
-        // Extract cache replication factor from the parameters of the table
-        // if the table is not partitioned or directly from the partition.
-        Short rep = HdfsCachingUtil.getCachedCacheReplication(
-            table.getNumClusteringCols() == 0 ?
-            p.getTable().getMetaStoreTable().getParameters() :
-            p.getParameters());
-        rowBuilder.add(rep.toString());
-      }
-      rowBuilder.add(p.getFileFormat().toString());
-      rowBuilder.add(String.valueOf(p.hasIncrementalStats()));
-      rowBuilder.add(p.getLocation());
-      
rowBuilder.add(FileSystemUtil.getErasureCodingPolicy(p.getLocationPath()));
-      result.addToRows(rowBuilder.get());
-    }
-
-    // For partitioned tables add a summary row at the bottom.
-    if (table.getNumClusteringCols() > 0) {
-      TResultRowBuilder rowBuilder = new TResultRowBuilder();
-      int numEmptyCells = table.getNumClusteringCols() - 1;
-      rowBuilder.add("Total");
-      for (int i = 0; i < numEmptyCells; ++i) {
-        rowBuilder.add("");
-      }
-
-      // Total rows, extrapolated rows, files, bytes, cache stats.
-      // Leave format empty.
-      rowBuilder.add(table.getNumRows());
-      // Compute and report the extrapolated row count because the set of 
files could
-      // have changed since we last computed stats for this partition. We also 
follow
-      // this policy during scan-cardinality estimation.
-      if (statsExtrap) {
-        rowBuilder.add(Utils.getExtrapolatedNumRows(
-            table, table.getTotalHdfsBytes()));
-      }
-      rowBuilder.add(totalNumFiles)
-          .addBytes(totalBytes)
-          .addBytes(totalCachedBytes).add("").add("").add("").add("").add("");
-      result.addToRows(rowBuilder.get());
-    }
-    return result;
-  }
-
   /**
    * Constructs a partition name from a list of TPartitionKeyValue objects.
    */
@@ -3200,13 +3073,4 @@ public class HdfsTable extends Table implements 
FeFsTable {
   public void setLastVersionSeenByTopicUpdate(long version) {
     lastVersionSeenByTopicUpdate_ = version;
   }
-
-  public boolean isParquetTable() {
-    for (FeFsPartition partition: partitionMap_.values()) {
-      if (!partition.getFileFormat().isParquetBased()) {
-        return false;
-      }
-    }
-    return true;
-  }
 }
diff --git a/fe/src/main/java/org/apache/impala/catalog/IcebergDeleteTable.java 
b/fe/src/main/java/org/apache/impala/catalog/IcebergDeleteTable.java
index 3f41799d1..08e8a2afa 100644
--- a/fe/src/main/java/org/apache/impala/catalog/IcebergDeleteTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/IcebergDeleteTable.java
@@ -85,7 +85,7 @@ public abstract class IcebergDeleteTable extends VirtualTable 
implements FeIcebe
         Set<Long> referencedPartitions) {
       TTableDescriptor desc =
           baseTable_.toThriftDescriptor(tableId, referencedPartitions);
-      desc.setColumnDescriptors(FeCatalogUtils.getTColumnDescriptors(this));
+      desc.setColumnDescriptors(getTColumnDescriptors());
       if (desc.hdfsTable.isSetAvroSchema()) {
         
desc.hdfsTable.setAvroSchema(AvroSchemaConverter.convertColumns(getColumns(),
             getFullName().replaceAll("-", "_")).toString());
diff --git 
a/fe/src/main/java/org/apache/impala/catalog/IcebergTimeTravelTable.java 
b/fe/src/main/java/org/apache/impala/catalog/IcebergTimeTravelTable.java
index 14358f6d6..35aedfe4f 100644
--- a/fe/src/main/java/org/apache/impala/catalog/IcebergTimeTravelTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/IcebergTimeTravelTable.java
@@ -37,7 +37,6 @@ import org.apache.impala.common.AnalysisException;
 import org.apache.impala.common.FileSystemUtil;
 import org.apache.impala.common.ImpalaRuntimeException;
 import org.apache.impala.thrift.TCatalogObjectType;
-import org.apache.impala.thrift.TColumnDescriptor;
 import org.apache.impala.thrift.TCompressionCodec;
 import org.apache.impala.thrift.THdfsTable;
 import org.apache.impala.thrift.TIcebergCatalog;
@@ -172,13 +171,6 @@ public class IcebergTimeTravelTable
     return desc;
   }
 
-  /**
-   * Returns a list of thrift column descriptors ordered by position.
-   */
-  public List<TColumnDescriptor> getTColumnDescriptors() {
-    return FeCatalogUtils.getTColumnDescriptors(this);
-  }
-
   public ArrayType getType() { return type_; }
 
   public void addColumn(Column col) {
@@ -438,11 +430,6 @@ class ForwardingFeIcebergTable implements FeIcebergTable {
     return base.loadPartitions(ids);
   }
 
-  @Override
-  public SqlConstraints getSqlConstraints() {
-    return base.getSqlConstraints();
-  }
-
   @Override
   public ListMap<TNetworkAddress> getHostIndex() {
     return base.getHostIndex();
diff --git a/fe/src/main/java/org/apache/impala/catalog/PrunablePartition.java 
b/fe/src/main/java/org/apache/impala/catalog/PrunablePartition.java
index bdcfcf524..cda6c89e4 100644
--- a/fe/src/main/java/org/apache/impala/catalog/PrunablePartition.java
+++ b/fe/src/main/java/org/apache/impala/catalog/PrunablePartition.java
@@ -32,13 +32,15 @@ import org.apache.impala.planner.HdfsPartitionPruner;
  */
 public interface PrunablePartition {
   /**
-   * Returns the identifier of this partition, suitable for later passing
-   * to {@link FeFsTable#loadPartitions(java.util.Collection)}
+   * Returns the identifier of this partition which identifies it within its 
parent table.
+   * Suitable for later passing to
+   * {@link FeFsTable#loadPartitions(java.util.Collection)}
    */
   public long getId();
 
   /**
-   * Returns the values associated with this partition
+   * Returns the values associated with this partition.
+   * @return an immutable list of partition key expressions
    */
   List<LiteralExpr> getPartitionValues();
 }
diff --git a/fe/src/main/java/org/apache/impala/catalog/Table.java 
b/fe/src/main/java/org/apache/impala/catalog/Table.java
index edb29bd82..f8e85a86f 100644
--- a/fe/src/main/java/org/apache/impala/catalog/Table.java
+++ b/fe/src/main/java/org/apache/impala/catalog/Table.java
@@ -50,7 +50,6 @@ import org.apache.impala.thrift.TAccessLevel;
 import org.apache.impala.thrift.TCatalogObject;
 import org.apache.impala.thrift.TCatalogObjectType;
 import org.apache.impala.thrift.TColumn;
-import org.apache.impala.thrift.TColumnDescriptor;
 import org.apache.impala.thrift.TGetPartialCatalogObjectRequest;
 import org.apache.impala.thrift.TGetPartialCatalogObjectResponse;
 import org.apache.impala.thrift.TImpalaTableType;
@@ -131,10 +130,6 @@ public abstract class Table extends CatalogObjectImpl 
implements FeTable {
   // map from lowercase column name to Column object.
   protected final Map<String, Column> colsByName_ = new HashMap<>();
 
-  // List of SQL constraints associated with the table.
-  private final SqlConstraints sqlConstraints_ = new SqlConstraints(new 
ArrayList<>(),
-      new ArrayList<>());
-
   // Type of this table (array of struct) that mirrors the columns. Useful for 
analysis.
   protected final ArrayType type_ = new ArrayType(new StructType());
 
@@ -890,19 +885,9 @@ public abstract class Table extends CatalogObjectImpl 
implements FeTable {
   @Override // FeTable
   public List<VirtualColumn> getVirtualColumns() { return virtualCols_; }
 
-  @Override // FeTable
-  public SqlConstraints getSqlConstraints()  { return sqlConstraints_; }
-
   @Override // FeTable
   public List<String> getColumnNames() { return 
Column.toColumnNames(colsByPos_); }
 
-  /**
-   * Returns a list of thrift column descriptors ordered by position.
-   */
-  public List<TColumnDescriptor> getTColumnDescriptors() {
-    return FeCatalogUtils.getTColumnDescriptors(this);
-  }
-
   /**
    * Subclasses should override this if they provide a storage handler class. 
Currently
    * only HBase tables need to provide a storage handler.
diff --git a/fe/src/main/java/org/apache/impala/catalog/VirtualTable.java 
b/fe/src/main/java/org/apache/impala/catalog/VirtualTable.java
index 01f3cc916..f19800642 100644
--- a/fe/src/main/java/org/apache/impala/catalog/VirtualTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/VirtualTable.java
@@ -129,9 +129,6 @@ public abstract class VirtualTable implements FeTable {
   @Override
   public List<String> getColumnNames() { return 
Column.toColumnNames(colsByPos_); }
 
-  @Override
-  public SqlConstraints getSqlConstraints() { return null; }
-
   @Override
   public int getNumClusteringCols() { return numClusteringCols_; }
 
diff --git 
a/fe/src/main/java/org/apache/impala/catalog/iceberg/IcebergCtasTarget.java 
b/fe/src/main/java/org/apache/impala/catalog/iceberg/IcebergCtasTarget.java
index 2130af0e6..44a317d6f 100644
--- a/fe/src/main/java/org/apache/impala/catalog/iceberg/IcebergCtasTarget.java
+++ b/fe/src/main/java/org/apache/impala/catalog/iceberg/IcebergCtasTarget.java
@@ -39,7 +39,6 @@ import 
org.apache.impala.catalog.CatalogObject.ThriftObjectType;
 import org.apache.impala.catalog.Column;
 import org.apache.impala.catalog.CtasTargetTable;
 import org.apache.impala.catalog.Db;
-import org.apache.impala.catalog.FeCatalogUtils;
 import org.apache.impala.catalog.FeDb;
 import org.apache.impala.catalog.FeFsTable;
 import org.apache.impala.catalog.FeIcebergTable;
@@ -289,7 +288,7 @@ public class IcebergCtasTarget extends CtasTargetTable 
implements FeIcebergTable
   public TTableDescriptor toThriftDescriptor(int tableId,
       Set<Long> referencedPartitions) {
     TTableDescriptor desc = new TTableDescriptor(tableId, 
TTableType.ICEBERG_TABLE,
-        FeCatalogUtils.getTColumnDescriptors(this),
+        getTColumnDescriptors(),
         getNumClusteringCols(),
         getName(), db_.getName());
 
diff --git 
a/fe/src/main/java/org/apache/impala/catalog/iceberg/IcebergMetadataTable.java 
b/fe/src/main/java/org/apache/impala/catalog/iceberg/IcebergMetadataTable.java
index ce3f333f3..27b9ff075 100644
--- 
a/fe/src/main/java/org/apache/impala/catalog/iceberg/IcebergMetadataTable.java
+++ 
b/fe/src/main/java/org/apache/impala/catalog/iceberg/IcebergMetadataTable.java
@@ -129,10 +129,6 @@ public class IcebergMetadataTable extends VirtualTable {
     return desc;
   }
 
-  private List<TColumnDescriptor> getTColumnDescriptors() {
-    return FeCatalogUtils.getTColumnDescriptors(this);
-  }
-
   /**
    * Returns true if the table ref is referring to a valid metadata table.
    */
diff --git a/fe/src/main/java/org/apache/impala/catalog/local/LocalCatalog.java 
b/fe/src/main/java/org/apache/impala/catalog/local/LocalCatalog.java
index c35bd72af..fb8bc4405 100644
--- a/fe/src/main/java/org/apache/impala/catalog/local/LocalCatalog.java
+++ b/fe/src/main/java/org/apache/impala/catalog/local/LocalCatalog.java
@@ -32,7 +32,6 @@ import org.apache.impala.catalog.DataSource;
 import org.apache.impala.catalog.DatabaseNotFoundException;
 import org.apache.impala.catalog.Db;
 import org.apache.impala.catalog.FeCatalog;
-import org.apache.impala.catalog.FeCatalogUtils;
 import org.apache.impala.catalog.FeDataSource;
 import org.apache.impala.catalog.FeDb;
 import org.apache.impala.catalog.FeFsPartition;
@@ -235,7 +234,7 @@ public class LocalCatalog implements FeCatalog {
     PrunablePartition partition = 
FeFsTable.Utils.getPartitionFromThriftPartitionSpec(
         (FeFsTable)table, partitionSpec);
     if (partition == null) throwPartitionNotFound(partitionSpec);
-    return FeCatalogUtils.loadPartition((FeFsTable)table, partition.getId());
+    return ((FeFsTable)table).loadPartition(partition.getId());
   }
 
   @Override
diff --git 
a/fe/src/main/java/org/apache/impala/catalog/local/LocalDataSourceTable.java 
b/fe/src/main/java/org/apache/impala/catalog/local/LocalDataSourceTable.java
index 2d083d795..ca7efe8a2 100644
--- a/fe/src/main/java/org/apache/impala/catalog/local/LocalDataSourceTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/local/LocalDataSourceTable.java
@@ -23,7 +23,6 @@ import java.util.Set;
 
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.impala.catalog.DataSourceTable;
-import org.apache.impala.catalog.FeCatalogUtils;
 import org.apache.impala.catalog.FeDataSourceTable;
 import org.apache.impala.catalog.local.MetaProvider.TableMetaRef;
 import org.apache.impala.catalog.TableLoadingException;
@@ -173,7 +172,7 @@ public class LocalDataSourceTable extends LocalTable 
implements FeDataSourceTabl
   public TTableDescriptor toThriftDescriptor(
       int tableId, Set<Long> referencedPartitions) {
     TTableDescriptor tableDesc = new TTableDescriptor(tableId,
-        TTableType.DATA_SOURCE_TABLE, 
FeCatalogUtils.getTColumnDescriptors(this),
+        TTableType.DATA_SOURCE_TABLE, getTColumnDescriptors(),
         getNumClusteringCols(), getName(), getDb().getName());
     tableDesc.setDataSourceTable(getDataSourceTable());
     return tableDesc;
diff --git 
a/fe/src/main/java/org/apache/impala/catalog/local/LocalFsPartition.java 
b/fe/src/main/java/org/apache/impala/catalog/local/LocalFsPartition.java
index 1c9b196ea..4f9eec695 100644
--- a/fe/src/main/java/org/apache/impala/catalog/local/LocalFsPartition.java
+++ b/fe/src/main/java/org/apache/impala/catalog/local/LocalFsPartition.java
@@ -238,16 +238,6 @@ public class LocalFsPartition implements FeFsPartition {
     return rowCount;
   }
 
-  @Override
-  public String getConjunctSql() {
-    return FeCatalogUtils.getConjunctSqlForPartition(this);
-  }
-
-  @Override
-  public List<String> getPartitionValuesAsStrings(boolean mapNullsToHiveKey) {
-    return FeCatalogUtils.getPartitionValuesAsStrings(this, mapNullsToHiveKey);
-  }
-
   @Override
   public List<LiteralExpr> getPartitionValues() {
     return spec_.getPartitionValues();
diff --git a/fe/src/main/java/org/apache/impala/catalog/local/LocalFsTable.java 
b/fe/src/main/java/org/apache/impala/catalog/local/LocalFsTable.java
index bd1113368..ff862915b 100644
--- a/fe/src/main/java/org/apache/impala/catalog/local/LocalFsTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/local/LocalFsTable.java
@@ -45,10 +45,8 @@ import org.apache.impala.catalog.FeFsTable;
 import org.apache.impala.catalog.FileDescriptor;
 import org.apache.impala.catalog.HdfsFileFormat;
 import org.apache.impala.catalog.HdfsStorageDescriptor;
-import org.apache.impala.catalog.HdfsTable;
 import org.apache.impala.catalog.PrunablePartition;
 import org.apache.impala.catalog.SqlConstraints;
-import org.apache.impala.catalog.VirtualColumn;
 import org.apache.impala.catalog.local.MetaProvider.PartitionMetadata;
 import org.apache.impala.catalog.local.MetaProvider.PartitionRef;
 import org.apache.impala.catalog.local.MetaProvider.TableMetaRef;
@@ -58,7 +56,6 @@ import org.apache.impala.thrift.CatalogObjectsConstants;
 import org.apache.impala.thrift.THdfsPartition;
 import org.apache.impala.thrift.THdfsTable;
 import org.apache.impala.thrift.TNetworkAddress;
-import org.apache.impala.thrift.TResultSet;
 import org.apache.impala.thrift.TTableDescriptor;
 import org.apache.impala.thrift.TTableType;
 import org.apache.impala.thrift.TValidWriteIdList;
@@ -273,7 +270,7 @@ public class LocalFsTable extends LocalTable implements 
FeFsTable {
     // for any INSERT query, even if the partition is specified.
     Collection<? extends FeFsPartition> parts;
     if (ref_ != null) {
-      parts = FeCatalogUtils.loadAllPartitions(this);
+      parts = loadAllPartitions();
     } else {
       // If this is a CTAS target, we don't want to try to load the partition 
list.
       parts = Collections.emptyList();
@@ -302,11 +299,6 @@ public class LocalFsTable extends LocalTable implements 
FeFsTable {
     return AcidUtils.isTransactionalTable(getMetaStoreTable().getParameters());
   }
 
-  @Override
-  public TResultSet getTableStats() {
-    return HdfsTable.getTableStats(this);
-  }
-
   @Override
   public FileSystemUtil.FsType getFsType() {
     Preconditions.checkNotNull(getHdfsBaseDir(),
@@ -321,7 +313,7 @@ public class LocalFsTable extends LocalTable implements 
FeFsTable {
   public TTableDescriptor toThriftDescriptor(int tableId,
       Set<Long> referencedPartitions) {
     TTableDescriptor tableDesc = new TTableDescriptor(tableId, 
TTableType.HDFS_TABLE,
-        FeCatalogUtils.getTColumnDescriptors(this),
+        getTColumnDescriptors(),
         getNumClusteringCols(), name_, db_.getName());
     tableDesc.setHdfsTable(toTHdfsTable(referencedPartitions,
         ThriftObjectType.DESCRIPTOR_ONLY));
@@ -420,7 +412,7 @@ public class LocalFsTable extends LocalTable implements 
FeFsTable {
           "HdfsStorageDescriptor using sd of table");
     }
     LocalPartitionSpec spec = new LocalPartitionSpec(
-        this, CatalogObjectsConstants.PROTOTYPE_PARTITION_ID);
+        CatalogObjectsConstants.PROTOTYPE_PARTITION_ID);
     return new LocalFsPartition(this, spec, Collections.emptyMap(), 
/*writeId=*/-1,
         hdfsStorageDescriptor, /*fileDescriptors=*/null, 
/*insertFileDescriptors=*/null,
         /*deleteFileDescriptors=*/null, /*partitionStats=*/null,
diff --git 
a/fe/src/main/java/org/apache/impala/catalog/local/LocalHbaseTable.java 
b/fe/src/main/java/org/apache/impala/catalog/local/LocalHbaseTable.java
index 06a597af6..2a92bdfae 100644
--- a/fe/src/main/java/org/apache/impala/catalog/local/LocalHbaseTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/local/LocalHbaseTable.java
@@ -26,7 +26,6 @@ import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.serde2.SerDeException;
 import org.apache.impala.catalog.Column;
-import org.apache.impala.catalog.FeCatalogUtils;
 import org.apache.impala.catalog.FeHBaseTable;
 import org.apache.impala.catalog.local.MetaProvider.TableMetaRef;
 import org.apache.impala.common.Pair;
@@ -69,7 +68,7 @@ public class LocalHbaseTable extends LocalTable implements 
FeHBaseTable {
       Set<Long> referencedPartitions) {
     TTableDescriptor tableDescriptor =
         new TTableDescriptor(tableId, TTableType.HBASE_TABLE,
-            FeCatalogUtils.getTColumnDescriptors(this), 1, getHBaseTableName(),
+            getTColumnDescriptors(), 1, getHBaseTableName(),
             db_.getName());
     tableDescriptor.setHbaseTable(Util.getTHBaseTable(this));
     return tableDescriptor;
diff --git 
a/fe/src/main/java/org/apache/impala/catalog/local/LocalIcebergTable.java 
b/fe/src/main/java/org/apache/impala/catalog/local/LocalIcebergTable.java
index 5fbaa7ca4..408234da4 100644
--- a/fe/src/main/java/org/apache/impala/catalog/local/LocalIcebergTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/local/LocalIcebergTable.java
@@ -125,7 +125,7 @@ public class LocalIcebergTable extends LocalTable 
implements FeIcebergTable {
       LocalFsTable fsTable) throws Exception {
     db.getCatalog().getMetaProvider().loadTableColumnStatistics(ref,
         getHmsColumnNames(msTable));
-    FeCatalogUtils.loadAllPartitions(fsTable);
+    fsTable.loadAllPartitions();
   }
 
   private static List<String> getHmsColumnNames(Table msTable) {
@@ -251,7 +251,7 @@ public class LocalIcebergTable extends LocalTable 
implements FeIcebergTable {
   public TTableDescriptor toThriftDescriptor(int tableId,
       Set<Long> referencedPartitions) {
     TTableDescriptor desc = new TTableDescriptor(tableId, 
TTableType.ICEBERG_TABLE,
-        FeCatalogUtils.getTColumnDescriptors(this),
+        getTColumnDescriptors(),
         getNumClusteringCols(),
         name_, db_.getName());
     desc.setIcebergTable(Utils.getTIcebergTable(this, 
ThriftObjectType.DESCRIPTOR_ONLY));
@@ -265,7 +265,7 @@ public class LocalIcebergTable extends LocalTable 
implements FeIcebergTable {
     Map<Long, THdfsPartition> idToPartition = new HashMap<>();
     // LocalFsTable transformed from iceberg table only has one partition
     Collection<? extends FeFsPartition> partitions =
-        FeCatalogUtils.loadAllPartitions(localFsTable_);
+        localFsTable_.loadAllPartitions();
     Preconditions.checkState(partitions.size() == 1);
     FeFsPartition partition = (FeFsPartition) partitions.toArray()[0];
     idToPartition.put(partition.getId(),
diff --git 
a/fe/src/main/java/org/apache/impala/catalog/local/LocalKuduTable.java 
b/fe/src/main/java/org/apache/impala/catalog/local/LocalKuduTable.java
index c82605bba..668bfecb0 100644
--- a/fe/src/main/java/org/apache/impala/catalog/local/LocalKuduTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/local/LocalKuduTable.java
@@ -18,7 +18,6 @@
 package org.apache.impala.catalog.local;
 
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -28,7 +27,6 @@ import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.impala.analysis.ColumnDef;
 import org.apache.impala.analysis.KuduPartitionParam;
 import org.apache.impala.catalog.Column;
-import org.apache.impala.catalog.FeCatalogUtils;
 import org.apache.impala.catalog.FeKuduTable;
 import org.apache.impala.catalog.KuduColumn;
 import org.apache.impala.catalog.KuduTable;
@@ -43,8 +41,6 @@ import org.apache.kudu.ColumnSchema;
 import org.apache.kudu.Schema;
 import org.apache.kudu.client.KuduClient;
 import org.apache.kudu.client.KuduException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableList;
@@ -210,7 +206,7 @@ public class LocalKuduTable extends LocalTable implements 
FeKuduTable {
       Set<Long> referencedPartitions) {
     // TODO(todd): the old implementation passes kuduTableName_ instead of 
name below.
     TTableDescriptor desc = new TTableDescriptor(tableId, 
TTableType.KUDU_TABLE,
-        FeCatalogUtils.getTColumnDescriptors(this),
+        getTColumnDescriptors(),
         getNumClusteringCols(),
         name_, db_.getName());
     desc.setKuduTable(toTKuduTable());
diff --git 
a/fe/src/main/java/org/apache/impala/catalog/local/LocalPartitionSpec.java 
b/fe/src/main/java/org/apache/impala/catalog/local/LocalPartitionSpec.java
index c635158dc..8b5b70a7d 100644
--- a/fe/src/main/java/org/apache/impala/catalog/local/LocalPartitionSpec.java
+++ b/fe/src/main/java/org/apache/impala/catalog/local/LocalPartitionSpec.java
@@ -70,7 +70,7 @@ class LocalPartitionSpec implements PrunablePartition {
     }
   }
 
-  LocalPartitionSpec(LocalFsTable table, long id) {
+  LocalPartitionSpec(long id) {
     // Unpartitioned tables have a single partition with empty name.
     Preconditions.checkArgument(id == 
CatalogObjectsConstants.PROTOTYPE_PARTITION_ID ||
         id == UNPARTITIONED_ID);
diff --git 
a/fe/src/main/java/org/apache/impala/catalog/local/LocalSystemTable.java 
b/fe/src/main/java/org/apache/impala/catalog/local/LocalSystemTable.java
index 2a590afb5..f3999d951 100644
--- a/fe/src/main/java/org/apache/impala/catalog/local/LocalSystemTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/local/LocalSystemTable.java
@@ -20,7 +20,6 @@ package org.apache.impala.catalog.local;
 import java.util.Set;
 
 import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.impala.catalog.FeCatalogUtils;
 import org.apache.impala.catalog.FeSystemTable;
 import org.apache.impala.catalog.local.MetaProvider.TableMetaRef;
 import org.apache.impala.catalog.Type;
@@ -90,7 +89,7 @@ public class LocalSystemTable extends LocalTable implements 
FeSystemTable {
   public TTableDescriptor toThriftDescriptor(
       int tableId, Set<Long> referencedPartitions) {
     TTableDescriptor tableDescriptor = new TTableDescriptor(tableId,
-        TTableType.SYSTEM_TABLE, FeCatalogUtils.getTColumnDescriptors(this),
+        TTableType.SYSTEM_TABLE, getTColumnDescriptors(),
         getNumClusteringCols(), getName(), getDb().getName());
     tableDescriptor.setSystemTable(getTSystemTable());
     return tableDescriptor;
diff --git a/fe/src/main/java/org/apache/impala/catalog/local/LocalTable.java 
b/fe/src/main/java/org/apache/impala/catalog/local/LocalTable.java
index d9e36443c..fe5e39fed 100644
--- a/fe/src/main/java/org/apache/impala/catalog/local/LocalTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/local/LocalTable.java
@@ -43,7 +43,6 @@ import org.apache.impala.catalog.IcebergStructField;
 import org.apache.impala.catalog.IcebergTable;
 import org.apache.impala.catalog.KuduTable;
 import org.apache.impala.catalog.SideloadTableStats;
-import org.apache.impala.catalog.SqlConstraints;
 import org.apache.impala.catalog.StructField;
 import org.apache.impala.catalog.StructType;
 import org.apache.impala.catalog.SystemTable;
@@ -278,11 +277,6 @@ abstract class LocalTable implements FeTable {
     return cols_ == null ? Collections.emptyList() : cols_.colsByPos_;
   }
 
-  @Override
-  public SqlConstraints getSqlConstraints() {
-    return new SqlConstraints(new ArrayList<>(), new ArrayList<>());
-  }
-
   @Override
   public List<Column> getColumnsInHiveOrder() {
     List<Column> columns = Lists.newArrayList(getNonClusteringColumns());
diff --git a/fe/src/main/java/org/apache/impala/planner/IcebergScanNode.java 
b/fe/src/main/java/org/apache/impala/planner/IcebergScanNode.java
index 4eaae76fb..91754ab4c 100644
--- a/fe/src/main/java/org/apache/impala/planner/IcebergScanNode.java
+++ b/fe/src/main/java/org/apache/impala/planner/IcebergScanNode.java
@@ -29,7 +29,6 @@ import org.apache.impala.analysis.Analyzer;
 import org.apache.impala.analysis.Expr;
 import org.apache.impala.analysis.MultiAggregateInfo;
 import org.apache.impala.analysis.TableRef;
-import org.apache.impala.catalog.FeCatalogUtils;
 import org.apache.impala.catalog.FeFsPartition;
 import org.apache.impala.catalog.FeFsTable;
 import org.apache.impala.catalog.FeIcebergTable;
@@ -171,8 +170,7 @@ public class IcebergScanNode extends HdfsScanNode {
    * unpartitioned hdfs table
    */
   private static List<? extends FeFsPartition> getIcebergPartition(FeFsTable 
feFsTable) {
-    Collection<? extends FeFsPartition> partitions =
-        FeCatalogUtils.loadAllPartitions(feFsTable);
+    Collection<? extends FeFsPartition> partitions = 
feFsTable.loadAllPartitions();
     return new ArrayList<>(partitions);
   }
 
diff --git a/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java 
b/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
index 565ee58b2..22e2b9cf9 100644
--- a/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
+++ b/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
@@ -2056,8 +2056,7 @@ public class CatalogOpExecutor {
     Preconditions.checkState(params.isSetPartition_stats());
     List<HdfsPartition.Builder> modifiedParts = Lists.newArrayList();
     // TODO(todd) only load the partitions that were modified in 'params'.
-    Collection<? extends FeFsPartition> parts =
-        FeCatalogUtils.loadAllPartitions(table);
+    Collection<? extends FeFsPartition> parts = table.loadAllPartitions();
     for (FeFsPartition fePartition: parts) {
       // TODO(todd): avoid downcast to implementation class
       HdfsPartition partition = (HdfsPartition)fePartition;
@@ -2856,8 +2855,7 @@ public class CatalogOpExecutor {
 
     // List of partitions that were modified as part of this operation.
     List<HdfsPartition.Builder> modifiedParts = Lists.newArrayList();
-    Collection<? extends FeFsPartition> parts =
-        FeCatalogUtils.loadAllPartitions(hdfsTable);
+    Collection<? extends FeFsPartition> parts = hdfsTable.loadAllPartitions();
     for (FeFsPartition fePart: parts) {
       // TODO(todd): avoid downcast
       HdfsPartition part = (HdfsPartition) fePart;
@@ -3325,8 +3323,7 @@ public class CatalogOpExecutor {
       }
     }
     if (table.getNumClusteringCols() > 0) {
-      Collection<? extends FeFsPartition> parts =
-          FeCatalogUtils.loadAllPartitions(hdfsTable);
+      Collection<? extends FeFsPartition> parts = 
hdfsTable.loadAllPartitions();
       boolean hasTasks = false;
       for (FeFsPartition part: parts) {
         if (part.isMarkedCached()) {
@@ -3471,8 +3468,7 @@ public class CatalogOpExecutor {
           LOG.trace("Time elapsed to truncate table {} using HMS API: {} msec",
               hdfsTable.getFullName(), sw.elapsed(TimeUnit.MILLISECONDS));
         } else {
-          Collection<? extends FeFsPartition> parts =
-              FeCatalogUtils.loadAllPartitions(hdfsTable);
+          Collection<? extends FeFsPartition> parts = 
hdfsTable.loadAllPartitions();
           createEmptyBaseDirectories(parts, tblTxn.writeId);
           LOG.trace("Time elapsed after creating empty base directories for 
table {}: {} "
                   + "msec", table.getFullName(), 
sw.elapsed(TimeUnit.MILLISECONDS));
@@ -3609,8 +3605,7 @@ public class CatalogOpExecutor {
       if (!isTableBeingReplicated) {
         // when table is replicated we let the HMS API handle the file 
deletion logic
         // otherwise we delete the files.
-        Collection<? extends FeFsPartition> parts = FeCatalogUtils
-            .loadAllPartitions(hdfsTable);
+        Collection<? extends FeFsPartition> parts = 
hdfsTable.loadAllPartitions();
         for (FeFsPartition part : parts) {
           FileSystemUtil.deleteAllVisibleFiles(new Path(part.getLocation()));
         }
@@ -6189,8 +6184,7 @@ public class CatalogOpExecutor {
       if (tbl.getNumClusteringCols() > 0) {
         // If this is a partitioned table, submit cache directives for all 
uncached
         // partitions.
-        Collection<? extends FeFsPartition> parts =
-            FeCatalogUtils.loadAllPartitions(hdfsTable);
+        Collection<? extends FeFsPartition> parts = 
hdfsTable.loadAllPartitions();
         // List of partitions that were modified as part of this operation.
         List<HdfsPartition.Builder> modifiedParts = Lists.newArrayList();
         for (FeFsPartition fePartition: parts) {
@@ -6244,8 +6238,7 @@ public class CatalogOpExecutor {
       if (cacheDirId != null) HdfsCachingUtil.removeTblCacheDirective(msTbl);
       // Uncache all table partitions.
       if (tbl.getNumClusteringCols() > 0) {
-        Collection<? extends FeFsPartition> parts =
-            FeCatalogUtils.loadAllPartitions(hdfsTable);
+        Collection<? extends FeFsPartition> parts = 
hdfsTable.loadAllPartitions();
         // List of partitions that were modified as part of this operation.
         List<HdfsPartition.Builder> modifiedParts = Lists.newArrayList();
         for (FeFsPartition fePartition: parts) {
@@ -7336,8 +7329,7 @@ public class CatalogOpExecutor {
                   .build());
         }
       }
-      Collection<? extends FeFsPartition> parts =
-          FeCatalogUtils.loadAllPartitions((FeFsTable)table);
+      Collection<? extends FeFsPartition> parts = 
((FeFsTable)table).loadAllPartitions();
       List<FeFsPartition> affectedExistingPartitions = new ArrayList<>();
       List<org.apache.hadoop.hive.metastore.api.Partition> 
hmsPartitionsStatsUnset =
           Lists.newArrayList();
diff --git 
a/fe/src/test/java/org/apache/impala/catalog/CatalogObjectToFromThriftTest.java 
b/fe/src/test/java/org/apache/impala/catalog/CatalogObjectToFromThriftTest.java
index 8b4f14bde..5e2959613 100644
--- 
a/fe/src/test/java/org/apache/impala/catalog/CatalogObjectToFromThriftTest.java
+++ 
b/fe/src/test/java/org/apache/impala/catalog/CatalogObjectToFromThriftTest.java
@@ -99,8 +99,7 @@ public class CatalogObjectToFromThriftTest {
       HdfsTable newHdfsTable = (HdfsTable) newTable;
       Assert.assertEquals(newHdfsTable.getPartitions().size(), 24);
       Assert.assertEquals(newHdfsTable.getPartitionIds().size(), 24);
-      Collection<? extends FeFsPartition> parts =
-          FeCatalogUtils.loadAllPartitions(newHdfsTable);
+      Collection<? extends FeFsPartition> parts = 
newHdfsTable.loadAllPartitions();
       for (FeFsPartition hdfsPart: parts) {
         Assert.assertEquals(hdfsPart.getFileDescriptors().size(), 1);
         Assert.assertTrue(
@@ -230,9 +229,8 @@ public class CatalogObjectToFromThriftTest {
     // Get any partition with valid HMS parameters to create a
     // dummy partition.
     long id = Iterables.getFirst(hdfsTable.getPartitionIds(), -1L);
-    HdfsPartition part = (HdfsPartition) FeCatalogUtils.loadPartition(
-        hdfsTable, id);
-    Assert.assertNotNull(part);;
+    HdfsPartition part = (HdfsPartition) hdfsTable.loadPartition(id);
+    Assert.assertNotNull(part);
     // Create a dummy partition with an invalid decimal type.
     try {
       new HdfsPartition.Builder(hdfsTable)
diff --git a/fe/src/test/java/org/apache/impala/catalog/CatalogTest.java 
b/fe/src/test/java/org/apache/impala/catalog/CatalogTest.java
index 63e6ed181..01dc27605 100644
--- a/fe/src/test/java/org/apache/impala/catalog/CatalogTest.java
+++ b/fe/src/test/java/org/apache/impala/catalog/CatalogTest.java
@@ -512,8 +512,7 @@ public class CatalogTest {
   public static void checkAllTypesPartitioning(FeFsTable table) {
     assertEquals(24, table.getPartitionIds().size());
     assertEquals(24, table.getPartitions().size());
-    Collection<? extends FeFsPartition> partitions =
-        FeCatalogUtils.loadAllPartitions(table);
+    Collection<? extends FeFsPartition> partitions = table.loadAllPartitions();
 
     // check that partition keys cover the date range 1/1/2009-12/31/2010
     // and that we have one file per partition.
diff --git 
a/fe/src/test/java/org/apache/impala/catalog/events/MetastoreEventsProcessorTest.java
 
b/fe/src/test/java/org/apache/impala/catalog/events/MetastoreEventsProcessorTest.java
index 302f79a7f..11e2160ca 100644
--- 
a/fe/src/test/java/org/apache/impala/catalog/events/MetastoreEventsProcessorTest.java
+++ 
b/fe/src/test/java/org/apache/impala/catalog/events/MetastoreEventsProcessorTest.java
@@ -726,8 +726,7 @@ public class MetastoreEventsProcessorTest {
     eventsProcessor_.processEvents();
 
     Collection<? extends FeFsPartition> parts =
-        FeCatalogUtils.loadAllPartitions((HdfsTable)
-            catalog_.getTable(TEST_DB_NAME, testTblName));
+        ((HdfsTable) catalog_.getTable(TEST_DB_NAME, 
testTblName)).loadAllPartitions();
     FeFsPartition singlePartition =
         Iterables.getOnlyElement(parts);
     assertTrue(newLocation.equals(singlePartition.getLocation()));
@@ -739,8 +738,7 @@ public class MetastoreEventsProcessorTest {
     eventsProcessor_.processEvents();
 
     Collection<? extends FeFsPartition> partsAfterTrivialAlter =
-        FeCatalogUtils.loadAllPartitions((HdfsTable)
-            catalog_.getTable(TEST_DB_NAME, testTblName));
+        ((HdfsTable) catalog_.getTable(TEST_DB_NAME, 
testTblName)).loadAllPartitions();
     FeFsPartition singlePartitionAfterTrivialAlter =
         Iterables.getOnlyElement(partsAfterTrivialAlter);
     for (String parameter : MetastoreEvents.parametersToIgnore) {
@@ -1161,7 +1159,7 @@ public class MetastoreEventsProcessorTest {
     Table tblAfterInsert = catalog_.getTable(tbl.getDb().getName(), 
tbl.getName());
     assertFalse(tblAfterInsert instanceof IncompleteTable);
     Collection<? extends FeFsPartition> partsAfterInsert =
-        FeCatalogUtils.loadAllPartitions((HdfsTable) tblAfterInsert);
+        ((HdfsTable) tblAfterInsert).loadAllPartitions();
     assertTrue("Partition not found after insert.",
         partsAfterInsert.size() > 0);
     FeFsPartition singlePart =
@@ -2284,8 +2282,7 @@ public class MetastoreEventsProcessorTest {
         eventsProcessor_.processEvents();
         if (shouldEventBeProcessed) {
           Collection<? extends FeFsPartition> partsAfterAdd =
-              FeCatalogUtils.loadAllPartitions((HdfsTable)
-                  catalog_.getTable(dbName, tblName));
+              ((HdfsTable) catalog_.getTable(dbName, 
tblName)).loadAllPartitions();
           assertTrue("Partitions should have been added.", 
partsAfterAdd.size() == 6);
         } else {
           assertFalse("Table should still have been in loaded state since sync 
is "
@@ -2306,8 +2303,7 @@ public class MetastoreEventsProcessorTest {
         eventsProcessor_.processEvents();
         if (shouldEventBeProcessed) {
           Collection<? extends FeFsPartition> partsAfterDrop =
-              FeCatalogUtils.loadAllPartitions((HdfsTable) 
catalog_.getTable(dbName,
-                  tblName));
+              ((HdfsTable) catalog_.getTable(dbName, 
tblName)).loadAllPartitions();
           assertTrue("Partitions should have been dropped", 
partsAfterDrop.size() == 2);
         } else {
           assertFalse("Table should still have been in loaded state since sync 
is "
@@ -2331,8 +2327,7 @@ public class MetastoreEventsProcessorTest {
         eventsProcessor_.processEvents();
         if (shouldEventBeProcessed) {
           Collection<? extends FeFsPartition> partsAfterAlter =
-              FeCatalogUtils.loadAllPartitions((HdfsTable)
-                  catalog_.getTable(dbName, tblName));
+              ((HdfsTable) catalog_.getTable(dbName, 
tblName)).loadAllPartitions();
           for (FeFsPartition part : partsAfterAlter) {
             assertTrue("Partition location should have been modified by 
alter.",
                 location.equals(part.getLocation()));
@@ -3314,8 +3309,7 @@ public class MetastoreEventsProcessorTest {
     assertEquals(fileMetadataLoadAfter, fileMetadataLoadBefore);
 
     Collection<? extends FeFsPartition> parts =
-        FeCatalogUtils.loadAllPartitions((HdfsTable)
-            catalog_.getTable(TEST_DB_NAME, testTblName));
+        ((HdfsTable) catalog_.getTable(TEST_DB_NAME, 
testTblName)).loadAllPartitions();
     FeFsPartition singlePartition =
         Iterables.getOnlyElement(parts);
     String val = singlePartition.getParameters().getOrDefault(testKey, null);
diff --git 
a/fe/src/test/java/org/apache/impala/catalog/local/LocalCatalogTest.java 
b/fe/src/test/java/org/apache/impala/catalog/local/LocalCatalogTest.java
index 248710d1d..d0ff5836a 100644
--- a/fe/src/test/java/org/apache/impala/catalog/local/LocalCatalogTest.java
+++ b/fe/src/test/java/org/apache/impala/catalog/local/LocalCatalogTest.java
@@ -249,8 +249,7 @@ public class LocalCatalogTest {
     int dayCol = t.getColumn("day").getPosition();
     Set<Long> ids = t.getNullPartitionIds(dayCol);
     assertEquals(1,  ids.size());
-    FeFsPartition partition = FeCatalogUtils.loadPartition(
-        t, Iterables.getOnlyElement(ids));
+    FeFsPartition partition = t.loadPartition(Iterables.getOnlyElement(ids));
     assertTrue(Expr.IS_NULL_VALUE.apply(partition.getPartitionValue(dayCol)));
   }
 
@@ -258,7 +257,7 @@ public class LocalCatalogTest {
   public void testLoadFileDescriptors() throws Exception {
     FeFsTable t = (FeFsTable) catalog_.getTable("functional",  "alltypes");
     int totalFds = 0;
-    for (FeFsPartition p: FeCatalogUtils.loadAllPartitions(t)) {
+    for (FeFsPartition p: t.loadAllPartitions()) {
       List<FileDescriptor> fds = p.getFileDescriptors();
       totalFds += fds.size();
       for (FileDescriptor fd : fds) {
@@ -313,7 +312,7 @@ public class LocalCatalogTest {
   public void testLoadFileDescriptorsUnpartitioned() throws Exception {
     FeFsTable t = (FeFsTable) catalog_.getTable("tpch",  "region");
     int totalFds = 0;
-    for (FeFsPartition p: FeCatalogUtils.loadAllPartitions(t)) {
+    for (FeFsPartition p: t.loadAllPartitions()) {
       List<FileDescriptor> fds = p.getFileDescriptors();
       totalFds += fds.size();
       for (FileDescriptor fd : fds) {
diff --git 
a/fe/src/test/java/org/apache/impala/catalog/metastore/CatalogHmsSyncToLatestEventIdTest.java
 
b/fe/src/test/java/org/apache/impala/catalog/metastore/CatalogHmsSyncToLatestEventIdTest.java
index fa0c71e17..301d3e7f3 100644
--- 
a/fe/src/test/java/org/apache/impala/catalog/metastore/CatalogHmsSyncToLatestEventIdTest.java
+++ 
b/fe/src/test/java/org/apache/impala/catalog/metastore/CatalogHmsSyncToLatestEventIdTest.java
@@ -45,7 +45,6 @@ import org.junit.AfterClass;
 import org.junit.Assume;
 import org.junit.Before;
 import org.junit.BeforeClass;
-import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
@@ -53,10 +52,8 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
 import org.apache.impala.catalog.CatalogException;
-import org.apache.impala.compat.MetastoreShim;
 
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
@@ -278,7 +275,7 @@ public class CatalogHmsSyncToLatestEventIdTest extends 
AbstractCatalogMetastoreT
             // assert that  partition with new location from cached table
             // exists
             FeFsPartition modifiedPartition = null;
-            for (FeFsPartition part : FeCatalogUtils.loadAllPartitions(tbl)) {
+            for (FeFsPartition part : tbl.loadAllPartitions()) {
                 if (part.getLocation().equals(newLocation)) {
                     modifiedPartition = part;
                     break;
@@ -343,8 +340,7 @@ public class CatalogHmsSyncToLatestEventIdTest extends 
AbstractCatalogMetastoreT
             assertTrue(destCatalogTbl.getPartitions().size() == 2);
 
             // assert that part with val 1 does not exist in src table
-            for (FeFsPartition srcPartition :
-                FeCatalogUtils.loadAllPartitions(srcCatalogTbl)) {
+            for (FeFsPartition srcPartition : 
srcCatalogTbl.loadAllPartitions()) {
                 List<String> partVals =
                     srcPartition.getPartitionValuesAsStrings(false);
                 assertFalse(partVals.equals(Arrays.asList("1")));
@@ -635,7 +631,7 @@ public class CatalogHmsSyncToLatestEventIdTest extends 
AbstractCatalogMetastoreT
             // assert that  partition with new location from cached table
             // exists
             FeFsPartition modifiedPartition = null;
-            for (FeFsPartition part : FeCatalogUtils.loadAllPartitions(tbl)) {
+            for (FeFsPartition part : tbl.loadAllPartitions()) {
                 if (part.getLocation().equals(newLocation)) {
                     modifiedPartition = part;
                     break;
diff --git a/fe/src/test/java/org/apache/impala/testutil/BlockIdGenerator.java 
b/fe/src/test/java/org/apache/impala/testutil/BlockIdGenerator.java
index f101fd3b6..01662fd2e 100644
--- a/fe/src/test/java/org/apache/impala/testutil/BlockIdGenerator.java
+++ b/fe/src/test/java/org/apache/impala/testutil/BlockIdGenerator.java
@@ -29,7 +29,6 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 
 import org.apache.impala.catalog.Catalog;
-import org.apache.impala.catalog.FeCatalogUtils;
 import org.apache.impala.catalog.FeDb;
 import org.apache.impala.catalog.FeFsPartition;
 import org.apache.impala.catalog.FeTable;
@@ -73,8 +72,7 @@ public class BlockIdGenerator {
 
             // Write the output as <tablename>: <blockid1> <blockid2> <etc>
             writer.write(tableName + ":");
-            Collection<? extends FeFsPartition> parts =
-                FeCatalogUtils.loadAllPartitions(hdfsTable);
+            Collection<? extends FeFsPartition> parts = 
hdfsTable.loadAllPartitions();
             for (FeFsPartition partition : parts) {
               List<FileDescriptor> fileDescriptors = 
partition.getFileDescriptors();
               for (FileDescriptor fd : fileDescriptors) {

Reply via email to