This is an automated email from the ASF dual-hosted git repository.

boroknagyz pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git


The following commit(s) were added to refs/heads/master by this push:
     new ef2d50e68 IMPALA-14592: Read Row Lineage of Iceberg tables
ef2d50e68 is described below

commit ef2d50e68987ccbf713122b8f301709a874c5121
Author: Zoltan Borok-Nagy <[email protected]>
AuthorDate: Wed Feb 18 18:39:20 2026 +0100

    IMPALA-14592: Read Row Lineage of Iceberg tables
    
    Iceberg V3 added mandatory row lineage tracking for Iceberg tables.
    This means each field has a row-id and a last-updated-sequence-number
    associated with it. These are either stored in the data files, or can
    be calculated from file metadata the following way:
    
    * row-id: _row_id field of the record. If missing or NULL, then it
      is first-row-id of DataFile plus FILE__POSITION
    * last-updated-sequence-number: _last_updated_sequence_number of the
      record. If missing or NULL, then it is the data-sequence-number of
      the DataFile.
    
    To support Row Lineage in Impala, we introduce the concept of Hidden
    Columns. Hidden Columns are columns of a table that can be stored in
    the data files along with the data, but they don't participate in
    'select *' expansion and they are non-modifiable. Some DBs refer to such
    columns as "system columns". They are different from Virtual Columns
    as Virtual Columns are not stored in the data files.
    
    We introduce the following Hidden Columns:
    * _file_row_id: BIGINT field with field id 2147483540.
    * _file_last_updated_sequence_number: BIGINT field with field id
      2147483539
    
    We also introduce the following Virtual Column:
    * ICEBERG__FIRST__ROW__ID: returns the first-row-id of the DataFile.
      This is stored in the metadata once per data file; it is not
      present in the data files themselves.
    
    Now we can calculate Iceberg V3 row-id and last-updated-sequence-number
    the following way:
    
    * row-id:
      COALESCE(_file_row_id,
               ICEBERG__FIRST__ROW__ID + FILE__POSITION)
    * last-updated-sequence-number:
      COALESCE(_file_last_updated_sequence_number,
               ICEBERG__DATA__SEQUENCE__NUMBER)
    
    Later we might add syntactic sugar for the above; for now, this patch
    set only makes it possible to calculate the values via the above
    expressions.
    
    Testing
     * e2e tests added with Iceberg V3 tables written by Spark
    
    Change-Id: I71b1076b25c9e7a0a6c9428b24abc986f5382c71
    Reviewed-on: http://gerrit.cloudera.org:8080/24042
    Reviewed-by: Impala Public Jenkins <[email protected]>
    Tested-by: Impala Public Jenkins <[email protected]>
---
 be/src/exec/file-metadata-utils.cc                 |  15 +
 be/src/exec/hdfs-scan-node-base.cc                 |  16 +-
 be/src/exec/hdfs-table-sink.cc                     |   4 +-
 be/src/exec/hdfs-table-writer.cc                   |   6 +-
 be/src/exec/parquet/hdfs-parquet-table-writer.cc   |   3 +-
 be/src/runtime/descriptors.cc                      |   1 +
 be/src/runtime/descriptors.h                       |   5 +
 common/fbs/IcebergObjects.fbs                      |   1 +
 common/thrift/CatalogObjects.thrift                |   4 +-
 .../impala/analysis/AlterTableAlterColStmt.java    |   4 +
 .../impala/analysis/AlterTableDropColStmt.java     |   9 +-
 .../apache/impala/analysis/ComputeStatsStmt.java   |   6 +-
 .../org/apache/impala/analysis/InsertStmt.java     |  12 +-
 .../org/apache/impala/analysis/SelectStmt.java     |   1 +
 .../org/apache/impala/analysis/ToSqlUtils.java     |   1 +
 .../java/org/apache/impala/catalog/Column.java     |  14 +-
 .../org/apache/impala/catalog/FeIcebergTable.java  |  15 +
 .../java/org/apache/impala/catalog/FeTable.java    |   2 +-
 .../org/apache/impala/catalog/IcebergColumn.java   |  13 +
 .../impala/catalog/IcebergFileMetadataLoader.java  |  13 +-
 .../org/apache/impala/catalog/IcebergTable.java    |  21 +
 .../impala/catalog/IcebergTimeTravelTable.java     |  17 +-
 .../org/apache/impala/catalog/VirtualColumn.java   |   5 +
 .../impala/catalog/iceberg/IcebergCtasTarget.java  |  23 +
 .../impala/catalog/local/IcebergMetaProvider.java  |   1 +
 .../impala/catalog/local/LocalIcebergTable.java    |  19 +
 .../apache/impala/planner/IcebergScanPlanner.java  |   2 +-
 .../impala/service/DescribeResultFactory.java      |   2 +
 .../java/org/apache/impala/util/IcebergUtil.java   |   3 +-
 testdata/data/README                               |  10 +
 ...1a8-dc43-4e8b-ad61-b691a1754e16-0-00001.parquet | Bin 0 -> 392 bytes
 ...793-eb01-410d-a963-807e22437794-0-00001.parquet | Bin 0 -> 1157 bytes
 ...4a3-1aa3-4a3c-87a1-cd3d2988c499-0-00001.parquet | Bin 0 -> 392 bytes
 ...9ee-b654-4420-a7a5-9d7964ffd9c9-0-00001.parquet | Bin 0 -> 392 bytes
 .../7411e291-ddc0-4c54-9e25-75ef7878df0d-m0.avro   | Bin 0 -> 7791 bytes
 .../7411e291-ddc0-4c54-9e25-75ef7878df0d-m1.avro   | Bin 0 -> 7793 bytes
 .../7411e291-ddc0-4c54-9e25-75ef7878df0d-m2.avro   | Bin 0 -> 7797 bytes
 .../7411e291-ddc0-4c54-9e25-75ef7878df0d-m3.avro   | Bin 0 -> 7838 bytes
 .../7a6ede87-b2d9-462e-9baa-77e456f07671-m0.avro   | Bin 0 -> 7804 bytes
 .../8ea2cf61-8fe7-4599-923a-d64b424cae3f-m0.avro   | Bin 0 -> 7792 bytes
 .../e46e6fcd-0a4e-4001-a0db-e199a5eb4227-m0.avro   | Bin 0 -> 7790 bytes
 .../fe2e965b-4685-4369-babf-31d13f81f10a-m0.avro   | Bin 0 -> 7792 bytes
 ...808-1-fe2e965b-4685-4369-babf-31d13f81f10a.avro | Bin 0 -> 4877 bytes
 ...432-1-7411e291-ddc0-4c54-9e25-75ef7878df0d.avro | Bin 0 -> 4815 bytes
 ...466-1-e46e6fcd-0a4e-4001-a0db-e199a5eb4227.avro | Bin 0 -> 4828 bytes
 ...324-1-8ea2cf61-8fe7-4599-923a-d64b424cae3f.avro | Bin 0 -> 4754 bytes
 .../metadata/v1.metadata.json                      |   1 +
 .../metadata/v2.metadata.json                      |   1 +
 .../metadata/v3.metadata.json                      |   1 +
 .../metadata/v4.metadata.json                      |   1 +
 .../metadata/v5.metadata.json                      |   1 +
 .../metadata/version-hint.text                     |   1 +
 ...69cb204-0c90-4255-8b0b-7af3aec3f75d-0-00001.orc | Bin 0 -> 235 bytes
 ...ac66c53-638d-4aaf-9084-8a24b7aa2cdf-0-00001.orc | Bin 0 -> 235 bytes
 ...69a801d-ce1f-478e-98e4-f5321d122361-0-00001.orc | Bin 0 -> 235 bytes
 ...4703627-8eea-44f1-a09b-e5bdad596090-0-00001.orc | Bin 0 -> 423 bytes
 .../3159a0a5-681d-4ac9-bf72-4be5814546cf-m0.avro   | Bin 0 -> 7787 bytes
 .../4e12ed17-3e31-4d27-b35f-55467a2bf5fe-m0.avro   | Bin 0 -> 7788 bytes
 .../8542d294-4d10-4efc-9e9d-69d3dce88108-m0.avro   | Bin 0 -> 7787 bytes
 .../8542d294-4d10-4efc-9e9d-69d3dce88108-m1.avro   | Bin 0 -> 7786 bytes
 .../8542d294-4d10-4efc-9e9d-69d3dce88108-m2.avro   | Bin 0 -> 7789 bytes
 .../8542d294-4d10-4efc-9e9d-69d3dce88108-m3.avro   | Bin 0 -> 7825 bytes
 .../e5f99ba8-b804-434f-aa9e-d51e86cc0180-m0.avro   | Bin 0 -> 7784 bytes
 ...345-1-3159a0a5-681d-4ac9-bf72-4be5814546cf.avro | Bin 0 -> 4833 bytes
 ...760-1-4e12ed17-3e31-4d27-b35f-55467a2bf5fe.avro | Bin 0 -> 4884 bytes
 ...232-1-8542d294-4d10-4efc-9e9d-69d3dce88108.avro | Bin 0 -> 4815 bytes
 ...313-1-e5f99ba8-b804-434f-aa9e-d51e86cc0180.avro | Bin 0 -> 4754 bytes
 .../metadata/v1.metadata.json                      |   1 +
 .../metadata/v2.metadata.json                      |   1 +
 .../metadata/v3.metadata.json                      |   1 +
 .../metadata/v4.metadata.json                      |   1 +
 .../metadata/v5.metadata.json                      |   1 +
 .../metadata/version-hint.text                     |   1 +
 .../queries/QueryTest/iceberg-v3-row-lineage.test  | 502 +++++++++++++++++++++
 tests/query_test/test_iceberg.py                   |  50 +-
 75 files changed, 775 insertions(+), 37 deletions(-)

diff --git a/be/src/exec/file-metadata-utils.cc 
b/be/src/exec/file-metadata-utils.cc
index 95fa164eb..10a030792 100644
--- a/be/src/exec/file-metadata-utils.cc
+++ b/be/src/exec/file-metadata-utils.cc
@@ -82,6 +82,21 @@ void FileMetadataUtils::AddFileLevelVirtualColumns(MemPool* 
mem_pool,
       } else {
         template_tuple->SetNull(slot_desc->null_indicator_offset());
       }
+    } else if (slot_desc->virtual_column_type() ==
+        TVirtualColumnType::ICEBERG_FIRST_ROW_ID) {
+      using namespace org::apache::impala::fb;
+      const FbIcebergMetadata* ice_metadata =
+          file_desc_->file_metadata->iceberg_metadata();
+      DCHECK(ice_metadata != nullptr);
+
+      int64_t first_row_id = ice_metadata->first_row_id();
+      if (first_row_id > -1) {
+        int64_t* slot = 
template_tuple->GetBigIntSlot(slot_desc->tuple_offset());
+        *slot = first_row_id;
+        template_tuple->SetNotNull(slot_desc->null_indicator_offset());
+      } else {
+        template_tuple->SetNull(slot_desc->null_indicator_offset());
+      }
     }
   }
 }
diff --git a/be/src/exec/hdfs-scan-node-base.cc 
b/be/src/exec/hdfs-scan-node-base.cc
index 70495e01f..8a0b41272 100644
--- a/be/src/exec/hdfs-scan-node-base.cc
+++ b/be/src/exec/hdfs-scan-node-base.cc
@@ -1125,19 +1125,15 @@ void HdfsScanPlanNode::ComputeSlotMaterializationOrder(
 bool HdfsScanPlanNode::HasVirtualColumnInTemplateTuple() const {
   for (SlotDescriptor* sd : virtual_column_slots_) {
     DCHECK(sd->IsVirtual());
-    if (sd->virtual_column_type() == TVirtualColumnType::INPUT_FILE_NAME) {
-      return true;
-    } else if (sd->virtual_column_type() == TVirtualColumnType::FILE_POSITION) 
{
+    if (sd->virtual_column_type() == TVirtualColumnType::FILE_POSITION) {
       // We return false at the end of the function if there are no virtual
       // columns in the template tuple.
       continue;
-    } else if (sd->virtual_column_type() == 
TVirtualColumnType::PARTITION_SPEC_ID) {
-      return true;
-    } else if (sd->virtual_column_type() ==
-        TVirtualColumnType::ICEBERG_PARTITION_SERIALIZED) {
-      return true;
-    } else if (sd->virtual_column_type() ==
-        TVirtualColumnType::ICEBERG_DATA_SEQUENCE_NUMBER) {
+    } else if (sd->virtual_column_type() == 
TVirtualColumnType::INPUT_FILE_NAME ||
+        sd->virtual_column_type() == TVirtualColumnType::PARTITION_SPEC_ID ||
+        sd->virtual_column_type() == 
TVirtualColumnType::ICEBERG_PARTITION_SERIALIZED ||
+        sd->virtual_column_type() == 
TVirtualColumnType::ICEBERG_DATA_SEQUENCE_NUMBER ||
+        sd->virtual_column_type() == TVirtualColumnType::ICEBERG_FIRST_ROW_ID) 
{
       return true;
     } else {
       // Adding DCHECK here so we don't forget to update this when adding new 
virtual
diff --git a/be/src/exec/hdfs-table-sink.cc b/be/src/exec/hdfs-table-sink.cc
index c1a9b3bef..b18536582 100644
--- a/be/src/exec/hdfs-table-sink.cc
+++ b/be/src/exec/hdfs-table-sink.cc
@@ -124,9 +124,9 @@ Status HdfsTableSink::Prepare(RuntimeState* state, 
MemTracker* parent_mem_tracke
         << DebugString();
     DCHECK_EQ(partition_key_expr_evals_.size(), 
table_desc_->num_clustering_cols())
         << DebugString();
-  }
-  DCHECK_GE(output_expr_evals_.size(),
+    DCHECK_GE(output_expr_evals_.size(),
       table_desc_->num_cols() - table_desc_->num_clustering_cols()) << 
DebugString();
+  }
 
   return Status::OK();
 }
diff --git a/be/src/exec/hdfs-table-writer.cc b/be/src/exec/hdfs-table-writer.cc
index c710ac6f0..890735d26 100644
--- a/be/src/exec/hdfs-table-writer.cc
+++ b/be/src/exec/hdfs-table-writer.cc
@@ -36,8 +36,10 @@ HdfsTableWriter::HdfsTableWriter(TableSinkBase* parent,
     output_expr_evals_(parent->output_expr_evals()) {
   int num_non_partition_cols =
       table_desc_->num_cols() - table_desc_->num_clustering_cols();
-  DCHECK_GE(output_expr_evals_.size(), num_non_partition_cols)
-      << parent_->DebugString();
+  if (!table_desc_->IsIcebergTable() || table_desc_->IcebergFormatVersion() < 
3) {
+    DCHECK_GE(output_expr_evals_.size(), num_non_partition_cols)
+        << parent_->DebugString();
+  }
 }
 
 Status HdfsTableWriter::Write(const uint8_t* data, int32_t len) {
diff --git a/be/src/exec/parquet/hdfs-parquet-table-writer.cc 
b/be/src/exec/parquet/hdfs-parquet-table-writer.cc
index e104eef47..00c02911b 100644
--- a/be/src/exec/parquet/hdfs-parquet-table-writer.cc
+++ b/be/src/exec/parquet/hdfs-parquet-table-writer.cc
@@ -1372,7 +1372,8 @@ Status HdfsParquetTableWriter::Init() {
     page_row_count_limit_ = query_options.parquet_page_row_count_limit;
   }
 
-  int num_cols = table_desc_->num_cols() - table_desc_->num_clustering_cols();
+  int num_cols = min<size_t>(output_expr_evals_.size(),
+      table_desc_->num_cols() - table_desc_->num_clustering_cols());
   // When opening files using the hdfsOpenFile() API, the maximum block size 
is limited to
   // 2GB.
   int64_t min_block_size = MinBlockSize(num_cols);
diff --git a/be/src/runtime/descriptors.cc b/be/src/runtime/descriptors.cc
index e018f7a9d..552001cb0 100644
--- a/be/src/runtime/descriptors.cc
+++ b/be/src/runtime/descriptors.cc
@@ -272,6 +272,7 @@ HdfsTableDescriptor::HdfsTableDescriptor(const 
TTableDescriptor& tdesc, ObjectPo
   valid_write_id_list_ = tdesc.hdfsTable.valid_write_ids;
   if (tdesc.__isset.icebergTable) {
     is_iceberg_ = true;
+    iceberg_format_version_ = tdesc.icebergTable.format_version;
     iceberg_table_location_ = tdesc.icebergTable.table_location;
     iceberg_spec_id_ = tdesc.icebergTable.default_partition_spec_id;
     iceberg_partition_specs_ = tdesc.icebergTable.partition_spec;
diff --git a/be/src/runtime/descriptors.h b/be/src/runtime/descriptors.h
index 071c32a17..ba6a36bee 100644
--- a/be/src/runtime/descriptors.h
+++ b/be/src/runtime/descriptors.h
@@ -517,6 +517,10 @@ class HdfsTableDescriptor : public TableDescriptor {
     return iceberg_spec_id_;
   }
 
+  int32_t IcebergFormatVersion() const {
+    return iceberg_format_version_;
+  }
+
   virtual std::string DebugString() const;
 
  protected:
@@ -539,6 +543,7 @@ class HdfsTableDescriptor : public TableDescriptor {
   int64_t iceberg_parquet_plain_page_size_;
   int64_t iceberg_parquet_dict_page_size_;
   int32_t iceberg_spec_id_;
+  int32_t iceberg_format_version_;
 };
 
 class HBaseTableDescriptor : public TableDescriptor {
diff --git a/common/fbs/IcebergObjects.fbs b/common/fbs/IcebergObjects.fbs
index f75c48766..068b01629 100644
--- a/common/fbs/IcebergObjects.fbs
+++ b/common/fbs/IcebergObjects.fbs
@@ -49,6 +49,7 @@ table FbIcebergMetadata {
   partition_keys : [FbIcebergPartitionTransformValue];
   equality_field_ids : [int];
   part_id : int = -1;
+  first_row_id : long;
 }
 
 table FbIcebergColumnStats {
diff --git a/common/thrift/CatalogObjects.thrift 
b/common/thrift/CatalogObjects.thrift
index 5e97cef72..981684fa0 100644
--- a/common/thrift/CatalogObjects.thrift
+++ b/common/thrift/CatalogObjects.thrift
@@ -94,7 +94,8 @@ enum TVirtualColumnType {
   ICEBERG_PARTITION_SERIALIZED,
   ICEBERG_DATA_SEQUENCE_NUMBER,
   PARTITION_VALUE_SERIALIZED,
-  BUCKET_ID
+  BUCKET_ID,
+  ICEBERG_FIRST_ROW_ID
 }
 
 // TODO: Since compression is also enabled for Kudu columns, we should
@@ -714,6 +715,7 @@ struct TIcebergTable {
   8: optional i64 parquet_plain_page_size;
   9: optional i64 parquet_dict_page_size;
   10: optional map<string, TIcebergPartitionStats> partition_stats;
+  11: optional i32 format_version = -1;
 }
 
 // System Table identifiers.
diff --git 
a/fe/src/main/java/org/apache/impala/analysis/AlterTableAlterColStmt.java 
b/fe/src/main/java/org/apache/impala/analysis/AlterTableAlterColStmt.java
index 8c7308fc1..2040e1e6e 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableAlterColStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableAlterColStmt.java
@@ -119,6 +119,10 @@ public class AlterTableAlterColStmt extends AlterTableStmt 
{
       throw new AnalysisException(String.format(
           "Column '%s' does not exist in table: %s", colName_, tableName));
     }
+    if (column.isHidden()) {
+      throw new AnalysisException(String.format(
+          "Hidden column '%s' of table '%s' cannot be altered.", colName_, 
tableName));
+    }
     // Verify the column being modified isn't a partition column.
     if (t.isClusteringColumn(column)) {
       throw new AnalysisException("Cannot modify partition column: " + 
colName_);
diff --git 
a/fe/src/main/java/org/apache/impala/analysis/AlterTableDropColStmt.java 
b/fe/src/main/java/org/apache/impala/analysis/AlterTableDropColStmt.java
index fb7b14aae..1ad19c1c8 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableDropColStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableDropColStmt.java
@@ -18,6 +18,7 @@
 package org.apache.impala.analysis;
 
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.impala.catalog.Column;
 import org.apache.impala.catalog.FeHBaseTable;
 import org.apache.impala.catalog.FeTable;
 import org.apache.impala.common.AnalysisException;
@@ -77,10 +78,14 @@ public class AlterTableDropColStmt extends AlterTableStmt {
           "Cannot drop column '%s' from %s. Tables must contain at least 1 
column.",
           colName_, tableName));
     }
-
-    if (t.getColumn(colName_) == null) {
+    Column column = t.getColumn(colName_);
+    if (column == null) {
       throw new AnalysisException(String.format(
           "Column '%s' does not exist in table: %s", colName_, tableName));
     }
+    if (column.isHidden()) {
+      throw new AnalysisException(String.format(
+          "Hidden column '%s' of table '%s' cannot be dropped.", colName_, 
t.getName()));
+    }
   }
 }
diff --git a/fe/src/main/java/org/apache/impala/analysis/ComputeStatsStmt.java 
b/fe/src/main/java/org/apache/impala/analysis/ComputeStatsStmt.java
index 2fbf9e510..d791680cd 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ComputeStatsStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ComputeStatsStmt.java
@@ -437,6 +437,10 @@ public class ComputeStatsStmt extends StatementBase 
implements SingleTableStmt {
           throw new AnalysisException(colName + " not found in table: " +
               table_.getName());
         }
+        if (col.isHidden()) {
+          throw new AnalysisException("COMPUTE STATS not supported for hidden 
column " +
+              col.getName() + " of table " + table_.getName());
+        }
         if (table_ instanceof FeFsTable && table_.isClusteringColumn(col)) {
           throw new AnalysisException("COMPUTE STATS not supported for 
partitioning " +
               "column " + col.getName() + " of HDFS table.");
@@ -957,7 +961,7 @@ public class ComputeStatsStmt extends StatementBase 
implements SingleTableStmt {
    */
   private boolean ignoreColumn(Column c) {
     Type t = c.getType();
-    return !t.isValid() || !t.isSupported() || t.isComplexType();
+    return !t.isValid() || !t.isSupported() || t.isComplexType() || 
c.isHidden();
   }
 
   public double getEffectiveSamplingPerc() { return effectiveSamplePerc_; }
diff --git a/fe/src/main/java/org/apache/impala/analysis/InsertStmt.java 
b/fe/src/main/java/org/apache/impala/analysis/InsertStmt.java
index 832d71d46..0c37d8d9b 100644
--- a/fe/src/main/java/org/apache/impala/analysis/InsertStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/InsertStmt.java
@@ -344,6 +344,7 @@ public class InsertStmt extends DmlStatementBase {
       List<Column> tableColumns = table_.getColumns();
       for (int i = numClusteringCols; i < tableColumns.size(); ++i) {
         Column c = tableColumns.get(i);
+        if (c.isHidden()) continue;
         // Omit auto-incrementing column for Kudu table since the values of 
the column
         // will be assigned by Kudu engine.
         if (c instanceof KuduColumn && ((KuduColumn)c).isAutoIncrementing()) 
continue;
@@ -365,6 +366,11 @@ public class InsertStmt extends DmlStatementBase {
         throw new AnalysisException(
             "Unknown column '" + columnName + "' in column permutation");
       }
+      if (column.isHidden()) {
+        throw new AnalysisException(
+            "Column '" + columnName + "' in column permutation is hidden and 
cannot be " +
+                "targeted for insert");
+      }
 
       if (!mentionedColumnNames.add(column.getName())) {
         throw new AnalysisException(
@@ -716,9 +722,9 @@ public class InsertStmt extends DmlStatementBase {
   private void checkColumnCoverage(List<Column> selectExprTargetColumns,
       Set<String> mentionedColumnNames, int numSelectListExprs,
       int numStaticPartitionExprs) throws AnalysisException {
+    int colCount = table_.getColumnsInHiveOrder().size();
     // Check that all required cols are mentioned by the permutation and 
partition clauses
-    if (selectExprTargetColumns.size() + numStaticPartitionExprs !=
-        table_.getColumns().size()) {
+    if (selectExprTargetColumns.size() + numStaticPartitionExprs != colCount) {
       // We've already ruled out too many columns in the permutation and 
partition clauses
       // by checking that there are no duplicates and that every column 
mentioned actually
       // exists. So all columns aren't mentioned in the query.
@@ -747,7 +753,7 @@ public class InsertStmt extends DmlStatementBase {
         throw new AnalysisException(String.format(
             "Target table '%s' has %s columns (%s) than the SELECT / VALUES 
clause %s" +
             " (%s)", table_.getFullName(), comparator,
-            table_.getColumns().size(), partitionClause, 
totalColumnsMentioned));
+            colCount, partitionClause, totalColumnsMentioned));
       } else {
         String partitionPrefix =
             (partitionKeyValues_ == null) ? "mentions" : "and PARTITION clause 
mention";
diff --git a/fe/src/main/java/org/apache/impala/analysis/SelectStmt.java 
b/fe/src/main/java/org/apache/impala/analysis/SelectStmt.java
index da7cef4a1..b830d74f7 100644
--- a/fe/src/main/java/org/apache/impala/analysis/SelectStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/SelectStmt.java
@@ -903,6 +903,7 @@ public class SelectStmt extends QueryStmt {
         TupleDescriptor tupleDesc = resolvedPath.destTupleDesc();
         FeTable table = tupleDesc.getTable();
         for (Column c: table.getColumnsInHiveOrder()) {
+          if (c.isHidden()) continue;
           // Omit auto-incrementing column for Kudu table since it's a hidden 
column.
           if (c instanceof KuduColumn && ((KuduColumn)c).isAutoIncrementing()) 
continue;
           addStarExpandedPath(selectListItem, resolvedPath, c.getName());
diff --git a/fe/src/main/java/org/apache/impala/analysis/ToSqlUtils.java 
b/fe/src/main/java/org/apache/impala/analysis/ToSqlUtils.java
index 1785a8c45..8dbaed19b 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ToSqlUtils.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ToSqlUtils.java
@@ -509,6 +509,7 @@ public class ToSqlUtils {
         table.getMetaStoreTable().getParameters());
     for (int i = 0; i < table.getColumns().size(); i++) {
       Column col = table.getColumns().get(i);
+      if (col.isHidden()) continue;
       if (!isHbaseTable && i < table.getNumClusteringCols()) {
         partitionColsSql.add(columnToSql(col));
       } else if (isFullAcid && i == table.getNumClusteringCols()) {
diff --git a/fe/src/main/java/org/apache/impala/catalog/Column.java 
b/fe/src/main/java/org/apache/impala/catalog/Column.java
index a376ee801..716f757ec 100644
--- a/fe/src/main/java/org/apache/impala/catalog/Column.java
+++ b/fe/src/main/java/org/apache/impala/catalog/Column.java
@@ -71,6 +71,15 @@ public class Column {
   public ColumnStats getStats() { return stats_; }
   public boolean isVirtual() { return false; }
 
+  /**
+   * Returns true if this column is hidden. Hidden columns are stored in data 
file,
+   * this is how they differ from virtual columns. Hidden columns are not 
expanded
+   * by select *, and are not writable by users. If written, their values are
+   * automatically generated by the system. For example, for Iceberg tables, 
hidden
+   * columns store row lineage information.
+   */
+  public boolean isHidden() { return false; }
+
   public boolean updateStats(ColumnStatisticsData statsData) {
     boolean statsDataCompatibleWithColType = stats_.update(name_, type_, 
statsData);
     if (LOG.isTraceEnabled()) {
@@ -105,9 +114,11 @@ public class Column {
     Column col;
     if (columnDesc.isIs_iceberg_column()) {
       Preconditions.checkState(columnDesc.isSetIceberg_field_id());
+      boolean isHidden = columnDesc.isSetIs_hidden() && 
columnDesc.isIs_hidden();
       col = new IcebergColumn(columnDesc.getColumnName(), type, comment, 
position,
           columnDesc.getIceberg_field_id(), 
columnDesc.getIceberg_field_map_key_id(),
-          columnDesc.getIceberg_field_map_value_id(), 
columnDesc.isIs_nullable());
+          columnDesc.getIceberg_field_map_value_id(), 
columnDesc.isIs_nullable(),
+          isHidden);
     } else if (columnDesc.isIs_paimon_column()) {
       Preconditions.checkState(columnDesc.isSetIceberg_field_id());
       col = new PaimonColumn(columnDesc.getColumnName(), type, comment, 
position,
@@ -135,6 +146,7 @@ public class Column {
     if (comment_ != null) colDesc.setComment(comment_);
     colDesc.setPosition(position_);
     colDesc.setCol_stats(getStats().toThrift());
+    if (isHidden()) { colDesc.setIs_hidden(true); }
     return colDesc;
   }
 
diff --git a/fe/src/main/java/org/apache/impala/catalog/FeIcebergTable.java 
b/fe/src/main/java/org/apache/impala/catalog/FeIcebergTable.java
index 7a5e7a01d..d8adb3a3f 100644
--- a/fe/src/main/java/org/apache/impala/catalog/FeIcebergTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/FeIcebergTable.java
@@ -54,6 +54,7 @@ import org.apache.iceberg.expressions.Expression;
 import org.apache.iceberg.FileContent;
 import org.apache.iceberg.FileScanTask;
 import org.apache.iceberg.io.CloseableIterable;
+import org.apache.iceberg.MetadataColumns;
 import org.apache.iceberg.PartitionField;
 import org.apache.iceberg.PartitionSpec;
 import org.apache.iceberg.Schema;
@@ -309,6 +310,19 @@ public interface FeIcebergTable extends FeFsTable {
     return getFeFsTable().getHostIndex();
   }
 
+  static List<Column> getHiddenColumns(int formatVersion, int startPosition) {
+    if (formatVersion < 3) return Collections.emptyList();
+    List<Column> ret = new ArrayList<>();
+    ret.add(new IcebergColumn("_file_row_id", Type.BIGINT,
+        "A unique long assigned for row lineage",
+        startPosition++, MetadataColumns.ROW_ID.fieldId(), -1, -1, true, 
true));
+    ret.add(new IcebergColumn("_file_last_updated_sequence_number", 
Type.BIGINT,
+        "The sequence number which last updated this row",
+        startPosition++, 
MetadataColumns.LAST_UPDATED_SEQUENCE_NUMBER.fieldId(),
+        -1, -1, true, true));
+    return ret;
+  }
+
   /**
    * @return true if there's at least one partition spec that has at least one 
non-VOID
    * partition field.
@@ -826,6 +840,7 @@ public interface FeIcebergTable extends FeFsTable {
     public static TIcebergTable getTIcebergTable(FeIcebergTable icebergTable,
         ThriftObjectType type) {
       TIcebergTable tIcebergTable = new TIcebergTable();
+      tIcebergTable.setFormat_version(icebergTable.getFormatVersion());
       tIcebergTable.setTable_location(icebergTable.getIcebergTableLocation());
 
       for (IcebergPartitionSpec partitionSpec : 
icebergTable.getPartitionSpecs()) {
diff --git a/fe/src/main/java/org/apache/impala/catalog/FeTable.java 
b/fe/src/main/java/org/apache/impala/catalog/FeTable.java
index 03fd94aa9..dd8927c1f 100644
--- a/fe/src/main/java/org/apache/impala/catalog/FeTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/FeTable.java
@@ -154,7 +154,7 @@ public interface FeTable {
     if (!isFullAcid) return columns;
     // Filter out row__id as it doesn't exist in HMS.
     return columns.stream()
-        .filter(c -> !c.getName().equals("row__id"))
+        .filter(c -> !c.isHidden() && !c.getName().equals("row__id"))
         .collect(Collectors.toList());
   }
 
diff --git a/fe/src/main/java/org/apache/impala/catalog/IcebergColumn.java 
b/fe/src/main/java/org/apache/impala/catalog/IcebergColumn.java
index 5cbd7f29c..3d7f3aa98 100644
--- a/fe/src/main/java/org/apache/impala/catalog/IcebergColumn.java
+++ b/fe/src/main/java/org/apache/impala/catalog/IcebergColumn.java
@@ -40,14 +40,24 @@ public class IcebergColumn extends Column {
   private final int fieldMapValueId_;
   // False for required Iceberg field, true for optional Iceberg field
   private final boolean isNullable_;
+  // Until only Iceberg columns can be hidden, we can keep this flag here.
+  private final boolean isHidden_;
 
   public IcebergColumn(String name, Type type, String comment, int position,
       int fieldId, int fieldMapKeyId, int fieldMapValueId, boolean isNullable) 
{
+    this(name, type, comment, position, fieldId, fieldMapKeyId, 
fieldMapValueId,
+        isNullable, false);
+  }
+
+  public IcebergColumn(String name, Type type, String comment, int position,
+      int fieldId, int fieldMapKeyId, int fieldMapValueId, boolean isNullable,
+      boolean isHidden) {
     super(name.toLowerCase(), type, comment, position);
     fieldId_ = fieldId;
     fieldMapKeyId_ = fieldMapKeyId;
     fieldMapValueId_ = fieldMapValueId;
     isNullable_ = isNullable;
+    isHidden_ = isHidden;
   }
 
   public static IcebergColumn cloneWithNullability(IcebergColumn source,
@@ -63,6 +73,9 @@ public class IcebergColumn extends Column {
 
   public boolean isNullable() { return isNullable_; }
 
+  @Override
+  public boolean isHidden() { return isHidden_; }
+
   @Override
   public TColumn toThrift() {
     TColumn tcol = super.toThrift();
diff --git 
a/fe/src/main/java/org/apache/impala/catalog/IcebergFileMetadataLoader.java 
b/fe/src/main/java/org/apache/impala/catalog/IcebergFileMetadataLoader.java
index 144782df4..5dc184a2d 100644
--- a/fe/src/main/java/org/apache/impala/catalog/IcebergFileMetadataLoader.java
+++ b/fe/src/main/java/org/apache/impala/catalog/IcebergFileMetadataLoader.java
@@ -52,6 +52,7 @@ import org.apache.impala.catalog.iceberg.GroupedContentFiles;
 import org.apache.impala.common.FileSystemUtil;
 import org.apache.impala.common.PrintUtils;
 import org.apache.impala.common.Pair;
+import org.apache.impala.fb.FbIcebergMetadata;
 import org.apache.impala.thrift.TIcebergPartition;
 import org.apache.impala.thrift.TNetworkAddress;
 import org.apache.impala.util.IcebergUtil;
@@ -192,11 +193,19 @@ public class IcebergFileMetadataLoader extends 
FileMetadataLoader {
         TIcebergPartition partition = oldIcebergPartitions_.get(oldPartId);
         Integer newPartId = loadedIcebergPartitions_.computeIfAbsent(
             partition, k -> nextPartitionId_.getAndIncrement());
+        FbIcebergMetadata icebergMetadata = 
fd.getFbFileMetadata().icebergMetadata();
         // Look up the partition info in this old file descriptor from the 
partition list.
         // Put the partition info in the new partitions map and write the new 
partition id
         // to the file metadata of the fd.
-        if (!fd.getFbFileMetadata().icebergMetadata().mutatePartId(newPartId)) 
{
-          throw new TableLoadingException("Error modifying the Iceberg file 
descriptor.");
+        if (!icebergMetadata.mutatePartId(newPartId)) {
+          throw new TableLoadingException(
+              "Error modifying the partition id of the Iceberg file 
descriptor.");
+        }
+        if (icebergMetadata.firstRowId() == -1 && contentFile.firstRowId() != 
null) {
+          if (!icebergMetadata.mutateFirstRowId(contentFile.firstRowId())) {
+            throw new TableLoadingException(
+                "Error modifying the first-row-id Iceberg file descriptor.");
+          }
         }
         ++loadStats_.skippedFiles;
         loadedFds_.add(fd);
diff --git a/fe/src/main/java/org/apache/impala/catalog/IcebergTable.java 
b/fe/src/main/java/org/apache/impala/catalog/IcebergTable.java
index 176ed01ab..335b4ffeb 100644
--- a/fe/src/main/java/org/apache/impala/catalog/IcebergTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/IcebergTable.java
@@ -33,6 +33,7 @@ import java.util.Map;
 import java.util.Objects;
 import java.util.Set;
 import java.util.TreeMap;
+import java.util.stream.Collectors;
 
 
 import org.apache.hadoop.hive.metastore.IMetaStoreClient;
@@ -409,6 +410,17 @@ public class IcebergTable extends Table implements 
FeIcebergTable {
     return icebergFieldIdToCol_.get(fieldId);
   }
 
+  @Override
+  public List<Column> getColumnsInHiveOrder() {
+    if (getFormatVersion() < 3) {
+      return super.getColumnsInHiveOrder();
+    } else {
+      return super.getColumnsInHiveOrder().stream()
+          .filter(col -> !col.isHidden())
+          .collect(Collectors.toList());
+    }
+  }
+
   @Override
   public TTable toThrift() {
     TTable table = super.toThrift();
@@ -675,6 +687,7 @@ public class IcebergTable extends Table implements 
FeIcebergTable {
       throws TableLoadingException, ImpalaRuntimeException {
     loadSchema();
     addVirtualColumns();
+    addHiddenColumns();
     partitionSpecs_ = Utils.loadPartitionSpecByIceberg(this);
     defaultPartitionSpecId_ = icebergApiTable_.spec().specId();
   }
@@ -731,6 +744,14 @@ public class IcebergTable extends Table implements 
FeIcebergTable {
     addVirtualColumn(VirtualColumn.PARTITION_SPEC_ID);
     addVirtualColumn(VirtualColumn.ICEBERG_PARTITION_SERIALIZED);
     addVirtualColumn(VirtualColumn.ICEBERG_DATA_SEQUENCE_NUMBER);
+    addVirtualColumn(VirtualColumn.ICEBERG_FIRST_ROW_ID);
+  }
+
+  private void addHiddenColumns() {
+    for (Column col : FeIcebergTable.getHiddenColumns(
+        getFormatVersion(), getColumns().size())) {
+      addColumn(col);
+    }
   }
 
   @Override
diff --git 
a/fe/src/main/java/org/apache/impala/catalog/IcebergTimeTravelTable.java 
b/fe/src/main/java/org/apache/impala/catalog/IcebergTimeTravelTable.java
index 35aedfe4f..8493edbc2 100644
--- a/fe/src/main/java/org/apache/impala/catalog/IcebergTimeTravelTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/IcebergTimeTravelTable.java
@@ -61,6 +61,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.TreeMap;
+import java.util.stream.Collectors;
 
 /**
  * Represents an Iceberg Table involved in Time Travel.
@@ -119,15 +120,29 @@ public class IcebergTimeTravelTable
       for (Column col : 
IcebergSchemaConverter.convertToImpalaSchema(icebergSchema)) {
         addColumn(col);
       }
+      addHiddenColumns();
     } catch (ImpalaRuntimeException e) {
       throw new AnalysisException("Could not create iceberg schema.", e);
     }
   }
 
+  private void addHiddenColumns() {
+    for (Column col : FeIcebergTable.getHiddenColumns(
+        getFormatVersion(), getColumns().size())) {
+      addColumn(col);
+    }
+  }
+
   @Override
   public List<Column> getColumnsInHiveOrder() {
     Preconditions.checkState(base_.getNumClusteringCols() == 0);
-    return colsByPos_;
+    if (getFormatVersion() < 3) {
+      return colsByPos_;
+    } else {
+      return colsByPos_.stream().
+          filter(col -> !col.isHidden()).
+          collect(Collectors.toList());
+    }
   }
 
   @Override
diff --git a/fe/src/main/java/org/apache/impala/catalog/VirtualColumn.java 
b/fe/src/main/java/org/apache/impala/catalog/VirtualColumn.java
index 5810bb908..37f20bc44 100644
--- a/fe/src/main/java/org/apache/impala/catalog/VirtualColumn.java
+++ b/fe/src/main/java/org/apache/impala/catalog/VirtualColumn.java
@@ -49,6 +49,10 @@ public class VirtualColumn extends Column {
       "ICEBERG__DATA__SEQUENCE__NUMBER",
       Type.BIGINT,
       TVirtualColumnType.ICEBERG_DATA_SEQUENCE_NUMBER);
+  public static VirtualColumn ICEBERG_FIRST_ROW_ID = new VirtualColumn(
+      "ICEBERG__FIRST__ROW__ID",
+      Type.BIGINT,
+      TVirtualColumnType.ICEBERG_FIRST_ROW_ID);
 
   // Paimon-related virtual columns.
   public static VirtualColumn PARTITION_VALUE_SERIALIZED = new
@@ -67,6 +71,7 @@ public class VirtualColumn extends Column {
       case ICEBERG_DATA_SEQUENCE_NUMBER: return ICEBERG_DATA_SEQUENCE_NUMBER;
       case PARTITION_VALUE_SERIALIZED: return PARTITION_VALUE_SERIALIZED;
       case BUCKET_ID: return BUCKET_ID;
+      case ICEBERG_FIRST_ROW_ID: return ICEBERG_FIRST_ROW_ID;
       default: break;
     }
     return null;
diff --git 
a/fe/src/main/java/org/apache/impala/catalog/iceberg/IcebergCtasTarget.java 
b/fe/src/main/java/org/apache/impala/catalog/iceberg/IcebergCtasTarget.java
index 44a317d6f..249ebced8 100644
--- a/fe/src/main/java/org/apache/impala/catalog/iceberg/IcebergCtasTarget.java
+++ b/fe/src/main/java/org/apache/impala/catalog/iceberg/IcebergCtasTarget.java
@@ -17,6 +17,7 @@
 
 package org.apache.impala.catalog.iceberg;
 
+import java.lang.reflect.Field;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
@@ -29,6 +30,7 @@ import com.google.common.base.Preconditions;
 import org.apache.iceberg.CatalogProperties;
 import org.apache.iceberg.PartitionSpec;
 import org.apache.iceberg.Schema;
+import org.apache.iceberg.TableMetadata;
 import org.apache.iceberg.catalog.Namespace;
 import org.apache.iceberg.catalog.TableIdentifier;
 import org.apache.iceberg.types.TypeUtil;
@@ -276,6 +278,27 @@ public class IcebergCtasTarget extends CtasTargetTable 
implements FeIcebergTable
     return null;
   }
 
+  @Override
+  public int getFormatVersion() {
+    String version = msTable_.getParameters().get(IcebergTable.FORMAT_VERSION);
+    try {
+      if (version != null) {
+        return Integer.parseInt(version);
+      } else {
+        // Try to return the library-default. Since 
DEFAULT_TABLE_FORMAT_VERSION is
+        // private in class TableMetadata, we need to use reflection.
+        Field field = TableMetadata.class.getDeclaredField(
+            "DEFAULT_TABLE_FORMAT_VERSION");
+        field.setAccessible(true);
+        return (int) field.get(null);
+      }
+    } catch (NumberFormatException e) {
+      throw new IllegalStateException("Invalid format version: " + version);
+    } catch (Exception e) {
+      throw new IllegalStateException("Unable to determine default format 
version", e);
+    }
+  }
+
   public void addColumn(IcebergColumn col) {
     colsByPos_.add(col);
     colsByName_.put(col.getName().toLowerCase(), col);
diff --git 
a/fe/src/main/java/org/apache/impala/catalog/local/IcebergMetaProvider.java 
b/fe/src/main/java/org/apache/impala/catalog/local/IcebergMetaProvider.java
index bdbb9cea9..053a4b765 100644
--- a/fe/src/main/java/org/apache/impala/catalog/local/IcebergMetaProvider.java
+++ b/fe/src/main/java/org/apache/impala/catalog/local/IcebergMetaProvider.java
@@ -511,6 +511,7 @@ public class IcebergMetaProvider implements MetaProvider {
       ret.add(VirtualColumn.PARTITION_SPEC_ID);
       ret.add(VirtualColumn.ICEBERG_PARTITION_SERIALIZED);
       ret.add(VirtualColumn.ICEBERG_DATA_SEQUENCE_NUMBER);
+      ret.add(VirtualColumn.ICEBERG_FIRST_ROW_ID);
       return ret;
     }
 
diff --git 
a/fe/src/main/java/org/apache/impala/catalog/local/LocalIcebergTable.java 
b/fe/src/main/java/org/apache/impala/catalog/local/LocalIcebergTable.java
index 408234da4..f09c62871 100644
--- a/fe/src/main/java/org/apache/impala/catalog/local/LocalIcebergTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/local/LocalIcebergTable.java
@@ -23,9 +23,11 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.stream.Collectors;
 
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.iceberg.BaseTable;
 import org.apache.impala.analysis.IcebergPartitionSpec;
 import org.apache.impala.catalog.CatalogObject.ThriftObjectType;
 import org.apache.impala.catalog.Column;
@@ -33,8 +35,10 @@ import org.apache.impala.catalog.FeCatalogUtils;
 import org.apache.impala.catalog.FeFsPartition;
 import org.apache.impala.catalog.FeFsTable;
 import org.apache.impala.catalog.FeIcebergTable;
+import org.apache.impala.catalog.IcebergColumn;
 import org.apache.impala.catalog.IcebergContentFileStore;
 import org.apache.impala.catalog.TableLoadingException;
+import org.apache.impala.catalog.Type;
 import org.apache.impala.catalog.local.MetaProvider.TableMetaRef;
 import org.apache.impala.common.ImpalaRuntimeException;
 import org.apache.impala.thrift.TCompressionCodec;
@@ -97,6 +101,10 @@ public class LocalIcebergTable extends LocalTable 
implements FeIcebergTable {
       List<Column> iceColumns = IcebergSchemaConverter.convertToImpalaSchema(
           icebergApiTable.schema());
       validateColumns(iceColumns, msTable.getSd().getCols());
+      int formatVersion =
+          ((BaseTable) icebergApiTable).operations().current().formatVersion();
+      iceColumns.addAll(
+          FeIcebergTable.getHiddenColumns(formatVersion, iceColumns.size()));
       ColumnMap colMap = new ColumnMap(iceColumns,
           /*numClusteringCols=*/ 0,
           db.getName() + "." + msTable.getTableName(),
@@ -172,6 +180,17 @@ public class LocalIcebergTable extends LocalTable 
implements FeIcebergTable {
     }
   }
 
+  @Override
+  public List<Column> getColumnsInHiveOrder() {
+    if (getFormatVersion() < 3) {
+      return super.getColumnsInHiveOrder();
+    } else {
+      return super.getColumnsInHiveOrder().stream()
+          .filter(col -> !col.isHidden())
+          .collect(Collectors.toList());
+    }
+  }
+
   @Override
   public TIcebergFileFormat getIcebergFileFormat() {
     return icebergFileFormat_;
diff --git a/fe/src/main/java/org/apache/impala/planner/IcebergScanPlanner.java 
b/fe/src/main/java/org/apache/impala/planner/IcebergScanPlanner.java
index 27ec98de4..3f5546d4b 100644
--- a/fe/src/main/java/org/apache/impala/planner/IcebergScanPlanner.java
+++ b/fe/src/main/java/org/apache/impala/planner/IcebergScanPlanner.java
@@ -267,7 +267,7 @@ public class IcebergScanPlanner {
       if (column == null) continue;
       int fieldId = ((IcebergColumn) column).getFieldId();
       NestedField field = 
getIceTable().getIcebergApiTable().schema().findField(fieldId);
-      if (field.initialDefaultLiteral() != null) {
+      if (field != null && field.initialDefaultLiteral() != null) {
         throw new ImpalaRuntimeException(String.format(
             "Iceberg columns with default values not supported yet. " +
             "Table: %s Column: %s Default value: %s",
diff --git 
a/fe/src/main/java/org/apache/impala/service/DescribeResultFactory.java 
b/fe/src/main/java/org/apache/impala/service/DescribeResultFactory.java
index b40bf4a9e..4ec02b1a1 100644
--- a/fe/src/main/java/org/apache/impala/service/DescribeResultFactory.java
+++ b/fe/src/main/java/org/apache/impala/service/DescribeResultFactory.java
@@ -215,6 +215,7 @@ public class DescribeResultFactory {
     List<Column> nonClustered = new ArrayList<Column>();
     List<Column> clustered = new ArrayList<Column>();
     for (Column col: filteredColumns) {
+      if (col.isHidden()) continue;
       if (table.isClusteringColumn(col)) {
         clustered.add(col);
       } else {
@@ -341,6 +342,7 @@ public class DescribeResultFactory {
     TDescribeResult descResult = new TDescribeResult();
     descResult.results = Lists.newArrayList();
     for (Column c: columns) {
+      if (c.isHidden()) continue;
       Preconditions.checkState(c instanceof IcebergColumn);
       IcebergColumn icebergColumn = (IcebergColumn) c;
       // General describe info.
diff --git a/fe/src/main/java/org/apache/impala/util/IcebergUtil.java 
b/fe/src/main/java/org/apache/impala/util/IcebergUtil.java
index 20738d88b..660722896 100644
--- a/fe/src/main/java/org/apache/impala/util/IcebergUtil.java
+++ b/fe/src/main/java/org/apache/impala/util/IcebergUtil.java
@@ -1186,11 +1186,12 @@ public class IcebergUtil {
       // for manifest entries with status DELETED (older Iceberg versions)."
       FbIcebergMetadata.addDataSequenceNumber(fbb, -1l);
     }
-
     if (eqFieldIdsOffset != -1) {
       FbIcebergMetadata.addEqualityFieldIds(fbb, eqFieldIdsOffset);
     }
     FbIcebergMetadata.addPartId(fbb, partId);
+    FbIcebergMetadata.addFirstRowId(fbb,
+        cf.firstRowId() != null ? cf.firstRowId() : -1L);
     return FbIcebergMetadata.endFbIcebergMetadata(fbb);
   }
 
diff --git a/testdata/data/README b/testdata/data/README
index 831be8871..804b22c27 100644
--- a/testdata/data/README
+++ b/testdata/data/README
@@ -1219,6 +1219,16 @@ Since Spark still doesn't support default values, the 
metadata.json file was edi
 manually to set the default value for column 'j':
 
{"id":2,"name":"j","required":false,"type":"int","initial-default":-1,"write-default":-1}
 
+testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage(_orc):
+Iceberg V3 table created by Spark 4.0.2, see above 
(iceberg_v3_deletion_vectors).
+sql("create table iceberg_v3_row_lineage (i int) using iceberg 
options('format-version'='3')").collect()
+sql("create table iceberg_v3_row_lineage_orc (i int) using iceberg 
options('format-version'='3', 'write.format.default'='orc')").collect()
+The following commands are the same for both tables:
+sql("insert into iceberg_v3_row_lineage values(1)").collect()
+sql("insert into iceberg_v3_row_lineage values(2)").collect()
+sql("insert into iceberg_v3_row_lineage values(3)").collect()
+sql("CALL spark_catalog.system.rewrite_data_files('iceberg_v3_row_lineage', 
options => map('min-input-files', '2'))").show()
+
 arrays_big.parq:
 Generated with RandomNestedDataGenerator.java from the following schema:
 {
diff --git 
a/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/data/00000-0-153001a8-dc43-4e8b-ad61-b691a1754e16-0-00001.parquet
 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/data/00000-0-153001a8-dc43-4e8b-ad61-b691a1754e16-0-00001.parquet
new file mode 100644
index 000000000..0ce222477
Binary files /dev/null and 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/data/00000-0-153001a8-dc43-4e8b-ad61-b691a1754e16-0-00001.parquet
 differ
diff --git 
a/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/data/00000-1-9e4c5793-eb01-410d-a963-807e22437794-0-00001.parquet
 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/data/00000-1-9e4c5793-eb01-410d-a963-807e22437794-0-00001.parquet
new file mode 100644
index 000000000..5238874b4
Binary files /dev/null and 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/data/00000-1-9e4c5793-eb01-410d-a963-807e22437794-0-00001.parquet
 differ
diff --git 
a/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/data/00000-1-e55b64a3-1aa3-4a3c-87a1-cd3d2988c499-0-00001.parquet
 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/data/00000-1-e55b64a3-1aa3-4a3c-87a1-cd3d2988c499-0-00001.parquet
new file mode 100644
index 000000000..40c1d159f
Binary files /dev/null and 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/data/00000-1-e55b64a3-1aa3-4a3c-87a1-cd3d2988c499-0-00001.parquet
 differ
diff --git 
a/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/data/00000-2-d67e29ee-b654-4420-a7a5-9d7964ffd9c9-0-00001.parquet
 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/data/00000-2-d67e29ee-b654-4420-a7a5-9d7964ffd9c9-0-00001.parquet
new file mode 100644
index 000000000..8b39ce9bb
Binary files /dev/null and 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/data/00000-2-d67e29ee-b654-4420-a7a5-9d7964ffd9c9-0-00001.parquet
 differ
diff --git 
a/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/metadata/7411e291-ddc0-4c54-9e25-75ef7878df0d-m0.avro
 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/metadata/7411e291-ddc0-4c54-9e25-75ef7878df0d-m0.avro
new file mode 100644
index 000000000..9eee22538
Binary files /dev/null and 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/metadata/7411e291-ddc0-4c54-9e25-75ef7878df0d-m0.avro
 differ
diff --git 
a/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/metadata/7411e291-ddc0-4c54-9e25-75ef7878df0d-m1.avro
 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/metadata/7411e291-ddc0-4c54-9e25-75ef7878df0d-m1.avro
new file mode 100644
index 000000000..300a51040
Binary files /dev/null and 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/metadata/7411e291-ddc0-4c54-9e25-75ef7878df0d-m1.avro
 differ
diff --git 
a/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/metadata/7411e291-ddc0-4c54-9e25-75ef7878df0d-m2.avro
 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/metadata/7411e291-ddc0-4c54-9e25-75ef7878df0d-m2.avro
new file mode 100644
index 000000000..a2ce9c5fd
Binary files /dev/null and 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/metadata/7411e291-ddc0-4c54-9e25-75ef7878df0d-m2.avro
 differ
diff --git 
a/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/metadata/7411e291-ddc0-4c54-9e25-75ef7878df0d-m3.avro
 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/metadata/7411e291-ddc0-4c54-9e25-75ef7878df0d-m3.avro
new file mode 100644
index 000000000..1e42f733d
Binary files /dev/null and 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/metadata/7411e291-ddc0-4c54-9e25-75ef7878df0d-m3.avro
 differ
diff --git 
a/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/metadata/7a6ede87-b2d9-462e-9baa-77e456f07671-m0.avro
 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/metadata/7a6ede87-b2d9-462e-9baa-77e456f07671-m0.avro
new file mode 100644
index 000000000..7d007a521
Binary files /dev/null and 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/metadata/7a6ede87-b2d9-462e-9baa-77e456f07671-m0.avro
 differ
diff --git 
a/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/metadata/8ea2cf61-8fe7-4599-923a-d64b424cae3f-m0.avro
 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/metadata/8ea2cf61-8fe7-4599-923a-d64b424cae3f-m0.avro
new file mode 100644
index 000000000..bf8034af0
Binary files /dev/null and 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/metadata/8ea2cf61-8fe7-4599-923a-d64b424cae3f-m0.avro
 differ
diff --git 
a/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/metadata/e46e6fcd-0a4e-4001-a0db-e199a5eb4227-m0.avro
 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/metadata/e46e6fcd-0a4e-4001-a0db-e199a5eb4227-m0.avro
new file mode 100644
index 000000000..04c7cde43
Binary files /dev/null and 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/metadata/e46e6fcd-0a4e-4001-a0db-e199a5eb4227-m0.avro
 differ
diff --git 
a/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/metadata/fe2e965b-4685-4369-babf-31d13f81f10a-m0.avro
 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/metadata/fe2e965b-4685-4369-babf-31d13f81f10a-m0.avro
new file mode 100644
index 000000000..6b2ceab36
Binary files /dev/null and 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/metadata/fe2e965b-4685-4369-babf-31d13f81f10a-m0.avro
 differ
diff --git 
a/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/metadata/snap-2872597867664652808-1-fe2e965b-4685-4369-babf-31d13f81f10a.avro
 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/metadata/snap-2872597867664652808-1-fe2e965b-4685-4369-babf-31d13f81f10a.avro
new file mode 100644
index 000000000..84cbc57a8
Binary files /dev/null and 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/metadata/snap-2872597867664652808-1-fe2e965b-4685-4369-babf-31d13f81f10a.avro
 differ
diff --git 
a/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/metadata/snap-5398841822738664432-1-7411e291-ddc0-4c54-9e25-75ef7878df0d.avro
 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/metadata/snap-5398841822738664432-1-7411e291-ddc0-4c54-9e25-75ef7878df0d.avro
new file mode 100644
index 000000000..856258934
Binary files /dev/null and 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/metadata/snap-5398841822738664432-1-7411e291-ddc0-4c54-9e25-75ef7878df0d.avro
 differ
diff --git 
a/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/metadata/snap-7384452996480084466-1-e46e6fcd-0a4e-4001-a0db-e199a5eb4227.avro
 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/metadata/snap-7384452996480084466-1-e46e6fcd-0a4e-4001-a0db-e199a5eb4227.avro
new file mode 100644
index 000000000..b7c4ded01
Binary files /dev/null and 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/metadata/snap-7384452996480084466-1-e46e6fcd-0a4e-4001-a0db-e199a5eb4227.avro
 differ
diff --git 
a/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/metadata/snap-8059325670730066324-1-8ea2cf61-8fe7-4599-923a-d64b424cae3f.avro
 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/metadata/snap-8059325670730066324-1-8ea2cf61-8fe7-4599-923a-d64b424cae3f.avro
new file mode 100644
index 000000000..3e0fe4242
Binary files /dev/null and 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/metadata/snap-8059325670730066324-1-8ea2cf61-8fe7-4599-923a-d64b424cae3f.avro
 differ
diff --git 
a/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/metadata/v1.metadata.json
 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/metadata/v1.metadata.json
new file mode 100644
index 000000000..34f591ea2
--- /dev/null
+++ 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/metadata/v1.metadata.json
@@ -0,0 +1 @@
+{"format-version":3,"table-uuid":"a2b65bf0-9c89-4e90-8724-8686cf1ddd86","location":"hdfs://localhost:20500/test-warehouse/iceberg_v3_row_lineage","last-sequence-number":0,"last-updated-ms":1771415078434,"last-column-id":1,"current-schema-id":0,"schemas":[{"type":"struct","schema-id":0,"fields":[{"id":1,"name":"i","required":false,"type":"int"}]}],"default-spec-id":0,"partition-specs":[{"spec-id":0,"fields":[]}],"last-partition-id":999,"default-sort-order-id":0,"sort-orders":[{"order-id":
 [...]
\ No newline at end of file
diff --git 
a/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/metadata/v2.metadata.json
 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/metadata/v2.metadata.json
new file mode 100644
index 000000000..c35e4134f
--- /dev/null
+++ 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/metadata/v2.metadata.json
@@ -0,0 +1 @@
+{"format-version":3,"table-uuid":"a2b65bf0-9c89-4e90-8724-8686cf1ddd86","location":"hdfs://localhost:20500/test-warehouse/iceberg_v3_row_lineage","last-sequence-number":1,"last-updated-ms":1771415097197,"last-column-id":1,"current-schema-id":0,"schemas":[{"type":"struct","schema-id":0,"fields":[{"id":1,"name":"i","required":false,"type":"int"}]}],"default-spec-id":0,"partition-specs":[{"spec-id":0,"fields":[]}],"last-partition-id":999,"default-sort-order-id":0,"sort-orders":[{"order-id":
 [...]
\ No newline at end of file
diff --git 
a/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/metadata/v3.metadata.json
 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/metadata/v3.metadata.json
new file mode 100644
index 000000000..52c079f3e
--- /dev/null
+++ 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/metadata/v3.metadata.json
@@ -0,0 +1 @@
+{"format-version":3,"table-uuid":"a2b65bf0-9c89-4e90-8724-8686cf1ddd86","location":"hdfs://localhost:20500/test-warehouse/iceberg_v3_row_lineage","last-sequence-number":2,"last-updated-ms":1771415102411,"last-column-id":1,"current-schema-id":0,"schemas":[{"type":"struct","schema-id":0,"fields":[{"id":1,"name":"i","required":false,"type":"int"}]}],"default-spec-id":0,"partition-specs":[{"spec-id":0,"fields":[]}],"last-partition-id":999,"default-sort-order-id":0,"sort-orders":[{"order-id":
 [...]
\ No newline at end of file
diff --git 
a/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/metadata/v4.metadata.json
 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/metadata/v4.metadata.json
new file mode 100644
index 000000000..ee2be388a
--- /dev/null
+++ 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/metadata/v4.metadata.json
@@ -0,0 +1 @@
+{"format-version":3,"table-uuid":"a2b65bf0-9c89-4e90-8724-8686cf1ddd86","location":"hdfs://localhost:20500/test-warehouse/iceberg_v3_row_lineage","last-sequence-number":3,"last-updated-ms":1771415104882,"last-column-id":1,"current-schema-id":0,"schemas":[{"type":"struct","schema-id":0,"fields":[{"id":1,"name":"i","required":false,"type":"int"}]}],"default-spec-id":0,"partition-specs":[{"spec-id":0,"fields":[]}],"last-partition-id":999,"default-sort-order-id":0,"sort-orders":[{"order-id":
 [...]
diff --git 
a/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/metadata/v5.metadata.json
 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/metadata/v5.metadata.json
new file mode 100644
index 000000000..92d004b6f
--- /dev/null
+++ 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/metadata/v5.metadata.json
@@ -0,0 +1 @@
+{"format-version":3,"table-uuid":"a2b65bf0-9c89-4e90-8724-8686cf1ddd86","location":"hdfs://localhost:20500/test-warehouse/iceberg_v3_row_lineage","last-sequence-number":4,"last-updated-ms":1771415391328,"last-column-id":1,"current-schema-id":0,"schemas":[{"type":"struct","schema-id":0,"fields":[{"id":1,"name":"i","required":false,"type":"int"}]}],"default-spec-id":0,"partition-specs":[{"spec-id":0,"fields":[]}],"last-partition-id":999,"default-sort-order-id":0,"sort-orders":[{"order-id":
 [...]
diff --git 
a/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/metadata/version-hint.text
 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/metadata/version-hint.text
new file mode 100644
index 000000000..7ed6ff82d
--- /dev/null
+++ 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage/metadata/version-hint.text
@@ -0,0 +1 @@
+5
diff --git 
a/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/data/00000-0-e69cb204-0c90-4255-8b0b-7af3aec3f75d-0-00001.orc
 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/data/00000-0-e69cb204-0c90-4255-8b0b-7af3aec3f75d-0-00001.orc
new file mode 100644
index 000000000..45eec9326
Binary files /dev/null and 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/data/00000-0-e69cb204-0c90-4255-8b0b-7af3aec3f75d-0-00001.orc
 differ
diff --git 
a/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/data/00000-1-0ac66c53-638d-4aaf-9084-8a24b7aa2cdf-0-00001.orc
 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/data/00000-1-0ac66c53-638d-4aaf-9084-8a24b7aa2cdf-0-00001.orc
new file mode 100644
index 000000000..3bcfe72dc
Binary files /dev/null and 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/data/00000-1-0ac66c53-638d-4aaf-9084-8a24b7aa2cdf-0-00001.orc
 differ
diff --git 
a/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/data/00000-2-f69a801d-ce1f-478e-98e4-f5321d122361-0-00001.orc
 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/data/00000-2-f69a801d-ce1f-478e-98e4-f5321d122361-0-00001.orc
new file mode 100644
index 000000000..e8f08553a
Binary files /dev/null and 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/data/00000-2-f69a801d-ce1f-478e-98e4-f5321d122361-0-00001.orc
 differ
diff --git 
a/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/data/00000-3-84703627-8eea-44f1-a09b-e5bdad596090-0-00001.orc
 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/data/00000-3-84703627-8eea-44f1-a09b-e5bdad596090-0-00001.orc
new file mode 100644
index 000000000..8b781158a
Binary files /dev/null and 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/data/00000-3-84703627-8eea-44f1-a09b-e5bdad596090-0-00001.orc
 differ
diff --git 
a/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/metadata/3159a0a5-681d-4ac9-bf72-4be5814546cf-m0.avro
 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/metadata/3159a0a5-681d-4ac9-bf72-4be5814546cf-m0.avro
new file mode 100644
index 000000000..135ec7f60
Binary files /dev/null and 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/metadata/3159a0a5-681d-4ac9-bf72-4be5814546cf-m0.avro
 differ
diff --git 
a/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/metadata/4e12ed17-3e31-4d27-b35f-55467a2bf5fe-m0.avro
 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/metadata/4e12ed17-3e31-4d27-b35f-55467a2bf5fe-m0.avro
new file mode 100644
index 000000000..57c145d0b
Binary files /dev/null and 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/metadata/4e12ed17-3e31-4d27-b35f-55467a2bf5fe-m0.avro
 differ
diff --git 
a/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/metadata/8542d294-4d10-4efc-9e9d-69d3dce88108-m0.avro
 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/metadata/8542d294-4d10-4efc-9e9d-69d3dce88108-m0.avro
new file mode 100644
index 000000000..06e65c4ce
Binary files /dev/null and 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/metadata/8542d294-4d10-4efc-9e9d-69d3dce88108-m0.avro
 differ
diff --git 
a/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/metadata/8542d294-4d10-4efc-9e9d-69d3dce88108-m1.avro
 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/metadata/8542d294-4d10-4efc-9e9d-69d3dce88108-m1.avro
new file mode 100644
index 000000000..967829325
Binary files /dev/null and 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/metadata/8542d294-4d10-4efc-9e9d-69d3dce88108-m1.avro
 differ
diff --git 
a/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/metadata/8542d294-4d10-4efc-9e9d-69d3dce88108-m2.avro
 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/metadata/8542d294-4d10-4efc-9e9d-69d3dce88108-m2.avro
new file mode 100644
index 000000000..14b533a69
Binary files /dev/null and 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/metadata/8542d294-4d10-4efc-9e9d-69d3dce88108-m2.avro
 differ
diff --git 
a/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/metadata/8542d294-4d10-4efc-9e9d-69d3dce88108-m3.avro
 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/metadata/8542d294-4d10-4efc-9e9d-69d3dce88108-m3.avro
new file mode 100644
index 000000000..beb8b4692
Binary files /dev/null and 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/metadata/8542d294-4d10-4efc-9e9d-69d3dce88108-m3.avro
 differ
diff --git 
a/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/metadata/e5f99ba8-b804-434f-aa9e-d51e86cc0180-m0.avro
 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/metadata/e5f99ba8-b804-434f-aa9e-d51e86cc0180-m0.avro
new file mode 100644
index 000000000..a23dc050d
Binary files /dev/null and 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/metadata/e5f99ba8-b804-434f-aa9e-d51e86cc0180-m0.avro
 differ
diff --git 
a/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/metadata/snap-1530771818348079345-1-3159a0a5-681d-4ac9-bf72-4be5814546cf.avro
 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/metadata/snap-1530771818348079345-1-3159a0a5-681d-4ac9-bf72-4be5814546cf.avro
new file mode 100644
index 000000000..09d202c16
Binary files /dev/null and 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/metadata/snap-1530771818348079345-1-3159a0a5-681d-4ac9-bf72-4be5814546cf.avro
 differ
diff --git 
a/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/metadata/snap-7033898671372067760-1-4e12ed17-3e31-4d27-b35f-55467a2bf5fe.avro
 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/metadata/snap-7033898671372067760-1-4e12ed17-3e31-4d27-b35f-55467a2bf5fe.avro
new file mode 100644
index 000000000..8f3291024
Binary files /dev/null and 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/metadata/snap-7033898671372067760-1-4e12ed17-3e31-4d27-b35f-55467a2bf5fe.avro
 differ
diff --git 
a/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/metadata/snap-7330590250419058232-1-8542d294-4d10-4efc-9e9d-69d3dce88108.avro
 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/metadata/snap-7330590250419058232-1-8542d294-4d10-4efc-9e9d-69d3dce88108.avro
new file mode 100644
index 000000000..18bfeb7d3
Binary files /dev/null and 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/metadata/snap-7330590250419058232-1-8542d294-4d10-4efc-9e9d-69d3dce88108.avro
 differ
diff --git 
a/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/metadata/snap-7480347588879981313-1-e5f99ba8-b804-434f-aa9e-d51e86cc0180.avro
 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/metadata/snap-7480347588879981313-1-e5f99ba8-b804-434f-aa9e-d51e86cc0180.avro
new file mode 100644
index 000000000..584f8b48b
Binary files /dev/null and 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/metadata/snap-7480347588879981313-1-e5f99ba8-b804-434f-aa9e-d51e86cc0180.avro
 differ
diff --git 
a/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/metadata/v1.metadata.json
 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/metadata/v1.metadata.json
new file mode 100644
index 000000000..da1791cce
--- /dev/null
+++ 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/metadata/v1.metadata.json
@@ -0,0 +1 @@
+{"format-version":3,"table-uuid":"3decf673-93ce-40a0-92b2-df8f8574315b","location":"hdfs://localhost:20500/test-warehouse/iceberg_v3_row_lineage_orc","last-sequence-number":0,"last-updated-ms":1772116022882,"last-column-id":1,"current-schema-id":0,"schemas":[{"type":"struct","schema-id":0,"fields":[{"id":1,"name":"i","required":false,"type":"int"}]}],"default-spec-id":0,"partition-specs":[{"spec-id":0,"fields":[]}],"last-partition-id":999,"default-sort-order-id":0,"sort-orders":[{"order-
 [...]
\ No newline at end of file
diff --git 
a/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/metadata/v2.metadata.json
 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/metadata/v2.metadata.json
new file mode 100644
index 000000000..4c33385de
--- /dev/null
+++ 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/metadata/v2.metadata.json
@@ -0,0 +1 @@
+{"format-version":3,"table-uuid":"3decf673-93ce-40a0-92b2-df8f8574315b","location":"hdfs://localhost:20500/test-warehouse/iceberg_v3_row_lineage_orc","last-sequence-number":1,"last-updated-ms":1772116036640,"last-column-id":1,"current-schema-id":0,"schemas":[{"type":"struct","schema-id":0,"fields":[{"id":1,"name":"i","required":false,"type":"int"}]}],"default-spec-id":0,"partition-specs":[{"spec-id":0,"fields":[]}],"last-partition-id":999,"default-sort-order-id":0,"sort-orders":[{"order-
 [...]
\ No newline at end of file
diff --git 
a/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/metadata/v3.metadata.json
 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/metadata/v3.metadata.json
new file mode 100644
index 000000000..9dd875eae
--- /dev/null
+++ 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/metadata/v3.metadata.json
@@ -0,0 +1 @@
+{"format-version":3,"table-uuid":"3decf673-93ce-40a0-92b2-df8f8574315b","location":"hdfs://localhost:20500/test-warehouse/iceberg_v3_row_lineage_orc","last-sequence-number":2,"last-updated-ms":1772116040302,"last-column-id":1,"current-schema-id":0,"schemas":[{"type":"struct","schema-id":0,"fields":[{"id":1,"name":"i","required":false,"type":"int"}]}],"default-spec-id":0,"partition-specs":[{"spec-id":0,"fields":[]}],"last-partition-id":999,"default-sort-order-id":0,"sort-orders":[{"order-
 [...]
\ No newline at end of file
diff --git 
a/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/metadata/v4.metadata.json
 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/metadata/v4.metadata.json
new file mode 100644
index 000000000..985c0035b
--- /dev/null
+++ 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/metadata/v4.metadata.json
@@ -0,0 +1 @@
+{"format-version":3,"table-uuid":"3decf673-93ce-40a0-92b2-df8f8574315b","location":"hdfs://localhost:20500/test-warehouse/iceberg_v3_row_lineage_orc","last-sequence-number":3,"last-updated-ms":1772116049214,"last-column-id":1,"current-schema-id":0,"schemas":[{"type":"struct","schema-id":0,"fields":[{"id":1,"name":"i","required":false,"type":"int"}]}],"default-spec-id":0,"partition-specs":[{"spec-id":0,"fields":[]}],"last-partition-id":999,"default-sort-order-id":0,"sort-orders":[{"order-
 [...]
\ No newline at end of file
diff --git 
a/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/metadata/v5.metadata.json
 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/metadata/v5.metadata.json
new file mode 100644
index 000000000..9527cd6f5
--- /dev/null
+++ 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/metadata/v5.metadata.json
@@ -0,0 +1 @@
+{"format-version":3,"table-uuid":"3decf673-93ce-40a0-92b2-df8f8574315b","location":"hdfs://localhost:20500/test-warehouse/iceberg_v3_row_lineage_orc","last-sequence-number":4,"last-updated-ms":1772116061523,"last-column-id":1,"current-schema-id":0,"schemas":[{"type":"struct","schema-id":0,"fields":[{"id":1,"name":"i","required":false,"type":"int"}]}],"default-spec-id":0,"partition-specs":[{"spec-id":0,"fields":[]}],"last-partition-id":999,"default-sort-order-id":0,"sort-orders":[{"order-
 [...]
diff --git 
a/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/metadata/version-hint.text
 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/metadata/version-hint.text
new file mode 100644
index 000000000..7ed6ff82d
--- /dev/null
+++ 
b/testdata/data/iceberg_test/iceberg_v3/iceberg_v3_row_lineage_orc/metadata/version-hint.text
@@ -0,0 +1 @@
+5
diff --git 
a/testdata/workloads/functional-query/queries/QueryTest/iceberg-v3-row-lineage.test
 
b/testdata/workloads/functional-query/queries/QueryTest/iceberg-v3-row-lineage.test
new file mode 100644
index 000000000..8c92f88fc
--- /dev/null
+++ 
b/testdata/workloads/functional-query/queries/QueryTest/iceberg-v3-row-lineage.test
@@ -0,0 +1,502 @@
+====
+---- QUERY
+select i from iceberg_v3_row_lineage;
+---- RESULTS
+1
+2
+3
+---- TYPES
+INT
+====
+---- QUERY
+# Hidden columns are not expanded by 'select *'
+select * from iceberg_v3_row_lineage;
+---- RESULTS
+1
+2
+3
+---- TYPES
+INT
+====
+---- QUERY
+select i, _file_row_id from iceberg_v3_row_lineage;
+---- RESULTS
+1,0
+2,1
+3,2
+---- TYPES
+INT,BIGINT
+====
+---- QUERY
+select _file_row_id from iceberg_v3_row_lineage;
+---- RESULTS
+0
+1
+2
+---- TYPES
+BIGINT
+====
+---- QUERY
+select _file_last_updated_sequence_number from iceberg_v3_row_lineage;
+---- RESULTS
+1
+2
+3
+---- TYPES
+BIGINT
+====
+---- QUERY
+select i, _file_row_id, _file_last_updated_sequence_number from 
iceberg_v3_row_lineage;
+---- RESULTS
+1,0,1
+2,1,2
+3,2,3
+---- TYPES
+INT,BIGINT,BIGINT
+====
+---- QUERY
+select i, _file_row_id, _file_last_updated_sequence_number
+from iceberg_v3_row_lineage for system_version as of 2872597867664652808;
+---- RESULTS
+1,NULL,NULL
+2,NULL,NULL
+3,NULL,NULL
+---- TYPES
+INT,BIGINT,BIGINT
+====
+---- QUERY
+# Calculate 'row_id' and 'last_updated_sequence_number'
+select i, coalesce(_file_row_id, ICEBERG__FIRST__ROW__ID + FILE__POSITION),
+          coalesce(_file_last_updated_sequence_number, 
ICEBERG__DATA__SEQUENCE__NUMBER)
+from iceberg_v3_row_lineage for system_version as of 2872597867664652808;
+---- RESULTS
+1,0,1
+2,1,2
+3,2,3
+---- TYPES
+INT,BIGINT,BIGINT
+====
+---- QUERY
+# Calculate 'row_id' and 'last_updated_sequence_number'
+select i, coalesce(_file_row_id, ICEBERG__FIRST__ROW__ID + FILE__POSITION),
+          coalesce(_file_last_updated_sequence_number, 
ICEBERG__DATA__SEQUENCE__NUMBER)
+from iceberg_v3_row_lineage;
+---- RESULTS
+1,0,1
+2,1,2
+3,2,3
+---- TYPES
+INT,BIGINT,BIGINT
+====
+---- QUERY
+INSERT INTO iceberg_v3_row_lineage (i) VALUES (4);
+====
+---- QUERY
+select *, _file_row_id, _file_last_updated_sequence_number from 
iceberg_v3_row_lineage;
+---- RESULTS
+1,0,1
+2,1,2
+3,2,3
+4,NULL,NULL
+---- TYPES
+INT,BIGINT,BIGINT
+====
+---- QUERY
+# Calculate 'row_id' and 'last_updated_sequence_number' after insert.
+# The new row should have 'row_id' = 6 because we had 3 original rows and 
during
+# compaction we wrote a file with 3 rows that blindly increased next-row-id of 
the
+# table.
+# The new row should have 'last_updated_sequence_number' = 5 because we had 3 
snapshots
+# for the original INSERTs, 1 snapshot for the compaction, and 1 snapshot for 
the new
+# INSERT.
+select i, coalesce(_file_row_id, ICEBERG__FIRST__ROW__ID + FILE__POSITION),
+          coalesce(_file_last_updated_sequence_number, 
ICEBERG__DATA__SEQUENCE__NUMBER)
+from iceberg_v3_row_lineage;
+---- RESULTS
+1,0,1
+2,1,2
+3,2,3
+4,6,5
+---- TYPES
+INT,BIGINT,BIGINT
+====
+---- QUERY
+INSERT OVERWRITE iceberg_v3_row_lineage (i) VALUES (5);
+====
+---- QUERY
+select *, _file_row_id, _file_last_updated_sequence_number from 
iceberg_v3_row_lineage;
+---- RESULTS
+5,NULL,NULL
+---- TYPES
+INT,BIGINT,BIGINT
+====
+---- QUERY
+# Calculate 'row_id' and 'last_updated_sequence_number' after overwrite.
+select i, coalesce(_file_row_id, ICEBERG__FIRST__ROW__ID + FILE__POSITION),
+          coalesce(_file_last_updated_sequence_number, 
ICEBERG__DATA__SEQUENCE__NUMBER)
+from iceberg_v3_row_lineage;
+---- RESULTS
+5,7,6
+---- TYPES
+INT,BIGINT,BIGINT
+====
+---- QUERY
+ALTER TABLE iceberg_v3_row_lineage EXECUTE ROLLBACK(5398841822738664432);
+====
+---- QUERY
+select *, _file_row_id, _file_last_updated_sequence_number from 
iceberg_v3_row_lineage;
+---- RESULTS
+1,0,1
+2,1,2
+3,2,3
+---- TYPES
+INT,BIGINT,BIGINT
+====
+---- QUERY
+compute stats iceberg_v3_row_lineage;
+show table stats iceberg_v3_row_lineage;
+---- RESULTS
+3,1,regex:.*,'NOT CACHED','NOT 
CACHED','PARQUET','false',regex:.*,'$ERASURECODE_POLICY'
+---- TYPES
+BIGINT,BIGINT,STRING,STRING,STRING,STRING,STRING,STRING,STRING
+====
+---- QUERY
+show column stats iceberg_v3_row_lineage;
+---- RESULTS
+'i','INT',3,0,4,4.0,-1,-1
+---- TYPES
+STRING,STRING,BIGINT,BIGINT,BIGINT,DOUBLE,BIGINT,BIGINT
+====
+---- QUERY
+describe iceberg_v3_row_lineage;
+---- RESULTS
+'i','int','','true'
+---- TYPES
+STRING,STRING,STRING,STRING
+====
+---- QUERY
+describe formatted iceberg_v3_row_lineage;
+---- RESULTS: VERIFY_IS_SUBSET
+'i','int','NULL'
+---- TYPES
+STRING,STRING,STRING
+====
+---- QUERY
+describe formatted iceberg_v3_row_lineage;
+---- RESULTS: VERIFY_IS_NOT_IN
+'_file_row_id',regex:.*,regex:.*
+'_file_last_updated_sequence_number',regex:.*,regex:.*
+---- TYPES
+STRING,STRING,STRING
+====
+---- QUERY
+# Data files in V2 tables don't have 'first-row-id'.
+CREATE TABLE v2_to_v3 (i int) STORED AS ICEBERG
+TBLPROPERTIES ('format-version'='2');
+
+INSERT INTO v2_to_v3 VALUES (1);
+SELECT ICEBERG__FIRST__ROW__ID, i FROM v2_to_v3;
+---- RESULTS
+NULL,1
+---- TYPES
+BIGINT,INT
+====
+---- QUERY
+# After upgrading to V3, the existing data files still don't have 
'first-row-id'.
+ALTER TABLE v2_to_v3 SET TBLPROPERTIES ('format-version'='3');
+SELECT ICEBERG__FIRST__ROW__ID, i FROM v2_to_v3;
+---- RESULTS
+NULL,1
+---- TYPES
+BIGINT,INT
+====
+---- QUERY
+# After adding new files, even existing files get an associated 'first-row-id'.
+# (Interestingly, their 'first-row-id' is higher than the 'first-row-id' of the
+# newly added files. This matches Spark's behavior)
+INSERT INTO v2_to_v3 VALUES (2);
+SELECT ICEBERG__FIRST__ROW__ID, i FROM v2_to_v3;
+---- RESULTS
+1,1
+0,2
+---- TYPES
+BIGINT,INT
+====
+---- QUERY
+# Data files in V2 tables don't have 'first-row-id'.
+CREATE TABLE v2_to_v3_part (s string)
+PARTITIONED BY SPEC (TRUNCATE(5, s))
+STORED AS ICEBERG
+TBLPROPERTIES ('format-version'='2');
+
+INSERT INTO v2_to_v3_part VALUES ('árvíztűrőtükörfúrógép'), ('árvíztűrő'), 
('űűű'),
+    ('你好hello'), ('你好world'), ('test%value'), ('test_value'), 
('wild%card_mix');
+SELECT ICEBERG__FIRST__ROW__ID, s FROM v2_to_v3_part;
+---- RESULTS: RAW_STRING
+NULL,'árvíztűrőtükörfúrógép'
+NULL,'árvíztűrő'
+NULL,'űűű'
+NULL,'你好hello'
+NULL,'你好world'
+NULL,'test%value'
+NULL,'test_value'
+NULL,'wild%card_mix'
+---- TYPES
+BIGINT,STRING
+====
+---- QUERY
+# After upgrading to V3, the existing data files still don't have 
'first-row-id'.
+ALTER TABLE v2_to_v3_part SET TBLPROPERTIES ('format-version'='3');
+SELECT ICEBERG__FIRST__ROW__ID, s FROM v2_to_v3_part;
+---- RESULTS: RAW_STRING
+NULL,'árvíztűrőtükörfúrógép'
+NULL,'árvíztűrő'
+NULL,'űűű'
+NULL,'你好hello'
+NULL,'你好world'
+NULL,'test%value'
+NULL,'test_value'
+NULL,'wild%card_mix'
+---- TYPES
+BIGINT,STRING
+====
+---- QUERY
+# After adding new files, even existing files get an associated 'first-row-id'.
+# (Interestingly, their 'first-row-id' is higher than the 'first-row-id' of the
+# newly added files. This matches Spark's behavior)
+INSERT INTO v2_to_v3_part VALUES ('IMPALA');
+SELECT ICEBERG__FIRST__ROW__ID, s FROM v2_to_v3_part;
+---- RESULTS: RAW_STRING
+0,'IMPALA'
+1,'árvíztűrőtükörfúrógép'
+1,'árvíztűrő'
+3,'űűű'
+4,'你好hello'
+5,'你好world'
+6,'test%value'
+7,'test_value'
+8,'wild%card_mix'
+---- TYPES
+BIGINT,STRING
+====
+---- QUERY
+SELECT ICEBERG__FIRST__ROW__ID, ICEBERG__DATA__SEQUENCE__NUMBER, s FROM 
v2_to_v3_part;
+---- RESULTS: RAW_STRING
+0,2,'IMPALA'
+1,1,'árvíztűrőtükörfúrógép'
+1,1,'árvíztűrő'
+3,1,'űűű'
+4,1,'你好hello'
+5,1,'你好world'
+6,1,'test%value'
+7,1,'test_value'
+8,1,'wild%card_mix'
+---- TYPES
+BIGINT,BIGINT,STRING
+====
+---- QUERY
+compute stats iceberg_v3_row_lineage (_file_row_id);
+---- CATCH
+COMPUTE STATS not supported for hidden column _file_row_id
+====
+---- QUERY
+compute stats iceberg_v3_row_lineage (_file_last_updated_sequence_number);
+---- CATCH
+COMPUTE STATS not supported for hidden column 
_file_last_updated_sequence_number
+====
+---- QUERY
+alter table iceberg_v3_row_lineage change column _file_row_id _file_row_id INT;
+---- CATCH
+cannot be altered.
+====
+---- QUERY
+alter table iceberg_v3_row_lineage change column 
_file_last_updated_sequence_number _file_last_updated_sequence_number INT;
+---- CATCH
+cannot be altered.
+====
+---- QUERY
+alter table iceberg_v3_row_lineage drop column _file_row_id;
+---- CATCH
+cannot be dropped.
+====
+---- QUERY
+alter table iceberg_v3_row_lineage drop column 
_file_last_updated_sequence_number;
+---- CATCH
+cannot be dropped.
+====
+---- QUERY
+alter table iceberg_v3_row_lineage add column _file_row_id INT;
+---- CATCH
+Column already exists: _file_row_id
+====
+---- QUERY
+alter table iceberg_v3_row_lineage add column 
_file_last_updated_sequence_number INT;
+---- CATCH
+Column already exists: _file_last_updated_sequence_number
+====
+---- QUERY
+insert into iceberg_v3_row_lineage (i, _file_row_id) values (100, 100);
+---- CATCH
+Column '_file_row_id' in column permutation is hidden and cannot be targeted 
for insert
+====
+---- QUERY
+insert into iceberg_v3_row_lineage (i, _file_last_updated_sequence_number) 
values (100, 100);
+---- CATCH
+Column '_file_last_updated_sequence_number' in column permutation is hidden 
and cannot be targeted for insert
+====
+---- QUERY
+select i from iceberg_v3_row_lineage_orc;
+---- RESULTS
+1
+2
+3
+---- TYPES
+INT
+====
+---- QUERY
+# Hidden columns are not expanded by 'select *'
+select * from iceberg_v3_row_lineage_orc;
+---- RESULTS
+1
+2
+3
+---- TYPES
+INT
+====
+---- QUERY
+select i, _file_row_id from iceberg_v3_row_lineage_orc;
+---- RESULTS
+1,0
+2,1
+3,2
+---- TYPES
+INT,BIGINT
+====
+---- QUERY
+select _file_row_id from iceberg_v3_row_lineage_orc;
+---- RESULTS
+0
+1
+2
+---- TYPES
+BIGINT
+====
+---- QUERY
+select _file_last_updated_sequence_number from iceberg_v3_row_lineage_orc;
+---- RESULTS
+1
+2
+3
+---- TYPES
+BIGINT
+====
+---- QUERY
+select i, _file_row_id, _file_last_updated_sequence_number from 
iceberg_v3_row_lineage_orc;
+---- RESULTS
+1,0,1
+2,1,2
+3,2,3
+---- TYPES
+INT,BIGINT,BIGINT
+====
+---- QUERY
+select i, _file_row_id, _file_last_updated_sequence_number
+from iceberg_v3_row_lineage_orc for system_version as of 7033898671372067760;
+---- RESULTS
+1,NULL,NULL
+2,NULL,NULL
+3,NULL,NULL
+---- TYPES
+INT,BIGINT,BIGINT
+====
+---- QUERY
+# Calculate 'row_id' and 'last_updated_sequence_number'
+select i, coalesce(_file_row_id, ICEBERG__FIRST__ROW__ID + FILE__POSITION),
+          coalesce(_file_last_updated_sequence_number, 
ICEBERG__DATA__SEQUENCE__NUMBER)
+from iceberg_v3_row_lineage_orc for system_version as of 7033898671372067760;
+---- RESULTS
+1,0,1
+2,1,2
+3,2,3
+---- TYPES
+INT,BIGINT,BIGINT
+====
+---- QUERY
+# Calculate 'row_id' and 'last_updated_sequence_number'
+select i, coalesce(_file_row_id, ICEBERG__FIRST__ROW__ID + FILE__POSITION),
+          coalesce(_file_last_updated_sequence_number, 
ICEBERG__DATA__SEQUENCE__NUMBER)
+from iceberg_v3_row_lineage_orc;
+---- RESULTS
+1,0,1
+2,1,2
+3,2,3
+---- TYPES
+INT,BIGINT,BIGINT
+====
+---- QUERY
+compute stats iceberg_v3_row_lineage_orc;
+show table stats iceberg_v3_row_lineage_orc;
+---- RESULTS
+3,1,regex:.*,'NOT CACHED','NOT 
CACHED','ORC','false',regex:.*,'$ERASURECODE_POLICY'
+---- TYPES
+BIGINT,BIGINT,STRING,STRING,STRING,STRING,STRING,STRING,STRING
+====
+---- QUERY
+show column stats iceberg_v3_row_lineage_orc;
+---- RESULTS
+'i','INT',3,0,4,4.0,-1,-1
+---- TYPES
+STRING,STRING,BIGINT,BIGINT,BIGINT,DOUBLE,BIGINT,BIGINT
+====
+---- QUERY
+describe iceberg_v3_row_lineage_orc;
+---- RESULTS
+'i','int','','true'
+---- TYPES
+STRING,STRING,STRING,STRING
+====
+---- QUERY
+describe formatted iceberg_v3_row_lineage_orc;
+---- RESULTS: VERIFY_IS_SUBSET
+'i','int','NULL'
+---- TYPES
+STRING,STRING,STRING
+====
+---- QUERY
+describe formatted iceberg_v3_row_lineage_orc;
+---- RESULTS: VERIFY_IS_NOT_IN
+'_file_row_id',regex:.*,regex:.*
+'_file_last_updated_sequence_number',regex:.*,regex:.*
+---- TYPES
+STRING,STRING,STRING
+====
+---- QUERY
+compute stats iceberg_v3_row_lineage_orc (_file_row_id);
+---- CATCH
+COMPUTE STATS not supported for hidden column _file_row_id
+====
+---- QUERY
+compute stats iceberg_v3_row_lineage_orc (_file_last_updated_sequence_number);
+---- CATCH
+COMPUTE STATS not supported for hidden column 
_file_last_updated_sequence_number
+====
+---- QUERY
+alter table iceberg_v3_row_lineage_orc change column _file_row_id _file_row_id 
INT;
+---- CATCH
+cannot be altered.
+====
+---- QUERY
+alter table iceberg_v3_row_lineage_orc change column 
_file_last_updated_sequence_number _file_last_updated_sequence_number INT;
+---- CATCH
+cannot be altered.
+====
+---- QUERY
+alter table iceberg_v3_row_lineage_orc drop column _file_row_id;
+---- CATCH
+cannot be dropped.
+====
+---- QUERY
+alter table iceberg_v3_row_lineage_orc drop column 
_file_last_updated_sequence_number;
+---- CATCH
+cannot be dropped.
+====
diff --git a/tests/query_test/test_iceberg.py b/tests/query_test/test_iceberg.py
index 63f834ae1..2dfff1f6e 100644
--- a/tests/query_test/test_iceberg.py
+++ b/tests/query_test/test_iceberg.py
@@ -2320,20 +2320,54 @@ class TestIcebergV3Table(IcebergTestSuite):
     cls.ImpalaTestMatrix.add_constraint(
       lambda v: v.get_value('table_format').file_format == 'parquet')
 
+  def load_table(self, database, table_name, format="parquet"):
+    create_iceberg_table_from_directory(self.client, database,
+        table_name, format,
+        table_location="${IMPALA_HOME}/testdata/data/iceberg_test/iceberg_v3",
+        warehouse_prefix=os.getenv("FILESYSTEM_PREFIX"))
+
   def test_v3_basic(self, vector, unique_database):
     self.run_test_case('QueryTest/iceberg-v3-basic', vector, unique_database)
 
   def test_v3_negative(self, vector, unique_database):
-    create_iceberg_table_from_directory(self.client, unique_database,
-        "iceberg_v3_deletion_vectors", "parquet",
-        table_location="${IMPALA_HOME}/testdata/data/iceberg_test/iceberg_v3",
-        warehouse_prefix=os.getenv("FILESYSTEM_PREFIX"))
-    create_iceberg_table_from_directory(self.client, unique_database,
-        "iceberg_v3_default_value", "parquet",
-        table_location="${IMPALA_HOME}/testdata/data/iceberg_test/iceberg_v3",
-        warehouse_prefix=os.getenv("FILESYSTEM_PREFIX"))
+    self.load_table(unique_database, "iceberg_v3_deletion_vectors")
+    self.load_table(unique_database, "iceberg_v3_default_value")
     self.run_test_case('QueryTest/iceberg-v3-negative', vector, 
unique_database)
 
+  def test_v3_row_lineage(self, vector, unique_database):
+    self.load_table(unique_database, "iceberg_v3_row_lineage")
+    self.load_table(unique_database, "iceberg_v3_row_lineage_orc", 
format="orc")
+    self.run_test_case('QueryTest/iceberg-v3-row-lineage', vector, 
unique_database)
+
+  @SkipIf.not_dfs
+  def test_v3_row_lineage_file_schema(self, unique_database):
+    """Test that plain INSERTs only write user columns, not hidden metadata 
columns."""
+    table_name = "ice_impala_lineage"
+    qualified_table_name = "%s.%s" % (unique_database, table_name)
+    query = """create table %s (a string) stored as iceberg
+               tblproperties ('format-version'='3')""" % qualified_table_name
+    self.client.execute(query)
+
+    query = "insert into %s values ('impala')" % qualified_table_name
+    self.client.execute(query)
+
+    # Copy the created file to the local filesystem and parse metadata
+    local_file = '/tmp/ice_impala_lineage_%s.parq' % random.randint(0, 10000)
+    LOG.info("test_v3_row_lineage_file_schema local file name: " + local_file)
+    hdfs_file = get_fs_path('/test-warehouse/%s.db/%s/data/*.parq'
+        % (unique_database, table_name))
+    check_call(['hadoop', 'fs', '-copyToLocal', hdfs_file, local_file])
+    try:
+      metadata = get_parquet_metadata(local_file)
+      # Verify that hidden columns are not written.
+      assert len(metadata.schema) == 2
+      root_schema_element = metadata.schema[0]
+      assert root_schema_element.name == 'schema'
+      a_schema_element = metadata.schema[1]
+      assert a_schema_element.name == 'a'
+    finally:
+      os.remove(local_file)
+
 
 # Tests to exercise the DIRECTED distribution mode for V2 Iceberg tables. 
Note, that most
 # of the test coverage is in TestIcebergV2Table.test_read_position_deletes but 
since it

Reply via email to