This is an automated email from the ASF dual-hosted git repository.

stigahuang pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git


The following commit(s) were added to refs/heads/master by this push:
     new 275f03f10 IMPALA-12893: (part 2): Upgrade Iceberg to version 1.5.2
275f03f10 is described below

commit 275f03f10d9d123fa51a368b1bac784b1b50f57f
Author: Zoltan Borok-Nagy <[email protected]>
AuthorDate: Thu Mar 28 20:26:55 2024 +0100

    IMPALA-12893: (part 2): Upgrade Iceberg to version 1.5.2
    
    This patch updates CDP_BUILD_NUMBER to 71942734 in order to
    upgrade Iceberg to 1.5.2.
    
    This patch updates some tests so they pass with Iceberg 1.5.2. The
    behavior changes of Iceberg 1.5.2 are (compared to 1.3.1):
     * Iceberg V2 tables are created by default
     * Metadata tables have different schema
     * Parquet compression is explicitly set for new tables (even for ORC
       tables)
     * Sequence numbers are assigned a bit differently
    
    Updated the tests where needed.
    
    Code changes to accommodate the above behavior changes:
     * SHOW CREATE TABLE adds 'format-version'='1' for Iceberg V1 tables
     * CREATE TABLE statements don't throw errors when Parquet compression
       is set for ORC tables
    
    Change-Id: Ic4f9ed3f7ee9f686044023be938d6b1d18c8842e
    Reviewed-on: http://gerrit.cloudera.org:8080/23670
    Reviewed-by: Riza Suminto <[email protected]>
    Tested-by: Impala Public Jenkins <[email protected]>
---
 bin/impala-config.sh                               | 24 +++---
 .../apache/impala/analysis/CreateTableStmt.java    | 13 +--
 .../org/apache/impala/analysis/ToSqlUtils.java     |  8 +-
 .../queries/QueryTest/iceberg-alter-default.test   | 85 -------------------
 .../queries/QueryTest/iceberg-metadata-tables.test |  9 +-
 .../queries/QueryTest/iceberg-negative.test        | 16 ----
 .../iceberg-partitioned-insert-default.test        | 21 ++---
 .../queries/QueryTest/iceberg-virtual-columns.test | 10 +--
 .../QueryTest/show-create-table-with-stats.test    |  4 +-
 .../queries/QueryTest/show-create-table.test       | 95 +++++++++++++++-------
 10 files changed, 112 insertions(+), 173 deletions(-)

diff --git a/bin/impala-config.sh b/bin/impala-config.sh
index 3f8e359cd..3c982421a 100755
--- a/bin/impala-config.sh
+++ b/bin/impala-config.sh
@@ -240,20 +240,20 @@ fi
 : ${IMPALA_TOOLCHAIN_HOST:=native-toolchain.s3.amazonaws.com}
 export IMPALA_TOOLCHAIN_HOST
 
-export CDP_BUILD_NUMBER=66846208
+export CDP_BUILD_NUMBER=71942734
 export CDP_MAVEN_REPOSITORY=\
 
"https://${IMPALA_TOOLCHAIN_HOST}/build/cdp_components/${CDP_BUILD_NUMBER}/maven";
-export CDP_AVRO_JAVA_VERSION=1.11.1.7.3.1.500-30
-export CDP_HADOOP_VERSION=3.1.1.7.3.1.500-30
-export CDP_HBASE_VERSION=2.4.17.7.3.1.500-30
-export CDP_HIVE_VERSION=3.1.3000.7.3.1.500-30
-export CDP_ICEBERG_VERSION=1.3.1.7.3.1.500-30
-export CDP_KNOX_VERSION=2.0.0.7.3.1.500-30
-export CDP_ORC_JAVA_VERSION=1.8.3.7.3.1.500-30
-export CDP_OZONE_VERSION=1.4.0.7.3.1.500-30
-export CDP_PARQUET_VERSION=1.12.3.7.3.1.500-30
-export CDP_RANGER_VERSION=2.4.0.7.3.1.500-30
-export CDP_TEZ_VERSION=0.9.1.7.3.1.500-30
+export CDP_AVRO_JAVA_VERSION=1.11.1.7.3.1.500-182
+export CDP_HADOOP_VERSION=3.1.1.7.3.1.500-182
+export CDP_HBASE_VERSION=2.4.17.7.3.1.500-182
+export CDP_HIVE_VERSION=3.1.3000.7.3.1.500-182
+export CDP_ICEBERG_VERSION=1.5.2.7.3.1.500-182
+export CDP_KNOX_VERSION=2.0.0.7.3.1.500-182
+export CDP_ORC_JAVA_VERSION=1.8.3.7.3.1.500-182
+export CDP_OZONE_VERSION=1.4.0.7.3.1.500-182
+export CDP_PARQUET_VERSION=1.12.3.7.3.1.500-182
+export CDP_RANGER_VERSION=2.4.0.7.3.1.500-182
+export CDP_TEZ_VERSION=0.9.1.7.3.1.500-182
 
 # Ref: https://infra.apache.org/release-download-pages.html#closer
 : ${APACHE_MIRROR:="https://www.apache.org/dyn/closer.cgi"}
diff --git a/fe/src/main/java/org/apache/impala/analysis/CreateTableStmt.java 
b/fe/src/main/java/org/apache/impala/analysis/CreateTableStmt.java
index c80e68d06..6f19cd230 100644
--- a/fe/src/main/java/org/apache/impala/analysis/CreateTableStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/CreateTableStmt.java
@@ -699,7 +699,7 @@ public class CreateTableStmt extends StatementBase 
implements SingleTableStmt {
   private void addMergeOnReadPropertiesIfNeeded() {
     Map<String, String> tblProps = getTblProperties();
     String formatVersion = tblProps.get(TableProperties.FORMAT_VERSION);
-    if (formatVersion == null ||
+    if (formatVersion != null &&
         Integer.valueOf(formatVersion) < IcebergTable.ICEBERG_FORMAT_V2) {
       return;
     }
@@ -715,16 +715,7 @@ public class CreateTableStmt extends StatementBase 
implements SingleTableStmt {
 
   private void validateIcebergParquetCompressionCodec(
       TIcebergFileFormat icebergFileFormat) throws AnalysisException {
-    if (icebergFileFormat != TIcebergFileFormat.PARQUET) {
-      if 
(getTblProperties().containsKey(IcebergTable.PARQUET_COMPRESSION_CODEC)) {
-          throw new AnalysisException(IcebergTable.PARQUET_COMPRESSION_CODEC +
-              " should be set only for parquet file format");
-      }
-      if 
(getTblProperties().containsKey(IcebergTable.PARQUET_COMPRESSION_LEVEL)) {
-          throw new AnalysisException(IcebergTable.PARQUET_COMPRESSION_LEVEL +
-              " should be set only for parquet file format");
-      }
-    } else {
+    if (icebergFileFormat == TIcebergFileFormat.PARQUET) {
       StringBuilder errMsg = new StringBuilder();
       if (IcebergUtil.parseParquetCompressionCodec(true, getTblProperties(), 
errMsg)
           == null) {
diff --git a/fe/src/main/java/org/apache/impala/analysis/ToSqlUtils.java 
b/fe/src/main/java/org/apache/impala/analysis/ToSqlUtils.java
index 545b27de4..8c0b917da 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ToSqlUtils.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ToSqlUtils.java
@@ -35,6 +35,7 @@ import org.apache.commons.lang3.ObjectUtils;
 import org.apache.commons.lang3.StringEscapeUtils;
 import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.ql.parse.HiveLexer;
+import org.apache.iceberg.TableProperties;
 import org.apache.impala.catalog.CatalogException;
 import org.apache.impala.catalog.Column;
 import org.apache.impala.catalog.ColumnStats;
@@ -187,6 +188,11 @@ public class ToSqlUtils {
       }
       commonProps.remove(KuduTable.KEY_TABLE_ID);
     } else if (table instanceof FeIcebergTable) {
+      FeIcebergTable feIcebergTable = (FeIcebergTable) table;
+      if (feIcebergTable.getFormatVersion() == IcebergTable.ICEBERG_FORMAT_V1) 
{
+        commonProps.put(TableProperties.FORMAT_VERSION,
+                String.valueOf(IcebergTable.ICEBERG_FORMAT_V1));
+      }
       // Hide Iceberg internal metadata properties
       removeHiddenIcebergTableProperties(commonProps);
     } else if (table instanceof FePaimonTable) {
@@ -551,8 +557,8 @@ public class ToSqlUtils {
       if (table instanceof FeIcebergTable) {
         storageHandlerClassName = null;
 
+        FeIcebergTable feIcebergTable = (FeIcebergTable)table;
         // Fill "PARTITIONED BY SPEC" part if the Iceberg table is partitioned.
-        FeIcebergTable feIcebergTable= (FeIcebergTable)table;
         if (!feIcebergTable.getPartitionSpecs().isEmpty()) {
           IcebergPartitionSpec latestPartitionSpec =
               feIcebergTable.getDefaultPartitionSpec();
diff --git 
a/testdata/workloads/functional-query/queries/QueryTest/iceberg-alter-default.test
 
b/testdata/workloads/functional-query/queries/QueryTest/iceberg-alter-default.test
index 3a16b4a35..74b25189c 100644
--- 
a/testdata/workloads/functional-query/queries/QueryTest/iceberg-alter-default.test
+++ 
b/testdata/workloads/functional-query/queries/QueryTest/iceberg-alter-default.test
@@ -352,91 +352,6 @@ DESCRIBE FORMATTED iceberg_changing_parq_tblprops;
 string, string, string
 ====
 ---- QUERY
-CREATE TABLE iceberg_upgrade_v2_no_write_mode (i INT) STORED AS ICEBERG;
-DESCRIBE FORMATTED iceberg_upgrade_v2_no_write_mode;
----- RESULTS: VERIFY_IS_NOT_IN
-'','write.delete.mode   ','merge-on-read       '
-'','write.update.mode   ','merge-on-read       '
-'','write.merge.mode    ','merge-on-read       '
----- TYPES
-string, string, string
-====
----- QUERY
-# Setting format-version to 1 doesn't add write modes.
-ALTER TABLE iceberg_upgrade_v2_no_write_mode SET 
TBLPROPERTIES('format-version'='1');
-DESCRIBE FORMATTED iceberg_upgrade_v2_no_write_mode;
----- RESULTS: VERIFY_IS_NOT_IN
-'','write.delete.mode   ','merge-on-read       '
-'','write.update.mode   ','merge-on-read       '
-'','write.merge.mode    ','merge-on-read       '
----- TYPES
-string, string, string
-====
----- QUERY
-ALTER TABLE iceberg_upgrade_v2_no_write_mode SET 
TBLPROPERTIES('format-version'='2');
-DESCRIBE FORMATTED iceberg_upgrade_v2_no_write_mode;
----- RESULTS: VERIFY_IS_SUBSET
-'','write.delete.mode   ','merge-on-read       '
-'','write.update.mode   ','merge-on-read       '
-'','write.merge.mode    ','merge-on-read       '
----- TYPES
-string, string, string
-====
----- QUERY
-CREATE TABLE iceberg_upgrade_v2_delete_mode (i INT) STORED AS ICEBERG;
-ALTER TABLE iceberg_upgrade_v2_delete_mode
-SET TBLPROPERTIES('format-version'='2', 'write.delete.mode'='copy-on-write');
-DESCRIBE FORMATTED iceberg_upgrade_v2_delete_mode;
----- RESULTS: VERIFY_IS_SUBSET
-'','write.delete.mode   ','copy-on-write       '
----- TYPES
-string, string, string
-====
----- QUERY
-DESCRIBE FORMATTED iceberg_upgrade_v2_delete_mode;
----- RESULTS: VERIFY_IS_NOT_IN
-'','write.update.mode   ','merge-on-read       '
-'','write.merge.mode    ','merge-on-read       '
----- TYPES
-string, string, string
-====
----- QUERY
-CREATE TABLE iceberg_upgrade_v2_update_mode (i INT) STORED AS ICEBERG;
-ALTER TABLE iceberg_upgrade_v2_update_mode
-SET TBLPROPERTIES('format-version'='2', 'write.update.mode'='copy-on-write');
-DESCRIBE FORMATTED iceberg_upgrade_v2_update_mode;
----- RESULTS: VERIFY_IS_SUBSET
-'','write.update.mode   ','copy-on-write       '
----- TYPES
-string, string, string
-====
----- QUERY
-DESCRIBE FORMATTED iceberg_upgrade_v2_update_mode;
----- RESULTS: VERIFY_IS_NOT_IN
-'','write.delete.mode   ','merge-on-read       '
-'','write.merge.mode    ','merge-on-read       '
----- TYPES
-string, string, string
-====
----- QUERY
-CREATE TABLE iceberg_upgrade_v2_merge_mode (i INT) STORED AS ICEBERG;
-ALTER TABLE iceberg_upgrade_v2_merge_mode
-SET TBLPROPERTIES('format-version'='2', 'write.merge.mode'='merge-on-read');
-DESCRIBE FORMATTED iceberg_upgrade_v2_merge_mode;
----- RESULTS: VERIFY_IS_SUBSET
-'','write.merge.mode    ','merge-on-read       '
----- TYPES
-string, string, string
-====
----- QUERY
-DESCRIBE FORMATTED iceberg_upgrade_v2_merge_mode;
----- RESULTS: VERIFY_IS_NOT_IN
-'','write.update.mode   ','merge-on-read       '
-'','write.delete.mode   ','merge-on-read       '
----- TYPES
-string, string, string
-====
----- QUERY
 # Add a column that already exists and a new column that does not exist with
 # "if not exists" clause.
 ALTER TABLE ice_alter_cols ADD IF NOT EXISTS COLUMNS (a bigint, d bigint)
diff --git 
a/testdata/workloads/functional-query/queries/QueryTest/iceberg-metadata-tables.test
 
b/testdata/workloads/functional-query/queries/QueryTest/iceberg-metadata-tables.test
index 2d4b35895..438f11e35 100644
--- 
a/testdata/workloads/functional-query/queries/QueryTest/iceberg-metadata-tables.test
+++ 
b/testdata/workloads/functional-query/queries/QueryTest/iceberg-metadata-tables.test
@@ -58,7 +58,7 @@ TIMESTAMP,BIGINT,BIGINT,BOOLEAN
 select * from functional_parquet.iceberg_query_metadata.metadata_log_entries;
 ---- RESULTS
 # Example:
-# 2023-08-16 
12:18:11.061000000,'hdfs://localhost:20500/test-warehouse/functional_parquet.db/iceberg_test_metadata/metadata/00000-0ae98ebd-b200-4381-9d97-1f93954423a9.metadata.json',NULL,NULL,NULL
+# 2023-08-16 
12:18:11.061000000,'$NAMENODE/test-warehouse/functional_parquet.db/iceberg_test_metadata/metadata/00000-0ae98ebd-b200-4381-9d97-1f93954423a9.metadata.json',NULL,NULL,NULL
 
row_regex:\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}:\d{2}(\.\d{9})?,'$NAMENODE/test-warehouse/iceberg_test/metadata/iceberg_query_metadata/metadata/.*.metadata.json',NULL,NULL,NULL
 
row_regex:\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}:\d{2}(\.\d{9})?,'$NAMENODE/test-warehouse/iceberg_test/metadata/iceberg_query_metadata/metadata/.*.metadata.json',\d+,0,1
 
row_regex:\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}:\d{2}(\.\d{9})?,'$NAMENODE/test-warehouse/iceberg_test/metadata/iceberg_query_metadata/metadata/.*.metadata.json',\d+,0,2
@@ -101,9 +101,9 @@ INT,STRING,BIGINT,INT,BIGINT,INT,INT,INT,INT,INT,INT,STRING
 ---- QUERY
 select * from functional_parquet.iceberg_query_metadata.`partitions`;
 ---- RESULTS
-3,3,1,1,0,0
+3,3,1053,1,1,0,0,regex:.*,regex:\d+
 ---- TYPES
-BIGINT,INT,BIGINT,INT,BIGINT,INT
+BIGINT, INT, BIGINT, BIGINT, INT, BIGINT, INT, TIMESTAMP, BIGINT
 ====
 ---- QUERY
 select * from functional_parquet.iceberg_query_metadata.all_data_files;
@@ -995,10 +995,13 @@ describe 
functional_parquet.iceberg_query_metadata.`partitions`;
 ---- RESULTS
 'record_count','bigint','Count of records in data files','true'
 'file_count','int','Count of data files','true'
+'total_data_file_size_in_bytes','bigint','Total size in bytes of data 
files','true'
 'position_delete_record_count','bigint','Count of records in position delete 
files','true'
 'position_delete_file_count','int','Count of position delete files','true'
 'equality_delete_record_count','bigint','Count of records in equality delete 
files','true'
 'equality_delete_file_count','int','Count of equality delete files','true'
+'last_updated_at','timestamp','Commit time of snapshot that last updated this 
partition','true'
+'last_updated_snapshot_id','bigint','Id of snapshot that last updated this 
partition','true'
 ---- TYPES
 STRING,STRING,STRING,STRING
 ====
diff --git 
a/testdata/workloads/functional-query/queries/QueryTest/iceberg-negative.test 
b/testdata/workloads/functional-query/queries/QueryTest/iceberg-negative.test
index ff9f2b5aa..c8ec7a609 100644
--- 
a/testdata/workloads/functional-query/queries/QueryTest/iceberg-negative.test
+++ 
b/testdata/workloads/functional-query/queries/QueryTest/iceberg-negative.test
@@ -602,14 +602,6 @@ 
TBLPROPERTIES('write.parquet.dict-size-bytes'='1073741825');
 Parquet dictionary page size for Iceberg table should fall in the range of 
[65536..1073741824]
 ====
 ---- QUERY
-CREATE TABLE iceberg_wrong_parquet_comp_codec1 ( i int)
-STORED AS ICEBERG
-TBLPROPERTIES('write.format.default'='orc',
-'write.parquet.compression-codec'='snappy');
----- CATCH
-write.parquet.compression-codec should be set only for parquet file format
-====
----- QUERY
 CREATE TABLE iceberg_wrong_parquet_comp_codec2 ( i int)
 STORED AS ICEBERG
 TBLPROPERTIES('write.parquet.compression-codec'='snapp');
@@ -625,14 +617,6 @@ TBLPROPERTIES('write.parquet.compression-codec'='snapp');
 Invalid parquet compression codec for Iceberg table: snapp
 ====
 ---- QUERY
-CREATE TABLE iceberg_wrong_parquet_comp_level1 ( i int)
-STORED AS ICEBERG
-TBLPROPERTIES('write.format.default'='orc',
-'write.parquet.compression-level'='2');
----- CATCH
-write.parquet.compression-level should be set only for parquet file format
-====
----- QUERY
 CREATE TABLE iceberg_wrong_parquet_comp_level2 ( i int)
 STORED AS ICEBERG
 TBLPROPERTIES('write.parquet.compression-level'='2');
diff --git 
a/testdata/workloads/functional-query/queries/QueryTest/iceberg-partitioned-insert-default.test
 
b/testdata/workloads/functional-query/queries/QueryTest/iceberg-partitioned-insert-default.test
index da70926fa..09690af1d 100644
--- 
a/testdata/workloads/functional-query/queries/QueryTest/iceberg-partitioned-insert-default.test
+++ 
b/testdata/workloads/functional-query/queries/QueryTest/iceberg-partitioned-insert-default.test
@@ -507,8 +507,8 @@ SHOW PARTITIONS ice_alter_part;
 ---- RESULTS
 '{"i":"3","d":"18605"}',1,1
 '{"i":"4","d":"18606"}',1,1
-'{"i":"5","d":null,"d_year":"50","s_bucket_5":"2"}',1,1
-'{"i":"6","d":null,"d_year":"50","s_bucket_5":"0"}',1,1
+'{"i":"5","d_year":"50","s_bucket_5":"2"}',1,1
+'{"i":"6","d_year":"50","s_bucket_5":"0"}',1,1
 '{}',2,1
 ---- TYPES
 STRING, BIGINT, BIGINT
@@ -578,10 +578,10 @@ STRING, STRING, STRING, STRING
 ---- QUERY
 SHOW PARTITIONS ice_void;
 ---- RESULTS
+'{"d_year":"34","i":"4","s_null":null}',1,1
 '{"i_null":null,"s_trunc":"o","d_year":"31"}',2,1
 '{"i_null":null,"s_trunc":"t","d_year":"32"}',1,1
 '{"i_null":null,"s_trunc":"t","d_year":"33"}',1,1
-'{"i_null":null,"s_trunc":null,"d_year":"34","i":"4","s_null":null}',1,1
 ---- TYPES
 STRING, BIGINT, BIGINT
 ====
@@ -616,11 +616,11 @@ STRING, STRING, STRING, STRING
 ---- QUERY
 SHOW PARTITIONS ice_void;
 ---- RESULTS
+'{"d_year":"34","i":"4","s_null":null}',1,1
+'{"i":"5","s_null":null,"d_null":null}',2,1
 '{"i_null":null,"s_trunc":"o","d_year":"31"}',2,1
 '{"i_null":null,"s_trunc":"t","d_year":"32"}',1,1
 '{"i_null":null,"s_trunc":"t","d_year":"33"}',1,1
-'{"i_null":null,"s_trunc":null,"d_year":"34","i":"4","s_null":null}',1,1
-'{"i_null":null,"s_trunc":null,"d_year":null,"i":"5","s_null":null,"d_null":null}',2,1
 ---- TYPES
 STRING, BIGINT, BIGINT
 ====
@@ -658,17 +658,18 @@ STRING, STRING, STRING, STRING
 ---- QUERY
 SHOW PARTITIONS ice_void;
 ---- RESULTS
+'{"d_year":"34","i":"4","s_null":null}',1,1
+'{"i":"5","s_null":null,"d_null":null}',2,1
 '{"i_null":null,"s_trunc":"o","d_year":"31"}',2,1
 '{"i_null":null,"s_trunc":"t","d_year":"32"}',1,1
 '{"i_null":null,"s_trunc":"t","d_year":"33"}',1,1
-'{"i_null":null,"s_trunc":null,"d_year":"34","i":"4","s_null":null}',1,1
-'{"i_null":null,"s_trunc":null,"d_year":null,"i":"5","s_null":null,"d_null":null}',2,1
-'{"i_null":null,"s_trunc":null,"d_year":null,"i":null,"s_null":null,"d_null":null}',2,1
+'{"s_null":null,"d_null":null,"i_null":null}',2,1
 ---- TYPES
 STRING, BIGINT, BIGINT
 ====
 ---- QUERY
-create table store_sales partitioned by spec (ss_sold_date_sk) stored as 
iceberg
+create table store_sales partitioned by spec (ss_sold_date_sk)
+stored as iceberg
 as select * from tpcds_parquet.store_sales;
 select count(*) from store_sales;
 ---- RESULTS
@@ -790,7 +791,7 @@ show partitions special_char_partitions;
 '{"i":"3","s":"11=14=31","s_trunc":"11=1"}',1,1
 '{"i":"4","s":"","s_trunc":""}',1,1
 '{"i":"5","s":null,"s_trunc":null}',1,1
-'{"i":null,"s":null,"s_trunc":null,"s2":"98\\/22"}',1,1
+'{"s2":"98\\/22"}',1,1
 ---- TYPES
 STRING,BIGINT,BIGINT
 ====
diff --git 
a/testdata/workloads/functional-query/queries/QueryTest/iceberg-virtual-columns.test
 
b/testdata/workloads/functional-query/queries/QueryTest/iceberg-virtual-columns.test
index 8e1117189..f16c9080a 100644
--- 
a/testdata/workloads/functional-query/queries/QueryTest/iceberg-virtual-columns.test
+++ 
b/testdata/workloads/functional-query/queries/QueryTest/iceberg-virtual-columns.test
@@ -43,10 +43,10 @@ select col_i, ICEBERG__DATA__SEQUENCE__NUMBER from ice_tbl 
order by col_i;
 ---- TYPES
 INT,BIGINT
 ---- RESULTS
-0,0
-1,0
-3,0
-5,0
+0,1
+1,1
+3,1
+5,1
 ====
 ---- QUERY
 # select virtual colum without selecting any other slots.
@@ -54,7 +54,7 @@ select max(ICEBERG__DATA__SEQUENCE__NUMBER) from ice_tbl;
 ---- TYPES
 BIGINT
 ---- RESULTS
-0
+1
 ====
 ---- QUERY
 # Testing data sequence number for unpartitioned V2 tables.
diff --git 
a/testdata/workloads/functional-query/queries/QueryTest/show-create-table-with-stats.test
 
b/testdata/workloads/functional-query/queries/QueryTest/show-create-table-with-stats.test
index bd09639ce..3283893ae 100644
--- 
a/testdata/workloads/functional-query/queries/QueryTest/show-create-table-with-stats.test
+++ 
b/testdata/workloads/functional-query/queries/QueryTest/show-create-table-with-stats.test
@@ -65,8 +65,8 @@ CREATE EXTERNAL TABLE 
show_create_table_test_db.ice_with_stats (
 )
 STORED AS ICEBERG
 LOCATION '$$location_uri$$'
-TBLPROPERTIES ('OBJCAPABILITIES'='EXTREAD,EXTWRITE', 
'engine.hive.enabled'='true', 'external.table.purge'='TRUE', 
'impala.computeStatsSnapshotIds'='<NUM>', 
'impala.lastComputeStatsTime'='<NUM>', 'table_type'='ICEBERG', 
'write.format.default'='parquet');
-ALTER TABLE show_create_table_test_db.ice_with_stats SET TBLPROPERTIES 
('EXTERNAL'='TRUE', 'OBJCAPABILITIES'='EXTREAD,EXTWRITE', 
'engine.hive.enabled'='true', 'external.table.purge'='TRUE', 
'impala.computeStatsSnapshotIds'='<NUM>', 
'impala.events.catalogServiceId'='<NUM>', 
'impala.events.catalogVersion'='<NUM>', 'impala.lastComputeStatsTime'='<NUM>', 
'numFiles'='<NUM>', 'numRows'='<NUM>', 'table_type'='ICEBERG', 
'totalSize'='<NUM>', 'write.format.default'='parquet');
+TBLPROPERTIES ('OBJCAPABILITIES'='EXTREAD,EXTWRITE', 
'engine.hive.enabled'='true', 'external.table.purge'='TRUE', 
'impala.computeStatsSnapshotIds'='<NUM>', 
'impala.lastComputeStatsTime'='<NUM>', 'table_type'='ICEBERG', 
'write.delete.mode'='merge-on-read', 'write.format.default'='parquet', 
'write.merge.mode'='merge-on-read', 'write.parquet.compression-codec'='snappy', 
'write.update.mode'='merge-on-read');
+ALTER TABLE show_create_table_test_db.ice_with_stats SET TBLPROPERTIES 
('EXTERNAL'='TRUE', 'OBJCAPABILITIES'='EXTREAD,EXTWRITE', 
'engine.hive.enabled'='true', 'external.table.purge'='TRUE', 
'impala.computeStatsSnapshotIds'='<NUM>', 
'impala.events.catalogServiceId'='<NUM>', 
'impala.events.catalogVersion'='<NUM>', 'impala.lastComputeStatsTime'='<NUM>', 
'numFiles'='<NUM>', 'numRows'='<NUM>', 'table_type'='ICEBERG', 
'totalSize'='<NUM>', 'write.delete.mode'='merge-on-read', 'write.format.defa 
[...]
 ALTER TABLE show_create_table_test_db.ice_with_stats SET COLUMN STATS i 
('numDVs'='1', 'numNulls'='0', 'numTrues'='-1', 'numFalses'='-1');
 ALTER TABLE show_create_table_test_db.ice_with_stats SET COLUMN STATS s 
('numDVs'='1', 'numNulls'='0', 'maxSize'='1', 'avgSize'='1', 'numTrues'='-1', 
'numFalses'='-1');
 ====
diff --git 
a/testdata/workloads/functional-query/queries/QueryTest/show-create-table.test 
b/testdata/workloads/functional-query/queries/QueryTest/show-create-table.test
index e7b0cd85f..e7b1a7119 100644
--- 
a/testdata/workloads/functional-query/queries/QueryTest/show-create-table.test
+++ 
b/testdata/workloads/functional-query/queries/QueryTest/show-create-table.test
@@ -612,6 +612,9 @@ STORED AS ICEBERG
 LOCATION '$$location_uri$$'
 TBLPROPERTIES ('TRANSLATED_TO_EXTERNAL'='TRUE',
     'external.table.purge'='TRUE',
+    'write.delete.mode'='merge-on-read',
+    'write.update.mode'='merge-on-read',
+    'write.merge.mode'='merge-on-read',
     'write.format.default'='parquet',
     'write.parquet.compression-codec'='zstd',
     'write.parquet.compression-level'='12',
@@ -642,6 +645,9 @@ CREATE EXTERNAL TABLE 
show_create_table_test_db.iceberg_test2 (
 STORED AS ICEBERG
 TBLPROPERTIES ('TRANSLATED_TO_EXTERNAL'='TRUE',
     'external.table.purge'='TRUE',
+    'write.delete.mode'='merge-on-read',
+    'write.update.mode'='merge-on-read',
+    'write.merge.mode'='merge-on-read',
     'write.format.default'='parquet',
     'write.parquet.compression-codec'='zstd',
     'write.parquet.compression-level'='12',
@@ -674,6 +680,9 @@ CREATE EXTERNAL TABLE 
show_create_table_test_db.iceberg_test3 (
 STORED AS ICEBERG
 TBLPROPERTIES ('TRANSLATED_TO_EXTERNAL'='TRUE',
     'external.table.purge'='TRUE',
+    'write.delete.mode'='merge-on-read',
+    'write.update.mode'='merge-on-read',
+    'write.merge.mode'='merge-on-read',
     'write.format.default'='parquet',
     'write.parquet.compression-codec'='zstd',
     'write.parquet.compression-level'='12',
@@ -732,6 +741,9 @@ PARTITIONED BY SPEC (
 STORED AS ICEBERG
 TBLPROPERTIES ('TRANSLATED_TO_EXTERNAL'='TRUE',
     'external.table.purge'='TRUE',
+    'write.delete.mode'='merge-on-read',
+    'write.update.mode'='merge-on-read',
+    'write.merge.mode'='merge-on-read',
     'write.format.default'='parquet',
     'write.parquet.compression-codec'='zstd',
     'write.parquet.compression-level'='12',
@@ -756,8 +768,10 @@ CREATE EXTERNAL TABLE 
show_create_table_test_db.iceberg_test_orc (
 STORED AS ICEBERG
 LOCATION '$$location_uri$$'
 TBLPROPERTIES ('TRANSLATED_TO_EXTERNAL'='TRUE',
-    'external.table.purge'='TRUE', 'write.format.default'='orc',
-'engine.hive.enabled'='true', 'iceberg.catalog'='hadoop.tables')
+'external.table.purge'='TRUE', 'write.format.default'='orc',
+'engine.hive.enabled'='true', 'iceberg.catalog'='hadoop.tables',
+'write.delete.mode'='merge-on-read', 'write.update.mode'='merge-on-read',
+'write.merge.mode'='merge-on-read')
 ====
 ---- CREATE_TABLE
 # Default Iceberg table
@@ -771,8 +785,10 @@ CREATE EXTERNAL TABLE 
show_create_table_test_db.iceberg_default_tbl (
 )
 STORED AS ICEBERG
 LOCATION '$$location_uri$$'
-TBLPROPERTIES ('write.format.default'='parquet', 'engine.hive.enabled'='true',
-'external.table.purge'='TRUE', 'table_type'='ICEBERG')
+TBLPROPERTIES ('external.table.purge'='TRUE', 'write.format.default'='parquet',
+ 'engine.hive.enabled'='true', 'table_type'='ICEBERG', 
'write.delete.mode'='merge-on-read',
+ 'write.update.mode'='merge-on-read', 'write.merge.mode'='merge-on-read',
+ 'write.parquet.compression-codec'='snappy')
 ====
 ---- CREATE_TABLE
 # Default Iceberg table with ORC format
@@ -789,8 +805,10 @@ CREATE EXTERNAL TABLE 
show_create_table_test_db.iceberg_default_tbl_orc (
 STORED AS ICEBERG
 LOCATION '$$location_uri$$'
 TBLPROPERTIES ('TRANSLATED_TO_EXTERNAL'='TRUE',
-    'write.format.default'='orc',
-'engine.hive.enabled'='true', 'external.table.purge'='TRUE', 
'table_type'='ICEBERG')
+'write.format.default'='orc', 'write.parquet.compression-codec'='snappy',
+'engine.hive.enabled'='true', 'external.table.purge'='TRUE', 
'table_type'='ICEBERG',
+'write.update.mode'='merge-on-read', 'write.merge.mode'='merge-on-read',
+'write.delete.mode'='merge-on-read')
 ====
 ---- CREATE_TABLE
 # Iceberg table in HiveCatalog
@@ -807,8 +825,10 @@ CREATE EXTERNAL TABLE 
show_create_table_test_db.iceberg_hive_cat_explicit (
 STORED AS ICEBERG
 LOCATION '$$location_uri$$'
 TBLPROPERTIES ('TRANSLATED_TO_EXTERNAL'='TRUE',
-    'write.format.default'='parquet', 'iceberg.catalog'='hive.catalog',
-'engine.hive.enabled'='true', 'external.table.purge'='TRUE', 
'table_type'='ICEBERG')
+'write.format.default'='parquet', 'iceberg.catalog'='hive.catalog',
+'engine.hive.enabled'='true', 'external.table.purge'='TRUE', 
'table_type'='ICEBERG',
+'write.parquet.compression-codec'='snappy', 
'write.delete.mode'='merge-on-read',
+'write.update.mode'='merge-on-read', 'write.merge.mode'='merge-on-read')
 ====
 ---- CREATE_TABLE
 CREATE TABLE iceberg_nullable_test (
@@ -835,6 +855,9 @@ STORED AS ICEBERG
 LOCATION '$$location_uri$$'
 TBLPROPERTIES ('TRANSLATED_TO_EXTERNAL'='TRUE',
     'external.table.purge'='TRUE',
+    'write.delete.mode'='merge-on-read',
+    'write.update.mode'='merge-on-read',
+    'write.merge.mode'='merge-on-read',
     'write.format.default'='parquet',
     'write.parquet.compression-codec'='zstd',
     'write.parquet.compression-level'='12',
@@ -858,7 +881,9 @@ PARTITIONED BY SPEC (p, d)
 STORED AS ICEBERG
 LOCATION '$$location_uri$$'
 TBLPROPERTIES ('external.table.purge'='TRUE', 'write.format.default'='parquet',
-'engine.hive.enabled'='true', 'table_type'='ICEBERG')
+'engine.hive.enabled'='true', 'table_type'='ICEBERG', 
'write.parquet.compression-codec'='snappy',
+'write.delete.mode'='merge-on-read', 'write.update.mode'='merge-on-read',
+'write.merge.mode'='merge-on-read')
 ====
 ---- CREATE_TABLE
 CREATE TABLE iceberg_ctas
@@ -875,7 +900,9 @@ PARTITIONED BY SPEC (BUCKET(5, id))
 STORED AS ICEBERG
 LOCATION '$$location_uri$$'
 TBLPROPERTIES ('external.table.purge'='TRUE', 'write.format.default'='parquet',
-'engine.hive.enabled'='true', 'table_type'='ICEBERG')
+'engine.hive.enabled'='true', 'table_type'='ICEBERG', 
'write.parquet.compression-codec'='snappy',
+'write.delete.mode'='merge-on-read', 'write.update.mode'='merge-on-read',
+'write.merge.mode'='merge-on-read')
 ====
 ---- CREATE_TABLE
 CREATE TABLE iceberg_ctas_ht
@@ -894,8 +921,9 @@ PARTITIONED BY SPEC (BUCKET(5, id))
 STORED AS ICEBERG
 LOCATION '$$location_uri$$'
 TBLPROPERTIES ('TRANSLATED_TO_EXTERNAL'='TRUE',
-    'external.table.purge'='TRUE', 'write.format.default'='parquet',
-'engine.hive.enabled'='true', 'iceberg.catalog'='hadoop.tables')
+'external.table.purge'='TRUE', 'write.format.default'='parquet',
+'engine.hive.enabled'='true', 'iceberg.catalog'='hadoop.tables', 
'write.delete.mode'='merge-on-read',
+'write.update.mode'='merge-on-read', 'write.merge.mode'='merge-on-read')
 ====
 ---- CREATE_TABLE
 CREATE TABLE iceberg_catalogs_hive (i int)
@@ -909,8 +937,10 @@ PARTITIONED BY SPEC (BUCKET(3, i))
 STORED AS ICEBERG
 LOCATION '$$location_uri$$'
 TBLPROPERTIES ('TRANSLATED_TO_EXTERNAL'='TRUE',
-    'external.table.purge'='TRUE', 'write.format.default'='parquet',
-'engine.hive.enabled'='true', 'table_type'='ICEBERG')
+'external.table.purge'='TRUE', 'write.format.default'='parquet',
+'engine.hive.enabled'='true', 'table_type'='ICEBERG', 
'write.parquet.compression-codec'='snappy',
+'write.delete.mode'='merge-on-read', 'write.update.mode'='merge-on-read',
+'write.merge.mode'='merge-on-read')
 ====
 ---- CREATE_TABLE
 CREATE TABLE iceberg_catalogs_hadoop (i int)
@@ -924,8 +954,9 @@ PARTITIONED BY SPEC (BUCKET(3, i))
 STORED AS ICEBERG
 LOCATION '$$location_uri$$'
 TBLPROPERTIES ('TRANSLATED_TO_EXTERNAL'='TRUE',
-    'external.table.purge'='TRUE', 'write.format.default'='parquet',
-'engine.hive.enabled'='true', 'iceberg.catalog'='ice_hadoop_cat')
+'external.table.purge'='TRUE', 'write.format.default'='parquet',
+'engine.hive.enabled'='true', 'iceberg.catalog'='ice_hadoop_cat', 
'write.delete.mode'='merge-on-read',
+'write.update.mode'='merge-on-read', 'write.merge.mode'='merge-on-read')
 ====
 ---- CREATE_TABLE
 CREATE TABLE iceberg_void_transform (i int, j int)
@@ -937,7 +968,9 @@ PARTITIONED BY SPEC (VOID(i), VOID(j))
 STORED AS ICEBERG
 LOCATION '$$location_uri$$'
 TBLPROPERTIES ('external.table.purge'='TRUE', 'write.format.default'='parquet',
-'engine.hive.enabled'='true', 'table_type'='ICEBERG')
+'engine.hive.enabled'='true', 'table_type'='ICEBERG', 
'write.parquet.compression-codec'='snappy'
+'write.delete.mode'='merge-on-read', 'write.update.mode'='merge-on-read',
+'write.merge.mode'='merge-on-read')
 ====
 ---- CREATE_TABLE
 # Creating V1 tables explicitly should not set 'merge-on-read' write modes if 
no write mode is
@@ -950,8 +983,9 @@ CREATE EXTERNAL TABLE 
show_create_table_test_db.ice_explicit_v1 (i INT NULL)
 STORED AS ICEBERG
 LOCATION '$$location_uri$$'
 TBLPROPERTIES ('TRANSLATED_TO_EXTERNAL'='TRUE',
-    'external.table.purge'='TRUE', 'write.format.default'='parquet',
- 'engine.hive.enabled'='true', 'table_type'='ICEBERG')
+'format-version'='1', 'engine.hive.enabled'='true', 
'external.table.purge'='TRUE',
+'table_type'='ICEBERG',
+'write.format.default'='parquet', 'write.parquet.compression-codec'='snappy')
 ====
 ---- CREATE_TABLE
 # Creating V2 tables should set 'merge-on-read' write modes if no write mode 
is specified.
@@ -965,7 +999,8 @@ LOCATION '$$location_uri$$'
 TBLPROPERTIES ('TRANSLATED_TO_EXTERNAL'='TRUE',
     'external.table.purge'='TRUE', 'write.format.default'='parquet',
  'engine.hive.enabled'='true', 'table_type'='ICEBERG', 
'write.delete.mode'='merge-on-read',
- 'write.update.mode'='merge-on-read', 'write.merge.mode'='merge-on-read')
+ 'write.update.mode'='merge-on-read', 'write.merge.mode'='merge-on-read',
+ 'write.parquet.compression-codec'='snappy')
 ====
 ---- CREATE_TABLE
 # Creating V2 tables should not set write mode if user specified any of it to 
any value.
@@ -978,8 +1013,9 @@ CREATE EXTERNAL TABLE 
show_create_table_test_db.ice_v2_explicit_delete (i INT NU
 STORED AS ICEBERG
 LOCATION '$$location_uri$$'
 TBLPROPERTIES ('TRANSLATED_TO_EXTERNAL'='TRUE',
-    'external.table.purge'='TRUE', 'write.format.default'='parquet',
- 'engine.hive.enabled'='true', 'table_type'='ICEBERG', 
'write.delete.mode'='merge-on-read')
+ 'external.table.purge'='TRUE', 'write.format.default'='parquet',
+ 'engine.hive.enabled'='true', 'table_type'='ICEBERG', 
'write.delete.mode'='merge-on-read',
+ 'write.parquet.compression-codec'='snappy')
 ====
 ---- CREATE_TABLE
 # Creating V2 tables should not set write mode if user specified any of it to 
any value.
@@ -992,8 +1028,9 @@ CREATE EXTERNAL TABLE 
show_create_table_test_db.ice_v2_explicit_delete_2 (i INT
 STORED AS ICEBERG
 LOCATION '$$location_uri$$'
 TBLPROPERTIES ('TRANSLATED_TO_EXTERNAL'='TRUE',
-    'external.table.purge'='TRUE', 'write.format.default'='parquet',
- 'engine.hive.enabled'='true', 'table_type'='ICEBERG', 
'write.delete.mode'='copy-on-write')
+ 'external.table.purge'='TRUE', 'write.format.default'='parquet',
+ 'engine.hive.enabled'='true', 'table_type'='ICEBERG', 
'write.delete.mode'='copy-on-write',
+ 'write.parquet.compression-codec'='snappy')
 ====
 ---- CREATE_TABLE
 # Creating V2 tables should not set write mode if user specified any of it to 
any value.
@@ -1006,8 +1043,9 @@ CREATE EXTERNAL TABLE 
show_create_table_test_db.ice_v2_explicit_update (i INT NU
 STORED AS ICEBERG
 LOCATION '$$location_uri$$'
 TBLPROPERTIES ('TRANSLATED_TO_EXTERNAL'='TRUE',
-    'external.table.purge'='TRUE', 'write.format.default'='parquet',
- 'engine.hive.enabled'='true', 'table_type'='ICEBERG', 
'write.update.mode'='copy-on-write')
+ 'external.table.purge'='TRUE', 'write.format.default'='parquet',
+ 'engine.hive.enabled'='true', 'table_type'='ICEBERG', 
'write.update.mode'='copy-on-write',
+ 'write.parquet.compression-codec'='snappy')
 ====
 ---- CREATE_TABLE
 # Creating V2 tables should not set write mode if user specified any of it to 
any value.
@@ -1020,8 +1058,9 @@ CREATE EXTERNAL TABLE 
show_create_table_test_db.ice_v2_explicit_merge (i INT NUL
 STORED AS ICEBERG
 LOCATION '$$location_uri$$'
 TBLPROPERTIES ('TRANSLATED_TO_EXTERNAL'='TRUE',
-    'external.table.purge'='TRUE', 'write.format.default'='parquet',
- 'engine.hive.enabled'='true', 'table_type'='ICEBERG', 
'write.merge.mode'='copy-on-write')
+ 'external.table.purge'='TRUE', 'write.format.default'='parquet',
+ 'engine.hive.enabled'='true', 'table_type'='ICEBERG', 
'write.merge.mode'='copy-on-write',
+ 'write.parquet.compression-codec'='snappy')
 ====
 ---- CREATE_TABLE
 # Test create Bucketed Table

Reply via email to