This is an automated email from the ASF dual-hosted git repository.

sbadhya pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
     new b02cef4fe94 HIVE-27731: Iceberg: Perform metadata delete for queries with static filters (#4748) (Sourabh Badhya reviewed by Denys Kuzmenko, Krisztian Kasa)
b02cef4fe94 is described below

commit b02cef4fe943b9aba597dcdfd3b8f3d3a5efca3e
Author: Sourabh Badhya <[email protected]>
AuthorDate: Thu Oct 19 12:42:35 2023 +0530

    HIVE-27731: Iceberg: Perform metadata delete for queries with static filters (#4748) (Sourabh Badhya reviewed by Denys Kuzmenko, Krisztian Kasa)
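
    At the Iceberg level a metadata delete drops whole data files by filter instead of
    rewriting rows. A minimal sketch of the underlying call (assuming a loaded Iceberg
    Table and a filter already translated to an Expression, as done in
    IcebergTableUtil.performMetadataDelete below):

        // Metadata-only commit: remove every data file whose rows all match the filter.
        table.newDelete()
            .deleteFromRowFilter(Expressions.equal("a", 1))
            .commit();
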
---
 .../java/org/apache/hadoop/hive/conf/HiveConf.java |   2 +
 .../iceberg/mr/hive/HiveIcebergStorageHandler.java |  54 +++
 .../apache/iceberg/mr/hive/IcebergTableUtil.java   |  14 +
 .../delete_iceberg_copy_on_write_partitioned.q     |   1 +
 .../src/test/queries/positive/metadata_delete.q    |  82 ++++
 .../positive/vectorized_iceberg_merge_mixed.q      |   4 +-
 .../delete_iceberg_copy_on_write_partitioned.q.out | 249 ++--------
 ...elete_iceberg_copy_on_write_unpartitioned.q.out |  68 +--
 .../positive/delete_iceberg_partitioned_avro.q.out |   2 +
 .../positive/delete_iceberg_partitioned_orc.q.out  |   6 +-
 .../delete_iceberg_partitioned_parquet.q.out       |   6 +-
 .../delete_iceberg_unpartitioned_parquet.q.out     |   2 +
 .../positive/iceberg_atomic_merge_update.q.out     |   6 +-
 .../results/positive/iceberg_copy_on_write.q.out   |   8 +-
 ...iceberg_truncate_partition_with_evolution.q.out |  96 ++--
 .../positive/llap/llap_iceberg_read_orc.q.out      |   4 +-
 .../test/results/positive/metadata_delete.q.out    | 501 +++++++++++++++++++++
 .../positive/vectorized_iceberg_merge_mixed.q.out  |   8 +-
 .../results/positive/write_iceberg_branch.q.out    |  16 +-
 .../hive/ql/io/sarg/ConvertAstToSearchArg.java     |  39 +-
 .../hive/ql/metadata/HiveStorageHandler.java       |   5 +
 .../hive/ql/parse/AlterTableExecuteSpec.java       |  22 +-
 .../ql/parse/UpdateDeleteSemanticAnalyzer.java     |  86 +++-
 .../hive/ql/io/sarg/TestConvertAstToSearchArg.java |  91 ++--
 24 files changed, 1004 insertions(+), 368 deletions(-)

diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index adc6503debe..a8378f097d3 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -2672,6 +2672,8 @@ public class HiveConf extends Configuration {
 
     HIVE_OPTIMIZE_REPLACE_DELETE_WITH_TRUNCATE("hive.optimize.delete.all", false,
         "Optimize delete the entire data from table, use truncate instead"),
+    HIVE_OPTIMIZE_METADATA_DELETE("hive.optimize.delete.metadata.only", true,
+            "Optimize delete the entire data from table, use truncate 
instead"),
     HIVE_OPTIMIZE_LIMIT("hive.optimize.limit", true,
         "Optimize limit by pushing through Left Outer Joins and Selects"),
     HIVE_OPTIMIZE_TOPNKEY("hive.optimize.topnkey", true, "Whether to enable top n key optimizer."),
diff --git a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandler.java b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandler.java
index 8c8b54256f4..c11663e3a65 100644
--- a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandler.java
+++ b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandler.java
@@ -155,10 +155,15 @@ import org.apache.iceberg.SortField;
 import org.apache.iceberg.SortOrder;
 import org.apache.iceberg.Table;
 import org.apache.iceberg.TableProperties;
+import org.apache.iceberg.TableScan;
 import org.apache.iceberg.exceptions.NoSuchTableException;
+import org.apache.iceberg.expressions.Evaluator;
 import org.apache.iceberg.expressions.Expression;
+import org.apache.iceberg.expressions.ExpressionUtil;
 import org.apache.iceberg.expressions.Expressions;
+import org.apache.iceberg.expressions.Projections;
 import org.apache.iceberg.expressions.ResidualEvaluator;
+import org.apache.iceberg.expressions.StrictMetricsEvaluator;
 import org.apache.iceberg.hadoop.HadoopConfigurable;
 import org.apache.iceberg.io.CloseableIterable;
 import org.apache.iceberg.mr.Catalogs;
@@ -175,6 +180,7 @@ import org.apache.iceberg.relocated.com.google.common.base.Splitter;
 import org.apache.iceberg.relocated.com.google.common.base.Throwables;
 import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList;
 import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
+import org.apache.iceberg.relocated.com.google.common.collect.Iterables;
 import org.apache.iceberg.relocated.com.google.common.collect.Lists;
 import org.apache.iceberg.relocated.com.google.common.collect.Maps;
 import org.apache.iceberg.relocated.com.google.common.collect.Sets;
@@ -185,6 +191,7 @@ import org.apache.iceberg.types.Types;
 import org.apache.iceberg.util.ByteBuffers;
 import org.apache.iceberg.util.Pair;
 import org.apache.iceberg.util.SerializationUtil;
+import org.apache.iceberg.util.SnapshotUtil;
 import org.apache.iceberg.util.StructProjection;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -832,6 +839,12 @@ public class HiveIcebergStorageHandler implements HiveStoragePredicateHandler, H
             (AlterTableExecuteSpec.CherryPickSpec) executeSpec.getOperationParams();
         IcebergTableUtil.cherryPick(icebergTable, cherryPickSpec.getSnapshotId());
         break;
+      case DELETE_METADATA:
+        AlterTableExecuteSpec.DeleteMetadataSpec deleteMetadataSpec =
+            (AlterTableExecuteSpec.DeleteMetadataSpec) executeSpec.getOperationParams();
+        IcebergTableUtil.performMetadataDelete(icebergTable, deleteMetadataSpec.getBranchName(),
+            deleteMetadataSpec.getSarg());
+        break;
       default:
         throw new UnsupportedOperationException(
             String.format("Operation type %s is not supported", 
executeSpec.getOperationType().name()));
@@ -1878,4 +1891,45 @@ public class HiveIcebergStorageHandler implements HiveStoragePredicateHandler, H
       throw new SemanticException(String.format("Unable to find a column with the name: %s", colName));
     }
   }
+
+  @Override
+  public boolean canPerformMetadataDelete(org.apache.hadoop.hive.ql.metadata.Table hmsTable,
+      String branchName, SearchArgument sarg) {
+    Expression exp;
+    try {
+      exp = HiveIcebergFilterFactory.generateFilterExpression(sarg);
+    } catch (UnsupportedOperationException e) {
+      LOG.warn("Unable to create Iceberg filter," +
+              " continuing without metadata delete: ", e);
+      return false;
+    }
+    Table table = IcebergTableUtil.getTable(conf, hmsTable.getTTable());
+
+    // The following code is inspired & copied from Iceberg's SparkTable.java#canDeleteUsingMetadata
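+    // Fast path: the filter lines up with partition boundaries and selects whole partitions,
+    // so every matching data file can be dropped without inspecting file contents.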
+    if (ExpressionUtil.selectsPartitions(exp, table, false)) {
+      return true;
+    }
+
+    TableScan scan = table.newScan().filter(exp).caseSensitive(false).includeColumnStats().ignoreResiduals();
+    if (branchName != null) {
+      scan.useRef(HiveUtils.getTableSnapshotRef(branchName));
+    }
+
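+    // Otherwise plan the scan and check each matching file: the delete stays metadata-only
+    // if every file is either fully covered by the strict partition projection of the filter
+    // or its column metrics prove that all of its rows match.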
+    try (CloseableIterable<FileScanTask> tasks = scan.planFiles()) {
+      Map<Integer, Evaluator> evaluators = Maps.newHashMap();
+      StrictMetricsEvaluator metricsEvaluator =
+          new StrictMetricsEvaluator(SnapshotUtil.schemaFor(table, branchName), exp);
+
+      return Iterables.all(tasks, task -> {
+        DataFile file = task.file();
+        PartitionSpec spec = task.spec();
+        Evaluator evaluator = evaluators.computeIfAbsent(spec.specId(), specId ->
+            new Evaluator(spec.partitionType(), Projections.strict(spec).project(exp)));
+        return evaluator.eval(file.partition()) || metricsEvaluator.eval(file);
+      });
+    } catch (IOException ioe) {
+      LOG.warn("Failed to close task iterable", ioe);
+      return false;
+    }
+  }
 }
diff --git a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/IcebergTableUtil.java b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/IcebergTableUtil.java
index 8527e25cbfe..aacbf4c4e3d 100644
--- a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/IcebergTableUtil.java
+++ b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/IcebergTableUtil.java
@@ -24,13 +24,17 @@ import java.util.Map;
 import java.util.Optional;
 import java.util.Properties;
 import java.util.function.Function;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
 import org.apache.hadoop.hive.ql.QueryState;
+import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
+import org.apache.hadoop.hive.ql.metadata.HiveUtils;
 import org.apache.hadoop.hive.ql.parse.AlterTableExecuteSpec;
 import org.apache.hadoop.hive.ql.parse.TransformSpec;
 import org.apache.hadoop.hive.ql.plan.PlanUtils;
 import org.apache.hadoop.hive.ql.session.SessionStateUtil;
+import org.apache.iceberg.DeleteFiles;
 import org.apache.iceberg.ManageSnapshots;
 import org.apache.iceberg.PartitionSpec;
 import org.apache.iceberg.Schema;
@@ -39,6 +43,7 @@ import org.apache.iceberg.Table;
 import org.apache.iceberg.TableProperties;
 import org.apache.iceberg.UpdatePartitionSpec;
 import org.apache.iceberg.catalog.TableIdentifier;
+import org.apache.iceberg.expressions.Expression;
 import org.apache.iceberg.expressions.Expressions;
 import org.apache.iceberg.mr.Catalogs;
 import org.apache.iceberg.mr.InputFormatConfig;
@@ -278,4 +283,13 @@ public class IcebergTableUtil {
     return props != null &&
         "2".equals(props.get(TableProperties.FORMAT_VERSION));
   }
+
+  public static void performMetadataDelete(Table icebergTable, String branchName, SearchArgument sarg) {
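+    // Translate the Hive SearchArgument into an Iceberg expression and drop the matching
+    // data files directly from table metadata (optionally on the given branch).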
+    Expression exp = HiveIcebergFilterFactory.generateFilterExpression(sarg);
+    DeleteFiles deleteFiles = icebergTable.newDelete();
+    if (StringUtils.isNotEmpty(branchName)) {
+      deleteFiles = deleteFiles.toBranch(HiveUtils.getTableSnapshotRef(branchName));
+    }
+    deleteFiles.deleteFromRowFilter(exp).commit();
+  }
 }
diff --git a/iceberg/iceberg-handler/src/test/queries/positive/delete_iceberg_copy_on_write_partitioned.q b/iceberg/iceberg-handler/src/test/queries/positive/delete_iceberg_copy_on_write_partitioned.q
index 86c1b59ce23..84114c48ded 100644
--- a/iceberg/iceberg-handler/src/test/queries/positive/delete_iceberg_copy_on_write_partitioned.q
+++ b/iceberg/iceberg-handler/src/test/queries/positive/delete_iceberg_copy_on_write_partitioned.q
@@ -1,3 +1,4 @@
+--! qt:replace:/DeleteMetadataSpec(\S*)/#Masked#/
 set hive.explain.user=false;
 
 drop table if exists tbl_ice;
diff --git a/iceberg/iceberg-handler/src/test/queries/positive/metadata_delete.q b/iceberg/iceberg-handler/src/test/queries/positive/metadata_delete.q
new file mode 100644
index 00000000000..ca3ff256606
--- /dev/null
+++ b/iceberg/iceberg-handler/src/test/queries/positive/metadata_delete.q
@@ -0,0 +1,82 @@
+-- SORT_QUERY_RESULTS
+--! qt:replace:/DeleteMetadataSpec(\S*)/#Masked#/
+set hive.explain.user=false;
+
+create table ice_date (a int, b date) stored by iceberg stored as orc tblproperties ('format-version'='2');
+
+insert into table ice_date values (1, '2021-01-01');
+insert into table ice_date values (2, '2022-02-02'), (3, '2022-03-03');
+
+delete from ice_date where b = '2022-02-02';
+delete from ice_date where a = 1 and b = '2021-01-01';
+
+select * from ice_date;
+
+create table ice_date_year (a int, b date) stored by iceberg stored as orc tblproperties ('format-version'='2');
+insert into table ice_date_year values (1, '2021-01-01');
+insert into table ice_date_year values (2, '2022-02-02'), (3, '2022-03-03');
+
+delete from ice_date_year where year(b) = 2022;
+
+select * from ice_date_year;
+
+-- Metadata delete should not be done here; the query should fall back to a normal delete.
+create table ice_str_name (first_name string, last_name string) stored by iceberg stored as orc tblproperties ('format-version'='2');
+insert into table ice_str_name values ('Alex', 'Clark');
+insert into table ice_str_name values ('Bob', 'Bob');
+
+delete from ice_str_name where first_name = last_name;
+
+select * from ice_str_name;
+
+-- Metadata delete should not be done here; the query should fall back to a normal delete.
+create table ice_int_id (first_id int, last_id int) stored by iceberg stored as orc tblproperties ('format-version'='2');
+insert into table ice_int_id values (7, 9);
+insert into table ice_int_id values (8, 8);
+
+delete from ice_int_id where first_id = last_id;
+
+select * from ice_int_id;
+
+-- Check that a delete on a branch also uses metadata delete whenever possible.
+create table ice_branch_metadata_delete (a int, b string) stored by iceberg stored as orc tblproperties ('format-version'='2');
+insert into table ice_branch_metadata_delete values (1, 'ABC');
+insert into table ice_branch_metadata_delete values (2, 'DEF');
+insert into table ice_branch_metadata_delete values (3, 'GHI');
+insert into table ice_branch_metadata_delete values (4, 'JKL');
+
+alter table ice_branch_metadata_delete create branch test01;
+delete from default.ice_branch_metadata_delete.branch_test01 where a = 1;
+
+select * from default.ice_branch_metadata_delete.branch_test01;
+
+alter table ice_branch_metadata_delete drop branch test01;
+
+-- Metadata delete must not be applied to multi-table scans that contain a subquery; these fall back to the normal delete logic.
+create table ice_delete_multiple_table1 (a int, b string) stored by iceberg stored as orc tblproperties ('format-version' = '2');
+create table ice_delete_multiple_table2 (a int, b string) stored by iceberg stored as orc tblproperties ('format-version' = '2');
+insert into table ice_delete_multiple_table1 values (1, 'ABC'), (2, 'DEF'), (3, 'GHI');
+insert into table ice_delete_multiple_table1 values (4, 'GHI'), (5, 'JKL'), (6, 'PQR');
+insert into table ice_delete_multiple_table2 values (1, 'ABC'), (2, 'DEF'), (3, 'GHI');
+insert into table ice_delete_multiple_table2 values (4, 'GHI'), (5, 'JKL'), (6, 'PQR');
+
+delete from ice_delete_multiple_table2 where ice_delete_multiple_table2.a in (select ice_delete_multiple_table1.a from ice_delete_multiple_table1 where ice_delete_multiple_table1.b = 'GHI');
+
+select * from ice_delete_multiple_table2;
+
+create table test_delete_config (a int, b int) stored by iceberg stored as orc tblproperties ('format-version'='2');
+insert into table test_delete_config values (1,2), (3,4);
+
+explain delete from test_delete_config where b < 5;
+
+set hive.optimize.delete.metadata.only=false;
+explain delete from test_delete_config where b < 5;
+
+drop table ice_date;
+drop table ice_date_year;
+drop table ice_str_name;
+drop table ice_int_id;
+drop table ice_branch_metadata_delete;
+drop table ice_delete_multiple_table1;
+drop table ice_delete_multiple_table2;
+drop table test_delete_config;
diff --git a/iceberg/iceberg-handler/src/test/queries/positive/vectorized_iceberg_merge_mixed.q b/iceberg/iceberg-handler/src/test/queries/positive/vectorized_iceberg_merge_mixed.q
index 3d14c5134ee..080857ef138 100644
--- a/iceberg/iceberg-handler/src/test/queries/positive/vectorized_iceberg_merge_mixed.q
+++ b/iceberg/iceberg-handler/src/test/queries/positive/vectorized_iceberg_merge_mixed.q
@@ -3,9 +3,9 @@
 -- Mask neededVirtualColumns due to non-strict order
 --! qt:replace:/(\s+neededVirtualColumns:\s)(.*)/$1#Masked#/
 -- Mask width
---! qt:replace:/(width=17)\d+/$1####/
+--! qt:replace:/(width=58)\d+/$1###/
 -- Mask total data size
---! qt:replace:/(Data size: 35)\d+/$1####/
+--! qt:replace:/(Data size: 11)\d+/$1####/
 
 set hive.vectorized.execution.enabled=true;
 set hive.llap.io.enabled=false;
diff --git a/iceberg/iceberg-handler/src/test/results/positive/delete_iceberg_copy_on_write_partitioned.q.out b/iceberg/iceberg-handler/src/test/results/positive/delete_iceberg_copy_on_write_partitioned.q.out
index 0b84e42f45c..91fd18b711f 100644
--- a/iceberg/iceberg-handler/src/test/results/positive/delete_iceberg_copy_on_write_partitioned.q.out
+++ b/iceberg/iceberg-handler/src/test/results/positive/delete_iceberg_copy_on_write_partitioned.q.out
@@ -23,211 +23,28 @@ POSTHOOK: Output: default@tbl_ice
 PREHOOK: query: explain delete from tbl_ice where b in ('one', 'four') or a = 
22
 PREHOOK: type: QUERY
 PREHOOK: Input: default@tbl_ice
-PREHOOK: Output: default@tbl_ice
+PREHOOK: Output: hdfs://### HDFS PATH ###
 POSTHOOK: query: explain delete from tbl_ice where b in ('one', 'four') or a = 
22
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@tbl_ice
-POSTHOOK: Output: default@tbl_ice
+POSTHOOK: Output: hdfs://### HDFS PATH ###
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
-  Stage-3 depends on stages: Stage-0
 
 STAGE PLANS:
   Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE), Reducer 4 (SIMPLE_EDGE), Union 3 
(CONTAINS)
-        Reducer 4 <- Map 1 (SIMPLE_EDGE)
-        Reducer 6 <- Map 5 (SIMPLE_EDGE), Union 3 (CONTAINS)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: tbl_ice
-                  filterExpr: (((a <> 22) and (b <> 'one') and (b <> 'four')) 
or (b) IN ('one', 'four') or (a = 22)) (type: boolean)
-                  Statistics: Num rows: 7 Data size: 672 Basic stats: COMPLETE 
Column stats: COMPLETE
-                  Filter Operator
-                    predicate: ((a <> 22) and (b <> 'one') and (b <> 'four') 
and FILE__PATH is not null) (type: boolean)
-                    Statistics: Num rows: 7 Data size: 672 Basic stats: 
COMPLETE Column stats: COMPLETE
-                    Select Operator
-                      expressions: a (type: int), b (type: string), c (type: 
int), PARTITION__SPEC__ID (type: int), PARTITION__HASH (type: bigint), 
FILE__PATH (type: string), ROW__POSITION (type: bigint)
-                      outputColumnNames: _col0, _col1, _col2, _col3, _col4, 
_col5, _col6
-                      Statistics: Num rows: 7 Data size: 2100 Basic stats: 
COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        key expressions: _col5 (type: string)
-                        null sort order: z
-                        sort order: +
-                        Map-reduce partition columns: _col5 (type: string)
-                        Statistics: Num rows: 7 Data size: 2100 Basic stats: 
COMPLETE Column stats: COMPLETE
-                        value expressions: _col0 (type: int), _col1 (type: 
string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col6 
(type: bigint)
-                  Filter Operator
-                    predicate: (((b) IN ('one', 'four') or (a = 22)) and 
FILE__PATH is not null) (type: boolean)
-                    Statistics: Num rows: 4 Data size: 368 Basic stats: 
COMPLETE Column stats: COMPLETE
-                    Reduce Output Operator
-                      key expressions: FILE__PATH (type: string)
-                      null sort order: a
-                      sort order: +
-                      Map-reduce partition columns: FILE__PATH (type: string)
-                      Statistics: Num rows: 4 Data size: 368 Basic stats: 
COMPLETE Column stats: COMPLETE
-            Execution mode: vectorized
-        Map 5 
-            Map Operator Tree:
-                TableScan
-                  alias: tbl_ice
-                  filterExpr: ((a = 22) or (b) IN ('one', 'four')) (type: 
boolean)
-                  Statistics: Num rows: 7 Data size: 672 Basic stats: COMPLETE 
Column stats: COMPLETE
-                  Filter Operator
-                    predicate: ((a = 22) or (b) IN ('one', 'four')) (type: 
boolean)
-                    Statistics: Num rows: 4 Data size: 384 Basic stats: 
COMPLETE Column stats: COMPLETE
-                    Reduce Output Operator
-                      key expressions: FILE__PATH (type: string)
-                      null sort order: a
-                      sort order: +
-                      Map-reduce partition columns: FILE__PATH (type: string)
-                      Statistics: Num rows: 4 Data size: 384 Basic stats: 
COMPLETE Column stats: COMPLETE
-                      value expressions: a (type: int), b (type: string), c 
(type: int), PARTITION__SPEC__ID (type: int), PARTITION__HASH (type: bigint)
-            Execution mode: vectorized
-        Reducer 2 
-            Reduce Operator Tree:
-              Merge Join Operator
-                condition map:
-                     Left Semi Join 0 to 1
-                keys:
-                  0 _col5 (type: string)
-                  1 _col0 (type: string)
-                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, 
_col6
-                Statistics: Num rows: 2 Data size: 600 Basic stats: COMPLETE 
Column stats: COMPLETE
-                Select Operator
-                  expressions: _col3 (type: int), _col4 (type: bigint), _col5 
(type: string), _col6 (type: bigint), _col0 (type: int), _col1 (type: string), 
_col2 (type: int)
-                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, 
_col6
-                  Statistics: Num rows: 2 Data size: 600 Basic stats: COMPLETE 
Column stats: COMPLETE
-                  File Output Operator
-                    compressed: false
-                    Statistics: Num rows: 4 Data size: 1200 Basic stats: 
COMPLETE Column stats: COMPLETE
-                    table:
-                        input format: 
org.apache.iceberg.mr.hive.HiveIcebergInputFormat
-                        output format: 
org.apache.iceberg.mr.hive.HiveIcebergOutputFormat
-                        serde: org.apache.iceberg.mr.hive.HiveIcebergSerDe
-                        name: default.tbl_ice
-        Reducer 4 
-            Execution mode: vectorized
-            Reduce Operator Tree:
-              Select Operator
-                expressions: KEY.reducesinkkey0 (type: string)
-                outputColumnNames: _col5
-                Statistics: Num rows: 4 Data size: 736 Basic stats: COMPLETE 
Column stats: COMPLETE
-                PTF Operator
-                  Function definitions:
-                      Input definition
-                        input alias: ptf_0
-                        output shape: _col5: string
-                        type: WINDOWING
-                      Windowing table definition
-                        input alias: ptf_1
-                        name: windowingtablefunction
-                        order by: _col5 ASC NULLS FIRST
-                        partition by: _col5
-                        raw input shape:
-                        window functions:
-                            window function definition
-                              alias: row_number_window_0
-                              name: row_number
-                              window function: GenericUDAFRowNumberEvaluator
-                              window frame: ROWS PRECEDING(MAX)~FOLLOWING(MAX)
-                              isPivotResult: true
-                  Statistics: Num rows: 4 Data size: 736 Basic stats: COMPLETE 
Column stats: COMPLETE
-                  Filter Operator
-                    predicate: (row_number_window_0 = 1) (type: boolean)
-                    Statistics: Num rows: 2 Data size: 368 Basic stats: 
COMPLETE Column stats: COMPLETE
-                    Select Operator
-                      expressions: _col5 (type: string)
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 2 Data size: 368 Basic stats: 
COMPLETE Column stats: COMPLETE
-                      Group By Operator
-                        keys: _col0 (type: string)
-                        minReductionHashAggr: 0.4
-                        mode: hash
-                        outputColumnNames: _col0
-                        Statistics: Num rows: 2 Data size: 368 Basic stats: 
COMPLETE Column stats: COMPLETE
-                        Reduce Output Operator
-                          key expressions: _col0 (type: string)
-                          null sort order: z
-                          sort order: +
-                          Map-reduce partition columns: _col0 (type: string)
-                          Statistics: Num rows: 2 Data size: 368 Basic stats: 
COMPLETE Column stats: COMPLETE
-        Reducer 6 
-            Execution mode: vectorized
-            Reduce Operator Tree:
-              Select Operator
-                expressions: VALUE._col0 (type: int), VALUE._col1 (type: 
string), VALUE._col2 (type: int), VALUE._col3 (type: int), VALUE._col4 (type: 
bigint), KEY.reducesinkkey0 (type: string)
-                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-                Statistics: Num rows: 4 Data size: 1168 Basic stats: COMPLETE 
Column stats: COMPLETE
-                PTF Operator
-                  Function definitions:
-                      Input definition
-                        input alias: ptf_0
-                        type: WINDOWING
-                      Windowing table definition
-                        input alias: ptf_1
-                        name: windowingtablefunction
-                        order by: _col5 ASC NULLS FIRST
-                        partition by: _col5
-                        raw input shape:
-                        window functions:
-                            window function definition
-                              alias: row_number_window_0
-                              name: row_number
-                              window function: GenericUDAFRowNumberEvaluator
-                              window frame: ROWS PRECEDING(MAX)~FOLLOWING(MAX)
-                              isPivotResult: true
-                  Statistics: Num rows: 4 Data size: 1168 Basic stats: 
COMPLETE Column stats: COMPLETE
-                  Filter Operator
-                    predicate: (row_number_window_0 = 1) (type: boolean)
-                    Statistics: Num rows: 2 Data size: 584 Basic stats: 
COMPLETE Column stats: COMPLETE
-                    Select Operator
-                      expressions: _col3 (type: int), _col4 (type: bigint), 
_col5 (type: string), -1L (type: bigint), _col0 (type: int), _col1 (type: 
string), _col2 (type: int)
-                      outputColumnNames: _col0, _col1, _col2, _col3, _col4, 
_col5, _col6
-                      Statistics: Num rows: 2 Data size: 600 Basic stats: 
COMPLETE Column stats: COMPLETE
-                      File Output Operator
-                        compressed: false
-                        Statistics: Num rows: 4 Data size: 1200 Basic stats: 
COMPLETE Column stats: COMPLETE
-                        table:
-                            input format: 
org.apache.iceberg.mr.hive.HiveIcebergInputFormat
-                            output format: 
org.apache.iceberg.mr.hive.HiveIcebergOutputFormat
-                            serde: org.apache.iceberg.mr.hive.HiveIcebergSerDe
-                            name: default.tbl_ice
-        Union 3 
-            Vertex: Union 3
-
-  Stage: Stage-2
-    Dependency Collection
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          replace: false
-          table:
-              input format: org.apache.iceberg.mr.hive.HiveIcebergInputFormat
-              output format: org.apache.iceberg.mr.hive.HiveIcebergOutputFormat
-              serde: org.apache.iceberg.mr.hive.HiveIcebergSerDe
-              name: default.tbl_ice
-
-  Stage: Stage-3
-    Stats Work
-      Basic Stats Work:
+    Execute operation
+      table name: default.tbl_ice
+      spec: AlterTableExecuteSpec{operationType=DELETE_METADATA, 
operationParams=org.apache.hadoop.hive.ql.parse.AlterTableExecuteSpec$#Masked#
 
 PREHOOK: query: delete from tbl_ice where b in ('one', 'four') or a = 22
 PREHOOK: type: QUERY
 PREHOOK: Input: default@tbl_ice
-PREHOOK: Output: default@tbl_ice
+PREHOOK: Output: hdfs://### HDFS PATH ###
 POSTHOOK: query: delete from tbl_ice where b in ('one', 'four') or a = 22
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@tbl_ice
-POSTHOOK: Output: default@tbl_ice
+POSTHOOK: Output: hdfs://### HDFS PATH ###
 PREHOOK: query: select * from tbl_ice order by a
 PREHOOK: type: QUERY
 PREHOOK: Input: default@tbl_ice
@@ -248,6 +65,8 @@ POSTHOOK: query: insert into tbl_ice values (444, 'hola', 
800), (555, 'schola',
 POSTHOOK: type: QUERY
 POSTHOOK: Input: _dummy_database@_dummy_table
 POSTHOOK: Output: default@tbl_ice
+Warning: Shuffle Join MERGEJOIN[61][tables = [$hdt$_0, $hdt$_1]] in Stage 
'Reducer 2' is a cross product
+Warning: Shuffle Join MERGEJOIN[63][tables = [$hdt$_0, $hdt$_1, $hdt$_2, 
$hdt$_3]] in Stage 'Reducer 4' is a cross product
 Warning: Shuffle Join MERGEJOIN[222][tables = [$hdt$_1, $hdt$_2, $hdt$_3]] in 
Stage 'Reducer 3' is a cross product
 Warning: Shuffle Join MERGEJOIN[220][tables = [$hdt$_2, $hdt$_3]] in Stage 
'Reducer 8' is a cross product
 Warning: Shuffle Join MERGEJOIN[224][tables = [$hdt$_2, $hdt$_3, $hdt$_4, 
$hdt$_5]] in Stage 'Reducer 10' is a cross product
@@ -263,13 +82,13 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@tbl_ice
 POSTHOOK: Output: default@tbl_ice
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
-  Stage-3 depends on stages: Stage-0
+  Stage-4 is a root stage
+  Stage-5 depends on stages: Stage-4
+  Stage-3 depends on stages: Stage-5
+  Stage-6 depends on stages: Stage-3
 
 STAGE PLANS:
-  Stage: Stage-1
+  Stage: Stage-4
     Tez
 #### A masked pattern was here ####
       Edges:
@@ -871,10 +690,10 @@ STAGE PLANS:
         Union 7 
             Vertex: Union 7
 
-  Stage: Stage-2
+  Stage: Stage-5
     Dependency Collection
 
-  Stage: Stage-0
+  Stage: Stage-3
     Move Operator
       tables:
           replace: false
@@ -884,10 +703,12 @@ STAGE PLANS:
               serde: org.apache.iceberg.mr.hive.HiveIcebergSerDe
               name: default.tbl_ice
 
-  Stage: Stage-3
+  Stage: Stage-6
     Stats Work
       Basic Stats Work:
 
+Warning: Shuffle Join MERGEJOIN[61][tables = [$hdt$_0, $hdt$_1]] in Stage 
'Reducer 2' is a cross product
+Warning: Shuffle Join MERGEJOIN[63][tables = [$hdt$_0, $hdt$_1, $hdt$_2, 
$hdt$_3]] in Stage 'Reducer 4' is a cross product
 Warning: Shuffle Join MERGEJOIN[222][tables = [$hdt$_1, $hdt$_2, $hdt$_3]] in 
Stage 'Reducer 3' is a cross product
 Warning: Shuffle Join MERGEJOIN[220][tables = [$hdt$_2, $hdt$_3]] in Stage 
'Reducer 8' is a cross product
 Warning: Shuffle Join MERGEJOIN[224][tables = [$hdt$_2, $hdt$_3, $hdt$_4, 
$hdt$_5]] in Stage 'Reducer 10' is a cross product
@@ -946,13 +767,13 @@ POSTHOOK: Input: default@tbl_ice
 POSTHOOK: Input: default@tbl_ice_other
 POSTHOOK: Output: default@tbl_ice
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
-  Stage-3 depends on stages: Stage-0
+  Stage-4 is a root stage
+  Stage-5 depends on stages: Stage-4
+  Stage-3 depends on stages: Stage-5
+  Stage-6 depends on stages: Stage-3
 
 STAGE PLANS:
-  Stage: Stage-1
+  Stage: Stage-4
     Tez
 #### A masked pattern was here ####
       Edges:
@@ -1311,10 +1132,10 @@ STAGE PLANS:
         Union 5 
             Vertex: Union 5
 
-  Stage: Stage-2
+  Stage: Stage-5
     Dependency Collection
 
-  Stage: Stage-0
+  Stage: Stage-3
     Move Operator
       tables:
           replace: false
@@ -1324,7 +1145,7 @@ STAGE PLANS:
               serde: org.apache.iceberg.mr.hive.HiveIcebergSerDe
               name: default.tbl_ice
 
-  Stage: Stage-3
+  Stage: Stage-6
     Stats Work
       Basic Stats Work:
 
@@ -1384,13 +1205,13 @@ POSTHOOK: Input: default@tbl_ice
 POSTHOOK: Input: default@tbl_ice_other
 POSTHOOK: Output: default@tbl_ice
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
-  Stage-3 depends on stages: Stage-0
+  Stage-4 is a root stage
+  Stage-5 depends on stages: Stage-4
+  Stage-3 depends on stages: Stage-5
+  Stage-6 depends on stages: Stage-3
 
 STAGE PLANS:
-  Stage: Stage-1
+  Stage: Stage-4
     Tez
 #### A masked pattern was here ####
       Edges:
@@ -1769,10 +1590,10 @@ STAGE PLANS:
         Union 5 
             Vertex: Union 5
 
-  Stage: Stage-2
+  Stage: Stage-5
     Dependency Collection
 
-  Stage: Stage-0
+  Stage: Stage-3
     Move Operator
       tables:
           replace: false
@@ -1782,7 +1603,7 @@ STAGE PLANS:
               serde: org.apache.iceberg.mr.hive.HiveIcebergSerDe
               name: default.tbl_ice
 
-  Stage: Stage-3
+  Stage: Stage-6
     Stats Work
       Basic Stats Work:
 
diff --git a/iceberg/iceberg-handler/src/test/results/positive/delete_iceberg_copy_on_write_unpartitioned.q.out b/iceberg/iceberg-handler/src/test/results/positive/delete_iceberg_copy_on_write_unpartitioned.q.out
index d9f95eb36f6..22256bab010 100644
--- a/iceberg/iceberg-handler/src/test/results/positive/delete_iceberg_copy_on_write_unpartitioned.q.out
+++ b/iceberg/iceberg-handler/src/test/results/positive/delete_iceberg_copy_on_write_unpartitioned.q.out
@@ -29,13 +29,13 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@tbl_ice
 POSTHOOK: Output: default@tbl_ice
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
-  Stage-3 depends on stages: Stage-0
+  Stage-2 is a root stage
+  Stage-3 depends on stages: Stage-2
+  Stage-1 depends on stages: Stage-3
+  Stage-4 depends on stages: Stage-1
 
 STAGE PLANS:
-  Stage: Stage-1
+  Stage: Stage-2
     Tez
 #### A masked pattern was here ####
       Edges:
@@ -203,10 +203,10 @@ STAGE PLANS:
         Union 3 
             Vertex: Union 3
 
-  Stage: Stage-2
+  Stage: Stage-3
     Dependency Collection
 
-  Stage: Stage-0
+  Stage: Stage-1
     Move Operator
       tables:
           replace: false
@@ -216,7 +216,7 @@ STAGE PLANS:
               serde: org.apache.iceberg.mr.hive.HiveIcebergSerDe
               name: default.tbl_ice
 
-  Stage: Stage-3
+  Stage: Stage-4
     Stats Work
       Basic Stats Work:
 
@@ -248,6 +248,8 @@ POSTHOOK: query: insert into tbl_ice values (444, 'hola', 
800), (555, 'schola',
 POSTHOOK: type: QUERY
 POSTHOOK: Input: _dummy_database@_dummy_table
 POSTHOOK: Output: default@tbl_ice
+Warning: Shuffle Join MERGEJOIN[61][tables = [$hdt$_0, $hdt$_1]] in Stage 
'Reducer 2' is a cross product
+Warning: Shuffle Join MERGEJOIN[63][tables = [$hdt$_0, $hdt$_1, $hdt$_2, 
$hdt$_3]] in Stage 'Reducer 4' is a cross product
 Warning: Shuffle Join MERGEJOIN[222][tables = [$hdt$_1, $hdt$_2, $hdt$_3]] in 
Stage 'Reducer 3' is a cross product
 Warning: Shuffle Join MERGEJOIN[220][tables = [$hdt$_2, $hdt$_3]] in Stage 
'Reducer 8' is a cross product
 Warning: Shuffle Join MERGEJOIN[224][tables = [$hdt$_2, $hdt$_3, $hdt$_4, 
$hdt$_5]] in Stage 'Reducer 10' is a cross product
@@ -263,13 +265,13 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@tbl_ice
 POSTHOOK: Output: default@tbl_ice
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
-  Stage-3 depends on stages: Stage-0
+  Stage-4 is a root stage
+  Stage-5 depends on stages: Stage-4
+  Stage-3 depends on stages: Stage-5
+  Stage-6 depends on stages: Stage-3
 
 STAGE PLANS:
-  Stage: Stage-1
+  Stage: Stage-4
     Tez
 #### A masked pattern was here ####
       Edges:
@@ -871,10 +873,10 @@ STAGE PLANS:
         Union 7 
             Vertex: Union 7
 
-  Stage: Stage-2
+  Stage: Stage-5
     Dependency Collection
 
-  Stage: Stage-0
+  Stage: Stage-3
     Move Operator
       tables:
           replace: false
@@ -884,10 +886,12 @@ STAGE PLANS:
               serde: org.apache.iceberg.mr.hive.HiveIcebergSerDe
               name: default.tbl_ice
 
-  Stage: Stage-3
+  Stage: Stage-6
     Stats Work
       Basic Stats Work:
 
+Warning: Shuffle Join MERGEJOIN[61][tables = [$hdt$_0, $hdt$_1]] in Stage 
'Reducer 2' is a cross product
+Warning: Shuffle Join MERGEJOIN[63][tables = [$hdt$_0, $hdt$_1, $hdt$_2, 
$hdt$_3]] in Stage 'Reducer 4' is a cross product
 Warning: Shuffle Join MERGEJOIN[222][tables = [$hdt$_1, $hdt$_2, $hdt$_3]] in 
Stage 'Reducer 3' is a cross product
 Warning: Shuffle Join MERGEJOIN[220][tables = [$hdt$_2, $hdt$_3]] in Stage 
'Reducer 8' is a cross product
 Warning: Shuffle Join MERGEJOIN[224][tables = [$hdt$_2, $hdt$_3, $hdt$_4, 
$hdt$_5]] in Stage 'Reducer 10' is a cross product
@@ -946,13 +950,13 @@ POSTHOOK: Input: default@tbl_ice
 POSTHOOK: Input: default@tbl_ice_other
 POSTHOOK: Output: default@tbl_ice
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
-  Stage-3 depends on stages: Stage-0
+  Stage-4 is a root stage
+  Stage-5 depends on stages: Stage-4
+  Stage-3 depends on stages: Stage-5
+  Stage-6 depends on stages: Stage-3
 
 STAGE PLANS:
-  Stage: Stage-1
+  Stage: Stage-4
     Tez
 #### A masked pattern was here ####
       Edges:
@@ -1311,10 +1315,10 @@ STAGE PLANS:
         Union 5 
             Vertex: Union 5
 
-  Stage: Stage-2
+  Stage: Stage-5
     Dependency Collection
 
-  Stage: Stage-0
+  Stage: Stage-3
     Move Operator
       tables:
           replace: false
@@ -1324,7 +1328,7 @@ STAGE PLANS:
               serde: org.apache.iceberg.mr.hive.HiveIcebergSerDe
               name: default.tbl_ice
 
-  Stage: Stage-3
+  Stage: Stage-6
     Stats Work
       Basic Stats Work:
 
@@ -1384,13 +1388,13 @@ POSTHOOK: Input: default@tbl_ice
 POSTHOOK: Input: default@tbl_standard_other
 POSTHOOK: Output: default@tbl_ice
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
-  Stage-3 depends on stages: Stage-0
+  Stage-4 is a root stage
+  Stage-5 depends on stages: Stage-4
+  Stage-3 depends on stages: Stage-5
+  Stage-6 depends on stages: Stage-3
 
 STAGE PLANS:
-  Stage: Stage-1
+  Stage: Stage-4
     Tez
 #### A masked pattern was here ####
       Edges:
@@ -1769,10 +1773,10 @@ STAGE PLANS:
         Union 5 
             Vertex: Union 5
 
-  Stage: Stage-2
+  Stage: Stage-5
     Dependency Collection
 
-  Stage: Stage-0
+  Stage: Stage-3
     Move Operator
       tables:
           replace: false
@@ -1782,7 +1786,7 @@ STAGE PLANS:
               serde: org.apache.iceberg.mr.hive.HiveIcebergSerDe
               name: default.tbl_ice
 
-  Stage: Stage-3
+  Stage: Stage-6
     Stats Work
       Basic Stats Work:
 
diff --git a/iceberg/iceberg-handler/src/test/results/positive/delete_iceberg_partitioned_avro.q.out b/iceberg/iceberg-handler/src/test/results/positive/delete_iceberg_partitioned_avro.q.out
index cbcb6e1f893..38d0bc83838 100644
--- a/iceberg/iceberg-handler/src/test/results/positive/delete_iceberg_partitioned_avro.q.out
+++ b/iceberg/iceberg-handler/src/test/results/positive/delete_iceberg_partitioned_avro.q.out
@@ -48,6 +48,8 @@ POSTHOOK: query: insert into tbl_ice values (444, 'hola', 
800), (555, 'schola',
 POSTHOOK: type: QUERY
 POSTHOOK: Input: _dummy_database@_dummy_table
 POSTHOOK: Output: default@tbl_ice
+Warning: Shuffle Join MERGEJOIN[61][tables = [$hdt$_0, $hdt$_1]] in Stage 
'Reducer 2' is a cross product
+Warning: Shuffle Join MERGEJOIN[63][tables = [$hdt$_0, $hdt$_1, $hdt$_2, 
$hdt$_3]] in Stage 'Reducer 4' is a cross product
 Warning: Shuffle Join MERGEJOIN[63][tables = [$hdt$_0, $hdt$_1]] in Stage 
'Reducer 2' is a cross product
 Warning: Shuffle Join MERGEJOIN[65][tables = [$hdt$_0, $hdt$_1, $hdt$_2, 
$hdt$_3]] in Stage 'Reducer 4' is a cross product
 PREHOOK: query: delete from tbl_ice where a in (select a from tbl_ice where a 
<= 5) or c in (select c from tbl_ice where c > 800)
diff --git a/iceberg/iceberg-handler/src/test/results/positive/delete_iceberg_partitioned_orc.q.out b/iceberg/iceberg-handler/src/test/results/positive/delete_iceberg_partitioned_orc.q.out
index 7efc58b0e6f..2111826b9bf 100644
--- a/iceberg/iceberg-handler/src/test/results/positive/delete_iceberg_partitioned_orc.q.out
+++ b/iceberg/iceberg-handler/src/test/results/positive/delete_iceberg_partitioned_orc.q.out
@@ -23,11 +23,11 @@ POSTHOOK: Output: default@tbl_ice
 PREHOOK: query: delete from tbl_ice where b in ('one', 'four') or a = 22
 PREHOOK: type: QUERY
 PREHOOK: Input: default@tbl_ice
-PREHOOK: Output: default@tbl_ice
+PREHOOK: Output: hdfs://### HDFS PATH ###
 POSTHOOK: query: delete from tbl_ice where b in ('one', 'four') or a = 22
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@tbl_ice
-POSTHOOK: Output: default@tbl_ice
+POSTHOOK: Output: hdfs://### HDFS PATH ###
 PREHOOK: query: select * from tbl_ice order by a
 PREHOOK: type: QUERY
 PREHOOK: Input: default@tbl_ice
@@ -48,6 +48,8 @@ POSTHOOK: query: insert into tbl_ice values (444, 'hola', 
800), (555, 'schola',
 POSTHOOK: type: QUERY
 POSTHOOK: Input: _dummy_database@_dummy_table
 POSTHOOK: Output: default@tbl_ice
+Warning: Shuffle Join MERGEJOIN[61][tables = [$hdt$_0, $hdt$_1]] in Stage 
'Reducer 2' is a cross product
+Warning: Shuffle Join MERGEJOIN[63][tables = [$hdt$_0, $hdt$_1, $hdt$_2, 
$hdt$_3]] in Stage 'Reducer 4' is a cross product
 Warning: Shuffle Join MERGEJOIN[63][tables = [$hdt$_0, $hdt$_1]] in Stage 
'Reducer 2' is a cross product
 Warning: Shuffle Join MERGEJOIN[65][tables = [$hdt$_0, $hdt$_1, $hdt$_2, 
$hdt$_3]] in Stage 'Reducer 4' is a cross product
 PREHOOK: query: delete from tbl_ice where a in (select a from tbl_ice where a 
<= 5) or c in (select c from tbl_ice where c > 800)
diff --git a/iceberg/iceberg-handler/src/test/results/positive/delete_iceberg_partitioned_parquet.q.out b/iceberg/iceberg-handler/src/test/results/positive/delete_iceberg_partitioned_parquet.q.out
index 065128ce711..43e01378a2c 100644
--- a/iceberg/iceberg-handler/src/test/results/positive/delete_iceberg_partitioned_parquet.q.out
+++ b/iceberg/iceberg-handler/src/test/results/positive/delete_iceberg_partitioned_parquet.q.out
@@ -23,11 +23,11 @@ POSTHOOK: Output: default@tbl_ice
 PREHOOK: query: delete from tbl_ice where b in ('one', 'four') or a = 22
 PREHOOK: type: QUERY
 PREHOOK: Input: default@tbl_ice
-PREHOOK: Output: default@tbl_ice
+PREHOOK: Output: hdfs://### HDFS PATH ###
 POSTHOOK: query: delete from tbl_ice where b in ('one', 'four') or a = 22
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@tbl_ice
-POSTHOOK: Output: default@tbl_ice
+POSTHOOK: Output: hdfs://### HDFS PATH ###
 PREHOOK: query: select * from tbl_ice order by a
 PREHOOK: type: QUERY
 PREHOOK: Input: default@tbl_ice
@@ -48,6 +48,8 @@ POSTHOOK: query: insert into tbl_ice values (444, 'hola', 
800), (555, 'schola',
 POSTHOOK: type: QUERY
 POSTHOOK: Input: _dummy_database@_dummy_table
 POSTHOOK: Output: default@tbl_ice
+Warning: Shuffle Join MERGEJOIN[61][tables = [$hdt$_0, $hdt$_1]] in Stage 
'Reducer 2' is a cross product
+Warning: Shuffle Join MERGEJOIN[63][tables = [$hdt$_0, $hdt$_1, $hdt$_2, 
$hdt$_3]] in Stage 'Reducer 4' is a cross product
 Warning: Shuffle Join MERGEJOIN[63][tables = [$hdt$_0, $hdt$_1]] in Stage 
'Reducer 2' is a cross product
 Warning: Shuffle Join MERGEJOIN[65][tables = [$hdt$_0, $hdt$_1, $hdt$_2, 
$hdt$_3]] in Stage 'Reducer 4' is a cross product
 PREHOOK: query: delete from tbl_ice where a in (select a from tbl_ice where a 
<= 5) or c in (select c from tbl_ice where c > 800)
diff --git a/iceberg/iceberg-handler/src/test/results/positive/delete_iceberg_unpartitioned_parquet.q.out b/iceberg/iceberg-handler/src/test/results/positive/delete_iceberg_unpartitioned_parquet.q.out
index fbdb9ca29da..3672e210f2e 100644
--- a/iceberg/iceberg-handler/src/test/results/positive/delete_iceberg_unpartitioned_parquet.q.out
+++ b/iceberg/iceberg-handler/src/test/results/positive/delete_iceberg_unpartitioned_parquet.q.out
@@ -48,6 +48,8 @@ POSTHOOK: query: insert into tbl_ice values (444, 'hola', 
800), (555, 'schola',
 POSTHOOK: type: QUERY
 POSTHOOK: Input: _dummy_database@_dummy_table
 POSTHOOK: Output: default@tbl_ice
+Warning: Shuffle Join MERGEJOIN[61][tables = [$hdt$_0, $hdt$_1]] in Stage 
'Reducer 2' is a cross product
+Warning: Shuffle Join MERGEJOIN[63][tables = [$hdt$_0, $hdt$_1, $hdt$_2, 
$hdt$_3]] in Stage 'Reducer 4' is a cross product
 Warning: Shuffle Join MERGEJOIN[63][tables = [$hdt$_0, $hdt$_1]] in Stage 
'Reducer 2' is a cross product
 Warning: Shuffle Join MERGEJOIN[65][tables = [$hdt$_0, $hdt$_1, $hdt$_2, 
$hdt$_3]] in Stage 'Reducer 4' is a cross product
 PREHOOK: query: delete from tbl_ice where a in (select a from tbl_ice where a 
<= 5) or c in (select c from tbl_ice where c > 800)
diff --git a/iceberg/iceberg-handler/src/test/results/positive/iceberg_atomic_merge_update.q.out b/iceberg/iceberg-handler/src/test/results/positive/iceberg_atomic_merge_update.q.out
index 7bc1a8db135..403fb63f2c0 100644
--- a/iceberg/iceberg-handler/src/test/results/positive/iceberg_atomic_merge_update.q.out
+++ b/iceberg/iceberg-handler/src/test/results/positive/iceberg_atomic_merge_update.q.out
@@ -218,11 +218,11 @@ POSTHOOK: Output: default@calls_v2
 PREHOOK: query: delete from calls_v2 where year=2024
 PREHOOK: type: QUERY
 PREHOOK: Input: default@calls_v2
-PREHOOK: Output: default@calls_v2
+PREHOOK: Output: hdfs://### HDFS PATH ###
 POSTHOOK: query: delete from calls_v2 where year=2024
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@calls_v2
-POSTHOOK: Output: default@calls_v2
+POSTHOOK: Output: hdfs://### HDFS PATH ###
 PREHOOK: query: select s.operation, s.summary['added-records'], 
s.summary['deleted-records'] from default.calls_v2.snapshots s
   order by s.snapshot_id
 PREHOOK: type: QUERY
@@ -234,8 +234,8 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@calls_v2
 POSTHOOK: Output: hdfs://### HDFS PATH ###
 append 10      NULL
+delete NULL    4
 overwrite      2       NULL
-overwrite      NULL    NULL
 PREHOOK: query: DROP TABLE calls_v2
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@calls_v2
diff --git a/iceberg/iceberg-handler/src/test/results/positive/iceberg_copy_on_write.q.out b/iceberg/iceberg-handler/src/test/results/positive/iceberg_copy_on_write.q.out
index 83ee890d7b7..4d80795bc15 100644
--- a/iceberg/iceberg-handler/src/test/results/positive/iceberg_copy_on_write.q.out
+++ b/iceberg/iceberg-handler/src/test/results/positive/iceberg_copy_on_write.q.out
@@ -72,14 +72,14 @@ Reducer 2 <- Map 1 (SIMPLE_EDGE), Reducer 4 (SIMPLE_EDGE), 
Union 3 (CONTAINS)
 Reducer 4 <- Map 1 (SIMPLE_EDGE)
 Reducer 6 <- Map 5 (SIMPLE_EDGE), Union 3 (CONTAINS)
 
-Stage-3
+Stage-4
   Stats Work{}
-    Stage-0
+    Stage-1
       Move Operator
         table:{"name:":"default.ice01"}
-        Stage-2
+        Stage-3
           Dependency Collection{}
-            Stage-1
+            Stage-2
               Union 3
               <-Reducer 2 [CONTAINS]
                 File Output Operator [FS_46]
diff --git a/iceberg/iceberg-handler/src/test/results/positive/iceberg_truncate_partition_with_evolution.q.out b/iceberg/iceberg-handler/src/test/results/positive/iceberg_truncate_partition_with_evolution.q.out
index 25d1246361e..3217a146827 100644
--- a/iceberg/iceberg-handler/src/test/results/positive/iceberg_truncate_partition_with_evolution.q.out
+++ b/iceberg/iceberg-handler/src/test/results/positive/iceberg_truncate_partition_with_evolution.q.out
@@ -70,13 +70,13 @@ POSTHOOK: query: explain truncate table test_ice_int 
partition (a = 22)
 POSTHOOK: type: QUERY
 POSTHOOK: Output: default@test_ice_int@a=22
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
-  Stage-3 depends on stages: Stage-0
+  Stage-2 is a root stage
+  Stage-3 depends on stages: Stage-2
+  Stage-1 depends on stages: Stage-3
+  Stage-4 depends on stages: Stage-1
 
 STAGE PLANS:
-  Stage: Stage-1
+  Stage: Stage-2
     Tez
 #### A masked pattern was here ####
       Edges:
@@ -119,10 +119,10 @@ STAGE PLANS:
                       serde: org.apache.iceberg.mr.hive.HiveIcebergSerDe
                       name: default.test_ice_int
 
-  Stage: Stage-2
+  Stage: Stage-3
     Dependency Collection
 
-  Stage: Stage-0
+  Stage: Stage-1
     Move Operator
       tables:
           replace: false
@@ -132,7 +132,7 @@ STAGE PLANS:
               serde: org.apache.iceberg.mr.hive.HiveIcebergSerDe
               name: default.test_ice_int
 
-  Stage: Stage-3
+  Stage: Stage-4
     Stats Work
       Basic Stats Work:
 
@@ -281,13 +281,13 @@ POSTHOOK: query: explain truncate table test_ice_bigint 
partition (a = 226784902
 POSTHOOK: type: QUERY
 POSTHOOK: Output: default@test_ice_bigint@a=226784902765739
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
-  Stage-3 depends on stages: Stage-0
+  Stage-2 is a root stage
+  Stage-3 depends on stages: Stage-2
+  Stage-1 depends on stages: Stage-3
+  Stage-4 depends on stages: Stage-1
 
 STAGE PLANS:
-  Stage: Stage-1
+  Stage: Stage-2
     Tez
 #### A masked pattern was here ####
       Edges:
@@ -330,10 +330,10 @@ STAGE PLANS:
                       serde: org.apache.iceberg.mr.hive.HiveIcebergSerDe
                       name: default.test_ice_bigint
 
-  Stage: Stage-2
+  Stage: Stage-3
     Dependency Collection
 
-  Stage: Stage-0
+  Stage: Stage-1
     Move Operator
       tables:
           replace: false
@@ -343,7 +343,7 @@ STAGE PLANS:
               serde: org.apache.iceberg.mr.hive.HiveIcebergSerDe
               name: default.test_ice_bigint
 
-  Stage: Stage-3
+  Stage: Stage-4
     Stats Work
       Basic Stats Work:
 
@@ -518,13 +518,13 @@ POSTHOOK: query: explain truncate table test_ice_str 
partition (b = 'ddd')
 POSTHOOK: type: QUERY
 POSTHOOK: Output: default@test_ice_str@b=ddd
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
-  Stage-3 depends on stages: Stage-0
+  Stage-2 is a root stage
+  Stage-3 depends on stages: Stage-2
+  Stage-1 depends on stages: Stage-3
+  Stage-4 depends on stages: Stage-1
 
 STAGE PLANS:
-  Stage: Stage-1
+  Stage: Stage-2
     Tez
 #### A masked pattern was here ####
       Edges:
@@ -567,10 +567,10 @@ STAGE PLANS:
                       serde: org.apache.iceberg.mr.hive.HiveIcebergSerDe
                       name: default.test_ice_str
 
-  Stage: Stage-2
+  Stage: Stage-3
     Dependency Collection
 
-  Stage: Stage-0
+  Stage: Stage-1
     Move Operator
       tables:
           replace: false
@@ -580,7 +580,7 @@ STAGE PLANS:
               serde: org.apache.iceberg.mr.hive.HiveIcebergSerDe
               name: default.test_ice_str
 
-  Stage: Stage-3
+  Stage: Stage-4
     Stats Work
       Basic Stats Work:
 
@@ -775,13 +775,13 @@ POSTHOOK: query: explain truncate table test_ice_date 
partition (b = '2022-02-07
 POSTHOOK: type: QUERY
 POSTHOOK: Output: default@test_ice_date@b=2022-02-07
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
-  Stage-3 depends on stages: Stage-0
+  Stage-2 is a root stage
+  Stage-3 depends on stages: Stage-2
+  Stage-1 depends on stages: Stage-3
+  Stage-4 depends on stages: Stage-1
 
 STAGE PLANS:
-  Stage: Stage-1
+  Stage: Stage-2
     Tez
 #### A masked pattern was here ####
       Edges:
@@ -824,10 +824,10 @@ STAGE PLANS:
                       serde: org.apache.iceberg.mr.hive.HiveIcebergSerDe
                       name: default.test_ice_date
 
-  Stage: Stage-2
+  Stage: Stage-3
     Dependency Collection
 
-  Stage: Stage-0
+  Stage: Stage-1
     Move Operator
       tables:
           replace: false
@@ -837,7 +837,7 @@ STAGE PLANS:
               serde: org.apache.iceberg.mr.hive.HiveIcebergSerDe
               name: default.test_ice_date
 
-  Stage: Stage-3
+  Stage: Stage-4
     Stats Work
       Basic Stats Work:
 
@@ -979,13 +979,13 @@ POSTHOOK: query: explain truncate table test_ice_double 
partition (a = 115674892
 POSTHOOK: type: QUERY
 POSTHOOK: Output: default@test_ice_double@a=115674892756.67590946
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
-  Stage-3 depends on stages: Stage-0
+  Stage-2 is a root stage
+  Stage-3 depends on stages: Stage-2
+  Stage-1 depends on stages: Stage-3
+  Stage-4 depends on stages: Stage-1
 
 STAGE PLANS:
-  Stage: Stage-1
+  Stage: Stage-2
     Tez
 #### A masked pattern was here ####
       Edges:
@@ -1028,10 +1028,10 @@ STAGE PLANS:
                       serde: org.apache.iceberg.mr.hive.HiveIcebergSerDe
                       name: default.test_ice_double
 
-  Stage: Stage-2
+  Stage: Stage-3
     Dependency Collection
 
-  Stage: Stage-0
+  Stage: Stage-1
     Move Operator
       tables:
           replace: false
@@ -1041,7 +1041,7 @@ STAGE PLANS:
               serde: org.apache.iceberg.mr.hive.HiveIcebergSerDe
               name: default.test_ice_double
 
-  Stage: Stage-3
+  Stage: Stage-4
     Stats Work
       Basic Stats Work:
 
@@ -1177,13 +1177,13 @@ POSTHOOK: query: explain truncate table 
test_ice_double_date partition (a = 1156
 POSTHOOK: type: QUERY
 POSTHOOK: Output: 
default@test_ice_double_date@a=115674892756.67590946/b=2022-02-07
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
-  Stage-3 depends on stages: Stage-0
+  Stage-2 is a root stage
+  Stage-3 depends on stages: Stage-2
+  Stage-1 depends on stages: Stage-3
+  Stage-4 depends on stages: Stage-1
 
 STAGE PLANS:
-  Stage: Stage-1
+  Stage: Stage-2
     Tez
 #### A masked pattern was here ####
       Edges:
@@ -1225,10 +1225,10 @@ STAGE PLANS:
                       serde: org.apache.iceberg.mr.hive.HiveIcebergSerDe
                       name: default.test_ice_double_date
 
-  Stage: Stage-2
+  Stage: Stage-3
     Dependency Collection
 
-  Stage: Stage-0
+  Stage: Stage-1
     Move Operator
       tables:
           replace: false
@@ -1238,7 +1238,7 @@ STAGE PLANS:
               serde: org.apache.iceberg.mr.hive.HiveIcebergSerDe
               name: default.test_ice_double_date
 
-  Stage: Stage-3
+  Stage: Stage-4
     Stats Work
       Basic Stats Work:
 
diff --git a/iceberg/iceberg-handler/src/test/results/positive/llap/llap_iceberg_read_orc.q.out b/iceberg/iceberg-handler/src/test/results/positive/llap/llap_iceberg_read_orc.q.out
index 442257dd540..46bf3a38bcb 100644
--- a/iceberg/iceberg-handler/src/test/results/positive/llap/llap_iceberg_read_orc.q.out
+++ b/iceberg/iceberg-handler/src/test/results/positive/llap/llap_iceberg_read_orc.q.out
@@ -212,11 +212,11 @@ POSTHOOK: Output: default@target_ice
 PREHOOK: query: DELETE FROM target_ice WHERE a = 1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@target_ice
-PREHOOK: Output: default@target_ice
+#### A masked pattern was here ####
 POSTHOOK: query: DELETE FROM target_ice WHERE a = 1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@target_ice
-POSTHOOK: Output: default@target_ice
+#### A masked pattern was here ####
 PREHOOK: query: SELECT * FROM target_ice
 PREHOOK: type: QUERY
 PREHOOK: Input: default@target_ice
diff --git a/iceberg/iceberg-handler/src/test/results/positive/metadata_delete.q.out b/iceberg/iceberg-handler/src/test/results/positive/metadata_delete.q.out
new file mode 100644
index 00000000000..e2b343c9d04
--- /dev/null
+++ b/iceberg/iceberg-handler/src/test/results/positive/metadata_delete.q.out
@@ -0,0 +1,501 @@
+PREHOOK: query: create table ice_date (a int, b date) stored by iceberg stored 
as orc tblproperties ('format-version'='2')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@ice_date
+POSTHOOK: query: create table ice_date (a int, b date) stored by iceberg 
stored as orc tblproperties ('format-version'='2')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@ice_date
+PREHOOK: query: insert into table ice_date values (1, '2021-01-01')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice_date
+POSTHOOK: query: insert into table ice_date values (1, '2021-01-01')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice_date
+PREHOOK: query: insert into table ice_date values (2, '2022-02-02'), (3, 
'2022-03-03')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice_date
+POSTHOOK: query: insert into table ice_date values (2, '2022-02-02'), (3, 
'2022-03-03')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice_date
+PREHOOK: query: delete from ice_date where b = '2022-02-02'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ice_date
+PREHOOK: Output: default@ice_date
+POSTHOOK: query: delete from ice_date where b = '2022-02-02'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ice_date
+POSTHOOK: Output: default@ice_date
+PREHOOK: query: delete from ice_date where a = 1 and b = '2021-01-01'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ice_date
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: delete from ice_date where a = 1 and b = '2021-01-01'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ice_date
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+PREHOOK: query: select * from ice_date
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ice_date
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select * from ice_date
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ice_date
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+3      2022-03-03
+PREHOOK: query: create table ice_date_year (a int, b date) stored by iceberg 
stored as orc tblproperties ('format-version'='2')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@ice_date_year
+POSTHOOK: query: create table ice_date_year (a int, b date) stored by iceberg 
stored as orc tblproperties ('format-version'='2')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@ice_date_year
+PREHOOK: query: insert into table ice_date_year values (1, '2021-01-01')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice_date_year
+POSTHOOK: query: insert into table ice_date_year values (1, '2021-01-01')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice_date_year
+PREHOOK: query: insert into table ice_date_year values (2, '2022-02-02'), (3, 
'2022-03-03')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice_date_year
+POSTHOOK: query: insert into table ice_date_year values (2, '2022-02-02'), (3, 
'2022-03-03')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice_date_year
+PREHOOK: query: delete from ice_date_year where year(b) = 2022
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ice_date_year
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: delete from ice_date_year where year(b) = 2022
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ice_date_year
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+PREHOOK: query: select * from ice_date_year
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ice_date_year
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select * from ice_date_year
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ice_date_year
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1      2021-01-01
+PREHOOK: query: create table ice_str_name (first_name string, last_name 
string) stored by iceberg stored as orc tblproperties ('format-version'='2')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@ice_str_name
+POSTHOOK: query: create table ice_str_name (first_name string, last_name 
string) stored by iceberg stored as orc tblproperties ('format-version'='2')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@ice_str_name
+PREHOOK: query: insert into table ice_str_name values ('Alex', 'Clark')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice_str_name
+POSTHOOK: query: insert into table ice_str_name values ('Alex', 'Clark')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice_str_name
+PREHOOK: query: insert into table ice_str_name values ('Bob', 'Bob')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice_str_name
+POSTHOOK: query: insert into table ice_str_name values ('Bob', 'Bob')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice_str_name
+PREHOOK: query: delete from ice_str_name where first_name = last_name
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ice_str_name
+PREHOOK: Output: default@ice_str_name
+POSTHOOK: query: delete from ice_str_name where first_name = last_name
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ice_str_name
+POSTHOOK: Output: default@ice_str_name
+PREHOOK: query: select * from ice_str_name
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ice_str_name
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select * from ice_str_name
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ice_str_name
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+Alex   Clark
+PREHOOK: query: create table ice_int_id (first_id int, last_id int) stored by 
iceberg stored as orc tblproperties ('format-version'='2')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@ice_int_id
+POSTHOOK: query: create table ice_int_id (first_id int, last_id int) stored by 
iceberg stored as orc tblproperties ('format-version'='2')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@ice_int_id
+PREHOOK: query: insert into table ice_int_id values (7, 9)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice_int_id
+POSTHOOK: query: insert into table ice_int_id values (7, 9)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice_int_id
+PREHOOK: query: insert into table ice_int_id values (8, 8)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice_int_id
+POSTHOOK: query: insert into table ice_int_id values (8, 8)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice_int_id
+PREHOOK: query: delete from ice_int_id where first_id = last_id
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ice_int_id
+PREHOOK: Output: default@ice_int_id
+POSTHOOK: query: delete from ice_int_id where first_id = last_id
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ice_int_id
+POSTHOOK: Output: default@ice_int_id
+PREHOOK: query: select * from ice_int_id
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ice_int_id
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select * from ice_int_id
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ice_int_id
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+7      9
+PREHOOK: query: create table ice_branch_metadata_delete (a int, b string) 
stored by iceberg stored as orc tblproperties ('format-version'='2')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@ice_branch_metadata_delete
+POSTHOOK: query: create table ice_branch_metadata_delete (a int, b string) 
stored by iceberg stored as orc tblproperties ('format-version'='2')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@ice_branch_metadata_delete
+PREHOOK: query: insert into table ice_branch_metadata_delete values (1, 'ABC')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice_branch_metadata_delete
+POSTHOOK: query: insert into table ice_branch_metadata_delete values (1, 'ABC')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice_branch_metadata_delete
+PREHOOK: query: insert into table ice_branch_metadata_delete values (2, 'DEF')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice_branch_metadata_delete
+POSTHOOK: query: insert into table ice_branch_metadata_delete values (2, 'DEF')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice_branch_metadata_delete
+PREHOOK: query: insert into table ice_branch_metadata_delete values (3, 'GHI')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice_branch_metadata_delete
+POSTHOOK: query: insert into table ice_branch_metadata_delete values (3, 'GHI')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice_branch_metadata_delete
+PREHOOK: query: insert into table ice_branch_metadata_delete values (4, 'JKL')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice_branch_metadata_delete
+POSTHOOK: query: insert into table ice_branch_metadata_delete values (4, 'JKL')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice_branch_metadata_delete
+PREHOOK: query: alter table ice_branch_metadata_delete create branch test01
+PREHOOK: type: ALTERTABLE_CREATEBRANCH
+PREHOOK: Input: default@ice_branch_metadata_delete
+POSTHOOK: query: alter table ice_branch_metadata_delete create branch test01
+POSTHOOK: type: ALTERTABLE_CREATEBRANCH
+POSTHOOK: Input: default@ice_branch_metadata_delete
+PREHOOK: query: delete from default.ice_branch_metadata_delete.branch_test01 
where a = 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ice_branch_metadata_delete
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: delete from default.ice_branch_metadata_delete.branch_test01 
where a = 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ice_branch_metadata_delete
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+PREHOOK: query: select * from default.ice_branch_metadata_delete.branch_test01
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ice_branch_metadata_delete
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select * from default.ice_branch_metadata_delete.branch_test01
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ice_branch_metadata_delete
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+2      DEF
+3      GHI
+4      JKL
+PREHOOK: query: alter table ice_branch_metadata_delete drop branch test01
+PREHOOK: type: ALTERTABLE_DROPBRANCH
+PREHOOK: Input: default@ice_branch_metadata_delete
+POSTHOOK: query: alter table ice_branch_metadata_delete drop branch test01
+POSTHOOK: type: ALTERTABLE_DROPBRANCH
+POSTHOOK: Input: default@ice_branch_metadata_delete
+PREHOOK: query: create table ice_delete_multiple_table1 (a int, b string) 
stored by iceberg stored as orc tblproperties ('format-version' = '2')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@ice_delete_multiple_table1
+POSTHOOK: query: create table ice_delete_multiple_table1 (a int, b string) 
stored by iceberg stored as orc tblproperties ('format-version' = '2')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@ice_delete_multiple_table1
+PREHOOK: query: create table ice_delete_multiple_table2 (a int, b string) 
stored by iceberg stored as orc tblproperties ('format-version' = '2')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@ice_delete_multiple_table2
+POSTHOOK: query: create table ice_delete_multiple_table2 (a int, b string) 
stored by iceberg stored as orc tblproperties ('format-version' = '2')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@ice_delete_multiple_table2
+PREHOOK: query: insert into table ice_delete_multiple_table1 values (1, 
'ABC'), (2, 'DEF'), (3, 'GHI')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice_delete_multiple_table1
+POSTHOOK: query: insert into table ice_delete_multiple_table1 values (1, 
'ABC'), (2, 'DEF'), (3, 'GHI')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice_delete_multiple_table1
+PREHOOK: query: insert into table ice_delete_multiple_table1 values (4, 
'GHI'), (5, 'JKL'), (6, 'PQR')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice_delete_multiple_table1
+POSTHOOK: query: insert into table ice_delete_multiple_table1 values (4, 
'GHI'), (5, 'JKL'), (6, 'PQR')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice_delete_multiple_table1
+PREHOOK: query: insert into table ice_delete_multiple_table2 values (1, 
'ABC'), (2, 'DEF'), (3, 'GHI')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice_delete_multiple_table2
+POSTHOOK: query: insert into table ice_delete_multiple_table2 values (1, 
'ABC'), (2, 'DEF'), (3, 'GHI')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice_delete_multiple_table2
+PREHOOK: query: insert into table ice_delete_multiple_table2 values (4, 
'GHI'), (5, 'JKL'), (6, 'PQR')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice_delete_multiple_table2
+POSTHOOK: query: insert into table ice_delete_multiple_table2 values (4, 
'GHI'), (5, 'JKL'), (6, 'PQR')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice_delete_multiple_table2
+PREHOOK: query: delete from ice_delete_multiple_table2 where 
ice_delete_multiple_table2.a in (select ice_delete_multiple_table1.a from 
ice_delete_multiple_table1 where ice_delete_multiple_table1.b = 'GHI')
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ice_delete_multiple_table1
+PREHOOK: Input: default@ice_delete_multiple_table2
+PREHOOK: Output: default@ice_delete_multiple_table2
+POSTHOOK: query: delete from ice_delete_multiple_table2 where 
ice_delete_multiple_table2.a in (select ice_delete_multiple_table1.a from 
ice_delete_multiple_table1 where ice_delete_multiple_table1.b = 'GHI')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ice_delete_multiple_table1
+POSTHOOK: Input: default@ice_delete_multiple_table2
+POSTHOOK: Output: default@ice_delete_multiple_table2
+PREHOOK: query: select * from ice_delete_multiple_table2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ice_delete_multiple_table2
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select * from ice_delete_multiple_table2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ice_delete_multiple_table2
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1      ABC
+2      DEF
+5      JKL
+6      PQR
+PREHOOK: query: create table test_delete_config (a int, b int) stored by 
iceberg stored as orc tblproperties ('format-version'='2')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@test_delete_config
+POSTHOOK: query: create table test_delete_config (a int, b int) stored by 
iceberg stored as orc tblproperties ('format-version'='2')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@test_delete_config
+PREHOOK: query: insert into table test_delete_config values (1,2), (3,4)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@test_delete_config
+POSTHOOK: query: insert into table test_delete_config values (1,2), (3,4)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@test_delete_config
+PREHOOK: query: explain delete from test_delete_config where b < 5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_delete_config
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: explain delete from test_delete_config where b < 5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_delete_config
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-1
+    Execute operation
+      table name: default.test_delete_config
+      spec: AlterTableExecuteSpec{operationType=DELETE_METADATA, 
operationParams=org.apache.hadoop.hive.ql.parse.AlterTableExecuteSpec$#Masked#
+
+PREHOOK: query: explain delete from test_delete_config where b < 5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_delete_config
+PREHOOK: Output: default@test_delete_config
+POSTHOOK: query: explain delete from test_delete_config where b < 5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_delete_config
+POSTHOOK: Output: default@test_delete_config
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: test_delete_config
+                  filterExpr: (b < 5) (type: boolean)
+                  Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE 
Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (b < 5) (type: boolean)
+                    Statistics: Num rows: 2 Data size: 16 Basic stats: 
COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: PARTITION__SPEC__ID (type: int), 
PARTITION__HASH (type: bigint), FILE__PATH (type: string), ROW__POSITION (type: 
bigint), a (type: int), b (type: int)
+                      outputColumnNames: _col0, _col1, _col2, _col3, _col4, 
_col5
+                      Statistics: Num rows: 2 Data size: 424 Basic stats: 
COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int), _col1 (type: 
bigint), _col2 (type: string), _col3 (type: bigint)
+                        null sort order: zzzz
+                        sort order: ++++
+                        Statistics: Num rows: 2 Data size: 424 Basic stats: 
COMPLETE Column stats: COMPLETE
+                        value expressions: _col4 (type: int), _col5 (type: int)
+            Execution mode: vectorized
+        Reducer 2 
+            Execution mode: vectorized
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), 
KEY.reducesinkkey1 (type: bigint), KEY.reducesinkkey2 (type: string), 
KEY.reducesinkkey3 (type: bigint), VALUE._col0 (type: int), VALUE._col1 (type: 
int)
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                Statistics: Num rows: 2 Data size: 424 Basic stats: COMPLETE 
Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 2 Data size: 424 Basic stats: COMPLETE 
Column stats: COMPLETE
+                  table:
+                      input format: 
org.apache.iceberg.mr.hive.HiveIcebergInputFormat
+                      output format: 
org.apache.iceberg.mr.hive.HiveIcebergOutputFormat
+                      serde: org.apache.iceberg.mr.hive.HiveIcebergSerDe
+                      name: default.test_delete_config
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: false
+          table:
+              input format: org.apache.iceberg.mr.hive.HiveIcebergInputFormat
+              output format: org.apache.iceberg.mr.hive.HiveIcebergOutputFormat
+              serde: org.apache.iceberg.mr.hive.HiveIcebergSerDe
+              name: default.test_delete_config
+
+  Stage: Stage-3
+    Stats Work
+      Basic Stats Work:
+
+PREHOOK: query: drop table ice_date
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@ice_date
+PREHOOK: Output: database:default
+PREHOOK: Output: default@ice_date
+POSTHOOK: query: drop table ice_date
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@ice_date
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@ice_date
+PREHOOK: query: drop table ice_date_year
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@ice_date_year
+PREHOOK: Output: database:default
+PREHOOK: Output: default@ice_date_year
+POSTHOOK: query: drop table ice_date_year
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@ice_date_year
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@ice_date_year
+PREHOOK: query: drop table ice_str_name
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@ice_str_name
+PREHOOK: Output: database:default
+PREHOOK: Output: default@ice_str_name
+POSTHOOK: query: drop table ice_str_name
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@ice_str_name
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@ice_str_name
+PREHOOK: query: drop table ice_int_id
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@ice_int_id
+PREHOOK: Output: database:default
+PREHOOK: Output: default@ice_int_id
+POSTHOOK: query: drop table ice_int_id
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@ice_int_id
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@ice_int_id
+PREHOOK: query: drop table ice_branch_metadata_delete
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@ice_branch_metadata_delete
+PREHOOK: Output: database:default
+PREHOOK: Output: default@ice_branch_metadata_delete
+POSTHOOK: query: drop table ice_branch_metadata_delete
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@ice_branch_metadata_delete
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@ice_branch_metadata_delete
+PREHOOK: query: drop table ice_delete_multiple_table1
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@ice_delete_multiple_table1
+PREHOOK: Output: database:default
+PREHOOK: Output: default@ice_delete_multiple_table1
+POSTHOOK: query: drop table ice_delete_multiple_table1
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@ice_delete_multiple_table1
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@ice_delete_multiple_table1
+PREHOOK: query: drop table ice_delete_multiple_table2
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@ice_delete_multiple_table2
+PREHOOK: Output: database:default
+PREHOOK: Output: default@ice_delete_multiple_table2
+POSTHOOK: query: drop table ice_delete_multiple_table2
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@ice_delete_multiple_table2
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@ice_delete_multiple_table2
+PREHOOK: query: drop table test_delete_config
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@test_delete_config
+PREHOOK: Output: database:default
+PREHOOK: Output: default@test_delete_config
+POSTHOOK: query: drop table test_delete_config
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@test_delete_config
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@test_delete_config
diff --git a/iceberg/iceberg-handler/src/test/results/positive/vectorized_iceberg_merge_mixed.q.out b/iceberg/iceberg-handler/src/test/results/positive/vectorized_iceberg_merge_mixed.q.out
index 44d5690808e..c031030ee56 100644
--- a/iceberg/iceberg-handler/src/test/results/positive/vectorized_iceberg_merge_mixed.q.out
+++ b/iceberg/iceberg-handler/src/test/results/positive/vectorized_iceberg_merge_mixed.q.out
@@ -83,11 +83,11 @@ POSTHOOK: Output: default@store_sales
 PREHOOK: query: delete from store_sales where ss_customer_sk > 2
 PREHOOK: type: QUERY
 PREHOOK: Input: default@store_sales
-PREHOOK: Output: default@store_sales
+PREHOOK: Output: hdfs://### HDFS PATH ###
 POSTHOOK: query: delete from store_sales where ss_customer_sk > 2
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@store_sales
-POSTHOOK: Output: default@store_sales
+POSTHOOK: Output: hdfs://### HDFS PATH ###
 PREHOOK: query: select count(*) from store_sales
 PREHOOK: type: QUERY
 PREHOOK: Input: default@store_sales
@@ -533,7 +533,7 @@ STAGE PLANS:
                 TableScan
                   alias: store_sales
                   filterExpr: ((ss_sold_date_sk = 2451181) and ss_item_sk is 
not null and ss_customer_sk is not null) (type: boolean)
-                  Statistics: Num rows: 2 Data size: 35#### Basic stats: 
COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2 Data size: 11#### Basic stats: 
COMPLETE Column stats: COMPLETE
                   TableScan Vectorization:
                       native: true
                       vectorizationSchemaColumns: [0:ss_sold_date_sk:int, 
1:ss_sold_time_sk:int, 2:ss_item_sk:int, 3:ss_customer_sk:int, 
4:ss_cdemo_sk:int, 5:ss_hdemo_sk:int, 6:ss_addr_sk:int, 7:ss_store_sk:int, 
8:ss_promo_sk:int, 9:ss_ticket_number:int, 10:ss_quantity:int, 
11:ss_wholesale_cost:decimal(7,2), 12:ss_list_price:decimal(7,2), 
13:ss_sales_price:decimal(7,2), 14:ss_ext_discount_amt:decimal(7,2), 
15:ss_ext_sales_price:decimal(7,2), 16:ss_ext_wholesale_cost:decimal(7,2), 
17:ss_e [...]
@@ -957,7 +957,7 @@ Stage-6
                                 
Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26"]
                                 Filter Operator [FIL_46] (rows=2 width=700)
                                   predicate:((ss_sold_date_sk = 2451181) and 
ss_item_sk is not null and ss_customer_sk is not null)
-                                  TableScan [TS_2] (rows=2 width=17####)
+                                  TableScan [TS_2] (rows=2 width=58###)
                                     
default@store_sales,store_sales,Tbl:COMPLETE,Col:COMPLETE,Output:["ss_sold_date_sk","ss_sold_time_sk","ss_item_sk","ss_customer_sk","ss_cdemo_sk","ss_hdemo_sk","ss_addr_sk","ss_store_sk","ss_promo_sk","ss_ticket_number","ss_quantity","ss_wholesale_cost","ss_list_price","ss_sales_price","ss_ext_discount_amt","ss_ext_sales_price","ss_ext_wholesale_cost","ss_ext_list_price","ss_ext_tax","ss_coupon_amt","ss_net_paid","ss_net_paid_inc_tax","ss_net_profit"]
                           <-Select Operator [SEL_49] (rows=5 width=380)
                               
Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23"]
diff --git a/iceberg/iceberg-handler/src/test/results/positive/write_iceberg_branch.q.out b/iceberg/iceberg-handler/src/test/results/positive/write_iceberg_branch.q.out
index dc6fcbfbf7e..dbafa73aa52 100644
--- a/iceberg/iceberg-handler/src/test/results/positive/write_iceberg_branch.q.out
+++ b/iceberg/iceberg-handler/src/test/results/positive/write_iceberg_branch.q.out
@@ -218,13 +218,13 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@ice01
 POSTHOOK: Output: default@ice01
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
-  Stage-3 depends on stages: Stage-0
+  Stage-2 is a root stage
+  Stage-3 depends on stages: Stage-2
+  Stage-1 depends on stages: Stage-3
+  Stage-4 depends on stages: Stage-1
 
 STAGE PLANS:
-  Stage: Stage-1
+  Stage: Stage-2
     Tez
 #### A masked pattern was here ####
       Edges:
@@ -268,10 +268,10 @@ STAGE PLANS:
                       serde: org.apache.iceberg.mr.hive.HiveIcebergSerDe
                       name: default.ice01
 
-  Stage: Stage-2
+  Stage: Stage-3
     Dependency Collection
 
-  Stage: Stage-0
+  Stage: Stage-1
     Move Operator
       tables:
           replace: false
@@ -281,7 +281,7 @@ STAGE PLANS:
               serde: org.apache.iceberg.mr.hive.HiveIcebergSerDe
               name: default.ice01
 
-  Stage: Stage-3
+  Stage: Stage-4
     Stats Work
       Basic Stats Work:
 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/sarg/ConvertAstToSearchArg.java b/ql/src/java/org/apache/hadoop/hive/ql/io/sarg/ConvertAstToSearchArg.java
index 0de48cca843..de6f88b932c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/sarg/ConvertAstToSearchArg.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/sarg/ConvertAstToSearchArg.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.hive.ql.io.sarg;
 import java.sql.Date;
 import java.sql.Timestamp;
 import java.time.LocalDate;
-import java.util.GregorianCalendar;
 import java.util.List;
 import java.util.concurrent.ExecutionException;
 
@@ -52,7 +51,6 @@ import 
org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPNotEqual;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPNotNull;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPNull;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPOr;
-import org.apache.hadoop.hive.serde2.io.DateWritable;
 import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
@@ -74,6 +72,7 @@ public class ConvertAstToSearchArg {
 
   private final SearchArgument.Builder builder;
   private final Configuration conf;
+  private boolean partial = false;
 
   /*
    * Create a new type for handling precision conversions from Decimal -> 
Double/Float
@@ -106,6 +105,14 @@ public class ConvertAstToSearchArg {
     parse(expression);
   }
 
+  /**
+   * Returns whether the given expression is partially converted to a search 
argument from the hive filter.
+   * @return True if the expression is partially converted, otherwise false.
+   */
+  public boolean isPartial() {
+    return partial;
+  }
+
   /**
    * Build the search argument from the expression.
    * @return the search argument
@@ -316,11 +323,13 @@ public class ConvertAstToSearchArg {
     String columnName = getColumnName(expression, variable);
     if (columnName == null) {
       builder.literal(SearchArgument.TruthValue.YES_NO_NULL);
+      partial = true;
       return;
     }
     BoxType boxType = getType(expression.getChildren().get(variable));
     if (boxType == null) {
       builder.literal(SearchArgument.TruthValue.YES_NO_NULL);
+      partial = true;
       return;
     }
 
@@ -370,6 +379,7 @@ public class ConvertAstToSearchArg {
       LOG.warn("Exception thrown during SARG creation. Returning YES_NO_NULL." 
+
           " Exception: " + e.getMessage());
       builder.literal(SearchArgument.TruthValue.YES_NO_NULL);
+      partial = true;
     }
 
     if (needSwap) {
@@ -438,6 +448,7 @@ public class ConvertAstToSearchArg {
 
       // otherwise, we don't know what to do so make it a maybe
       builder.literal(SearchArgument.TruthValue.YES_NO_NULL);
+      partial = true;
       return;
     }
 
@@ -499,6 +510,7 @@ public class ConvertAstToSearchArg {
       // otherwise, we didn't understand it, so mark it maybe
     } else {
       builder.literal(SearchArgument.TruthValue.YES_NO_NULL);
+      partial = true;
     }
   }
 
@@ -556,6 +568,11 @@ public class ConvertAstToSearchArg {
     return new ConvertAstToSearchArg(conf, expression).buildSearchArgument();
   }
 
+  public static ConvertAstToSearchArg.Result 
createSearchArgument(Configuration conf, ExprNodeGenericFuncDesc expression) {
+    ConvertAstToSearchArg convertAstToSearchArg = new 
ConvertAstToSearchArg(conf, expression);
+    return new 
ConvertAstToSearchArg.Result(convertAstToSearchArg.buildSearchArgument(), 
convertAstToSearchArg.isPartial());
+  }
+
   private final static ThreadLocal<Kryo> kryo = new ThreadLocal<Kryo>() {
     protected Kryo initialValue() { return 
SerializationUtilities.createNewKryo(); }
   };
@@ -591,4 +608,22 @@ public class ConvertAstToSearchArg {
     }
   }
 
+  public static final class Result {
+    private final SearchArgument sarg;
+    private final boolean partial;
+
+    Result(SearchArgument sarg, boolean partial) {
+      this.sarg = sarg;
+      this.partial = partial;
+    }
+
+    public SearchArgument getSearchArgument() {
+      return sarg;
+    }
+
+    public boolean isPartial() {
+      return partial;
+    }
+  }
+
 }
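
Note on the new ConvertAstToSearchArg.Result API added above: callers that need to know whether
the WHERE clause was fully captured by the SARG can call createSearchArgument() and check
isPartial() before acting on the result. The sketch below is illustrative only; the helper class
and the way the filter expression is obtained are assumptions, not part of this patch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.ql.io.sarg.ConvertAstToSearchArg;
    import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
    import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;

    // Hypothetical helper (not part of this commit): converts a Hive filter into a
    // SearchArgument and returns null when the conversion was only partial, i.e. some
    // predicate degraded to YES_NO_NULL and the SARG is weaker than the original filter.
    final class SargConversionSketch {
      static SearchArgument fullSargOrNull(Configuration conf, ExprNodeGenericFuncDesc filter) {
        ConvertAstToSearchArg.Result result =
            ConvertAstToSearchArg.createSearchArgument(conf, filter);
        return result.isPartial() ? null : result.getSearchArgument();
      }
    }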
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveStorageHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveStorageHandler.java
index b52e9d36c79..e05ecfb5a50 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveStorageHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveStorageHandler.java
@@ -46,6 +46,7 @@ import 
org.apache.hadoop.hive.ql.ddl.table.create.like.CreateTableLikeDesc;
 import org.apache.hadoop.hive.ql.exec.ColumnInfo;
 import org.apache.hadoop.hive.ql.hooks.WriteEntity;
 import org.apache.hadoop.hive.ql.io.StorageFormatDescriptor;
+import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
 import org.apache.hadoop.hive.ql.parse.AlterTableSnapshotRefSpec;
 import org.apache.hadoop.hive.ql.parse.AlterTableExecuteSpec;
 import org.apache.hadoop.hive.ql.parse.StorageFormat.StorageHandlerTypes;
@@ -723,4 +724,8 @@ public interface HiveStorageHandler extends Configurable {
             "for a specific column.");
   }
 
+  default boolean 
canPerformMetadataDelete(org.apache.hadoop.hive.ql.metadata.Table hmsTable, 
String branchName,
+    SearchArgument searchArgument) {
+    return false;
+  }
 }
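
The default canPerformMetadataDelete() above returns false, so only storage handlers that can
evaluate a filter purely against table metadata (the Iceberg handler in this patch) opt in by
overriding it. A minimal sketch of how a caller might consult the hook follows; the class and
variable names are assumptions for illustration, not code from this commit:

    import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
    import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler;
    import org.apache.hadoop.hive.ql.metadata.Table;

    // Hypothetical probe: decide whether a DELETE can be answered from metadata alone.
    final class MetadataDeleteProbe {
      static boolean canUseMetadataDelete(Table table, String branchName, SearchArgument sarg) {
        HiveStorageHandler handler = table.getStorageHandler();
        // Tables without a handler, or handlers keeping the default implementation,
        // fall back to the regular rewrite-based delete path.
        return handler != null && sarg != null
            && handler.canPerformMetadataDelete(table, branchName, sarg);
      }
    }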
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/AlterTableExecuteSpec.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/AlterTableExecuteSpec.java
index 5102959f087..b3c8edd9d87 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/AlterTableExecuteSpec.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/AlterTableExecuteSpec.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hive.ql.parse;
 
 import com.google.common.base.MoreObjects;
+import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
 
 import java.util.Arrays;
 
@@ -38,7 +39,8 @@ public class AlterTableExecuteSpec<T> {
     EXPIRE_SNAPSHOT,
     SET_CURRENT_SNAPSHOT,
     FAST_FORWARD,
-    CHERRY_PICK;
+    CHERRY_PICK,
+    DELETE_METADATA;
   }
 
   private final ExecuteOperationType operationType;
@@ -234,4 +236,22 @@ public class AlterTableExecuteSpec<T> {
       return MoreObjects.toStringHelper(this).add("snapshotId", 
snapshotId).toString();
     }
   }
+
+  public static class DeleteMetadataSpec {
+    private final String branchName;
+    private final SearchArgument sarg;
+
+    public DeleteMetadataSpec(String branchName, SearchArgument sarg) {
+      this.branchName = branchName;
+      this.sarg = sarg;
+    }
+
+    public String getBranchName() {
+      return branchName;
+    }
+
+    public SearchArgument getSarg() {
+      return sarg;
+    }
+  }
 }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java
index 3d805021782..9ed1cc2db03 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java
@@ -17,22 +17,31 @@
  */
 package org.apache.hadoop.hive.ql.parse;
 
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.LinkedHashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
+import org.apache.hadoop.hive.common.TableName;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.ql.Context;
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.QueryState;
+import org.apache.hadoop.hive.ql.ddl.DDLWork;
+import org.apache.hadoop.hive.ql.ddl.table.execute.AlterTableExecuteDesc;
+import org.apache.hadoop.hive.ql.exec.TableScanOperator;
+import org.apache.hadoop.hive.ql.exec.TaskFactory;
 import org.apache.hadoop.hive.ql.io.AcidUtils;
+import org.apache.hadoop.hive.ql.io.sarg.ConvertAstToSearchArg;
+import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
 import org.apache.hadoop.hive.ql.lib.Node;
 import org.apache.hadoop.hive.ql.metadata.HiveUtils;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.parse.ParseUtils.ReparseResult;
+import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
 
 /**
  * A subclass of the {@link org.apache.hadoop.hive.ql.parse.SemanticAnalyzer} 
that just handles
@@ -61,7 +70,9 @@ public class UpdateDeleteSemanticAnalyzer extends 
RewriteSemanticAnalyzer {
     switch (tree.getToken().getType()) {
     case HiveParser.TOK_DELETE_FROM:
       operation = Context.Operation.DELETE;
-      reparseAndSuperAnalyze(tree, table, tabNameNode);
+      if (!tryMetadataUpdate(tree, table, tabNameNode)) {
+        reparseAndSuperAnalyze(tree, table, tabNameNode);
+      }
       break;
     case HiveParser.TOK_UPDATE_TABLE:
       boolean nonNativeAcid = AcidUtils.isNonNativeAcidTable(table, true);
@@ -99,7 +110,7 @@ public class UpdateDeleteSemanticAnalyzer extends 
RewriteSemanticAnalyzer {
   private void reparseAndSuperAnalyze(ASTNode tree, Table mTable, ASTNode 
tabNameNode) throws SemanticException {
     List<? extends Node> children = tree.getChildren();
     
-    boolean shouldTruncate = HiveConf.getBoolVar(conf, 
HiveConf.ConfVars.HIVE_OPTIMIZE_REPLACE_DELETE_WITH_TRUNCATE) 
+    boolean shouldTruncate = HiveConf.getBoolVar(conf, 
HiveConf.ConfVars.HIVE_OPTIMIZE_REPLACE_DELETE_WITH_TRUNCATE)
       && children.size() == 1 && deleting();
     if (shouldTruncate) {
       StringBuilder rewrittenQueryStr = new StringBuilder("truncate 
").append(getFullTableNameForSQL(tabNameNode));
@@ -164,15 +175,15 @@ public class UpdateDeleteSemanticAnalyzer extends 
RewriteSemanticAnalyzer {
 
     rewrittenQueryStr.append(" from ");
     rewrittenQueryStr.append(getFullTableNameForSQL(tabNameNode));
-    
+
     ASTNode where = null;
     int whereIndex = deleting() ? 1 : 2;
-    
+
     if (children.size() > whereIndex) {
       where = (ASTNode)children.get(whereIndex);
       assert where.getToken().getType() == HiveParser.TOK_WHERE :
           "Expected where clause, but found " + where.getName();
-      
+
       if (copyOnWriteMode) {
         String whereClause = ctx.getTokenRewriteStream().toString(
             where.getChild(0).getTokenStartIndex(), 
where.getChild(0).getTokenStopIndex());
@@ -213,11 +224,11 @@ public class UpdateDeleteSemanticAnalyzer extends 
RewriteSemanticAnalyzer {
         withQueryStr.append(") q");
         withQueryStr.append("\n").append(INDENT);
         withQueryStr.append("where rn=1\n)\n");
-        
+
         rewrittenQueryStr.insert(0, withQueryStr.toString());
       }
     }
-    
+
     if (!copyOnWriteMode) {
       // Add a sort by clause so that the row ids come out in the correct order
       appendSortBy(rewrittenQueryStr, columnAppender.getSortKeys());
@@ -230,7 +241,7 @@ public class UpdateDeleteSemanticAnalyzer extends 
RewriteSemanticAnalyzer {
       new ASTSearcher().simpleBreadthFirstSearch(rewrittenTree, 
HiveParser.TOK_FROM, HiveParser.TOK_SUBQUERY,
           HiveParser.TOK_UNIONALL).getChild(0).getChild(0) : rewrittenTree)
       .getChild(1);
-    
+
     if (updating()) {
       rewrittenCtx.setOperation(Context.Operation.UPDATE);
       rewrittenCtx.addDestNamePrefix(1, Context.DestClausePrefix.UPDATE);
@@ -295,6 +306,65 @@ public class UpdateDeleteSemanticAnalyzer extends 
RewriteSemanticAnalyzer {
     }
   }
 
+  private boolean tryMetadataUpdate(ASTNode tree, Table table, ASTNode 
tabNameNode) throws SemanticException {
+    // Feature flag in Hive that gates metadata-only delete on the source table.
+    if (!HiveConf.getBoolVar(conf, 
HiveConf.ConfVars.HIVE_OPTIMIZE_METADATA_DELETE)) {
+      return false;
+    }
+    TableName tableName = getQualifiedTableName(tabNameNode);
+    if (!deleting() || table.getStorageHandler() == null) {
+      return false;
+    }
+    int whereIndex = 1;
+    List<? extends Node> children = tree.getChildren();
+    if (children.size() <= whereIndex) {
+      return false;
+    }
+    ASTNode whereNode = (ASTNode) children.get(whereIndex);
+    String whereClause = ctx.getTokenRewriteStream().toString(
+        whereNode.getChild(0).getTokenStartIndex(), 
whereNode.getChild(0).getTokenStopIndex());
+    StringBuilder sb = new StringBuilder("select * from 
").append(getFullTableNameForSQL(tabNameNode))
+        .append(" where ").append(whereClause);
+    Context context = new Context(conf);
+    ASTNode rewrittenTree;
+    try {
+      rewrittenTree = ParseUtils.parse(sb.toString(), context);
+    } catch (ParseException pe) {
+      throw new SemanticException(pe);
+    }
+    BaseSemanticAnalyzer sem = SemanticAnalyzerFactory.get(queryState, 
rewrittenTree);
+    sem.analyze(rewrittenTree, context);
+
+    Map<String, TableScanOperator> topOps = sem.getParseContext().getTopOps();
+    if (!topOps.containsKey(table.getTableName())) {
+      return false;
+    }
+    ExprNodeGenericFuncDesc hiveFilter = 
topOps.get(table.getTableName()).getConf().getFilterExpr();
+    if (hiveFilter == null) {
+      return false;
+    }
+    ConvertAstToSearchArg.Result result = 
ConvertAstToSearchArg.createSearchArgument(ctx.getConf(), hiveFilter);
+    if (result.isPartial()) {
+      return false;
+    }
+    SearchArgument sarg = result.getSearchArgument();
+    if (!table.getStorageHandler().canPerformMetadataDelete(table, 
tableName.getTableMetaRef(), sarg)) {
+      return false;
+    }
+
+    AlterTableExecuteSpec.DeleteMetadataSpec deleteMetadataSpec =
+        new 
AlterTableExecuteSpec.DeleteMetadataSpec(tableName.getTableMetaRef(), sarg);
+    AlterTableExecuteSpec<AlterTableExecuteSpec.DeleteMetadataSpec> 
executeSpec =
+        new 
AlterTableExecuteSpec<>(AlterTableExecuteSpec.ExecuteOperationType.DELETE_METADATA,
 deleteMetadataSpec);
+    AlterTableExecuteDesc desc = new AlterTableExecuteDesc(tableName, null, 
executeSpec);
+    DDLWork ddlWork = new DDLWork(getInputs(), getOutputs(), desc);
+    rootTasks = Collections.singletonList(TaskFactory.get(ddlWork));
+    inputs = sem.getInputs();
+    outputs = sem.getOutputs();
+    updateOutputs(table);
+    return true;
+  }
+
   private boolean updating() {
     return operation == Context.Operation.UPDATE;
   }
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/sarg/TestConvertAstToSearchArg.java b/ql/src/test/org/apache/hadoop/hive/ql/io/sarg/TestConvertAstToSearchArg.java
index d02c57fd1d2..f2e13c53a3e 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/sarg/TestConvertAstToSearchArg.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/sarg/TestConvertAstToSearchArg.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hive.ql.io.sarg;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
@@ -555,8 +556,10 @@ public class TestConvertAstToSearchArg {
         " </object> \n" +
         "</java> \n";
 
+    ConvertAstToSearchArg.Result result = 
ConvertAstToSearchArg.createSearchArgument(conf, getFuncDesc(exprStr));
+    assertFalse(result.isPartial());
     SearchArgumentImpl sarg =
-        (SearchArgumentImpl) ConvertAstToSearchArg.create(conf, 
getFuncDesc(exprStr));
+        (SearchArgumentImpl) result.getSearchArgument();
     List<PredicateLeaf> leaves = sarg.getLeaves();
     assertEquals(9, leaves.size());
 
@@ -848,8 +851,10 @@ public class TestConvertAstToSearchArg {
         " </object> \n" +
         "</java> \n";
 
+    ConvertAstToSearchArg.Result result = 
ConvertAstToSearchArg.createSearchArgument(conf, getFuncDesc(exprStr));
+    assertFalse(result.isPartial());
     SearchArgumentImpl sarg =
-        (SearchArgumentImpl) ConvertAstToSearchArg.create(conf, 
getFuncDesc(exprStr));
+        (SearchArgumentImpl) result.getSearchArgument();
     List<PredicateLeaf> leaves = sarg.getLeaves();
     assertEquals(4, leaves.size());
 
@@ -1285,8 +1290,10 @@ public class TestConvertAstToSearchArg {
         " </object> \n" +
         "</java> \n";
 
+    ConvertAstToSearchArg.Result result = 
ConvertAstToSearchArg.createSearchArgument(conf, getFuncDesc(exprStr));
+    assertTrue(result.isPartial());
     SearchArgumentImpl sarg =
-        (SearchArgumentImpl) ConvertAstToSearchArg.create(conf, 
getFuncDesc(exprStr));
+        (SearchArgumentImpl) result.getSearchArgument();
     List<PredicateLeaf> leaves = sarg.getLeaves();
     assertEquals(3, leaves.size());
 
@@ -1513,8 +1520,10 @@ public class TestConvertAstToSearchArg {
         "</java> \n" +
         "\n";
 
+    ConvertAstToSearchArg.Result result = 
ConvertAstToSearchArg.createSearchArgument(conf, getFuncDesc(exprStr));
+    assertFalse(result.isPartial());
     SearchArgumentImpl sarg =
-        (SearchArgumentImpl) ConvertAstToSearchArg.create(conf, 
getFuncDesc(exprStr));
+        (SearchArgumentImpl) result.getSearchArgument();
     List<PredicateLeaf> leaves = sarg.getLeaves();
     assertEquals(3, leaves.size());
 
@@ -1787,8 +1796,10 @@ public class TestConvertAstToSearchArg {
         " </object> \n" +
         "</java> \n";
 
+    ConvertAstToSearchArg.Result result = 
ConvertAstToSearchArg.createSearchArgument(conf, getFuncDesc(exprStr));
+    assertTrue(result.isPartial());
     SearchArgumentImpl sarg =
-        (SearchArgumentImpl) ConvertAstToSearchArg.create(conf, 
getFuncDesc(exprStr));
+        (SearchArgumentImpl) result.getSearchArgument();
     List<PredicateLeaf> leaves = sarg.getLeaves();
     assertEquals(1, leaves.size());
 
@@ -2275,8 +2286,10 @@ public class TestConvertAstToSearchArg {
         " </object>\n" +
         "</java>";
 
+    ConvertAstToSearchArg.Result result = 
ConvertAstToSearchArg.createSearchArgument(conf, getFuncDesc(exprStr));
+    assertFalse(result.isPartial());
     SearchArgumentImpl sarg =
-        (SearchArgumentImpl) ConvertAstToSearchArg.create(conf, 
getFuncDesc(exprStr));
+        (SearchArgumentImpl) result.getSearchArgument();
     List<PredicateLeaf> leaves = sarg.getLeaves();
     assertEquals(9, leaves.size());
 
@@ -2438,8 +2451,10 @@ public class TestConvertAstToSearchArg {
         " </object> \n" +
         "</java> ";
 
+    ConvertAstToSearchArg.Result result = 
ConvertAstToSearchArg.createSearchArgument(conf, getFuncDesc(exprStr));
+    assertTrue(result.isPartial());
     SearchArgumentImpl sarg =
-        (SearchArgumentImpl) ConvertAstToSearchArg.create(conf, 
getFuncDesc(exprStr));
+        (SearchArgumentImpl) result.getSearchArgument();
     List<PredicateLeaf> leaves = sarg.getLeaves();
     assertEquals(0, leaves.size());
 
@@ -2575,8 +2590,10 @@ public class TestConvertAstToSearchArg {
         " </object> \n" +
         "</java> ";
 
+    ConvertAstToSearchArg.Result result = 
ConvertAstToSearchArg.createSearchArgument(conf, getFuncDesc(exprStr));
+    assertFalse(result.isPartial());
     SearchArgumentImpl sarg =
-        (SearchArgumentImpl) ConvertAstToSearchArg.create(conf, 
getFuncDesc(exprStr));
+        (SearchArgumentImpl) result.getSearchArgument();
     List<PredicateLeaf> leaves = sarg.getLeaves();
     assertEquals(0, leaves.size());
 
@@ -2700,8 +2717,10 @@ public class TestConvertAstToSearchArg {
         " </object> \n" +
         "</java>";
 
+    ConvertAstToSearchArg.Result result = 
ConvertAstToSearchArg.createSearchArgument(conf, getFuncDesc(exprStr));
+    assertFalse(result.isPartial());
     SearchArgumentImpl sarg =
-        (SearchArgumentImpl) ConvertAstToSearchArg.create(conf, 
getFuncDesc(exprStr));
+        (SearchArgumentImpl) result.getSearchArgument();
     List<PredicateLeaf> leaves = sarg.getLeaves();
     assertEquals(1, leaves.size());
 
@@ -2745,9 +2764,9 @@ public class TestConvertAstToSearchArg {
         TypeInfoFactory.timestampTypeInfo, "ts", 
Timestamp.ofEpochMilli(1426595696000L));
     String serialAst = SerializationUtilities.serializeExpression(node);
 
-    SearchArgument sarg =
-        new ConvertAstToSearchArg(conf, 
SerializationUtilities.deserializeExpression(serialAst))
-            .buildSearchArgument();
+    ConvertAstToSearchArg convertAstToSearchArg = new 
ConvertAstToSearchArg(conf, 
SerializationUtilities.deserializeExpression(serialAst));
+    assertFalse(convertAstToSearchArg.isPartial());
+    SearchArgument sarg = convertAstToSearchArg.buildSearchArgument();
     assertEquals("leaf-0", sarg.getExpression().toOldString());
     assertEquals(1, sarg.getLeaves().size());
     PredicateLeaf leaf = sarg.getLeaves().get(0);
@@ -2761,9 +2780,9 @@ public class TestConvertAstToSearchArg {
         getColumnEqualsConstantExpression(TypeInfoFactory.dateTypeInfo, "dt", 
"2015-05-05");
     String serialAst = SerializationUtilities.serializeExpression(node);
 
-    SearchArgument sarg =
-        new ConvertAstToSearchArg(conf, 
SerializationUtilities.deserializeExpression(serialAst))
-            .buildSearchArgument();
+    ConvertAstToSearchArg convertAstToSearchArg = new 
ConvertAstToSearchArg(conf, 
SerializationUtilities.deserializeExpression(serialAst));
+    assertFalse(convertAstToSearchArg.isPartial());
+    SearchArgument sarg = convertAstToSearchArg.buildSearchArgument();
     assertEquals("leaf-0", sarg.getExpression().toOldString());
     assertEquals(1, sarg.getLeaves().size());
     PredicateLeaf leaf = sarg.getLeaves().get(0);
@@ -2777,9 +2796,9 @@ public class TestConvertAstToSearchArg {
         getColumnEqualsConstantExpression(TypeInfoFactory.decimalTypeInfo, 
"dec", 123);
     String serialAst = SerializationUtilities.serializeExpression(node);
 
-    SearchArgument sarg =
-        new ConvertAstToSearchArg(conf, 
SerializationUtilities.deserializeExpression(serialAst))
-            .buildSearchArgument();
+    ConvertAstToSearchArg convertAstToSearchArg = new 
ConvertAstToSearchArg(conf, 
SerializationUtilities.deserializeExpression(serialAst));
+    assertFalse(convertAstToSearchArg.isPartial());
+    SearchArgument sarg = convertAstToSearchArg.buildSearchArgument();
     assertEquals("leaf-0", sarg.getExpression().toOldString());
     assertEquals(1, sarg.getLeaves().size());
     PredicateLeaf leaf = sarg.getLeaves().get(0);
@@ -2793,9 +2812,9 @@ public class TestConvertAstToSearchArg {
         getColumnEqualsConstantExpression(TypeInfoFactory.charTypeInfo, "ch", 
"char      ");
     String serialAst = SerializationUtilities.serializeExpression(node);
 
-    SearchArgument sarg =
-        new ConvertAstToSearchArg(conf, 
SerializationUtilities.deserializeExpression(serialAst))
-            .buildSearchArgument();
+    ConvertAstToSearchArg convertAstToSearchArg = new 
ConvertAstToSearchArg(conf, 
SerializationUtilities.deserializeExpression(serialAst));
+    assertFalse(convertAstToSearchArg.isPartial());
+    SearchArgument sarg = convertAstToSearchArg.buildSearchArgument();
     assertEquals("leaf-0", sarg.getExpression().toOldString());
     assertEquals(1, sarg.getLeaves().size());
     PredicateLeaf leaf = sarg.getLeaves().get(0);
@@ -2809,9 +2828,9 @@ public class TestConvertAstToSearchArg {
         getColumnEqualsConstantExpression(TypeInfoFactory.varcharTypeInfo, 
"vc", "variable");
     String serialAst = SerializationUtilities.serializeExpression(node);
 
-    SearchArgument sarg =
-        new ConvertAstToSearchArg(conf, 
SerializationUtilities.deserializeExpression(serialAst))
-            .buildSearchArgument();
+    ConvertAstToSearchArg convertAstToSearchArg = new 
ConvertAstToSearchArg(conf, 
SerializationUtilities.deserializeExpression(serialAst));
+    assertFalse(convertAstToSearchArg.isPartial());
+    SearchArgument sarg = convertAstToSearchArg.buildSearchArgument();
     assertEquals("leaf-0", sarg.getExpression().toOldString());
     assertEquals(1, sarg.getLeaves().size());
     PredicateLeaf leaf = sarg.getLeaves().get(0);
@@ -2825,9 +2844,9 @@ public class TestConvertAstToSearchArg {
         getColumnEqualsConstantExpression(TypeInfoFactory.intTypeInfo, "bi", 
12345);
     String serialAst = SerializationUtilities.serializeExpression(node);
 
-    SearchArgument sarg =
-        new ConvertAstToSearchArg(conf, 
SerializationUtilities.deserializeExpression(serialAst))
-            .buildSearchArgument();
+    ConvertAstToSearchArg convertAstToSearchArg = new 
ConvertAstToSearchArg(conf, 
SerializationUtilities.deserializeExpression(serialAst));
+    assertFalse(convertAstToSearchArg.isPartial());
+    SearchArgument sarg = convertAstToSearchArg.buildSearchArgument();
     assertEquals("leaf-0", sarg.getExpression().toOldString());
     assertEquals(1, sarg.getLeaves().size());
     PredicateLeaf leaf = sarg.getLeaves().get(0);
@@ -2856,9 +2875,9 @@ public class TestConvertAstToSearchArg {
         new GenericUDFOPAnd(), children);
     String serialAst = SerializationUtilities.serializeExpression(node);
 
-    SearchArgument sarg =
-        new ConvertAstToSearchArg(conf, 
SerializationUtilities.deserializeExpression(serialAst))
-            .buildSearchArgument();
+    ConvertAstToSearchArg convertAstToSearchArg = new 
ConvertAstToSearchArg(conf, 
SerializationUtilities.deserializeExpression(serialAst));
+    assertFalse(convertAstToSearchArg.isPartial());
+    SearchArgument sarg = convertAstToSearchArg.buildSearchArgument();
     assertEquals("(and leaf-0 leaf-1)", sarg.getExpression().toOldString());
     assertEquals(2, sarg.getLeaves().size());
     PredicateLeaf leaf = sarg.getLeaves().get(0);
@@ -2875,9 +2894,9 @@ public class TestConvertAstToSearchArg {
         getColumnEqualsConstantExpression(TypeInfoFactory.floatTypeInfo, 
"flt", 1.1f);
     String serialAst = SerializationUtilities.serializeExpression(node);
 
-    SearchArgument sarg =
-        new ConvertAstToSearchArg(conf, 
SerializationUtilities.deserializeExpression(serialAst))
-            .buildSearchArgument();
+    ConvertAstToSearchArg convertAstToSearchArg = new 
ConvertAstToSearchArg(conf, 
SerializationUtilities.deserializeExpression(serialAst));
+    assertFalse(convertAstToSearchArg.isPartial());
+    SearchArgument sarg = convertAstToSearchArg.buildSearchArgument();
     assertEquals("leaf-0", sarg.getExpression().toOldString());
     assertEquals(1, sarg.getLeaves().size());
     PredicateLeaf leaf = sarg.getLeaves().get(0);
@@ -2891,9 +2910,9 @@ public class TestConvertAstToSearchArg {
         getColumnEqualsConstantExpression(TypeInfoFactory.doubleTypeInfo, 
"dbl", 2.2);
     String serialAst = SerializationUtilities.serializeExpression(node);
 
-    SearchArgument sarg =
-        new ConvertAstToSearchArg(conf, 
SerializationUtilities.deserializeExpression(serialAst))
-            .buildSearchArgument();
+    ConvertAstToSearchArg convertAstToSearchArg = new 
ConvertAstToSearchArg(conf, 
SerializationUtilities.deserializeExpression(serialAst));
+    assertFalse(convertAstToSearchArg.isPartial());
+    SearchArgument sarg = convertAstToSearchArg.buildSearchArgument();
     assertEquals("leaf-0", sarg.getExpression().toOldString());
     assertEquals(1, sarg.getLeaves().size());
     PredicateLeaf leaf = sarg.getLeaves().get(0);
