This is an automated email from the ASF dual-hosted git repository.
sbadhya pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git
The following commit(s) were added to refs/heads/master by this push:
     new b12f84480a4 HIVE-27779: Iceberg: Drop partition support (#4785) (Sourabh Badhya reviewed by Denys Kuzmenko)
b12f84480a4 is described below
commit b12f84480a4535c1173187b71e44a1bcfbf0edc5
Author: Sourabh Badhya <[email protected]>
AuthorDate: Sat Nov 4 16:01:34 2023 +0530
    HIVE-27779: Iceberg: Drop partition support (#4785) (Sourabh Badhya reviewed by Denys Kuzmenko)
---
.../iceberg/mr/hive/HiveIcebergMetaHook.java | 67 ++-
.../iceberg/mr/hive/HiveIcebergStorageHandler.java | 50 +-
.../apache/iceberg/mr/hive/IcebergTableUtil.java | 14 +
.../test/queries/positive/iceberg_drop_partition.q | 79 +++
.../results/positive/iceberg_drop_partition.q.out | 530 +++++++++++++++++++++
.../org/apache/hadoop/hive/ql/metadata/Hive.java | 33 +-
.../hive/ql/metadata/HiveStorageHandler.java | 10 +
.../apache/hadoop/hive/ql/parse/ParseUtils.java | 4 +-
.../apache/hadoop/hive/metastore/HiveMetaHook.java | 13 +
.../hadoop/hive/metastore/HiveMetaStoreClient.java | 14 +-
.../apache/hadoop/hive/metastore/HMSHandler.java | 3 +-
11 files changed, 797 insertions(+), 20 deletions(-)
diff --git a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergMetaHook.java b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergMetaHook.java
index ad711d16c40..f174bd7b2fe 100644
--- a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergMetaHook.java
+++ b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergMetaHook.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.TableName;
import org.apache.hadoop.hive.metastore.HiveMetaHook;
+import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.PartitionDropOptions;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
@@ -51,11 +52,16 @@ import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
import org.apache.hadoop.hive.ql.ddl.table.AlterTableType;
+import org.apache.hadoop.hive.ql.exec.SerializationUtilities;
import org.apache.hadoop.hive.ql.io.AcidUtils;
+import org.apache.hadoop.hive.ql.io.sarg.ConvertAstToSearchArg;
+import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.parse.PartitionTransform;
import org.apache.hadoop.hive.ql.parse.TransformSpec;
+import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
import org.apache.hadoop.hive.ql.session.SessionState;
import org.apache.hadoop.hive.ql.session.SessionStateUtil;
import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
@@ -70,9 +76,14 @@ import org.apache.iceberg.BaseMetastoreTableOperations;
import org.apache.iceberg.BaseTable;
import org.apache.iceberg.CatalogUtil;
import org.apache.iceberg.DeleteFiles;
+import org.apache.iceberg.FileScanTask;
+import org.apache.iceberg.MetadataTableType;
+import org.apache.iceberg.MetadataTableUtils;
+import org.apache.iceberg.PartitionData;
import org.apache.iceberg.PartitionField;
import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.PartitionSpecParser;
+import org.apache.iceberg.PartitionsTable;
import org.apache.iceberg.Schema;
import org.apache.iceberg.SchemaParser;
import org.apache.iceberg.Snapshot;
@@ -89,10 +100,12 @@ import org.apache.iceberg.exceptions.NoSuchTableException;
import org.apache.iceberg.exceptions.NotFoundException;
import org.apache.iceberg.expressions.Expression;
import org.apache.iceberg.expressions.Expressions;
+import org.apache.iceberg.expressions.ResidualEvaluator;
import org.apache.iceberg.hive.CachedClientPool;
import org.apache.iceberg.hive.HiveSchemaUtil;
import org.apache.iceberg.hive.HiveTableOperations;
import org.apache.iceberg.hive.MetastoreLock;
+import org.apache.iceberg.io.CloseableIterable;
import org.apache.iceberg.io.FileIO;
import org.apache.iceberg.mapping.MappingUtil;
import org.apache.iceberg.mapping.NameMapping;
@@ -106,9 +119,11 @@ import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableSet;
import org.apache.iceberg.relocated.com.google.common.collect.Lists;
import org.apache.iceberg.relocated.com.google.common.collect.Maps;
+import org.apache.iceberg.relocated.com.google.common.collect.Sets;
import org.apache.iceberg.types.Conversions;
import org.apache.iceberg.types.Type;
import org.apache.iceberg.util.Pair;
+import org.apache.iceberg.util.StructProjection;
import org.apache.thrift.TException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -129,7 +144,7 @@ public class HiveIcebergMetaHook implements HiveMetaHook {
       AlterTableType.ADDCOLS, AlterTableType.REPLACE_COLUMNS, AlterTableType.RENAME_COLUMN,
       AlterTableType.ADDPROPS, AlterTableType.DROPPROPS, AlterTableType.SETPARTITIONSPEC,
       AlterTableType.UPDATE_COLUMNS, AlterTableType.RENAME, AlterTableType.EXECUTE, AlterTableType.CREATE_BRANCH,
-      AlterTableType.CREATE_TAG, AlterTableType.DROP_BRANCH, AlterTableType.DROP_TAG);
+      AlterTableType.CREATE_TAG, AlterTableType.DROP_BRANCH, AlterTableType.DROPPARTITION, AlterTableType.DROP_TAG);
   private static final List<String> MIGRATION_ALLOWED_SOURCE_FORMATS = ImmutableList.of(
       FileFormat.PARQUET.name().toLowerCase(),
       FileFormat.ORC.name().toLowerCase(),
@@ -997,6 +1012,56 @@ public class HiveIcebergMetaHook implements HiveMetaHook {
}
}
+  @Override
+  public void preDropPartitions(org.apache.hadoop.hive.metastore.api.Table hmsTable,
+      EnvironmentContext context,
+      List<org.apache.commons.lang3.tuple.Pair<Integer, byte[]>> partExprs)
+      throws MetaException {
+    Table icebergTbl = IcebergTableUtil.getTable(conf, hmsTable);
+    DeleteFiles deleteFiles = icebergTbl.newDelete();
+    List<Expression> expressions = partExprs.stream().map(partExpr -> {
+      ExprNodeDesc exprNodeDesc = SerializationUtilities
+          .deserializeObjectWithTypeInformation(partExpr.getRight(), true);
+      SearchArgument sarg = ConvertAstToSearchArg.create(conf, (ExprNodeGenericFuncDesc) exprNodeDesc);
+      return HiveIcebergFilterFactory.generateFilterExpression(sarg);
+    }).collect(Collectors.toList());
+    PartitionsTable partitionsTable = (PartitionsTable) MetadataTableUtils
+        .createMetadataTableInstance(icebergTbl, MetadataTableType.PARTITIONS);
+    List<PartitionData> partitionList = Lists.newArrayList();
+    Expression finalExp = Expressions.alwaysFalse();
+    PartitionSpec pSpec = icebergTbl.spec();
+    for (int index = 0; index < expressions.size(); index++) {
+      finalExp = Expressions.or(finalExp, expressions.get(index));
+    }
+    ResidualEvaluator resEval = ResidualEvaluator.of(icebergTbl.spec(), finalExp, false);
+    try (CloseableIterable<FileScanTask> fileScanTasks = partitionsTable.newScan().planFiles()) {
+      fileScanTasks.forEach(task ->
+          partitionList.addAll(Sets.newHashSet(CloseableIterable.transform(task.asDataTask().rows(), row -> {
+            StructProjection data = row.get(0, StructProjection.class);
+            return IcebergTableUtil.toPartitionData(data, pSpec.partitionType());
+          })).stream()
+              .filter(partitionData -> resEval.residualFor(partitionData).isEquivalentTo(Expressions.alwaysTrue()))
+              .collect(Collectors.toSet())));
+
+      Expression partitionSetFilter = Expressions.alwaysFalse();
+      for (PartitionData partitionData : partitionList) {
+        Expression partFilter = Expressions.alwaysTrue();
+        for (int index = 0; index < pSpec.fields().size(); index++) {
+          PartitionField field = icebergTbl.spec().fields().get(index);
+          partFilter = Expressions.and(
+              partFilter, Expressions.equal(field.name(), partitionData.get(index, Object.class)));
+        }
+        partitionSetFilter = Expressions.or(partitionSetFilter, partFilter);
+      }
+
+      deleteFiles.deleteFromRowFilter(partitionSetFilter);
+      deleteFiles.commit();
+    } catch (IOException e) {
+      throw new MetaException(String.format("Error while fetching the partitions due to: %s", e));
+    }
+    context.putToProperties(HiveMetaStoreClient.SKIP_DROP_PARTITION, "true");
+  }
+
private class PreAlterTableProperties {
private String tableLocation;
private String format;
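
The gist of preDropPartitions() in the hunk above: every DROP PARTITION spec is turned into an Iceberg Expression, the specs are OR-ed together, and a partition listed in the PARTITIONS metadata table is selected for deletion only when the residual of the combined filter collapses to alwaysTrue(). A minimal, self-contained sketch of that residual check (illustration only, not commit code; the class name is made up and nothing beyond the public Iceberg API is assumed):

    import org.apache.iceberg.PartitionData;
    import org.apache.iceberg.PartitionSpec;
    import org.apache.iceberg.Schema;
    import org.apache.iceberg.expressions.Expression;
    import org.apache.iceberg.expressions.Expressions;
    import org.apache.iceberg.expressions.ResidualEvaluator;
    import org.apache.iceberg.types.Types;

    public class ResidualSketch {
      public static void main(String[] args) {
        Schema schema = new Schema(Types.NestedField.required(1, "a", Types.IntegerType.get()));
        PartitionSpec spec = PartitionSpec.builderFor(schema).identity("a").build();
        // alter table ice_int drop partition (a <= 2), partition (a >= 3)
        Expression filter = Expressions.or(
            Expressions.lessThanOrEqual("a", 2), Expressions.greaterThanOrEqual("a", 3));
        ResidualEvaluator resEval = ResidualEvaluator.of(spec, filter, false);
        PartitionData partition = new PartitionData(spec.partitionType());
        partition.set(0, 2);  // the partition a=2
        // For a matching partition the residual reduces to alwaysTrue().
        System.out.println(resEval.residualFor(partition).isEquivalentTo(Expressions.alwaysTrue()));
      }
    }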
diff --git a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandler.java b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandler.java
index c11663e3a65..0afdd79ba55 100644
--- a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandler.java
+++ b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandler.java
@@ -82,10 +82,12 @@ import org.apache.hadoop.hive.ql.io.StorageFormatDescriptor;
 import org.apache.hadoop.hive.ql.io.parquet.vector.VectorizedParquetRecordReader;
import org.apache.hadoop.hive.ql.io.sarg.ConvertAstToSearchArg;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
+import org.apache.hadoop.hive.ql.metadata.DummyPartition;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler;
import org.apache.hadoop.hive.ql.metadata.HiveStoragePredicateHandler;
import org.apache.hadoop.hive.ql.metadata.HiveUtils;
+import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
import org.apache.hadoop.hive.ql.parse.AlterTableExecuteSpec;
import org.apache.hadoop.hive.ql.parse.AlterTableSnapshotRefSpec;
@@ -165,6 +167,7 @@ import org.apache.iceberg.expressions.Projections;
import org.apache.iceberg.expressions.ResidualEvaluator;
import org.apache.iceberg.expressions.StrictMetricsEvaluator;
import org.apache.iceberg.hadoop.HadoopConfigurable;
+import org.apache.iceberg.hive.HiveSchemaUtil;
import org.apache.iceberg.io.CloseableIterable;
import org.apache.iceberg.mr.Catalogs;
import org.apache.iceberg.mr.InputFormatConfig;
@@ -1133,8 +1136,11 @@ public class HiveIcebergStorageHandler implements HiveStoragePredicateHandler, H
     }
     Schema schema = table.schema();
+    List<FieldSchema> hiveSchema = HiveSchemaUtil.convert(schema);
+    Map<String, String> colNameToColType = hiveSchema.stream()
+        .collect(Collectors.toMap(FieldSchema::getName, FieldSchema::getType));
     return table.sortOrder().fields().stream().map(s -> new FieldSchema(schema.findColumnName(s.sourceId()),
-        schema.findType(s.sourceId()).toString(),
+        colNameToColType.get(schema.findColumnName(s.sourceId())),
         String.format("Transform: %s, Sort direction: %s, Null sort order: %s",
             s.transform().toString(), s.direction().name(), s.nullOrder().name()))).collect(Collectors.toList());
   }
@@ -1932,4 +1938,46 @@ public class HiveIcebergStorageHandler implements HiveStoragePredicateHandler, H
       return false;
     }
   }
+
+  @Override
+  public List<FieldSchema> getPartitionKeys(org.apache.hadoop.hive.ql.metadata.Table hmsTable) {
+    Table icebergTable = IcebergTableUtil.getTable(conf, hmsTable.getTTable());
+    Schema schema = icebergTable.schema();
+    List<FieldSchema> hiveSchema = HiveSchemaUtil.convert(schema);
+    Map<String, String> colNameToColType = hiveSchema.stream()
+        .collect(Collectors.toMap(FieldSchema::getName, FieldSchema::getType));
+    return icebergTable.spec().fields().stream().map(partField ->
+        new FieldSchema(schema.findColumnName(partField.sourceId()),
+            colNameToColType.get(schema.findColumnName(partField.sourceId())),
+            String.format("Transform: %s", partField.transform().toString()))).collect(Collectors.toList());
+  }
+
+  @Override
+  public List<Partition> getPartitionsByExpr(org.apache.hadoop.hive.ql.metadata.Table hmsTable, ExprNodeDesc desc)
+      throws SemanticException {
+    Table icebergTable = IcebergTableUtil.getTable(conf, hmsTable.getTTable());
+    PartitionSpec pSpec = icebergTable.spec();
+    PartitionsTable partitionsTable = (PartitionsTable) MetadataTableUtils
+        .createMetadataTableInstance(icebergTable, MetadataTableType.PARTITIONS);
+    SearchArgument sarg = ConvertAstToSearchArg.create(conf, (ExprNodeGenericFuncDesc) desc);
+    Expression expression = HiveIcebergFilterFactory.generateFilterExpression(sarg);
+    Set<PartitionData> partitionList = Sets.newHashSet();
+    ResidualEvaluator resEval = ResidualEvaluator.of(pSpec, expression, false);
+    try (CloseableIterable<FileScanTask> fileScanTasks = partitionsTable.newScan().planFiles()) {
+      fileScanTasks.forEach(task ->
+          partitionList.addAll(Sets.newHashSet(CloseableIterable.transform(task.asDataTask().rows(), row -> {
+            StructProjection data = row.get(PART_IDX, StructProjection.class);
+            return IcebergTableUtil.toPartitionData(data, pSpec.partitionType());
+          })).stream()
+              .filter(partitionData -> resEval.residualFor(partitionData).isEquivalentTo(Expressions.alwaysTrue()))
+              .collect(Collectors.toSet())));
+
+      return partitionList.stream()
+          .map(partitionData -> new DummyPartition(hmsTable, pSpec.partitionToPath(partitionData)))
+          .collect(Collectors.toList());
+    } catch (IOException e) {
+      throw new SemanticException(String.format("Error while fetching the partitions due to: %s", e));
+    }
+  }
 }
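
Note on the DummyPartition name above: pSpec.partitionToPath(partitionData) renders a partition tuple in the familiar Hive path form, which is how outputs like default@ice_int_double_part@a=2/b=1 appear in the q.out below. A hedged, self-contained illustration (plain Iceberg API; the class name is made up, not commit code):

    import org.apache.iceberg.PartitionData;
    import org.apache.iceberg.PartitionSpec;
    import org.apache.iceberg.Schema;
    import org.apache.iceberg.types.Types;

    public class PartitionPathSketch {
      public static void main(String[] args) {
        Schema schema = new Schema(
            Types.NestedField.required(1, "a", Types.IntegerType.get()),
            Types.NestedField.required(2, "b", Types.IntegerType.get()));
        PartitionSpec spec = PartitionSpec.builderFor(schema).identity("a").identity("b").build();
        PartitionData data = new PartitionData(spec.partitionType());
        data.set(0, 2);
        data.set(1, 1);
        System.out.println(spec.partitionToPath(data));  // prints: a=2/b=1
      }
    }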
diff --git a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/IcebergTableUtil.java b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/IcebergTableUtil.java
index aacbf4c4e3d..e22c5cfcf36 100644
--- a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/IcebergTableUtil.java
+++ b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/IcebergTableUtil.java
@@ -36,9 +36,11 @@ import org.apache.hadoop.hive.ql.plan.PlanUtils;
import org.apache.hadoop.hive.ql.session.SessionStateUtil;
import org.apache.iceberg.DeleteFiles;
import org.apache.iceberg.ManageSnapshots;
+import org.apache.iceberg.PartitionData;
import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.Schema;
import org.apache.iceberg.SnapshotRef;
+import org.apache.iceberg.StructLike;
import org.apache.iceberg.Table;
import org.apache.iceberg.TableProperties;
import org.apache.iceberg.UpdatePartitionSpec;
@@ -47,6 +49,7 @@ import org.apache.iceberg.expressions.Expression;
import org.apache.iceberg.expressions.Expressions;
import org.apache.iceberg.mr.Catalogs;
import org.apache.iceberg.mr.InputFormatConfig;
+import org.apache.iceberg.types.Types;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -292,4 +295,15 @@ public class IcebergTableUtil {
     }
     deleteFiles.deleteFromRowFilter(exp).commit();
   }
+
+  public static PartitionData toPartitionData(StructLike key, Types.StructType keyType) {
+    PartitionData data = new PartitionData(keyType);
+    for (int i = 0; i < keyType.fields().size(); i++) {
+      Object val = key.get(i, keyType.fields().get(i).type().typeId().javaClass());
+      if (val != null) {
+        data.set(i, val);
+      }
+    }
+    return data;
+  }
 }
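
toPartitionData() copies a row's partition tuple field by field into a concrete PartitionData, looking each value up by the field's Java class and leaving nulls unset. A small sketch of that contract (hypothetical class name; it assumes the patched IcebergTableUtil is on the classpath, and uses a PartitionData as the StructLike input, though any StructLike such as the StructProjection read from the PARTITIONS metadata table would do):

    import org.apache.iceberg.PartitionData;
    import org.apache.iceberg.mr.hive.IcebergTableUtil;
    import org.apache.iceberg.types.Types;

    public class ToPartitionDataSketch {
      public static void main(String[] args) {
        Types.StructType keyType = Types.StructType.of(
            Types.NestedField.optional(1000, "a", Types.IntegerType.get()),
            Types.NestedField.optional(1001, "b", Types.StringType.get()));
        PartitionData source = new PartitionData(keyType);
        source.set(0, 42);
        source.set(1, "ABC");
        // Copies both fields; a null field would simply stay unset in the copy.
        PartitionData copy = IcebergTableUtil.toPartitionData(source, keyType);
        System.out.println(copy);
      }
    }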
diff --git a/iceberg/iceberg-handler/src/test/queries/positive/iceberg_drop_partition.q b/iceberg/iceberg-handler/src/test/queries/positive/iceberg_drop_partition.q
new file mode 100644
index 00000000000..aa0fbae9420
--- /dev/null
+++ b/iceberg/iceberg-handler/src/test/queries/positive/iceberg_drop_partition.q
@@ -0,0 +1,79 @@
+-- SORT_QUERY_RESULTS
+create table ice_int (a int, b string) partitioned by spec (a) stored by iceberg;
+insert into table ice_int values (1, 'ABC');
+insert into table ice_int values (2, 'DEF');
+insert into table ice_int values (3, 'ABC');
+insert into table ice_int values (4, 'DEF');
+insert into table ice_int values (1, 'ABC');
+insert into table ice_int values (2, 'DEF');
+
+alter table ice_int drop partition (a <= 2), partition (a >= 3);
+
+select * from ice_int;
+
+create table ice_str (a int, b string) partitioned by spec (b) stored by iceberg;
+insert into table ice_str values (1, 'ABC');
+insert into table ice_str values (2, 'DEF');
+insert into table ice_str values (3, 'ABC');
+insert into table ice_str values (4, 'DEF');
+insert into table ice_str values (1, 'ABC');
+insert into table ice_str values (2, 'XYZ');
+
+alter table ice_str drop partition (b != 'ABC'), partition (b == 'XYZ');
+
+select * from ice_str;
+
+create table ice_int_double_part (a int, b int, c int) partitioned by spec (a, b) stored by iceberg;
+insert into table ice_int_double_part values (1, 2, 3);
+insert into table ice_int_double_part values (2, 3, 4);
+insert into table ice_int_double_part values (3, 4, 5);
+insert into table ice_int_double_part values (4, 5, 6);
+insert into table ice_int_double_part values (1, 2, 4);
+insert into table ice_int_double_part values (2, 1, 5);
+
+alter table ice_int_double_part drop partition (a <= 2, b <= 1), partition (a >= 3, b != 4);
+
+select * from ice_int_double_part;
+
+create table ice_date_int_double_part (a date, b int, c int) partitioned by spec (a, b) stored by iceberg;
+insert into table ice_date_int_double_part values ('2022-02-07', 2, 3);
+insert into table ice_date_int_double_part values ('2022-08-07', 3, 4);
+insert into table ice_date_int_double_part values ('2022-10-05', 4, 5);
+insert into table ice_date_int_double_part values ('2022-01-17', 5, 6);
+insert into table ice_date_int_double_part values ('2022-04-08', 2, 4);
+insert into table ice_date_int_double_part values ('2023-02-07', 1, 5);
+
+alter table ice_date_int_double_part drop partition (a <= '2023-02-07', b <= 1), partition (a >= '2022-08-07', b >= 2);
+
+select * from ice_date_int_double_part;
+
+create table ice_date_double_double_part (a date, b double, c int) partitioned by spec (a, b) stored by iceberg;
+insert into table ice_date_double_double_part values ('2022-02-07', 2.75, 3);
+insert into table ice_date_double_double_part values ('2022-08-07', 3.25, 4);
+insert into table ice_date_double_double_part values ('2022-10-05', 4.23, 5);
+insert into table ice_date_double_double_part values ('2022-01-17', 5.67, 6);
+insert into table ice_date_double_double_part values ('2022-04-08', 2.45, 4);
+insert into table ice_date_double_double_part values ('2023-02-07', 1.08, 5);
+
+alter table ice_date_double_double_part drop partition (a <= '2023-02-07', b <= 1.09), partition (a >= '2022-08-07', b >= 2.78);
+
+select * from ice_date_int_double_part;
+
+create table ice_date_bigint_double_part (a date, b bigint, c int) partitioned by spec (a, b) stored by iceberg;
+insert into table ice_date_bigint_double_part values ('2022-02-07', 267859937678997886, 3);
+insert into table ice_date_bigint_double_part values ('2022-08-07', 325678599459970774, 4);
+insert into table ice_date_bigint_double_part values ('2022-10-05', 423789504756478599, 5);
+insert into table ice_date_bigint_double_part values ('2022-01-17', 567890387564883960, 6);
+insert into table ice_date_bigint_double_part values ('2022-04-08', 245789600487678594, 4);
+insert into table ice_date_bigint_double_part values ('2023-02-07', 108789600487566478, 5);
+
+alter table ice_date_bigint_double_part drop partition (a <= '2023-02-07', b <= 109000000000000000L), partition (a >= '2022-08-07', b >= 278000000000000000L);
+
+select * from ice_date_bigint_double_part;
+
+drop table ice_int;
+drop table ice_str;
+drop table ice_int_double_part;
+drop table ice_date_int_double_part;
+drop table ice_date_double_double_part;
+drop table ice_date_bigint_double_part;
diff --git a/iceberg/iceberg-handler/src/test/results/positive/iceberg_drop_partition.q.out b/iceberg/iceberg-handler/src/test/results/positive/iceberg_drop_partition.q.out
new file mode 100644
index 00000000000..078d918cd0e
--- /dev/null
+++ b/iceberg/iceberg-handler/src/test/results/positive/iceberg_drop_partition.q.out
@@ -0,0 +1,530 @@
+PREHOOK: query: create table ice_int (a int, b string) partitioned by spec (a) stored by iceberg
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@ice_int
+POSTHOOK: query: create table ice_int (a int, b string) partitioned by spec (a) stored by iceberg
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@ice_int
+PREHOOK: query: insert into table ice_int values (1, 'ABC')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice_int
+POSTHOOK: query: insert into table ice_int values (1, 'ABC')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice_int
+PREHOOK: query: insert into table ice_int values (2, 'DEF')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice_int
+POSTHOOK: query: insert into table ice_int values (2, 'DEF')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice_int
+PREHOOK: query: insert into table ice_int values (3, 'ABC')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice_int
+POSTHOOK: query: insert into table ice_int values (3, 'ABC')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice_int
+PREHOOK: query: insert into table ice_int values (4, 'DEF')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice_int
+POSTHOOK: query: insert into table ice_int values (4, 'DEF')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice_int
+PREHOOK: query: insert into table ice_int values (1, 'ABC')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice_int
+POSTHOOK: query: insert into table ice_int values (1, 'ABC')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice_int
+PREHOOK: query: insert into table ice_int values (2, 'DEF')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice_int
+POSTHOOK: query: insert into table ice_int values (2, 'DEF')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice_int
+PREHOOK: query: alter table ice_int drop partition (a <= 2), partition (a >= 3)
+PREHOOK: type: ALTERTABLE_DROPPARTS
+PREHOOK: Input: default@ice_int
+PREHOOK: Output: default@ice_int@a=1
+PREHOOK: Output: default@ice_int@a=2
+PREHOOK: Output: default@ice_int@a=3
+PREHOOK: Output: default@ice_int@a=4
+POSTHOOK: query: alter table ice_int drop partition (a <= 2), partition (a >= 3)
+POSTHOOK: type: ALTERTABLE_DROPPARTS
+POSTHOOK: Input: default@ice_int
+POSTHOOK: Output: default@ice_int@a=1
+POSTHOOK: Output: default@ice_int@a=2
+POSTHOOK: Output: default@ice_int@a=3
+POSTHOOK: Output: default@ice_int@a=4
+PREHOOK: query: select * from ice_int
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ice_int
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select * from ice_int
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ice_int
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+PREHOOK: query: create table ice_str (a int, b string) partitioned by spec (b) stored by iceberg
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@ice_str
+POSTHOOK: query: create table ice_str (a int, b string) partitioned by spec (b) stored by iceberg
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@ice_str
+PREHOOK: query: insert into table ice_str values (1, 'ABC')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice_str
+POSTHOOK: query: insert into table ice_str values (1, 'ABC')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice_str
+PREHOOK: query: insert into table ice_str values (2, 'DEF')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice_str
+POSTHOOK: query: insert into table ice_str values (2, 'DEF')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice_str
+PREHOOK: query: insert into table ice_str values (3, 'ABC')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice_str
+POSTHOOK: query: insert into table ice_str values (3, 'ABC')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice_str
+PREHOOK: query: insert into table ice_str values (4, 'DEF')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice_str
+POSTHOOK: query: insert into table ice_str values (4, 'DEF')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice_str
+PREHOOK: query: insert into table ice_str values (1, 'ABC')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice_str
+POSTHOOK: query: insert into table ice_str values (1, 'ABC')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice_str
+PREHOOK: query: insert into table ice_str values (2, 'XYZ')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice_str
+POSTHOOK: query: insert into table ice_str values (2, 'XYZ')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice_str
+PREHOOK: query: alter table ice_str drop partition (b != 'ABC'), partition (b == 'XYZ')
+PREHOOK: type: ALTERTABLE_DROPPARTS
+PREHOOK: Input: default@ice_str
+PREHOOK: Output: default@ice_str@b=DEF
+PREHOOK: Output: default@ice_str@b=XYZ
+POSTHOOK: query: alter table ice_str drop partition (b != 'ABC'), partition (b == 'XYZ')
+POSTHOOK: type: ALTERTABLE_DROPPARTS
+POSTHOOK: Input: default@ice_str
+POSTHOOK: Output: default@ice_str@b=DEF
+POSTHOOK: Output: default@ice_str@b=XYZ
+PREHOOK: query: select * from ice_str
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ice_str
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select * from ice_str
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ice_str
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1 ABC
+1 ABC
+3 ABC
+PREHOOK: query: create table ice_int_double_part (a int, b int, c int) partitioned by spec (a, b) stored by iceberg
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@ice_int_double_part
+POSTHOOK: query: create table ice_int_double_part (a int, b int, c int) partitioned by spec (a, b) stored by iceberg
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@ice_int_double_part
+PREHOOK: query: insert into table ice_int_double_part values (1, 2, 3)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice_int_double_part
+POSTHOOK: query: insert into table ice_int_double_part values (1, 2, 3)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice_int_double_part
+PREHOOK: query: insert into table ice_int_double_part values (2, 3, 4)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice_int_double_part
+POSTHOOK: query: insert into table ice_int_double_part values (2, 3, 4)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice_int_double_part
+PREHOOK: query: insert into table ice_int_double_part values (3, 4, 5)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice_int_double_part
+POSTHOOK: query: insert into table ice_int_double_part values (3, 4, 5)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice_int_double_part
+PREHOOK: query: insert into table ice_int_double_part values (4, 5, 6)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice_int_double_part
+POSTHOOK: query: insert into table ice_int_double_part values (4, 5, 6)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice_int_double_part
+PREHOOK: query: insert into table ice_int_double_part values (1, 2, 4)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice_int_double_part
+POSTHOOK: query: insert into table ice_int_double_part values (1, 2, 4)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice_int_double_part
+PREHOOK: query: insert into table ice_int_double_part values (2, 1, 5)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice_int_double_part
+POSTHOOK: query: insert into table ice_int_double_part values (2, 1, 5)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice_int_double_part
+PREHOOK: query: alter table ice_int_double_part drop partition (a <= 2, b <= 1), partition (a >= 3, b != 4)
+PREHOOK: type: ALTERTABLE_DROPPARTS
+PREHOOK: Input: default@ice_int_double_part
+PREHOOK: Output: default@ice_int_double_part@a=2/b=1
+PREHOOK: Output: default@ice_int_double_part@a=4/b=5
+POSTHOOK: query: alter table ice_int_double_part drop partition (a <= 2, b <= 1), partition (a >= 3, b != 4)
+POSTHOOK: type: ALTERTABLE_DROPPARTS
+POSTHOOK: Input: default@ice_int_double_part
+POSTHOOK: Output: default@ice_int_double_part@a=2/b=1
+POSTHOOK: Output: default@ice_int_double_part@a=4/b=5
+PREHOOK: query: select * from ice_int_double_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ice_int_double_part
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select * from ice_int_double_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ice_int_double_part
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1 2 3
+1 2 4
+2 3 4
+3 4 5
+PREHOOK: query: create table ice_date_int_double_part (a date, b int, c int) partitioned by spec (a, b) stored by iceberg
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@ice_date_int_double_part
+POSTHOOK: query: create table ice_date_int_double_part (a date, b int, c int) partitioned by spec (a, b) stored by iceberg
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@ice_date_int_double_part
+PREHOOK: query: insert into table ice_date_int_double_part values ('2022-02-07', 2, 3)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice_date_int_double_part
+POSTHOOK: query: insert into table ice_date_int_double_part values ('2022-02-07', 2, 3)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice_date_int_double_part
+PREHOOK: query: insert into table ice_date_int_double_part values ('2022-08-07', 3, 4)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice_date_int_double_part
+POSTHOOK: query: insert into table ice_date_int_double_part values ('2022-08-07', 3, 4)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice_date_int_double_part
+PREHOOK: query: insert into table ice_date_int_double_part values ('2022-10-05', 4, 5)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice_date_int_double_part
+POSTHOOK: query: insert into table ice_date_int_double_part values ('2022-10-05', 4, 5)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice_date_int_double_part
+PREHOOK: query: insert into table ice_date_int_double_part values ('2022-01-17', 5, 6)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice_date_int_double_part
+POSTHOOK: query: insert into table ice_date_int_double_part values ('2022-01-17', 5, 6)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice_date_int_double_part
+PREHOOK: query: insert into table ice_date_int_double_part values ('2022-04-08', 2, 4)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice_date_int_double_part
+POSTHOOK: query: insert into table ice_date_int_double_part values ('2022-04-08', 2, 4)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice_date_int_double_part
+PREHOOK: query: insert into table ice_date_int_double_part values ('2023-02-07', 1, 5)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice_date_int_double_part
+POSTHOOK: query: insert into table ice_date_int_double_part values ('2023-02-07', 1, 5)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice_date_int_double_part
+PREHOOK: query: alter table ice_date_int_double_part drop partition (a <= '2023-02-07', b <= 1), partition (a >= '2022-08-07', b >= 2)
+PREHOOK: type: ALTERTABLE_DROPPARTS
+PREHOOK: Input: default@ice_date_int_double_part
+PREHOOK: Output: default@ice_date_int_double_part@a=2022-08-07/b=3
+PREHOOK: Output: default@ice_date_int_double_part@a=2022-10-05/b=4
+PREHOOK: Output: default@ice_date_int_double_part@a=2023-02-07/b=1
+POSTHOOK: query: alter table ice_date_int_double_part drop partition (a <= '2023-02-07', b <= 1), partition (a >= '2022-08-07', b >= 2)
+POSTHOOK: type: ALTERTABLE_DROPPARTS
+POSTHOOK: Input: default@ice_date_int_double_part
+POSTHOOK: Output: default@ice_date_int_double_part@a=2022-08-07/b=3
+POSTHOOK: Output: default@ice_date_int_double_part@a=2022-10-05/b=4
+POSTHOOK: Output: default@ice_date_int_double_part@a=2023-02-07/b=1
+PREHOOK: query: select * from ice_date_int_double_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ice_date_int_double_part
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select * from ice_date_int_double_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ice_date_int_double_part
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+2022-01-17 5 6
+2022-02-07 2 3
+2022-04-08 2 4
+PREHOOK: query: create table ice_date_double_double_part (a date, b double, c int) partitioned by spec (a, b) stored by iceberg
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@ice_date_double_double_part
+POSTHOOK: query: create table ice_date_double_double_part (a date, b double, c int) partitioned by spec (a, b) stored by iceberg
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@ice_date_double_double_part
+PREHOOK: query: insert into table ice_date_double_double_part values ('2022-02-07', 2.75, 3)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice_date_double_double_part
+POSTHOOK: query: insert into table ice_date_double_double_part values ('2022-02-07', 2.75, 3)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice_date_double_double_part
+PREHOOK: query: insert into table ice_date_double_double_part values ('2022-08-07', 3.25, 4)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice_date_double_double_part
+POSTHOOK: query: insert into table ice_date_double_double_part values ('2022-08-07', 3.25, 4)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice_date_double_double_part
+PREHOOK: query: insert into table ice_date_double_double_part values ('2022-10-05', 4.23, 5)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice_date_double_double_part
+POSTHOOK: query: insert into table ice_date_double_double_part values ('2022-10-05', 4.23, 5)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice_date_double_double_part
+PREHOOK: query: insert into table ice_date_double_double_part values ('2022-01-17', 5.67, 6)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice_date_double_double_part
+POSTHOOK: query: insert into table ice_date_double_double_part values ('2022-01-17', 5.67, 6)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice_date_double_double_part
+PREHOOK: query: insert into table ice_date_double_double_part values ('2022-04-08', 2.45, 4)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice_date_double_double_part
+POSTHOOK: query: insert into table ice_date_double_double_part values ('2022-04-08', 2.45, 4)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice_date_double_double_part
+PREHOOK: query: insert into table ice_date_double_double_part values ('2023-02-07', 1.08, 5)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice_date_double_double_part
+POSTHOOK: query: insert into table ice_date_double_double_part values ('2023-02-07', 1.08, 5)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice_date_double_double_part
+PREHOOK: query: alter table ice_date_double_double_part drop partition (a <= '2023-02-07', b <= 1.09), partition (a >= '2022-08-07', b >= 2.78)
+PREHOOK: type: ALTERTABLE_DROPPARTS
+PREHOOK: Input: default@ice_date_double_double_part
+PREHOOK: Output: default@ice_date_double_double_part@a=2022-08-07/b=3.25
+PREHOOK: Output: default@ice_date_double_double_part@a=2022-10-05/b=4.23
+PREHOOK: Output: default@ice_date_double_double_part@a=2023-02-07/b=1.08
+POSTHOOK: query: alter table ice_date_double_double_part drop partition (a <= '2023-02-07', b <= 1.09), partition (a >= '2022-08-07', b >= 2.78)
+POSTHOOK: type: ALTERTABLE_DROPPARTS
+POSTHOOK: Input: default@ice_date_double_double_part
+POSTHOOK: Output: default@ice_date_double_double_part@a=2022-08-07/b=3.25
+POSTHOOK: Output: default@ice_date_double_double_part@a=2022-10-05/b=4.23
+POSTHOOK: Output: default@ice_date_double_double_part@a=2023-02-07/b=1.08
+PREHOOK: query: select * from ice_date_int_double_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ice_date_int_double_part
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select * from ice_date_int_double_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ice_date_int_double_part
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+2022-01-17 5 6
+2022-02-07 2 3
+2022-04-08 2 4
+PREHOOK: query: create table ice_date_bigint_double_part (a date, b bigint, c int) partitioned by spec (a, b) stored by iceberg
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@ice_date_bigint_double_part
+POSTHOOK: query: create table ice_date_bigint_double_part (a date, b bigint, c int) partitioned by spec (a, b) stored by iceberg
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@ice_date_bigint_double_part
+PREHOOK: query: insert into table ice_date_bigint_double_part values ('2022-02-07', 267859937678997886, 3)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice_date_bigint_double_part
+POSTHOOK: query: insert into table ice_date_bigint_double_part values ('2022-02-07', 267859937678997886, 3)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice_date_bigint_double_part
+PREHOOK: query: insert into table ice_date_bigint_double_part values ('2022-08-07', 325678599459970774, 4)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice_date_bigint_double_part
+POSTHOOK: query: insert into table ice_date_bigint_double_part values ('2022-08-07', 325678599459970774, 4)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice_date_bigint_double_part
+PREHOOK: query: insert into table ice_date_bigint_double_part values ('2022-10-05', 423789504756478599, 5)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice_date_bigint_double_part
+POSTHOOK: query: insert into table ice_date_bigint_double_part values ('2022-10-05', 423789504756478599, 5)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice_date_bigint_double_part
+PREHOOK: query: insert into table ice_date_bigint_double_part values ('2022-01-17', 567890387564883960, 6)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice_date_bigint_double_part
+POSTHOOK: query: insert into table ice_date_bigint_double_part values ('2022-01-17', 567890387564883960, 6)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice_date_bigint_double_part
+PREHOOK: query: insert into table ice_date_bigint_double_part values ('2022-04-08', 245789600487678594, 4)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice_date_bigint_double_part
+POSTHOOK: query: insert into table ice_date_bigint_double_part values ('2022-04-08', 245789600487678594, 4)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice_date_bigint_double_part
+PREHOOK: query: insert into table ice_date_bigint_double_part values ('2023-02-07', 108789600487566478, 5)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice_date_bigint_double_part
+POSTHOOK: query: insert into table ice_date_bigint_double_part values ('2023-02-07', 108789600487566478, 5)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice_date_bigint_double_part
+PREHOOK: query: alter table ice_date_bigint_double_part drop partition (a <= '2023-02-07', b <= 109000000000000000L), partition (a >= '2022-08-07', b >= 278000000000000000L)
+PREHOOK: type: ALTERTABLE_DROPPARTS
+PREHOOK: Input: default@ice_date_bigint_double_part
+PREHOOK: Output: default@ice_date_bigint_double_part@a=2022-08-07/b=325678599459970774
+PREHOOK: Output: default@ice_date_bigint_double_part@a=2022-10-05/b=423789504756478599
+PREHOOK: Output: default@ice_date_bigint_double_part@a=2023-02-07/b=108789600487566478
+POSTHOOK: query: alter table ice_date_bigint_double_part drop partition (a <= '2023-02-07', b <= 109000000000000000L), partition (a >= '2022-08-07', b >= 278000000000000000L)
+POSTHOOK: type: ALTERTABLE_DROPPARTS
+POSTHOOK: Input: default@ice_date_bigint_double_part
+POSTHOOK: Output: default@ice_date_bigint_double_part@a=2022-08-07/b=325678599459970774
+POSTHOOK: Output: default@ice_date_bigint_double_part@a=2022-10-05/b=423789504756478599
+POSTHOOK: Output: default@ice_date_bigint_double_part@a=2023-02-07/b=108789600487566478
+PREHOOK: query: select * from ice_date_bigint_double_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ice_date_bigint_double_part
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select * from ice_date_bigint_double_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ice_date_bigint_double_part
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+2022-01-17 567890387564883960 6
+2022-02-07 267859937678997886 3
+2022-04-08 245789600487678594 4
+PREHOOK: query: drop table ice_int
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@ice_int
+PREHOOK: Output: database:default
+PREHOOK: Output: default@ice_int
+POSTHOOK: query: drop table ice_int
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@ice_int
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@ice_int
+PREHOOK: query: drop table ice_str
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@ice_str
+PREHOOK: Output: database:default
+PREHOOK: Output: default@ice_str
+POSTHOOK: query: drop table ice_str
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@ice_str
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@ice_str
+PREHOOK: query: drop table ice_int_double_part
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@ice_int_double_part
+PREHOOK: Output: database:default
+PREHOOK: Output: default@ice_int_double_part
+POSTHOOK: query: drop table ice_int_double_part
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@ice_int_double_part
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@ice_int_double_part
+PREHOOK: query: drop table ice_date_int_double_part
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@ice_date_int_double_part
+PREHOOK: Output: database:default
+PREHOOK: Output: default@ice_date_int_double_part
+POSTHOOK: query: drop table ice_date_int_double_part
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@ice_date_int_double_part
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@ice_date_int_double_part
+PREHOOK: query: drop table ice_date_double_double_part
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@ice_date_double_double_part
+PREHOOK: Output: database:default
+PREHOOK: Output: default@ice_date_double_double_part
+POSTHOOK: query: drop table ice_date_double_double_part
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@ice_date_double_double_part
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@ice_date_double_double_part
+PREHOOK: query: drop table ice_date_bigint_double_part
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@ice_date_bigint_double_part
+PREHOOK: Output: database:default
+PREHOOK: Output: default@ice_date_bigint_double_part
+POSTHOOK: query: drop table ice_date_bigint_double_part
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@ice_date_bigint_double_part
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@ice_date_bigint_double_part
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 51133a3636c..5022b77fc36 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -4491,25 +4491,30 @@ private void constructOneLBLocationMap(FileStatus fSta,
     perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.HIVE_GET_PARTITIONS_BY_EXPR);
     try {
       Preconditions.checkNotNull(partitions);
-      byte[] exprBytes = SerializationUtilities.serializeObjectWithTypeInformation(expr);
       String defaultPartitionName = HiveConf.getVar(conf, ConfVars.DEFAULTPARTITIONNAME);
-      List<org.apache.hadoop.hive.metastore.api.PartitionSpec> msParts =
-          new ArrayList<>();
-      ValidWriteIdList validWriteIdList = null;
+      if (tbl.getStorageHandler() != null && tbl.getStorageHandler().alwaysUnpartitioned()) {
+        partitions.addAll(tbl.getStorageHandler().getPartitionsByExpr(tbl, expr));
+        return false;
+      } else {
+        byte[] exprBytes = SerializationUtilities.serializeObjectWithTypeInformation(expr);
+        List<org.apache.hadoop.hive.metastore.api.PartitionSpec> msParts =
+            new ArrayList<>();
+        ValidWriteIdList validWriteIdList = null;
-      PartitionsByExprRequest req = buildPartitionByExprRequest(tbl, exprBytes, defaultPartitionName, conf,
-          null);
+        PartitionsByExprRequest req = buildPartitionByExprRequest(tbl, exprBytes, defaultPartitionName, conf,
+            null);
-      if (AcidUtils.isTransactionalTable(tbl)) {
-        validWriteIdList = getValidWriteIdList(tbl.getDbName(), tbl.getTableName());
-        req.setValidWriteIdList(validWriteIdList != null ? validWriteIdList.toString() : null);
-        req.setId(tbl.getTTable().getId());
-      }
+        if (AcidUtils.isTransactionalTable(tbl)) {
+          validWriteIdList = getValidWriteIdList(tbl.getDbName(), tbl.getTableName());
+          req.setValidWriteIdList(validWriteIdList != null ? validWriteIdList.toString() : null);
+          req.setId(tbl.getTTable().getId());
+        }
-      boolean hasUnknownParts = getMSC().listPartitionsSpecByExpr(req, msParts);
-      partitions.addAll(convertFromPartSpec(msParts.iterator(), tbl));
+        boolean hasUnknownParts = getMSC().listPartitionsSpecByExpr(req, msParts);
+        partitions.addAll(convertFromPartSpec(msParts.iterator(), tbl));
-      return hasUnknownParts;
+        return hasUnknownParts;
+      }
     } finally {
       perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.HIVE_GET_PARTITIONS_BY_EXPR, "HS2-cache");
     }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveStorageHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveStorageHandler.java
index e05ecfb5a50..4548b067364 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveStorageHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveStorageHandler.java
@@ -728,4 +728,14 @@ public interface HiveStorageHandler extends Configurable {
       SearchArgument searchArgument) {
     return false;
   }
+
+  default List<FieldSchema> getPartitionKeys(org.apache.hadoop.hive.ql.metadata.Table hmsTable) {
+    throw new UnsupportedOperationException("Storage handler does not support getting partition keys " +
+        "for a table.");
+  }
+
+  default List<Partition> getPartitionsByExpr(org.apache.hadoop.hive.ql.metadata.Table hmsTable, ExprNodeDesc desc)
+      throws SemanticException {
+    throw new UnsupportedOperationException("Storage handler does not support getting partitions by expression " +
+        "for a table.");
+  }
 }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseUtils.java
index 9eefe23d147..a246e3af2a8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseUtils.java
@@ -565,7 +565,9 @@ public final class ParseUtils {
       CommonTree ast, Table table, Configuration conf, boolean canGroupExprs) throws SemanticException {
     String defaultPartitionName = HiveConf.getVar(conf, HiveConf.ConfVars.DEFAULTPARTITIONNAME);
     Map<String, String> colTypes = new HashMap<>();
-    for (FieldSchema fs : table.getPartitionKeys()) {
+    List<FieldSchema> partitionKeys = table.getStorageHandler() != null && table.getStorageHandler().alwaysUnpartitioned() ?
+        table.getStorageHandler().getPartitionKeys(table) : table.getPartitionKeys();
+    for (FieldSchema fs : partitionKeys) {
       colTypes.put(fs.getName().toLowerCase(), fs.getType());
     }
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaHook.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaHook.java
index 5986015a513..8a0970a474b 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaHook.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaHook.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.hive.metastore;
+import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
@@ -197,4 +198,16 @@ public interface HiveMetaHook {
default void postGetTable(Table table) {
// Do nothing
}
+
+  /**
+   * Called before dropping the partitions from the table in the metastore during ALTER TABLE DROP PARTITION.
+   * @param table table whose partition needs to be dropped
+   * @param context context of the operation
+   * @param partExprs List of partition expressions
+   * @throws MetaException
+   */
+  default void preDropPartitions(Table table,
+      EnvironmentContext context, List<Pair<Integer, byte[]>> partExprs) throws MetaException {
+    // Do nothing
+  }
 }
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index 71a8dfb6200..116d2a41598 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -120,6 +120,7 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
   public static final String MANUALLY_INITIATED_COMPACTION = "manual";
   public static final String TRUNCATE_SKIP_DATA_DELETION = "truncateSkipDataDeletion";
+  public static final String SKIP_DROP_PARTITION = "dropPartitionSkip";
   public static final String RENAME_PARTITION_MAKE_COPY = "renamePartitionMakeCopy";
/**
@@ -1931,6 +1932,16 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
                                                 PartitionDropOptions options) throws TException {
     RequestPartsSpec rps = new RequestPartsSpec();
     List<DropPartitionsExpr> exprs = new ArrayList<>(partExprs.size());
+    Table table = getTable(catName, dbName, tblName);
+    HiveMetaHook hook = getHook(table);
+    EnvironmentContext context = new EnvironmentContext();
+    if (hook != null) {
+      hook.preDropPartitions(table, context, partExprs);
+    }
+    if (context.getProperties() != null &&
+        Boolean.parseBoolean(context.getProperties().get(SKIP_DROP_PARTITION))) {
+      return Lists.newArrayList();
+    }
     for (Pair<Integer, byte[]> partExpr : partExprs) {
       DropPartitionsExpr dpe = new DropPartitionsExpr();
       dpe.setExpr(partExpr.getRight());
@@ -1944,10 +1955,9 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
     req.setNeedResult(options.returnResults);
     req.setIfExists(options.ifExists);
-    EnvironmentContext context = null;
     if (options.purgeData) {
       LOG.info("Dropped partitions will be purged!");
-      context = getEnvironmentContextWithIfPurgeSet();
+      context.putToProperties("ifPurge", "true");
     }
     if (options.writeId != null) {
       context = Optional.ofNullable(context).orElse(new EnvironmentContext());
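
The client-side contract introduced above, in short: the hook runs first, and if it records SKIP_DROP_PARTITION in the EnvironmentContext (as the Iceberg hook does once its delete has committed), dropPartitions() returns an empty list and never issues the metastore RPC. A minimal sketch of that handshake (names taken from this diff, class name made up, the surrounding client plumbing elided):

    import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.EnvironmentContext;

    public class SkipDropSketch {
      public static void main(String[] args) {
        EnvironmentContext context = new EnvironmentContext();
        // What HiveIcebergMetaHook.preDropPartitions() does after committing the
        // Iceberg delete:
        context.putToProperties(HiveMetaStoreClient.SKIP_DROP_PARTITION, "true");
        // What HiveMetaStoreClient.dropPartitions() checks before calling HMS:
        boolean skip = context.getProperties() != null
            && Boolean.parseBoolean(context.getProperties().get(HiveMetaStoreClient.SKIP_DROP_PARTITION));
        System.out.println(skip);  // true -> no partitions dropped via the metastore
      }
    }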
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java
index feb9701fac4..dfabec77ac5 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java
@@ -5013,7 +5013,8 @@ public class HMSHandler extends FacebookBase implements IHMSHandler {
     // 3. Either
     // 3.1. User has specified PURGE from the commandline, and if not,
     // 3.2. User has set the table to auto-purge.
-    return ((envContext != null) && Boolean.parseBoolean(envContext.getProperties().get("ifPurge")))
+    return (envContext != null && envContext.getProperties() != null
+        && Boolean.parseBoolean(envContext.getProperties().get("ifPurge")))
         || MetaStoreUtils.isSkipTrash(tbl.getParameters());