This is an automated email from the ASF dual-hosted git repository.
dkuzmenko pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git
The following commit(s) were added to refs/heads/master by this push:
new e77e4129885 HIVE-26832: Add "SHOW PARTITIONS" support for Iceberg
tables (Simhadri Govindappa, reviewed by Attila Turoczy, Denys Kuzmenko, okumin)
e77e4129885 is described below
commit e77e4129885b1504563ce066098fb6488dce65bd
Author: Simhadri Govindappa <[email protected]>
AuthorDate: Sat Jun 24 22:54:36 2023 +0530
HIVE-26832: Add "SHOW PARTITIONS" support for Iceberg tables (Simhadri
Govindappa, reviewed by Attila Turoczy, Denys Kuzmenko, okumin)
Closes #4346
---
.../org/apache/hadoop/hive/conf/Constants.java | 4 +
.../iceberg/mr/hive/HiveIcebergStorageHandler.java | 72 ++++++-
.../org/apache/iceberg/mr/hive/HiveTableUtil.java | 36 ++++
.../negative/show_partitions_negative_test.q | 11 ++
.../test/queries/positive/show_partitions_test.q | 39 ++++
.../negative/show_partitions_negative_test.q.out | 20 ++
.../results/positive/show_partitions_test.q.out | 214 +++++++++++++++++++++
.../partition/show/ShowPartitionsOperation.java | 12 +-
.../hive/ql/metadata/HiveStorageHandler.java | 14 ++
9 files changed, 411 insertions(+), 11 deletions(-)
diff --git a/common/src/java/org/apache/hadoop/hive/conf/Constants.java
b/common/src/java/org/apache/hadoop/hive/conf/Constants.java
index ef05123560f..99d84105962 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/Constants.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/Constants.java
@@ -106,4 +106,8 @@ public class Constants {
public static final String HTTP_HEADER_REQUEST_TRACK = "X-Request-ID";
public static final String TIME_POSTFIX_REQUEST_TRACK = "_TIME";
+
+ public static final String ICEBERG = "iceberg";
+ public static final String ICEBERG_PARTITION_TABLE_SCHEMA =
"partition,record_count,file_count,spec_id";
+ public static final String DELIMITED_JSON_SERDE =
"org.apache.hadoop.hive.serde2.DelimitedJSONSerDe";
}
diff --git
a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandler.java
b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandler.java
index ece13c519ca..7ec03084b53 100644
---
a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandler.java
+++
b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandler.java
@@ -19,6 +19,7 @@
package org.apache.iceberg.mr.hive;
+import com.fasterxml.jackson.databind.ObjectMapper;
import java.io.IOException;
import java.io.Serializable;
import java.net.URI;
@@ -61,10 +62,12 @@ import org.apache.hadoop.hive.ql.Context;
import org.apache.hadoop.hive.ql.Context.Operation;
import org.apache.hadoop.hive.ql.ErrorMsg;
import org.apache.hadoop.hive.ql.QueryState;
+import org.apache.hadoop.hive.ql.ddl.DDLOperationContext;
import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc;
import org.apache.hadoop.hive.ql.ddl.table.AlterTableType;
import org.apache.hadoop.hive.ql.ddl.table.create.like.CreateTableLikeDesc;
import
org.apache.hadoop.hive.ql.ddl.table.misc.properties.AlterTableSetPropertiesDesc;
+import org.apache.hadoop.hive.ql.exec.FetchOperator;
import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.hooks.WriteEntity;
@@ -97,15 +100,22 @@ import org.apache.hadoop.hive.ql.session.SessionState;
import org.apache.hadoop.hive.ql.session.SessionStateUtil;
import org.apache.hadoop.hive.ql.stats.Partish;
import org.apache.hadoop.hive.serde2.AbstractSerDe;
+import org.apache.hadoop.hive.serde2.DefaultFetchFormatter;
import org.apache.hadoop.hive.serde2.Deserializer;
+import org.apache.hadoop.hive.serde2.FetchFormatter;
import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.InputFormat;
+import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobContext;
import org.apache.hadoop.mapred.JobContextImpl;
import org.apache.hadoop.mapred.JobID;
import org.apache.hadoop.mapred.OutputFormat;
+import org.apache.hadoop.mapred.RecordReader;
+import org.apache.hadoop.mapred.Reporter;
import org.apache.iceberg.BaseMetastoreTableOperations;
import org.apache.iceberg.BaseTable;
import org.apache.iceberg.FileFormat;
@@ -157,11 +167,13 @@ public class HiveIcebergStorageHandler implements
HiveStoragePredicateHandler, H
private static final String ICEBERG_URI_PREFIX = "iceberg://";
private static final Splitter TABLE_NAME_SPLITTER = Splitter.on("..");
private static final String TABLE_NAME_SEPARATOR = "..";
- private static final String ICEBERG = "iceberg";
- private static final String PUFFIN = "puffin";
+ // Column index for partition metadata table
+ private static final int SPEC_IDX = 3;
+ private static final int PART_IDX = 0;
public static final String COPY_ON_WRITE = "copy-on-write";
public static final String MERGE_ON_READ = "merge-on-read";
public static final String STATS = "/stats/";
+
/**
* Function template for producing a custom sort expression function:
* Takes the source column index and the bucket count to create a function
where Iceberg bucket UDF is used to build
@@ -367,7 +379,7 @@ public class HiveIcebergStorageHandler implements
HiveStoragePredicateHandler, H
// For write queries where rows got modified, don't fetch from cache as
values could have changed.
Table table = getTable(hmsTable);
Map<String, String> stats = Maps.newHashMap();
- if (getStatsSource().equals(ICEBERG)) {
+ if (getStatsSource().equals(Constants.ICEBERG)) {
if (table.currentSnapshot() != null) {
Map<String, String> summary = table.currentSnapshot().summary();
if (summary != null) {
@@ -418,7 +430,7 @@ public class HiveIcebergStorageHandler implements
HiveStoragePredicateHandler, H
@Override
public boolean canSetColStatistics(org.apache.hadoop.hive.ql.metadata.Table
hmsTable) {
Table table = IcebergTableUtil.getTable(conf, hmsTable.getTTable());
- return table.currentSnapshot() != null && getStatsSource().equals(ICEBERG);
+ return table.currentSnapshot() != null &&
getStatsSource().equals(Constants.ICEBERG);
}
@Override
@@ -484,7 +496,7 @@ public class HiveIcebergStorageHandler implements
HiveStoragePredicateHandler, H
@Override
public boolean
canComputeQueryUsingStats(org.apache.hadoop.hive.ql.metadata.Table hmsTable) {
- if (getStatsSource().equals(ICEBERG)) {
+ if (getStatsSource().equals(Constants.ICEBERG)) {
Table table = getTable(hmsTable);
if (table.currentSnapshot() != null) {
Map<String, String> summary = table.currentSnapshot().summary();
@@ -501,7 +513,7 @@ public class HiveIcebergStorageHandler implements
HiveStoragePredicateHandler, H
}
private String getStatsSource() {
- return HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_ICEBERG_STATS_SOURCE,
ICEBERG).toLowerCase();
+ return HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_ICEBERG_STATS_SOURCE,
Constants.ICEBERG).toLowerCase();
}
private Path getStatsPath(Table table) {
@@ -1447,4 +1459,52 @@ public class HiveIcebergStorageHandler implements
HiveStoragePredicateHandler, H
return null;
}
+
+ @Override
+ public List<String> showPartitions(DDLOperationContext context,
org.apache.hadoop.hive.ql.metadata.Table hmstbl)
+ throws HiveException {
+ Configuration confs = context.getConf();
+ JobConf job = HiveTableUtil.getPartJobConf(confs, hmstbl);
+ Class<? extends InputFormat> formatter = hmstbl.getInputFormatClass();
+
+ try {
+ InputFormat inputFormat =
FetchOperator.getInputFormatFromCache(formatter, job);
+ InputSplit[] splits = inputFormat.getSplits(job, 1);
+ try (RecordReader<WritableComparable, Writable> reader =
inputFormat.getRecordReader(splits[0], job,
+ Reporter.NULL)) {
+ return getPartitions(context, job, reader, hmstbl);
+ }
+ } catch (Exception e) {
+ throw new HiveException(e, ErrorMsg.GENERIC_ERROR,
+ "show partitions for table " + hmstbl.getTableName() + ". " +
ErrorMsg.TABLE_NOT_PARTITIONED +
+ " or the table is empty ");
+ }
+ }
+
+ private List<String> getPartitions(DDLOperationContext context,
Configuration job,
+ RecordReader<WritableComparable, Writable> reader,
org.apache.hadoop.hive.ql.metadata.Table hmstbl)
+ throws Exception {
+
+ List<String> parts = Lists.newArrayList();
+ Writable value = reader.createValue();
+ WritableComparable key = reader.createKey();
+
+ try (FetchFormatter fetcher = new DefaultFetchFormatter()) {
+ fetcher.initialize(job, HiveTableUtil.getSerializationProps());
+ org.apache.hadoop.hive.ql.metadata.Table metaDataPartTable =
+ context.getDb().getTable(hmstbl.getDbName(), hmstbl.getTableName(),
"partitions", true);
+ Deserializer currSerDe = metaDataPartTable.getDeserializer();
+ ObjectMapper mapper = new ObjectMapper();
+ Table tbl = getTable(hmstbl);
+ while (reader.next(key, value)) {
+ String[] row =
+ fetcher.convert(currSerDe.deserialize(value),
currSerDe.getObjectInspector())
+ .toString().split("\t");
+ parts.add(HiveTableUtil.getParseData(row[PART_IDX], row[SPEC_IDX],
mapper, tbl.spec().specId()));
+ }
+ }
+ Collections.sort(parts);
+ return parts;
+ }
+
}
diff --git
a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveTableUtil.java
b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveTableUtil.java
index 3bd950c5869..6bc39e8f40e 100644
---
a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveTableUtil.java
+++
b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveTableUtil.java
@@ -19,6 +19,8 @@
package org.apache.iceberg.mr.hive;
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.ObjectMapper;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
@@ -31,12 +33,14 @@ import java.util.Properties;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
+import java.util.stream.Collectors;
import java.util.stream.IntStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.hive.conf.Constants;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
@@ -45,8 +49,12 @@ import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
import org.apache.hadoop.hive.metastore.utils.FileUtils;
+import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.io.IOConstants;
import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.serde.serdeConstants;
+import org.apache.hadoop.hive.serde2.ColumnProjectionUtils;
+import org.apache.hadoop.mapred.JobConf;
import org.apache.iceberg.AppendFiles;
import org.apache.iceberg.DataFile;
import org.apache.iceberg.DeleteFiles;
@@ -256,4 +264,32 @@ public class HiveTableUtil {
return
Boolean.parseBoolean(properties.getProperty(hive_metastoreConstants.TABLE_IS_CTAS));
}
+ static Properties getSerializationProps() {
+ Properties props = new Properties();
+ props.put(serdeConstants.SERIALIZATION_FORMAT, "" + Utilities.tabCode);
+ props.put(serdeConstants.SERIALIZATION_NULL_FORMAT, "NULL");
+ return props;
+ }
+
+ static String getParseData(String parseData, String specId, ObjectMapper
mapper, Integer currentSpecId)
+ throws JsonProcessingException {
+ Map<String, String> map = mapper.readValue(parseData, Map.class);
+ String partString =
+ map.entrySet().stream()
+ .filter(entry -> entry.getValue() != null)
+ .map(java.lang.Object::toString)
+ .collect(Collectors.joining("/"));
+ String currentSpecMarker = currentSpecId.toString().equals(specId) ?
"current-" : "";
+ return String.format("%sspec-id=%s/%s", currentSpecMarker, specId,
partString);
+ }
+
+ static JobConf getPartJobConf(Configuration confs,
org.apache.hadoop.hive.ql.metadata.Table tbl) {
+ JobConf job = new JobConf(confs);
+ job.set(ColumnProjectionUtils.READ_COLUMN_NAMES_CONF_STR,
Constants.ICEBERG_PARTITION_TABLE_SCHEMA);
+ job.set(InputFormatConfig.TABLE_LOCATION, tbl.getPath().toString());
+ job.set(InputFormatConfig.TABLE_IDENTIFIER, tbl.getFullyQualifiedName() +
".partitions");
+ HiveConf.setVar(job, HiveConf.ConfVars.HIVEFETCHOUTPUTSERDE,
Constants.DELIMITED_JSON_SERDE);
+ HiveConf.setBoolVar(job, HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED,
false);
+ return job;
+ }
}
diff --git
a/iceberg/iceberg-handler/src/test/queries/negative/show_partitions_negative_test.q
b/iceberg/iceberg-handler/src/test/queries/negative/show_partitions_negative_test.q
new file mode 100644
index 00000000000..94c72f1fe78
--- /dev/null
+++
b/iceberg/iceberg-handler/src/test/queries/negative/show_partitions_negative_test.q
@@ -0,0 +1,11 @@
+-- Mask the totalSize value as it can have slight variability, causing test
flakiness
+--! qt:replace:/(\s+totalSize\s+)\S+(\s+)/$1#Masked#$2/
+-- Mask random uuid
+--! qt:replace:/(\s+uuid\s+)\S+(\s*)/$1#Masked#$2/
+
+--Null case -> if partitions spec is altered. Null partitions need to be
ignored.
+create table ice2 (a string, b int, c int) PARTITIONED BY (d_part int, e_part
int) stored by iceberg stored as orc TBLPROPERTIES("format-version"='2') ;
+select * from default.ice2.partitions order by `partition`;
+show partitions ice2;
+
+
diff --git
a/iceberg/iceberg-handler/src/test/queries/positive/show_partitions_test.q
b/iceberg/iceberg-handler/src/test/queries/positive/show_partitions_test.q
new file mode 100644
index 00000000000..0d1ebef44ba
--- /dev/null
+++ b/iceberg/iceberg-handler/src/test/queries/positive/show_partitions_test.q
@@ -0,0 +1,39 @@
+-- Mask the totalSize value as it can have slight variability, causing test
flakiness
+--! qt:replace:/(\s+totalSize\s+)\S+(\s+)/$1#Masked#$2/
+-- Mask random uuid
+--! qt:replace:/(\s+uuid\s+)\S+(\s*)/$1#Masked#$2/
+-- SORT_QUERY_RESULTS
+
+set hive.vectorized.execution.enabled=false;
+
+-- Create a hive and iceberg table to compare.
+create table hiveT1 (a string, b int, c int) PARTITIONED BY (d_part int,
e_part int) stored as orc ;
+insert into hiveT1 values ('aa', 1, 2, 3, 4), ('aa', 1, 2, 3, 4), ('aa', 1, 2,
2, 5), ('aa', 1, 2, 10, 5), ('aa', 1, 2, 10, 5);
+create table ice1 (a string, b int, c int) PARTITIONED BY (d_part int, e_part
int) stored by iceberg stored as orc TBLPROPERTIES("format-version"='2') ;
+insert into ice1 values ('aa', 1, 2, 3, 4), ('aa', 1, 2, 3, 4), ('aa', 1, 2,
2, 5), ('aa', 1, 2, 10, 5), ('aa', 1, 2, 10, 5);
+
+--compare hive table with iceberg table
+show partitions hiveT1;
+show partitions ice1 ;
+select * from default.ice1.partitions order by `partition`;
+
+explain show partitions hiveT1;
+explain show partitions ice1;
+explain select * from default.ice1.partitions;
+
+-- Partition evolution
+create table ice2 (a string, b int, c int) PARTITIONED BY (d_part int, e_part
int) stored by iceberg stored as orc TBLPROPERTIES("format-version"='2') ;
+insert into ice2 values ('aa', 1, 2, 3, 4), ('aa', 1, 2, 3, 4), ('aa', 1, 2,
2, 5), ('aa', 1, 2, 10, 5), ('aa', 1, 2, 10, 5);
+
+select * from default.ice2.partitions order by `partition`;
+show partitions ice2;
+
+ALTER TABLE ice2 SET PARTITION SPEC (c) ;
+select * from default.ice2.partitions order by `partition`;
+show partitions ice2;
+
+insert into ice2 values ('aa', 1, 2, 3, 4), ('aa', 1, 2, 3, 4), ('aa', 1, 3,
2, 5), ('aa', 1, 4, 10, 5), ('aa', 1, 5, 10, 5);
+select * from default.ice2.partitions order by `partition`;
+show partitions ice2;
+
+
diff --git
a/iceberg/iceberg-handler/src/test/results/negative/show_partitions_negative_test.q.out
b/iceberg/iceberg-handler/src/test/results/negative/show_partitions_negative_test.q.out
new file mode 100644
index 00000000000..3044e35ad4f
--- /dev/null
+++
b/iceberg/iceberg-handler/src/test/results/negative/show_partitions_negative_test.q.out
@@ -0,0 +1,20 @@
+PREHOOK: query: create table ice2 (a string, b int, c int) PARTITIONED BY
(d_part int, e_part int) stored by iceberg stored as orc
TBLPROPERTIES("format-version"='2')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@ice2
+POSTHOOK: query: create table ice2 (a string, b int, c int) PARTITIONED BY
(d_part int, e_part int) stored by iceberg stored as orc
TBLPROPERTIES("format-version"='2')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@ice2
+PREHOOK: query: select * from default.ice2.partitions order by `partition`
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ice2
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select * from default.ice2.partitions order by `partition`
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ice2
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+PREHOOK: query: show partitions ice2
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@ice2
+FAILED: Execution Error, return code 40000 from
org.apache.hadoop.hive.ql.ddl.DDLTask. Exception while processing show
partitions for table ice2. TABLE_NOT_PARTITIONED or the table is empty
diff --git
a/iceberg/iceberg-handler/src/test/results/positive/show_partitions_test.q.out
b/iceberg/iceberg-handler/src/test/results/positive/show_partitions_test.q.out
new file mode 100644
index 00000000000..3d6fcd4ba67
--- /dev/null
+++
b/iceberg/iceberg-handler/src/test/results/positive/show_partitions_test.q.out
@@ -0,0 +1,214 @@
+PREHOOK: query: create table hiveT1 (a string, b int, c int) PARTITIONED BY
(d_part int, e_part int) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@hiveT1
+POSTHOOK: query: create table hiveT1 (a string, b int, c int) PARTITIONED BY
(d_part int, e_part int) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@hiveT1
+PREHOOK: query: insert into hiveT1 values ('aa', 1, 2, 3, 4), ('aa', 1, 2, 3,
4), ('aa', 1, 2, 2, 5), ('aa', 1, 2, 10, 5), ('aa', 1, 2, 10, 5)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@hivet1
+POSTHOOK: query: insert into hiveT1 values ('aa', 1, 2, 3, 4), ('aa', 1, 2, 3,
4), ('aa', 1, 2, 2, 5), ('aa', 1, 2, 10, 5), ('aa', 1, 2, 10, 5)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@hivet1
+POSTHOOK: Output: default@hivet1@d_part=10/e_part=5
+POSTHOOK: Output: default@hivet1@d_part=2/e_part=5
+POSTHOOK: Output: default@hivet1@d_part=3/e_part=4
+POSTHOOK: Lineage: hivet1 PARTITION(d_part=10,e_part=5).a SCRIPT []
+POSTHOOK: Lineage: hivet1 PARTITION(d_part=10,e_part=5).b SCRIPT []
+POSTHOOK: Lineage: hivet1 PARTITION(d_part=10,e_part=5).c SCRIPT []
+POSTHOOK: Lineage: hivet1 PARTITION(d_part=2,e_part=5).a SCRIPT []
+POSTHOOK: Lineage: hivet1 PARTITION(d_part=2,e_part=5).b SCRIPT []
+POSTHOOK: Lineage: hivet1 PARTITION(d_part=2,e_part=5).c SCRIPT []
+POSTHOOK: Lineage: hivet1 PARTITION(d_part=3,e_part=4).a SCRIPT []
+POSTHOOK: Lineage: hivet1 PARTITION(d_part=3,e_part=4).b SCRIPT []
+POSTHOOK: Lineage: hivet1 PARTITION(d_part=3,e_part=4).c SCRIPT []
+PREHOOK: query: create table ice1 (a string, b int, c int) PARTITIONED BY
(d_part int, e_part int) stored by iceberg stored as orc
TBLPROPERTIES("format-version"='2')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@ice1
+POSTHOOK: query: create table ice1 (a string, b int, c int) PARTITIONED BY
(d_part int, e_part int) stored by iceberg stored as orc
TBLPROPERTIES("format-version"='2')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@ice1
+PREHOOK: query: insert into ice1 values ('aa', 1, 2, 3, 4), ('aa', 1, 2, 3,
4), ('aa', 1, 2, 2, 5), ('aa', 1, 2, 10, 5), ('aa', 1, 2, 10, 5)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice1
+POSTHOOK: query: insert into ice1 values ('aa', 1, 2, 3, 4), ('aa', 1, 2, 3,
4), ('aa', 1, 2, 2, 5), ('aa', 1, 2, 10, 5), ('aa', 1, 2, 10, 5)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice1
+PREHOOK: query: show partitions hiveT1
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@hivet1
+POSTHOOK: query: show partitions hiveT1
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@hivet1
+d_part=10/e_part=5
+d_part=2/e_part=5
+d_part=3/e_part=4
+PREHOOK: query: show partitions ice1
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@ice1
+POSTHOOK: query: show partitions ice1
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@ice1
+current-spec-id=0/d_part=10/e_part=5
+current-spec-id=0/d_part=2/e_part=5
+current-spec-id=0/d_part=3/e_part=4
+PREHOOK: query: select * from default.ice1.partitions order by `partition`
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ice1
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select * from default.ice1.partitions order by `partition`
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ice1
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+{"d_part":10,"e_part":5} 2 1 0
+{"d_part":2,"e_part":5} 1 1 0
+{"d_part":3,"e_part":4} 2 1 0
+PREHOOK: query: explain show partitions hiveT1
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@hivet1
+POSTHOOK: query: explain show partitions hiveT1
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@hivet1
+Stage-1
+ Fetch Operator
+ limit:-1
+ Stage-0
+ Show Partitions{"limit:":"-1","table:":"hiveT1"}
+
+PREHOOK: query: explain show partitions ice1
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@ice1
+POSTHOOK: query: explain show partitions ice1
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@ice1
+Stage-1
+ Fetch Operator
+ limit:-1
+ Stage-0
+ Show Partitions{"limit:":"-1","table:":"ice1"}
+
+PREHOOK: query: explain select * from default.ice1.partitions
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ice1
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: explain select * from default.ice1.partitions
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ice1
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+Plan optimized by CBO.
+
+Stage-0
+ Fetch Operator
+ limit:-1
+ Select Operator [SEL_1]
+ Output:["_col0","_col1","_col2","_col3"]
+ TableScan [TS_0]
+ Output:["partition","record_count","file_count","spec_id"]
+
+PREHOOK: query: create table ice2 (a string, b int, c int) PARTITIONED BY
(d_part int, e_part int) stored by iceberg stored as orc
TBLPROPERTIES("format-version"='2')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@ice2
+POSTHOOK: query: create table ice2 (a string, b int, c int) PARTITIONED BY
(d_part int, e_part int) stored by iceberg stored as orc
TBLPROPERTIES("format-version"='2')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@ice2
+PREHOOK: query: insert into ice2 values ('aa', 1, 2, 3, 4), ('aa', 1, 2, 3,
4), ('aa', 1, 2, 2, 5), ('aa', 1, 2, 10, 5), ('aa', 1, 2, 10, 5)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice2
+POSTHOOK: query: insert into ice2 values ('aa', 1, 2, 3, 4), ('aa', 1, 2, 3,
4), ('aa', 1, 2, 2, 5), ('aa', 1, 2, 10, 5), ('aa', 1, 2, 10, 5)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice2
+PREHOOK: query: select * from default.ice2.partitions order by `partition`
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ice2
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select * from default.ice2.partitions order by `partition`
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ice2
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+{"d_part":10,"e_part":5} 2 1 0
+{"d_part":2,"e_part":5} 1 1 0
+{"d_part":3,"e_part":4} 2 1 0
+PREHOOK: query: show partitions ice2
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@ice2
+POSTHOOK: query: show partitions ice2
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@ice2
+current-spec-id=0/d_part=10/e_part=5
+current-spec-id=0/d_part=2/e_part=5
+current-spec-id=0/d_part=3/e_part=4
+PREHOOK: query: ALTER TABLE ice2 SET PARTITION SPEC (c)
+PREHOOK: type: ALTERTABLE_SETPARTSPEC
+PREHOOK: Input: default@ice2
+POSTHOOK: query: ALTER TABLE ice2 SET PARTITION SPEC (c)
+POSTHOOK: type: ALTERTABLE_SETPARTSPEC
+POSTHOOK: Input: default@ice2
+POSTHOOK: Output: default@ice2
+PREHOOK: query: select * from default.ice2.partitions order by `partition`
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ice2
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select * from default.ice2.partitions order by `partition`
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ice2
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+{"d_part":10,"e_part":5,"c":null} 2 1 0
+{"d_part":2,"e_part":5,"c":null} 1 1 0
+{"d_part":3,"e_part":4,"c":null} 2 1 0
+PREHOOK: query: show partitions ice2
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@ice2
+POSTHOOK: query: show partitions ice2
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@ice2
+spec-id=0/d_part=10/e_part=5
+spec-id=0/d_part=2/e_part=5
+spec-id=0/d_part=3/e_part=4
+PREHOOK: query: insert into ice2 values ('aa', 1, 2, 3, 4), ('aa', 1, 2, 3,
4), ('aa', 1, 3, 2, 5), ('aa', 1, 4, 10, 5), ('aa', 1, 5, 10, 5)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@ice2
+POSTHOOK: query: insert into ice2 values ('aa', 1, 2, 3, 4), ('aa', 1, 2, 3,
4), ('aa', 1, 3, 2, 5), ('aa', 1, 4, 10, 5), ('aa', 1, 5, 10, 5)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@ice2
+PREHOOK: query: select * from default.ice2.partitions order by `partition`
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ice2
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select * from default.ice2.partitions order by `partition`
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ice2
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+{"d_part":10,"e_part":5,"c":null} 2 1 0
+{"d_part":2,"e_part":5,"c":null} 1 1 0
+{"d_part":3,"e_part":4,"c":null} 2 1 0
+{"d_part":null,"e_part":null,"c":2} 2 1 1
+{"d_part":null,"e_part":null,"c":3} 1 1 1
+{"d_part":null,"e_part":null,"c":4} 1 1 1
+{"d_part":null,"e_part":null,"c":5} 1 1 1
+PREHOOK: query: show partitions ice2
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@ice2
+POSTHOOK: query: show partitions ice2
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@ice2
+current-spec-id=1/c=2
+current-spec-id=1/c=3
+current-spec-id=1/c=4
+current-spec-id=1/c=5
+spec-id=0/d_part=10/e_part=5
+spec-id=0/d_part=2/e_part=5
+spec-id=0/d_part=3/e_part=4
diff --git
a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/ShowPartitionsOperation.java
b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/ShowPartitionsOperation.java
index 1337b22ed02..de122915f8e 100644
---
a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/ShowPartitionsOperation.java
+++
b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/ShowPartitionsOperation.java
@@ -54,12 +54,12 @@ public class ShowPartitionsOperation extends
DDLOperation<ShowPartitionsDesc> {
@Override
public int execute() throws HiveException {
Table tbl = context.getDb().getTable(desc.getTabName());
- if (!tbl.isPartitioned()) {
- throw new HiveException(ErrorMsg.TABLE_NOT_PARTITIONED,
desc.getTabName());
- }
-
List<String> parts;
- if (desc.getCond() != null || desc.getOrder() != null) {
+ if (tbl.isNonNative() &&
tbl.getStorageHandler().supportsPartitionTransform()) {
+ parts = tbl.getStorageHandler().showPartitions(context, tbl);
+ } else if (!tbl.isPartitioned()) {
+ throw new HiveException(ErrorMsg.TABLE_NOT_PARTITIONED,
desc.getTabName());
+ } else if (desc.getCond() != null || desc.getOrder() != null) {
parts = getPartitionNames(tbl);
} else if (desc.getPartSpec() != null) {
parts = context.getDb().getPartitionNames(tbl.getDbName(),
tbl.getTableName(),
@@ -116,4 +116,6 @@ public class ShowPartitionsOperation extends
DDLOperation<ShowPartitionsDesc> {
desc.getOrder(), desc.getLimit());
return partNames;
}
+
}
+
diff --git
a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveStorageHandler.java
b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveStorageHandler.java
index a0bd32d43ec..fafa8a6f4d9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveStorageHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveStorageHandler.java
@@ -24,6 +24,7 @@ import java.net.URI;
import java.net.URISyntaxException;
import java.util.Collections;
import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.common.classification.InterfaceAudience;
import org.apache.hadoop.hive.common.classification.InterfaceStability;
import org.apache.hadoop.hive.common.type.SnapshotContext;
@@ -37,6 +38,7 @@ import org.apache.hadoop.hive.metastore.api.LockType;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.ql.Context.Operation;
+import org.apache.hadoop.hive.ql.ddl.DDLOperationContext;
import org.apache.hadoop.hive.ql.ErrorMsg;
import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc;
import org.apache.hadoop.hive.ql.ddl.table.AlterTableType;
@@ -655,4 +657,16 @@ public interface HiveStorageHandler extends Configurable {
default Boolean hasAppendsOnly(org.apache.hadoop.hive.ql.metadata.Table
hmsTable, SnapshotContext since) {
return null;
}
+
+ /**
+ * Checks if storage handler supports Show Partitions and returns a list of
partitions
+ * @return List of partitions
+ * @throws UnsupportedOperationException
+ * @throws HiveException
+ */
+ default List<String> showPartitions(DDLOperationContext context,
+ org.apache.hadoop.hive.ql.metadata.Table tbl) throws
UnsupportedOperationException, HiveException {
+ throw new UnsupportedOperationException("Storage handler does not support
show partitions command");
+ }
+
}