This is an automated email from the ASF dual-hosted git repository.
morningman pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git
The following commit(s) were added to refs/heads/master by this push:
new 279fa92f0fe [feature](iceberg catalog) support iceberg view query
(#51376)
279fa92f0fe is described below
commit 279fa92f0fe337eabd619f1c4fa4b58317d90165
Author: heguanhui <[email protected]>
AuthorDate: Fri Jun 20 23:44:07 2025 +0800
[feature](iceberg catalog) support iceberg view query (#51376)
### What problem does this PR solve?
This feature addresses the issue that Doris cannot query Iceberg views.
1. Prerequisite Dependencies
Iceberg version 1.7.x+
2. Design Approach
- Enhanced View Loading in IcebergMetadataCache:
Added the ability to load Iceberg views during loadTable operations.
Introduced getIcebergView() method to retrieve Iceberg views.
- New Methods in IcebergExternalTable:
isView(): Determines if the current Iceberg table is a view.
getViewText(): Retrieves the SQL definition query of the view.
- Execution Plan Generation Adjustment:
Checks isView() during plan generation to identify Iceberg views.
For views, generates a logical execution plan by inlining the view
definition as a subquery.
- Cache Invalidation Enhancement:
Added view cache invalidation in IcebergMetadataCache when invalidating
catalog/database/table caches.
### Problem Summary:
This PR resolves the issue where querying an Iceberg view in Doris
fails with an error indicating that the table does not exist.
Previously, Doris could not recognize or query Iceberg views, so any
attempt by users to access view data raised an exception.
---
.../create_preinstalled_scripts/iceberg/run14.sql | 87 +++++++++
.../main/java/org/apache/doris/common/Config.java | 3 +
.../main/java/org/apache/doris/catalog/Env.java | 6 +-
.../apache/doris/datasource/ExternalCatalog.java | 11 ++
.../doris/datasource/hive/HMSExternalTable.java | 4 +-
.../datasource/iceberg/IcebergExternalCatalog.java | 12 +-
.../datasource/iceberg/IcebergExternalTable.java | 108 ++++++++++-
.../datasource/iceberg/IcebergMetadataCache.java | 41 +++++
.../datasource/iceberg/IcebergMetadataOps.java | 99 +++++++++-
.../doris/datasource/iceberg/IcebergUtils.java | 81 +++++---
.../iceberg/source/IcebergApiSource.java | 11 ++
.../datasource/operations/ExternalMetadataOps.java | 33 ++++
.../doris/nereids/rules/analysis/BindRelation.java | 43 ++++-
.../plans/commands/ShowCreateTableCommand.java | 8 +
.../org/apache/doris/planner/IcebergTableSink.java | 3 +
.../iceberg/test_iceberg_view_query_p0.out | Bin 0 -> 1033 bytes
.../iceberg/test_iceberg_view_query_p0.groovy | 203 +++++++++++++++++++++
17 files changed, 712 insertions(+), 41 deletions(-)
diff --git
a/docker/thirdparties/docker-compose/iceberg/scripts/create_preinstalled_scripts/iceberg/run14.sql
b/docker/thirdparties/docker-compose/iceberg/scripts/create_preinstalled_scripts/iceberg/run14.sql
new file mode 100644
index 00000000000..da4f0035c65
--- /dev/null
+++
b/docker/thirdparties/docker-compose/iceberg/scripts/create_preinstalled_scripts/iceberg/run14.sql
@@ -0,0 +1,87 @@
+use demo.test_db;
+drop table if exists test_db.t_partitioned_table;
+drop table if exists test_db.t_unpartitioned_table;
+drop table if exists test_db.t_product_info;
+drop table if exists test_db.t_sales_record;
+CREATE TABLE IF NOT EXISTS t_partitioned_table (
+ col1 int,
+ col2 string,
+ col3 int,
+ col4 string,
+ col5 string
+) USING iceberg
+PARTITIONED BY (col5);
+CREATE TABLE IF NOT EXISTS t_unpartitioned_table (
+ col1 INT,
+ col2 VARCHAR(100),
+ col3 DECIMAL(10, 2)
+) USING iceberg;
+CREATE TABLE IF NOT EXISTS t_product_info (
+ product_id INT,
+ product_name VARCHAR(100),
+ price DECIMAL(10, 2)
+) USING iceberg;
+CREATE TABLE IF NOT EXISTS t_sales_record (
+ sales_id INT,
+ order_id INT,
+ product_id INT,
+ quantity_sold INT,
+ sale_date DATE
+) USING iceberg;
+INSERT INTO t_partitioned_table (col1, col2, col3, col4, col5)
+VALUES
+(1, 'Alice', 25, 'Female', 'New York'),
+(2, 'Bob', 30, 'Male', 'Los Angeles'),
+(3, 'Charlie', 35, 'Male', 'Chicago'),
+(4, 'David', 22, 'Male', 'Houston'),
+(5, 'Eve', 28, 'Female', 'Phoenix');
+INSERT INTO t_unpartitioned_table (col1, col2, col3)
+VALUES
+(1001, 'Product A', 20.00),
+(1002, 'Product B', 30.00),
+(1003, 'Product C', 40.00),
+(1004, 'Product D', 50.00),
+(1005, 'Product E', 60.00);
+INSERT INTO t_product_info (product_id, product_name, price)
+VALUES
+(1001, 'Product A', 20.00),
+(1002, 'Product B', 30.00),
+(1003, 'Product C', 40.00),
+(1004, 'Product D', 50.00),
+(1005, 'Product E', 60.00);
+INSERT INTO t_sales_record (sales_id, order_id, product_id, quantity_sold,
sale_date)
+VALUES
+(7001, 101, 1001, 2, date '2024-01-01'),
+(7002, 102, 1002, 3, date '2024-01-02'),
+(7003, 103, 1003, 1, date '2024-01-03'),
+(7004, 104, 1001, 1, date '2024-01-04'),
+(7005, 105, 1004, 4, date '2024-01-05');
+create view v_with_unpartitioned_table
+as
+select
+ *
+from
+ t_unpartitioned_table;
+create view v_with_partitioned_table
+ as
+ select * from t_partitioned_table order by col1;
+create view v_with_partitioned_column
+as
+select
+ col5
+from
+ t_partitioned_table;
+create view v_with_joint_table
+as
+SELECT
+ pi.product_name,
+ sr.quantity_sold,
+ sr.sale_date
+FROM
+ t_product_info pi
+ JOIN
+ t_sales_record sr
+ ON pi.product_id = sr.product_id
+WHERE
+ sr.sale_date BETWEEN '2024-01-01' AND '2024-01-03'
+ AND sr.quantity_sold > 1;
diff --git a/fe/fe-common/src/main/java/org/apache/doris/common/Config.java
b/fe/fe-common/src/main/java/org/apache/doris/common/Config.java
index 8faaa87a014..935845c8eb1 100644
--- a/fe/fe-common/src/main/java/org/apache/doris/common/Config.java
+++ b/fe/fe-common/src/main/java/org/apache/doris/common/Config.java
@@ -2291,6 +2291,9 @@ public class Config extends ConfigBase {
@ConfField(mutable = true)
public static boolean enable_query_hive_views = true;
+ @ConfField(mutable = true)
+ public static boolean enable_query_iceberg_views = true;
+
/**
* If set to true, doris will automatically synchronize hms metadata to
the cache in fe.
*/
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/Env.java
b/fe/fe-core/src/main/java/org/apache/doris/catalog/Env.java
index 8a58e58d938..110a38de279 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/Env.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/Env.java
@@ -4244,10 +4244,10 @@ public class Env {
sb.append("\n)");
} else if (table.getType() == TableType.ICEBERG_EXTERNAL_TABLE) {
addTableComment(table, sb);
- org.apache.iceberg.Table icebergTable = ((IcebergExternalTable)
table).getIcebergTable();
- sb.append("\nLOCATION
'").append(icebergTable.location()).append("'");
+ IcebergExternalTable icebergExternalTable = (IcebergExternalTable)
table;
+ sb.append("\nLOCATION
'").append(icebergExternalTable.location()).append("'");
sb.append("\nPROPERTIES (");
- Iterator<Entry<String, String>> iterator =
icebergTable.properties().entrySet().iterator();
+ Iterator<Entry<String, String>> iterator =
icebergExternalTable.properties().entrySet().iterator();
while (iterator.hasNext()) {
Entry<String, String> prop = iterator.next();
sb.append("\n \"").append(prop.getKey()).append("\" =
\"").append(prop.getValue()).append("\"");
diff --git
a/fe/fe-core/src/main/java/org/apache/doris/datasource/ExternalCatalog.java
b/fe/fe-core/src/main/java/org/apache/doris/datasource/ExternalCatalog.java
index 62e4074c254..a00a6c65035 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/datasource/ExternalCatalog.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/ExternalCatalog.java
@@ -1323,4 +1323,15 @@ public abstract class ExternalCatalog
public ThreadPoolExecutor getThreadPoolWithPreAuth() {
return threadPoolWithPreAuth;
}
+
+ /**
+ * Check if an external view exists.
+ * @param dbName
+ * @param viewName
+ * @return
+ */
+ public boolean viewExists(String dbName, String viewName) {
+ throw new UnsupportedOperationException("View is not supported.");
+ }
+
}
diff --git
a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HMSExternalTable.java
b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HMSExternalTable.java
index e19015d99b2..8e51fac6df1 100644
---
a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HMSExternalTable.java
+++
b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HMSExternalTable.java
@@ -664,7 +664,8 @@ public class HMSExternalTable extends ExternalTable
implements MTMVRelatedTableI
}
private Optional<SchemaCacheValue> getIcebergSchema(SchemaCacheKey key) {
- return IcebergUtils.loadSchemaCacheValue(catalog, dbName, name,
((IcebergSchemaCacheKey) key).getSchemaId());
+ return IcebergUtils.loadSchemaCacheValue(
+ catalog, dbName, name, ((IcebergSchemaCacheKey)
key).getSchemaId(), isView());
}
private Optional<SchemaCacheValue> getHudiSchema(SchemaCacheKey key) {
@@ -1190,4 +1191,5 @@ public class HMSExternalTable extends ExternalTable
implements MTMVRelatedTableI
return Lists.newArrayList();
}
}
+
}
diff --git
a/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/IcebergExternalCatalog.java
b/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/IcebergExternalCatalog.java
index bd5997ac386..0727bd15f41 100644
---
a/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/IcebergExternalCatalog.java
+++
b/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/IcebergExternalCatalog.java
@@ -94,7 +94,12 @@ public abstract class IcebergExternalCatalog extends
ExternalCatalog {
@Override
public List<String> listTableNames(SessionContext ctx, String dbName) {
makeSureInitialized();
- return metadataOps.listTableNames(dbName);
+ // On the Doris side, the result of SHOW TABLES for Iceberg external
tables includes both tables and views,
+ // so the combined set of tables and views is used here.
+ List<String> tableNames = metadataOps.listTableNames(dbName);
+ List<String> viewNames = metadataOps.listViewNames(dbName);
+ tableNames.addAll(viewNames);
+ return tableNames;
}
@Override
@@ -109,4 +114,9 @@ public abstract class IcebergExternalCatalog extends
ExternalCatalog {
Map<String, String> properties = catalogProperty.getHadoopProperties();
conf.set(Constants.AWS_CREDENTIALS_PROVIDER,
PropertyConverter.getAWSCredentialsProviders(properties));
}
+
+ @Override
+ public boolean viewExists(String dbName, String viewName) {
+ return metadataOps.viewExists(dbName, viewName);
+ }
}
diff --git
a/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/IcebergExternalTable.java
b/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/IcebergExternalTable.java
index da003e2caf6..1b1e0b2c223 100644
---
a/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/IcebergExternalTable.java
+++
b/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/IcebergExternalTable.java
@@ -28,6 +28,7 @@ import org.apache.doris.common.DdlException;
import org.apache.doris.datasource.ExternalSchemaCache.SchemaCacheKey;
import org.apache.doris.datasource.ExternalTable;
import org.apache.doris.datasource.SchemaCacheValue;
+import org.apache.doris.datasource.mvcc.EmptyMvccSnapshot;
import org.apache.doris.datasource.mvcc.MvccSnapshot;
import org.apache.doris.datasource.mvcc.MvccTable;
import org.apache.doris.datasource.systable.SupportedSysTables;
@@ -48,9 +49,13 @@ import org.apache.doris.thrift.TTableType;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
+import org.apache.commons.lang3.StringUtils;
import org.apache.iceberg.PartitionField;
import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.Table;
+import org.apache.iceberg.view.SQLViewRepresentation;
+import org.apache.iceberg.view.View;
+import org.apache.iceberg.view.ViewVersion;
import java.util.HashMap;
import java.util.List;
@@ -64,6 +69,8 @@ public class IcebergExternalTable extends ExternalTable
implements MTMVRelatedTa
private Table table;
private boolean isValidRelatedTableCached = false;
private boolean isValidRelatedTable = false;
+ private boolean isView;
+ private static final String ENGINE_PROP_NAME = "engine-name";
public IcebergExternalTable(long id, String name, String remoteName,
IcebergExternalCatalog catalog,
IcebergExternalDatabase db) {
@@ -78,6 +85,7 @@ public class IcebergExternalTable extends ExternalTable
implements MTMVRelatedTa
super.makeSureInitialized();
if (!objectCreated) {
objectCreated = true;
+ isView = catalog.viewExists(dbName, getRemoteName());
}
}
@@ -88,7 +96,10 @@ public class IcebergExternalTable extends ExternalTable
implements MTMVRelatedTa
@Override
public Optional<SchemaCacheValue> initSchema(SchemaCacheKey key) {
- return IcebergUtils.loadSchemaCacheValue(catalog, dbName, name,
((IcebergSchemaCacheKey) key).getSchemaId());
+ boolean isView = isView();
+ String tableName = getRemoteName();
+ return IcebergUtils.loadSchemaCacheValue(
+ catalog, dbName, tableName, ((IcebergSchemaCacheKey)
key).getSchemaId(), isView);
}
@Override
@@ -242,8 +253,12 @@ public class IcebergExternalTable extends ExternalTable
implements MTMVRelatedTa
@Override
public MvccSnapshot loadSnapshot(Optional<TableSnapshot> tableSnapshot,
Optional<TableScanParams> scanParams) {
- return new
IcebergMvccSnapshot(IcebergUtils.getIcebergSnapshotCacheValue(
+ if (isView()) {
+ return new EmptyMvccSnapshot();
+ } else {
+ return new
IcebergMvccSnapshot(IcebergUtils.getIcebergSnapshotCacheValue(
tableSnapshot, getCatalog(), getDbName(), getName(),
scanParams));
+ }
}
@Override
@@ -275,4 +290,93 @@ public class IcebergExternalTable extends ExternalTable
implements MTMVRelatedTa
makeSureInitialized();
return SupportedSysTables.ICEBERG_SUPPORTED_SYS_TABLES;
}
+
+ @Override
+ public boolean isView() {
+ makeSureInitialized();
+ return isView;
+ }
+
+ public String getViewText() {
+ try {
+ return catalog.getPreExecutionAuthenticator().execute(() -> {
+ View icebergView = IcebergUtils.getIcebergView(getCatalog(),
dbName, getRemoteName());
+ ViewVersion viewVersion = icebergView.currentVersion();
+ if (viewVersion == null) {
+ throw new RuntimeException(String.format("Cannot get view
version for view '%s'", icebergView));
+ }
+ Map<String, String> summary = viewVersion.summary();
+ if (summary == null) {
+ throw new RuntimeException(String.format("Cannot get
summary for view '%s'", icebergView));
+ }
+ String engineName = summary.get(ENGINE_PROP_NAME);
+ if (StringUtils.isEmpty(engineName)) {
+ throw new RuntimeException(String.format("Cannot get
engine-name for view '%s'", icebergView));
+ }
+ SQLViewRepresentation sqlViewRepresentation =
icebergView.sqlFor(engineName.toLowerCase());
+ if (sqlViewRepresentation == null) {
+ throw new UnsupportedOperationException("Cannot get view
text from iceberg view");
+ }
+ return sqlViewRepresentation.sql();
+ });
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ public String getSqlDialect() {
+ try {
+ return catalog.getPreExecutionAuthenticator().execute(() -> {
+ View icebergView = IcebergUtils.getIcebergView(getCatalog(),
dbName, getRemoteName());
+ ViewVersion viewVersion = icebergView.currentVersion();
+ if (viewVersion == null) {
+ throw new RuntimeException(String.format("Cannot get view
version for view '%s'", icebergView));
+ }
+ Map<String, String> summary = viewVersion.summary();
+ if (summary == null) {
+ throw new RuntimeException(String.format("Cannot get
summary for view '%s'", icebergView));
+ }
+ String engineName = summary.get(ENGINE_PROP_NAME);
+ if (StringUtils.isEmpty(engineName)) {
+ throw new RuntimeException(String.format("Cannot get
engine-name for view '%s'", icebergView));
+ }
+ return engineName.toLowerCase();
+ });
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ public View getIcebergView() {
+ return IcebergUtils.getIcebergView(getCatalog(), dbName,
getRemoteName());
+ }
+
+ /**
+ * get location of an iceberg table or view
+ * @return
+ */
+ public String location() {
+ if (isView()) {
+ View icebergView = getIcebergView();
+ return icebergView.location();
+ } else {
+ Table icebergTable = getIcebergTable();
+ return icebergTable.location();
+ }
+ }
+
+ /**
+ * get properties of an iceberg table or view
+ * @return
+ */
+ public Map<String, String> properties() {
+ if (isView()) {
+ View icebergView = getIcebergView();
+ return icebergView.properties();
+ } else {
+ Table icebergTable = getIcebergTable();
+ return icebergTable.properties();
+ }
+ }
+
}
diff --git
a/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/IcebergMetadataCache.java
b/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/IcebergMetadataCache.java
index f99b652b42d..3befc302d61 100644
---
a/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/IcebergMetadataCache.java
+++
b/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/IcebergMetadataCache.java
@@ -35,6 +35,7 @@ import org.apache.iceberg.ManifestFiles;
import org.apache.iceberg.SerializableTable;
import org.apache.iceberg.Snapshot;
import org.apache.iceberg.Table;
+import org.apache.iceberg.view.View;
import org.jetbrains.annotations.NotNull;
import java.util.HashMap;
@@ -49,6 +50,7 @@ public class IcebergMetadataCache {
private final LoadingCache<IcebergMetadataCacheKey, List<Snapshot>>
snapshotListCache;
private final LoadingCache<IcebergMetadataCacheKey, Table> tableCache;
private final LoadingCache<IcebergMetadataCacheKey,
IcebergSnapshotCacheValue> snapshotCache;
+ private final LoadingCache<IcebergMetadataCacheKey, View> viewCache;
public IcebergMetadataCache(ExecutorService executor) {
CacheFactory snapshotListCacheFactory = new CacheFactory(
@@ -74,6 +76,7 @@ public class IcebergMetadataCache {
true,
null);
this.snapshotCache = snapshotCacheFactory.buildCache(key ->
loadSnapshot(key), null, executor);
+ this.viewCache = tableCacheFactory.buildCache(key -> loadView(key),
null, executor);
}
public Table getIcebergTable(CatalogIf catalog, String dbName, String
tbName) {
@@ -153,6 +156,10 @@ public class IcebergMetadataCache {
snapshotCache.asMap().keySet().stream()
.filter(key -> key.catalog.getId() == catalogId)
.forEach(snapshotCache::invalidate);
+
+ viewCache.asMap().entrySet().stream()
+ .filter(entry -> entry.getKey().catalog.getId() == catalogId)
+ .forEach(entry -> viewCache.invalidate(entry.getKey()));
}
public void invalidateTableCache(long catalogId, String dbName, String
tblName) {
@@ -176,6 +183,13 @@ public class IcebergMetadataCache {
.filter(key -> key.catalog.getId() == catalogId &&
key.dbName.equals(dbName) && key.tableName.equals(
tblName))
.forEach(snapshotCache::invalidate);
+ viewCache.asMap().entrySet().stream()
+ .filter(entry -> {
+ IcebergMetadataCacheKey key = entry.getKey();
+ return key.catalog.getId() == catalogId &&
key.dbName.equals(dbName) && key.tableName.equals(
+ tblName);
+ })
+ .forEach(entry -> viewCache.invalidate(entry.getKey()));
}
public void invalidateDbCache(long catalogId, String dbName) {
@@ -196,6 +210,12 @@ public class IcebergMetadataCache {
snapshotCache.asMap().keySet().stream()
.filter(key -> key.catalog.getId() == catalogId &&
key.dbName.equals(dbName))
.forEach(snapshotCache::invalidate);
+ viewCache.asMap().entrySet().stream()
+ .filter(entry -> {
+ IcebergMetadataCacheKey key = entry.getKey();
+ return key.catalog.getId() == catalogId &&
key.dbName.equals(dbName);
+ })
+ .forEach(entry -> viewCache.invalidate(entry.getKey()));
}
private static void initIcebergTableFileIO(Table table, Map<String,
String> props) {
@@ -257,4 +277,25 @@ public class IcebergMetadataCache {
snapshotCache.estimatedSize()));
return res;
}
+
+ private View loadView(IcebergMetadataCacheKey key) {
+ IcebergMetadataOps ops;
+ if (key.catalog instanceof IcebergExternalCatalog) {
+ ops = (IcebergMetadataOps) (((IcebergExternalCatalog)
key.catalog).getMetadataOps());
+ } else {
+ return null;
+ }
+ try {
+ return ((ExternalCatalog)
key.catalog).getPreExecutionAuthenticator().execute(() ->
+ ops.loadView(key.dbName, key.tableName));
+ } catch (Exception e) {
+ throw new RuntimeException(ExceptionUtils.getRootCauseMessage(e),
e);
+ }
+
+ }
+
+ public View getIcebergView(CatalogIf catalog, String dbName, String
tbName) {
+ IcebergMetadataCacheKey key = IcebergMetadataCacheKey.of(catalog,
dbName, tbName);
+ return viewCache.get(key);
+ }
}
diff --git
a/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/IcebergMetadataOps.java
b/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/IcebergMetadataOps.java
index 70953c689a1..0589f95b55d 100644
---
a/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/IcebergMetadataOps.java
+++
b/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/IcebergMetadataOps.java
@@ -42,10 +42,14 @@ import org.apache.iceberg.catalog.Catalog;
import org.apache.iceberg.catalog.Namespace;
import org.apache.iceberg.catalog.SupportsNamespaces;
import org.apache.iceberg.catalog.TableIdentifier;
+import org.apache.iceberg.catalog.ViewCatalog;
+import org.apache.iceberg.types.Type;
+import org.apache.iceberg.view.View;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import java.util.ArrayList;
+import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Optional;
@@ -127,7 +131,24 @@ public class IcebergMetadataOps implements
ExternalMetadataOps {
try {
return preExecutionAuthenticator.execute(() -> {
List<TableIdentifier> tableIdentifiers =
catalog.listTables(getNamespace(dbName));
- return
tableIdentifiers.stream().map(TableIdentifier::name).collect(Collectors.toList());
+ List<String> views;
+ // Our original intention was simply to clearly define the
responsibilities of ViewCatalog and Catalog.
+ // IcebergMetadataOps handles listTableNames and listViewNames
separately.
+ // listTableNames should only focus on the table type,
+ // but in reality, Iceberg's return includes views. Therefore,
we added a filter to exclude views.
+ if (catalog instanceof ViewCatalog) {
+ views = ((ViewCatalog)
catalog).listViews(getNamespace(dbName))
+
.stream().map(TableIdentifier::name).collect(Collectors.toList());
+ } else {
+ views = Collections.emptyList();
+ }
+ if (views.isEmpty()) {
+ return
tableIdentifiers.stream().map(TableIdentifier::name).collect(Collectors.toList());
+ } else {
+ return tableIdentifiers.stream()
+ .map(TableIdentifier::name)
+ .filter(name ->
!views.contains(name)).collect(Collectors.toList());
+ }
});
} catch (Exception e) {
throw new RuntimeException("Failed to list table names, error
message is:" + e.getMessage(), e);
@@ -280,7 +301,7 @@ public class IcebergMetadataOps implements
ExternalMetadataOps {
.map(col -> new StructField(col.getName(), col.getType(),
col.getComment(), col.isAllowNull()))
.collect(Collectors.toList());
StructType structType = new StructType(new ArrayList<>(collect));
- org.apache.iceberg.types.Type visit =
+ Type visit =
DorisTypeVisitor.visit(structType, new
DorisTypeToIcebergType(structType));
Schema schema = new
Schema(visit.asNestedType().asStructType().fields());
Map<String, String> properties = stmt.getProperties();
@@ -309,7 +330,11 @@ public class IcebergMetadataOps implements
ExternalMetadataOps {
public void dropTableImpl(String dbName, String tableName, boolean
ifExists) throws DdlException {
try {
preExecutionAuthenticator.execute(() -> {
- performDropTable(dbName, tableName, ifExists);
+ if (getExternalCatalog().getMetadataOps().viewExists(dbName,
tableName)) {
+ performDropView(dbName, tableName, ifExists);
+ } else {
+ performDropTable(dbName, tableName, ifExists);
+ }
return null;
});
} catch (Exception e) {
@@ -373,6 +398,47 @@ public class IcebergMetadataOps implements
ExternalMetadataOps {
}
}
+ @Override
+ public boolean viewExists(String dbName, String viewName) {
+ if (!(catalog instanceof ViewCatalog)) {
+ return false;
+ }
+ try {
+ return preExecutionAuthenticator.execute(() ->
+ ((ViewCatalog)
catalog).viewExists(getTableIdentifier(dbName, viewName)));
+ } catch (Exception e) {
+ throw new RuntimeException("Failed to check view exist, error
message is:" + e.getMessage(), e);
+
+ }
+ }
+
+ @Override
+ public View loadView(String dbName, String tblName) {
+ if (!(catalog instanceof ViewCatalog)) {
+ return null;
+ }
+ try {
+ ViewCatalog viewCatalog = (ViewCatalog) catalog;
+ return preExecutionAuthenticator.execute(() ->
viewCatalog.loadView(TableIdentifier.of(dbName, tblName)));
+ } catch (Exception e) {
+ throw new RuntimeException("Failed to load view, error message
is:" + e.getMessage(), e);
+ }
+ }
+
+ @Override
+ public List<String> listViewNames(String db) {
+ if (!(catalog instanceof ViewCatalog)) {
+ return Collections.emptyList();
+ }
+ try {
+ return preExecutionAuthenticator.execute(() ->
+ ((ViewCatalog) catalog).listViews(Namespace.of(db))
+
.stream().map(TableIdentifier::name).collect(Collectors.toList()));
+ } catch (Exception e) {
+ throw new RuntimeException("Failed to list view names, error
message is:" + e.getMessage(), e);
+ }
+ }
+
private TableIdentifier getTableIdentifier(String dbName, String tblName) {
return externalCatalogName
.map(s -> TableIdentifier.of(s, dbName, tblName))
@@ -392,4 +458,31 @@ public class IcebergMetadataOps implements
ExternalMetadataOps {
public ThreadPoolExecutor getThreadPoolWithPreAuth() {
return dorisCatalog.getThreadPoolExecutor();
}
+
+ private void performDropView(String dbName, String viewName, boolean
ifExists) throws DdlException {
+ if (!(catalog instanceof ViewCatalog)) {
+ throw new DdlException("Drop Iceberg view is not supported with
not view catalog.");
+ }
+ ExternalDatabase<?> db = dorisCatalog.getDbNullable(dbName);
+ if (db == null) {
+ if (ifExists) {
+ LOG.info("database [{}] does not exist when drop view[{}]",
dbName, viewName);
+ return;
+ } else {
+ ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR,
dbName);
+ }
+ }
+ ViewCatalog viewCatalog = (ViewCatalog) catalog;
+ if (!viewExists(dbName, viewName)) {
+ if (ifExists) {
+ LOG.info("drop view[{}] which does not exist", viewName);
+ return;
+ } else {
+ ErrorReport.reportDdlException(ErrorCode.ERR_UNKNOWN_TABLE,
viewName, dbName);
+ }
+ }
+ viewCatalog.dropView(getTableIdentifier(dbName, viewName));
+ db.setUnInitialized(true);
+ }
+
}
diff --git
a/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/IcebergUtils.java
b/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/IcebergUtils.java
index e956e236903..73d4a2fa76a 100644
---
a/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/IcebergUtils.java
+++
b/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/IcebergUtils.java
@@ -103,6 +103,7 @@ import org.apache.iceberg.types.Types;
import org.apache.iceberg.util.LocationUtil;
import org.apache.iceberg.util.SnapshotUtil;
import org.apache.iceberg.util.StructProjection;
+import org.apache.iceberg.view.View;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
@@ -629,18 +630,29 @@ public class IcebergUtils {
/**
* Get iceberg schema from catalog and convert them to doris schema
*/
- public static List<Column> getSchema(ExternalCatalog catalog, String
dbName, String name, long schemaId) {
+ public static List<Column> getSchema(
+ ExternalCatalog catalog, String dbName, String name, long
schemaId, boolean isView) {
try {
return catalog.getPreExecutionAuthenticator().execute(() -> {
- org.apache.iceberg.Table icebergTable =
getIcebergTable(catalog, dbName, name);
Schema schema;
- if (schemaId == NEWEST_SCHEMA_ID ||
icebergTable.currentSnapshot() == null) {
- schema = icebergTable.schema();
+ if (isView) {
+ View icebergView = getIcebergView(catalog, dbName, name);
+ if (schemaId == NEWEST_SCHEMA_ID) {
+ schema = icebergView.schema();
+ } else {
+ schema = icebergView.schemas().get((int) schemaId);
+ }
} else {
- schema = icebergTable.schemas().get((int) schemaId);
+ Table icebergTable = getIcebergTable(catalog, dbName,
name);
+ if (schemaId == NEWEST_SCHEMA_ID ||
icebergTable.currentSnapshot() == null) {
+ schema = icebergTable.schema();
+ } else {
+ schema = icebergTable.schemas().get((int) schemaId);
+ }
}
+ String type = isView ? "view" : "table";
Preconditions.checkNotNull(schema,
- "Schema for table " + catalog.getName() + "." + dbName
+ "." + name + " is null");
+ "Schema for " + type + " " + catalog.getName() + "." +
dbName + "." + name + " is null");
return parseSchema(schema);
});
} catch (Exception e) {
@@ -1169,25 +1181,6 @@ public class IcebergUtils {
}
}
- // load table schema from iceberg API to external schema cache.
- public static Optional<SchemaCacheValue> loadSchemaCacheValue(
- ExternalCatalog catalog, String dbName, String tbName, long
schemaId) {
- Table table = IcebergUtils.getIcebergTable(catalog, dbName, tbName);
- List<Column> schema = IcebergUtils.getSchema(catalog, dbName, tbName,
schemaId);
- List<Column> tmpColumns = Lists.newArrayList();
- PartitionSpec spec = table.spec();
- for (PartitionField field : spec.fields()) {
- Types.NestedField col = table.schema().findField(field.sourceId());
- for (Column c : schema) {
- if (c.getName().equalsIgnoreCase(col.name())) {
- tmpColumns.add(c);
- break;
- }
- }
- }
- return Optional.of(new IcebergSchemaCacheValue(schema, tmpColumns));
- }
-
public static List<Column> getIcebergSchema(
TableIf tableIf,
ExternalCatalog catalog,
@@ -1213,4 +1206,42 @@ public class IcebergUtils {
Optional.empty(), catalog, dbName, tbName, Optional.empty());
}
}
+
+ public static org.apache.iceberg.view.View getIcebergView(ExternalCatalog
catalog, String dbName, String tblName) {
+ return getIcebergViewInternal(catalog, dbName, tblName);
+ }
+
+ private static org.apache.iceberg.view.View
getIcebergViewInternal(ExternalCatalog catalog, String dbName,
+ String tblName) {
+ IcebergMetadataCache metadataCache =
Env.getCurrentEnv().getExtMetaCacheMgr().getIcebergMetadataCache();
+ return metadataCache.getIcebergView(catalog, dbName, tblName);
+ }
+
+ public static Optional<SchemaCacheValue> loadSchemaCacheValue(
+ ExternalCatalog catalog, String dbName, String tbName, long
schemaId, boolean isView) {
+ List<Column> schema = IcebergUtils.getSchema(catalog, dbName, tbName,
schemaId, isView);
+ List<Column> tmpColumns = Lists.newArrayList();
+ if (!isView) {
+ // get table partition column info
+ Table table = IcebergUtils.getIcebergTable(catalog, dbName,
tbName);
+ PartitionSpec spec = table.spec();
+ for (PartitionField field : spec.fields()) {
+ Types.NestedField col =
table.schema().findField(field.sourceId());
+ for (Column c : schema) {
+ if (c.getName().equalsIgnoreCase(col.name())) {
+ tmpColumns.add(c);
+ break;
+ }
+ }
+ }
+ }
+ return Optional.of(new IcebergSchemaCacheValue(schema, tmpColumns));
+ }
+
+ public static String showCreateView(IcebergExternalTable
icebergExternalTable) {
+ return String.format("CREATE VIEW `%s` AS ",
icebergExternalTable.getName())
+ +
+ icebergExternalTable.getViewText();
+ }
+
}
diff --git
a/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/source/IcebergApiSource.java
b/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/source/IcebergApiSource.java
index 4b4e76bea47..8ba1d71c1cb 100644
---
a/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/source/IcebergApiSource.java
+++
b/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/source/IcebergApiSource.java
@@ -42,6 +42,17 @@ public class IcebergApiSource implements IcebergSource {
public IcebergApiSource(IcebergExternalTable table, TupleDescriptor desc,
Map<String, ColumnRange> columnNameToRange) {
+ // Theoretically, the IcebergScanNode is responsible for scanning data
from physical tables.
+ // Views should not reach this point.
+ // By adding this validation, we aim to ensure that if a view query
does end up here, it indicates a bug.
+ // This helps us identify issues promptly.
+
+    // When using the legacy planner, a query on an iceberg view will enter this path.
+    // enable_fallback_to_original_planner should be set to false
+    // so that the exception is thrown by the first planner.
+ if (table.isView()) {
+ throw new UnsupportedOperationException("IcebergApiSource does not
support view");
+ }
this.icebergExtTable = table;
this.originTable =
Env.getCurrentEnv().getExtMetaCacheMgr().getIcebergMetadataCache().getIcebergTable(
diff --git
a/fe/fe-core/src/main/java/org/apache/doris/datasource/operations/ExternalMetadataOps.java
b/fe/fe-core/src/main/java/org/apache/doris/datasource/operations/ExternalMetadataOps.java
index ff2becda2af..0ac498928ad 100644
---
a/fe/fe-core/src/main/java/org/apache/doris/datasource/operations/ExternalMetadataOps.java
+++
b/fe/fe-core/src/main/java/org/apache/doris/datasource/operations/ExternalMetadataOps.java
@@ -25,6 +25,9 @@ import org.apache.doris.common.DdlException;
import org.apache.doris.common.UserException;
import org.apache.doris.nereids.trees.plans.commands.CreateDatabaseCommand;
+import org.apache.iceberg.view.View;
+
+import java.util.Collections;
import java.util.List;
/**
@@ -173,4 +176,34 @@ public interface ExternalMetadataOps {
* close the connection, eg, to hms
*/
void close();
+
+    /**
+     * Load an iceberg view.
+     * @param dbName the database name
+     * @param viewName the view name
+     * @return the loaded iceberg view
+     */
+ default View loadView(String dbName, String viewName) {
+ throw new UnsupportedOperationException("Load view is not supported.");
+ }
+
+    /**
+     * Check if an Iceberg view exists.
+     * @param dbName the database name
+     * @param viewName the view name
+     * @return true if the view exists, false otherwise
+     */
+ default boolean viewExists(String dbName, String viewName) {
+ throw new UnsupportedOperationException("View is not supported.");
+ }
+
+    /**
+     * List all views under a specific database.
+     * @param db the database name
+     * @return the list of view names, or an empty list if views are not supported
+     */
+ default List<String> listViewNames(String db) {
+ return Collections.emptyList();
+ }
+
}
diff --git
a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/BindRelation.java
b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/BindRelation.java
index fe4b47bd8f1..d098a42aad1 100644
---
a/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/BindRelation.java
+++
b/fe/fe-core/src/main/java/org/apache/doris/nereids/rules/analysis/BindRelation.java
@@ -17,6 +17,7 @@
package org.apache.doris.nereids.rules.analysis;
+import org.apache.doris.analysis.TableSnapshot;
import org.apache.doris.catalog.AggStateType;
import org.apache.doris.catalog.AggregateType;
import org.apache.doris.catalog.Column;
@@ -35,6 +36,7 @@ import org.apache.doris.common.util.Util;
import org.apache.doris.datasource.ExternalTable;
import org.apache.doris.datasource.hive.HMSExternalTable;
import org.apache.doris.datasource.hive.HMSExternalTable.DLAType;
+import org.apache.doris.datasource.iceberg.IcebergExternalTable;
import org.apache.doris.nereids.CTEContext;
import org.apache.doris.nereids.CascadesContext;
import org.apache.doris.nereids.SqlCacheContext;
@@ -409,7 +411,7 @@ public class BindRelation extends OneAnalysisRuleFactory {
String hiveCatalog = hmsTable.getCatalog().getName();
String hiveDb = hmsTable.getDatabase().getFullName();
String ddlSql = hmsTable.getViewText();
- Plan hiveViewPlan = parseAndAnalyzeHiveView(
+ Plan hiveViewPlan = parseAndAnalyzeExternalView(
hmsTable, hiveCatalog, hiveDb, ddlSql,
cascadesContext);
return new LogicalSubQueryAlias<>(qualifiedTableName,
hiveViewPlan);
}
@@ -429,6 +431,33 @@ public class BindRelation extends OneAnalysisRuleFactory {
Optional.ofNullable(unboundRelation.getScanParams()));
}
case ICEBERG_EXTERNAL_TABLE:
+ IcebergExternalTable icebergExternalTable =
(IcebergExternalTable) table;
+ if (Config.enable_query_iceberg_views &&
icebergExternalTable.isView()) {
+ Optional<TableSnapshot> tableSnapshot =
unboundRelation.getTableSnapshot();
+ if (tableSnapshot.isPresent()) {
+ // iceberg view not supported with snapshot
time/version travel
+                        // note that enable_fallback_to_original_planner should be set to false,
+                        // or else this exception will not be thrown,
+                        // because the legacy planner will retry and throw a different exception
+ throw new UnsupportedOperationException(
+ "iceberg view not supported with snapshot
time/version travel");
+ }
+ isView = true;
+ String icebergCatalog =
icebergExternalTable.getCatalog().getName();
+ String icebergDb =
icebergExternalTable.getDatabase().getFullName();
+ String ddlSql = icebergExternalTable.getViewText();
+ Plan icebergViewPlan =
parseAndAnalyzeExternalView(icebergExternalTable,
+ icebergCatalog, icebergDb, ddlSql,
cascadesContext);
+ return new LogicalSubQueryAlias<>(qualifiedTableName,
icebergViewPlan);
+ }
+ if (icebergExternalTable.isView()) {
+ throw new UnsupportedOperationException(
+ "please set enable_query_iceberg_views=true to
enable query iceberg views");
+ }
+ return new
LogicalFileScan(unboundRelation.getRelationId(), (ExternalTable) table,
+ qualifierWithoutTableName,
unboundRelation.getTableSample(),
+ unboundRelation.getTableSnapshot(), ImmutableList.of(),
+ Optional.ofNullable(unboundRelation.getScanParams()));
case PAIMON_EXTERNAL_TABLE:
case MAX_COMPUTE_EXTERNAL_TABLE:
case TRINO_CONNECTOR_EXTERNAL_TABLE:
@@ -469,15 +498,17 @@ public class BindRelation extends OneAnalysisRuleFactory {
}
}
- private Plan parseAndAnalyzeHiveView(
- HMSExternalTable table, String hiveCatalog, String hiveDb, String
ddlSql, CascadesContext cascadesContext) {
+ private Plan parseAndAnalyzeExternalView(
+ ExternalTable table, String externalCatalog, String externalDb,
+ String ddlSql, CascadesContext cascadesContext) {
ConnectContext ctx = cascadesContext.getConnectContext();
String previousCatalog = ctx.getCurrentCatalog().getName();
String previousDb = ctx.getDatabase();
String convertedSql = SqlDialectHelper.convertSqlByDialect(ddlSql,
ctx.getSessionVariable());
- // change catalog and db to hive catalog and db, so that we can parse
and analyze the view sql in hive context.
- ctx.changeDefaultCatalog(hiveCatalog);
- ctx.setDatabase(hiveDb);
+ // change catalog and db to external catalog and db,
+ // so that we can parse and analyze the view sql in external context.
+ ctx.changeDefaultCatalog(externalCatalog);
+ ctx.setDatabase(externalDb);
try {
return parseAndAnalyzeView(table, convertedSql, cascadesContext);
} finally {
diff --git
a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/ShowCreateTableCommand.java
b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/ShowCreateTableCommand.java
index ddb5eb56600..5434c6cad94 100644
---
a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/ShowCreateTableCommand.java
+++
b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/ShowCreateTableCommand.java
@@ -31,6 +31,8 @@ import org.apache.doris.common.ErrorReport;
import org.apache.doris.common.util.Util;
import org.apache.doris.datasource.hive.HMSExternalTable;
import org.apache.doris.datasource.hive.HiveMetaStoreClientHelper;
+import org.apache.doris.datasource.iceberg.IcebergExternalTable;
+import org.apache.doris.datasource.iceberg.IcebergUtils;
import org.apache.doris.mysql.privilege.PrivPredicate;
import org.apache.doris.nereids.trees.plans.PlanType;
import org.apache.doris.nereids.trees.plans.commands.info.TableNameInfo;
@@ -134,6 +136,12 @@ public class ShowCreateTableCommand extends ShowCommand {
HiveMetaStoreClientHelper.showCreateTable((HMSExternalTable) table)));
return new ShowResultSet(META_DATA, rows);
}
+ if ((table.getType() == Table.TableType.ICEBERG_EXTERNAL_TABLE)
+ && ((IcebergExternalTable) table).isView()) {
+ rows.add(Arrays.asList(table.getName(),
+ IcebergUtils.showCreateView(((IcebergExternalTable)
table))));
+ return new ShowResultSet(META_DATA, rows);
+ }
List<String> createTableStmt = Lists.newArrayList();
Env.getDdlStmt(null, null, table, createTableStmt, null, null,
false,
true /* hide password */, false, -1L, isBrief, false);
diff --git
a/fe/fe-core/src/main/java/org/apache/doris/planner/IcebergTableSink.java
b/fe/fe-core/src/main/java/org/apache/doris/planner/IcebergTableSink.java
index 5bc0c803cb9..29746fc34e4 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/planner/IcebergTableSink.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/planner/IcebergTableSink.java
@@ -59,6 +59,9 @@ public class IcebergTableSink extends
BaseExternalTableDataSink {
public IcebergTableSink(IcebergExternalTable targetTable) {
super();
+ if (targetTable.isView()) {
+ throw new UnsupportedOperationException("Write data to iceberg
view is not supported");
+ }
this.targetTable = targetTable;
}
diff --git
a/regression-test/data/external_table_p0/iceberg/test_iceberg_view_query_p0.out
b/regression-test/data/external_table_p0/iceberg/test_iceberg_view_query_p0.out
new file mode 100644
index 00000000000..480a2a74156
Binary files /dev/null and
b/regression-test/data/external_table_p0/iceberg/test_iceberg_view_query_p0.out
differ
diff --git
a/regression-test/suites/external_table_p0/iceberg/test_iceberg_view_query_p0.groovy
b/regression-test/suites/external_table_p0/iceberg/test_iceberg_view_query_p0.groovy
new file mode 100644
index 00000000000..a855ed38af1
--- /dev/null
+++
b/regression-test/suites/external_table_p0/iceberg/test_iceberg_view_query_p0.groovy
@@ -0,0 +1,203 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+suite("test_iceberg_view_query_p0",
"p0,external,iceberg,external_docker,external_docker_iceberg") {
+
+ String enableIcebergTest =
context.config.otherConfigs.get("enableIcebergTest")
+ // if (enableIcebergTest == null ||
!enableIcebergTest.equalsIgnoreCase("true")) {
+    // This suite cannot be run currently because only the hive catalog can support views,
+    // but we can only create views in the rest catalog.
+    // Will be fixed later.
+ if (true) {
+ logger.info("disable iceberg test.")
+ return
+ }
+
+ try {
+ String hms_port = context.config.otherConfigs.get(hivePrefix +
"HmsPort")
+ String hdfs_port = context.config.otherConfigs.get(hivePrefix +
"HdfsPort")
+ String externalEnvIp = context.config.otherConfigs.get("externalEnvIp")
+
+ //create iceberg hms catalog
+ String iceberg_catalog_name = "test_iceberg_view_query_p0"
+ sql """drop catalog if exists ${iceberg_catalog_name}"""
+ sql """create catalog if not exists ${iceberg_catalog_name} properties
(
+ 'type'='iceberg',
+ 'iceberg.catalog.type'='hms',
+ 'hive.metastore.uris' = 'thrift://${externalEnvIp}:${hms_port}',
+ 'fs.defaultFS' = 'hdfs://${externalEnvIp}:${hdfs_port}',
+ 'use_meta_cache' = 'true'
+ );"""
+
+ //using database and close planner fallback
+ sql """use `${iceberg_catalog_name}`.`test_db`"""
+ sql """set enable_fallback_to_original_planner=false;"""
+
+ // run all suites
+ def q01 = {
+ order_qt_q01 """select * from v_with_partitioned_table order by
col1"""
+ order_qt_q02 """select * from v_with_unpartitioned_table order by
col1"""
+ order_qt_q03 """select * from v_with_partitioned_column order by
col5"""
+ }
+
+ def q02 = {
+ order_qt_q01 """select count(*) from v_with_partitioned_table"""
+ order_qt_q02 """select count(*) from v_with_unpartitioned_table"""
+ order_qt_q03 """select count(*) from v_with_partitioned_column"""
+ }
+
+ def q03 = {
+ order_qt_q01 """select col1,col2,col3,col4 from
v_with_partitioned_table order by col1"""
+ order_qt_q02 """select col5 from v_with_partitioned_table order by
col5"""
+ }
+
+ def q04 = {
+ order_qt_q01 """describe v_with_partitioned_table"""
+ order_qt_q02 """describe v_with_unpartitioned_table"""
+ order_qt_q03 """describe v_with_partitioned_column"""
+ }
+
+ q01()
+ q02()
+ q03()
+ q04()
+
+ def result1 = sql """explain verbose select * from
v_with_partitioned_table"""
+ assertTrue(result1.contains('t_partitioned_table'))
+ def result2 = sql """explain verbose select * from
v_with_unpartitioned_table"""
+ assertTrue(result2.contains('v_with_unpartitioned_table'))
+ def result3 = sql """explain verbose select * from
v_with_partitioned_column"""
+ assertTrue(result3.contains('t_partitioned_table'))
+
+ def result4 = sql """explain verbose select count(*) from
v_with_partitioned_table"""
+ assertTrue(result4.contains('t_partitioned_table'))
+ def result5 = sql """explain verbose select count(*) from
v_with_unpartitioned_table"""
+ assertTrue(result5.contains('t_unpartitioned_table'))
+ def result6 = sql """explain verbose select count(*) from
v_with_partitioned_column"""
+ assertTrue(result6.contains('t_partitioned_table'))
+
+ def result7 = sql """explain verbose select col1,col2,col3,col4 from
v_with_partitioned_table"""
+ assertTrue(result7.contains('t_partitioned_table'))
+ def result8 = sql """explain verbose select col5 from
v_with_partitioned_table"""
+ assertTrue(result8.contains('t_partitioned_table'))
+
+ def result9 = sql """show create table v_with_partitioned_table"""
+ assertTrue(result9.contains('v_with_partitioned_table'))
+ def result10 = sql """show create table v_with_unpartitioned_table"""
+ assertTrue(result10.contains('v_with_unpartitioned_table'))
+ def result11 = sql """show create table v_with_partitioned_column"""
+ assertTrue(result11.contains('v_with_partitioned_column'))
+
+ def result12 = sql """show create view v_with_partitioned_table"""
+ assertTrue(result12.contains('v_with_partitioned_table'))
+ def result13 = sql """show create view v_with_unpartitioned_table"""
+ assertTrue(result13.contains('v_with_unpartitioned_table'))
+ def result14 = sql """show create view v_with_partitioned_column"""
+ assertTrue(result14.contains('v_with_partitioned_column'))
+
+ try {
+ sql """select * from v_with_partitioned_table FOR TIME AS OF
'2025-06-11 20:17:01' order by col1 limit 10"""
+ } catch (Exception e) {
+ assertTrue(e.getMessage().contains("iceberg view not supported
with snapshot time/version travel"), e.getMessage())
+ }
+ try {
+ sql """select * from v_with_partitioned_table FOR VERSION AS OF
5497706844625725452 order by col1 limit 10"""
+ } catch (Exception e) {
+ assertTrue(e.getMessage().contains("iceberg view not supported
with snapshot time/version travel"), e.getMessage())
+ }
+ try {
+ sql """select count(*) from v_with_partitioned_table FOR TIME AS
OF '2025-06-11 20:17:01'"""
+ } catch (Exception e) {
+ assertTrue(e.getMessage().contains("iceberg view not supported
with snapshot time/version travel"), e.getMessage())
+ }
+ try {
+ sql """select count(*) from v_with_partitioned_table FOR VERSION
AS OF 5497706844625725452"""
+ } catch (Exception e) {
+ assertTrue(e.getMessage().contains("iceberg view not supported
with snapshot time/version travel"), e.getMessage())
+ }
+
+ try {
+ sql """select * from v_with_unpartitioned_table FOR TIME AS OF
'2025-06-11 20:17:01' order by col1 limit 10"""
+ } catch (Exception e) {
+ assertTrue(e.getMessage().contains("iceberg view not supported
with snapshot time/version travel"), e.getMessage())
+ }
+ try {
+ sql """select * from v_with_unpartitioned_table FOR VERSION AS OF
5497706844625725452 order by col1 limit 10"""
+ } catch (Exception e) {
+ assertTrue(e.getMessage().contains("iceberg view not supported
with snapshot time/version travel"), e.getMessage())
+ }
+ try {
+ sql """select count(*) from v_with_unpartitioned_table FOR TIME AS
OF '2025-06-11 20:17:01'"""
+ } catch (Exception e) {
+ assertTrue(e.getMessage().contains("iceberg view not supported
with snapshot time/version travel"), e.getMessage())
+ }
+ try {
+ sql """select count(*) from v_with_unpartitioned_table FOR VERSION
AS OF 5497706844625725452"""
+ } catch (Exception e) {
+ assertTrue(e.getMessage().contains("iceberg view not supported
with snapshot time/version travel"), e.getMessage())
+ }
+
+ try {
+ sql """select * from v_with_partitioned_column FOR TIME AS OF
'2025-06-11 20:17:01' order by col5 limit 10"""
+ } catch (Exception e) {
+ assertTrue(e.getMessage().contains("iceberg view not supported
with snapshot time/version travel"), e.getMessage())
+ }
+ try {
+ sql """select * from v_with_partitioned_column FOR VERSION AS OF
5497706844625725452 order by col5 limit 10"""
+ } catch (Exception e) {
+ assertTrue(e.getMessage().contains("iceberg view not supported
with snapshot time/version travel"), e.getMessage())
+ }
+ try {
+ sql """select count(*) from v_with_partitioned_column FOR TIME AS
OF '2025-06-11 20:17:01'"""
+ } catch (Exception e) {
+ assertTrue(e.getMessage().contains("iceberg view not supported
with snapshot time/version travel"), e.getMessage())
+ }
+ try {
+ sql """select count(*) from v_with_partitioned_column FOR VERSION
AS OF 5497706844625725452"""
+ } catch (Exception e) {
+ assertTrue(e.getMessage().contains("iceberg view not supported
with snapshot time/version travel"), e.getMessage())
+ }
+
+ try {
+ sql """select * from v_with_joint_table FOR TIME AS OF '2025-06-11
20:17:01' order by sale_date limit 10"""
+ } catch (Exception e) {
+ assertTrue(e.getMessage().contains("iceberg view not supported
with snapshot time/version travel"), e.getMessage())
+ }
+ try {
+ sql """select * from v_with_joint_table FOR VERSION AS OF
5497706844625725452 order by sale_date limit 10"""
+ } catch (Exception e) {
+ assertTrue(e.getMessage().contains("iceberg view not supported
with snapshot time/version travel"), e.getMessage())
+ }
+ try {
+ sql """select count(*) from v_with_joint_table FOR TIME AS OF
'2025-06-11 20:17:01'"""
+ } catch (Exception e) {
+ assertTrue(e.getMessage().contains("iceberg view not supported
with snapshot time/version travel"), e.getMessage())
+ }
+ try {
+ sql """select count(*) from v_with_joint_table FOR VERSION AS OF
5497706844625725452"""
+ } catch (Exception e) {
+ assertTrue(e.getMessage().contains("iceberg view not supported
with snapshot time/version travel"), e.getMessage())
+ }
+
+ sql """drop view v_with_partitioned_table"""
+ sql """drop view v_with_unpartitioned_table"""
+ sql """drop view v_with_partitioned_column"""
+
+ sql """drop catalog if exists ${iceberg_catalog_name}"""
+ } finally {
+ }
+}
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]