This is an automated email from the ASF dual-hosted git repository.
kirs pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git
The following commit(s) were added to refs/heads/master by this push:
new 357b563b025 [feat](iceberg-catalog)Support Rest(S3Tables) Catalog And
Add IAM role test (#56647)
357b563b025 is described below
commit 357b563b0255e44701d3cc3c321d5e5c58d49f81
Author: Calvin Kirs <[email protected]>
AuthorDate: Thu Oct 9 15:45:15 2025 +0800
[feat](iceberg-catalog)Support Rest(S3Tables) Catalog And Add IAM role test
(#56647)
https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-tables-integrating-open-source.html
Accessing tables using the Amazon S3 Tables Iceberg REST endpoint
```
CREATE CATALOG `rest_s3tables` PROPERTIES (
"warehouse" =
"arn:aws:s3tables:ap-east-1:<account_id>:bucket/doris-s3-table-bucket",
"uri" = "https://s3tables.ap-east-1.amazonaws.com/iceberg",
"type" = "iceberg",
"iceberg.rest.sigv4-enabled" = "true",
"iceberg.rest.signing-region" = "ap-east-1",
"iceberg.rest.signing-name" = "s3tables",
"iceberg.rest.secret-access-key" = "<ACCESS_KEY>",
"iceberg.rest.access-key-id" = "<SECRET_KEY>",
"iceberg.catalog.type" = "rest"
);
```
---
.../property/metastore/IcebergRestProperties.java | 4 +-
.../storage/AbstractS3CompatibleProperties.java | 21 ++--
.../metastore/IcebergRestPropertiesTest.java | 12 ++-
.../property/storage/S3PropertiesTest.java | 2 +-
.../aws_iam_role_p0/test_catalog_with_role.groovy | 118 +++++++++++++++++++++
5 files changed, 143 insertions(+), 14 deletions(-)
diff --git
a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/metastore/IcebergRestProperties.java
b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/metastore/IcebergRestProperties.java
index faf5d736af1..3110745b91e 100644
---
a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/metastore/IcebergRestProperties.java
+++
b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/metastore/IcebergRestProperties.java
@@ -287,8 +287,8 @@ public class IcebergRestProperties extends
AbstractIcebergProperties {
}
private void addGlueRestCatalogProperties() {
- if (Strings.isNotBlank(icebergRestSigningName) &&
icebergRestSigningName.equalsIgnoreCase("glue")) {
- icebergRestCatalogProperties.put("rest.signing-name", "glue");
+ if (Strings.isNotBlank(icebergRestSigningName)) {
+ icebergRestCatalogProperties.put("rest.signing-name",
icebergRestSigningName.toLowerCase());
icebergRestCatalogProperties.put("rest.sigv4-enabled",
icebergRestSigV4Enabled);
icebergRestCatalogProperties.put("rest.access-key-id",
icebergRestAccessKeyId);
icebergRestCatalogProperties.put("rest.secret-access-key",
icebergRestSecretAccessKey);
diff --git
a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/AbstractS3CompatibleProperties.java
b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/AbstractS3CompatibleProperties.java
index 844454b4a73..2a7e58d787f 100644
---
a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/AbstractS3CompatibleProperties.java
+++
b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/AbstractS3CompatibleProperties.java
@@ -173,23 +173,24 @@ public abstract class AbstractS3CompatibleProperties
extends StorageProperties i
if (StringUtils.isNotBlank(getEndpoint())) {
return;
}
- String endpoint = null;
- // 1. try getting endpoint from uri
+ // 1. try getting endpoint region
+ String endpoint = getEndpointFromRegion();
+ if (StringUtils.isNotBlank(endpoint)) {
+ setEndpoint(endpoint);
+ return;
+ }
+ // 2. try getting endpoint from uri
try {
endpoint = S3PropertyUtils.constructEndpointFromUrl(origProps,
getUsePathStyle(),
getForceParsingByStandardUrl());
+ if (StringUtils.isNotBlank(endpoint)) {
+ setEndpoint(endpoint);
+ }
} catch (Exception e) {
if (LOG.isDebugEnabled()) {
- LOG.debug("Failed to construct endpoint from url: " +
origProps, e);
+ LOG.debug("Failed to construct endpoint from url: {}",
e.getMessage(), e);
}
}
- // 2. try getting endpoint region
- if (StringUtils.isBlank(endpoint)) {
- endpoint = getEndpointFromRegion();
- }
- if (!StringUtils.isBlank(endpoint)) {
- setEndpoint(endpoint);
- }
}
private void setRegionIfPossible() {
diff --git
a/fe/fe-core/src/test/java/org/apache/doris/datasource/property/metastore/IcebergRestPropertiesTest.java
b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/metastore/IcebergRestPropertiesTest.java
index ec79e08b74f..d1199df2ae6 100644
---
a/fe/fe-core/src/test/java/org/apache/doris/datasource/property/metastore/IcebergRestPropertiesTest.java
+++
b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/metastore/IcebergRestPropertiesTest.java
@@ -335,7 +335,7 @@ public class IcebergRestPropertiesTest {
// Test that non-glue signing names don't require additional properties
Map<String, String> props = new HashMap<>();
props.put("iceberg.rest.uri", "http://localhost:8080");
- props.put("iceberg.rest.signing-name", "custom-service");
+ props.put("iceberg.rest.signing", "custom-service");
IcebergRestProperties restProps = new IcebergRestProperties(props);
restProps.initNormalizeAndCheckProps(); // Should not throw
@@ -347,6 +347,16 @@ public class IcebergRestPropertiesTest {
Assertions.assertFalse(catalogProps.containsKey("rest.access-key-id"));
Assertions.assertFalse(catalogProps.containsKey("rest.secret-access-key"));
Assertions.assertFalse(catalogProps.containsKey("rest.sigv4-enabled"));
+ props.put("iceberg.rest.signing-name", "custom-service");
+ restProps = new IcebergRestProperties(props);
+ restProps.initNormalizeAndCheckProps(); // Should not throw
+ catalogProps = restProps.getIcebergRestCatalogProperties();
+ // Should not contain glue-specific properties
+ Assertions.assertTrue(catalogProps.containsKey("rest.signing-name"));
+ Assertions.assertTrue(catalogProps.containsKey("rest.signing-region"));
+ Assertions.assertTrue(catalogProps.containsKey("rest.access-key-id"));
+
Assertions.assertTrue(catalogProps.containsKey("rest.secret-access-key"));
+ Assertions.assertTrue(catalogProps.containsKey("rest.sigv4-enabled"));
}
@Test
diff --git
a/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/S3PropertiesTest.java
b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/S3PropertiesTest.java
index 248f9de506f..f5596957653 100644
---
a/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/S3PropertiesTest.java
+++
b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/S3PropertiesTest.java
@@ -193,7 +193,7 @@ public class S3PropertiesTest {
Assertions.assertEquals("us-west-2", s3Properties.getRegion());
Assertions.assertEquals("myCOSAccessKey", s3Properties.getAccessKey());
Assertions.assertEquals("myCOSSecretKey", s3Properties.getSecretKey());
- Assertions.assertEquals("s3.us-west-2.amazonaws.com",
s3Properties.getEndpoint());
+ Assertions.assertEquals("https://s3.us-west-2.amazonaws.com",
s3Properties.getEndpoint());
Map<String, String> s3EndpointProps = new HashMap<>();
s3EndpointProps.put("oss.access_key", "myCOSAccessKey");
s3EndpointProps.put("oss.secret_key", "myCOSSecretKey");
diff --git
a/regression-test/suites/aws_iam_role_p0/test_catalog_with_role.groovy
b/regression-test/suites/aws_iam_role_p0/test_catalog_with_role.groovy
new file mode 100644
index 00000000000..e6ba62817b4
--- /dev/null
+++ b/regression-test/suites/aws_iam_role_p0/test_catalog_with_role.groovy
@@ -0,0 +1,118 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import com.google.common.base.Strings;
+
+suite("test_catalog_with_role") {
+ if
(Strings.isNullOrEmpty(context.config.otherConfigs.get("awsLakesRoleArn"))) {
+ logger.info("skip ${name} case, because awsRoleArn is null or empty")
+ return
+ }
+ //query method
+ def createCatalogAndQuery = { catalogProps, catalogName, queryTableName,
expectCounts ->
+ sql """drop catalog if exists ${catalogName}"""
+ sql """
+ ${catalogProps}
+ """
+ def result = sql """
+ select count(1) from ${catalogName}.${queryTableName};
+ """
+ println("result: ${result}")
+ def countValue = result[0][0]
+ assertTrue(countValue == expectCounts.toInteger())
+ sql """drop catalog if exists ${catalogName}"""
+ }
+ String suiteName = "test_catalog_with_role"
+ String awsLakersRoleArn =
context.config.otherConfigs.get("awsLakesRoleArn")
+ String awsLakesRegion = context.config.otherConfigs.get("awsLakesRegion")
+ String icebergFSOnS3CatalogWarehouse =
context.config.otherConfigs.get("icebergFSOnS3CatalogWarehouse")
+ //String hmsMetastoreHost = context.config.hmsMetastoreHost
+ //String icebergHmsTableName = context.config.icebergHmsTableName
+ String icebergGlueTableName =
context.config.otherConfigs.get("icebergGlueTableName")
+
+ String icebergFSOnS3TableName =
context.config.otherConfigs.get("icebergFSOnS3TableName")
+ //String hiveHmsTableName = context.config.hiveHmsTableName
+ String hiveGlueTableName =
context.config.otherConfigs.get("hiveGlueTableName")
+ String expectCounts =
context.config.otherConfigs.get("awsLakesRoleArnQueryExpectCounts")
+
+ String awsS3Property = """
+ "s3.region" = "${awsLakesRegion}",
+ "s3.role_arn" = "${awsLakersRoleArn}"
+ """
+ String awsGlueProperties = """
+ "glue.region"="${awsLakesRegion}",
+ "glue.endpoint" = "https://glue.${awsLakesRegion}.amazonaws.com",
+ "glue.role_arn" = "${awsLakersRoleArn}"
+ """
+ // start test
+ String catalogName = "${suiteName}_iceberg_hadoop_on_s3"
+ //1. iceberg hadoop catalog on s
+ String icebergFSCatalogProps = """
+ create catalog if not exists ${catalogName} properties(
+ "type"="iceberg",
+ "iceberg.catalog.type"="hadoop",
+ "warehouse"="${icebergFSOnS3CatalogWarehouse}",
+ ${awsS3Property}
+ );
+ """
+ createCatalogAndQuery(icebergFSCatalogProps, catalogName,
icebergFSOnS3TableName, expectCounts)
+ catalogName = "${suiteName}_iceberg_glue"
+ String icebergGlueCatalogProps = """
+ create catalog if not exists ${catalogName} properties(
+ "type"="iceberg",
+ "iceberg.catalog.type"="glue",
+ ${awsGlueProperties}
+ );
+ """
+ createCatalogAndQuery(icebergGlueCatalogProps, catalogName,
icebergGlueTableName, expectCounts)
+ catalogName = "${suiteName}_hive_glue"
+ String hiveGlueCatalogProps = """
+ create catalog if not exists ${catalogName} properties (
+ "type"="hms",
+ "hive.metastore.type" = "glue",
+ ${awsGlueProperties}
+ );
+ """
+ createCatalogAndQuery(hiveGlueCatalogProps, catalogName,
hiveGlueTableName, expectCounts)
+/*********** HMS ***************/
+// The following HMS-related cases are commented out because
+// they require a Docker environment for the Hive Metastore service.
+
+/*
+String catalogName = "${suiteName}iceberg_hive_on_s3"
+String icebergHmsCatalogProps = """
+ create catalog if not exists ${catalogName} properties (
+ "type" = "iceberg",
+ "iceberg.catalog.type" = "hive",
+ "hive.metastore.uris" = "thrift://${hmsMetastoreHost}:9083",
+ ${awsS3Property}
+ );
+"""
+createCatalogAndQuery(icebergHmsCatalogProps, catalogName,
icebergHmsTableName, expectCounts)
+
+String hiveHmsCatalogProps = """
+ create catalog if not exists ${catalogName} properties (
+ "type" = "hms",
+ "hive.metastore.uris" = "thrift://${hmsMetastoreHost}:9083",
+ ${awsS3Property}
+ );
+"""
+createCatalogAndQuery(hiveHmsCatalogProps, catalogName, hiveHmsTableName,
expectCounts)
+*/
+
+
+}
\ No newline at end of file
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]