This is an automated email from the ASF dual-hosted git repository.

morningman pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/master by this push:
     new 0e10dd4f757 [test](iceberg) Add iceberg rest test for different storage (#54590)
0e10dd4f757 is described below

commit 0e10dd4f757a646bc3d78995507f47b472bc4d52
Author: zy-kkk <[email protected]>
AuthorDate: Tue Sep 2 01:17:52 2025 +0800

    [test](iceberg) Add iceberg rest test for different storage (#54590)
---
 .../iceberg-rest/docker-compose.yaml.tpl           | 104 ++++++
 .../iceberg-rest/iceberg-rest_settings.env         |  41 +++
 docker/thirdparties/run-thirdparties-docker.sh     |  27 +-
 regression-test/conf/regression-conf.groovy        |   3 +
 .../pipeline/external/conf/regression-conf.groovy  |   3 +
 .../nonConcurrent/conf/regression-conf.groovy      |   3 +
 .../pipeline/p0/conf/regression-conf.groovy        |   3 +
 .../iceberg/iceberg_rest_s3_storage_test.groovy    | 363 +++++++++++++++++++++
 8 files changed, 546 insertions(+), 1 deletion(-)
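
For context, this change wires three storage-specific Iceberg REST catalog
fixtures (AWS S3, Alibaba Cloud OSS, Tencent Cloud COS) into the third-party
docker tooling and adds a P2 regression suite exercising all of them. A
minimal usage sketch follows; the component flag and runner option names are
assumptions based on the existing tooling, so verify them against your
checkout:

    # Bring up the new fixtures (component name added by this commit).
    sudo bash docker/thirdparties/run-thirdparties-docker.sh -c iceberg-rest
    # Run the new suite with the standard regression runner.
    sh run-regression-test.sh --run iceberg_rest_s3_storage_test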

diff --git a/docker/thirdparties/docker-compose/iceberg-rest/docker-compose.yaml.tpl b/docker/thirdparties/docker-compose/iceberg-rest/docker-compose.yaml.tpl
new file mode 100644
index 00000000000..1c4b4f967d4
--- /dev/null
+++ b/docker/thirdparties/docker-compose/iceberg-rest/docker-compose.yaml.tpl
@@ -0,0 +1,104 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+version: "3.8"
+
+services:
+  # S3 Catalog
+  iceberg-rest-s3:
+    image: apache/iceberg-rest-fixture:1.9.2
+    container_name: ${CONTAINER_UID}iceberg-rest-s3
+    ports:
+      - "${ICEBERG_REST_S3_PORT}:8181"
+    environment:
+      - AWS_ACCESS_KEY_ID=${AWSAk}
+      - AWS_SECRET_ACCESS_KEY=${AWSSk}
+      - AWS_REGION=${AWSRegion}
+      - CATALOG_CATALOG__IMPL=org.apache.iceberg.jdbc.JdbcCatalog
+      - CATALOG_URI=jdbc:sqlite:/tmp/s3_catalog.db
+      - CATALOG_JDBC_USER=user
+      - CATALOG_JDBC_PASSWORD=password
+      - CATALOG_WAREHOUSE=s3://selectdb-qa-datalake-test-hk/iceberg_rest_warehouse/
+      - CATALOG_IO__IMPL=org.apache.iceberg.aws.s3.S3FileIO
+      - CATALOG_S3_ENDPOINT=https://${AWSEndpoint}
+      - CATALOG_S3_REGION=${AWSRegion}
+      - CATALOG_S3_PATH__STYLE__ACCESS=false
+    healthcheck:
+      test: ["CMD", "curl", "-f", "http://localhost:8181/v1/config"]
+      interval: 10s
+      timeout: 5s
+      retries: 5
+    networks:
+      - ${CONTAINER_UID}iceberg-rest
+
+  # OSS Catalog
+  iceberg-rest-oss:
+    image: apache/iceberg-rest-fixture:1.9.2
+    container_name: ${CONTAINER_UID}iceberg-rest-oss
+    ports:
+      - "${ICEBERG_REST_OSS_PORT}:8181"
+    environment:
+      - AWS_ACCESS_KEY_ID=${OSSAk}
+      - AWS_SECRET_ACCESS_KEY=${OSSSk}
+      - AWS_REGION=${OSSRegion}
+      - CATALOG_CATALOG__IMPL=org.apache.iceberg.jdbc.JdbcCatalog
+      - CATALOG_URI=jdbc:sqlite:/tmp/oss_catalog.db
+      - CATALOG_JDBC_USER=user
+      - CATALOG_JDBC_PASSWORD=password
+      - CATALOG_WAREHOUSE=s3://doris-regression-bj/iceberg_rest_warehouse/
+      - CATALOG_IO__IMPL=org.apache.iceberg.aws.s3.S3FileIO
+      - CATALOG_S3_ENDPOINT=https://${OSSEndpoint}
+      - CATALOG_S3_REGION=${OSSRegion}
+      - CATALOG_S3_PATH__STYLE__ACCESS=false
+    healthcheck:
+      test: ["CMD", "curl", "-f", "http://localhost:8181/v1/config"]
+      interval: 10s
+      timeout: 5s
+      retries: 5
+    networks:
+      - ${CONTAINER_UID}iceberg-rest
+
+  # COS Catalog  
+  iceberg-rest-cos:
+    image: apache/iceberg-rest-fixture:1.9.2
+    container_name: ${CONTAINER_UID}iceberg-rest-cos
+    ports:
+      - "${ICEBERG_REST_COS_PORT}:8181"
+    environment:
+      - AWS_ACCESS_KEY_ID=${COSAk}
+      - AWS_SECRET_ACCESS_KEY=${COSSk}
+      - AWS_REGION=${COSRegion}
+      - CATALOG_CATALOG__IMPL=org.apache.iceberg.jdbc.JdbcCatalog
+      - CATALOG_URI=jdbc:sqlite:/tmp/cos_catalog.db
+      - CATALOG_JDBC_USER=user
+      - CATALOG_JDBC_PASSWORD=password
+      - CATALOG_WAREHOUSE=s3://sdb-qa-datalake-test-1308700295/iceberg_rest_warehouse/
+      - CATALOG_IO__IMPL=org.apache.iceberg.aws.s3.S3FileIO
+      - CATALOG_S3_ENDPOINT=https://${COSEndpoint}
+      - CATALOG_S3_REGION=${COSRegion}
+      - CATALOG_S3_PATH__STYLE__ACCESS=false
+    healthcheck:
+      test: ["CMD", "curl", "-f", "http://localhost:8181/v1/config"]
+      interval: 10s
+      timeout: 5s
+      retries: 5
+    networks:
+      - ${CONTAINER_UID}iceberg-rest
+
+networks:
+  ${CONTAINER_UID}iceberg-rest:
+    driver: bridge
\ No newline at end of file
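
A note on the CATALOG_* variables above: the apache/iceberg-rest-fixture
image maps environment variables with the CATALOG_ prefix onto catalog
properties (prefix dropped, name lowercased, "__" becoming "-" and "_"
becoming "."), so for example:

    CATALOG_CATALOG__IMPL          -> catalog-impl
    CATALOG_IO__IMPL               -> io-impl
    CATALOG_S3_PATH__STYLE__ACCESS -> s3.path-style-access

This is the fixture's documented convention; worth re-checking against the
1.9.2 image pinned above.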
diff --git a/docker/thirdparties/docker-compose/iceberg-rest/iceberg-rest_settings.env b/docker/thirdparties/docker-compose/iceberg-rest/iceberg-rest_settings.env
new file mode 100644
index 00000000000..2dcf5494f18
--- /dev/null
+++ b/docker/thirdparties/docker-compose/iceberg-rest/iceberg-rest_settings.env
@@ -0,0 +1,41 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# Port configuration - different from the existing iceberg rest ports.
+# The existing iceberg setup uses 18181; we use 19181-19183 to avoid conflicts.
+export ICEBERG_REST_S3_PORT=19181
+export ICEBERG_REST_OSS_PORT=19182
+export ICEBERG_REST_COS_PORT=19183
+
+# AWS S3 Configuration
+export AWSAk="*****************"
+export AWSSk="*****************"
+export AWSEndpoint="s3.ap-east-1.amazonaws.com"
+export AWSRegion="ap-east-1"
+
+# Alibaba Cloud OSS Configuration  
+export OSSAk="*****************"
+export OSSSk="*****************"
+export OSSEndpoint="oss-cn-beijing.aliyuncs.com"
+export OSSRegion="cn-beijing"
+
+# Tencent Cloud COS Configuration
+export COSAk="*****************"
+export COSSk="*****************"
+export COSEndpoint="cos.ap-beijing.myqcloud.com"
+export COSRegion="ap-beijing"
\ No newline at end of file
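
Once the stack is up, each catalog should answer on its own host port. A
quick smoke test, mirroring the compose healthcheck above:

    for p in 19181 19182 19183; do
        curl -fsS "http://localhost:${p}/v1/config" && echo " <- port ${p} OK"
    done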
diff --git a/docker/thirdparties/run-thirdparties-docker.sh b/docker/thirdparties/run-thirdparties-docker.sh
index b3d1a5e8c0e..950d0ef656a 100755
--- a/docker/thirdparties/run-thirdparties-docker.sh
+++ b/docker/thirdparties/run-thirdparties-docker.sh
@@ -39,7 +39,7 @@ Usage: $0 <options>
      --no-load-data     do not load data into the components
 
   All valid components:
-    mysql,pg,oracle,sqlserver,clickhouse,es,hive2,hive3,iceberg,hudi,trino,kafka,mariadb,db2,oceanbase,lakesoul,kerberos,ranger,polaris
+    mysql,pg,oracle,sqlserver,clickhouse,es,hive2,hive3,iceberg,iceberg-rest,hudi,trino,kafka,mariadb,db2,oceanbase,lakesoul,kerberos,ranger,polaris
   "
     exit 1
 }
@@ -148,6 +148,7 @@ RUN_HIVE2=0
 RUN_HIVE3=0
 RUN_ES=0
 RUN_ICEBERG=0
+RUN_ICEBERG_REST=0
 RUN_HUDI=0
 RUN_TRINO=0
 RUN_KAFKA=0
@@ -185,6 +186,8 @@ for element in "${COMPONENTS_ARR[@]}"; do
         RUN_KAFKA=1
     elif [[ "${element}"x == "iceberg"x ]]; then
         RUN_ICEBERG=1
+    elif [[ "${element}"x == "iceberg-rest"x ]]; then
+        RUN_ICEBERG_REST=1
     elif [[ "${element}"x == "hudi"x ]]; then
         RUN_HUDI=1
     elif [[ "${element}"x == "trino"x ]]; then
@@ -695,6 +698,23 @@ start_ranger() {
     fi
 }
 
+start_iceberg_rest() {
+    echo "RUN_ICEBERG_REST"
+    # iceberg-rest with multiple cloud storage backends
+    ICEBERG_REST_DIR=${ROOT}/docker-compose/iceberg-rest
+    
+    # generate iceberg-rest.yaml
+    export CONTAINER_UID=${CONTAINER_UID}
+    . "${ROOT}"/docker-compose/iceberg-rest/iceberg-rest_settings.env
+    envsubst <"${ICEBERG_REST_DIR}/docker-compose.yaml.tpl" >"${ICEBERG_REST_DIR}/docker-compose.yaml"
+    
+    sudo docker compose -f "${ICEBERG_REST_DIR}/docker-compose.yaml" down
+    if [[ "${STOP}" -ne 1 ]]; then
+        # Start all three REST catalogs (S3, OSS, COS)
+        sudo docker compose -f "${ICEBERG_REST_DIR}/docker-compose.yaml" up -d --remove-orphans --wait
+    fi
+}
+
 echo "starting dockers in parallel"
 
 reserve_ports
@@ -766,6 +786,11 @@ if [[ "${RUN_ICEBERG}" -eq 1 ]]; then
     pids["iceberg"]=$!
 fi
 
+if [[ "${RUN_ICEBERG_REST}" -eq 1 ]]; then
+    start_iceberg_rest > start_iceberg_rest.log 2>&1 &
+    pids["iceberg-rest"]=$!
+fi
+
 if [[ "${RUN_HUDI}" -eq 1 ]]; then
     start_hudi > start_hudi.log 2>&1 &
     pids["hudi"]=$!
diff --git a/regression-test/conf/regression-conf.groovy b/regression-test/conf/regression-conf.groovy
index 364d7db4d4a..539b78ac9d5 100644
--- a/regression-test/conf/regression-conf.groovy
+++ b/regression-test/conf/regression-conf.groovy
@@ -231,6 +231,9 @@ extArrowFlightSqlPassword= ""
 // iceberg rest catalog config
 iceberg_rest_uri_port=18181
 iceberg_minio_port=19001
+iceberg_rest_uri_port_s3=19181
+iceberg_rest_uri_port_oss=19182
+iceberg_rest_uri_port_cos=19183
 
 // polaris rest catalog config
 polaris_rest_uri_port=20181
diff --git a/regression-test/pipeline/external/conf/regression-conf.groovy b/regression-test/pipeline/external/conf/regression-conf.groovy
index 9ca064b8a13..a2eadc98dce 100644
--- a/regression-test/pipeline/external/conf/regression-conf.groovy
+++ b/regression-test/pipeline/external/conf/regression-conf.groovy
@@ -128,6 +128,9 @@ enableRefactorParamsTest=true
 
 // iceberg test config
 iceberg_rest_uri_port=18181
+iceberg_rest_uri_port_s3=19181
+iceberg_rest_uri_port_oss=19182
+iceberg_rest_uri_port_cos=19183
 iceberg_minio_port=19001
 enableIcebergTest=true
 
diff --git a/regression-test/pipeline/nonConcurrent/conf/regression-conf.groovy b/regression-test/pipeline/nonConcurrent/conf/regression-conf.groovy
index 93e5facccdd..7f6f9d734e7 100644
--- a/regression-test/pipeline/nonConcurrent/conf/regression-conf.groovy
+++ b/regression-test/pipeline/nonConcurrent/conf/regression-conf.groovy
@@ -117,6 +117,9 @@ kafka_port=19193
 // iceberg test config
 iceberg_rest_uri_port=18181
 iceberg_minio_port=19001
+iceberg_rest_uri_port_s3=19181
+iceberg_rest_uri_port_oss=19182
+iceberg_rest_uri_port_cos=19183
 
 // polaris rest catalog config
 polaris_rest_uri_port=20181
diff --git a/regression-test/pipeline/p0/conf/regression-conf.groovy b/regression-test/pipeline/p0/conf/regression-conf.groovy
index a1ddefbc566..bfaae824ad9 100644
--- a/regression-test/pipeline/p0/conf/regression-conf.groovy
+++ b/regression-test/pipeline/p0/conf/regression-conf.groovy
@@ -128,6 +128,9 @@ kafka_port=19193
 // iceberg test config
 iceberg_rest_uri_port=18181
 iceberg_minio_port=19001
+iceberg_rest_uri_port_s3=19181
+iceberg_rest_uri_port_oss=19182
+iceberg_rest_uri_port_cos=19183
 
 // polaris rest catalog config
 polaris_rest_uri_port=20181
diff --git a/regression-test/suites/external_table_p2/iceberg/iceberg_rest_s3_storage_test.groovy b/regression-test/suites/external_table_p2/iceberg/iceberg_rest_s3_storage_test.groovy
new file mode 100644
index 00000000000..b2e9ad2ee1f
--- /dev/null
+++ b/regression-test/suites/external_table_p2/iceberg/iceberg_rest_s3_storage_test.groovy
@@ -0,0 +1,363 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+suite("iceberg_rest_s3_storage_test", 
"p2,external,iceberg,external_docker,external_docker_iceberg_rest,new_catalog_property")
 {
+
+    def testQueryAndInsert = { String catalogProperties, String prefix ->
+
+        // =======  BASIC CATALOG AND DATABASE SETUP  =======
+        def catalog_name = "${prefix}_catalog"
+        sql """
+            DROP CATALOG IF EXISTS ${catalog_name};
+        """
+        sql """
+            CREATE CATALOG IF NOT EXISTS ${catalog_name} PROPERTIES (
+                ${catalogProperties}
+            );
+        """
+        sql """
+            switch ${catalog_name};
+        """
+
+        def db_name = prefix + "_db"
+        sql """
+            DROP DATABASE IF EXISTS ${db_name} FORCE;
+        """
+        sql """
+            CREATE DATABASE IF NOT EXISTS ${db_name};
+        """
+
+        def dbResult = sql """
+            show databases  like "${db_name}";
+        """
+        assert dbResult.size() == 1
+
+        sql """
+            use ${db_name};
+        """
+        // =======  BASIC TABLE OPERATIONS TEST  =======
+        def table_name = prefix + "_table"
+        sql """
+            CREATE TABLE ${table_name} (
+            user_id            BIGINT       NOT NULL COMMENT "user id",
+            name               VARCHAR(20)           COMMENT "name",
+            age                INT                   COMMENT "age"
+        );
+        """
+        sql """
+            insert into ${table_name} values (1, 'a', 10);
+        """
+        // query
+        def queryResult = sql """
+            SELECT * FROM ${table_name};
+        """
+        assert queryResult.size() == 1
+
+        // =======  BRANCH/TAG TEST  =======
+        def branch_name = prefix + "_branch"
+        def tag_name = prefix + "_tag"
+        sql """
+            ALTER TABLE ${table_name} CREATE BRANCH ${branch_name};
+        """
+        sql """
+            ALTER TABLE ${table_name} CREATE TAG ${tag_name};
+        """
+        sql """
+            INSERT OVERWRITE TABLE ${table_name} VALUES (1, 'a', 10), (2, 'b', 20), (3, 'c', 30)
+        """
+        def originalQueryResult = sql """
+            SELECT * FROM ${table_name};
+        """
+        assert originalQueryResult.size() == 3
+        sql """
+            insert into ${table_name}@branch(${branch_name}) values (4, 'd', 40)
+        """
+        def branchQueryResult = sql """
+            SELECT * FROM ${table_name}@branch(${branch_name});
+        """
+        assert branchQueryResult.size() == 2
+
+
+        def tagQueryResult = sql """
+            SELECT * FROM ${table_name}@tag(${tag_name});
+        """
+        assert tagQueryResult.size() == 1
+
+        // Note: Tags are read-only in Iceberg; only branches support write operations
+
+        sql """
+            ALTER TABLE ${table_name} drop branch ${branch_name};
+        """
+        sql """
+            ALTER TABLE ${table_name} drop tag ${tag_name};
+        """
+        // =======  SYSTEM TABLES TEST  =======
+        // Test $files system table
+        def files_result = sql """
+        SELECT * FROM ${table_name}\$files;
+    """
+        println "Files system table: " + files_result
+
+        // Test $entries system table
+        def entries_result = sql """
+        SELECT * FROM ${table_name}\$entries;
+    """
+        println "Entries system table: " + entries_result
+
+        // Test $history system table
+        def history_result = sql """
+        SELECT * FROM ${table_name}\$history;
+    """
+        println "History system table: " + history_result
+
+        // Test $manifests system table
+        def manifests_result = sql """
+        SELECT * FROM ${table_name}\$manifests;
+    """
+        println "Manifests system table: " + manifests_result
+
+        // Test $metadata_log_entries system table
+        def metadata_log_result = sql """
+        SELECT * FROM ${table_name}\$metadata_log_entries;
+    """
+        println "Metadata log entries system table: " + metadata_log_result
+
+        // Test $partitions system table  
+        def partitions_result = sql """
+        SELECT * FROM ${table_name}\$partitions;
+    """
+        println "Partitions system table: " + partitions_result
+
+        // Test $refs system table
+        def refs_result = sql """
+        SELECT * FROM ${table_name}\$refs;
+    """
+        println "Refs system table: " + refs_result
+
+        // Test $snapshots system table
+        def snapshots_result = sql """
+        SELECT * FROM ${table_name}\$snapshots;
+    """
+        println "Snapshots system table: " + snapshots_result
+
+        println "All system tables test SUCCESS " + catalog_name
+
+        // =======  TIME TRAVEL TEST  =======
+        def iceberg_meta_result = sql """
+        SELECT snapshot_id FROM iceberg_meta(
+                'table' = '${catalog_name}.${db_name}.${table_name}',
+                'query_type' = 'snapshots'
+        ) order by committed_at desc;
+        """
+        def first_snapshot_id = iceberg_meta_result.get(0).get(0);
+        def time_travel = sql """
+        SELECT * FROM ${table_name} FOR VERSION AS OF ${first_snapshot_id};
+        """
+        println time_travel
+        println "iceberg_time_travel_QUERY SUCCESS " + catalog_name
+
+
+        sql """
+            DROP TABLE ${table_name};
+        """
+
+        // =======  PARTITION TABLE TEST  =======
+        table_name = prefix + "_partition_table"
+        sql """
+            CREATE TABLE ${table_name} (
+              `ts` DATETIME COMMENT 'ts',
+              `col1` BOOLEAN COMMENT 'col1',
+              `col2` INT COMMENT 'col2',
+              `col3` BIGINT COMMENT 'col3',
+              `col4` FLOAT COMMENT 'col4',
+              `col5` DOUBLE COMMENT 'col5',
+              `col6` DECIMAL(9,4) COMMENT 'col6',
+              `col7` STRING COMMENT 'col7',
+              `col8` DATE COMMENT 'col8',
+              `col9` DATETIME COMMENT 'col9',
+              `pt1` STRING COMMENT 'pt1',
+              `pt2` STRING COMMENT 'pt2'
+            )
+            PARTITION BY LIST (day(ts), pt1, pt2) ()
+            PROPERTIES (
+              'write-format'='orc',
+              'compression-codec'='zlib'
+            );
+        """
+
+        sql """
+            INSERT OVERWRITE  TABLE ${table_name} values 
+            ('2023-01-01 00:00:00', true, 1, 1, 1.0, 1.0, 1.0000, '1', '2023-01-01', '2023-01-01 00:00:00', 'a', '1'),
+            ('2023-01-02 00:00:00', false, 2, 2, 2.0, 2.0, 2.0000, '2', '2023-01-02', '2023-01-02 00:00:00', 'b', '2'),
+            ('2023-01-03 00:00:00', true, 3, 3, 3.0, 3.0, 3.0000, '3', '2023-01-03', '2023-01-03 00:00:00', 'c', '3');
+        """
+        def partitionQueryResult = sql """
+            SELECT * FROM ${table_name} WHERE pt1='a' and pt2='1';
+        """
+        assert partitionQueryResult.size() == 1
+
+        // =======  PARTITION TABLE BRANCH/TAG TEST  =======
+        branch_name = prefix + "_partition_branch"
+        tag_name = prefix + "_partition_tag"
+
+        sql """
+            ALTER TABLE ${table_name} CREATE BRANCH ${branch_name};
+        """
+        sql """
+            ALTER TABLE ${table_name} CREATE TAG ${tag_name};
+        """
+
+        // Partition table branch write operation
+        sql """
+            insert into ${table_name}@branch(${branch_name}) values ('2023-01-04 00:00:00', false, 4, 4, 4.0, 4.0, 4.0000, '4', '2023-01-04', '2023-01-04 00:00:00', 'd', '4')
+        """
+
+        def partitionBranchResult = sql """
+            SELECT * FROM ${table_name}@branch(${branch_name}) ORDER BY col2;
+        """
+        println "Partition table branch query: " + partitionBranchResult
+
+        def partitionTagResult = sql """
+            SELECT * FROM ${table_name}@tag(${tag_name}) ORDER BY col2;
+        """
+        println "Partition table tag query: " + partitionTagResult
+
+        // Test partition table system tables
+        def partition_files_result = sql """
+            SELECT * FROM ${table_name}\$partitions;
+        """
+        println "Partitions system table: " + partition_files_result
+
+        sql """
+            ALTER TABLE ${table_name} drop branch ${branch_name};
+        """
+        sql """
+            ALTER TABLE ${table_name} drop tag ${tag_name};
+        """
+
+        sql """
+            DROP TABLE ${table_name};
+        """
+
+        sql """
+            DROP DATABASE ${db_name} FORCE;
+        """
+
+        def dropResult = sql """
+            show databases  like "${db_name}";
+        """
+        assert dropResult.size() == 0
+    }
+
+    String enabled = context.config.otherConfigs.get("enableExternalIcebergTest")
+    if (enabled != null && enabled.equalsIgnoreCase("true")) {
+        /* REST catalog env and base properties */
+        String externalEnvIp = context.config.otherConfigs.get("externalEnvIp")
+
+        String rest_port_s3 = context.config.otherConfigs.get("iceberg_rest_uri_port_s3")
+        String iceberg_rest_type_prop_s3 = """
+            'type'='iceberg',
+            'iceberg.catalog.type'='rest',
+            'iceberg.rest.uri' = 'http://${externalEnvIp}:${rest_port_s3}',
+        """
+
+        String rest_port_oss = context.config.otherConfigs.get("iceberg_rest_uri_port_oss")
+        String iceberg_rest_type_prop_oss = """
+            'type'='iceberg',
+            'iceberg.catalog.type'='rest',
+            'iceberg.rest.uri' = 'http://${externalEnvIp}:${rest_port_oss}',
+        """
+
+        String rest_port_cos = context.config.otherConfigs.get("iceberg_rest_uri_port_cos")
+        String iceberg_rest_type_prop_cos = """
+            'type'='iceberg',
+            'iceberg.catalog.type'='rest',
+            'iceberg.rest.uri' = 'http://${externalEnvIp}:${rest_port_cos}',
+        """
+
+        /*-----S3------*/
+        String s3_ak = context.config.otherConfigs.get("AWSAk")
+        String s3_sk = context.config.otherConfigs.get("AWSSk")
+        String s3_parent_path = "selectdb-qa-datalake-test-hk"
+        String s3_endpoint = "https://s3.ap-east-1.amazonaws.com";
+        String s3_region = "ap-east-1"
+        String s3_storage_properties = """
+          's3.access_key' = '${s3_ak}',
+          's3.secret_key' = '${s3_sk}',
+          's3.endpoint' = '${s3_endpoint}'
+        """
+        String s3_region_param = """
+         's3.region' = '${s3_region}',
+        """
+        /****************OSS*******************/
+        String oss_ak = context.config.otherConfigs.get("aliYunAk")
+        String oss_sk = context.config.otherConfigs.get("aliYunSk")
+        String oss_endpoint = "https://oss-cn-beijing.aliyuncs.com";
+        String oss_parent_path = "doris-regression-bj"
+        String oss_region = "cn-beijing"
+        String oss_region_param = """
+         'oss.region' = '${oss_region}',
+        """
+        String oss_storage_properties = """
+          'oss.access_key' = '${oss_ak}',
+          'oss.secret_key' = '${oss_sk}',
+          'oss.endpoint' = '${oss_endpoint}'
+        """
+        /****************COS*******************/
+        String cos_ak = context.config.otherConfigs.get("txYunAk")
+        String cos_sk = context.config.otherConfigs.get("txYunSk")
+        String cos_parent_path = "sdb-qa-datalake-test-1308700295";
+        String cos_endpoint = "https://cos.ap-beijing.myqcloud.com";
+        String cos_region = "ap-beijing"
+        String cos_region_param = """
+         'cos.region' = '${cos_region}',
+        """
+
+
+        String cos_storage_properties = """
+          'cos.access_key' = '${cos_ak}',
+          'cos.secret_key' = '${cos_sk}',
+          'cos.endpoint' = '${cos_endpoint}'
+        """
+
+        // -------- REST on OSS --------
+        String warehouse = """
+         'warehouse' = 's3://${oss_parent_path}/iceberg_rest_warehouse',
+        """
+        testQueryAndInsert(iceberg_rest_type_prop_oss + warehouse + oss_storage_properties, "iceberg_rest_on_oss")
+        testQueryAndInsert(iceberg_rest_type_prop_oss + warehouse + oss_region_param + oss_storage_properties, "iceberg_rest_on_oss_region")
+
+        // -------- REST on COS --------
+        warehouse = """
+         'warehouse' = 's3://${cos_parent_path}/iceberg_rest_warehouse',
+        """
+        testQueryAndInsert(iceberg_rest_type_prop_cos + warehouse + cos_storage_properties, "iceberg_rest_on_cos")
+        testQueryAndInsert(iceberg_rest_type_prop_cos + warehouse + cos_region_param + cos_storage_properties, "iceberg_rest_on_cos_region")
+
+        // -------- REST on S3 --------
+        warehouse = """
+         'warehouse' = 's3://${s3_parent_path}/iceberg_rest_warehouse',
+        """
+        testQueryAndInsert(iceberg_rest_type_prop_s3 + warehouse + s3_storage_properties, "iceberg_rest_on_s3")
+        testQueryAndInsert(iceberg_rest_type_prop_s3 + warehouse + s3_region_param + s3_storage_properties, "iceberg_rest_on_s3_region")
+        warehouse = """
+         'warehouse' = 's3a://${s3_parent_path}/iceberg_rest_warehouse',
+        """
+        testQueryAndInsert(iceberg_rest_type_prop_s3 + warehouse + s3_storage_properties, "iceberg_rest_on_s3a")
+        testQueryAndInsert(iceberg_rest_type_prop_s3 + warehouse + s3_region_param + s3_storage_properties, "iceberg_rest_on_s3a_region")
+    }
+}
\ No newline at end of file
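
For readers tracing the property plumbing: the suite builds each catalog by
concatenating a REST fragment, a warehouse fragment, and a storage fragment.
For the "REST on OSS" case the pieces expand to roughly the statement below
(IP, port, and keys resolved from the regression config; a sketch, not
literal suite output):

    CREATE CATALOG IF NOT EXISTS iceberg_rest_on_oss_catalog PROPERTIES (
        'type' = 'iceberg',
        'iceberg.catalog.type' = 'rest',
        'iceberg.rest.uri' = 'http://<externalEnvIp>:19182',
        'warehouse' = 's3://doris-regression-bj/iceberg_rest_warehouse',
        'oss.access_key' = '<ak>',
        'oss.secret_key' = '<sk>',
        'oss.endpoint' = 'https://oss-cn-beijing.aliyuncs.com'
    );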

