This is an automated email from the ASF dual-hosted git repository.

yiguolei pushed a commit to branch branch-4.0
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-4.0 by this push:
     new 66269b5bcf8 branch-4.0: [fix](test)Validate database is empty before DROP DATABASE (#58344) (#58423)
66269b5bcf8 is described below

commit 66269b5bcf829f8577953abede07b2d45bb55b5b
Author: Calvin Kirs <[email protected]>
AuthorDate: Thu Nov 27 08:59:08 2025 +0800

    branch-4.0: [fix](test)Validate database is empty before DROP DATABASE (#58344) (#58423)
    
    …
    
    #58344
---
 .../azure_blob_all_test.groovy                     | 141 +++++++++++++++++++++
 .../iceberg_on_hms_and_filesystem_and_dlf.groovy   | 103 ++++++++++++++-
 2 files changed, 243 insertions(+), 1 deletion(-)
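
The substance of the change: each test run now uses its own uniquely named database (a random suffix on the test prefix) and only that database is force-dropped, so DROP DATABASE is never issued against a database that may still hold another run's objects. A minimal Groovy sketch of that pattern, assuming the regression suite's `sql` helper; the closure name and `prefix` parameter below are illustrative, not identifiers taken from the patch:

    import java.util.concurrent.ThreadLocalRandom

    // Give every run its own database so the final DROP only removes
    // objects created by this run (hypothetical helper, not from the patch).
    def withFreshDatabase = { String prefix, Closure body ->
        def dbName = prefix + "_db_" + ThreadLocalRandom.current().nextInt(100)
        sql """ DROP DATABASE IF EXISTS ${dbName} FORCE; """
        sql """ CREATE DATABASE ${dbName}; """
        sql """ USE ${dbName}; """
        try {
            body(dbName)
        } finally {
            // Clean up only what this run created.
            sql """ DROP DATABASE IF EXISTS ${dbName} FORCE; """
        }
    }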

diff --git a/regression-test/suites/external_table_p2/refactor_catalog_param/azure_blob_all_test.groovy b/regression-test/suites/external_table_p2/refactor_catalog_param/azure_blob_all_test.groovy
new file mode 100644
index 00000000000..7451e6c9367
--- /dev/null
+++ b/regression-test/suites/external_table_p2/refactor_catalog_param/azure_blob_all_test.groovy
@@ -0,0 +1,141 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+import static groovy.test.GroovyAssert.shouldFail;
+import java.util.concurrent.ThreadLocalRandom
+
+suite("azure_blob_all_test", "p2,external,new_catalog_property") {
+
+
+    String abfsAzureAccountName = context.config.otherConfigs.get("abfsAccountName")
+    String abfsAzureAccountKey = context.config.otherConfigs.get("abfsAccountKey")
+    String abfsContainer = context.config.otherConfigs.get("abfsContainer")
+    String abfsEndpoint = context.config.otherConfigs.get("abfsEndpoint")
+    def abfs_azure_config_props = """
+        "provider" = "azure",
+        "azure.endpoint"="${abfsEndpoint}",
+        "azure.account_name" = "${abfsAzureAccountName}",
+        "azure.account_key" = "${abfsAzureAccountKey}" 
+    """
+    
+    // Iceberg FS 
+    
+    def testIcebergTest = { String storage_props,String iceberg_fs_catalog_name, String protocol,String hdfsLocationType ->
+        
+        iceberg_fs_catalog_name = iceberg_fs_catalog_name + "_" + ThreadLocalRandom.current().nextInt(100)
+        sql """
+         drop catalog if exists ${iceberg_fs_catalog_name};
+        """
+        sql"""
+        create catalog ${iceberg_fs_catalog_name} properties(
+        'type'='iceberg',
+        'iceberg.catalog.type'='hadoop',
+        'warehouse'='${protocol}://${abfsContainer}@${abfsAzureAccountName}.${hdfsLocationType}.core.windows.net/regression/external/azure/${protocol}/iceberg_fs_warehouse/',
+        ${storage_props}
+        );
+        """
+
+        sql """ 
+         switch ${iceberg_fs_catalog_name} 
+            """
+
+        sql """
+        drop database if exists ${iceberg_fs_catalog_name}_db_test force;
+        """
+        sql """
+        create database ${iceberg_fs_catalog_name}_db_test;
+    """
+        sql """
+        use ${iceberg_fs_catalog_name}_db_test;
+    """
+        sql """
+        create table ${iceberg_fs_catalog_name}_table_test (id int, name string)
+    """
+        sql """
+        insert into ${iceberg_fs_catalog_name}_table_test values(1, 'iceberg_fs_abfs_test');
+    """
+        def query_result = sql """
+        select count(1) from ${iceberg_fs_catalog_name}_table_test;
+    """
+
+        assert query_result[0][0] == 1
+
+        sql """
+        drop table if exists ${iceberg_fs_catalog_name}_table_test;
+    """
+        sql """
+        drop database if exists ${iceberg_fs_catalog_name}_db_test force;
+    """
+        sql """
+        drop catalog if exists ${iceberg_fs_catalog_name};
+        """
+    }
+
+
+    //abfs
+    testIcebergTest(abfs_azure_config_props, "iceberg_fs_abfs_catalog", "abfs","dfs")
+    testIcebergTest(abfs_azure_config_props, "iceberg_fs_abfss_catalog", "abfss","dfs")
+    
+   
+    
+    // Paimon FS test closure
+    def testPaimonTest = { String storage_props,String paimon_catalog_name, String protocol,String hdfsLocationType,String queryTbl ->
+        sql """
+         drop catalog if exists ${paimon_catalog_name};
+        """
+        sql"""
+        create catalog ${paimon_catalog_name} properties(
+        'type'='paimon',
+        'paimon.catalog.type'='filesystem',
+        'warehouse'='${protocol}://${abfsContainer}@${abfsAzureAccountName}.${hdfsLocationType}.core.windows.net/regression/azure/${protocol}/paimon_fs_warehouse/',
+        ${abfs_azure_config_props}
+    );
+    """
+
+        sql """ 
+         switch ${paimon_catalog_name} 
+    """
+
+        def query_result =sql """
+        select * from ${paimon_catalog_name}.${queryTbl}
+        """
+        println query_result
+
+        sql """
+         drop catalog if exists ${paimon_catalog_name};
+        """
+    }
+    
+    // Paimon FS
+    sql """
+    set force_jni_scanner=false;
+    """
+  
+    def paimon_fs_abfss_db_tbl = "paimon_fs_abfss_test_db.external_test_table"
+    def paimon_fs_abfs_db_tbl = "paimon_fs_abfs_test_db.external_test_table"
+    testPaimonTest(abfs_azure_config_props, "paimon_fs_abfs_catalog", 
"abfs","dfs",paimon_fs_abfs_db_tbl)
+    testPaimonTest(abfs_azure_config_props, "paimon_fs_abfss_catalog", 
"abfss","dfs",paimon_fs_abfss_db_tbl)
+
+    // TODO: Enable this once BE's HDFS dependency management is fully ready.
+    //       This module requires higher-version JARs to support JDK 17 access.
+   /*    sql """
+    set force_jni_scanner=true;
+    """
+    testPaimonTest(abfs_azure_config_props, "paimon_fs_abfs_catalog", "abfs","dfs",paimon_fs_abfs_db_tbl)
+    testPaimonTest(abfs_azure_config_props, "paimon_fs_abfss_catalog", "abfss","dfs",paimon_fs_abfss_db_tbl)*/
+    
+    
+}
\ No newline at end of file
diff --git a/regression-test/suites/external_table_p2/refactor_catalog_param/iceberg_on_hms_and_filesystem_and_dlf.groovy b/regression-test/suites/external_table_p2/refactor_catalog_param/iceberg_on_hms_and_filesystem_and_dlf.groovy
index f3ccbb36889..45b18889767 100644
--- a/regression-test/suites/external_table_p2/refactor_catalog_param/iceberg_on_hms_and_filesystem_and_dlf.groovy
+++ b/regression-test/suites/external_table_p2/refactor_catalog_param/iceberg_on_hms_and_filesystem_and_dlf.groovy
@@ -14,6 +14,9 @@
 // KIND, either express or implied.  See the License for the
 // specific language governing permissions and limitations
 // under the License.
+
+import java.util.concurrent.ThreadLocalRandom
+
 import static groovy.test.GroovyAssert.shouldFail;
 suite("iceberg_on_hms_and_filesystem_and_dlf", 
"p2,external,new_catalog_property") {
 
@@ -33,7 +36,8 @@ suite("iceberg_on_hms_and_filesystem_and_dlf", "p2,external,new_catalog_property
             switch ${catalog_name};
         """
 
-        def db_name = prefix + "_db"
+        def db_name = prefix + "_db"+ ThreadLocalRandom.current().nextInt(100);
+        // Randomize the database name so each run starts from a fresh, empty database
         sql """
             DROP DATABASE IF EXISTS ${db_name} FORCE;
         """
@@ -65,6 +69,103 @@ suite("iceberg_on_hms_and_filesystem_and_dlf", "p2,external,new_catalog_property
             SELECT * FROM ${table_name};
         """
         assert queryResult.size() == 1
+        def branch_name = prefix + "_branch"
+        def tag_name = prefix + "_tag"
+        sql """
+            ALTER TABLE ${table_name} CREATE BRANCH ${branch_name};
+        """
+        sql """
+            ALTER TABLE ${table_name} CREATE TAG ${tag_name};
+        """
+        sql """
+            INSERT OVERWRITE  TABLE ${table_name} VALUES (1, 'a', 10),(2, 'b', 20), (3, 'c', 30)
+        """
+        def originalQueryResult = sql """
+            SELECT * FROM ${table_name};
+        """
+        assert originalQueryResult.size() == 3
+        sql """
+            insert into ${table_name}@branch(${branch_name}) values (4, 'd', 40)
+        """
+        def branchQueryResult = sql """
+            SELECT * FROM ${table_name}@branch(${branch_name});
+        """
+        assert branchQueryResult.size() == 2
+
+
+        def tagQueryResult = sql """
+            SELECT * FROM ${table_name}@tag(${tag_name});
+        """
+        assert tagQueryResult.size() == 1
+        sql """
+            ALTER TABLE ${table_name} drop branch ${branch_name};
+        """
+        sql """
+            ALTER TABLE ${table_name} drop tag ${tag_name};
+        """
+        try {
+            def sys_query_result = sql """
+            SELECT * FROM ${table_name}\$files;
+        """
+            println sys_query_result
+            println "iceberg_meta_result SUCCESS" + catalog_name
+
+            def iceberg_meta_result = sql """
+        SELECT snapshot_id FROM iceberg_meta(
+                'table' = '${catalog_name}.${db_name}.${table_name}',
+                'query_type' = 'snapshots'
+        ) order by committed_at desc;
+        
+        """
+            def first_snapshot_id = iceberg_meta_result.get(0).get(0);
+            def time_travel =sql """
+            SELECT * FROM ${table_name} FOR VERSION AS OF ${first_snapshot_id};
+        """
+            println time_travel
+
+            println "iceberg_time_travel SUCCESS" + catalog_name
+        }catch (Exception e) {
+            println catalog_name + "system info error"
+        }
+
+
+        sql """
+            DROP TABLE ${table_name};
+        """
+        //partition table
+        table_name = prefix + "_partition_table"
+        sql """
+            CREATE TABLE ${table_name} (
+              `ts` DATETIME COMMENT 'ts',
+              `col1` BOOLEAN COMMENT 'col1',
+              `col2` INT COMMENT 'col2',
+              `col3` BIGINT COMMENT 'col3',
+              `col4` FLOAT COMMENT 'col4',
+              `col5` DOUBLE COMMENT 'col5',
+              `col6` DECIMAL(9,4) COMMENT 'col6',
+              `col7` STRING COMMENT 'col7',
+              `col8` DATE COMMENT 'col8',
+              `col9` DATETIME COMMENT 'col9',
+              `pt1` STRING COMMENT 'pt1',
+              `pt2` STRING COMMENT 'pt2'
+            )
+            PARTITION BY LIST (day(ts), pt1, pt2) ()
+            PROPERTIES (
+              'write-format'='orc',
+              'compression-codec'='zlib'
+            );
+        """
+
+        sql """
+            INSERT OVERWRITE  TABLE ${table_name} values 
+            ('2023-01-01 00:00:00', true, 1, 1, 1.0, 1.0, 1.0000, '1', '2023-01-01', '2023-01-01 00:00:00', 'a', '1'),
+            ('2023-01-02 00:00:00', false, 2, 2, 2.0, 2.0, 2.0000, '2', '2023-01-02', '2023-01-02 00:00:00', 'b', '2'),
+            ('2023-01-03 00:00:00', true, 3, 3, 3.0, 3.0, 3.0000, '3', '2023-01-03', '2023-01-03 00:00:00', 'c', '3');
+        """
+        def partitionQueryResult = sql """
+            SELECT * FROM ${table_name} WHERE pt1='a' and pt2='1';
+        """
+        assert partitionQueryResult.size() == 1
 
         sql """
             DROP TABLE ${table_name};


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
