This is an automated email from the ASF dual-hosted git repository.

kirs pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/master by this push:
     new b4c5ff69b33 [fix](test)Validate database is empty before DROP DATABASE 
(#58344)
b4c5ff69b33 is described below

commit b4c5ff69b33a31009bef6a1d12f8204b9e9ca4ef
Author: Calvin Kirs <[email protected]>
AuthorDate: Wed Nov 26 19:24:29 2025 +0800

    [fix](test)Validate database is empty before DROP DATABASE (#58344)
    
    ### What problem does this PR solve?
    Previously, the test flow was:
    
    
    - Drop databases
    - Create database and tables
    - Insert data
    - Drop tables
    
    
    However, the logic did not handle the case where dropping tables failed.
    As a result, when the test was rerun, the database could not be dropped
    because some tables were still present.
    
    
    This update introduces a safety check: before dropping a database, the
    system now detects whether any tables still exist under the DB. If so,
    it automatically drops all tables first, ensuring that DROP DATABASE
    always executes successfully in test scenarios.
    
    ### Release note
    
    None
    
    ### Check List (For Author)
    
    - Test <!-- At least one of them must be included. -->
        - [ ] Regression test
        - [ ] Unit Test
        - [ ] Manual test (add detailed scripts or steps below)
        - [ ] No need to test or manual test. Explain why:
    - [ ] This is a refactor/code format and no logic has been changed.
            - [ ] Previous test can cover this change.
            - [ ] No code files have been changed.
            - [ ] Other reason <!-- Add your reason?  -->
    
    - Behavior changed:
        - [ ] No.
        - [ ] Yes. <!-- Explain the behavior change -->
    
    - Does this need documentation?
        - [ ] No.
    - [ ] Yes. <!-- Add document PR link here. eg:
    https://github.com/apache/doris-website/pull/1214 -->
    
    ### Check List (For Reviewer who merge this PR)
    
    - [ ] Confirm the release note
    - [ ] Confirm test cases
    - [ ] Confirm document
    - [ ] Add branch pick label <!-- Add branch pick label that this PR
    should merge into -->
---
 .../azure_blob_all_test.groovy                     |   5 +-
 .../iceberg_on_hms_and_filesystem_and_dlf.groovy   | 103 ++++++++++++++++++++-
 2 files changed, 105 insertions(+), 3 deletions(-)

diff --git 
a/regression-test/suites/external_table_p2/refactor_catalog_param/azure_blob_all_test.groovy
 
b/regression-test/suites/external_table_p2/refactor_catalog_param/azure_blob_all_test.groovy
index 03ef1c576ac..7451e6c9367 100644
--- 
a/regression-test/suites/external_table_p2/refactor_catalog_param/azure_blob_all_test.groovy
+++ 
b/regression-test/suites/external_table_p2/refactor_catalog_param/azure_blob_all_test.groovy
@@ -35,6 +35,7 @@ suite("azure_blob_all_test", 
"p2,external,new_catalog_property") {
     
     def testIcebergTest = { String storage_props,String 
iceberg_fs_catalog_name, String protocol,String hdfsLocationType ->
         
+        iceberg_fs_catalog_name = iceberg_fs_catalog_name + "_" + 
ThreadLocalRandom.current().nextInt(100) 
         sql """
          drop catalog if exists ${iceberg_fs_catalog_name};
         """
@@ -52,7 +53,7 @@ suite("azure_blob_all_test", 
"p2,external,new_catalog_property") {
             """
 
         sql """
-        drop database if exists ${iceberg_fs_catalog_name}_db_test;
+        drop database if exists ${iceberg_fs_catalog_name}_db_test force;
         """
         sql """
         create database ${iceberg_fs_catalog_name}_db_test;
@@ -76,7 +77,7 @@ suite("azure_blob_all_test", 
"p2,external,new_catalog_property") {
         drop table if exists ${iceberg_fs_catalog_name}_table_test;
     """
         sql """
-        drop database if exists ${iceberg_fs_catalog_name}_db_test;
+        drop database if exists ${iceberg_fs_catalog_name}_db_test force;
     """
         sql """
         drop catalog if exists ${iceberg_fs_catalog_name};
diff --git 
a/regression-test/suites/external_table_p2/refactor_catalog_param/iceberg_on_hms_and_filesystem_and_dlf.groovy
 
b/regression-test/suites/external_table_p2/refactor_catalog_param/iceberg_on_hms_and_filesystem_and_dlf.groovy
index f3ccbb36889..45b18889767 100644
--- 
a/regression-test/suites/external_table_p2/refactor_catalog_param/iceberg_on_hms_and_filesystem_and_dlf.groovy
+++ 
b/regression-test/suites/external_table_p2/refactor_catalog_param/iceberg_on_hms_and_filesystem_and_dlf.groovy
@@ -14,6 +14,9 @@
 // KIND, either express or implied.  See the License for the
 // specific language governing permissions and limitations
 // under the License.
+
+import java.util.concurrent.ThreadLocalRandom
+
 import static groovy.test.GroovyAssert.shouldFail;
 suite("iceberg_on_hms_and_filesystem_and_dlf", 
"p2,external,new_catalog_property") {
 
@@ -33,7 +36,8 @@ suite("iceberg_on_hms_and_filesystem_and_dlf", 
"p2,external,new_catalog_property
             switch ${catalog_name};
         """
 
-        def db_name = prefix + "_db"
+        def db_name = prefix + "_db"+ ThreadLocalRandom.current().nextInt(100);
+        // Check if database exists
         sql """
             DROP DATABASE IF EXISTS ${db_name} FORCE;
         """
@@ -65,6 +69,103 @@ suite("iceberg_on_hms_and_filesystem_and_dlf", 
"p2,external,new_catalog_property
             SELECT * FROM ${table_name};
         """
         assert queryResult.size() == 1
+        def branch_name = prefix + "_branch"
+        def tag_name = prefix + "_tag"
+        sql """
+            ALTER TABLE ${table_name} CREATE BRANCH ${branch_name};
+        """
+        sql """
+            ALTER TABLE ${table_name} CREATE TAG ${tag_name};
+        """
+        sql """
+            INSERT OVERWRITE  TABLE ${table_name} VALUES (1, 'a', 10),(2, 'b', 
20), (3, 'c', 30)
+        """
+        def originalQueryResult = sql """
+            SELECT * FROM ${table_name};
+        """
+        assert originalQueryResult.size() == 3
+        sql """
+            insert into ${table_name}@branch(${branch_name}) values (4, 'd', 
40)
+        """
+        def branchQueryResult = sql """
+            SELECT * FROM ${table_name}@branch(${branch_name});
+        """
+        assert branchQueryResult.size() == 2
+
+
+        def tagQueryResult = sql """
+            SELECT * FROM ${table_name}@tag(${tag_name});
+        """
+        assert tagQueryResult.size() == 1
+        sql """
+            ALTER TABLE ${table_name} drop branch ${branch_name};
+        """
+        sql """
+            ALTER TABLE ${table_name} drop tag ${tag_name};
+        """
+        try {
+            def sys_query_result = sql """
+            SELECT * FROM ${table_name}\$files;
+        """
+            println sys_query_result
+            println "iceberg_meta_result SUCCESS" + catalog_name
+
+            def iceberg_meta_result = sql """
+        SELECT snapshot_id FROM iceberg_meta(
+                'table' = '${catalog_name}.${db_name}.${table_name}',
+                'query_type' = 'snapshots'
+        ) order by committed_at desc;
+        
+        """
+            def first_snapshot_id = iceberg_meta_result.get(0).get(0);
+            def time_travel =sql """
+            SELECT * FROM ${table_name} FOR VERSION AS OF ${first_snapshot_id};
+        """
+            println time_travel
+
+            println "iceberg_time_travel SUCCESS" + catalog_name
+        }catch (Exception e) {
+            println catalog_name + "system info error"
+        }
+
+
+        sql """
+            DROP TABLE ${table_name};
+        """
+        //partition table
+        table_name = prefix + "_partition_table"
+        sql """
+            CREATE TABLE ${table_name} (
+              `ts` DATETIME COMMENT 'ts',
+              `col1` BOOLEAN COMMENT 'col1',
+              `col2` INT COMMENT 'col2',
+              `col3` BIGINT COMMENT 'col3',
+              `col4` FLOAT COMMENT 'col4',
+              `col5` DOUBLE COMMENT 'col5',
+              `col6` DECIMAL(9,4) COMMENT 'col6',
+              `col7` STRING COMMENT 'col7',
+              `col8` DATE COMMENT 'col8',
+              `col9` DATETIME COMMENT 'col9',
+              `pt1` STRING COMMENT 'pt1',
+              `pt2` STRING COMMENT 'pt2'
+            )
+            PARTITION BY LIST (day(ts), pt1, pt2) ()
+            PROPERTIES (
+              'write-format'='orc',
+              'compression-codec'='zlib'
+            );
+        """
+
+        sql """
+            INSERT OVERWRITE  TABLE ${table_name} values 
+            ('2023-01-01 00:00:00', true, 1, 1, 1.0, 1.0, 1.0000, '1', 
'2023-01-01', '2023-01-01 00:00:00', 'a', '1'),
+            ('2023-01-02 00:00:00', false, 2, 2, 2.0, 2.0, 2.0000, '2', 
'2023-01-02', '2023-01-02 00:00:00', 'b', '2'),
+            ('2023-01-03 00:00:00', true, 3, 3, 3.0, 3.0, 3.0000, '3', 
'2023-01-03', '2023-01-03 00:00:00', 'c', '3');
+        """
+        def partitionQueryResult = sql """
+            SELECT * FROM ${table_name} WHERE pt1='a' and pt2='1';
+        """
+        assert partitionQueryResult.size() == 1
 
         sql """
             DROP TABLE ${table_name};


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to