This is an automated email from the ASF dual-hosted git repository.

dataroaring pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-3.0 by this push:
     new 38491c5f37b branch-3.0: [Fix](case) Fix some show data p2 cases #55244 
(#55266)
38491c5f37b is described below

commit 38491c5f37b9761ea89297a8029c3c15e17616d1
Author: github-actions[bot] 
<41898282+github-actions[bot]@users.noreply.github.com>
AuthorDate: Tue Aug 26 10:57:19 2025 +0800

    branch-3.0: [Fix](case) Fix some show data p2 cases #55244 (#55266)
    
    Cherry-picked from #55244
    
    Co-authored-by: abmdocrt <[email protected]>
---
 regression-test/plugins/aliyun_oss_sdk.groovy      | 99 +++++++++++++++++-----
 .../plugins/cloud_show_data_plugin.groovy          | 10 ++-
 .../test_cloud_follower_show_data.groovy           | 14 +--
 .../test_cloud_delete_table_rows_show_data.groovy  |  1 +
 .../test_cloud_drop_table_show_data.groovy         |  8 +-
 .../test_cloud_disable_compaction_show_data.groovy |  8 +-
 6 files changed, 103 insertions(+), 37 deletions(-)

diff --git a/regression-test/plugins/aliyun_oss_sdk.groovy 
b/regression-test/plugins/aliyun_oss_sdk.groovy
index 6b0c096d7e5..a23924530d1 100644
--- a/regression-test/plugins/aliyun_oss_sdk.groovy
+++ b/regression-test/plugins/aliyun_oss_sdk.groovy
@@ -91,29 +91,86 @@ Suite.metaClass.listOssObjectWithPrefix = { OSS client, 
String bucketName, Strin
 
 }
 
-// get file size in a specific directory
+/**
+ * 计算指定文件夹的总大小(递归计算所有文件)
+ * @param client OSS客户端实例
+ * @param bucketName OSS存储桶名称
+ * @param folder 文件夹路径前缀
+ * @return 文件夹总大小(字节)
+ */
 Suite.metaClass.calculateFolderLength = { OSS client, String bucketName, 
String folder ->
-    long size = 0L;
+    logger.info("[calculateFolderLength] 开始计算文件夹大小 - Bucket: ${bucketName}, 
Folder: ${folder}")
+    
+    long size = 0L;  // 累计文件大小
     ObjectListing objectListing = null;
-    do {
-        // The default value for MaxKey is 100, and the maximum value is 1000
-        logger.info("debug:" + folder)
-        ListObjectsRequest request = new 
ListObjectsRequest(bucketName).withPrefix(folder).withMaxKeys(1000);
-        if (objectListing != null) {
-            request.setMarker(objectListing.getNextMarker());
-        }
-        objectListing = client.listObjects(request);
-        List<OSSObjectSummary> sums = objectListing.getObjectSummaries();
-        for (OSSObjectSummary s : sums) {
-            logger.info("Object Key: ${s.getKey()}")
-            logger.info("Size: ${s.getSize()} bytes")
-            logger.info("Last Modified: ${s.getLastModified()}")
-            logger.info("Storage Class: ${s.getStorageClass()}")
-            logger.info("Owner: ${s.getOwner()?.getId()}")
-            logger.info("-------------------")
-            size += s.getSize();
-        }
-    } while (objectListing.isTruncated());
+    int pageCount = 0;  // 分页计数器
+    int totalObjects = 0;  // 总文件数量计数器
+    
+    try {
+        // 使用分页方式遍历所有对象,避免一次性加载过多数据
+        do {
+            pageCount++;
+            
+            // 创建列表对象请求,设置最大返回数量为1000(OSS限制的最大值)
+            ListObjectsRequest request = new ListObjectsRequest(bucketName)
+                .withPrefix(folder)
+                .withMaxKeys(1000);
+            
+            // 如果不是第一页,设置分页标记
+            if (objectListing != null) {
+                String nextMarker = objectListing.getNextMarker();
+                request.setMarker(nextMarker);
+            }
+            
+            // 执行OSS请求获取对象列表
+            objectListing = client.listObjects(request);
+            
+            // 获取当前页的对象摘要列表
+            List<OSSObjectSummary> sums = objectListing.getObjectSummaries();
+            
+            
+            // 遍历当前页的所有对象,累加大小
+            for (OSSObjectSummary s : sums) {
+                totalObjects++;
+                long objSize = s.getSize();
+                
+                // 详细记录每个对象的信息
+                logger.info("📄 [OBJECT #${totalObjects}] 单个对象详情:")
+                logger.info("   ├─ Key: ${s.getKey()}")
+                logger.info("   ├─ Size: ${objSize} bytes 
(${String.format('%.2f', objSize / 1024.0 / 1024.0)} MB)")
+                logger.info("   ├─ Last Modified: ${s.getLastModified()}")
+                logger.info("   ├─ Storage Class: ${s.getStorageClass()}")
+                logger.info("   ├─ Owner: ${s.getOwner()?.getId() ?: 'N/A'}")
+                logger.info("   └─ ETag: ${s.getETag()}")
+                
+                // 累加到总大小
+                size += objSize;
+                logger.info("🔢 [RUNNING TOTAL] 当前累计: ${size} bytes 
(${String.format('%.2f', size / 1024.0 / 1024.0)} MB)")
+                logger.info("─────────────────────────────────────────")
+            }
+            
+            
+        } while (objectListing.isTruncated()); // 继续处理下一页,直到所有数据处理完毕
+        
+        // 记录最终统计结果
+        logger.info("📊 [FOLDER SUMMARY] 文件夹统计完成:")
+        logger.info("   ╔══════════════════════════════════════════╗")
+        logger.info("   ║ 📁 文件夹路径: ${folder}")
+        logger.info("   ║ 📝 总文件数: ${totalObjects}")
+        logger.info("   ║ 📏 总大小: ${size} bytes")
+        logger.info("   ║ 📏 总大小: ${String.format('%.2f', size / 1024.0 / 
1024.0)} MB")
+        logger.info("   ║ 📏 总大小: ${String.format('%.2f', size / 1024.0 / 
1024.0 / 1024.0)} GB")
+        logger.info("   ╚══════════════════════════════════════════╝")
+        
+    } catch (Exception e) {
+        logger.error("[calculateFolderLength] 计算文件夹大小时发生异常:", e)
+        logger.error("  - Bucket: ${bucketName}")
+        logger.error("  - Folder: ${folder}")
+        logger.error("  - 已处理对象数: ${totalObjects}")
+        logger.error("  - 当前累计大小: ${size} bytes")
+        throw e  // 重新抛出异常
+    }
+    
     return size;
 }
 
diff --git a/regression-test/plugins/cloud_show_data_plugin.groovy 
b/regression-test/plugins/cloud_show_data_plugin.groovy
index 3180d452dc6..25019937ced 100644
--- a/regression-test/plugins/cloud_show_data_plugin.groovy
+++ b/regression-test/plugins/cloud_show_data_plugin.groovy
@@ -186,8 +186,10 @@ import org.codehaus.groovy.runtime.IOGroovyMethods
     }
 
     Suite.metaClass.show_table_data_size_through_mysql = { String table ->
+        logger.info("[show_table_data_size_through_mysql] 表名: ${table}")
         def mysqlShowDataSize = 0L
         def res = sql_return_maparray " show data from ${table}"
+        logger.info("[show_table_data_size_through_mysql] show data结果: " + 
res.toString())
         def tableSizeInfo = res[0]
         def fields = tableSizeInfo["Size"].split(" ")
         if (fields.length == 2 ){
@@ -196,13 +198,16 @@ import org.codehaus.groovy.runtime.IOGroovyMethods
             mysqlShowDataSize = translate_different_unit_to_MB(sizeField, 
unitField)
         }
         def round_size = new BigDecimal(mysqlShowDataSize).setScale(0, 
BigDecimal.ROUND_FLOOR);
+        logger.info("[show_table_data_size_through_mysql] 最终结果: ${round_size} 
MB")
         return round_size
     }
 
     Suite.metaClass.caculate_table_data_size_through_api = { 
List<List<Object>> tablets ->
         Double apiCaculateSize = 0 
         for (HashMap tablet in tablets) {
+            def tabletId = tablet.TabletId
             def tabletStatus = show_tablet_compaction(tablet)
+            logger.info("[caculate_table_data_size_through_api] tablet ID: 
${tabletId}, status: " + tabletStatus.toString())
             
             for(String rowset: tabletStatus.rowsets){
                 def fields = rowset.split(" ")
@@ -215,6 +220,7 @@ import org.codehaus.groovy.runtime.IOGroovyMethods
             }
         }
         def round_size = new BigDecimal(apiCaculateSize).setScale(0, 
BigDecimal.ROUND_FLOOR);
+        logger.info("[caculate_table_data_size_through_api] 最终结果: 
${round_size} MB")
         return round_size
     }
 
@@ -274,10 +280,10 @@ import org.codehaus.groovy.runtime.IOGroovyMethods
         getBackendIpHttpPort(backendIdToBackendIP, backendIdToBackendHttpPort);
 
         def backendId = backendIdToBackendIP.keySet()[0]
-
+        def code, out, err
         def get_be_param = { paramName ->
            // assuming paramName has the same value on all BEs
-            def (code, out, err) = 
show_be_config(backendIdToBackendIP.get(backendId), 
backendIdToBackendHttpPort.get(backendId))
+            (code, out, err) = 
show_be_config(backendIdToBackendIP.get(backendId), 
backendIdToBackendHttpPort.get(backendId))
             assertEquals(code, 0)
             def configList = parseJson(out.trim())
             assert configList instanceof List
diff --git 
a/regression-test/suites/show_data_p2/test_cloud_follower_show_data.groovy 
b/regression-test/suites/show_data_p2/test_cloud_follower_show_data.groovy
index 671191a963d..026420dbb1d 100644
--- a/regression-test/suites/show_data_p2/test_cloud_follower_show_data.groovy
+++ b/regression-test/suites/show_data_p2/test_cloud_follower_show_data.groovy
@@ -91,25 +91,27 @@ suite("test_cloud_follower_show_data","p2, nonConcurrent") {
         assertEquals(sizeRecords["apiSize"][0], sizeRecords["apiSize"][1])
         assertEquals(sizeRecords["cbsSize"][0], sizeRecords["cbsSize"][1])
 
-
+        sleep(10 * 1000)
         def result = sql """show frontends;"""
         logger.info("result:" + result)
         for (int i = 0; i < result.size(); i++) {
             if (result[i][8] == "false" && result[i][11] == "true") {
-                def tokens = context.config.jdbcUrl.split('/')
-                url = tokens[0] + "//" + tokens[2] + "/" + 
"information_schema" + "?"
-                def new_jdbc_url = url.replaceAll(/\/\/[0-9.]+:/, 
"//${switch_ip}:")
+                def follower_ip = result[i][1]
+                
//jdbc:mysql://127.0.0.1:9030/?useLocalSessionState=true&allowLoadLocalInfile=true&zeroDateTimeBehavior=round
+                def old_url = context.config.jdbcUrl
+                def new_jdbc_url = old_url.replaceAll("://[^:]+:", 
"://${follower_ip}:")
                 logger.info("new_jdbc_url: " + new_jdbc_url)
 
                 connect('root', '', new_jdbc_url) {
-                    sql "select count(*) from ${tableName}"
+                    sql "select count(*) from 
regression_test_show_data_p2.${tableName}"
+                    sql "use regression_test_show_data_p2"
 
                     
sizeRecords["mysqlSize"].add(show_table_data_size_through_mysql(tableName))
                 }
             }
         }
 
-        for (int i = 0; i < sizeRecords["mysqlSize"].size(); i++) { 
+        for (int i = 0; i < sizeRecords["mysqlSize"].size(); i++) {
             if (i > 0) {
                 assertEquals(sizeRecords["mysqlSize"][i], 
sizeRecords["mysqlSize"][i-1])
             }
diff --git 
a/regression-test/suites/show_data_p2/test_table_operation/test_cloud_delete_table_rows_show_data.groovy
 
b/regression-test/suites/show_data_p2/test_table_operation/test_cloud_delete_table_rows_show_data.groovy
index 9cef5a53712..a5a49618b46 100644
--- 
a/regression-test/suites/show_data_p2/test_table_operation/test_cloud_delete_table_rows_show_data.groovy
+++ 
b/regression-test/suites/show_data_p2/test_table_operation/test_cloud_delete_table_rows_show_data.groovy
@@ -162,6 +162,7 @@ suite("test_cloud_delete_table_rows_show_data","p2, 
nonConcurrent") {
         assertEquals(sizeRecords["apiSize"][0], sizeRecords["apiSize"][1])
         assertEquals(sizeRecords["cbsSize"][0], sizeRecords["cbsSize"][1])
 
+        sql """set delete_without_partition=true;"""
         sql """delete from ${tableName} where L_ORDERKEY >=0;"""
 
         // 加一下触发compaction的机制
diff --git 
a/regression-test/suites/show_data_p2/test_table_operation/test_cloud_drop_table_show_data.groovy
 
b/regression-test/suites/show_data_p2/test_table_operation/test_cloud_drop_table_show_data.groovy
index d80295d802f..e80deb85834 100644
--- 
a/regression-test/suites/show_data_p2/test_table_operation/test_cloud_drop_table_show_data.groovy
+++ 
b/regression-test/suites/show_data_p2/test_table_operation/test_cloud_drop_table_show_data.groovy
@@ -21,7 +21,7 @@
 import org.codehaus.groovy.runtime.IOGroovyMethods
 
 // load the same data 10 times; expect data size not to rise
-suite("test_cloud_drop_and_recover_table_show_data","p2, nonConcurrent") {
+suite("test_cloud_drop_and_show_data","p2, nonConcurrent") {
     //cloud-mode
     if (!isCloudMode()) {
         logger.info("not cloud mode, not run")
@@ -124,15 +124,15 @@ suite("test_cloud_drop_and_recover_table_show_data","p2, 
nonConcurrent") {
             
sizeRecords["cbsSize"].add(caculate_table_data_size_in_backend_storage(tablets))
             logger.info("after drop table force, storageSize is: 
${sizeRecords["cbsSize"][-1]}")
 
-            assertEquals(sizeRecords["cbsSize"][2], 0.0)
+            assertTrue(sizeRecords["cbsSize"][2] == 0)
 
         }
     }
 
     def main = {
         def tableName = "test_cloud_drop_and_recover_table_show_data"
-        //create_normal_table(tableName) 
-        //check(tableName, 1)
+        create_normal_table(tableName) 
+        check(tableName, 1)
 
         tableName = "test_cloud_drop_and_recover_table_force_show_data"
         create_normal_table(tableName) 
diff --git 
a/regression-test/suites/show_data_p2/test_table_property/test_cloud_disable_compaction_show_data.groovy
 
b/regression-test/suites/show_data_p2/test_table_property/test_cloud_disable_compaction_show_data.groovy
index f04c6613334..4b1c0a639da 100644
--- 
a/regression-test/suites/show_data_p2/test_table_property/test_cloud_disable_compaction_show_data.groovy
+++ 
b/regression-test/suites/show_data_p2/test_table_property/test_cloud_disable_compaction_show_data.groovy
@@ -81,11 +81,11 @@ suite("test_cloud_disable_compaction_show_data","p2, 
nonConcurrent") {
         // expect mysqlSize == apiSize == storageSize
         assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["apiSize"][0])
         assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["cbsSize"][0])
-        // expect load 1 times ==  load 10 times
+        // expect size after 10 loads >= 10 x size after 1 load
         logger.info("after 1 time stream load, size is 
${sizeRecords["mysqlSize"][0]}, after 10 times stream load, size is 
${sizeRecords["mysqlSize"][1]}")
-        assertEquals(sizeRecords["mysqlSize"][0], sizeRecords["mysqlSize"][1])
-        assertEquals(sizeRecords["apiSize"][0], sizeRecords["apiSize"][1])
-        assertEquals(sizeRecords["cbsSize"][0], sizeRecords["cbsSize"][1])
+        assertTrue(sizeRecords["mysqlSize"][1] >= 10 * 
sizeRecords["mysqlSize"][0])
+        assertTrue(sizeRecords["apiSize"][1] >= 10 * sizeRecords["apiSize"][0])
+        assertTrue(sizeRecords["cbsSize"][1] >= 10 * sizeRecords["cbsSize"][0])
     }
 
     set_config_before_show_data_test()


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to