Copilot commented on code in PR #58977:
URL: https://github.com/apache/doris/pull/58977#discussion_r2610480056


##########
regression-test/suites/cloud_p0/packed_file/test_packed_file_concurrent_load.groovy:
##########
@@ -36,36 +36,36 @@ suite("test_packed_file_concurrent_load", "p0, nonConcurrent") {
         }
     }
 
-    // Get merge file total small file count metric from all backends
-    def get_merge_file_total_small_file_count = {
+    // Get packed file total small file count metric from all backends
+    def get_packed_file_total_small_file_count = {
         long total_count = 0
         for (String backend_id: backendId_to_backendIP.keySet()) {
             def ip = backendId_to_backendIP.get(backend_id)
             def brpc_port = backendId_to_backendBrpcPort.get(backend_id)
             try {
-                def count = getBrpcMetrics(ip, brpc_port, 
"merge_file_total_small_file_num")
+                def count = getBrpcMetrics(ip, brpc_port, 
"packed_file_total_small_file_num")
                 if (count > 0) {
                     total_count += count
-                    logger.info("BE ${ip}:${brpc_port} 
merge_file_total_small_file_num = ${count}")
+                    logger.info("BE ${ip}:${brpc_port} 
packed_file_total_small_file_num = ${count}")
                 }
             } catch (Exception e) {
                 logger.warn("Failed to get metrics from BE ${ip}:${brpc_port}: 
${e.getMessage()}")
             }
         }
-        logger.info("Total merge_file_total_small_file_num across all 
backends: ${total_count}")
+        logger.info("Total packed_file_total_small_file_num across all 
backends: ${total_count}")
         return total_count
     }
 
 
-    // Enable merge file feature and set small file threshold using 
framework's temporary config function
+    // Enable packed file feature and set small file threshold using 
framework's temporary config function
     // This will automatically restore configs after test completes
     setBeConfigTemporary([
-        "enable_merge_file": "true",
+        "enable_packed_file": "true",
         "small_file_threshold_bytes": "102400"
     ]) {
-        // Get initial merge file count
-        def initial_merge_file_count = get_merge_file_total_small_file_count()
-        logger.info("Initial merge_file_total_small_file_count: 
${initial_merge_file_count}")
+        // Get initial packed file count
+        def initial_packed_file_count = 
get_packed_file_total_small_file_count()
+        logger.info("Initial packed_file_total_small_file_count: 
${initial_packed_file_count}")
 
         // Test case 1: Multiple small concurrent loads to the same tablet
         def tableName1 = "test_merge_file_same_tablet"

Review Comment:
   The table name still uses the old "test_merge_file_" prefix. For consistency 
with the refactoring from "merge_file" to "packed_file", this should be renamed 
to "test_packed_file_same_tablet".
   ```suggestion
           def tableName1 = "test_packed_file_same_tablet"
   ```



##########
regression-test/suites/cloud_p0/packed_file/test_packed_file_with_group_commit.groovy:
##########
@@ -36,38 +36,38 @@ suite("test_packed_file_with_group_commit", "p0, nonConcurrent") {
         }
     }
 
-    // Get merge file total small file count metric from all backends
-    def get_merge_file_total_small_file_count = {
+    // Get packed file total small file count metric from all backends
+    def get_packed_file_total_small_file_count = {
         long total_count = 0
         for (String backend_id: backendId_to_backendIP.keySet()) {
             def ip = backendId_to_backendIP.get(backend_id)
             def brpc_port = backendId_to_backendBrpcPort.get(backend_id)
             try {
-                def count = getBrpcMetrics(ip, brpc_port, 
"merge_file_total_small_file_num")
+                def count = getBrpcMetrics(ip, brpc_port, 
"packed_file_total_small_file_num")
                 if (count > 0) {
                     total_count += count
-                    logger.info("BE ${ip}:${brpc_port} 
merge_file_total_small_file_num = ${count}")
+                    logger.info("BE ${ip}:${brpc_port} 
packed_file_total_small_file_num = ${count}")
                 }
             } catch (Exception e) {
                 logger.warn("Failed to get metrics from BE ${ip}:${brpc_port}: 
${e.getMessage()}")
             }
         }
-        logger.info("Total merge_file_total_small_file_num across all 
backends: ${total_count}")
+        logger.info("Total packed_file_total_small_file_num across all 
backends: ${total_count}")
         return total_count
     }
 
-    // Enable merge file feature and set small file threshold using 
framework's temporary config function
+    // Enable packed file feature and set small file threshold using 
framework's temporary config function
     // This will automatically restore configs after test completes
     setBeConfigTemporary([
-        "enable_merge_file": "true",
+        "enable_packed_file": "true",
         "small_file_threshold_bytes": "102400"
     ]) {
-        // Get initial merge file count
-        def initial_merge_file_count = get_merge_file_total_small_file_count()
-        logger.info("Initial merge_file_total_small_file_count: 
${initial_merge_file_count}")
+        // Get initial packed file count
+        def initial_packed_file_count = 
get_packed_file_total_small_file_count()
+        logger.info("Initial packed_file_total_small_file_count: 
${initial_packed_file_count}")
 
-        // Test case: Merge file with group commit enabled
-        // This test verifies that merge file logic works correctly when group 
commit is enabled
+        // Test case: Packed file with group commit enabled
+        // This test verifies that packed file logic works correctly when 
group commit is enabled
         def tableName = "test_merge_file_with_group_commit"

Review Comment:
   The table name still uses the old "test_merge_file_" prefix. For consistency 
with the refactoring from "merge_file" to "packed_file", this should be renamed 
to "test_packed_file_with_group_commit".
   ```suggestion
           def tableName = "test_packed_file_with_group_commit"
   ```



##########
regression-test/suites/cloud_p0/packed_file/test_packed_file_concurrent_load.groovy:
##########
@@ -132,12 +132,12 @@ suite("test_packed_file_concurrent_load", "p0, nonConcurrent") {
         assertEquals(expected_rows1, result1[0][0] as int, 
                     "Expected exactly ${expected_rows1} rows for DUPLICATE KEY 
table, got ${result1[0][0]}")
 
-        def count_after_test1 = get_merge_file_total_small_file_count()
-        logger.info("merge_file_total_small_file_count after test case 1: 
${count_after_test1} (initial: ${initial_merge_file_count})")
+        def count_after_test1 = get_packed_file_total_small_file_count()
+        logger.info("packed_file_total_small_file_count after test case 1: 
${count_after_test1} (initial: ${initial_packed_file_count})")
         // The count must increase after test case 1
-        assertTrue(count_after_test1 > initial_merge_file_count,
-                   "merge_file_total_small_file_count must increase after test 
case 1. " +
-                   "Initial: ${initial_merge_file_count}, After test1: 
${count_after_test1}")
+        assertTrue(count_after_test1 > initial_packed_file_count,
+                   "packed_file_total_small_file_count must increase after 
test case 1. " +
+                   "Initial: ${initial_packed_file_count}, After test1: 
${count_after_test1}")
 
         // Test case 2: Multiple small concurrent loads to different partitions
         def tableName2 = "test_merge_file_different_partitions"

Review Comment:
   The table name still uses the old "test_merge_file_" prefix. For consistency 
with the refactoring from "merge_file" to "packed_file", this should be renamed 
to "test_packed_file_different_partitions".
   ```suggestion
           def tableName2 = "test_packed_file_different_partitions"
   ```



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to