This is an automated email from the ASF dual-hosted git repository.
dataroaring pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/doris.git
The following commit(s) were added to refs/heads/branch-3.0 by this push:
new 7214c960da8 [fix](case) fix some cases (#54896)
7214c960da8 is described below
commit 7214c960da82f3579c88c7e3b41977985a4a1840
Author: Yongqiang YANG <[email protected]>
AuthorDate: Mon Aug 18 09:25:45 2025 +0800
[fix](case) fix some cases (#54896)
---
...ompaction_get_delete_bitmap_from_cache_fail.groovy | 19 ++++++++++---------
.../cloud/test_cloud_mow_partial_update_retry.groovy | 6 ++++--
...est_cloud_mow_stale_resp_load_load_conflict.groovy | 6 ++++--
...test_cloud_mow_stream_load_with_commit_fail.groovy | 13 +++++++++++++
.../test_cloud_sc_self_retry_with_stop_token.groovy | 2 +-
.../test_load_stream_fault_injection.groovy | 6 ++++++
.../test_segcompaction_fault_injection.groovy | 5 +++++
7 files changed, 43 insertions(+), 14 deletions(-)
diff --git
a/regression-test/suites/fault_injection_p0/cloud/test_cloud_mow_compaction_get_delete_bitmap_from_cache_fail.groovy
b/regression-test/suites/fault_injection_p0/cloud/test_cloud_mow_compaction_get_delete_bitmap_from_cache_fail.groovy
index 72fda5eea26..eea87d676e9 100644
---
a/regression-test/suites/fault_injection_p0/cloud/test_cloud_mow_compaction_get_delete_bitmap_from_cache_fail.groovy
+++
b/regression-test/suites/fault_injection_p0/cloud/test_cloud_mow_compaction_get_delete_bitmap_from_cache_fail.groovy
@@ -61,7 +61,7 @@
suite("test_cloud_mow_compaction_get_delete_bitmap_from_cache_fail", "nonConcurr
DebugPoint.enableDebugPoint(injectBe.Host,
injectBe.HttpPort.toInteger(), NodeType.BE, inject_spin_block)
DebugPoint.enableDebugPoint(injectBe.Host,
injectBe.HttpPort.toInteger(), NodeType.BE, inject_cache_miss)
logger.info("run compaction:" + tabletId)
- (code, out, err) = be_run_cumulative_compaction(injectBe.Host,
injectBe.HttpPort, tabletId)
+ def (code, out, err) = be_run_cumulative_compaction(injectBe.Host,
injectBe.HttpPort, tabletId)
logger.info("Run compaction: code=" + code + ", out=" + out + ", err="
+ err)
// Concurrent inserts
@@ -74,14 +74,15 @@
suite("test_cloud_mow_compaction_get_delete_bitmap_from_cache_fail", "nonConcurr
// let compaction continue
DebugPoint.disableDebugPoint(injectBe.Host,
injectBe.HttpPort.toInteger(), NodeType.BE, inject_spin_block)
- do {
- Thread.sleep(100)
- (code, out, err) = be_get_compaction_status(injectBe.Host,
injectBe.HttpPort, tabletId)
- logger.info("Get compaction status: code=" + code + ", out=" +
out + ", err=" + err)
- assertEquals(code, 0)
- def compactionStatus = parseJson(out.trim())
- assertEquals("success", compactionStatus.status.toLowerCase())
- running = compactionStatus.run_status
+ def running = true
+ do {
+ Thread.sleep(100)
+ (code, out, err) = be_get_compaction_status(injectBe.Host,
injectBe.HttpPort, tabletId)
+ logger.info("Get compaction status: code=" + code + ", out=" + out
+ ", err=" + err)
+ assertEquals(code, 0)
+ def compactionStatus = parseJson(out.trim())
+ assertEquals("success", compactionStatus.status.toLowerCase())
+ running = compactionStatus.run_status
} while (running)
Thread.sleep(200)
diff --git
a/regression-test/suites/fault_injection_p0/cloud/test_cloud_mow_partial_update_retry.groovy
b/regression-test/suites/fault_injection_p0/cloud/test_cloud_mow_partial_update_retry.groovy
index 4f091bef8ea..87749954f17 100644
---
a/regression-test/suites/fault_injection_p0/cloud/test_cloud_mow_partial_update_retry.groovy
+++
b/regression-test/suites/fault_injection_p0/cloud/test_cloud_mow_partial_update_retry.groovy
@@ -65,7 +65,8 @@ suite("test_cloud_mow_partial_update_retry", "nonConcurrent")
{
// wait until the first partial update load's delete bitmap update
lock expired
// to ensure that the second load can take the delete bitmap
update lock
// Config.delete_bitmap_lock_expiration_seconds = 10s
- Thread.sleep(11 * 1000)
+ def timeout =
getFeConfig("delete_bitmap_lock_expiration_seconds").toInteger() + 2;
+ Thread.sleep(timeout * 1000)
// the second load
GetDebugPoint().enableDebugPointForAllBEs("BaseTablet::update_delete_bitmap.enable_spin_wait",
[token: "token2"])
@@ -80,7 +81,8 @@ suite("test_cloud_mow_partial_update_retry", "nonConcurrent")
{
// keep waiting until the delete bitmap calculation
timeout(Config.calculate_delete_bitmap_task_timeout_seconds = 15s)
// and the first load will retry the calculation of delete bitmap
- Thread.sleep(15 * 1000)
+ timeout =
getFeConfig("calculate_delete_bitmap_task_timeout_seconds").toInteger() + 2;
+ Thread.sleep(timeout * 1000)
// let the first partial update load finish
GetDebugPoint().enableDebugPointForAllBEs("BaseTablet::update_delete_bitmap.block")
diff --git
a/regression-test/suites/fault_injection_p0/cloud/test_cloud_mow_stale_resp_load_load_conflict.groovy
b/regression-test/suites/fault_injection_p0/cloud/test_cloud_mow_stale_resp_load_load_conflict.groovy
index faafb6b8482..e57621ca3f8 100644
---
a/regression-test/suites/fault_injection_p0/cloud/test_cloud_mow_stale_resp_load_load_conflict.groovy
+++
b/regression-test/suites/fault_injection_p0/cloud/test_cloud_mow_stale_resp_load_load_conflict.groovy
@@ -62,7 +62,8 @@ suite("test_cloud_mow_stale_resp_load_load_conflict",
"nonConcurrent") {
// wait until the first load's delete bitmap update lock expired
// to ensure that the second load can take the delete bitmap
update lock
// Config.delete_bitmap_lock_expiration_seconds = 10s
- Thread.sleep(11 * 1000)
+ def timeout =
getFeConfig("delete_bitmap_lock_expiration_seconds").toInteger() + 2;
+ Thread.sleep(timeout * 1000)
// the second load
GetDebugPoint().enableDebugPointForAllBEs("BaseTablet::update_delete_bitmap.enable_spin_wait",
[token: "token2"])
@@ -75,7 +76,8 @@ suite("test_cloud_mow_stale_resp_load_load_conflict",
"nonConcurrent") {
// keep waiting until the delete bitmap calculation
timeout(Config.calculate_delete_bitmap_task_timeout_seconds = 15s)
// and the coordinator BE will retry to commit the first load's txn
- Thread.sleep(15 * 1000)
+ timeout =
getFeConfig("calculate_delete_bitmap_task_timeout_seconds").toInteger() + 2;
+ Thread.sleep(timeout * 1000)
// let the first partial update load finish
GetDebugPoint().enableDebugPointForAllBEs("BaseTablet::update_delete_bitmap.block")
diff --git
a/regression-test/suites/fault_injection_p0/cloud/test_cloud_mow_stream_load_with_commit_fail.groovy
b/regression-test/suites/fault_injection_p0/cloud/test_cloud_mow_stream_load_with_commit_fail.groovy
index c5810bec88a..aaeb596fa07 100644
---
a/regression-test/suites/fault_injection_p0/cloud/test_cloud_mow_stream_load_with_commit_fail.groovy
+++
b/regression-test/suites/fault_injection_p0/cloud/test_cloud_mow_stream_load_with_commit_fail.groovy
@@ -146,6 +146,7 @@ suite("test_cloud_mow_stream_load_with_commit_fail",
"nonConcurrent") {
file "test_stream_load0.csv"
time 10000 // limit inflight 10s
+ directToBe backendIdToBackendIP.get(backendId),
backendId_to_backendHttpPort.get(backendId)
check { result, exception, startTime, endTime ->
log.info("Stream load result: ${result}")
@@ -167,6 +168,8 @@ suite("test_cloud_mow_stream_load_with_commit_fail",
"nonConcurrent") {
time 10000 // limit inflight 10s
+ directToBe backendIdToBackendIP.get(backendId),
backendId_to_backendHttpPort.get(backendId)
+
check { result, exception, startTime, endTime ->
log.info("Stream load result: ${result}")
def json = parseJson(result)
@@ -190,6 +193,7 @@ suite("test_cloud_mow_stream_load_with_commit_fail",
"nonConcurrent") {
file "test_stream_load2.csv"
time 10000 // limit inflight 10s
+ directToBe backendIdToBackendIP.get(backendId),
backendId_to_backendHttpPort.get(backendId)
check { result, exception, startTime, endTime ->
log.info("Stream load result: ${result}")
@@ -214,6 +218,7 @@ suite("test_cloud_mow_stream_load_with_commit_fail",
"nonConcurrent") {
file "test_stream_load2.csv"
time 10000 // limit inflight 10s
+ directToBe backendIdToBackendIP.get(backendId),
backendId_to_backendHttpPort.get(backendId)
check { result, exception, startTime, endTime ->
log.info("Stream load result: ${result}")
@@ -239,6 +244,7 @@ suite("test_cloud_mow_stream_load_with_commit_fail",
"nonConcurrent") {
file "test_stream_load3.csv"
time 10000 // limit inflight 10s
+ directToBe backendIdToBackendIP.get(backendId),
backendId_to_backendHttpPort.get(backendId)
check { result, exception, startTime, endTime ->
log.info("Stream load result: ${result}")
@@ -262,6 +268,7 @@ suite("test_cloud_mow_stream_load_with_commit_fail",
"nonConcurrent") {
file "test_stream_load3.csv"
time 10000 // limit inflight 10s
+ directToBe backendIdToBackendIP.get(backendId),
backendId_to_backendHttpPort.get(backendId)
check { result, exception, startTime, endTime ->
log.info("Stream load result: ${result}")
@@ -289,6 +296,7 @@ suite("test_cloud_mow_stream_load_with_commit_fail",
"nonConcurrent") {
file "test_stream_load4.csv"
time 10000 // limit inflight 10s
+ directToBe backendIdToBackendIP.get(backendId),
backendId_to_backendHttpPort.get(backendId)
check { result, exception, startTime, endTime ->
log.info("Stream load result: ${result}")
@@ -313,6 +321,7 @@ suite("test_cloud_mow_stream_load_with_commit_fail",
"nonConcurrent") {
file "test_stream_load4.csv"
time 10000 // limit inflight 10s
+ directToBe backendIdToBackendIP.get(backendId),
backendId_to_backendHttpPort.get(backendId)
check { result, exception, startTime, endTime ->
log.info("Stream load result: ${result}")
@@ -337,6 +346,7 @@ suite("test_cloud_mow_stream_load_with_commit_fail",
"nonConcurrent") {
file "test_stream_load5.csv"
time 10000 // limit inflight 10s
+ directToBe backendIdToBackendIP.get(backendId),
backendId_to_backendHttpPort.get(backendId)
check { result, exception, startTime, endTime ->
log.info("Stream load result: ${result}")
@@ -360,6 +370,7 @@ suite("test_cloud_mow_stream_load_with_commit_fail",
"nonConcurrent") {
file "test_stream_load5.csv"
time 10000 // limit inflight 10s
+ directToBe backendIdToBackendIP.get(backendId),
backendId_to_backendHttpPort.get(backendId)
check { result, exception, startTime, endTime ->
log.info("Stream load result: ${result}")
@@ -386,6 +397,7 @@ suite("test_cloud_mow_stream_load_with_commit_fail",
"nonConcurrent") {
file "test_stream_load6.csv"
time 10000 // limit inflight 10s
+ directToBe backendIdToBackendIP.get(backendId),
backendId_to_backendHttpPort.get(backendId)
check { result, exception, startTime, endTime ->
log.info("Stream load result: ${result}")
@@ -409,6 +421,7 @@ suite("test_cloud_mow_stream_load_with_commit_fail",
"nonConcurrent") {
file "test_stream_load.csv"
time 10000 // limit inflight 10s
+ directToBe backendIdToBackendIP.get(backendId),
backendId_to_backendHttpPort.get(backendId)
check { result, exception, startTime, endTime ->
log.info("Stream load result: ${result}")
diff --git
a/regression-test/suites/fault_injection_p0/cloud/test_cloud_sc_self_retry_with_stop_token.groovy
b/regression-test/suites/fault_injection_p0/cloud/test_cloud_sc_self_retry_with_stop_token.groovy
index 5978320aa19..6a96c2b1ec9 100644
---
a/regression-test/suites/fault_injection_p0/cloud/test_cloud_sc_self_retry_with_stop_token.groovy
+++
b/regression-test/suites/fault_injection_p0/cloud/test_cloud_sc_self_retry_with_stop_token.groovy
@@ -53,7 +53,7 @@ suite("test_cloud_sc_self_retry_with_stop_token",
"nonConcurrent") {
sql "alter table ${table1} modify column c2 varchar(100);"
def res
- Awaitility.await().atMost(40, TimeUnit.SECONDS).pollDelay(1,
TimeUnit.SECONDS).pollInterval(1, TimeUnit.SECONDS).until(() -> {
+ Awaitility.await().atMost(600, TimeUnit.SECONDS).pollDelay(1,
TimeUnit.SECONDS).pollInterval(1, TimeUnit.SECONDS).until(() -> {
res = sql_return_maparray """ SHOW ALTER TABLE COLUMN WHERE
TableName='${table1}' ORDER BY createtime DESC LIMIT 1 """
logger.info("res: ${res}")
if (res[0].State == "FINISHED" || res[0].State == "CANCELLED") {
diff --git
a/regression-test/suites/fault_injection_p0/test_load_stream_fault_injection.groovy
b/regression-test/suites/fault_injection_p0/test_load_stream_fault_injection.groovy
index 0ba05394adf..11415955f4b 100644
---
a/regression-test/suites/fault_injection_p0/test_load_stream_fault_injection.groovy
+++
b/regression-test/suites/fault_injection_p0/test_load_stream_fault_injection.groovy
@@ -19,6 +19,12 @@ import org.codehaus.groovy.runtime.IOGroovyMethods
import org.apache.doris.regression.util.Http
suite("load_stream_fault_injection", "nonConcurrent") {
+ if (isCloudMode()) {
+ // TODO: load stream fault injection test is not supported in cloud
mode yet
+ logger.info("skip test in cloud mode")
+ return
+ }
+
// init query case data
sql """
CREATE TABLE IF NOT EXISTS `baseall` (
diff --git
a/regression-test/suites/fault_injection_p0/test_segcompaction_fault_injection.groovy
b/regression-test/suites/fault_injection_p0/test_segcompaction_fault_injection.groovy
index 2cd9ab7841c..6b827bb0a51 100644
---
a/regression-test/suites/fault_injection_p0/test_segcompaction_fault_injection.groovy
+++
b/regression-test/suites/fault_injection_p0/test_segcompaction_fault_injection.groovy
@@ -19,6 +19,11 @@ import org.codehaus.groovy.runtime.IOGroovyMethods
import org.apache.doris.regression.util.Http
suite("test_segcompaction_correctness", "nonConcurrent,p2") {
+ if (isCloudMode()) {
+ logger.info("skip test in cloud mode")
+ return
+ }
+
def tableName = "segcompaction_correctness_test"
def create_table_sql = """
CREATE TABLE IF NOT EXISTS ${tableName} (
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]