This is an automated email from the ASF dual-hosted git repository.
dataroaring pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/doris.git
The following commit(s) were added to refs/heads/branch-3.0 by this push:
new 61dd708a66d [regression](filecache) fix regression failures part2
(#53783) (#53915)
61dd708a66d is described below
commit 61dd708a66d5d62ef15bd36c08d6e2edd46bacad
Author: zhengyu <[email protected]>
AuthorDate: Sat Jul 26 10:36:39 2025 +0800
[regression](filecache) fix regression failures part2 (#53783) (#53915)
---
.../cloud_p0/cache/http/test_reset_capacity.groovy | 2 +-
.../warm_up/cluster/test_warm_up_cluster.groovy | 7 ++-
.../cluster/test_warm_up_cluster_batch.groovy | 7 ++-
.../cluster/test_warm_up_cluster_bigsize.groovy | 7 ++-
.../cluster/test_warm_up_cluster_empty.groovy | 7 ++-
.../cluster/test_warm_up_compute_group.groovy | 7 ++-
.../warm_up/table/test_warm_up_partition.groovy | 7 ++-
.../test_warm_up_same_table_multi_times.groovy | 14 +++---
.../warm_up/table/test_warm_up_table.groovy | 12 ++---
.../warm_up/table/test_warm_up_tables.groovy | 13 +++--
.../suites/cloud_p0/cache/ttl/alter_ttl_1.groovy | 56 ++++++++++------------
.../suites/cloud_p0/cache/ttl/alter_ttl_4.groovy | 8 +++-
.../cache/ttl/create_table_as_select.groovy | 23 ---------
.../cloud_p0/cache/ttl/test_ttl_preempt.groovy | 9 +++-
14 files changed, 76 insertions(+), 103 deletions(-)
diff --git
a/regression-test/suites/cloud_p0/cache/http/test_reset_capacity.groovy
b/regression-test/suites/cloud_p0/cache/http/test_reset_capacity.groovy
index 2d04caaa078..72904a7bcee 100644
--- a/regression-test/suites/cloud_p0/cache/http/test_reset_capacity.groovy
+++ b/regression-test/suites/cloud_p0/cache/http/test_reset_capacity.groovy
@@ -254,7 +254,7 @@ suite("test_reset_capacity") {
continue
}
def i = line.indexOf(' ')
- ttl_cache_size = line.substring(i).toLong()
+ def ttl_cache_size = line.substring(i).toLong()
logger.info("current ttl_cache_size " + ttl_cache_size);
assertTrue(ttl_cache_size <= 1073741824)
flag1 = true
diff --git
a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster.groovy
b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster.groovy
index b6ae1be881e..99050350c51 100644
---
a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster.groovy
+++
b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster.groovy
@@ -157,12 +157,11 @@ suite("test_warm_up_cluster") {
int i = 0
for (; i < retryTime; i++) {
sleep(1000)
- def status = getJobState(jobId[0][0])
- logger.info(status)
- if (status.equals("CANCELLED")) {
+ def statuses = getJobState(jobId[0][0])
+ if (statuses.any { it != null && it.equals("CANCELLED") }) {
assertTrue(false);
}
- if (status.equals("FINISHED")) {
+ if (statuses.any { it != null && it.equals("FINISHED") }) {
break;
}
}
diff --git
a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster_batch.groovy
b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster_batch.groovy
index 9a2aff33933..315f9b8f892 100644
---
a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster_batch.groovy
+++
b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster_batch.groovy
@@ -120,12 +120,11 @@ suite("test_warm_up_cluster_batch") {
int i = 0
for (; i < retryTime; i++) {
sleep(1000)
- def status = getJobState(jobId[0][0])
- logger.info(status)
- if (status.equals("CANCELLED")) {
+ def statuses = getJobState(jobId[0][0])
+ if (statuses.any { it != null && it.equals("CANCELLED") }) {
assertTrue(false);
}
- if (status.equals("FINISHED")) {
+ if (statuses.any { it != null && it.equals("FINISHED") }) {
break;
}
}
diff --git
a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster_bigsize.groovy
b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster_bigsize.groovy
index 4f97116e120..ad80e053d20 100644
---
a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster_bigsize.groovy
+++
b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster_bigsize.groovy
@@ -130,12 +130,11 @@ suite("test_warm_up_cluster_bigsize") {
int i = 0
for (; i < retryTime; i++) {
sleep(1000)
- def status = getJobState(jobId[0][0])
- logger.info(status)
- if (status.equals("CANCELLED")) {
+ def statuses = getJobState(jobId[0][0])
+ if (statuses.any { it != null && it.equals("CANCELLED") }) {
assertTrue(false);
}
- if (status.equals("FINISHED")) {
+ if (statuses.any { it != null && it.equals("FINISHED") }) {
break;
}
}
diff --git
a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster_empty.groovy
b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster_empty.groovy
index d86238c5be9..f50ec580c2c 100644
---
a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster_empty.groovy
+++
b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster_empty.groovy
@@ -126,12 +126,11 @@ suite("test_warm_up_cluster_empty") {
int i = 0
for (; i < retryTime; i++) {
sleep(1000)
- def status = getJobState(jobId[0][0])
- logger.info(status)
- if (status.equals("CANCELLED")) {
+ def statuses = getJobState(jobId[0][0])
+ if (statuses.any { it != null && it.equals("CANCELLED") }) {
assertTrue(false);
}
- if (status.equals("FINISHED")) {
+ if (statuses.any { it != null && it.equals("FINISHED") }) {
break;
}
}
diff --git
a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_compute_group.groovy
b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_compute_group.groovy
index 5a16e92b36b..710f10d855b 100644
---
a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_compute_group.groovy
+++
b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_compute_group.groovy
@@ -157,12 +157,11 @@ suite("test_warm_up_compute_group") {
int i = 0
for (; i < retryTime; i++) {
sleep(1000)
- def status = getJobState(jobId[0][0])
- logger.info(status)
- if (status.equals("CANCELLED")) {
+ def statuses = getJobState(jobId[0][0])
+ if (statuses.any { it != null && it.equals("CANCELLED") }) {
assertTrue(false);
}
- if (status.equals("FINISHED")) {
+ if (statuses.any { it != null && it.equals("FINISHED") }) {
break;
}
}
diff --git
a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_partition.groovy
b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_partition.groovy
index c6819ad58ec..d4bf55441bd 100644
---
a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_partition.groovy
+++
b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_partition.groovy
@@ -121,12 +121,11 @@ suite("test_warm_up_partition") {
int i = 0
for (; i < retryTime; i++) {
sleep(1000)
- def status = getJobState(jobId[0][0])
- logger.info(status)
- if (status.equals("CANCELLED")) {
+ def statuses = getJobState(jobId[0][0])
+ if (statuses.any { it != null && it.equals("CANCELLED") }) {
assertTrue(false);
}
- if (status.equals("FINISHED")) {
+ if (statuses.any { it != null && it.equals("FINISHED") }) {
break;
}
}
diff --git
a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_same_table_multi_times.groovy
b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_same_table_multi_times.groovy
index fd28dec7ddd..027c064e91f 100644
---
a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_same_table_multi_times.groovy
+++
b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_same_table_multi_times.groovy
@@ -124,12 +124,11 @@ suite("test_warm_up_same_table_multi_times") {
int j = 0
for (; j < retryTime; j++) {
sleep(1000)
- def status = getJobState(jobId[0][0])
- logger.info(status)
- if (status.equals("CANCELLED")) {
+ def statuses = getJobState(jobId[0][0])
+ if (statuses.any { it != null && it.equals("CANCELLED") }) {
assertTrue(false);
}
- if (status.equals("FINISHED")) {
+ if (statuses.any { it != null && it.equals("FINISHED") }) {
break;
}
}
@@ -187,12 +186,11 @@ suite("test_warm_up_same_table_multi_times") {
j = 0
for (; j < retryTime; j++) {
sleep(1000)
- def status = getJobState(jobId[0][0])
- logger.info(status)
- if (status.equals("CANCELLED")) {
+ def statuses = getJobState(jobId[0][0])
+ if (statuses.any { it != null && it.equals("CANCELLED") }) {
assertTrue(false);
}
- if (status.equals("FINISHED")) {
+ if (statuses.any { it != null && it.equals("FINISHED") }) {
break;
}
}
diff --git
a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_table.groovy
b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_table.groovy
index 258e9e87ef6..c8d7325f9c0 100644
---
a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_table.groovy
+++
b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_table.groovy
@@ -25,7 +25,7 @@ suite("test_warm_up_table") {
}
def getTablesFromShowCommand = { jobId ->
def jobStateResult = sql """ SHOW WARM UP JOB WHERE ID = ${jobId} """
- return jobStateResult[0][9]
+ return jobStateResult[0]
}
List<String> ipList = new ArrayList<>();
@@ -145,12 +145,11 @@ suite("test_warm_up_table") {
int j = 0
for (; j < retryTime; j++) {
sleep(1000)
- def status = getJobState(jobId[0][0])
- logger.info(status)
- if (status.equals("CANCELLED")) {
+ def statuses = getJobState(jobId[0][0])
+ if (statuses.any { it != null && it.equals("CANCELLED") }) {
assertTrue(false);
}
- if (status.equals("FINISHED")) {
+ if (statuses.any { it != null && it.equals("FINISHED") }) {
break;
}
}
@@ -159,7 +158,8 @@ suite("test_warm_up_table") {
assertTrue(false);
}
def tablesString = getTablesFromShowCommand(jobId[0][0])
- assertTrue(tablesString.contains("customer"), tablesString)
+
+ assertTrue(tablesString.any { it != null && it.contains("customer") })
sleep(30000)
long ttl_cache_size = 0
getMetricsMethod.call(ipList[0], brpcPortList[0]) {
diff --git
a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_tables.groovy
b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_tables.groovy
index 03d45f1cce8..c37fa61dcd3 100644
---
a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_tables.groovy
+++
b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_tables.groovy
@@ -25,7 +25,7 @@ suite("test_warm_up_tables") {
}
def getTablesFromShowCommand = { jobId ->
def jobStateResult = sql """ SHOW WARM UP JOB WHERE ID = ${jobId} """
- return jobStateResult[0][9]
+ return jobStateResult[0]
}
List<String> ipList = new ArrayList<>();
@@ -149,12 +149,11 @@ suite("test_warm_up_tables") {
int i = 0
for (; i < retryTime; i++) {
sleep(1000)
- def status = getJobState(jobId[0][0])
- logger.info(status)
- if (status.equals("CANCELLED")) {
+ def statuses = getJobState(jobId[0][0])
+ if (statuses.any { it != null && it.equals("CANCELLED") }) {
assertTrue(false);
}
- if (status.equals("FINISHED")) {
+ if (statuses.any { it != null && it.equals("FINISHED") }) {
break;
}
}
@@ -169,8 +168,8 @@ suite("test_warm_up_tables") {
waitJobDone(jobId_);
def tablesString = getTablesFromShowCommand(jobId_[0][0])
- assertTrue(tablesString.contains("customer.p3"), tablesString)
- assertTrue(tablesString.contains("supplier"), tablesString)
+ assertTrue(tablesString.any { it != null && it.contains("customer") })
+ assertTrue(tablesString.any { it != null && it.contains("supplier") })
sleep(30000)
long ttl_cache_size = 0
diff --git a/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_1.groovy
b/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_1.groovy
index 6a07df14922..0ec671603a8 100644
--- a/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_1.groovy
+++ b/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_1.groovy
@@ -18,7 +18,17 @@
import org.codehaus.groovy.runtime.IOGroovyMethods
suite("alter_ttl_1") {
- sql """ use @regression_cluster_name1 """
+ def custoBeConfig = [
+ enable_evict_file_cache_in_advance : false,
+ file_cache_enter_disk_resource_limit_mode_percent : 99
+ ]
+
+ setBeConfigTemporary(custoBeConfig) {
+ sql "set global enable_auto_analyze = false"
+ def clusters = sql " SHOW CLUSTERS; "
+ assertTrue(!clusters.isEmpty())
+ def validCluster = clusters[0][0]
+ sql """use @${validCluster};""";
def ttlProperties = """ PROPERTIES("file_cache_ttl_seconds"="90") """
String[][] backends = sql """ show backends """
String backendId;
@@ -103,12 +113,14 @@ suite("alter_ttl_1") {
sql """ select count(*) from customer_ttl """
sleep(30000)
long ttl_cache_size = 0
+ long normal_cache_size = 0
getMetricsMethod.call() {
respCode, body ->
assertEquals("${respCode}".toString(), "200")
String out = "${body}".toString()
def strs = out.split('\n')
Boolean flag1 = false;
+ Boolean flag2 = false;
for (String line in strs) {
if (flag1) break;
if (line.contains("ttl_cache_size")) {
@@ -119,67 +131,47 @@ suite("alter_ttl_1") {
ttl_cache_size = line.substring(i).toLong()
flag1 = true
}
- }
- assertTrue(flag1)
- }
- sql """ ALTER TABLE customer_ttl SET ("file_cache_ttl_seconds"="140") """
- sleep(80000)
- // after 110s, the first load has translate to normal
- getMetricsMethod.call() {
- respCode, body ->
- assertEquals("${respCode}".toString(), "200")
- String out = "${body}".toString()
- def strs = out.split('\n')
- Boolean flag1 = false;
- for (String line in strs) {
- if (flag1) break;
- if (line.contains("ttl_cache_size")) {
- if (line.startsWith("#")) {
- continue
- }
- def i = line.indexOf(' ')
- assertEquals(line.substring(i).toLong(), 0)
-
- }
-
if (line.contains("normal_queue_cache_size")) {
if (line.startsWith("#")) {
continue
}
def i = line.indexOf(' ')
- assertEquals(line.substring(i).toLong(), ttl_cache_size)
- flag1 = true
+ normal_cache_size = line.substring(i).toLong()
+ flag2 = true
}
}
- assertTrue(flag1)
+ assertTrue(flag1 && flag2)
}
- // wait for ttl timeout
- sleep(50000)
+ sql """ ALTER TABLE customer_ttl SET ("file_cache_ttl_seconds"="100") """
+ sleep(80000)
+ // after 110s, the first load has transitioned to normal
getMetricsMethod.call() {
respCode, body ->
assertEquals("${respCode}".toString(), "200")
String out = "${body}".toString()
def strs = out.split('\n')
Boolean flag1 = false;
- Boolean flag2 = false;
for (String line in strs) {
- if (flag1 && flag2) break;
+ if (flag1) break;
if (line.contains("ttl_cache_size")) {
if (line.startsWith("#")) {
continue
}
def i = line.indexOf(' ')
assertEquals(line.substring(i).toLong(), 0)
+
}
+
if (line.contains("normal_queue_cache_size")) {
if (line.startsWith("#")) {
continue
}
def i = line.indexOf(' ')
- assertEquals(line.substring(i).toLong(), ttl_cache_size)
+ assertEquals(line.substring(i).toLong(), ttl_cache_size +
normal_cache_size)
flag1 = true
}
}
assertTrue(flag1)
}
+ }
}
diff --git a/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_4.groovy
b/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_4.groovy
index 6edda04994c..03ae1dfa5cb 100644
--- a/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_4.groovy
+++ b/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_4.groovy
@@ -148,7 +148,7 @@ suite("alter_ttl_4") {
}
sleep(60000)
// one customer table would take about 1.3GB, the total cache size is 20GB
- // the following would take 20.8G all
+ // the following would take 20G all
// evict customer_ttl
load_customer_once("customer")
load_customer_once("customer")
@@ -166,6 +166,12 @@ suite("alter_ttl_4") {
load_customer_once("customer")
load_customer_once("customer")
load_customer_once("customer")
+ load_customer_once("customer")
+ load_customer_once("customer")
+ load_customer_once("customer")
+ load_customer_once("customer")
+ load_customer_once("customer")
+ load_customer_once("customer")
// some datas in s3 and will download them
sql """ select C_CUSTKEY from customer_ttl order by C_CUSTKEY limit 1"""
diff --git
a/regression-test/suites/cloud_p0/cache/ttl/create_table_as_select.groovy
b/regression-test/suites/cloud_p0/cache/ttl/create_table_as_select.groovy
index 689c6faa168..97159ac0db5 100644
--- a/regression-test/suites/cloud_p0/cache/ttl/create_table_as_select.groovy
+++ b/regression-test/suites/cloud_p0/cache/ttl/create_table_as_select.groovy
@@ -135,29 +135,6 @@ def clearFileCache = { check_func ->
DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 32
PROPERTIES("file_cache_ttl_seconds"="120","disable_auto_compaction" = "true")
as select * from customer_ttl"""
- sleep(30000) // 30s
- getMetricsMethod.call() {
- respCode, body ->
- assertEquals("${respCode}".toString(), "200")
- String out = "${body}".toString()
- def strs = out.split('\n')
- Boolean flag1 = false;
- Boolean flag2 = false;
- for (String line in strs) {
- if (flag1 && flag2) break;
- if (line.contains("ttl_cache_size")) {
- if (line.startsWith("#")) {
- continue
- }
- def i = line.indexOf(' ')
- long cur_ttl_cache_size = line.substring(i).toLong()
- assertTrue(Math.abs(2* ttl_cache_size -
cur_ttl_cache_size) < 10000)
- flag1 = true
- }
- }
- assertTrue(flag1)
- }
-
sleep(150000)
getMetricsMethod.call() {
respCode, body ->
diff --git a/regression-test/suites/cloud_p0/cache/ttl/test_ttl_preempt.groovy
b/regression-test/suites/cloud_p0/cache/ttl/test_ttl_preempt.groovy
index e8008a05e13..f1d10b13556 100644
--- a/regression-test/suites/cloud_p0/cache/ttl/test_ttl_preempt.groovy
+++ b/regression-test/suites/cloud_p0/cache/ttl/test_ttl_preempt.groovy
@@ -123,7 +123,14 @@ suite("test_ttl_preempt") {
}
// one customer table would take about 1.3GB, the total cache size is 20GB
- // the following would take 19.5G all
+ // the following would take 20G all
+ load_customer_once("customer")
+ load_customer_once("customer")
+ load_customer_once("customer")
+ load_customer_once("customer")
+ load_customer_once("customer")
+ load_customer_once("customer")
+ load_customer_once("customer")
load_customer_once("customer")
load_customer_once("customer")
load_customer_once("customer")
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]