This is an automated email from the ASF dual-hosted git repository.
yiguolei pushed a commit to branch branch-4.0
in repository https://gitbox.apache.org/repos/asf/doris.git
The following commit(s) were added to refs/heads/branch-4.0 by this push:
new f50d7e75e3e branch-4.0: [fix](sql-cache) make test_query_cache_hit work #58349 (#58387)
f50d7e75e3e is described below
commit f50d7e75e3eb2ce0d2176646394b9767133b4a0c
Author: Mingyu Chen (Rayner) <[email protected]>
AuthorDate: Thu Nov 27 08:55:08 2025 +0800
branch-4.0: [fix](sql-cache) make test_query_cache_hit work #58349 (#58387)
bp #58349
---
.../java/org/apache/doris/qe/ConnectProcessor.java | 12 +--
.../java/org/apache/doris/qe/SessionVariable.java | 9 ---
.../hive/test_hive_query_cache.groovy | 88 +++++++++++-----------
.../hive/test_hive_statistic_auto.groovy | 6 +-
.../external_table_p0/tvf/test_backends_tvf.groovy | 10 +--
.../tvf/test_frontends_disks_tvf.groovy | 8 +-
.../tvf/test_frontends_tvf.groovy | 9 ++-
7 files changed, 67 insertions(+), 75 deletions(-)
diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/ConnectProcessor.java b/fe/fe-core/src/main/java/org/apache/doris/qe/ConnectProcessor.java
index cb6d36dfe83..99527e99f2c 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/qe/ConnectProcessor.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/qe/ConnectProcessor.java
@@ -37,6 +37,7 @@ import org.apache.doris.common.Pair;
import org.apache.doris.common.UserException;
import org.apache.doris.common.util.DebugUtil;
import org.apache.doris.common.util.SqlUtils;
+import org.apache.doris.common.util.Util;
import org.apache.doris.datasource.CatalogIf;
import org.apache.doris.datasource.InternalCatalog;
import org.apache.doris.metric.MetricRepo;
@@ -250,7 +251,7 @@ public abstract class ConnectProcessor {
if (stmts == null) {
stmts = parseWithFallback(originStmt, convertedStmt, sessionVariable);
- if (stmts == null) {
+ if (stmts == null || stmts.isEmpty()) {
return;
}
}
@@ -407,15 +408,9 @@ public abstract class ConnectProcessor {
logicalPlanAdapter.setOrigStmt(statementContext.getOriginStatement());
logicalPlanAdapter.setUserInfo(ctx.getCurrentUserIdentity());
return ImmutableList.of(logicalPlanAdapter);
- } else {
- if (!ctx.getSessionVariable().testQueryCacheHit.equals("none")) {
- throw new UserException("The variable test_query_cache_hit is set to "
- + ConnectContext.get().getSessionVariable().testQueryCacheHit
- + ", but the query cache is not hit.");
- }
}
} catch (Throwable t) {
- LOG.warn("Parse from sql cache failed: " + t.getMessage(), t);
+ LOG.warn("Parse from sql cache failed with unexpected exception:
{}", Util.getRootCauseMessage(t), t);
} finally {
statementContext.releasePlannerResources();
}
@@ -785,3 +780,4 @@ public abstract class ConnectProcessor {
throw new NotSupportedException("Just MysqlConnectProcessor support execute");
}
}
+
diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/SessionVariable.java b/fe/fe-core/src/main/java/org/apache/doris/qe/SessionVariable.java
index 063a4edb46c..2ed41fdde85 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/qe/SessionVariable.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/qe/SessionVariable.java
@@ -600,8 +600,6 @@ public class SessionVariable implements Serializable, Writable {
public static final String EXPAND_RUNTIME_FILTER_BY_INNER_JION = "expand_runtime_filter_by_inner_join";
- public static final String TEST_QUERY_CACHE_HIT = "test_query_cache_hit";
-
public static final String ENABLE_AUTO_ANALYZE = "enable_auto_analyze";
public static final String FORCE_SAMPLE_ANALYZE = "force_sample_analyze";
@@ -2341,13 +2339,6 @@ public class SessionVariable implements Serializable, Writable {
}, checker = "checkPartialUpdateNewKeyBehavior", options = {"APPEND", "ERROR"})
public String partialUpdateNewKeyPolicy = "APPEND";
- @VariableMgr.VarAttr(name = TEST_QUERY_CACHE_HIT, description = {
- "用于测试查询缓存是否命中,如果未命中指定类型的缓存,则会报错",
- "Used to test whether the query cache is hit. "
- + "If the specified type of cache is not hit, an error
will be reported."},
- options = {"none", "sql_cache", "partition_cache"})
- public String testQueryCacheHit = "none";
-
@VariableMgr.VarAttr(name = ENABLE_AUTO_ANALYZE,
description = {"该参数控制是否开启自动收集", "Set false to disable auto
analyze"},
flag = VariableMgr.GLOBAL)
diff --git a/regression-test/suites/external_table_p0/hive/test_hive_query_cache.groovy b/regression-test/suites/external_table_p0/hive/test_hive_query_cache.groovy
index 7faaaa83516..947c8c2248f 100644
--- a/regression-test/suites/external_table_p0/hive/test_hive_query_cache.groovy
+++ b/regression-test/suites/external_table_p0/hive/test_hive_query_cache.groovy
@@ -17,6 +17,19 @@
suite("test_hive_query_cache",
"p0,external,hive,external_docker,external_docker_hive") {
withGlobalLock("cache_last_version_interval_second") {
+ def assertHasCache = { String sqlStr ->
+ explain {
+ sql ("physical plan ${sqlStr}")
+ contains("PhysicalSqlCache")
+ }
+ }
+
+ def assertNoCache = { String sqlStr ->
+ explain {
+ sql ("physical plan ${sqlStr}")
+ notContains("PhysicalSqlCache")
+ }
+ }
def q01 = {
qt_q24 """ select name, count(1) as c from student group by name
order by c desc;"""
qt_q25 """ select lo_orderkey, count(1) as c from lineorder group
by lo_orderkey order by c desc;"""
@@ -65,9 +78,9 @@ suite("test_hive_query_cache", "p0,external,hive,external_docker,external_docker
sql """drop catalog if exists ${catalog_name}"""
sql """create catalog if not exists ${catalog_name} properties (
- "type"="hms",
- 'hive.metastore.uris' = 'thrift://${externalEnvIp}:${hms_port}'
- );"""
+ "type"="hms",
+ 'hive.metastore.uris' = 'thrift://${externalEnvIp}:${hms_port}'
+ );"""
sql """switch ${catalog_name}"""
sql """set enable_fallback_to_original_planner=false"""
@@ -75,30 +88,30 @@ suite("test_hive_query_cache", "p0,external,hive,external_docker,external_docker
sql """set enable_hive_sql_cache=false"""
def tpch_1sf_q09 = """
- select
- nation,
- o_year,
- sum(amount) as sum_profit
- from
- (
- select
- n_name as nation,
- extract(year from o_orderdate) as o_year,
- l_extendedprice * (1 - l_discount) - ps_supplycost * l_quantity as amount
- from
- lineitem join[shuffle] orders on o_orderkey = l_orderkey
- join[shuffle] partsupp on ps_suppkey = l_suppkey and ps_partkey = l_partkey
- join[shuffle] part on p_partkey = l_partkey and p_name like '%green%'
- join supplier on s_suppkey = l_suppkey
- join nation on s_nationkey = n_nationkey
- ) as profit
- group by
- nation,
- o_year
- order by
- nation,
- o_year desc;
- """
+ select
+ nation,
+ o_year,
+ sum(amount) as sum_profit
+ from
+ (
+ select
+ n_name as nation,
+ extract(year from o_orderdate) as o_year,
+ l_extendedprice * (1 - l_discount) - ps_supplycost * l_quantity as amount
+ from
+ lineitem join[shuffle] orders on o_orderkey = l_orderkey
+ join[shuffle] partsupp on ps_suppkey = l_suppkey and ps_partkey = l_partkey
+ join[shuffle] part on p_partkey = l_partkey and p_name like '%green%'
+ join supplier on s_suppkey = l_suppkey
+ join nation on s_nationkey = n_nationkey
+ ) as profit
+ group by
+ nation,
+ o_year
+ order by
+ nation,
+ o_year desc;
+ """
// // test sql cache
sql """admin set frontend
config("cache_last_version_interval_second" = "1");"""
@@ -115,7 +128,6 @@ suite("test_hive_query_cache", "p0,external,hive,external_docker,external_docker
try {
sql """set enable_sql_cache=true;"""
sql """set enable_hive_sql_cache=true"""
- sql """set test_query_cache_hit="none";"""
sql """select * from lineitem where l_suppkey="abc";""" // non
exist l_suppkey;
sql """select * from lineitem where l_suppkey="abc";"""
} catch (java.sql.SQLException t) {
@@ -127,7 +139,6 @@ suite("test_hive_query_cache", "p0,external,hive,external_docker,external_docker
sql """use `default`"""
sql """set enable_sql_cache=true;"""
sql """set enable_hive_sql_cache=true"""
- sql """set test_query_cache_hit="none";"""
// 1. first query, because we need to init the schema of table_with_x01 to update the table's update time
// then sleep 2 seconds to wait longer than Config.cache_last_version_interval_second,
// so that when doing the second query, we can fill the cache on BE
@@ -136,22 +147,15 @@ suite("test_hive_query_cache", "p0,external,hive,external_docker,external_docker
// 2. second query is for filling the cache on BE
qt_sql2 """select dt, dt, k2, k5, dt from table_with_x01 where dt
in ('2022-11-10') or dt in ('2022-11-10') order by k2 desc limit 10;"""
// 3. third query, to test cache hit.
- sql """set test_query_cache_hit="sql";"""
+ assertHasCache """select dt, dt, k2, k5, dt from table_with_x01 where dt in ('2022-11-10') or dt in ('2022-11-10') order by k2 desc limit 10;"""
qt_sql3 """select dt, dt, k2, k5, dt from table_with_x01 where dt
in ('2022-11-10') or dt in ('2022-11-10') order by k2 desc limit 10;"""
// test not hit
- try {
- sql """set enable_sql_cache=true;"""
- sql """set enable_hive_sql_cache=true"""
- sql """set test_query_cache_hit="sql";"""
- def r = UUID.randomUUID().toString();
- // using a random sql
- sql """select dt, "${r}" from table_with_x01 where dt in
('2022-11-10') or dt in ('2022-11-10') order by k2 desc limit 10;"""
- assertTrue(1 == 2)
- } catch (Exception t) {
- print t.getMessage()
- assertTrue(t.getMessage().contains("but the query cache is not hit"));
- }
+ sql """set enable_sql_cache=true;"""
+ sql """set enable_hive_sql_cache=true"""
+ def r = UUID.randomUUID().toString();
+ // using a random sql
+ assertNoCache """select dt, "${r}" from table_with_x01 where dt in ('2022-11-10') or dt in ('2022-11-10') order by k2 desc limit 10;"""
}
}
}
diff --git a/regression-test/suites/external_table_p0/hive/test_hive_statistic_auto.groovy b/regression-test/suites/external_table_p0/hive/test_hive_statistic_auto.groovy
index f79ccada9ed..5a97470b2e9 100644
--- a/regression-test/suites/external_table_p0/hive/test_hive_statistic_auto.groovy
+++ b/regression-test/suites/external_table_p0/hive/test_hive_statistic_auto.groovy
@@ -42,7 +42,7 @@ suite("test_hive_statistic_auto", "p0,external,hive,external_docker,external_doc
for (int i = 0; i < 10; i++) {
Thread.sleep(1000)
def result = sql """show column stats `statistics` (lo_quantity)"""
- if (result.size <= 0) {
+ if (result.size() <= 0) {
continue;
}
assertEquals(result.size(), 1)
@@ -56,7 +56,7 @@ suite("test_hive_statistic_auto", "p0,external,hive,external_docker,external_doc
assertEquals(result[0][8], "N/A")
result = sql """show column stats `statistics` (lo_orderkey)"""
- if (result.size <= 0) {
+ if (result.size() <= 0) {
continue;
}
assertEquals(result.size(), 1)
@@ -70,7 +70,7 @@ suite("test_hive_statistic_auto", "p0,external,hive,external_docker,external_doc
assertEquals(result[0][8], "N/A")
result = sql """show column stats `statistics` (lo_linenumber)"""
- if (result.size <= 0) {
+ if (result.size() <= 0) {
continue;
}
assertEquals(result.size(), 1)
diff --git a/regression-test/suites/external_table_p0/tvf/test_backends_tvf.groovy b/regression-test/suites/external_table_p0/tvf/test_backends_tvf.groovy
index b06d0868eae..241e15d6f6a 100644
--- a/regression-test/suites/external_table_p0/tvf/test_backends_tvf.groovy
+++ b/regression-test/suites/external_table_p0/tvf/test_backends_tvf.groovy
@@ -19,30 +19,30 @@
suite("test_backends_tvf","p0,external,tvf,external_docker") {
List<List<Object>> table = sql """ select * from backends(); """
assertTrue(table.size() > 0)
- assertEquals(25, table[0].size)
+ assertEquals(25, table[0].size())
// filter columns
table = sql """ select BackendId, Host, Alive, TotalCapacity, Version,
NodeRole from backends();"""
assertTrue(table.size() > 0)
- assertTrue(table[0].size == 6)
+ assertTrue(table[0].size() == 6)
assertEquals(true, table[0][2])
// case insensitive
table = sql """ select backendid, Host, alive, Totalcapacity, version,
nodeRole from backends();"""
assertTrue(table.size() > 0)
- assertTrue(table[0].size == 6)
+ assertTrue(table[0].size() == 6)
assertEquals(true, table[0][2])
// test aliase columns
table = sql """ select backendid as id, Host as name, alive, NodeRole as r
from backends();"""
assertTrue(table.size() > 0)
- assertTrue(table[0].size == 4)
+ assertTrue(table[0].size() == 4)
assertEquals(true, table[0][2])
// test changing position of columns
table = sql """ select Host as name, NodeRole as r, alive from
backends();"""
assertTrue(table.size() > 0)
- assertTrue(table[0].size == 3)
+ assertTrue(table[0].size() == 3)
assertEquals(true, table[0][2])
def res = sql """ select count(*) from backends() where alive = 1; """
diff --git a/regression-test/suites/external_table_p0/tvf/test_frontends_disks_tvf.groovy b/regression-test/suites/external_table_p0/tvf/test_frontends_disks_tvf.groovy
index b4355d8dbaf..509fa5a75dc 100644
--- a/regression-test/suites/external_table_p0/tvf/test_frontends_disks_tvf.groovy
+++ b/regression-test/suites/external_table_p0/tvf/test_frontends_disks_tvf.groovy
@@ -19,23 +19,23 @@
suite("test_frontends_disks_tvf", "p0,external,external_docker") {
List<List<Object>> table = sql """ select * from `frontends_disks`(); """
assertTrue(table.size() > 0)
- assertTrue(table[0].size == 10)
+ assertTrue(table[0].size() == 10)
// filter columns
table = sql """ select Name from `frontends_disks`();"""
assertTrue(table.size() > 0)
- assertTrue(table[0].size == 1)
+ assertTrue(table[0].size() == 1)
// case insensitive
table = sql """ select name, host, dirtype, dir from frontends_disks()
order by dirtype;"""
assertTrue(table.size() > 0)
- assertTrue(table[0].size == 4)
+ assertTrue(table[0].size() == 4)
assertEquals("audit-log", table[0][2])
// test aliase columns
table = sql """ select name as n, host as h, dirtype as a from
frontends_disks() order by dirtype; """
assertTrue(table.size() > 0)
- assertTrue(table[0].size == 3)
+ assertTrue(table[0].size() == 3)
assertEquals("audit-log", table[0][2])
// test changing position of columns
diff --git a/regression-test/suites/external_table_p0/tvf/test_frontends_tvf.groovy b/regression-test/suites/external_table_p0/tvf/test_frontends_tvf.groovy
index 0f0e4450cef..0fa743d03cd 100644
--- a/regression-test/suites/external_table_p0/tvf/test_frontends_tvf.groovy
+++ b/regression-test/suites/external_table_p0/tvf/test_frontends_tvf.groovy
@@ -18,24 +18,25 @@
// This suit test the `frontends` tvf
suite("test_frontends_tvf","p0,external,tvf,external_docker") {
List<List<Object>> table = sql """ select * from `frontends`(); """
+ logger.info("${table}")
assertTrue(table.size() > 0)
- assertTrue(table[0].size == 19)
+ assertTrue(table[0].size() == 19)
// filter columns
table = sql """ select Name from `frontends`();"""
assertTrue(table.size() > 0)
- assertTrue(table[0].size == 1)
+ assertTrue(table[0].size() == 1)
// case insensitive
table = sql """ select name, host, editlogport, httpport, alive from
frontends();"""
assertTrue(table.size() > 0)
- assertTrue(table[0].size == 5)
+ assertTrue(table[0].size() == 5)
assertEquals("true", table[0][4])
// test aliase columns
table = sql """ select name as n, host as h, alive as a, editlogport as e
from frontends(); """
assertTrue(table.size() > 0)
- assertTrue(table[0].size == 4)
+ assertTrue(table[0].size() == 4)
assertEquals("true", table[0][2])
// test changing position of columns
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]